Compare commits
123 Commits
nux/poleca
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e2e43b8bf5 | ||
|
|
0e19529186 | ||
|
|
2a789c8440 | ||
|
|
9d87f01823 | ||
|
|
149da3d2e2 | ||
|
|
bad278d797 | ||
|
|
1c9ce267d5 | ||
|
|
01e336edb6 | ||
|
|
a1f843c11d | ||
|
|
1f364ee540 | ||
|
|
afdadc77ff | ||
|
|
c66dc4594c | ||
|
|
5ab01f383a | ||
|
|
d0036b0768 | ||
|
|
eaff7c3936 | ||
|
|
77a01010c3 | ||
|
|
9c1601df82 | ||
|
|
9cac229023 | ||
|
|
6202d783c2 | ||
|
|
04ddb2c3bd | ||
|
|
acd66002ca | ||
|
|
5a0dac8f93 | ||
|
|
c486732856 | ||
|
|
1d2b29b994 | ||
|
|
55048333c6 | ||
|
|
c4b74ee7bf | ||
|
|
43f6b63792 | ||
|
|
c9e877cdd6 | ||
|
|
538f44c038 | ||
|
|
beb863897f | ||
|
|
d12408703b | ||
|
|
abf72fb412 | ||
|
|
4d83d79da9 | ||
|
|
4c5f0f4e11 | ||
|
|
749ff07ca4 | ||
|
|
0960fd8bf1 | ||
|
|
43d76df0fa | ||
|
|
4e3941bcb0 | ||
|
|
39401e3606 | ||
|
|
9cf012c0d5 | ||
|
|
36b3c3b00a | ||
|
|
a8406554a0 | ||
|
|
996cf4a670 | ||
|
|
dfc5605831 | ||
|
|
dcc5f9c767 | ||
|
|
33e509ea39 | ||
|
|
8957aec489 | ||
| 0c398df67b | |||
|
|
9308de59a9 | ||
|
|
5971f4c470 | ||
| 539a75e4e6 | |||
| bc930cf00e | |||
| 5dd1cffe05 | |||
| a9662da3a1 | |||
| 87ddb4da13 | |||
|
|
baec5b6147 | ||
|
|
5c21e110d0 | ||
|
|
45ffac6e92 | ||
|
|
809b0eb028 | ||
|
|
31bd120077 | ||
|
|
92ccacffd9 | ||
|
|
a86c7d954f | ||
|
|
c94a2301eb | ||
|
|
6d18e0a88b | ||
|
|
0e0547b3e1 | ||
|
|
0ff092ae9f | ||
|
|
065d428f76 | ||
|
|
072c4649de | ||
|
|
fe09e59c8c | ||
|
|
57f062a9b6 | ||
|
|
8a8603e6df | ||
|
|
212f08ad03 | ||
|
|
7926d7b3e8 | ||
|
|
aca753296b | ||
|
|
75739cbaaf | ||
|
|
0c791a4d40 | ||
|
|
2ee5e1c5ad | ||
|
|
e937717147 | ||
|
|
b316239d12 | ||
|
|
1d260d377b | ||
|
|
30e65b5ca7 | ||
|
|
b178d056f6 | ||
|
|
2f0f0763cc | ||
|
|
b1e8b11948 | ||
|
|
36c7222d5b | ||
|
|
baf9311bfe | ||
|
|
377b4877cd | ||
|
|
9dcddaf13d | ||
|
|
db60489d0f | ||
|
|
63d60f1dcd | ||
|
|
533caf8e4b | ||
|
|
f635555f93 | ||
|
|
5bb74b19ed | ||
|
|
9db9fc2af8 | ||
|
|
3da0d5a7c8 | ||
|
|
4e4824a6c6 | ||
|
|
2fb787c7a2 | ||
|
|
70ca511ee2 | ||
|
|
71077e93dd | ||
|
|
6c86616273 | ||
|
|
3442471a93 | ||
|
|
f276b9d28a | ||
|
|
8941e7b049 | ||
|
|
44fbd6eac7 | ||
|
|
2a0420177e | ||
|
|
dc3fd47a32 | ||
|
|
889c5863fa | ||
|
|
d9f1fe9e48 | ||
|
|
c4d8e26dcb | ||
|
|
d4126bb876 | ||
|
|
be96bb0050 | ||
|
|
bebf425ac5 | ||
|
|
ee5221889f | ||
|
|
73d577e3c3 | ||
|
|
6d32c6206f | ||
|
|
493507ad4e | ||
|
|
232fc79cd5 | ||
|
|
429f8e96ef | ||
|
|
83ddef4f88 | ||
|
|
14435cacad | ||
|
|
adf41b4096 | ||
|
|
0fb3e8d5fe | ||
|
|
16d3a92455 |
1
.beads/.gitignore
vendored
1
.beads/.gitignore
vendored
@@ -10,6 +10,7 @@ daemon.lock
|
||||
daemon.log
|
||||
daemon.pid
|
||||
bd.sock
|
||||
bd.sock.startlock
|
||||
sync-state.json
|
||||
last-touched
|
||||
|
||||
|
||||
@@ -28,16 +28,13 @@ This is physics, not politeness. Gas Town is a steam engine - you are a piston.
|
||||
- `gt mol status` - Check your hooked work
|
||||
- `gt mail inbox` - Check for messages
|
||||
- `bd ready` - Find available work (no blockers)
|
||||
- `bd sync` - Sync beads changes
|
||||
|
||||
## Session Close Protocol
|
||||
|
||||
Before saying "done":
|
||||
1. git status (check what changed)
|
||||
2. git add <files> (stage code changes)
|
||||
3. bd sync (commit beads changes)
|
||||
4. git commit -m "..." (commit code)
|
||||
5. bd sync (commit any new beads changes)
|
||||
6. git push (push to remote)
|
||||
3. git commit -m "..." (commit code)
|
||||
4. git push (push to remote)
|
||||
|
||||
**Work is not done until pushed.**
|
||||
**Work is not done until pushed.** Beads changes are automatically committed with Dolt.
|
||||
|
||||
@@ -67,3 +67,6 @@ sync-branch: beads-sync
|
||||
# Format: external:<project>:<capability> in bd dep commands
|
||||
external_projects:
|
||||
beads: ../../../beads/mayor/rig
|
||||
|
||||
# Custom issue types for Gas Town (fallback when database is unavailable)
|
||||
types.custom: "agent,role,rig,convoy,slot,queue,event,message,molecule,gate,merge-request"
|
||||
|
||||
@@ -341,6 +341,19 @@ needs = ["trigger-pending-spawns", "dispatch-gated-molecules", "fire-notificatio
|
||||
description = """
|
||||
Check Witness and Refinery health for each rig.
|
||||
|
||||
**IMPORTANT: Skip DOCKED/PARKED rigs**
|
||||
Before checking any rig, verify its operational state:
|
||||
```bash
|
||||
gt rig status <rig>
|
||||
# Check the Status: line - if DOCKED or PARKED, skip entirely
|
||||
```
|
||||
|
||||
DOCKED rigs are globally shut down - do NOT:
|
||||
- Check their witness/refinery status
|
||||
- Send health pings
|
||||
- Attempt restarts
|
||||
Simply skip them and move to the next rig.
|
||||
|
||||
**IMPORTANT: Idle Town Protocol**
|
||||
Before sending health check nudges, check if the town is idle:
|
||||
```bash
|
||||
|
||||
@@ -246,5 +246,4 @@ Dog returns to available state in the pool.
|
||||
[vars]
|
||||
[vars.mode]
|
||||
description = "GC mode: 'conservative' or 'aggressive'"
|
||||
required = true
|
||||
default = "conservative"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
# Block PRs by preventing pushes to arbitrary feature branches.
|
||||
# Gas Town agents push to main (crew) or polecat/* branches (polecats).
|
||||
# PRs are for external contributors only.
|
||||
|
||||
10
.github/workflows/release.yml
vendored
10
.github/workflows/release.yml
vendored
@@ -60,15 +60,15 @@ jobs:
|
||||
node-version: '22'
|
||||
registry-url: 'https://registry.npmjs.org'
|
||||
|
||||
- name: Update npm for OIDC trusted publishing
|
||||
run: npm install -g npm@latest # Requires npm >= 11.5.1 for trusted publishing
|
||||
- name: Update npm for provenance support
|
||||
run: npm install -g npm@latest
|
||||
|
||||
- name: Publish to npm
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
run: |
|
||||
cd npm-package
|
||||
npm publish --access public
|
||||
# Uses OIDC trusted publishing - no token needed
|
||||
# Provenance attestations are automatic with trusted publishing
|
||||
npm publish --access public --provenance
|
||||
|
||||
update-homebrew:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
@@ -164,7 +164,7 @@ release:
|
||||
|
||||
**Homebrew (macOS/Linux):**
|
||||
```bash
|
||||
brew install steveyegge/gastown/gt
|
||||
brew install gastown
|
||||
```
|
||||
|
||||
**npm (Node.js):**
|
||||
|
||||
11
Makefile
11
Makefile
@@ -2,6 +2,7 @@
|
||||
|
||||
BINARY := gt
|
||||
BUILD_DIR := .
|
||||
INSTALL_DIR := $(HOME)/.local/bin
|
||||
|
||||
# Get version info for ldflags
|
||||
VERSION := $(shell git describe --tags --always --dirty 2>/dev/null || echo "dev")
|
||||
@@ -10,7 +11,8 @@ BUILD_TIME := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
|
||||
LDFLAGS := -X github.com/steveyegge/gastown/internal/cmd.Version=$(VERSION) \
|
||||
-X github.com/steveyegge/gastown/internal/cmd.Commit=$(COMMIT) \
|
||||
-X github.com/steveyegge/gastown/internal/cmd.BuildTime=$(BUILD_TIME)
|
||||
-X github.com/steveyegge/gastown/internal/cmd.BuildTime=$(BUILD_TIME) \
|
||||
-X github.com/steveyegge/gastown/internal/cmd.BuiltProperly=1
|
||||
|
||||
generate:
|
||||
go generate ./...
|
||||
@@ -22,8 +24,11 @@ ifeq ($(shell uname),Darwin)
|
||||
@echo "Signed $(BINARY) for macOS"
|
||||
endif
|
||||
|
||||
install: generate
|
||||
go install -ldflags "$(LDFLAGS)" ./cmd/gt
|
||||
install: build
|
||||
@mkdir -p $(INSTALL_DIR)
|
||||
@rm -f $(INSTALL_DIR)/$(BINARY)
|
||||
@cp $(BUILD_DIR)/$(BINARY) $(INSTALL_DIR)/$(BINARY)
|
||||
@echo "Installed $(BINARY) to $(INSTALL_DIR)/$(BINARY)"
|
||||
|
||||
clean:
|
||||
rm -f $(BUILD_DIR)/$(BINARY)
|
||||
|
||||
14
README.md
14
README.md
@@ -97,9 +97,11 @@ Git-backed issue tracking system that stores work state as structured data.
|
||||
|
||||
```bash
|
||||
# Install Gas Town
|
||||
go install github.com/steveyegge/gastown/cmd/gt@latest
|
||||
$ brew install gastown # Homebrew (recommended)
|
||||
$ npm install -g @gastown/gt # npm
|
||||
$ go install github.com/steveyegge/gastown/cmd/gt@latest # From source
|
||||
|
||||
# Add Go binaries to PATH (add to ~/.zshrc or ~/.bashrc)
|
||||
# If using go install, add Go binaries to PATH (add to ~/.zshrc or ~/.bashrc)
|
||||
export PATH="$PATH:$HOME/go/bin"
|
||||
|
||||
# Create workspace with git initialization
|
||||
@@ -120,11 +122,11 @@ gt mayor attach
|
||||
## Quick Start Guide
|
||||
|
||||
### Getting Started
|
||||
Run
|
||||
Run
|
||||
```shell
|
||||
gt install ~/gt --git &&
|
||||
cd ~/gt &&
|
||||
gt config agent list &&
|
||||
gt install ~/gt --git &&
|
||||
cd ~/gt &&
|
||||
gt config agent list &&
|
||||
gt mayor attach
|
||||
```
|
||||
and tell the Mayor what you want to build!
|
||||
|
||||
@@ -268,13 +268,13 @@ ssh -T git@github.com
|
||||
git config --global credential.helper cache
|
||||
```
|
||||
|
||||
### Beads sync issues
|
||||
### Beads issues
|
||||
|
||||
If beads aren't syncing across clones:
|
||||
If experiencing beads problems:
|
||||
|
||||
```bash
|
||||
cd ~/gt/myproject/mayor/rig
|
||||
bd sync --status # Check sync status
|
||||
bd status # Check database health
|
||||
bd doctor # Run beads health check
|
||||
```
|
||||
|
||||
|
||||
@@ -626,7 +626,6 @@ bd create --title="..." --type=task
|
||||
bd update <id> --status=in_progress
|
||||
bd close <id>
|
||||
bd dep add <child> <parent> # child depends on parent
|
||||
bd sync # Push/pull changes
|
||||
```
|
||||
|
||||
## Patrol Agents
|
||||
|
||||
@@ -119,6 +119,12 @@ type Beads struct {
|
||||
// Populated on first call to getTownRoot() to avoid filesystem walk on every operation.
|
||||
townRoot string
|
||||
searchedRoot bool
|
||||
|
||||
// RPC client for daemon communication (lazy-initialized).
|
||||
// When available, RPC is preferred over subprocess for performance.
|
||||
rpcClient *rpcClient
|
||||
rpcChecked bool
|
||||
rpcAvailable bool
|
||||
}
|
||||
|
||||
// New creates a new Beads wrapper for the given directory.
|
||||
@@ -287,7 +293,14 @@ func filterBeadsEnv(environ []string) []string {
|
||||
}
|
||||
|
||||
// List returns issues matching the given options.
|
||||
// Uses daemon RPC when available for better performance (~40ms faster).
|
||||
func (b *Beads) List(opts ListOptions) ([]*Issue, error) {
|
||||
// Try RPC first (faster when daemon is running)
|
||||
if issues, err := b.listViaRPC(opts); err == nil {
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// Fall back to subprocess
|
||||
args := []string{"list", "--json"}
|
||||
|
||||
if opts.Status != "" {
|
||||
@@ -327,7 +340,7 @@ func (b *Beads) List(opts ListOptions) ([]*Issue, error) {
|
||||
}
|
||||
|
||||
// ListByAssignee returns all issues assigned to a specific assignee.
|
||||
// The assignee is typically in the format "rig/polecatName" (e.g., "gastown/Toast").
|
||||
// The assignee is typically in the format "rig/polecats/polecatName" (e.g., "gastown/polecats/Toast").
|
||||
func (b *Beads) ListByAssignee(assignee string) ([]*Issue, error) {
|
||||
return b.List(ListOptions{
|
||||
Status: "all", // Include both open and closed for state derivation
|
||||
@@ -400,7 +413,14 @@ func (b *Beads) ReadyWithType(issueType string) ([]*Issue, error) {
|
||||
}
|
||||
|
||||
// Show returns detailed information about an issue.
|
||||
// Uses daemon RPC when available for better performance (~40ms faster).
|
||||
func (b *Beads) Show(id string) (*Issue, error) {
|
||||
// Try RPC first (faster when daemon is running)
|
||||
if issue, err := b.showViaRPC(id); err == nil {
|
||||
return issue, nil
|
||||
}
|
||||
|
||||
// Fall back to subprocess
|
||||
out, err := b.run("show", id, "--json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -559,7 +579,14 @@ func (b *Beads) CreateWithID(id string, opts CreateOptions) (*Issue, error) {
|
||||
}
|
||||
|
||||
// Update updates an existing issue.
|
||||
// Uses daemon RPC when available for better performance (~40ms faster).
|
||||
func (b *Beads) Update(id string, opts UpdateOptions) error {
|
||||
// Try RPC first (faster when daemon is running)
|
||||
if err := b.updateViaRPC(id, opts); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fall back to subprocess
|
||||
args := []string{"update", id}
|
||||
|
||||
if opts.Title != nil {
|
||||
@@ -598,15 +625,26 @@ func (b *Beads) Update(id string, opts UpdateOptions) error {
|
||||
// Close closes one or more issues.
|
||||
// If a runtime session ID is set in the environment, it is passed to bd close
|
||||
// for work attribution tracking (see decision 009-session-events-architecture.md).
|
||||
// Uses daemon RPC when available for better performance (~40ms faster per call).
|
||||
func (b *Beads) Close(ids ...string) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sessionID := runtime.SessionIDFromEnv()
|
||||
|
||||
// Try RPC for single-issue closes (faster when daemon is running)
|
||||
if len(ids) == 1 {
|
||||
if err := b.closeViaRPC(ids[0], "", sessionID, false); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to subprocess
|
||||
args := append([]string{"close"}, ids...)
|
||||
|
||||
// Pass session ID for work attribution if available
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
if sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
@@ -617,16 +655,51 @@ func (b *Beads) Close(ids ...string) error {
|
||||
// CloseWithReason closes one or more issues with a reason.
|
||||
// If a runtime session ID is set in the environment, it is passed to bd close
|
||||
// for work attribution tracking (see decision 009-session-events-architecture.md).
|
||||
// Uses daemon RPC when available for better performance (~40ms faster per call).
|
||||
func (b *Beads) CloseWithReason(reason string, ids ...string) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sessionID := runtime.SessionIDFromEnv()
|
||||
|
||||
// Try RPC for single-issue closes (faster when daemon is running)
|
||||
if len(ids) == 1 {
|
||||
if err := b.closeViaRPC(ids[0], reason, sessionID, false); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to subprocess
|
||||
args := append([]string{"close"}, ids...)
|
||||
args = append(args, "--reason="+reason)
|
||||
|
||||
// Pass session ID for work attribution if available
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
if sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
_, err := b.run(args...)
|
||||
return err
|
||||
}
|
||||
|
||||
// CloseForced closes an issue with force flag and optional reason.
|
||||
// The force flag bypasses blockers and other validation checks.
|
||||
// Uses daemon RPC when available for better performance (~40ms faster).
|
||||
func (b *Beads) CloseForced(id, reason string) error {
|
||||
sessionID := runtime.SessionIDFromEnv()
|
||||
|
||||
// Try RPC first (faster when daemon is running)
|
||||
if err := b.closeViaRPC(id, reason, sessionID, true); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fall back to subprocess
|
||||
args := []string{"close", id, "--force"}
|
||||
if reason != "" {
|
||||
args = append(args, "--reason="+reason)
|
||||
}
|
||||
if sessionID != "" {
|
||||
args = append(args, "--session="+sessionID)
|
||||
}
|
||||
|
||||
@@ -747,6 +820,7 @@ This is physics, not politeness. Gas Town is a steam engine - you are a piston.
|
||||
- ` + "`gt mol status`" + ` - Check your hooked work
|
||||
- ` + "`gt mail inbox`" + ` - Check for messages
|
||||
- ` + "`bd ready`" + ` - Find available work (no blockers)
|
||||
- ` + "`bd tree <id>`" + ` - View bead ancestry, siblings, and dependencies
|
||||
- ` + "`bd sync`" + ` - Sync beads changes
|
||||
|
||||
## Session Close Protocol
|
||||
@@ -754,11 +828,9 @@ This is physics, not politeness. Gas Town is a steam engine - you are a piston.
|
||||
Before signaling completion:
|
||||
1. git status (check what changed)
|
||||
2. git add <files> (stage code changes)
|
||||
3. bd sync (commit beads changes)
|
||||
4. git commit -m "..." (commit code)
|
||||
5. bd sync (commit any new beads changes)
|
||||
6. git push (push to remote)
|
||||
7. ` + "`gt done`" + ` (submit to merge queue and exit)
|
||||
3. git commit -m "..." (commit code)
|
||||
4. git push (push to remote)
|
||||
5. ` + "`gt done`" + ` (submit to merge queue and exit)
|
||||
|
||||
**Polecats MUST call ` + "`gt done`" + ` - this submits work and exits the session.**
|
||||
`
|
||||
@@ -799,3 +871,19 @@ func ProvisionPrimeMDForWorktree(worktreePath string) error {
|
||||
// Provision PRIME.md in the target directory
|
||||
return ProvisionPrimeMD(beadsDir)
|
||||
}
|
||||
|
||||
// GetPrimeContent returns the beads workflow context content.
|
||||
// It checks for a custom PRIME.md file first, otherwise returns the default.
|
||||
// This eliminates the need to spawn a bd subprocess for gt prime.
|
||||
func GetPrimeContent(workDir string) string {
|
||||
beadsDir := ResolveBeadsDir(workDir)
|
||||
primePath := filepath.Join(beadsDir, "PRIME.md")
|
||||
|
||||
// Check for custom PRIME.md
|
||||
if content, err := os.ReadFile(primePath); err == nil {
|
||||
return strings.TrimSpace(string(content))
|
||||
}
|
||||
|
||||
// Return default content
|
||||
return strings.TrimSpace(primeContent)
|
||||
}
|
||||
|
||||
334
internal/beads/beads_rpc.go
Normal file
334
internal/beads/beads_rpc.go
Normal file
@@ -0,0 +1,334 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MaxUnixSocketPath is the maximum length for Unix socket paths.
|
||||
const MaxUnixSocketPath = 103
|
||||
|
||||
// rpcClient represents an RPC client for the bd daemon.
|
||||
type rpcClient struct {
|
||||
conn net.Conn
|
||||
socketPath string
|
||||
timeout time.Duration
|
||||
cwd string
|
||||
}
|
||||
|
||||
// rpcRequest represents an RPC request to the daemon.
|
||||
type rpcRequest struct {
|
||||
Operation string `json:"operation"`
|
||||
Args json.RawMessage `json:"args"`
|
||||
Cwd string `json:"cwd,omitempty"`
|
||||
}
|
||||
|
||||
// rpcResponse represents an RPC response from the daemon.
|
||||
type rpcResponse struct {
|
||||
Success bool `json:"success"`
|
||||
Data json.RawMessage `json:"data,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// tryConnectRPC attempts to connect to the bd daemon.
|
||||
// Returns nil if no daemon is running.
|
||||
func tryConnectRPC(workspacePath string) *rpcClient {
|
||||
socketPath := socketPathForWorkspace(workspacePath)
|
||||
|
||||
// Check if socket exists
|
||||
if _, err := os.Stat(socketPath); os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
conn, err := net.DialTimeout("unix", socketPath, 200*time.Millisecond)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
client := &rpcClient{
|
||||
conn: conn,
|
||||
socketPath: socketPath,
|
||||
timeout: 30 * time.Second,
|
||||
cwd: workspacePath,
|
||||
}
|
||||
|
||||
// Quick health check
|
||||
if err := client.ping(); err != nil {
|
||||
_ = conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
// close closes the RPC connection.
|
||||
func (c *rpcClient) close() error {
|
||||
if c.conn != nil {
|
||||
return c.conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// execute sends a request and returns the response.
|
||||
func (c *rpcClient) execute(operation string, args interface{}) (*rpcResponse, error) {
|
||||
argsJSON, err := json.Marshal(args)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling args: %w", err)
|
||||
}
|
||||
|
||||
req := rpcRequest{
|
||||
Operation: operation,
|
||||
Args: argsJSON,
|
||||
Cwd: c.cwd,
|
||||
}
|
||||
|
||||
reqJSON, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshaling request: %w", err)
|
||||
}
|
||||
|
||||
if c.timeout > 0 {
|
||||
deadline := time.Now().Add(c.timeout)
|
||||
if err := c.conn.SetDeadline(deadline); err != nil {
|
||||
return nil, fmt.Errorf("setting deadline: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
writer := bufio.NewWriter(c.conn)
|
||||
if _, err := writer.Write(reqJSON); err != nil {
|
||||
return nil, fmt.Errorf("writing request: %w", err)
|
||||
}
|
||||
if err := writer.WriteByte('\n'); err != nil {
|
||||
return nil, fmt.Errorf("writing newline: %w", err)
|
||||
}
|
||||
if err := writer.Flush(); err != nil {
|
||||
return nil, fmt.Errorf("flushing: %w", err)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(c.conn)
|
||||
respLine, err := reader.ReadBytes('\n')
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response: %w", err)
|
||||
}
|
||||
|
||||
var resp rpcResponse
|
||||
if err := json.Unmarshal(respLine, &resp); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling response: %w", err)
|
||||
}
|
||||
|
||||
if !resp.Success {
|
||||
return &resp, fmt.Errorf("operation failed: %s", resp.Error)
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// ping verifies the daemon is alive.
|
||||
func (c *rpcClient) ping() error {
|
||||
_, err := c.execute("ping", nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// socketPathForWorkspace returns the socket path for a workspace.
|
||||
// This mirrors the logic in beads/internal/rpc/socket_path.go.
|
||||
func socketPathForWorkspace(workspacePath string) string {
|
||||
// Compute the "natural" socket path in .beads/
|
||||
naturalPath := filepath.Join(workspacePath, ".beads", "bd.sock")
|
||||
|
||||
// If natural path is short enough, use it
|
||||
if len(naturalPath) <= MaxUnixSocketPath {
|
||||
return naturalPath
|
||||
}
|
||||
|
||||
// Path too long - use /tmp with hash
|
||||
hash := sha256.Sum256([]byte(workspacePath))
|
||||
hashStr := hex.EncodeToString(hash[:4])
|
||||
return filepath.Join("/tmp", "beads-"+hashStr, "bd.sock")
|
||||
}
|
||||
|
||||
// getRPCClient returns the RPC client, initializing on first call.
|
||||
// Returns nil if daemon is not available.
|
||||
func (b *Beads) getRPCClient() *rpcClient {
|
||||
if b.rpcChecked {
|
||||
return b.rpcClient
|
||||
}
|
||||
|
||||
b.rpcChecked = true
|
||||
|
||||
// Don't use RPC in isolated mode (tests)
|
||||
if b.isolated {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolve workspace path for socket discovery
|
||||
workspacePath := b.beadsDir
|
||||
if workspacePath == "" {
|
||||
workspacePath = ResolveBeadsDir(b.workDir)
|
||||
}
|
||||
|
||||
// Get the workspace root (parent of .beads)
|
||||
if filepath.Base(workspacePath) == ".beads" {
|
||||
workspacePath = filepath.Dir(workspacePath)
|
||||
}
|
||||
|
||||
b.rpcClient = tryConnectRPC(workspacePath)
|
||||
b.rpcAvailable = b.rpcClient != nil
|
||||
return b.rpcClient
|
||||
}
|
||||
|
||||
// closeRPC closes the RPC client if connected.
|
||||
func (b *Beads) closeRPC() {
|
||||
if b.rpcClient != nil {
|
||||
_ = b.rpcClient.close()
|
||||
b.rpcClient = nil
|
||||
}
|
||||
}
|
||||
|
||||
// RPC operation argument types
|
||||
|
||||
type rpcListArgs struct {
|
||||
Status string `json:"status,omitempty"`
|
||||
Assignee string `json:"assignee,omitempty"`
|
||||
Labels []string `json:"labels,omitempty"`
|
||||
LabelsAny []string `json:"labels_any,omitempty"`
|
||||
ExcludeStatus []string `json:"exclude_status,omitempty"`
|
||||
Priority *int `json:"priority,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
NoAssignee bool `json:"no_assignee,omitempty"`
|
||||
Limit int `json:"limit,omitempty"`
|
||||
}
|
||||
|
||||
type rpcShowArgs struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
type rpcUpdateArgs struct {
|
||||
ID string `json:"id"`
|
||||
Title *string `json:"title,omitempty"`
|
||||
Status *string `json:"status,omitempty"`
|
||||
Priority *int `json:"priority,omitempty"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
Assignee *string `json:"assignee,omitempty"`
|
||||
AddLabels []string `json:"add_labels,omitempty"`
|
||||
RemoveLabels []string `json:"remove_labels,omitempty"`
|
||||
SetLabels []string `json:"set_labels,omitempty"`
|
||||
}
|
||||
|
||||
type rpcCloseArgs struct {
|
||||
ID string `json:"id"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Session string `json:"session,omitempty"`
|
||||
Force bool `json:"force,omitempty"`
|
||||
}
|
||||
|
||||
// listViaRPC performs a list operation via the daemon RPC.
|
||||
func (b *Beads) listViaRPC(opts ListOptions) ([]*Issue, error) {
|
||||
client := b.getRPCClient()
|
||||
if client == nil {
|
||||
return nil, fmt.Errorf("no RPC client")
|
||||
}
|
||||
|
||||
args := rpcListArgs{
|
||||
Status: opts.Status,
|
||||
Assignee: opts.Assignee,
|
||||
ParentID: opts.Parent,
|
||||
}
|
||||
|
||||
// Convert Label to Labels array if set
|
||||
// Also handle deprecated Type field by converting to gt: label
|
||||
if opts.Label != "" {
|
||||
args.Labels = []string{opts.Label}
|
||||
} else if opts.Type != "" {
|
||||
// Deprecated: convert type to label for backward compatibility
|
||||
args.Labels = []string{"gt:" + opts.Type}
|
||||
}
|
||||
|
||||
// Handle priority: -1 means no filter
|
||||
if opts.Priority >= 0 {
|
||||
args.Priority = &opts.Priority
|
||||
}
|
||||
|
||||
if opts.NoAssignee {
|
||||
args.NoAssignee = true
|
||||
}
|
||||
|
||||
resp, err := client.execute("list", args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issues []*Issue
|
||||
if err := json.Unmarshal(resp.Data, &issues); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling issues: %w", err)
|
||||
}
|
||||
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// showViaRPC performs a show operation via the daemon RPC.
|
||||
func (b *Beads) showViaRPC(id string) (*Issue, error) {
|
||||
client := b.getRPCClient()
|
||||
if client == nil {
|
||||
return nil, fmt.Errorf("no RPC client")
|
||||
}
|
||||
|
||||
resp, err := client.execute("show", rpcShowArgs{ID: id})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var issue Issue
|
||||
if err := json.Unmarshal(resp.Data, &issue); err != nil {
|
||||
return nil, fmt.Errorf("unmarshaling issue: %w", err)
|
||||
}
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// updateViaRPC performs an update operation via the daemon RPC.
|
||||
func (b *Beads) updateViaRPC(id string, opts UpdateOptions) error {
|
||||
client := b.getRPCClient()
|
||||
if client == nil {
|
||||
return fmt.Errorf("no RPC client")
|
||||
}
|
||||
|
||||
args := rpcUpdateArgs{
|
||||
ID: id,
|
||||
Title: opts.Title,
|
||||
Status: opts.Status,
|
||||
Priority: opts.Priority,
|
||||
Description: opts.Description,
|
||||
Assignee: opts.Assignee,
|
||||
AddLabels: opts.AddLabels,
|
||||
RemoveLabels: opts.RemoveLabels,
|
||||
SetLabels: opts.SetLabels,
|
||||
}
|
||||
|
||||
_, err := client.execute("update", args)
|
||||
return err
|
||||
}
|
||||
|
||||
// closeViaRPC performs a close operation via the daemon RPC.
|
||||
func (b *Beads) closeViaRPC(id, reason, session string, force bool) error {
|
||||
client := b.getRPCClient()
|
||||
if client == nil {
|
||||
return fmt.Errorf("no RPC client")
|
||||
}
|
||||
|
||||
args := rpcCloseArgs{
|
||||
ID: id,
|
||||
Reason: reason,
|
||||
Session: session,
|
||||
Force: force,
|
||||
}
|
||||
|
||||
_, err := client.execute("close", args)
|
||||
return err
|
||||
}
|
||||
@@ -903,6 +903,80 @@ func TestAttachmentFieldsRoundTrip(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestNoMergeField tests the no_merge field in AttachmentFields.
|
||||
// The no_merge flag tells gt done to skip the merge queue and keep work on a feature branch.
|
||||
func TestNoMergeField(t *testing.T) {
|
||||
t.Run("parse no_merge true", func(t *testing.T) {
|
||||
issue := &Issue{Description: "no_merge: true\ndispatched_by: mayor"}
|
||||
fields := ParseAttachmentFields(issue)
|
||||
if fields == nil {
|
||||
t.Fatal("ParseAttachmentFields() = nil")
|
||||
}
|
||||
if !fields.NoMerge {
|
||||
t.Error("NoMerge should be true")
|
||||
}
|
||||
if fields.DispatchedBy != "mayor" {
|
||||
t.Errorf("DispatchedBy = %q, want 'mayor'", fields.DispatchedBy)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("parse no_merge false", func(t *testing.T) {
|
||||
issue := &Issue{Description: "no_merge: false\ndispatched_by: crew"}
|
||||
fields := ParseAttachmentFields(issue)
|
||||
if fields == nil {
|
||||
t.Fatal("ParseAttachmentFields() = nil")
|
||||
}
|
||||
if fields.NoMerge {
|
||||
t.Error("NoMerge should be false")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("parse no-merge alternate format", func(t *testing.T) {
|
||||
issue := &Issue{Description: "no-merge: true"}
|
||||
fields := ParseAttachmentFields(issue)
|
||||
if fields == nil {
|
||||
t.Fatal("ParseAttachmentFields() = nil")
|
||||
}
|
||||
if !fields.NoMerge {
|
||||
t.Error("NoMerge should be true with hyphen format")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("format no_merge", func(t *testing.T) {
|
||||
fields := &AttachmentFields{
|
||||
NoMerge: true,
|
||||
DispatchedBy: "mayor",
|
||||
}
|
||||
got := FormatAttachmentFields(fields)
|
||||
if !strings.Contains(got, "no_merge: true") {
|
||||
t.Errorf("FormatAttachmentFields() missing no_merge, got:\n%s", got)
|
||||
}
|
||||
if !strings.Contains(got, "dispatched_by: mayor") {
|
||||
t.Errorf("FormatAttachmentFields() missing dispatched_by, got:\n%s", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("round-trip with no_merge", func(t *testing.T) {
|
||||
original := &AttachmentFields{
|
||||
AttachedMolecule: "mol-test",
|
||||
AttachedAt: "2026-01-24T12:00:00Z",
|
||||
DispatchedBy: "gastown/crew/max",
|
||||
NoMerge: true,
|
||||
}
|
||||
|
||||
formatted := FormatAttachmentFields(original)
|
||||
issue := &Issue{Description: formatted}
|
||||
parsed := ParseAttachmentFields(issue)
|
||||
|
||||
if parsed == nil {
|
||||
t.Fatal("round-trip parse returned nil")
|
||||
}
|
||||
if *parsed != *original {
|
||||
t.Errorf("round-trip mismatch:\ngot %+v\nwant %+v", parsed, original)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestResolveBeadsDir tests the redirect following logic.
|
||||
func TestResolveBeadsDir(t *testing.T) {
|
||||
// Create temp directory structure
|
||||
|
||||
@@ -107,6 +107,7 @@ func EnsureCustomTypes(beadsDir string) error {
|
||||
typesList := strings.Join(constants.BeadsCustomTypesList(), ",")
|
||||
cmd := exec.Command("bd", "config", "set", "types.custom", typesList)
|
||||
cmd.Dir = beadsDir
|
||||
// Set BEADS_DIR explicitly to ensure bd operates on the correct database
|
||||
cmd.Env = append(os.Environ(), "BEADS_DIR="+beadsDir)
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("configure custom types in %s: %s: %w",
|
||||
|
||||
@@ -1,244 +0,0 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
gracefulTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// BdDaemonInfo represents the status of a single bd daemon instance.
|
||||
type BdDaemonInfo struct {
|
||||
Workspace string `json:"workspace"`
|
||||
SocketPath string `json:"socket_path"`
|
||||
PID int `json:"pid"`
|
||||
Version string `json:"version"`
|
||||
Status string `json:"status"`
|
||||
Issue string `json:"issue,omitempty"`
|
||||
VersionMismatch bool `json:"version_mismatch,omitempty"`
|
||||
}
|
||||
|
||||
// BdDaemonHealth represents the overall health of bd daemons.
|
||||
type BdDaemonHealth struct {
|
||||
Total int `json:"total"`
|
||||
Healthy int `json:"healthy"`
|
||||
Stale int `json:"stale"`
|
||||
Mismatched int `json:"mismatched"`
|
||||
Unresponsive int `json:"unresponsive"`
|
||||
Daemons []BdDaemonInfo `json:"daemons"`
|
||||
}
|
||||
|
||||
// CheckBdDaemonHealth checks the health of all bd daemons.
|
||||
// Returns nil if no daemons are running (which is fine, bd will use direct mode).
|
||||
func CheckBdDaemonHealth() (*BdDaemonHealth, error) {
|
||||
cmd := exec.Command("bd", "daemon", "health", "--json")
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
// bd daemon health may fail if bd not installed or other issues
|
||||
// Return nil to indicate we can't check (not an error for status display)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var health BdDaemonHealth
|
||||
if err := json.Unmarshal(stdout.Bytes(), &health); err != nil {
|
||||
return nil, fmt.Errorf("parsing daemon health: %w", err)
|
||||
}
|
||||
|
||||
return &health, nil
|
||||
}
|
||||
|
||||
// EnsureBdDaemonHealth checks if bd daemons are healthy and attempts to restart if needed.
|
||||
// Returns a warning message if there were issues, or empty string if everything is fine.
|
||||
// This is non-blocking - it will not fail if daemons can't be started.
|
||||
func EnsureBdDaemonHealth(workDir string) string {
|
||||
health, err := CheckBdDaemonHealth()
|
||||
if err != nil || health == nil {
|
||||
// Can't check daemon health - proceed without warning
|
||||
return ""
|
||||
}
|
||||
|
||||
// No daemons running is fine - bd will use direct mode
|
||||
if health.Total == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Check if any daemons need attention
|
||||
needsRestart := false
|
||||
for _, d := range health.Daemons {
|
||||
switch d.Status {
|
||||
case "healthy":
|
||||
// Good
|
||||
case "version_mismatch", "stale", "unresponsive":
|
||||
needsRestart = true
|
||||
}
|
||||
}
|
||||
|
||||
if !needsRestart {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Attempt to restart daemons
|
||||
if restartErr := restartBdDaemons(); restartErr != nil {
|
||||
return fmt.Sprintf("bd daemons unhealthy (restart failed: %v)", restartErr)
|
||||
}
|
||||
|
||||
// Verify restart worked
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
newHealth, err := CheckBdDaemonHealth()
|
||||
if err != nil || newHealth == nil {
|
||||
return "bd daemons restarted but status unknown"
|
||||
}
|
||||
|
||||
if newHealth.Healthy < newHealth.Total {
|
||||
return fmt.Sprintf("bd daemons partially healthy (%d/%d)", newHealth.Healthy, newHealth.Total)
|
||||
}
|
||||
|
||||
return "" // Successfully restarted
|
||||
}
|
||||
|
||||
// restartBdDaemons restarts all bd daemons.
func restartBdDaemons() error { //nolint:unparam // error return kept for future use
	// Terminate daemons via pkill rather than a bd command; invoking bd
	// here could auto-start a daemon as a side effect.
	_ = exec.Command("pkill", "-TERM", "-f", "bd daemon").Run()

	// Brief pause so the terminated processes can clean up.
	time.Sleep(200 * time.Millisecond)

	// No explicit start step is needed: daemons auto-start the next time a
	// bd command runs in a configured directory.
	return nil
}
|
||||
|
||||
// StartBdDaemonIfNeeded starts the bd daemon for a specific workspace if not running.
// This is a best-effort operation - failures are logged but don't block execution.
func StartBdDaemonIfNeeded(workDir string) error {
	startCmd := exec.Command("bd", "daemon", "start")
	// Run from the workspace so bd targets that workspace's daemon.
	startCmd.Dir = workDir
	return startCmd.Run()
}
|
||||
|
||||
// StopAllBdProcesses stops all bd daemon and activity processes.
|
||||
// Returns (daemonsKilled, activityKilled, error).
|
||||
// If dryRun is true, returns counts without stopping anything.
|
||||
func StopAllBdProcesses(dryRun, force bool) (int, int, error) {
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
||||
daemonsBefore := CountBdDaemons()
|
||||
activityBefore := CountBdActivityProcesses()
|
||||
|
||||
if dryRun {
|
||||
return daemonsBefore, activityBefore, nil
|
||||
}
|
||||
|
||||
daemonsKilled, daemonsRemaining := stopBdDaemons(force)
|
||||
activityKilled, activityRemaining := stopBdActivityProcesses(force)
|
||||
|
||||
if daemonsRemaining > 0 {
|
||||
return daemonsKilled, activityKilled, fmt.Errorf("bd daemon shutdown incomplete: %d still running", daemonsRemaining)
|
||||
}
|
||||
if activityRemaining > 0 {
|
||||
return daemonsKilled, activityKilled, fmt.Errorf("bd activity shutdown incomplete: %d still running", activityRemaining)
|
||||
}
|
||||
|
||||
return daemonsKilled, activityKilled, nil
|
||||
}
|
||||
|
||||
// CountBdDaemons returns count of running bd daemons.
// Uses pgrep instead of "bd daemon list" to avoid triggering daemon auto-start
// during shutdown verification.
func CountBdDaemons() int {
	// pgrep -c is not available on macOS, so pipe through wc -l instead
	// for cross-platform counting.
	out, err := exec.Command("sh", "-c", "pgrep -f 'bd daemon' 2>/dev/null | wc -l").Output()
	if err != nil {
		return 0
	}
	n, _ := strconv.Atoi(strings.TrimSpace(string(out)))
	return n
}
|
||||
|
||||
|
||||
func stopBdDaemons(force bool) (int, int) {
|
||||
before := CountBdDaemons()
|
||||
if before == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// Use pkill directly instead of "bd daemon killall" to avoid triggering
|
||||
// daemon auto-start as a side effect of running bd commands.
|
||||
// Note: pkill -f pattern may match unintended processes in rare cases
|
||||
// (e.g., editors with "bd daemon" in file content). This is acceptable
|
||||
// given the alternative of respawning daemons during shutdown.
|
||||
if force {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd daemon").Run()
|
||||
} else {
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd daemon").Run()
|
||||
time.Sleep(gracefulTimeout)
|
||||
if remaining := CountBdDaemons(); remaining > 0 {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd daemon").Run()
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
final := CountBdDaemons()
|
||||
killed := before - final
|
||||
if killed < 0 {
|
||||
killed = 0 // Race condition: more processes spawned than we killed
|
||||
}
|
||||
return killed, final
|
||||
}
|
||||
|
||||
// CountBdActivityProcesses returns count of running `bd activity` processes.
func CountBdActivityProcesses() int {
	// pgrep -c is not available on macOS, so pipe through wc -l instead
	// for cross-platform counting.
	out, err := exec.Command("sh", "-c", "pgrep -f 'bd activity' 2>/dev/null | wc -l").Output()
	if err != nil {
		return 0
	}
	n, _ := strconv.Atoi(strings.TrimSpace(string(out)))
	return n
}
|
||||
|
||||
func stopBdActivityProcesses(force bool) (int, int) {
|
||||
before := CountBdActivityProcesses()
|
||||
if before == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
if force {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd activity").Run()
|
||||
} else {
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "bd activity").Run()
|
||||
time.Sleep(gracefulTimeout)
|
||||
if remaining := CountBdActivityProcesses(); remaining > 0 {
|
||||
_ = exec.Command("pkill", "-9", "-f", "bd activity").Run()
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
after := CountBdActivityProcesses()
|
||||
killed := before - after
|
||||
if killed < 0 {
|
||||
killed = 0 // Race condition: more processes spawned than we killed
|
||||
}
|
||||
return killed, after
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCountBdActivityProcesses(t *testing.T) {
|
||||
count := CountBdActivityProcesses()
|
||||
if count < 0 {
|
||||
t.Errorf("count should be non-negative, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCountBdDaemons(t *testing.T) {
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
t.Skip("bd not installed")
|
||||
}
|
||||
count := CountBdDaemons()
|
||||
if count < 0 {
|
||||
t.Errorf("count should be non-negative, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStopAllBdProcesses_DryRun(t *testing.T) {
|
||||
daemonsKilled, activityKilled, err := StopAllBdProcesses(true, false)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if daemonsKilled < 0 || activityKilled < 0 {
|
||||
t.Errorf("counts should be non-negative: daemons=%d, activity=%d", daemonsKilled, activityKilled)
|
||||
}
|
||||
}
|
||||
@@ -21,6 +21,7 @@ type AttachmentFields struct {
|
||||
AttachedAt string // ISO 8601 timestamp when attached
|
||||
AttachedArgs string // Natural language args passed via gt sling --args (no-tmux mode)
|
||||
DispatchedBy string // Agent ID that dispatched this work (for completion notification)
|
||||
NoMerge bool // If true, gt done skips merge queue (for upstream PRs/human review)
|
||||
}
|
||||
|
||||
// ParseAttachmentFields extracts attachment fields from an issue's description.
|
||||
@@ -65,6 +66,9 @@ func ParseAttachmentFields(issue *Issue) *AttachmentFields {
|
||||
case "dispatched_by", "dispatched-by", "dispatchedby":
|
||||
fields.DispatchedBy = value
|
||||
hasFields = true
|
||||
case "no_merge", "no-merge", "nomerge":
|
||||
fields.NoMerge = strings.ToLower(value) == "true"
|
||||
hasFields = true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,6 +99,9 @@ func FormatAttachmentFields(fields *AttachmentFields) string {
|
||||
if fields.DispatchedBy != "" {
|
||||
lines = append(lines, "dispatched_by: "+fields.DispatchedBy)
|
||||
}
|
||||
if fields.NoMerge {
|
||||
lines = append(lines, "no_merge: true")
|
||||
}
|
||||
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
@@ -117,6 +124,9 @@ func SetAttachmentFields(issue *Issue, fields *AttachmentFields) string {
|
||||
"dispatched_by": true,
|
||||
"dispatched-by": true,
|
||||
"dispatchedby": true,
|
||||
"no_merge": true,
|
||||
"no-merge": true,
|
||||
"nomerge": true,
|
||||
}
|
||||
|
||||
// Collect non-attachment lines from existing description
|
||||
|
||||
@@ -12,16 +12,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
)
|
||||
|
||||
// SessionName is the tmux session name for Boot.
|
||||
// Note: We use "gt-boot" instead of "hq-deacon-boot" to avoid tmux prefix
|
||||
// matching collisions. Tmux matches session names by prefix, so "hq-deacon-boot"
|
||||
// would match when checking for "hq-deacon", causing HasSession("hq-deacon")
|
||||
// to return true when only Boot is running.
|
||||
const SessionName = "gt-boot"
|
||||
|
||||
// MarkerFileName is the lock file for Boot startup coordination.
|
||||
const MarkerFileName = ".boot-running"
|
||||
|
||||
@@ -81,7 +75,7 @@ func (b *Boot) IsRunning() bool {
|
||||
|
||||
// IsSessionAlive checks if the Boot tmux session exists.
|
||||
func (b *Boot) IsSessionAlive() bool {
|
||||
has, err := b.tmux.HasSession(SessionName)
|
||||
has, err := b.tmux.HasSession(session.BootSessionName())
|
||||
return err == nil && has
|
||||
}
|
||||
|
||||
@@ -163,7 +157,7 @@ func (b *Boot) spawnTmux(agentOverride string) error {
|
||||
// Kill any stale session first.
|
||||
// Use KillSessionWithProcesses to ensure all descendant processes are killed.
|
||||
if b.IsSessionAlive() {
|
||||
_ = b.tmux.KillSessionWithProcesses(SessionName)
|
||||
_ = b.tmux.KillSessionWithProcesses(session.BootSessionName())
|
||||
}
|
||||
|
||||
// Ensure boot directory exists (it should have CLAUDE.md with Boot context)
|
||||
@@ -171,22 +165,26 @@ func (b *Boot) spawnTmux(agentOverride string) error {
|
||||
return fmt.Errorf("ensuring boot dir: %w", err)
|
||||
}
|
||||
|
||||
// Build startup command with optional agent override
|
||||
// The "gt boot triage" prompt tells Boot to immediately start triage (GUPP principle)
|
||||
initialPrompt := session.BuildStartupPrompt(session.BeaconConfig{
|
||||
Recipient: "boot",
|
||||
Sender: "daemon",
|
||||
Topic: "triage",
|
||||
}, "Run `gt boot triage` now.")
|
||||
|
||||
var startCmd string
|
||||
if agentOverride != "" {
|
||||
var err error
|
||||
startCmd, err = config.BuildAgentStartupCommandWithAgentOverride("boot", "", b.townRoot, "", "gt boot triage", agentOverride)
|
||||
startCmd, err = config.BuildAgentStartupCommandWithAgentOverride("boot", "", b.townRoot, "", initialPrompt, agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command with agent override: %w", err)
|
||||
}
|
||||
} else {
|
||||
startCmd = config.BuildAgentStartupCommand("boot", "", b.townRoot, "", "gt boot triage")
|
||||
startCmd = config.BuildAgentStartupCommand("boot", "", b.townRoot, "", initialPrompt)
|
||||
}
|
||||
|
||||
// Create session with command directly to avoid send-keys race condition.
|
||||
// See: https://github.com/anthropics/gastown/issues/280
|
||||
if err := b.tmux.NewSessionWithCommand(SessionName, b.bootDir, startCmd); err != nil {
|
||||
if err := b.tmux.NewSessionWithCommand(session.BootSessionName(), b.bootDir, startCmd); err != nil {
|
||||
return fmt.Errorf("creating boot session: %w", err)
|
||||
}
|
||||
|
||||
@@ -196,7 +194,7 @@ func (b *Boot) spawnTmux(agentOverride string) error {
|
||||
TownRoot: b.townRoot,
|
||||
})
|
||||
for k, v := range envVars {
|
||||
_ = b.tmux.SetEnvironment(SessionName, k, v)
|
||||
_ = b.tmux.SetEnvironment(session.BootSessionName(), k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
{
|
||||
"editorMode": "normal",
|
||||
"enabledPlugins": {
|
||||
"beads@beads-marketplace": false
|
||||
},
|
||||
@@ -64,17 +65,6 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Stop": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt costs record"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
{
|
||||
"editorMode": "normal",
|
||||
"enabledPlugins": {
|
||||
"beads@beads-marketplace": false
|
||||
},
|
||||
@@ -64,17 +65,6 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"Stop": [
|
||||
{
|
||||
"matcher": "",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "export PATH=\"$HOME/go/bin:$HOME/bin:$PATH\" && gt costs record"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
374
internal/cmd/attention.go
Normal file
374
internal/cmd/attention.go
Normal file
@@ -0,0 +1,374 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// Flags for `gt attention`.
var attentionJSON bool // --json: emit machine-readable output
// NOTE(review): attentionAll is registered below but not referenced in the
// visible collection/output code — confirm the --all flag is actually honored.
var attentionAll bool

// attentionCmd implements `gt attention`, a report of everything currently
// needing the overseer's input.
var attentionCmd = &cobra.Command{
	Use:     "attention",
	GroupID: GroupWork,
	Short:   "Show items requiring overseer attention",
	Long: `Show what specifically needs the overseer's attention.

Groups items into categories:
REQUIRES DECISION - Issues needing architectural/design choices
REQUIRES REVIEW - PRs and design docs awaiting approval
BLOCKED - Items stuck on unresolved dependencies

Examples:
gt attention # Show all attention items
gt attention --json # Machine-readable output`,
	RunE: runAttention,
}

// init registers the attention command and its flags on the root command.
func init() {
	attentionCmd.Flags().BoolVar(&attentionJSON, "json", false, "Output as JSON")
	attentionCmd.Flags().BoolVar(&attentionAll, "all", false, "Include lower-priority items")
	rootCmd.AddCommand(attentionCmd)
}
|
||||
|
||||
// AttentionCategory represents a group of items needing attention.
type AttentionCategory string

// Categories in priority order of presentation; values appear verbatim in
// JSON output.
const (
	CategoryDecision AttentionCategory = "REQUIRES_DECISION"
	CategoryReview   AttentionCategory = "REQUIRES_REVIEW"
	CategoryBlocked  AttentionCategory = "BLOCKED"
	CategoryStuck    AttentionCategory = "STUCK_WORKERS"
)

// AttentionItem represents something needing overseer attention.
type AttentionItem struct {
	Category AttentionCategory `json:"category"`
	// Priority: lower number means more important (sorted ascending in
	// runAttention).
	Priority int    `json:"priority"`
	ID       string `json:"id"`
	Title    string `json:"title"`
	Context  string `json:"context,omitempty"`
	// DrillDown is a shell command the overseer can run for detail,
	// e.g. "bd show <id>" or "gh pr view <n>".
	DrillDown string `json:"drill_down"`
	Source    string `json:"source,omitempty"` // "beads", "github", "agent"
	Details   string `json:"details,omitempty"`
}

// AttentionOutput is the full attention report.
// Each category slice may be empty; omitempty drops empty ones from JSON.
type AttentionOutput struct {
	Decisions    []AttentionItem `json:"decisions,omitempty"`
	Reviews      []AttentionItem `json:"reviews,omitempty"`
	Blocked      []AttentionItem `json:"blocked,omitempty"`
	StuckWorkers []AttentionItem `json:"stuck_workers,omitempty"`
}
|
||||
|
||||
func runAttention(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
output := AttentionOutput{}
|
||||
|
||||
// Collect items from various sources in parallel
|
||||
// 1. Blocked beads
|
||||
output.Blocked = collectBlockedItems(townRoot)
|
||||
|
||||
// 2. Items needing decision (issues with needs-decision label)
|
||||
output.Decisions = collectDecisionItems(townRoot)
|
||||
|
||||
// 3. PRs awaiting review
|
||||
output.Reviews = collectReviewItems(townRoot)
|
||||
|
||||
// 4. Stuck workers (agents marked as stuck)
|
||||
output.StuckWorkers = collectStuckWorkers(townRoot)
|
||||
|
||||
// Sort each category by priority
|
||||
sortByPriority := func(items []AttentionItem) {
|
||||
sort.Slice(items, func(i, j int) bool {
|
||||
return items[i].Priority < items[j].Priority // Lower priority number = higher importance
|
||||
})
|
||||
}
|
||||
sortByPriority(output.Decisions)
|
||||
sortByPriority(output.Reviews)
|
||||
sortByPriority(output.Blocked)
|
||||
sortByPriority(output.StuckWorkers)
|
||||
|
||||
if attentionJSON {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(output)
|
||||
}
|
||||
|
||||
return outputAttentionText(output)
|
||||
}
|
||||
|
||||
// collectBlockedItems queries beads for blocked issues and converts each one
// into an AttentionItem in the BLOCKED category.
//
// Best-effort: if `bd blocked --json` fails to run or its output does not
// parse, an empty result is returned rather than an error.
//
// NOTE(review): townRoot is not referenced in this body — bd runs against the
// current working directory's database. Confirm whether the parameter should
// be wired in (e.g. via cmd.Dir) or removed.
func collectBlockedItems(townRoot string) []AttentionItem {
	var items []AttentionItem

	// Query blocked issues from beads
	blockedCmd := exec.Command("bd", "blocked", "--json")
	var stdout bytes.Buffer
	blockedCmd.Stdout = &stdout

	if err := blockedCmd.Run(); err != nil {
		return items
	}

	// Anonymous struct mirrors only the fields we need from bd's JSON.
	var blocked []struct {
		ID        string   `json:"id"`
		Title     string   `json:"title"`
		Priority  int      `json:"priority"`
		BlockedBy []string `json:"blocked_by,omitempty"`
	}
	if err := json.Unmarshal(stdout.Bytes(), &blocked); err != nil {
		return items
	}

	for _, b := range blocked {
		// Skip ephemeral/internal issues. IDs containing "wisp", "-mol-",
		// or "-agent-" are filtered out — presumably internal bead classes
		// rather than overseer-facing work; confirm the naming convention.
		if strings.Contains(b.ID, "wisp") || strings.Contains(b.ID, "-mol-") {
			continue
		}
		if strings.Contains(b.ID, "-agent-") {
			continue
		}

		context := ""
		if len(b.BlockedBy) > 0 {
			context = fmt.Sprintf("Blocked by: %s", strings.Join(b.BlockedBy, ", "))
		}

		items = append(items, AttentionItem{
			Category:  CategoryBlocked,
			Priority:  b.Priority,
			ID:        b.ID,
			Title:     b.Title,
			Context:   context,
			DrillDown: fmt.Sprintf("bd show %s", b.ID),
			Source:    "beads",
		})
	}

	return items
}
|
||||
|
||||
func collectDecisionItems(townRoot string) []AttentionItem {
|
||||
var items []AttentionItem
|
||||
|
||||
// Query issues with needs-decision label
|
||||
listCmd := exec.Command("bd", "list", "--label=needs-decision", "--status=open", "--json")
|
||||
var stdout bytes.Buffer
|
||||
listCmd.Stdout = &stdout
|
||||
|
||||
if err := listCmd.Run(); err != nil {
|
||||
return items
|
||||
}
|
||||
|
||||
var issues []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Priority int `json:"priority"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &issues); err != nil {
|
||||
return items
|
||||
}
|
||||
|
||||
for _, issue := range issues {
|
||||
items = append(items, AttentionItem{
|
||||
Category: CategoryDecision,
|
||||
Priority: issue.Priority,
|
||||
ID: issue.ID,
|
||||
Title: issue.Title,
|
||||
Context: "Needs architectural/design decision",
|
||||
DrillDown: fmt.Sprintf("bd show %s", issue.ID),
|
||||
Source: "beads",
|
||||
})
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// collectReviewItems lists open GitHub PRs (via gh) that are not yet approved
// and converts each into a REQUIRES_REVIEW attention item at default P2.
//
// Best-effort: if gh is unavailable, the command fails, or the JSON does not
// parse, an empty result is returned.
//
// NOTE(review): townRoot is not referenced here — gh runs against the current
// working directory's repository. Confirm that is the intended repo.
func collectReviewItems(townRoot string) []AttentionItem {
	var items []AttentionItem

	// Query open PRs from GitHub
	prCmd := exec.Command("gh", "pr", "list", "--json", "number,title,headRefName,reviewDecision,additions,deletions")
	var stdout bytes.Buffer
	prCmd.Stdout = &stdout

	if err := prCmd.Run(); err != nil {
		// gh not available or not in a git repo - skip
		return items
	}

	var prs []struct {
		Number         int    `json:"number"`
		Title          string `json:"title"`
		HeadRefName    string `json:"headRefName"`
		ReviewDecision string `json:"reviewDecision"`
		Additions      int    `json:"additions"`
		Deletions      int    `json:"deletions"`
	}
	if err := json.Unmarshal(stdout.Bytes(), &prs); err != nil {
		return items
	}

	for _, pr := range prs {
		// Skip PRs that are already approved
		if pr.ReviewDecision == "APPROVED" {
			continue
		}

		// Diff-size summary shown beneath the title in text output.
		details := fmt.Sprintf("+%d/-%d lines", pr.Additions, pr.Deletions)

		items = append(items, AttentionItem{
			Category:  CategoryReview,
			Priority:  2, // Default P2 for PRs
			ID:        fmt.Sprintf("PR #%d", pr.Number),
			Title:     pr.Title,
			Context:   fmt.Sprintf("Branch: %s", pr.HeadRefName),
			DrillDown: fmt.Sprintf("gh pr view %d", pr.Number),
			Source:    "github",
			Details:   details,
		})
	}

	return items
}
|
||||
|
||||
func collectStuckWorkers(townRoot string) []AttentionItem {
|
||||
var items []AttentionItem
|
||||
|
||||
// Query agent beads with stuck state
|
||||
// Check each rig's beads for stuck agents
|
||||
rigDirs, _ := filepath.Glob(filepath.Join(townRoot, "*", "mayor", "rig", ".beads"))
|
||||
for _, rigBeads := range rigDirs {
|
||||
rigItems := queryStuckAgents(rigBeads)
|
||||
items = append(items, rigItems...)
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
// queryStuckAgents reads one rig's beads SQLite database directly (via the
// sqlite3 CLI) and returns a STUCK_WORKERS attention item for every agent
// bead whose agent_state is 'stuck'.
//
// Best-effort: a missing database file, a failed sqlite3 invocation, or
// unparseable output all yield an empty result.
func queryStuckAgents(beadsPath string) []AttentionItem {
	var items []AttentionItem

	// Query agents with stuck state
	dbPath := filepath.Join(beadsPath, "beads.db")
	if _, err := os.Stat(dbPath); err != nil {
		return items
	}

	// Query for agent beads with agent_state = 'stuck'.
	// The SQL is a constant string with no interpolated input, so there is
	// no injection risk despite going through a shell-spawned CLI.
	query := `SELECT id, title, agent_state FROM issues WHERE issue_type = 'agent' AND agent_state = 'stuck'`
	queryCmd := exec.Command("sqlite3", "-json", dbPath, query)
	var stdout bytes.Buffer
	queryCmd.Stdout = &stdout

	if err := queryCmd.Run(); err != nil {
		return items
	}

	var agents []struct {
		ID         string `json:"id"`
		Title      string `json:"title"`
		AgentState string `json:"agent_state"`
	}
	if err := json.Unmarshal(stdout.Bytes(), &agents); err != nil {
		return items
	}

	for _, agent := range agents {
		// Extract agent name from ID (e.g., "gt-gastown-polecat-goose" -> "goose")
		// NOTE(review): this assumes the name is always the final hyphen-
		// separated segment — confirm against the bead ID scheme.
		parts := strings.Split(agent.ID, "-")
		name := parts[len(parts)-1]

		items = append(items, AttentionItem{
			Category:  CategoryStuck,
			Priority:  1, // Stuck workers are high priority
			ID:        agent.ID,
			Title:     fmt.Sprintf("Worker %s is stuck", name),
			Context:   "Agent escalated - needs help",
			DrillDown: fmt.Sprintf("bd show %s", agent.ID),
			Source:    "agent",
		})
	}

	return items
}
|
||||
|
||||
func outputAttentionText(output AttentionOutput) error {
|
||||
hasContent := false
|
||||
|
||||
// Decisions
|
||||
if len(output.Decisions) > 0 {
|
||||
hasContent = true
|
||||
fmt.Printf("%s (%d items)\n", style.Bold.Render("REQUIRES DECISION"), len(output.Decisions))
|
||||
for i, item := range output.Decisions {
|
||||
fmt.Printf("%d. [P%d] %s: %s\n", i+1, item.Priority, item.ID, item.Title)
|
||||
if item.Context != "" {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(item.Context))
|
||||
}
|
||||
fmt.Printf(" %s\n\n", style.Dim.Render("→ "+item.DrillDown))
|
||||
}
|
||||
}
|
||||
|
||||
// Reviews
|
||||
if len(output.Reviews) > 0 {
|
||||
hasContent = true
|
||||
fmt.Printf("%s (%d items)\n", style.Bold.Render("REQUIRES REVIEW"), len(output.Reviews))
|
||||
for i, item := range output.Reviews {
|
||||
fmt.Printf("%d. [P%d] %s: %s\n", i+1, item.Priority, item.ID, item.Title)
|
||||
if item.Details != "" {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(item.Details))
|
||||
}
|
||||
if item.Context != "" {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(item.Context))
|
||||
}
|
||||
fmt.Printf(" %s\n\n", style.Dim.Render("→ "+item.DrillDown))
|
||||
}
|
||||
}
|
||||
|
||||
// Stuck Workers
|
||||
if len(output.StuckWorkers) > 0 {
|
||||
hasContent = true
|
||||
fmt.Printf("%s (%d items)\n", style.Bold.Render("STUCK WORKERS"), len(output.StuckWorkers))
|
||||
for i, item := range output.StuckWorkers {
|
||||
fmt.Printf("%d. %s\n", i+1, item.Title)
|
||||
if item.Context != "" {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(item.Context))
|
||||
}
|
||||
fmt.Printf(" %s\n\n", style.Dim.Render("→ "+item.DrillDown))
|
||||
}
|
||||
}
|
||||
|
||||
// Blocked
|
||||
if len(output.Blocked) > 0 {
|
||||
hasContent = true
|
||||
fmt.Printf("%s (%d items)\n", style.Bold.Render("BLOCKED"), len(output.Blocked))
|
||||
for i, item := range output.Blocked {
|
||||
fmt.Printf("%d. [P%d] %s: %s\n", i+1, item.Priority, item.ID, item.Title)
|
||||
if item.Context != "" {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(item.Context))
|
||||
}
|
||||
fmt.Printf(" %s\n\n", style.Dim.Render("→ "+item.DrillDown))
|
||||
}
|
||||
}
|
||||
|
||||
if !hasContent {
|
||||
fmt.Println("No items requiring attention.")
|
||||
fmt.Println(style.Dim.Render("All clear - nothing blocked, no pending reviews."))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -3,13 +3,18 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
)
|
||||
|
||||
// MinBeadsVersion is the minimum required beads version for Gas Town.
|
||||
@@ -90,8 +95,60 @@ func (v beadsVersion) compare(other beadsVersion) int {
|
||||
// Pre-compiled regex for beads version parsing
|
||||
var beadsVersionRe = regexp.MustCompile(`bd version (\d+\.\d+(?:\.\d+)?(?:-\w+)?)`)
|
||||
|
||||
// versionCacheTTL is how long a cached version check remains valid.
// 24 hours is reasonable since version upgrades are infrequent.
const versionCacheTTL = 24 * time.Hour

// versionCache stores the result of a beads version check, persisted as JSON
// (see saveVersionCache / loadVersionCache).
type versionCache struct {
	Version   string    `json:"version"`
	CheckedAt time.Time `json:"checked_at"` // when the check ran; compared against versionCacheTTL
	Valid     bool      `json:"valid"`      // true if version meets minimum requirement
}
|
||||
|
||||
// versionCachePath returns the path to the version cache file
// ("beads-version.json" inside the state cache directory).
func versionCachePath() string {
	return filepath.Join(state.CacheDir(), "beads-version.json")
}
|
||||
|
||||
// loadVersionCache reads the cached version check result.
|
||||
func loadVersionCache() (*versionCache, error) {
|
||||
data, err := os.ReadFile(versionCachePath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var cache versionCache
|
||||
if err := json.Unmarshal(data, &cache); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cache, nil
|
||||
}
|
||||
|
||||
// saveVersionCache writes the version check result to cache.
|
||||
func saveVersionCache(c *versionCache) error {
|
||||
dir := state.CacheDir()
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := json.MarshalIndent(c, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Atomic write via temp file
|
||||
tmp := versionCachePath() + ".tmp"
|
||||
if err := os.WriteFile(tmp, data, 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(tmp, versionCachePath())
|
||||
}
|
||||
|
||||
// isCacheFresh returns true if the cache is within the TTL.
|
||||
func (c *versionCache) isCacheFresh() bool {
|
||||
return time.Since(c.CheckedAt) < versionCacheTTL
|
||||
}
|
||||
|
||||
func getBeadsVersion() (string, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "bd", "version")
|
||||
@@ -132,8 +189,27 @@ func CheckBeadsVersion() error {
|
||||
}
|
||||
|
||||
func checkBeadsVersionInternal() error {
|
||||
// Try to use cached result first to avoid subprocess spawning
|
||||
if cache, err := loadVersionCache(); err == nil && cache.isCacheFresh() {
|
||||
if cache.Valid {
|
||||
return nil // Cached successful check
|
||||
}
|
||||
// Cached failure - still need to check (version might have been upgraded)
|
||||
}
|
||||
|
||||
installedStr, err := getBeadsVersion()
|
||||
if err != nil {
|
||||
// On timeout, try to use stale cache or gracefully degrade
|
||||
if strings.Contains(err.Error(), "timed out") {
|
||||
if cache, cacheErr := loadVersionCache(); cacheErr == nil && cache.Valid {
|
||||
// Use stale cache but warn
|
||||
fmt.Fprintf(os.Stderr, "Warning: bd version check timed out, using cached result (v%s)\n", cache.Version)
|
||||
return nil
|
||||
}
|
||||
// No cache available - gracefully degrade with warning
|
||||
fmt.Fprintf(os.Stderr, "Warning: bd version check timed out (high system load?), proceeding anyway\n")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("cannot verify beads version: %w", err)
|
||||
}
|
||||
|
||||
@@ -148,7 +224,16 @@ func checkBeadsVersionInternal() error {
|
||||
return fmt.Errorf("cannot parse required beads version %q: %w", MinBeadsVersion, err)
|
||||
}
|
||||
|
||||
if installed.compare(required) < 0 {
|
||||
valid := installed.compare(required) >= 0
|
||||
|
||||
// Cache the result
|
||||
_ = saveVersionCache(&versionCache{
|
||||
Version: installedStr,
|
||||
CheckedAt: time.Now(),
|
||||
Valid: valid,
|
||||
})
|
||||
|
||||
if !valid {
|
||||
return fmt.Errorf("beads version %s is required, but %s is installed\n\nPlease upgrade beads: go install github.com/steveyegge/beads/cmd/bd@latest", MinBeadsVersion, installedStr)
|
||||
}
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/boot"
|
||||
"github.com/steveyegge/gastown/internal/deacon"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -141,7 +142,7 @@ func runBootStatus(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
if sessionAlive {
|
||||
fmt.Printf(" Session: %s (alive)\n", boot.SessionName)
|
||||
fmt.Printf(" Session: %s (alive)\n", session.BootSessionName())
|
||||
} else {
|
||||
fmt.Printf(" Session: %s\n", style.Dim.Render("not running"))
|
||||
}
|
||||
@@ -219,7 +220,7 @@ func runBootSpawn(cmd *cobra.Command, args []string) error {
|
||||
if b.IsDegraded() {
|
||||
fmt.Println("Boot spawned in degraded mode (subprocess)")
|
||||
} else {
|
||||
fmt.Printf("Boot spawned in session: %s\n", boot.SessionName)
|
||||
fmt.Printf("Boot spawned in session: %s\n", session.BootSessionName())
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
127
internal/cmd/cleanup.go
Normal file
127
internal/cmd/cleanup.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/util"
|
||||
)
|
||||
|
||||
var (
|
||||
cleanupDryRun bool
|
||||
cleanupForce bool
|
||||
)
|
||||
|
||||
var cleanupCmd = &cobra.Command{
|
||||
Use: "cleanup",
|
||||
GroupID: GroupWork,
|
||||
Short: "Clean up orphaned Claude processes",
|
||||
Long: `Clean up orphaned Claude processes that survived session termination.
|
||||
|
||||
This command finds and kills Claude processes that are not associated with
|
||||
any active Gas Town tmux session. These orphans can accumulate when:
|
||||
- Polecat sessions are killed without proper cleanup
|
||||
- Claude spawns subagent processes that outlive their parent
|
||||
- Network or system issues interrupt normal shutdown
|
||||
|
||||
Uses aggressive tmux session verification to detect ALL orphaned processes,
|
||||
not just those with PPID=1.
|
||||
|
||||
Examples:
|
||||
gt cleanup # Clean up orphans with confirmation
|
||||
gt cleanup --dry-run # Show what would be killed
|
||||
gt cleanup --force # Kill without confirmation`,
|
||||
RunE: runCleanup,
|
||||
}
|
||||
|
||||
func init() {
|
||||
cleanupCmd.Flags().BoolVar(&cleanupDryRun, "dry-run", false, "Show what would be killed without killing")
|
||||
cleanupCmd.Flags().BoolVarP(&cleanupForce, "force", "f", false, "Kill without confirmation")
|
||||
|
||||
rootCmd.AddCommand(cleanupCmd)
|
||||
}
|
||||
|
||||
func runCleanup(cmd *cobra.Command, args []string) error {
|
||||
// Find orphaned processes using aggressive zombie detection
|
||||
zombies, err := util.FindZombieClaudeProcesses()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding orphaned processes: %w", err)
|
||||
}
|
||||
|
||||
if len(zombies) == 0 {
|
||||
fmt.Printf("%s No orphaned Claude processes found\n", style.Bold.Render("✓"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Show what we found
|
||||
fmt.Printf("%s Found %d orphaned Claude process(es):\n\n", style.Warning.Render("⚠"), len(zombies))
|
||||
for _, z := range zombies {
|
||||
ageStr := formatProcessAgeCleanup(z.Age)
|
||||
fmt.Printf(" %s %s (age: %s, tty: %s)\n",
|
||||
style.Bold.Render(fmt.Sprintf("PID %d", z.PID)),
|
||||
z.Cmd,
|
||||
style.Dim.Render(ageStr),
|
||||
z.TTY)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
if cleanupDryRun {
|
||||
fmt.Printf("%s Dry run - no processes killed\n", style.Dim.Render("ℹ"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Confirm unless --force
|
||||
if !cleanupForce {
|
||||
fmt.Printf("Kill these %d process(es)? [y/N] ", len(zombies))
|
||||
var response string
|
||||
_, _ = fmt.Scanln(&response)
|
||||
if response != "y" && response != "Y" && response != "yes" && response != "Yes" {
|
||||
fmt.Println("Aborted")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Kill the processes using the standard cleanup function
|
||||
results, err := util.CleanupZombieClaudeProcesses()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cleaning up processes: %w", err)
|
||||
}
|
||||
|
||||
// Report results
|
||||
var killed, escalated int
|
||||
for _, r := range results {
|
||||
switch r.Signal {
|
||||
case "SIGTERM":
|
||||
fmt.Printf(" %s PID %d sent SIGTERM\n", style.Success.Render("✓"), r.Process.PID)
|
||||
killed++
|
||||
case "SIGKILL":
|
||||
fmt.Printf(" %s PID %d sent SIGKILL (didn't respond to SIGTERM)\n", style.Warning.Render("⚠"), r.Process.PID)
|
||||
killed++
|
||||
case "UNKILLABLE":
|
||||
fmt.Printf(" %s PID %d survived SIGKILL\n", style.Error.Render("✗"), r.Process.PID)
|
||||
escalated++
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s Cleaned up %d process(es)", style.Bold.Render("✓"), killed)
|
||||
if escalated > 0 {
|
||||
fmt.Printf(", %d unkillable", escalated)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// formatProcessAgeCleanup formats seconds into a human-readable age string
|
||||
func formatProcessAgeCleanup(seconds int) string {
|
||||
if seconds < 60 {
|
||||
return fmt.Sprintf("%ds", seconds)
|
||||
}
|
||||
if seconds < 3600 {
|
||||
return fmt.Sprintf("%dm%ds", seconds/60, seconds%60)
|
||||
}
|
||||
hours := seconds / 3600
|
||||
mins := (seconds % 3600) / 60
|
||||
return fmt.Sprintf("%dh%dm", hours, mins)
|
||||
}
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -69,11 +70,15 @@ var (
|
||||
convoyListStatus string
|
||||
convoyListAll bool
|
||||
convoyListTree bool
|
||||
convoyListOrphans bool
|
||||
convoyListEpic string
|
||||
convoyListByEpic bool
|
||||
convoyInteractive bool
|
||||
convoyStrandedJSON bool
|
||||
convoyCloseReason string
|
||||
convoyCloseNotify string
|
||||
convoyCheckDryRun bool
|
||||
convoyEpic string // --epic: link convoy to parent epic (Goals layer)
|
||||
)
|
||||
|
||||
var convoyCmd = &cobra.Command{
|
||||
@@ -159,6 +164,9 @@ Examples:
|
||||
gt convoy list --all # All convoys (open + closed)
|
||||
gt convoy list --status=closed # Recently landed
|
||||
gt convoy list --tree # Show convoy + child status tree
|
||||
gt convoy list --orphans # Convoys with no parent epic
|
||||
gt convoy list --epic gt-abc # Convoys linked to specific epic
|
||||
gt convoy list --by-epic # Group convoys by parent epic
|
||||
gt convoy list --json`,
|
||||
RunE: runConvoyList,
|
||||
}
|
||||
@@ -253,6 +261,9 @@ func init() {
|
||||
convoyListCmd.Flags().StringVar(&convoyListStatus, "status", "", "Filter by status (open, closed)")
|
||||
convoyListCmd.Flags().BoolVar(&convoyListAll, "all", false, "Show all convoys (open and closed)")
|
||||
convoyListCmd.Flags().BoolVar(&convoyListTree, "tree", false, "Show convoy + child status tree")
|
||||
convoyListCmd.Flags().BoolVar(&convoyListOrphans, "orphans", false, "Show only orphan convoys (no parent epic)")
|
||||
convoyListCmd.Flags().StringVar(&convoyListEpic, "epic", "", "Show convoys for a specific epic")
|
||||
convoyListCmd.Flags().BoolVar(&convoyListByEpic, "by-epic", false, "Group convoys by parent epic")
|
||||
|
||||
// Interactive TUI flag (on parent command)
|
||||
convoyCmd.Flags().BoolVarP(&convoyInteractive, "interactive", "i", false, "Interactive tree view")
|
||||
@@ -309,6 +320,12 @@ func runConvoyCreate(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure custom types (including 'convoy') are registered in town beads.
|
||||
// This handles cases where install didn't complete or beads was initialized manually.
|
||||
if err := beads.EnsureCustomTypes(townBeads); err != nil {
|
||||
return fmt.Errorf("ensuring custom types: %w", err)
|
||||
}
|
||||
|
||||
// Create convoy issue in town beads
|
||||
description := fmt.Sprintf("Convoy tracking %d issues", len(trackedIssues))
|
||||
|
||||
@@ -1163,6 +1180,16 @@ func showAllConvoyStatus(townBeads string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// convoyListItem holds convoy info for list display.
|
||||
type convoyListItem struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
ParentEpic string `json:"parent_epic,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
func runConvoyList(cmd *cobra.Command, args []string) error {
|
||||
townBeads, err := getTownBeadsDir()
|
||||
if err != nil {
|
||||
@@ -1187,16 +1214,59 @@ func runConvoyList(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("listing convoys: %w", err)
|
||||
}
|
||||
|
||||
var convoys []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
var rawConvoys []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
Description string `json:"description"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &convoys); err != nil {
|
||||
if err := json.Unmarshal(stdout.Bytes(), &rawConvoys); err != nil {
|
||||
return fmt.Errorf("parsing convoy list: %w", err)
|
||||
}
|
||||
|
||||
// Convert to convoyListItem and extract parent_epic from description
|
||||
convoys := make([]convoyListItem, 0, len(rawConvoys))
|
||||
for _, rc := range rawConvoys {
|
||||
item := convoyListItem{
|
||||
ID: rc.ID,
|
||||
Title: rc.Title,
|
||||
Status: rc.Status,
|
||||
CreatedAt: rc.CreatedAt,
|
||||
Description: rc.Description,
|
||||
}
|
||||
// Extract parent_epic from description (format: "Parent-Epic: xxx")
|
||||
for _, line := range strings.Split(rc.Description, "\n") {
|
||||
if strings.HasPrefix(line, "Parent-Epic: ") {
|
||||
item.ParentEpic = strings.TrimPrefix(line, "Parent-Epic: ")
|
||||
break
|
||||
}
|
||||
}
|
||||
convoys = append(convoys, item)
|
||||
}
|
||||
|
||||
// Apply filtering based on new flags
|
||||
if convoyListOrphans {
|
||||
// Filter to only orphan convoys (no parent epic)
|
||||
filtered := make([]convoyListItem, 0)
|
||||
for _, c := range convoys {
|
||||
if c.ParentEpic == "" {
|
||||
filtered = append(filtered, c)
|
||||
}
|
||||
}
|
||||
convoys = filtered
|
||||
} else if convoyListEpic != "" {
|
||||
// Filter to convoys linked to specific epic
|
||||
filtered := make([]convoyListItem, 0)
|
||||
for _, c := range convoys {
|
||||
if c.ParentEpic == convoyListEpic {
|
||||
filtered = append(filtered, c)
|
||||
}
|
||||
}
|
||||
convoys = filtered
|
||||
}
|
||||
|
||||
|
||||
if convoyListJSON {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
@@ -1204,33 +1274,81 @@ func runConvoyList(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
if len(convoys) == 0 {
|
||||
fmt.Println("No convoys found.")
|
||||
if convoyListOrphans {
|
||||
fmt.Println("No orphan convoys found.")
|
||||
} else if convoyListEpic != "" {
|
||||
fmt.Printf("No convoys found for epic %s.\n", convoyListEpic)
|
||||
} else {
|
||||
fmt.Println("No convoys found.")
|
||||
}
|
||||
fmt.Println("Create a convoy with: gt convoy create <name> [issues...]")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Group by epic view
|
||||
if convoyListByEpic {
|
||||
return printConvoysByEpic(townBeads, convoys)
|
||||
}
|
||||
|
||||
// Tree view: show convoys with their child issues
|
||||
if convoyListTree {
|
||||
return printConvoyTree(townBeads, convoys)
|
||||
return printConvoyTreeFromItems(townBeads, convoys)
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n\n", style.Bold.Render("Convoys"))
|
||||
for i, c := range convoys {
|
||||
status := formatConvoyStatus(c.Status)
|
||||
fmt.Printf(" %d. 🚚 %s: %s %s\n", i+1, c.ID, c.Title, status)
|
||||
epicSuffix := ""
|
||||
if c.ParentEpic != "" {
|
||||
epicSuffix = style.Dim.Render(fmt.Sprintf(" [%s]", c.ParentEpic))
|
||||
}
|
||||
fmt.Printf(" %d. 🚚 %s: %s %s%s\n", i+1, c.ID, c.Title, status, epicSuffix)
|
||||
}
|
||||
fmt.Printf("\nUse 'gt convoy status <id>' or 'gt convoy status <n>' for detailed view.\n")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// printConvoyTree displays convoys with their child issues in a tree format.
|
||||
func printConvoyTree(townBeads string, convoys []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}) error {
|
||||
// printConvoysByEpic groups and displays convoys by their parent epic.
|
||||
func printConvoysByEpic(townBeads string, convoys []convoyListItem) error {
|
||||
// Group convoys by parent epic
|
||||
byEpic := make(map[string][]convoyListItem)
|
||||
for _, c := range convoys {
|
||||
epic := c.ParentEpic
|
||||
if epic == "" {
|
||||
epic = "(No Epic)"
|
||||
}
|
||||
byEpic[epic] = append(byEpic[epic], c)
|
||||
}
|
||||
|
||||
// Get sorted epic keys (No Epic last)
|
||||
var epics []string
|
||||
for epic := range byEpic {
|
||||
if epic != "(No Epic)" {
|
||||
epics = append(epics, epic)
|
||||
}
|
||||
}
|
||||
sort.Strings(epics)
|
||||
if _, ok := byEpic["(No Epic)"]; ok {
|
||||
epics = append(epics, "(No Epic)")
|
||||
}
|
||||
|
||||
// Print grouped output
|
||||
for _, epic := range epics {
|
||||
convoys := byEpic[epic]
|
||||
fmt.Printf("%s (%d convoys)\n", style.Bold.Render(epic), len(convoys))
|
||||
for _, c := range convoys {
|
||||
status := formatConvoyStatus(c.Status)
|
||||
fmt.Printf(" 🚚 %s: %s %s\n", c.ID, c.Title, status)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// printConvoyTreeFromItems displays convoys with their child issues in a tree format.
|
||||
func printConvoyTreeFromItems(townBeads string, convoys []convoyListItem) error {
|
||||
for _, c := range convoys {
|
||||
// Get tracked issues for this convoy
|
||||
tracked := getTrackedIssues(townBeads, c.ID)
|
||||
@@ -1249,7 +1367,11 @@ func printConvoyTree(townBeads string, convoys []struct {
|
||||
if total > 0 {
|
||||
progress = fmt.Sprintf(" (%d/%d)", completed, total)
|
||||
}
|
||||
fmt.Printf("🚚 %s: %s%s\n", c.ID, c.Title, progress)
|
||||
epicSuffix := ""
|
||||
if c.ParentEpic != "" {
|
||||
epicSuffix = style.Dim.Render(fmt.Sprintf(" [%s]", c.ParentEpic))
|
||||
}
|
||||
fmt.Printf("🚚 %s: %s%s%s\n", c.ID, c.Title, progress, epicSuffix)
|
||||
|
||||
// Print tracked issues as tree children
|
||||
for i, t := range tracked {
|
||||
@@ -1279,6 +1401,40 @@ func printConvoyTree(townBeads string, convoys []struct {
|
||||
return nil
|
||||
}
|
||||
|
||||
// getEpicTitles fetches titles for the given epic IDs.
|
||||
func getEpicTitles(epicIDs []string) map[string]string {
|
||||
result := make(map[string]string)
|
||||
if len(epicIDs) == 0 {
|
||||
return result
|
||||
}
|
||||
|
||||
// Use bd show to get epic details (handles routing automatically)
|
||||
args := append([]string{"show"}, epicIDs...)
|
||||
args = append(args, "--json")
|
||||
|
||||
showCmd := exec.Command("bd", args...)
|
||||
var stdout bytes.Buffer
|
||||
showCmd.Stdout = &stdout
|
||||
|
||||
if err := showCmd.Run(); err != nil {
|
||||
return result
|
||||
}
|
||||
|
||||
var issues []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &issues); err != nil {
|
||||
return result
|
||||
}
|
||||
|
||||
for _, issue := range issues {
|
||||
result[issue.ID] = issue.Title
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func formatConvoyStatus(status string) string {
|
||||
switch status {
|
||||
case "open":
|
||||
@@ -1292,6 +1448,61 @@ func formatConvoyStatus(status string) string {
|
||||
}
|
||||
}
|
||||
|
||||
// getConvoyParentEpics returns a map from convoy ID to parent epic ID.
|
||||
// Convoys link to epics via child_of dependency type.
|
||||
// Uses a single batched query for efficiency.
|
||||
func getConvoyParentEpics(townBeads string, convoyIDs []string) map[string]string {
|
||||
result := make(map[string]string)
|
||||
if len(convoyIDs) == 0 {
|
||||
return result
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(townBeads, "beads.db")
|
||||
|
||||
// Build IN clause with properly escaped IDs
|
||||
var quotedIDs []string
|
||||
for _, id := range convoyIDs {
|
||||
safeID := strings.ReplaceAll(id, "'", "''")
|
||||
quotedIDs = append(quotedIDs, fmt.Sprintf("'%s'", safeID))
|
||||
}
|
||||
inClause := strings.Join(quotedIDs, ", ")
|
||||
|
||||
// Query child_of dependencies for all convoys at once
|
||||
query := fmt.Sprintf(
|
||||
`SELECT issue_id, depends_on_id FROM dependencies WHERE issue_id IN (%s) AND type = 'child_of'`,
|
||||
inClause)
|
||||
|
||||
queryCmd := exec.Command("sqlite3", "-json", dbPath, query)
|
||||
var stdout bytes.Buffer
|
||||
queryCmd.Stdout = &stdout
|
||||
|
||||
if err := queryCmd.Run(); err != nil {
|
||||
return result
|
||||
}
|
||||
|
||||
var deps []struct {
|
||||
IssueID string `json:"issue_id"`
|
||||
DependsOnID string `json:"depends_on_id"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &deps); err != nil {
|
||||
return result
|
||||
}
|
||||
|
||||
for _, dep := range deps {
|
||||
epicID := dep.DependsOnID
|
||||
// Handle external reference format: external:rig:issue-id
|
||||
if strings.HasPrefix(epicID, "external:") {
|
||||
parts := strings.SplitN(epicID, ":", 3)
|
||||
if len(parts) == 3 {
|
||||
epicID = parts[2] // Extract the actual issue ID
|
||||
}
|
||||
}
|
||||
result[dep.IssueID] = epicID
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// trackedIssueInfo holds info about an issue being tracked by a convoy.
|
||||
type trackedIssueInfo struct {
|
||||
ID string `json:"id"`
|
||||
@@ -1304,82 +1515,58 @@ type trackedIssueInfo struct {
|
||||
WorkerAge string `json:"worker_age,omitempty"` // How long worker has been on this issue
|
||||
}
|
||||
|
||||
// getTrackedIssues queries SQLite directly to get issues tracked by a convoy.
|
||||
// This is needed because bd dep list doesn't properly show cross-rig external dependencies.
|
||||
// Uses batched lookup to avoid N+1 subprocess calls.
|
||||
// getTrackedIssues uses bd dep list to get issues tracked by a convoy.
|
||||
// Returns issue details including status, type, and worker info.
|
||||
func getTrackedIssues(townBeads, convoyID string) []trackedIssueInfo {
|
||||
dbPath := filepath.Join(townBeads, "beads.db")
|
||||
|
||||
// Query tracked dependencies from SQLite
|
||||
// Escape single quotes to prevent SQL injection
|
||||
safeConvoyID := strings.ReplaceAll(convoyID, "'", "''")
|
||||
queryCmd := exec.Command("sqlite3", "-json", dbPath,
|
||||
fmt.Sprintf(`SELECT depends_on_id, type FROM dependencies WHERE issue_id = '%s' AND type = 'tracks'`, safeConvoyID))
|
||||
// Use bd dep list to get tracked dependencies
|
||||
// Run from town root (parent of .beads) so bd routes correctly
|
||||
townRoot := filepath.Dir(townBeads)
|
||||
depCmd := exec.Command("bd", "--no-daemon", "dep", "list", convoyID, "--direction=down", "--type=tracks", "--json")
|
||||
depCmd.Dir = townRoot
|
||||
|
||||
var stdout bytes.Buffer
|
||||
queryCmd.Stdout = &stdout
|
||||
if err := queryCmd.Run(); err != nil {
|
||||
depCmd.Stdout = &stdout
|
||||
if err := depCmd.Run(); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse the JSON output - bd dep list returns full issue details
|
||||
var deps []struct {
|
||||
DependsOnID string `json:"depends_on_id"`
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
IssueType string `json:"issue_type"`
|
||||
Assignee string `json:"assignee"`
|
||||
DependencyType string `json:"dependency_type"`
|
||||
Labels []string `json:"labels"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &deps); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// First pass: collect all issue IDs (normalized from external refs)
|
||||
issueIDs := make([]string, 0, len(deps))
|
||||
idToDepType := make(map[string]string)
|
||||
// Collect non-closed issue IDs for worker lookup
|
||||
openIssueIDs := make([]string, 0, len(deps))
|
||||
for _, dep := range deps {
|
||||
issueID := dep.DependsOnID
|
||||
|
||||
// Handle external reference format: external:rig:issue-id
|
||||
if strings.HasPrefix(issueID, "external:") {
|
||||
parts := strings.SplitN(issueID, ":", 3)
|
||||
if len(parts) == 3 {
|
||||
issueID = parts[2] // Extract the actual issue ID
|
||||
}
|
||||
}
|
||||
|
||||
issueIDs = append(issueIDs, issueID)
|
||||
idToDepType[issueID] = dep.Type
|
||||
}
|
||||
|
||||
// Single batch call to get all issue details
|
||||
detailsMap := getIssueDetailsBatch(issueIDs)
|
||||
|
||||
// Get workers for these issues (only for non-closed issues)
|
||||
openIssueIDs := make([]string, 0, len(issueIDs))
|
||||
for _, id := range issueIDs {
|
||||
if details, ok := detailsMap[id]; ok && details.Status != "closed" {
|
||||
openIssueIDs = append(openIssueIDs, id)
|
||||
if dep.Status != "closed" {
|
||||
openIssueIDs = append(openIssueIDs, dep.ID)
|
||||
}
|
||||
}
|
||||
workersMap := getWorkersForIssues(openIssueIDs)
|
||||
|
||||
// Second pass: build result using the batch lookup
|
||||
// Build result
|
||||
var tracked []trackedIssueInfo
|
||||
for _, issueID := range issueIDs {
|
||||
for _, dep := range deps {
|
||||
info := trackedIssueInfo{
|
||||
ID: issueID,
|
||||
Type: idToDepType[issueID],
|
||||
}
|
||||
|
||||
if details, ok := detailsMap[issueID]; ok {
|
||||
info.Title = details.Title
|
||||
info.Status = details.Status
|
||||
info.IssueType = details.IssueType
|
||||
info.Assignee = details.Assignee
|
||||
} else {
|
||||
info.Title = "(external)"
|
||||
info.Status = "unknown"
|
||||
ID: dep.ID,
|
||||
Title: dep.Title,
|
||||
Status: dep.Status,
|
||||
Type: dep.DependencyType,
|
||||
IssueType: dep.IssueType,
|
||||
Assignee: dep.Assignee,
|
||||
}
|
||||
|
||||
// Add worker info if available
|
||||
if worker, ok := workersMap[issueID]; ok {
|
||||
if worker, ok := workersMap[dep.ID]; ok {
|
||||
info.Worker = worker.Worker
|
||||
info.WorkerAge = worker.Age
|
||||
}
|
||||
@@ -1390,6 +1577,58 @@ func getTrackedIssues(townBeads, convoyID string) []trackedIssueInfo {
|
||||
return tracked
|
||||
}
|
||||
|
||||
// getExternalIssueDetails fetches issue details from an external rig database.
|
||||
// townBeads: path to town .beads directory
|
||||
// rigName: name of the rig (e.g., "claycantrell")
|
||||
// issueID: the issue ID to look up
|
||||
func getExternalIssueDetails(townBeads, rigName, issueID string) *issueDetails {
|
||||
// Resolve rig directory path: town parent + rig name
|
||||
townParent := filepath.Dir(townBeads)
|
||||
rigDir := filepath.Join(townParent, rigName)
|
||||
|
||||
// Check if rig directory exists
|
||||
if _, err := os.Stat(rigDir); os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Query the rig database by running bd show from the rig directory
|
||||
// Use --allow-stale to handle cases where JSONL and DB are out of sync
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", issueID, "--json", "--allow-stale")
|
||||
showCmd.Dir = rigDir // Set working directory to rig directory
|
||||
var stdout bytes.Buffer
|
||||
showCmd.Stdout = &stdout
|
||||
|
||||
if err := showCmd.Run(); err != nil {
|
||||
return nil
|
||||
}
|
||||
if stdout.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var issues []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
IssueType string `json:"issue_type"`
|
||||
Assignee string `json:"assignee"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &issues); err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(issues) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
issue := issues[0]
|
||||
return &issueDetails{
|
||||
ID: issue.ID,
|
||||
Title: issue.Title,
|
||||
Status: issue.Status,
|
||||
IssueType: issue.IssueType,
|
||||
Assignee: issue.Assignee,
|
||||
}
|
||||
}
|
||||
|
||||
// issueDetails holds basic issue info.
|
||||
type issueDetails struct {
|
||||
ID string
|
||||
|
||||
@@ -2,8 +2,10 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
@@ -44,47 +46,39 @@ var (
|
||||
var costsCmd = &cobra.Command{
|
||||
Use: "costs",
|
||||
GroupID: GroupDiag,
|
||||
Short: "Show costs for running Claude sessions [DISABLED]",
|
||||
Short: "Show costs for running Claude sessions",
|
||||
Long: `Display costs for Claude Code sessions in Gas Town.
|
||||
|
||||
⚠️ COST TRACKING IS CURRENTLY DISABLED
|
||||
|
||||
Claude Code displays costs in the TUI status bar, which cannot be captured
|
||||
via tmux. All sessions will show $0.00 until Claude Code exposes cost data
|
||||
through an API or environment variable.
|
||||
|
||||
What we need from Claude Code:
|
||||
- Stop hook env var (e.g., $CLAUDE_SESSION_COST)
|
||||
- Or queryable file/API endpoint
|
||||
|
||||
See: GH#24, gt-7awfj
|
||||
|
||||
The infrastructure remains in place and will work once cost data is available.
|
||||
Costs are calculated from Claude Code transcript files at ~/.claude/projects/
|
||||
by summing token usage from assistant messages and applying model-specific pricing.
|
||||
|
||||
Examples:
|
||||
gt costs # Live costs from running sessions
|
||||
gt costs --today # Today's costs from wisps (not yet digested)
|
||||
gt costs --week # This week's costs from digest beads + today's wisps
|
||||
gt costs --today # Today's costs from log file (not yet digested)
|
||||
gt costs --week # This week's costs from digest beads + today's log
|
||||
gt costs --by-role # Breakdown by role (polecat, witness, etc.)
|
||||
gt costs --by-rig # Breakdown by rig
|
||||
gt costs --json # Output as JSON
|
||||
gt costs -v # Show debug output for failures
|
||||
|
||||
Subcommands:
|
||||
gt costs record # Record session cost as ephemeral wisp (Stop hook)
|
||||
gt costs digest # Aggregate wisps into daily digest bead (Deacon patrol)`,
|
||||
gt costs record # Record session cost to local log file (Stop hook)
|
||||
gt costs digest # Aggregate log entries into daily digest bead (Deacon patrol)`,
|
||||
RunE: runCosts,
|
||||
}
|
||||
|
||||
var costsRecordCmd = &cobra.Command{
|
||||
Use: "record",
|
||||
Short: "Record session cost as an ephemeral wisp (called by Stop hook)",
|
||||
Long: `Record the final cost of a session as an ephemeral wisp.
|
||||
Short: "Record session cost to local log file (called by Stop hook)",
|
||||
Long: `Record the final cost of a session to a local log file.
|
||||
|
||||
This command is intended to be called from a Claude Code Stop hook.
|
||||
It captures the final cost from the tmux session and creates an ephemeral
|
||||
event that is NOT exported to JSONL (avoiding log-in-database pollution).
|
||||
It reads token usage from the Claude Code transcript file (~/.claude/projects/...)
|
||||
and calculates the cost based on model pricing, then appends it to
|
||||
~/.gt/costs.jsonl. This is a simple append operation that never fails
|
||||
due to database availability.
|
||||
|
||||
Session cost wisps are aggregated daily by 'gt costs digest' into a single
|
||||
Session costs are aggregated daily by 'gt costs digest' into a single
|
||||
permanent "Cost Report YYYY-MM-DD" bead for audit purposes.
|
||||
|
||||
Examples:
|
||||
@@ -95,12 +89,12 @@ Examples:
|
||||
|
||||
var costsDigestCmd = &cobra.Command{
|
||||
Use: "digest",
|
||||
Short: "Aggregate session cost wisps into a daily digest bead",
|
||||
Long: `Aggregate ephemeral session cost wisps into a permanent daily digest.
|
||||
Short: "Aggregate session cost log entries into a daily digest bead",
|
||||
Long: `Aggregate session cost log entries into a permanent daily digest.
|
||||
|
||||
This command is intended to be run by Deacon patrol (daily) or manually.
|
||||
It queries session.ended wisps for a target date, creates a single aggregate
|
||||
"Cost Report YYYY-MM-DD" bead, then deletes the source wisps.
|
||||
It reads entries from ~/.gt/costs.jsonl for a target date, creates a single
|
||||
aggregate "Cost Report YYYY-MM-DD" bead, then removes the source entries.
|
||||
|
||||
The resulting digest bead is permanent (exported to JSONL, synced via git)
|
||||
and provides an audit trail without log-in-database pollution.
|
||||
@@ -114,18 +108,18 @@ Examples:
|
||||
|
||||
var costsMigrateCmd = &cobra.Command{
|
||||
Use: "migrate",
|
||||
Short: "Migrate legacy session.ended beads to the new wisp architecture",
|
||||
Short: "Migrate legacy session.ended beads to the new log-file architecture",
|
||||
Long: `Migrate legacy session.ended event beads to the new cost tracking system.
|
||||
|
||||
This command handles the transition from the old architecture (where each
|
||||
session.ended event was a permanent bead) to the new wisp-based system.
|
||||
session.ended event was a permanent bead) to the new log-file-based system.
|
||||
|
||||
The migration:
|
||||
1. Finds all open session.ended event beads (should be none if auto-close worked)
|
||||
2. Closes them with reason "migrated to wisp architecture"
|
||||
2. Closes them with reason "migrated to log-file architecture"
|
||||
|
||||
Legacy beads remain in the database for historical queries but won't interfere
|
||||
with the new wisp-based cost tracking.
|
||||
with the new log-file-based cost tracking.
|
||||
|
||||
Examples:
|
||||
gt costs migrate # Migrate legacy beads
|
||||
@@ -192,6 +186,56 @@ type CostsOutput struct {
|
||||
// costRegex matches cost patterns like "$1.23" or "$12.34"
|
||||
var costRegex = regexp.MustCompile(`\$(\d+\.\d{2})`)
|
||||
|
||||
// TranscriptMessage represents a message from a Claude Code transcript file.
|
||||
type TranscriptMessage struct {
|
||||
Type string `json:"type"`
|
||||
SessionID string `json:"sessionId"`
|
||||
CWD string `json:"cwd"`
|
||||
Message *TranscriptMessageBody `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
// TranscriptMessageBody contains the message content and usage info.
|
||||
type TranscriptMessageBody struct {
|
||||
Model string `json:"model"`
|
||||
Role string `json:"role"`
|
||||
Usage *TranscriptUsage `json:"usage,omitempty"`
|
||||
}
|
||||
|
||||
// TranscriptUsage contains token usage information.
|
||||
type TranscriptUsage struct {
|
||||
InputTokens int `json:"input_tokens"`
|
||||
CacheCreationInputTokens int `json:"cache_creation_input_tokens"`
|
||||
CacheReadInputTokens int `json:"cache_read_input_tokens"`
|
||||
OutputTokens int `json:"output_tokens"`
|
||||
}
|
||||
|
||||
// TokenUsage aggregates token usage across a session.
|
||||
type TokenUsage struct {
|
||||
Model string
|
||||
InputTokens int
|
||||
CacheCreationInputTokens int
|
||||
CacheReadInputTokens int
|
||||
OutputTokens int
|
||||
}
|
||||
|
||||
// Model pricing per million tokens (as of Jan 2025).
|
||||
// See: https://www.anthropic.com/pricing
|
||||
var modelPricing = map[string]struct {
|
||||
InputPerMillion float64
|
||||
OutputPerMillion float64
|
||||
CacheReadPerMillion float64 // 90% discount on input price
|
||||
CacheCreatePerMillion float64 // 25% premium on input price
|
||||
}{
|
||||
// Claude Opus 4.5
|
||||
"claude-opus-4-5-20251101": {15.0, 75.0, 1.5, 18.75},
|
||||
// Claude Sonnet 4
|
||||
"claude-sonnet-4-20250514": {3.0, 15.0, 0.3, 3.75},
|
||||
// Claude Haiku 3.5
|
||||
"claude-3-5-haiku-20241022": {1.0, 5.0, 0.1, 1.25},
|
||||
// Fallback for unknown models (use Sonnet pricing)
|
||||
"default": {3.0, 15.0, 0.3, 3.75},
|
||||
}
|
||||
|
||||
func runCosts(cmd *cobra.Command, args []string) error {
|
||||
// If querying ledger, use ledger functions
|
||||
if costsToday || costsWeek || costsByRole || costsByRig {
|
||||
@@ -203,11 +247,6 @@ func runCosts(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
func runLiveCosts() error {
|
||||
// Warn that cost tracking is disabled
|
||||
fmt.Fprintf(os.Stderr, "%s Cost tracking is disabled - Claude Code does not expose session costs.\n",
|
||||
style.Warning.Render("⚠"))
|
||||
fmt.Fprintf(os.Stderr, " All sessions will show $0.00. See: GH#24, gt-7awfj\n\n")
|
||||
|
||||
t := tmux.NewTmux()
|
||||
|
||||
// Get all tmux sessions
|
||||
@@ -228,14 +267,24 @@ func runLiveCosts() error {
|
||||
// Parse session name to get role/rig/worker
|
||||
role, rig, worker := parseSessionName(session)
|
||||
|
||||
// Capture pane content
|
||||
content, err := t.CapturePaneAll(session)
|
||||
// Get working directory of the session
|
||||
workDir, err := getTmuxSessionWorkDir(session)
|
||||
if err != nil {
|
||||
continue // Skip sessions we can't capture
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] could not get workdir for %s: %v\n", session, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract cost from content
|
||||
cost := extractCost(content)
|
||||
// Extract cost from Claude transcript
|
||||
cost, err := extractCostFromWorkDir(workDir)
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] could not extract cost for %s: %v\n", session, err)
|
||||
}
|
||||
// Still include the session with zero cost
|
||||
cost = 0.0
|
||||
}
|
||||
|
||||
// Check if an agent appears to be running
|
||||
running := t.IsAgentRunning(session)
|
||||
@@ -267,11 +316,6 @@ func runLiveCosts() error {
|
||||
}
|
||||
|
||||
func runCostsFromLedger() error {
|
||||
// Warn that cost tracking is disabled
|
||||
fmt.Fprintf(os.Stderr, "%s Cost tracking is disabled - Claude Code does not expose session costs.\n",
|
||||
style.Warning.Render("⚠"))
|
||||
fmt.Fprintf(os.Stderr, " Historical data may show $0.00 for all sessions. See: GH#24, gt-7awfj\n\n")
|
||||
|
||||
now := time.Now()
|
||||
var entries []CostEntry
|
||||
var err error
|
||||
@@ -279,7 +323,7 @@ func runCostsFromLedger() error {
|
||||
if costsToday {
|
||||
// For today: query ephemeral wisps (not yet digested)
|
||||
// This gives real-time view of today's costs
|
||||
entries, err = querySessionCostWisps(now)
|
||||
entries, err = querySessionCostEntries(now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session cost wisps: %w", err)
|
||||
}
|
||||
@@ -292,10 +336,17 @@ func runCostsFromLedger() error {
|
||||
}
|
||||
|
||||
// Also include today's wisps (not yet digested)
|
||||
todayWisps, _ := querySessionCostWisps(now)
|
||||
entries = append(entries, todayWisps...)
|
||||
todayEntries, _ := querySessionCostEntries(now)
|
||||
entries = append(entries, todayEntries...)
|
||||
} else if costsByRole || costsByRig {
|
||||
// When using --by-role or --by-rig without time filter, default to today
|
||||
// (querying all historical events would be expensive and likely empty)
|
||||
entries, err = querySessionCostEntries(now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session cost entries: %w", err)
|
||||
}
|
||||
} else {
|
||||
// No time filter: query both digests and legacy session.ended events
|
||||
// No time filter and no breakdown flags: query both digests and legacy session.ended events
|
||||
// (for backwards compatibility during migration)
|
||||
entries = querySessionEvents()
|
||||
}
|
||||
@@ -636,7 +687,9 @@ func parseSessionName(session string) (role, rig, worker string) {
|
||||
}
|
||||
|
||||
// extractCost finds the most recent cost value in pane content.
|
||||
// Claude Code displays cost in the format "$X.XX" in the status area.
|
||||
// DEPRECATED: Claude Code no longer displays cost in a scrapable format.
|
||||
// This is kept for backwards compatibility but always returns 0.0.
|
||||
// Use extractCostFromTranscript instead.
|
||||
func extractCost(content string) float64 {
|
||||
matches := costRegex.FindAllStringSubmatch(content, -1)
|
||||
if len(matches) == 0 {
|
||||
@@ -654,6 +707,156 @@ func extractCost(content string) float64 {
|
||||
return cost
|
||||
}
|
||||
|
||||
// getClaudeProjectDir returns the Claude Code project directory for a working directory.
|
||||
// Claude Code stores transcripts in ~/.claude/projects/<path-with-dashes-instead-of-slashes>/
|
||||
func getClaudeProjectDir(workDir string) (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Convert path to Claude's directory naming: replace / with -
|
||||
// Keep leading slash - it becomes a leading dash in Claude's encoding
|
||||
projectName := strings.ReplaceAll(workDir, "/", "-")
|
||||
return filepath.Join(home, ".claude", "projects", projectName), nil
|
||||
}
|
||||
|
||||
// findLatestTranscript finds the most recently modified .jsonl file in a directory.
|
||||
func findLatestTranscript(projectDir string) (string, error) {
|
||||
var latestPath string
|
||||
var latestTime time.Time
|
||||
|
||||
err := filepath.WalkDir(projectDir, func(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.IsDir() && path != projectDir {
|
||||
return fs.SkipDir // Don't recurse into subdirectories
|
||||
}
|
||||
if !d.IsDir() && strings.HasSuffix(path, ".jsonl") {
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
return nil // Skip files we can't stat
|
||||
}
|
||||
if info.ModTime().After(latestTime) {
|
||||
latestTime = info.ModTime()
|
||||
latestPath = path
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if latestPath == "" {
|
||||
return "", fmt.Errorf("no transcript files found in %s", projectDir)
|
||||
}
|
||||
return latestPath, nil
|
||||
}
|
||||
|
||||
// parseTranscriptUsage reads a transcript file and sums token usage from assistant messages.
|
||||
func parseTranscriptUsage(transcriptPath string) (*TokenUsage, error) {
|
||||
file, err := os.Open(transcriptPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
usage := &TokenUsage{}
|
||||
scanner := bufio.NewScanner(file)
|
||||
// Increase buffer for potentially large JSON lines
|
||||
buf := make([]byte, 0, 256*1024)
|
||||
scanner.Buffer(buf, 1024*1024)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var msg TranscriptMessage
|
||||
if err := json.Unmarshal(line, &msg); err != nil {
|
||||
continue // Skip malformed lines
|
||||
}
|
||||
|
||||
// Only process assistant messages with usage info
|
||||
if msg.Type != "assistant" || msg.Message == nil || msg.Message.Usage == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Capture the model (use first one found, they should all be the same)
|
||||
if usage.Model == "" && msg.Message.Model != "" {
|
||||
usage.Model = msg.Message.Model
|
||||
}
|
||||
|
||||
// Sum token usage
|
||||
u := msg.Message.Usage
|
||||
usage.InputTokens += u.InputTokens
|
||||
usage.CacheCreationInputTokens += u.CacheCreationInputTokens
|
||||
usage.CacheReadInputTokens += u.CacheReadInputTokens
|
||||
usage.OutputTokens += u.OutputTokens
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// calculateCost converts token usage to USD cost based on model pricing.
|
||||
func calculateCost(usage *TokenUsage) float64 {
|
||||
if usage == nil {
|
||||
return 0.0
|
||||
}
|
||||
|
||||
// Look up pricing for the model
|
||||
pricing, ok := modelPricing[usage.Model]
|
||||
if !ok {
|
||||
pricing = modelPricing["default"]
|
||||
}
|
||||
|
||||
// Calculate cost (prices are per million tokens)
|
||||
inputCost := float64(usage.InputTokens) / 1_000_000 * pricing.InputPerMillion
|
||||
cacheReadCost := float64(usage.CacheReadInputTokens) / 1_000_000 * pricing.CacheReadPerMillion
|
||||
cacheCreateCost := float64(usage.CacheCreationInputTokens) / 1_000_000 * pricing.CacheCreatePerMillion
|
||||
outputCost := float64(usage.OutputTokens) / 1_000_000 * pricing.OutputPerMillion
|
||||
|
||||
return inputCost + cacheReadCost + cacheCreateCost + outputCost
|
||||
}
|
||||
|
||||
// extractCostFromWorkDir extracts cost from Claude Code transcript for a working directory.
|
||||
// This reads the most recent transcript file and sums all token usage.
|
||||
func extractCostFromWorkDir(workDir string) (float64, error) {
|
||||
projectDir, err := getClaudeProjectDir(workDir)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("getting project dir: %w", err)
|
||||
}
|
||||
|
||||
transcriptPath, err := findLatestTranscript(projectDir)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("finding transcript: %w", err)
|
||||
}
|
||||
|
||||
usage, err := parseTranscriptUsage(transcriptPath)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("parsing transcript: %w", err)
|
||||
}
|
||||
|
||||
return calculateCost(usage), nil
|
||||
}
|
||||
|
||||
// getTmuxSessionWorkDir gets the current working directory of a tmux session.
|
||||
func getTmuxSessionWorkDir(session string) (string, error) {
|
||||
cmd := exec.Command("tmux", "display-message", "-t", session, "-p", "#{pane_current_path}")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(output)), nil
|
||||
}
|
||||
|
||||
func outputCostsJSON(output CostsOutput) error {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
@@ -738,8 +941,29 @@ func outputLedgerHuman(output CostsOutput, entries []CostEntry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// runCostsRecord captures the final cost from a session and records it as a bead event.
|
||||
// This is called by the Claude Code Stop hook.
|
||||
// CostLogEntry represents a single entry in the costs.jsonl log file.
|
||||
type CostLogEntry struct {
|
||||
SessionID string `json:"session_id"`
|
||||
Role string `json:"role"`
|
||||
Rig string `json:"rig,omitempty"`
|
||||
Worker string `json:"worker,omitempty"`
|
||||
CostUSD float64 `json:"cost_usd"`
|
||||
EndedAt time.Time `json:"ended_at"`
|
||||
WorkItem string `json:"work_item,omitempty"`
|
||||
}
|
||||
|
||||
// getCostsLogPath returns the path to the costs log file (~/.gt/costs.jsonl).
|
||||
func getCostsLogPath() string {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "/tmp/gt-costs.jsonl" // Fallback
|
||||
}
|
||||
return filepath.Join(home, ".gt", "costs.jsonl")
|
||||
}
|
||||
|
||||
// runCostsRecord captures the final cost from a session and appends it to a local log file.
|
||||
// This is called by the Claude Code Stop hook. It's designed to never fail due to
|
||||
// database availability - it's a simple file append operation.
|
||||
func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
// Get session from flag or try to detect from environment
|
||||
session := recordSession
|
||||
@@ -758,107 +982,78 @@ func runCostsRecord(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("--session flag required (or set GT_SESSION env var, or GT_RIG/GT_ROLE)")
|
||||
}
|
||||
|
||||
t := tmux.NewTmux()
|
||||
|
||||
// Capture pane content
|
||||
content, err := t.CapturePaneAll(session)
|
||||
if err != nil {
|
||||
// Session may already be gone - that's OK, we'll record with zero cost
|
||||
content = ""
|
||||
// Get working directory from environment or tmux session
|
||||
workDir := os.Getenv("GT_CWD")
|
||||
if workDir == "" {
|
||||
// Try to get from tmux session
|
||||
var err error
|
||||
workDir, err = getTmuxSessionWorkDir(session)
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] could not get workdir for %s: %v\n", session, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract cost
|
||||
cost := extractCost(content)
|
||||
// Extract cost from Claude transcript
|
||||
var cost float64
|
||||
if workDir != "" {
|
||||
var err error
|
||||
cost, err = extractCostFromWorkDir(workDir)
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] could not extract cost from transcript: %v\n", err)
|
||||
}
|
||||
cost = 0.0
|
||||
}
|
||||
}
|
||||
|
||||
// Parse session name
|
||||
role, rig, worker := parseSessionName(session)
|
||||
|
||||
// Build agent path for actor field
|
||||
agentPath := buildAgentPath(role, rig, worker)
|
||||
|
||||
// Build event title
|
||||
title := fmt.Sprintf("Session ended: %s", session)
|
||||
if recordWorkItem != "" {
|
||||
title = fmt.Sprintf("Session: %s completed %s", session, recordWorkItem)
|
||||
// Build log entry
|
||||
entry := CostLogEntry{
|
||||
SessionID: session,
|
||||
Role: role,
|
||||
Rig: rig,
|
||||
Worker: worker,
|
||||
CostUSD: cost,
|
||||
EndedAt: time.Now(),
|
||||
WorkItem: recordWorkItem,
|
||||
}
|
||||
|
||||
// Build payload JSON
|
||||
payload := map[string]interface{}{
|
||||
"cost_usd": cost,
|
||||
"session_id": session,
|
||||
"role": role,
|
||||
"ended_at": time.Now().Format(time.RFC3339),
|
||||
}
|
||||
if rig != "" {
|
||||
payload["rig"] = rig
|
||||
}
|
||||
if worker != "" {
|
||||
payload["worker"] = worker
|
||||
}
|
||||
payloadJSON, err := json.Marshal(payload)
|
||||
// Marshal to JSON
|
||||
entryJSON, err := json.Marshal(entry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshaling payload: %w", err)
|
||||
return fmt.Errorf("marshaling cost entry: %w", err)
|
||||
}
|
||||
|
||||
// Build bd create command for ephemeral wisp
|
||||
// Using --ephemeral creates a wisp that:
|
||||
// - Is stored locally only (not exported to JSONL)
|
||||
// - Won't pollute git history with O(sessions/day) events
|
||||
// - Will be aggregated into daily digests by 'gt costs digest'
|
||||
bdArgs := []string{
|
||||
"create",
|
||||
"--ephemeral",
|
||||
"--type=event",
|
||||
"--title=" + title,
|
||||
"--event-category=session.ended",
|
||||
"--event-actor=" + agentPath,
|
||||
"--event-payload=" + string(payloadJSON),
|
||||
"--silent",
|
||||
// Append to log file
|
||||
logPath := getCostsLogPath()
|
||||
|
||||
// Ensure directory exists
|
||||
logDir := filepath.Dir(logPath)
|
||||
if err := os.MkdirAll(logDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating log directory: %w", err)
|
||||
}
|
||||
|
||||
// Add work item as event target if specified
|
||||
if recordWorkItem != "" {
|
||||
bdArgs = append(bdArgs, "--event-target="+recordWorkItem)
|
||||
}
|
||||
|
||||
// NOTE: We intentionally don't use --rig flag here because it causes
|
||||
// event fields (event_kind, actor, payload) to not be stored properly.
|
||||
// The bd command will auto-detect the correct rig from cwd.
|
||||
|
||||
// Find town root so bd can find the .beads database.
|
||||
// The stop hook may run from a role subdirectory (e.g., mayor/) that
|
||||
// doesn't have its own .beads, so we need to run bd from town root.
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
// Open file for append (create if doesn't exist).
|
||||
// O_APPEND writes are atomic on POSIX for writes < PIPE_BUF (~4KB).
|
||||
// A JSON log entry is ~200 bytes, so concurrent appends are safe.
|
||||
f, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
}
|
||||
if townRoot == "" {
|
||||
return fmt.Errorf("not in a Gas Town workspace")
|
||||
return fmt.Errorf("opening costs log: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Execute bd create from town root
|
||||
bdCmd := exec.Command("bd", bdArgs...)
|
||||
bdCmd.Dir = townRoot
|
||||
output, err := bdCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating session cost wisp: %w\nOutput: %s", err, string(output))
|
||||
}
|
||||
|
||||
wispID := strings.TrimSpace(string(output))
|
||||
|
||||
// Auto-close session cost wisps immediately after creation.
|
||||
// These are informational records that don't need to stay open.
|
||||
// The wisp data is preserved and queryable until digested.
|
||||
closeCmd := exec.Command("bd", "close", wispID, "--reason=auto-closed session cost wisp")
|
||||
closeCmd.Dir = townRoot
|
||||
if closeErr := closeCmd.Run(); closeErr != nil {
|
||||
// Non-fatal: wisp was created, just couldn't auto-close
|
||||
fmt.Fprintf(os.Stderr, "warning: could not auto-close session cost wisp %s: %v\n", wispID, closeErr)
|
||||
// Write entry with newline
|
||||
if _, err := f.Write(append(entryJSON, '\n')); err != nil {
|
||||
return fmt.Errorf("writing to costs log: %w", err)
|
||||
}
|
||||
|
||||
// Output confirmation (silent if cost is zero and no work item)
|
||||
if cost > 0 || recordWorkItem != "" {
|
||||
fmt.Printf("%s Recorded $%.2f for %s (wisp: %s)", style.Success.Render("✓"), cost, session, wispID)
|
||||
fmt.Printf("%s Recorded $%.2f for %s", style.Success.Render("✓"), cost, session)
|
||||
if recordWorkItem != "" {
|
||||
fmt.Printf(" (work: %s)", recordWorkItem)
|
||||
}
|
||||
@@ -928,44 +1123,6 @@ func detectCurrentTmuxSession() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// buildAgentPath builds the agent path from role, rig, and worker.
|
||||
// Examples: "mayor", "gastown/witness", "gastown/polecats/toast"
|
||||
func buildAgentPath(role, rig, worker string) string {
|
||||
switch role {
|
||||
case constants.RoleMayor, constants.RoleDeacon:
|
||||
return role
|
||||
case constants.RoleWitness, constants.RoleRefinery:
|
||||
if rig != "" {
|
||||
return rig + "/" + role
|
||||
}
|
||||
return role
|
||||
case constants.RolePolecat:
|
||||
if rig != "" && worker != "" {
|
||||
return rig + "/polecats/" + worker
|
||||
}
|
||||
if rig != "" {
|
||||
return rig + "/polecat"
|
||||
}
|
||||
return "polecat/" + worker
|
||||
case constants.RoleCrew:
|
||||
if rig != "" && worker != "" {
|
||||
return rig + "/crew/" + worker
|
||||
}
|
||||
if rig != "" {
|
||||
return rig + "/crew"
|
||||
}
|
||||
return "crew/" + worker
|
||||
default:
|
||||
if rig != "" && worker != "" {
|
||||
return rig + "/" + worker
|
||||
}
|
||||
if rig != "" {
|
||||
return rig
|
||||
}
|
||||
return worker
|
||||
}
|
||||
}
|
||||
|
||||
// CostDigest represents the aggregated daily cost report.
|
||||
type CostDigest struct {
|
||||
Date string `json:"date"`
|
||||
@@ -976,21 +1133,7 @@ type CostDigest struct {
|
||||
ByRig map[string]float64 `json:"by_rig,omitempty"`
|
||||
}
|
||||
|
||||
// WispListOutput represents the JSON output from bd mol wisp list.
|
||||
type WispListOutput struct {
|
||||
Wisps []WispItem `json:"wisps"`
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
// WispItem represents a single wisp from bd mol wisp list.
|
||||
type WispItem struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
}
|
||||
|
||||
// runCostsDigest aggregates session cost wisps into a daily digest bead.
|
||||
// runCostsDigest aggregates session cost entries into a daily digest bead.
|
||||
func runCostsDigest(cmd *cobra.Command, args []string) error {
|
||||
// Determine target date
|
||||
var targetDate time.Time
|
||||
@@ -1009,31 +1152,31 @@ func runCostsDigest(cmd *cobra.Command, args []string) error {
|
||||
|
||||
dateStr := targetDate.Format("2006-01-02")
|
||||
|
||||
// Query ephemeral session.ended wisps for target date
|
||||
wisps, err := querySessionCostWisps(targetDate)
|
||||
// Query session cost entries for target date
|
||||
costEntries, err := querySessionCostEntries(targetDate)
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying session cost wisps: %w", err)
|
||||
return fmt.Errorf("querying session cost entries: %w", err)
|
||||
}
|
||||
|
||||
if len(wisps) == 0 {
|
||||
fmt.Printf("%s No session cost wisps found for %s\n", style.Dim.Render("○"), dateStr)
|
||||
if len(costEntries) == 0 {
|
||||
fmt.Printf("%s No session cost entries found for %s\n", style.Dim.Render("○"), dateStr)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Build digest
|
||||
digest := CostDigest{
|
||||
Date: dateStr,
|
||||
Sessions: wisps,
|
||||
Sessions: costEntries,
|
||||
ByRole: make(map[string]float64),
|
||||
ByRig: make(map[string]float64),
|
||||
}
|
||||
|
||||
for _, w := range wisps {
|
||||
digest.TotalUSD += w.CostUSD
|
||||
for _, e := range costEntries {
|
||||
digest.TotalUSD += e.CostUSD
|
||||
digest.SessionCount++
|
||||
digest.ByRole[w.Role] += w.CostUSD
|
||||
if w.Rig != "" {
|
||||
digest.ByRig[w.Rig] += w.CostUSD
|
||||
digest.ByRole[e.Role] += e.CostUSD
|
||||
if e.Rig != "" {
|
||||
digest.ByRig[e.Rig] += e.CostUSD
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1060,105 +1203,70 @@ func runCostsDigest(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("creating digest bead: %w", err)
|
||||
}
|
||||
|
||||
// Delete source wisps (they're ephemeral, use bd mol burn)
|
||||
deletedCount, deleteErr := deleteSessionCostWisps(targetDate)
|
||||
// Delete source entries from log file
|
||||
deletedCount, deleteErr := deleteSessionCostEntries(targetDate)
|
||||
if deleteErr != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: failed to delete some source wisps: %v\n", deleteErr)
|
||||
fmt.Fprintf(os.Stderr, "warning: failed to delete some source entries: %v\n", deleteErr)
|
||||
}
|
||||
|
||||
fmt.Printf("%s Created Cost Report %s (bead: %s)\n", style.Success.Render("✓"), dateStr, digestID)
|
||||
fmt.Printf(" Total: $%.2f from %d sessions\n", digest.TotalUSD, digest.SessionCount)
|
||||
if deletedCount > 0 {
|
||||
fmt.Printf(" Deleted %d source wisps\n", deletedCount)
|
||||
fmt.Printf(" Removed %d entries from costs log\n", deletedCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// querySessionCostWisps queries ephemeral session.ended events for a target date.
|
||||
func querySessionCostWisps(targetDate time.Time) ([]CostEntry, error) {
|
||||
// List all wisps including closed ones
|
||||
listCmd := exec.Command("bd", "mol", "wisp", "list", "--all", "--json")
|
||||
listOutput, err := listCmd.Output()
|
||||
// querySessionCostEntries reads session cost entries from the local log file for a target date.
|
||||
func querySessionCostEntries(targetDate time.Time) ([]CostEntry, error) {
|
||||
logPath := getCostsLogPath()
|
||||
|
||||
// Read log file
|
||||
data, err := os.ReadFile(logPath)
|
||||
if err != nil {
|
||||
// No wisps database or command failed
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] wisp list failed: %v\n", err)
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil // No log file yet
|
||||
}
|
||||
return nil, nil
|
||||
return nil, fmt.Errorf("reading costs log: %w", err)
|
||||
}
|
||||
|
||||
var wispList WispListOutput
|
||||
if err := json.Unmarshal(listOutput, &wispList); err != nil {
|
||||
return nil, fmt.Errorf("parsing wisp list: %w", err)
|
||||
}
|
||||
|
||||
if wispList.Count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Batch all wisp IDs into a single bd show call to avoid N+1 queries
|
||||
showArgs := []string{"show", "--json"}
|
||||
for _, wisp := range wispList.Wisps {
|
||||
showArgs = append(showArgs, wisp.ID)
|
||||
}
|
||||
|
||||
showCmd := exec.Command("bd", showArgs...)
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("showing wisps: %w", err)
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
return nil, fmt.Errorf("parsing wisp details: %w", err)
|
||||
}
|
||||
|
||||
var sessionCostWisps []CostEntry
|
||||
targetDay := targetDate.Format("2006-01-02")
|
||||
var entries []CostEntry
|
||||
|
||||
for _, event := range events {
|
||||
// Filter for session.ended events only
|
||||
if event.EventKind != "session.ended" {
|
||||
// Parse each line as a CostLogEntry
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse payload
|
||||
var payload SessionPayload
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &payload); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] payload unmarshal failed for event %s: %v\n", event.ID, err)
|
||||
}
|
||||
continue
|
||||
var logEntry CostLogEntry
|
||||
if err := json.Unmarshal([]byte(line), &logEntry); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] failed to parse log entry: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Parse ended_at and filter by target date
|
||||
endedAt := event.CreatedAt
|
||||
if payload.EndedAt != "" {
|
||||
if parsed, err := time.Parse(time.RFC3339, payload.EndedAt); err == nil {
|
||||
endedAt = parsed
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this event is from the target date
|
||||
if endedAt.Format("2006-01-02") != targetDay {
|
||||
continue
|
||||
}
|
||||
|
||||
sessionCostWisps = append(sessionCostWisps, CostEntry{
|
||||
SessionID: payload.SessionID,
|
||||
Role: payload.Role,
|
||||
Rig: payload.Rig,
|
||||
Worker: payload.Worker,
|
||||
CostUSD: payload.CostUSD,
|
||||
EndedAt: endedAt,
|
||||
WorkItem: event.Target,
|
||||
// Filter by target date
|
||||
if logEntry.EndedAt.Format("2006-01-02") != targetDay {
|
||||
continue
|
||||
}
|
||||
|
||||
entries = append(entries, CostEntry{
|
||||
SessionID: logEntry.SessionID,
|
||||
Role: logEntry.Role,
|
||||
Rig: logEntry.Rig,
|
||||
Worker: logEntry.Worker,
|
||||
CostUSD: logEntry.CostUSD,
|
||||
EndedAt: logEntry.EndedAt,
|
||||
WorkItem: logEntry.WorkItem,
|
||||
})
|
||||
}
|
||||
|
||||
return sessionCostWisps, nil
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// createCostDigestBead creates a permanent bead for the daily cost digest.
|
||||
@@ -1228,96 +1336,63 @@ func createCostDigestBead(digest CostDigest) (string, error) {
|
||||
return digestID, nil
|
||||
}
|
||||
|
||||
// deleteSessionCostWisps deletes ephemeral session.ended wisps for a target date.
|
||||
func deleteSessionCostWisps(targetDate time.Time) (int, error) {
|
||||
// List all wisps
|
||||
listCmd := exec.Command("bd", "mol", "wisp", "list", "--all", "--json")
|
||||
listOutput, err := listCmd.Output()
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] wisp list failed in deletion: %v\n", err)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
// deleteSessionCostEntries removes entries for a target date from the costs log file.
|
||||
// It rewrites the file without the entries for that date.
|
||||
func deleteSessionCostEntries(targetDate time.Time) (int, error) {
|
||||
logPath := getCostsLogPath()
|
||||
|
||||
var wispList WispListOutput
|
||||
if err := json.Unmarshal(listOutput, &wispList); err != nil {
|
||||
return 0, fmt.Errorf("parsing wisp list: %w", err)
|
||||
// Read log file
|
||||
data, err := os.ReadFile(logPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return 0, nil // No log file
|
||||
}
|
||||
return 0, fmt.Errorf("reading costs log: %w", err)
|
||||
}
|
||||
|
||||
targetDay := targetDate.Format("2006-01-02")
|
||||
var keepLines []string
|
||||
deletedCount := 0
|
||||
|
||||
// Collect all wisp IDs that match our criteria
|
||||
var wispIDsToDelete []string
|
||||
|
||||
for _, wisp := range wispList.Wisps {
|
||||
// Get full wisp details to check if it's a session.ended event
|
||||
showCmd := exec.Command("bd", "show", wisp.ID, "--json")
|
||||
showOutput, err := showCmd.Output()
|
||||
if err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] bd show failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
// Filter out entries for target date
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
var events []SessionEvent
|
||||
if err := json.Unmarshal(showOutput, &events); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] JSON unmarshal failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
var logEntry CostLogEntry
|
||||
if err := json.Unmarshal([]byte(line), &logEntry); err != nil {
|
||||
// Keep unparseable lines (shouldn't happen but be safe)
|
||||
keepLines = append(keepLines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(events) == 0 {
|
||||
// Remove entries from target date
|
||||
if logEntry.EndedAt.Format("2006-01-02") == targetDay {
|
||||
deletedCount++
|
||||
continue
|
||||
}
|
||||
|
||||
event := events[0]
|
||||
|
||||
// Only delete session.ended wisps
|
||||
if event.EventKind != "session.ended" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse payload to get ended_at for date filtering
|
||||
var payload SessionPayload
|
||||
if event.Payload != "" {
|
||||
if err := json.Unmarshal([]byte(event.Payload), &payload); err != nil {
|
||||
if costsVerbose {
|
||||
fmt.Fprintf(os.Stderr, "[costs] payload unmarshal failed for wisp %s: %v\n", wisp.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
endedAt := event.CreatedAt
|
||||
if payload.EndedAt != "" {
|
||||
if parsed, err := time.Parse(time.RFC3339, payload.EndedAt); err == nil {
|
||||
endedAt = parsed
|
||||
}
|
||||
}
|
||||
|
||||
// Only delete wisps from the target date
|
||||
if endedAt.Format("2006-01-02") != targetDay {
|
||||
continue
|
||||
}
|
||||
|
||||
wispIDsToDelete = append(wispIDsToDelete, wisp.ID)
|
||||
keepLines = append(keepLines, line)
|
||||
}
|
||||
|
||||
if len(wispIDsToDelete) == 0 {
|
||||
if deletedCount == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Batch delete all wisps in a single subprocess call
|
||||
burnArgs := append([]string{"mol", "burn", "--force"}, wispIDsToDelete...)
|
||||
burnCmd := exec.Command("bd", burnArgs...)
|
||||
if burnErr := burnCmd.Run(); burnErr != nil {
|
||||
return 0, fmt.Errorf("batch burn failed: %w", burnErr)
|
||||
// Rewrite file without deleted entries
|
||||
newContent := strings.Join(keepLines, "\n")
|
||||
if len(keepLines) > 0 {
|
||||
newContent += "\n"
|
||||
}
|
||||
|
||||
return len(wispIDsToDelete), nil
|
||||
if err := os.WriteFile(logPath, []byte(newContent), 0644); err != nil {
|
||||
return 0, fmt.Errorf("rewriting costs log: %w", err)
|
||||
}
|
||||
|
||||
return deletedCount, nil
|
||||
}
|
||||
|
||||
// runCostsMigrate migrates legacy session.ended beads to the new architecture.
|
||||
@@ -1399,7 +1474,7 @@ func runCostsMigrate(cmd *cobra.Command, args []string) error {
|
||||
// Close all open session.ended events
|
||||
closedMigrated := 0
|
||||
for _, event := range openEvents {
|
||||
closeCmd := exec.Command("bd", "close", event.ID, "--reason=migrated to wisp architecture")
|
||||
closeCmd := exec.Command("bd", "close", event.ID, "--reason=migrated to log-file architecture")
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "warning: could not close %s: %v\n", event.ID, err)
|
||||
continue
|
||||
@@ -1409,7 +1484,7 @@ func runCostsMigrate(cmd *cobra.Command, args []string) error {
|
||||
|
||||
fmt.Printf("\n%s Migrated %d session.ended events (closed)\n", style.Success.Render("✓"), closedMigrated)
|
||||
fmt.Println(style.Dim.Render("Legacy beads preserved for historical queries."))
|
||||
fmt.Println(style.Dim.Render("New session costs will use ephemeral wisps + daily digests."))
|
||||
fmt.Println(style.Dim.Render("New session costs will use ~/.gt/costs.jsonl + daily digests."))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -237,7 +237,7 @@ var crewPristineCmd = &cobra.Command{
|
||||
Short: "Sync crew workspaces with remote",
|
||||
Long: `Ensure crew workspace(s) are up-to-date.
|
||||
|
||||
Runs git pull and bd sync for the specified crew, or all crew workers.
|
||||
Runs git pull for the specified crew, or all crew workers.
|
||||
Reports any uncommitted changes that may need attention.
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -193,10 +193,10 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Build startup beacon for predecessor discovery via /resume
|
||||
// Use FormatStartupNudge instead of bare "gt prime" which confuses agents
|
||||
// Use FormatStartupBeacon instead of bare "gt prime" which confuses agents
|
||||
// The SessionStart hook handles context injection (gt prime --hook)
|
||||
address := fmt.Sprintf("%s/crew/%s", r.Name, name)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
beacon := session.FormatStartupBeacon(session.BeaconConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "start",
|
||||
@@ -242,16 +242,30 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Build startup beacon for predecessor discovery via /resume
|
||||
// Use FormatStartupNudge instead of bare "gt prime" which confuses agents
|
||||
// Use FormatStartupBeacon instead of bare "gt prime" which confuses agents
|
||||
address := fmt.Sprintf("%s/crew/%s", r.Name, name)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
beacon := session.FormatStartupBeacon(session.BeaconConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "restart",
|
||||
})
|
||||
|
||||
// Ensure tmux session environment is set (for gt status-line to read).
|
||||
// Sessions created before this was added may be missing GT_CREW, etc.
|
||||
envVars := config.AgentEnv(config.AgentEnvConfig{
|
||||
Role: "crew",
|
||||
Rig: r.Name,
|
||||
AgentName: name,
|
||||
TownRoot: townRoot,
|
||||
RuntimeConfigDir: claudeConfigDir,
|
||||
BeadsNoDaemon: true,
|
||||
})
|
||||
for k, v := range envVars {
|
||||
_ = t.SetEnvironment(sessionID, k, v)
|
||||
}
|
||||
|
||||
// Use respawn-pane to replace shell with runtime directly
|
||||
// Export GT_ROLE and BD_ACTOR since tmux SetEnvironment only affects new panes
|
||||
// Export GT_ROLE and BD_ACTOR in the command since pane inherits from shell, not session env
|
||||
startupCmd, err := config.BuildCrewStartupCommandWithAgentOverride(r.Name, name, r.Path, beacon, crewAgentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
@@ -301,7 +315,7 @@ func runCrewAt(cmd *cobra.Command, args []string) error {
|
||||
// We're in the session at a shell prompt - start the agent
|
||||
// Build startup beacon for predecessor discovery via /resume
|
||||
address := fmt.Sprintf("%s/crew/%s", r.Name, name)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
beacon := session.FormatStartupBeacon(session.BeaconConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "start",
|
||||
|
||||
@@ -122,12 +122,6 @@ func runCrewPristine(cmd *cobra.Command, args []string) error {
|
||||
} else if result.PullError != "" {
|
||||
fmt.Printf(" %s git pull: %s\n", style.Bold.Render("✗"), result.PullError)
|
||||
}
|
||||
|
||||
if result.Synced {
|
||||
fmt.Printf(" %s bd sync\n", style.Dim.Render("✓"))
|
||||
} else if result.SyncError != "" {
|
||||
fmt.Printf(" %s bd sync: %s\n", style.Bold.Render("✗"), result.SyncError)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
204
internal/cmd/crew_sync.go
Normal file
204
internal/cmd/crew_sync.go
Normal file
@@ -0,0 +1,204 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/crew"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var crewSyncCmd = &cobra.Command{
|
||||
Use: "sync",
|
||||
Short: "Create missing crew members from rigs.json config",
|
||||
Long: `Sync crew members from rigs.json configuration.
|
||||
|
||||
Creates any crew members defined in rigs.json that don't already exist locally.
|
||||
This enables sharing crew configuration across machines.
|
||||
|
||||
Configuration in mayor/rigs.json:
|
||||
{
|
||||
"rigs": {
|
||||
"gastown": {
|
||||
"crew": {
|
||||
"theme": "mad-max",
|
||||
"members": ["diesel", "chrome", "nitro"]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Examples:
|
||||
gt crew sync # Sync crew in current rig
|
||||
gt crew sync --rig gastown # Sync crew in specific rig
|
||||
gt crew sync --dry-run # Show what would be created`,
|
||||
RunE: runCrewSync,
|
||||
}
|
||||
|
||||
func init() {
|
||||
crewSyncCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to sync crew in")
|
||||
crewSyncCmd.Flags().BoolVar(&crewDryRun, "dry-run", false, "Show what would be created without creating")
|
||||
crewCmd.AddCommand(crewSyncCmd)
|
||||
}
|
||||
|
||||
func runCrewSync(cmd *cobra.Command, args []string) error {
|
||||
// Find workspace
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Load rigs config
|
||||
rigsConfigPath := filepath.Join(townRoot, "mayor", "rigs.json")
|
||||
rigsConfig, err := config.LoadRigsConfig(rigsConfigPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading rigs config: %w", err)
|
||||
}
|
||||
|
||||
// Determine rig
|
||||
rigName := crewRig
|
||||
if rigName == "" {
|
||||
rigName, err = inferRigFromCwd(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not determine rig (use --rig flag): %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get rig entry from rigs.json
|
||||
rigEntry, ok := rigsConfig.Rigs[rigName]
|
||||
if !ok {
|
||||
return fmt.Errorf("rig '%s' not found in rigs.json", rigName)
|
||||
}
|
||||
|
||||
// Check if crew config exists
|
||||
if rigEntry.Crew == nil || len(rigEntry.Crew.Members) == 0 {
|
||||
fmt.Printf("No crew members configured for rig '%s' in rigs.json\n", rigName)
|
||||
fmt.Printf("\nTo configure crew, add to mayor/rigs.json:\n")
|
||||
fmt.Printf(" \"crew\": {\n")
|
||||
fmt.Printf(" \"theme\": \"mad-max\",\n")
|
||||
fmt.Printf(" \"members\": [\"diesel\", \"chrome\", \"nitro\"]\n")
|
||||
fmt.Printf(" }\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get rig
|
||||
g := git.NewGit(townRoot)
|
||||
rigMgr := rig.NewManager(townRoot, rigsConfig, g)
|
||||
r, err := rigMgr.GetRig(rigName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("rig '%s' not found", rigName)
|
||||
}
|
||||
|
||||
// Create crew manager
|
||||
crewGit := git.NewGit(r.Path)
|
||||
crewMgr := crew.NewManager(r, crewGit)
|
||||
|
||||
bd := beads.New(beads.ResolveBeadsDir(r.Path))
|
||||
|
||||
// Get existing crew
|
||||
existingCrew, err := crewMgr.List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing existing crew: %w", err)
|
||||
}
|
||||
existingNames := make(map[string]bool)
|
||||
for _, c := range existingCrew {
|
||||
existingNames[c.Name] = true
|
||||
}
|
||||
|
||||
// Track results
|
||||
var created []string
|
||||
var skipped []string
|
||||
var failed []string
|
||||
|
||||
// Process each configured member
|
||||
for _, name := range rigEntry.Crew.Members {
|
||||
if existingNames[name] {
|
||||
skipped = append(skipped, name)
|
||||
continue
|
||||
}
|
||||
|
||||
if crewDryRun {
|
||||
fmt.Printf("Would create: %s/%s\n", rigName, name)
|
||||
created = append(created, name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create crew workspace
|
||||
fmt.Printf("Creating crew workspace %s in %s...\n", name, rigName)
|
||||
|
||||
worker, err := crewMgr.Add(name, false) // No feature branch for synced crew
|
||||
if err != nil {
|
||||
if err == crew.ErrCrewExists {
|
||||
skipped = append(skipped, name)
|
||||
continue
|
||||
}
|
||||
style.PrintWarning("creating crew workspace '%s': %v", name, err)
|
||||
failed = append(failed, name)
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf("%s Created crew workspace: %s/%s\n",
|
||||
style.Bold.Render("\u2713"), rigName, name)
|
||||
fmt.Printf(" Path: %s\n", worker.ClonePath)
|
||||
fmt.Printf(" Branch: %s\n", worker.Branch)
|
||||
|
||||
// Create agent bead for the crew worker
|
||||
prefix := beads.GetPrefixForRig(townRoot, rigName)
|
||||
crewID := beads.CrewBeadIDWithPrefix(prefix, rigName, name)
|
||||
if _, err := bd.Show(crewID); err != nil {
|
||||
// Agent bead doesn't exist, create it
|
||||
fields := &beads.AgentFields{
|
||||
RoleType: "crew",
|
||||
Rig: rigName,
|
||||
AgentState: "idle",
|
||||
}
|
||||
desc := fmt.Sprintf("Crew worker %s in %s - synced from rigs.json.", name, rigName)
|
||||
if _, err := bd.CreateAgentBead(crewID, desc, fields); err != nil {
|
||||
style.PrintWarning("could not create agent bead for %s: %v", name, err)
|
||||
} else {
|
||||
fmt.Printf(" Agent bead: %s\n", crewID)
|
||||
}
|
||||
}
|
||||
|
||||
created = append(created, name)
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Summary
|
||||
if crewDryRun {
|
||||
fmt.Printf("\n%s Dry run complete\n", style.Bold.Render("\u2713"))
|
||||
if len(created) > 0 {
|
||||
fmt.Printf(" Would create: %v\n", created)
|
||||
}
|
||||
if len(skipped) > 0 {
|
||||
fmt.Printf(" Already exist: %v\n", skipped)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(created) > 0 {
|
||||
fmt.Printf("%s Created %d crew workspace(s): %v\n",
|
||||
style.Bold.Render("\u2713"), len(created), created)
|
||||
}
|
||||
if len(skipped) > 0 {
|
||||
fmt.Printf("%s Skipped %d (already exist): %v\n",
|
||||
style.Dim.Render("-"), len(skipped), skipped)
|
||||
}
|
||||
if len(failed) > 0 {
|
||||
fmt.Printf("%s Failed to create %d: %v\n",
|
||||
style.Warning.Render("!"), len(failed), failed)
|
||||
}
|
||||
|
||||
// Show theme if configured
|
||||
if rigEntry.Crew.Theme != "" {
|
||||
fmt.Printf("\nCrew theme: %s\n", rigEntry.Crew.Theme)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -413,9 +413,12 @@ func startDeaconSession(t *tmux.Tmux, sessionName, agentOverride string) error {
|
||||
return fmt.Errorf("creating deacon settings: %w", err)
|
||||
}
|
||||
|
||||
// Build startup command first
|
||||
// Export GT_ROLE and BD_ACTOR in the command since tmux SetEnvironment only affects new panes
|
||||
startupCmd, err := config.BuildAgentStartupCommandWithAgentOverride("deacon", "", townRoot, "", "", agentOverride)
|
||||
initialPrompt := session.BuildStartupPrompt(session.BeaconConfig{
|
||||
Recipient: "deacon",
|
||||
Sender: "daemon",
|
||||
Topic: "patrol",
|
||||
}, "I am Deacon. Start patrol: check gt hook, if empty create mol-deacon-patrol wisp and execute it.")
|
||||
startupCmd, err := config.BuildAgentStartupCommandWithAgentOverride("deacon", "", townRoot, "", initialPrompt, agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
@@ -451,23 +454,6 @@ func startDeaconSession(t *tmux.Tmux, sessionName, agentOverride string) error {
|
||||
runtimeConfig := config.LoadRuntimeConfig("")
|
||||
_ = runtime.RunStartupFallback(t, sessionName, "deacon", runtimeConfig)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
if err := session.StartupNudge(t, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: "deacon",
|
||||
Sender: "daemon",
|
||||
Topic: "patrol",
|
||||
}); err != nil {
|
||||
style.PrintWarning("failed to send startup nudge: %v", err)
|
||||
}
|
||||
|
||||
// GUPP: Gas Town Universal Propulsion Principle
|
||||
// Send the propulsion nudge to trigger autonomous patrol execution.
|
||||
// Wait for beacon to be fully processed (needs to be separate prompt)
|
||||
time.Sleep(2 * time.Second)
|
||||
if err := t.NudgeSession(sessionName, session.PropulsionNudgeForRole("deacon", deaconDir)); err != nil {
|
||||
return fmt.Errorf("sending propulsion nudge: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -129,7 +129,6 @@ func runDoctor(cmd *cobra.Command, args []string) error {
|
||||
d.Register(doctor.NewCustomTypesCheck())
|
||||
d.Register(doctor.NewRoleLabelCheck())
|
||||
d.Register(doctor.NewFormulaCheck())
|
||||
d.Register(doctor.NewBdDaemonCheck())
|
||||
d.Register(doctor.NewPrefixConflictCheck())
|
||||
d.Register(doctor.NewPrefixMismatchCheck())
|
||||
d.Register(doctor.NewRoutesCheck())
|
||||
|
||||
394
internal/cmd/dolt.go
Normal file
394
internal/cmd/dolt.go
Normal file
@@ -0,0 +1,394 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/doltserver"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var doltCmd = &cobra.Command{
|
||||
Use: "dolt",
|
||||
GroupID: GroupServices,
|
||||
Short: "Manage the Dolt SQL server",
|
||||
RunE: requireSubcommand,
|
||||
Long: `Manage the Dolt SQL server for Gas Town beads.
|
||||
|
||||
The Dolt server provides multi-client access to all rig databases,
|
||||
avoiding the single-writer limitation of embedded Dolt mode.
|
||||
|
||||
Server configuration:
|
||||
- Port: 3307 (avoids conflict with MySQL on 3306)
|
||||
- User: root (default Dolt user, no password for localhost)
|
||||
- Data directory: .dolt-data/ (contains all rig databases)
|
||||
|
||||
Each rig (hq, gastown, beads) has its own database subdirectory.`,
|
||||
}
|
||||
|
||||
var doltStartCmd = &cobra.Command{
|
||||
Use: "start",
|
||||
Short: "Start the Dolt server",
|
||||
Long: `Start the Dolt SQL server in the background.
|
||||
|
||||
The server will run until stopped with 'gt dolt stop'.`,
|
||||
RunE: runDoltStart,
|
||||
}
|
||||
|
||||
var doltStopCmd = &cobra.Command{
|
||||
Use: "stop",
|
||||
Short: "Stop the Dolt server",
|
||||
Long: `Stop the running Dolt SQL server.`,
|
||||
RunE: runDoltStop,
|
||||
}
|
||||
|
||||
var doltStatusCmd = &cobra.Command{
|
||||
Use: "status",
|
||||
Short: "Show Dolt server status",
|
||||
Long: `Show the current status of the Dolt SQL server.`,
|
||||
RunE: runDoltStatus,
|
||||
}
|
||||
|
||||
var doltLogsCmd = &cobra.Command{
|
||||
Use: "logs",
|
||||
Short: "View Dolt server logs",
|
||||
Long: `View the Dolt server log file.`,
|
||||
RunE: runDoltLogs,
|
||||
}
|
||||
|
||||
var doltSQLCmd = &cobra.Command{
|
||||
Use: "sql",
|
||||
Short: "Open Dolt SQL shell",
|
||||
Long: `Open an interactive SQL shell to the Dolt database.
|
||||
|
||||
Works in both embedded mode (no server) and server mode.
|
||||
For multi-client access, start the server first with 'gt dolt start'.`,
|
||||
RunE: runDoltSQL,
|
||||
}
|
||||
|
||||
var doltInitRigCmd = &cobra.Command{
|
||||
Use: "init-rig <name>",
|
||||
Short: "Initialize a new rig database",
|
||||
Long: `Initialize a new rig database in the Dolt data directory.
|
||||
|
||||
Each rig (e.g., gastown, beads) gets its own database that will be
|
||||
served by the Dolt server. The rig name becomes the database name
|
||||
when connecting via MySQL protocol.
|
||||
|
||||
Example:
|
||||
gt dolt init-rig gastown
|
||||
gt dolt init-rig beads`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: runDoltInitRig,
|
||||
}
|
||||
|
||||
var doltListCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List available rig databases",
|
||||
Long: `List all rig databases in the Dolt data directory.`,
|
||||
RunE: runDoltList,
|
||||
}
|
||||
|
||||
var doltMigrateCmd = &cobra.Command{
|
||||
Use: "migrate",
|
||||
Short: "Migrate existing dolt databases to centralized data directory",
|
||||
Long: `Migrate existing dolt databases from .beads/dolt/ locations to the
|
||||
centralized .dolt-data/ directory structure.
|
||||
|
||||
This command will:
|
||||
1. Detect existing dolt databases in .beads/dolt/ directories
|
||||
2. Move them to .dolt-data/<rigname>/
|
||||
3. Remove the old empty directories
|
||||
|
||||
After migration, start the server with 'gt dolt start'.`,
|
||||
RunE: runDoltMigrate,
|
||||
}
|
||||
|
||||
var (
|
||||
doltLogLines int
|
||||
doltLogFollow bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
doltCmd.AddCommand(doltStartCmd)
|
||||
doltCmd.AddCommand(doltStopCmd)
|
||||
doltCmd.AddCommand(doltStatusCmd)
|
||||
doltCmd.AddCommand(doltLogsCmd)
|
||||
doltCmd.AddCommand(doltSQLCmd)
|
||||
doltCmd.AddCommand(doltInitRigCmd)
|
||||
doltCmd.AddCommand(doltListCmd)
|
||||
doltCmd.AddCommand(doltMigrateCmd)
|
||||
|
||||
doltLogsCmd.Flags().IntVarP(&doltLogLines, "lines", "n", 50, "Number of lines to show")
|
||||
doltLogsCmd.Flags().BoolVarP(&doltLogFollow, "follow", "f", false, "Follow log output")
|
||||
|
||||
rootCmd.AddCommand(doltCmd)
|
||||
}
|
||||
|
||||
func runDoltStart(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
if err := doltserver.Start(townRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get state for display
|
||||
state, _ := doltserver.LoadState(townRoot)
|
||||
config := doltserver.DefaultConfig(townRoot)
|
||||
|
||||
fmt.Printf("%s Dolt server started (PID %d, port %d)\n",
|
||||
style.Bold.Render("✓"), state.PID, config.Port)
|
||||
fmt.Printf(" Data dir: %s\n", state.DataDir)
|
||||
fmt.Printf(" Databases: %s\n", style.Dim.Render(strings.Join(state.Databases, ", ")))
|
||||
fmt.Printf(" Connection: %s\n", style.Dim.Render(doltserver.GetConnectionString(townRoot)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDoltStop(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
_, pid, _ := doltserver.IsRunning(townRoot)
|
||||
|
||||
if err := doltserver.Stop(townRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s Dolt server stopped (was PID %d)\n", style.Bold.Render("✓"), pid)
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDoltStatus(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
running, pid, err := doltserver.IsRunning(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking server status: %w", err)
|
||||
}
|
||||
|
||||
config := doltserver.DefaultConfig(townRoot)
|
||||
|
||||
if running {
|
||||
fmt.Printf("%s Dolt server is %s (PID %d)\n",
|
||||
style.Bold.Render("●"),
|
||||
style.Bold.Render("running"),
|
||||
pid)
|
||||
|
||||
// Load state for more details
|
||||
state, err := doltserver.LoadState(townRoot)
|
||||
if err == nil && !state.StartedAt.IsZero() {
|
||||
fmt.Printf(" Started: %s\n", state.StartedAt.Format("2006-01-02 15:04:05"))
|
||||
fmt.Printf(" Port: %d\n", state.Port)
|
||||
fmt.Printf(" Data dir: %s\n", state.DataDir)
|
||||
if len(state.Databases) > 0 {
|
||||
fmt.Printf(" Databases:\n")
|
||||
for _, db := range state.Databases {
|
||||
fmt.Printf(" - %s\n", db)
|
||||
}
|
||||
}
|
||||
fmt.Printf(" Connection: %s\n", doltserver.GetConnectionString(townRoot))
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("%s Dolt server is %s\n",
|
||||
style.Dim.Render("○"),
|
||||
"not running")
|
||||
|
||||
// List available databases
|
||||
databases, _ := doltserver.ListDatabases(townRoot)
|
||||
if len(databases) == 0 {
|
||||
fmt.Printf("\n%s No rig databases found in %s\n",
|
||||
style.Bold.Render("!"),
|
||||
config.DataDir)
|
||||
fmt.Printf(" Initialize with: %s\n", style.Dim.Render("gt dolt init-rig <name>"))
|
||||
} else {
|
||||
fmt.Printf("\nAvailable databases in %s:\n", config.DataDir)
|
||||
for _, db := range databases {
|
||||
fmt.Printf(" - %s\n", db)
|
||||
}
|
||||
fmt.Printf("\nStart with: %s\n", style.Dim.Render("gt dolt start"))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDoltLogs(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
config := doltserver.DefaultConfig(townRoot)
|
||||
|
||||
if _, err := os.Stat(config.LogFile); os.IsNotExist(err) {
|
||||
return fmt.Errorf("no log file found at %s", config.LogFile)
|
||||
}
|
||||
|
||||
if doltLogFollow {
|
||||
// Use tail -f for following
|
||||
tailCmd := exec.Command("tail", "-f", config.LogFile)
|
||||
tailCmd.Stdout = os.Stdout
|
||||
tailCmd.Stderr = os.Stderr
|
||||
return tailCmd.Run()
|
||||
}
|
||||
|
||||
// Use tail -n for last N lines
|
||||
tailCmd := exec.Command("tail", "-n", strconv.Itoa(doltLogLines), config.LogFile)
|
||||
tailCmd.Stdout = os.Stdout
|
||||
tailCmd.Stderr = os.Stderr
|
||||
return tailCmd.Run()
|
||||
}
|
||||
|
||||
func runDoltSQL(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
config := doltserver.DefaultConfig(townRoot)
|
||||
|
||||
// Check if server is running - if so, connect via Dolt SQL client
|
||||
running, _, _ := doltserver.IsRunning(townRoot)
|
||||
if running {
|
||||
// Connect to running server using dolt sql client
|
||||
// Using --no-tls since local server doesn't have TLS configured
|
||||
sqlCmd := exec.Command("dolt",
|
||||
"--host", "127.0.0.1",
|
||||
"--port", strconv.Itoa(config.Port),
|
||||
"--user", config.User,
|
||||
"--password", "",
|
||||
"--no-tls",
|
||||
"sql",
|
||||
)
|
||||
sqlCmd.Stdin = os.Stdin
|
||||
sqlCmd.Stdout = os.Stdout
|
||||
sqlCmd.Stderr = os.Stderr
|
||||
return sqlCmd.Run()
|
||||
}
|
||||
|
||||
// Server not running - list databases and pick first one for embedded mode
|
||||
databases, err := doltserver.ListDatabases(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing databases: %w", err)
|
||||
}
|
||||
|
||||
if len(databases) == 0 {
|
||||
return fmt.Errorf("no databases found in %s\nInitialize with: gt dolt init-rig <name>", config.DataDir)
|
||||
}
|
||||
|
||||
// Use first database for embedded SQL shell
|
||||
dbDir := doltserver.RigDatabaseDir(townRoot, databases[0])
|
||||
fmt.Printf("Using database: %s (start server with 'gt dolt start' for multi-database access)\n\n", databases[0])
|
||||
|
||||
sqlCmd := exec.Command("dolt", "sql")
|
||||
sqlCmd.Dir = dbDir
|
||||
sqlCmd.Stdin = os.Stdin
|
||||
sqlCmd.Stdout = os.Stdout
|
||||
sqlCmd.Stderr = os.Stderr
|
||||
|
||||
return sqlCmd.Run()
|
||||
}
|
||||
|
||||
func runDoltInitRig(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
rigName := args[0]
|
||||
|
||||
if err := doltserver.InitRig(townRoot, rigName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config := doltserver.DefaultConfig(townRoot)
|
||||
rigDir := doltserver.RigDatabaseDir(townRoot, rigName)
|
||||
|
||||
fmt.Printf("%s Initialized rig database %q\n", style.Bold.Render("✓"), rigName)
|
||||
fmt.Printf(" Location: %s\n", rigDir)
|
||||
fmt.Printf(" Data dir: %s\n", config.DataDir)
|
||||
fmt.Printf("\nStart server with: %s\n", style.Dim.Render("gt dolt start"))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDoltList(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
config := doltserver.DefaultConfig(townRoot)
|
||||
databases, err := doltserver.ListDatabases(townRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing databases: %w", err)
|
||||
}
|
||||
|
||||
if len(databases) == 0 {
|
||||
fmt.Printf("No rig databases found in %s\n", config.DataDir)
|
||||
fmt.Printf("\nInitialize with: %s\n", style.Dim.Render("gt dolt init-rig <name>"))
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("Rig databases in %s:\n\n", config.DataDir)
|
||||
for _, db := range databases {
|
||||
dbDir := doltserver.RigDatabaseDir(townRoot, db)
|
||||
fmt.Printf(" %s\n %s\n", style.Bold.Render(db), style.Dim.Render(dbDir))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runDoltMigrate(cmd *cobra.Command, args []string) error {
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check if server is running - must stop first
|
||||
running, _, _ := doltserver.IsRunning(townRoot)
|
||||
if running {
|
||||
return fmt.Errorf("Dolt server is running. Stop it first with: gt dolt stop")
|
||||
}
|
||||
|
||||
// Find databases to migrate
|
||||
migrations := doltserver.FindMigratableDatabases(townRoot)
|
||||
if len(migrations) == 0 {
|
||||
fmt.Println("No databases found to migrate.")
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Printf("Found %d database(s) to migrate:\n\n", len(migrations))
|
||||
for _, m := range migrations {
|
||||
fmt.Printf(" %s\n", m.SourcePath)
|
||||
fmt.Printf(" → %s\n\n", m.TargetPath)
|
||||
}
|
||||
|
||||
// Perform migrations
|
||||
for _, m := range migrations {
|
||||
fmt.Printf("Migrating %s...\n", m.RigName)
|
||||
if err := doltserver.MigrateRigFromBeads(townRoot, m.RigName, m.SourcePath); err != nil {
|
||||
return fmt.Errorf("migrating %s: %w", m.RigName, err)
|
||||
}
|
||||
fmt.Printf(" %s Migrated to %s\n", style.Bold.Render("✓"), m.TargetPath)
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s Migration complete.\n", style.Bold.Render("✓"))
|
||||
fmt.Printf("\nStart server with: %s\n", style.Dim.Render("gt dolt start"))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -310,6 +310,38 @@ func runDone(cmd *cobra.Command, args []string) error {
|
||||
// Initialize beads
|
||||
bd := beads.New(beads.ResolveBeadsDir(cwd))
|
||||
|
||||
// Check for no_merge flag - if set, skip merge queue and notify for review
|
||||
sourceIssueForNoMerge, err := bd.Show(issueID)
|
||||
if err == nil {
|
||||
attachmentFields := beads.ParseAttachmentFields(sourceIssueForNoMerge)
|
||||
if attachmentFields != nil && attachmentFields.NoMerge {
|
||||
fmt.Printf("%s No-merge mode: skipping merge queue\n", style.Bold.Render("→"))
|
||||
fmt.Printf(" Branch: %s\n", branch)
|
||||
fmt.Printf(" Issue: %s\n", issueID)
|
||||
fmt.Println()
|
||||
fmt.Printf("%s\n", style.Dim.Render("Work stays on feature branch for human review."))
|
||||
|
||||
// Mail dispatcher with READY_FOR_REVIEW
|
||||
if dispatcher := attachmentFields.DispatchedBy; dispatcher != "" {
|
||||
townRouter := mail.NewRouter(townRoot)
|
||||
reviewMsg := &mail.Message{
|
||||
To: dispatcher,
|
||||
From: detectSender(),
|
||||
Subject: fmt.Sprintf("READY_FOR_REVIEW: %s", issueID),
|
||||
Body: fmt.Sprintf("Branch: %s\nIssue: %s\nReady for review.", branch, issueID),
|
||||
}
|
||||
if err := townRouter.Send(reviewMsg); err != nil {
|
||||
style.PrintWarning("could not notify dispatcher: %v", err)
|
||||
} else {
|
||||
fmt.Printf("%s Dispatcher notified: READY_FOR_REVIEW\n", style.Bold.Render("✓"))
|
||||
}
|
||||
}
|
||||
|
||||
// Skip MR creation, go to witness notification
|
||||
goto notifyWitness
|
||||
}
|
||||
}
|
||||
|
||||
// Determine target branch (auto-detect integration branch if applicable)
|
||||
target := defaultBranch
|
||||
autoTarget, err := detectIntegrationBranch(bd, g, issueID)
|
||||
@@ -456,7 +488,7 @@ notifyWitness:
|
||||
|
||||
// Notify dispatcher if work was dispatched by another agent
|
||||
if issueID != "" {
|
||||
if dispatcher := getDispatcherFromBead(cwd, issueID); dispatcher != "" && dispatcher != sender {
|
||||
if dispatcher := getDispatcherFromBead(townRoot, cwd, issueID); dispatcher != "" && dispatcher != sender {
|
||||
dispatcherNotification := &mail.Message{
|
||||
To: dispatcher,
|
||||
From: sender,
|
||||
@@ -608,11 +640,21 @@ func updateAgentStateOnDone(cwd, townRoot, exitType, _ string) { // issueID unus
|
||||
// has attached_molecule pointing to the wisp. Without this fix, gt done
|
||||
// only closed the hooked bead, leaving the wisp orphaned.
|
||||
// Order matters: wisp closes -> unblocks base bead -> base bead closes.
|
||||
//
|
||||
// BUG FIX (gt-zbnr): Close child wisps BEFORE closing the molecule itself.
|
||||
// Deacon patrol molecules have child step wisps that were being orphaned
|
||||
// when the patrol completed. Now we cascade-close all descendants first.
|
||||
attachment := beads.ParseAttachmentFields(hookedBead)
|
||||
if attachment != nil && attachment.AttachedMolecule != "" {
|
||||
if err := bd.Close(attachment.AttachedMolecule); err != nil {
|
||||
moleculeID := attachment.AttachedMolecule
|
||||
// Cascade-close all child wisps before closing the molecule
|
||||
childrenClosed := closeDescendants(bd, moleculeID)
|
||||
if childrenClosed > 0 {
|
||||
fmt.Printf(" Closed %d child step issues\n", childrenClosed)
|
||||
}
|
||||
if err := bd.Close(moleculeID); err != nil {
|
||||
// Non-fatal: warn but continue
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't close attached molecule %s: %v\n", attachment.AttachedMolecule, err)
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't close attached molecule %s: %v\n", moleculeID, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -645,7 +687,7 @@ func updateAgentStateOnDone(cwd, townRoot, exitType, _ string) { // issueID unus
|
||||
if _, err := bd.Run("agent", "state", agentBeadID, "awaiting-gate"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: couldn't set agent %s to awaiting-gate: %v\n", agentBeadID, err)
|
||||
}
|
||||
// ExitCompleted and ExitDeferred don't set state - observable from tmux
|
||||
// ExitCompleted and ExitDeferred don't set state - observable from tmux
|
||||
}
|
||||
|
||||
// ZFC #10: Self-report cleanup status
|
||||
@@ -678,12 +720,19 @@ func getIssueFromAgentHook(bd *beads.Beads, agentBeadID string) string {
|
||||
|
||||
// getDispatcherFromBead retrieves the dispatcher agent ID from the bead's attachment fields.
|
||||
// Returns empty string if no dispatcher is recorded.
|
||||
func getDispatcherFromBead(cwd, issueID string) string {
|
||||
//
|
||||
// BUG FIX (sc-g7bl3): Use townRoot and ResolveHookDir for bead lookup instead of
|
||||
// ResolveBeadsDir(cwd). When the polecat's worktree is deleted before gt done finishes,
|
||||
// ResolveBeadsDir(cwd) fails because the redirect file is gone. ResolveHookDir uses
|
||||
// prefix-based routing via routes.jsonl which works regardless of worktree state.
|
||||
func getDispatcherFromBead(townRoot, cwd, issueID string) string {
|
||||
if issueID == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
bd := beads.New(beads.ResolveBeadsDir(cwd))
|
||||
// Use ResolveHookDir for resilient bead lookup - works even if worktree is deleted
|
||||
beadsDir := beads.ResolveHookDir(townRoot, issueID, cwd)
|
||||
bd := beads.New(beadsDir)
|
||||
issue, err := bd.Show(issueID)
|
||||
if err != nil {
|
||||
return ""
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
|
||||
"github.com/gofrs/flock"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/daemon"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
@@ -136,35 +135,7 @@ func runDown(cmd *cobra.Command, args []string) error {
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Phase 1: Stop bd resurrection layer (--all only)
|
||||
if downAll {
|
||||
daemonsKilled, activityKilled, err := beads.StopAllBdProcesses(downDryRun, downForce)
|
||||
if err != nil {
|
||||
printDownStatus("bd processes", false, err.Error())
|
||||
allOK = false
|
||||
} else {
|
||||
if downDryRun {
|
||||
if daemonsKilled > 0 || activityKilled > 0 {
|
||||
printDownStatus("bd daemon", true, fmt.Sprintf("%d would stop", daemonsKilled))
|
||||
printDownStatus("bd activity", true, fmt.Sprintf("%d would stop", activityKilled))
|
||||
} else {
|
||||
printDownStatus("bd processes", true, "none running")
|
||||
}
|
||||
} else {
|
||||
if daemonsKilled > 0 {
|
||||
printDownStatus("bd daemon", true, fmt.Sprintf("%d stopped", daemonsKilled))
|
||||
}
|
||||
if activityKilled > 0 {
|
||||
printDownStatus("bd activity", true, fmt.Sprintf("%d stopped", activityKilled))
|
||||
}
|
||||
if daemonsKilled == 0 && activityKilled == 0 {
|
||||
printDownStatus("bd processes", true, "none running")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2a: Stop refineries
|
||||
// Phase 1: Stop refineries
|
||||
for _, rigName := range rigs {
|
||||
sessionName := fmt.Sprintf("gt-%s-refinery", rigName)
|
||||
if downDryRun {
|
||||
@@ -184,7 +155,7 @@ func runDown(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2b: Stop witnesses
|
||||
// Phase 2: Stop witnesses
|
||||
for _, rigName := range rigs {
|
||||
sessionName := fmt.Sprintf("gt-%s-witness", rigName)
|
||||
if downDryRun {
|
||||
@@ -428,14 +399,6 @@ func acquireShutdownLock(townRoot string) (*flock.Flock, error) {
|
||||
func verifyShutdown(t *tmux.Tmux, townRoot string) []string {
|
||||
var respawned []string
|
||||
|
||||
if count := beads.CountBdDaemons(); count > 0 {
|
||||
respawned = append(respawned, fmt.Sprintf("bd daemon (%d running)", count))
|
||||
}
|
||||
|
||||
if count := beads.CountBdActivityProcesses(); count > 0 {
|
||||
respawned = append(respawned, fmt.Sprintf("bd activity (%d running)", count))
|
||||
}
|
||||
|
||||
sessions, err := t.ListSessions()
|
||||
if err == nil {
|
||||
for _, sess := range sessions {
|
||||
|
||||
351
internal/cmd/focus.go
Normal file
351
internal/cmd/focus.go
Normal file
@@ -0,0 +1,351 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var focusJSON bool
|
||||
var focusAll bool
|
||||
var focusLimit int
|
||||
|
||||
var focusCmd = &cobra.Command{
|
||||
Use: "focus",
|
||||
GroupID: GroupWork,
|
||||
Short: "Show what needs attention (stalest high-priority goals)",
|
||||
Long: `Show what the overseer should focus on next.
|
||||
|
||||
Analyzes active epics (goals) and sorts them by staleness × priority.
|
||||
Items that haven't moved in a while and have high priority appear first.
|
||||
|
||||
Staleness indicators:
|
||||
🔴 stuck - no movement for 4+ hours (high urgency)
|
||||
🟡 stale - no movement for 1-4 hours (needs attention)
|
||||
🟢 active - moved within the last hour (probably fine)
|
||||
|
||||
Examples:
|
||||
gt focus # Top 5 suggestions
|
||||
gt focus --all # All active goals with staleness
|
||||
gt focus --limit=10 # Top 10 suggestions
|
||||
gt focus --json # Machine-readable output`,
|
||||
RunE: runFocus,
|
||||
}
|
||||
|
||||
// init registers the focus command and its flags with the root command.
func init() {
	focusCmd.Flags().BoolVar(&focusJSON, "json", false, "Output as JSON")
	focusCmd.Flags().BoolVar(&focusAll, "all", false, "Show all active goals (not just top N)")
	focusCmd.Flags().IntVarP(&focusLimit, "limit", "n", 5, "Number of suggestions to show")
	rootCmd.AddCommand(focusCmd)
}
|
||||
|
||||
// FocusItem represents a goal that needs attention.
// Produced by collectFocusItems and rendered by runFocus; the json tags
// define the schema of the --json output.
type FocusItem struct {
	ID string `json:"id"`
	Title string `json:"title"`
	Priority int `json:"priority"`
	Status string `json:"status"`
	Staleness string `json:"staleness"` // "active", "stale", "stuck"
	StalenessHours float64 `json:"staleness_hours"` // Hours since last movement
	Score float64 `json:"score"` // priority × staleness_hours
	UpdatedAt string `json:"updated_at"`
	Assignee string `json:"assignee,omitempty"`
	DrillDown string `json:"drill_down"` // Suggested command
}
|
||||
|
||||
func runFocus(cmd *cobra.Command, args []string) error {
|
||||
// Find town root to query both town and rig beads
|
||||
townRoot, err := workspace.FindFromCwdOrError()
|
||||
if err != nil {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Collect epics from town beads and all rig beads
|
||||
items, err := collectFocusItems(townRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(items) == 0 {
|
||||
fmt.Println("No active goals found.")
|
||||
fmt.Println("Goals are epics with open status. Create one with: bd create --type=epic \"Goal name\"")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sort by score (highest first)
|
||||
sort.Slice(items, func(i, j int) bool {
|
||||
return items[i].Score > items[j].Score
|
||||
})
|
||||
|
||||
// Apply limit
|
||||
if !focusAll && len(items) > focusLimit {
|
||||
items = items[:focusLimit]
|
||||
}
|
||||
|
||||
if focusJSON {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(items)
|
||||
}
|
||||
|
||||
return outputFocusText(items)
|
||||
}
|
||||
|
||||
// collectFocusItems gathers epics from all beads databases in the town.
|
||||
func collectFocusItems(townRoot string) ([]FocusItem, error) {
|
||||
var items []FocusItem
|
||||
seenIDs := make(map[string]bool) // Dedupe across databases
|
||||
|
||||
// 1. Query town beads (hq-* prefix)
|
||||
townBeads := filepath.Join(townRoot, ".beads")
|
||||
if _, err := os.Stat(townBeads); err == nil {
|
||||
townItems := queryEpicsFromBeads(townBeads)
|
||||
for _, item := range townItems {
|
||||
if !seenIDs[item.ID] {
|
||||
items = append(items, item)
|
||||
seenIDs[item.ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Query each rig's beads (gt-*, bd-*, sc-* etc. prefixes)
|
||||
rigDirs, _ := filepath.Glob(filepath.Join(townRoot, "*", "mayor", "rig", ".beads"))
|
||||
for _, rigBeads := range rigDirs {
|
||||
rigItems := queryEpicsFromBeads(rigBeads)
|
||||
for _, item := range rigItems {
|
||||
if !seenIDs[item.ID] {
|
||||
items = append(items, item)
|
||||
seenIDs[item.ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// queryEpicsFromBeads queries a beads database for open epics.
|
||||
func queryEpicsFromBeads(beadsPath string) []FocusItem {
|
||||
var items []FocusItem
|
||||
|
||||
// Use bd to query epics
|
||||
listCmd := exec.Command("bd", "list", "--type=epic", "--status=open", "--json")
|
||||
listCmd.Dir = beadsPath
|
||||
var stdout bytes.Buffer
|
||||
listCmd.Stdout = &stdout
|
||||
|
||||
if err := listCmd.Run(); err != nil {
|
||||
// Also try in_progress and hooked statuses
|
||||
return items
|
||||
}
|
||||
|
||||
var epics []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
Priority int `json:"priority"`
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
Assignee string `json:"assignee,omitempty"`
|
||||
Labels []string `json:"labels,omitempty"`
|
||||
Ephemeral bool `json:"ephemeral,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &epics); err != nil {
|
||||
return items
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
for _, epic := range epics {
|
||||
// Skip ephemeral issues (molecules, wisps, etc.) - these aren't real goals
|
||||
if epic.Ephemeral {
|
||||
continue
|
||||
}
|
||||
// Also skip by ID pattern - wisps have "wisp" in the ID
|
||||
if strings.Contains(epic.ID, "wisp") || strings.Contains(epic.ID, "-mol-") {
|
||||
continue
|
||||
}
|
||||
|
||||
item := FocusItem{
|
||||
ID: epic.ID,
|
||||
Title: strings.TrimPrefix(epic.Title, "[EPIC] "),
|
||||
Priority: epic.Priority,
|
||||
Status: epic.Status,
|
||||
UpdatedAt: epic.UpdatedAt,
|
||||
Assignee: epic.Assignee,
|
||||
}
|
||||
|
||||
// Calculate staleness
|
||||
if epic.UpdatedAt != "" {
|
||||
if updated, err := time.Parse(time.RFC3339, epic.UpdatedAt); err == nil {
|
||||
staleDuration := now.Sub(updated)
|
||||
item.StalenessHours = staleDuration.Hours()
|
||||
|
||||
// Classify staleness
|
||||
switch {
|
||||
case staleDuration >= 4*time.Hour:
|
||||
item.Staleness = "stuck"
|
||||
case staleDuration >= 1*time.Hour:
|
||||
item.Staleness = "stale"
|
||||
default:
|
||||
item.Staleness = "active"
|
||||
}
|
||||
}
|
||||
}
|
||||
if item.Staleness == "" {
|
||||
item.Staleness = "active"
|
||||
}
|
||||
|
||||
// Calculate score: priority × staleness_hours
|
||||
// P1 = 1, P2 = 2, etc. Lower priority number = higher importance
|
||||
// Invert so P1 has higher score
|
||||
priorityWeight := float64(5 - item.Priority) // P1=4, P2=3, P3=2, P4=1
|
||||
if priorityWeight < 1 {
|
||||
priorityWeight = 1
|
||||
}
|
||||
item.Score = priorityWeight * item.StalenessHours
|
||||
|
||||
// Suggest drill-down command
|
||||
item.DrillDown = fmt.Sprintf("bd show %s", epic.ID)
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
// Also query in_progress and hooked epics
|
||||
for _, status := range []string{"in_progress", "hooked"} {
|
||||
extraCmd := exec.Command("bd", "list", "--type=epic", "--status="+status, "--json")
|
||||
extraCmd.Dir = beadsPath
|
||||
var extraStdout bytes.Buffer
|
||||
extraCmd.Stdout = &extraStdout
|
||||
|
||||
if err := extraCmd.Run(); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var extraEpics []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
Priority int `json:"priority"`
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
Assignee string `json:"assignee,omitempty"`
|
||||
Ephemeral bool `json:"ephemeral,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal(extraStdout.Bytes(), &extraEpics); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, epic := range extraEpics {
|
||||
// Skip ephemeral issues
|
||||
if epic.Ephemeral {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(epic.ID, "wisp") || strings.Contains(epic.ID, "-mol-") {
|
||||
continue
|
||||
}
|
||||
|
||||
item := FocusItem{
|
||||
ID: epic.ID,
|
||||
Title: strings.TrimPrefix(epic.Title, "[EPIC] "),
|
||||
Priority: epic.Priority,
|
||||
Status: epic.Status,
|
||||
UpdatedAt: epic.UpdatedAt,
|
||||
Assignee: epic.Assignee,
|
||||
}
|
||||
|
||||
if epic.UpdatedAt != "" {
|
||||
if updated, err := time.Parse(time.RFC3339, epic.UpdatedAt); err == nil {
|
||||
staleDuration := now.Sub(updated)
|
||||
item.StalenessHours = staleDuration.Hours()
|
||||
|
||||
switch {
|
||||
case staleDuration >= 4*time.Hour:
|
||||
item.Staleness = "stuck"
|
||||
case staleDuration >= 1*time.Hour:
|
||||
item.Staleness = "stale"
|
||||
default:
|
||||
item.Staleness = "active"
|
||||
}
|
||||
}
|
||||
}
|
||||
if item.Staleness == "" {
|
||||
item.Staleness = "active"
|
||||
}
|
||||
|
||||
priorityWeight := float64(5 - item.Priority)
|
||||
if priorityWeight < 1 {
|
||||
priorityWeight = 1
|
||||
}
|
||||
item.Score = priorityWeight * item.StalenessHours
|
||||
item.DrillDown = fmt.Sprintf("bd show %s", epic.ID)
|
||||
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
func outputFocusText(items []FocusItem) error {
|
||||
fmt.Printf("%s\n\n", style.Bold.Render("Suggested focus (stalest high-priority first):"))
|
||||
|
||||
for i, item := range items {
|
||||
// Staleness indicator
|
||||
var indicator string
|
||||
switch item.Staleness {
|
||||
case "stuck":
|
||||
indicator = style.Error.Render("🔴")
|
||||
case "stale":
|
||||
indicator = style.Warning.Render("🟡")
|
||||
default:
|
||||
indicator = style.Success.Render("🟢")
|
||||
}
|
||||
|
||||
// Priority display
|
||||
priorityStr := fmt.Sprintf("P%d", item.Priority)
|
||||
|
||||
// Format staleness duration
|
||||
stalenessStr := formatStaleness(item.StalenessHours)
|
||||
|
||||
// Main line
|
||||
fmt.Printf("%d. %s [%s] %s: %s\n", i+1, indicator, priorityStr, item.ID, item.Title)
|
||||
|
||||
// Details
|
||||
if item.Assignee != "" {
|
||||
// Extract short name from assignee path
|
||||
parts := strings.Split(item.Assignee, "/")
|
||||
shortAssignee := parts[len(parts)-1]
|
||||
fmt.Printf(" Last movement: %s Assignee: %s\n", stalenessStr, shortAssignee)
|
||||
} else {
|
||||
fmt.Printf(" Last movement: %s\n", stalenessStr)
|
||||
}
|
||||
|
||||
// Drill-down hint
|
||||
fmt.Printf(" %s\n\n", style.Dim.Render("→ "+item.DrillDown))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// formatStaleness formats staleness duration as human-readable string.
// Granularity scales with age: minutes under an hour, tenths of an hour
// under a day, tenths of a day beyond that.
func formatStaleness(hours float64) string {
	const oneMinute = 1.0 / 60.0
	switch {
	case hours < oneMinute:
		return "just now"
	case hours < 1:
		return fmt.Sprintf("%dm ago", int(hours*60))
	case hours < 24:
		return fmt.Sprintf("%.1fh ago", hours)
	default:
		return fmt.Sprintf("%.1fd ago", hours/24)
	}
}
|
||||
@@ -315,6 +315,12 @@ func executeConvoyFormula(f *formulaData, formulaName, targetRig string) error {
|
||||
}
|
||||
townBeads := filepath.Join(townRoot, ".beads")
|
||||
|
||||
// Ensure custom types (including 'convoy') are registered in town beads.
|
||||
// This handles cases where install didn't complete or beads was initialized manually.
|
||||
if err := beads.EnsureCustomTypes(townBeads); err != nil {
|
||||
return fmt.Errorf("ensuring custom types: %w", err)
|
||||
}
|
||||
|
||||
// Step 1: Create convoy bead
|
||||
convoyID := fmt.Sprintf("hq-cv-%s", generateFormulaShortID())
|
||||
convoyTitle := fmt.Sprintf("%s: %s", formulaName, f.Description)
|
||||
|
||||
651
internal/cmd/goals.go
Normal file
651
internal/cmd/goals.go
Normal file
@@ -0,0 +1,651 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
// Goal command flags
|
||||
var (
|
||||
goalsJSON bool
|
||||
goalsStatus string
|
||||
goalsPriority string
|
||||
goalsIncludeWisp bool
|
||||
)
|
||||
|
||||
var goalsCmd = &cobra.Command{
|
||||
Use: "goals [goal-id]",
|
||||
GroupID: GroupWork,
|
||||
Short: "View strategic goals (epics) with staleness indicators",
|
||||
Long: `View strategic goals (epics) across the workspace.
|
||||
|
||||
Goals are high-level objectives that organize related work items.
|
||||
This command shows goals with staleness indicators to help identify
|
||||
stale or neglected strategic initiatives.
|
||||
|
||||
Staleness indicators:
|
||||
🟢 active: movement in last hour
|
||||
🟡 stale: no movement for 1+ hours
|
||||
🔴 stuck: no movement for 4+ hours
|
||||
|
||||
Goals are sorted by staleness × priority (highest attention needed first).
|
||||
|
||||
Examples:
|
||||
gt goals # List all open goals
|
||||
gt goals --json # Output as JSON
|
||||
gt goals --status=all # Show all goals including closed
|
||||
gt goals gt-abc # Show details for a specific goal`,
|
||||
RunE: runGoals,
|
||||
}
|
||||
|
||||
// init registers the goals command and its flags with the root command.
//
// NOTE(review): goalsStatus is declared and bound here but is not referenced
// in the visible listing path (listGoals) — verify that --status is actually
// honored by the underlying epic query.
func init() {
	goalsCmd.Flags().BoolVar(&goalsJSON, "json", false, "Output as JSON")
	goalsCmd.Flags().StringVar(&goalsStatus, "status", "open", "Filter by status (open, closed, all)")
	goalsCmd.Flags().StringVar(&goalsPriority, "priority", "", "Filter by priority (e.g., P0, P1, P2)")
	goalsCmd.Flags().BoolVar(&goalsIncludeWisp, "include-wisp", false, "Include transient wisp molecules (normally hidden)")
	rootCmd.AddCommand(goalsCmd)
}
|
||||
|
||||
func runGoals(cmd *cobra.Command, args []string) error {
|
||||
// If arg provided, show specific goal
|
||||
if len(args) > 0 {
|
||||
goalID := args[0]
|
||||
return showGoal(goalID)
|
||||
}
|
||||
|
||||
// Otherwise list all goals
|
||||
return listGoals()
|
||||
}
|
||||
|
||||
// goalInfo holds computed goal data for display and sorting.
// Built by listGoals/showGoal; the json tags define the --json output schema.
type goalInfo struct {
	ID string `json:"id"`
	Title string `json:"title"`
	Status string `json:"status"`
	Priority int `json:"priority"`
	Assignee string `json:"assignee,omitempty"`
	ConvoyCount int `json:"convoy_count"`
	LastMovement time.Time `json:"last_movement,omitempty"`
	StalenessHrs float64 `json:"staleness_hours"`
	StalenessIcon string `json:"staleness_icon"`
	Score float64 `json:"score"` // priority × staleness for sorting
}
|
||||
|
||||
// showGoal prints the detail view for a single goal: status, priority,
// assignee, linked convoys, staleness, and description. Output is JSON when
// --json was passed, human-readable text otherwise.
//
// Returns an error when bd cannot find the ID or when the bead exists but
// is not an epic.
func showGoal(goalID string) error {
	// Get goal details via bd show
	showCmd := exec.Command("bd", "show", goalID, "--json")
	var stdout bytes.Buffer
	showCmd.Stdout = &stdout

	if err := showCmd.Run(); err != nil {
		return fmt.Errorf("goal '%s' not found", goalID)
	}

	// Decoded as an array: bd show --json is parsed as a list even for a
	// single ID.
	var goals []struct {
		ID string `json:"id"`
		Title string `json:"title"`
		Description string `json:"description"`
		Status string `json:"status"`
		Priority int `json:"priority"`
		IssueType string `json:"issue_type"`
		Assignee string `json:"assignee"`
		CreatedAt string `json:"created_at"`
		UpdatedAt string `json:"updated_at"`
	}
	if err := json.Unmarshal(stdout.Bytes(), &goals); err != nil {
		return fmt.Errorf("parsing goal data: %w", err)
	}

	if len(goals) == 0 {
		return fmt.Errorf("goal '%s' not found", goalID)
	}

	goal := goals[0]

	// Verify it's an epic
	if goal.IssueType != "epic" {
		return fmt.Errorf("'%s' is not a goal/epic (type: %s)", goalID, goal.IssueType)
	}

	// Get linked convoys (no dbPath available for single goal lookup, use fallback)
	convoys := getLinkedConvoys(goalID, "")

	// Compute staleness
	lastMovement := computeGoalLastMovement(goal.UpdatedAt, convoys)
	stalenessHrs := time.Since(lastMovement).Hours()
	icon := stalenessIcon(stalenessHrs)

	if goalsJSON {
		// Score is intentionally left zero here; ranking only matters in
		// the list view.
		out := goalInfo{
			ID: goal.ID,
			Title: goal.Title,
			Status: goal.Status,
			Priority: goal.Priority,
			Assignee: goal.Assignee,
			ConvoyCount: len(convoys),
			LastMovement: lastMovement,
			StalenessHrs: stalenessHrs,
			StalenessIcon: icon,
		}
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(out)
	}

	// Human-readable output
	fmt.Printf("%s P%d %s: %s\n\n", icon, goal.Priority, style.Bold.Render(goal.ID), goal.Title)
	fmt.Printf(" Status: %s\n", goal.Status)
	fmt.Printf(" Priority: P%d\n", goal.Priority)
	if goal.Assignee != "" {
		fmt.Printf(" Assignee: @%s\n", goal.Assignee)
	}
	fmt.Printf(" Convoys: %d\n", len(convoys))
	fmt.Printf(" Last activity: %s\n", formatLastActivity(lastMovement))

	if goal.Description != "" {
		fmt.Printf("\n %s\n", style.Bold.Render("Description:"))
		// Indent description
		for _, line := range strings.Split(goal.Description, "\n") {
			fmt.Printf(" %s\n", line)
		}
	}

	if len(convoys) > 0 {
		fmt.Printf("\n %s\n", style.Bold.Render("Linked Convoys:"))
		for _, c := range convoys {
			// Open convoys get a hollow circle, closed ones a check mark.
			statusIcon := "○"
			if c.Status == "closed" {
				statusIcon = "✓"
			}
			fmt.Printf(" %s %s: %s\n", statusIcon, c.ID, c.Title)
		}
	}

	return nil
}
|
||||
|
||||
// listGoals renders every goal (epic) across the town, sorted by
// priority-weighted staleness so the most neglected important goals come
// first. Honors --include-wisp, --priority, and --json.
//
// NOTE(review): the priority weight here is (4 - priority) while focus.go
// uses (5 - priority); confirm whether the two commands are meant to rank
// identically.
func listGoals() error {
	// Collect epics from all rigs (goals are cross-rig strategic objectives)
	epics, err := collectEpicsFromAllRigs()
	if err != nil {
		return err
	}

	// Filter out wisp molecules by default (transient/operational, not strategic goals)
	// These have IDs like "gt-wisp-*" and are molecule-tracking beads, not human goals
	if !goalsIncludeWisp {
		filtered := make([]epicRecord, 0)
		for _, e := range epics {
			if !isWispEpic(e.ID, e.Title) {
				filtered = append(filtered, e)
			}
		}
		epics = filtered
	}

	// Filter by priority if specified
	// NOTE(review): --status (goalsStatus) is not applied in this function;
	// verify it is enforced by the underlying epic query.
	if goalsPriority != "" {
		targetPriority := parsePriority(goalsPriority)
		filtered := make([]epicRecord, 0)
		for _, e := range epics {
			if e.Priority == targetPriority {
				filtered = append(filtered, e)
			}
		}
		epics = filtered
	}

	// Build goal info with staleness computation
	var goals []goalInfo
	for _, epic := range epics {
		convoys := getLinkedConvoys(epic.ID, epic.dbPath)
		lastMovement := computeGoalLastMovement(epic.UpdatedAt, convoys)
		stalenessHrs := time.Since(lastMovement).Hours()
		icon := stalenessIcon(stalenessHrs)

		// Score = priority_value × staleness_hours
		// Lower priority number = higher priority, so invert (4 - priority)
		priorityWeight := float64(4 - epic.Priority)
		if priorityWeight < 1 {
			priorityWeight = 1
		}
		score := priorityWeight * stalenessHrs

		goals = append(goals, goalInfo{
			ID: epic.ID,
			Title: epic.Title,
			Status: epic.Status,
			Priority: epic.Priority,
			Assignee: epic.Assignee,
			ConvoyCount: len(convoys),
			LastMovement: lastMovement,
			StalenessHrs: stalenessHrs,
			StalenessIcon: icon,
			Score: score,
		})
	}

	// Sort by score (highest attention needed first)
	sort.Slice(goals, func(i, j int) bool {
		return goals[i].Score > goals[j].Score
	})

	if goalsJSON {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(goals)
	}

	if len(goals) == 0 {
		fmt.Println("No goals found.")
		fmt.Println("Create a goal with: bd create --type=epic --title=\"Goal name\"")
		return nil
	}

	// Count active (non-closed) goals
	activeCount := 0
	for _, g := range goals {
		if g.Status != "closed" {
			activeCount++
		}
	}

	fmt.Printf("%s\n\n", style.Bold.Render(fmt.Sprintf("Goals (%d active, sorted by staleness × priority)", activeCount)))

	for _, g := range goals {
		// Format: 🔴 P1 sc-xyz: Title
		//         3 convoys | stale 6h
		priorityStr := fmt.Sprintf("P%d", g.Priority)

		fmt.Printf(" %s %s %s: %s\n", g.StalenessIcon, priorityStr, g.ID, g.Title)

		// Second line with convoy count, staleness, and assignee (if any)
		activityStr := formatActivityShort(g.StalenessHrs)
		if g.Assignee != "" {
			fmt.Printf(" %d convoy(s) | %s | @%s\n\n", g.ConvoyCount, activityStr, g.Assignee)
		} else {
			fmt.Printf(" %d convoy(s) | %s\n\n", g.ConvoyCount, activityStr)
		}
	}

	return nil
}
|
||||
|
||||
// convoyInfo holds basic convoy info.
// Minimal projection of a convoy bead used for listing and activity checks.
type convoyInfo struct {
	ID string
	Title string
	Status string
}
|
||||
|
||||
// getLinkedConvoys finds convoys linked to a goal (via parent-child relation).
// dbPath is the path to beads.db containing the goal for direct SQLite queries.
// All failures (missing sqlite3 binary, bad JSON, no rows) degrade to an
// empty slice — callers treat "no convoys" and "couldn't query" identically.
func getLinkedConvoys(goalID, dbPath string) []convoyInfo {
	var convoys []convoyInfo

	// If no dbPath provided, fall back to bd subprocess (shouldn't happen normally)
	if dbPath == "" {
		return getLinkedConvoysFallback(goalID)
	}

	// Query dependencies directly from SQLite
	// Children are stored as: depends_on_id = goalID (parent) with type 'blocks'
	// Single quotes are doubled to keep goalID safe inside the SQL literal.
	// NOTE(review): this is manual escaping via string interpolation, not a
	// parameterized query — acceptable only because IDs are program-generated;
	// confirm no user-controlled goalID can reach this path.
	safeGoalID := strings.ReplaceAll(goalID, "'", "''")
	query := fmt.Sprintf(`
		SELECT i.id, i.title, i.status
		FROM dependencies d
		JOIN issues i ON d.issue_id = i.id
		WHERE d.depends_on_id = '%s' AND d.type = 'blocks' AND i.issue_type = 'convoy'
	`, safeGoalID)

	queryCmd := exec.Command("sqlite3", "-json", dbPath, query)
	var stdout bytes.Buffer
	queryCmd.Stdout = &stdout

	if err := queryCmd.Run(); err != nil {
		return convoys
	}

	// Empty output means no rows (the sqlite3 CLI emits nothing rather than
	// "[]" for an empty result set).
	if stdout.Len() == 0 {
		return convoys
	}

	var results []struct {
		ID string `json:"id"`
		Title string `json:"title"`
		Status string `json:"status"`
	}
	if err := json.Unmarshal(stdout.Bytes(), &results); err != nil {
		return convoys
	}

	for _, r := range results {
		convoys = append(convoys, convoyInfo{
			ID: r.ID,
			Title: r.Title,
			Status: r.Status,
		})
	}

	return convoys
}
|
||||
|
||||
// getLinkedConvoysFallback uses bd subprocess (for when dbPath is unknown).
|
||||
func getLinkedConvoysFallback(goalID string) []convoyInfo {
|
||||
var convoys []convoyInfo
|
||||
|
||||
depArgs := []string{"dep", "list", goalID, "--json"}
|
||||
depCmd := exec.Command("bd", depArgs...)
|
||||
var stdout bytes.Buffer
|
||||
depCmd.Stdout = &stdout
|
||||
|
||||
if err := depCmd.Run(); err != nil {
|
||||
return convoys
|
||||
}
|
||||
|
||||
var deps struct {
|
||||
Children []struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
} `json:"children"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &deps); err != nil {
|
||||
return convoys
|
||||
}
|
||||
|
||||
for _, child := range deps.Children {
|
||||
details := getIssueDetails(child.ID)
|
||||
if details != nil && details.IssueType == "convoy" {
|
||||
convoys = append(convoys, convoyInfo{
|
||||
ID: details.ID,
|
||||
Title: details.Title,
|
||||
Status: details.Status,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return convoys
|
||||
}
|
||||
|
||||
// computeGoalLastMovement computes when the goal last had activity.
|
||||
// It looks at:
|
||||
// 1. The goal's own updated_at (passed directly to avoid re-querying)
|
||||
// 2. The last activity of any linked convoy's tracked issues
|
||||
func computeGoalLastMovement(goalUpdatedAt string, convoys []convoyInfo) time.Time {
|
||||
// Start with the goal's own updated_at
|
||||
lastMovement := time.Now().Add(-24 * time.Hour) // Default to 24 hours ago
|
||||
if goalUpdatedAt != "" {
|
||||
if t, err := time.Parse(time.RFC3339, goalUpdatedAt); err == nil {
|
||||
lastMovement = t
|
||||
}
|
||||
}
|
||||
|
||||
// If no convoys, return early (common case - avoids unnecessary work)
|
||||
if len(convoys) == 0 {
|
||||
return lastMovement
|
||||
}
|
||||
|
||||
// Check convoy activity
|
||||
townBeads, err := getTownBeadsDir()
|
||||
if err != nil {
|
||||
return lastMovement
|
||||
}
|
||||
|
||||
for _, convoy := range convoys {
|
||||
tracked := getTrackedIssues(townBeads, convoy.ID)
|
||||
for _, t := range tracked {
|
||||
// Get issue's updated_at
|
||||
details := getIssueDetails(t.ID)
|
||||
if details == nil {
|
||||
continue
|
||||
}
|
||||
showCmd := exec.Command("bd", "show", t.ID, "--json")
|
||||
var out bytes.Buffer
|
||||
showCmd.Stdout = &out
|
||||
showCmd.Run()
|
||||
|
||||
var issues []struct {
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
}
|
||||
json.Unmarshal(out.Bytes(), &issues)
|
||||
if len(issues) > 0 && issues[0].UpdatedAt != "" {
|
||||
if t, err := time.Parse(time.RFC3339, issues[0].UpdatedAt); err == nil {
|
||||
if t.After(lastMovement) {
|
||||
lastMovement = t
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lastMovement
|
||||
}
|
||||
|
||||
// stalenessIcon returns the appropriate staleness indicator.
// 🟢 active: moved in last hour
// 🟡 stale: no movement for 1+ hours
// 🔴 stuck: no movement for 4+ hours
func stalenessIcon(hours float64) string {
	switch {
	case hours < 1:
		return "🟢"
	case hours < 4:
		return "🟡"
	default:
		return "🔴"
	}
}
|
||||
|
||||
// formatLastActivity formats the last activity time for display.
|
||||
func formatLastActivity(t time.Time) string {
|
||||
if t.IsZero() {
|
||||
return "unknown"
|
||||
}
|
||||
d := time.Since(t)
|
||||
if d < time.Minute {
|
||||
return "just now"
|
||||
}
|
||||
if d < time.Hour {
|
||||
return fmt.Sprintf("%d minutes ago", int(d.Minutes()))
|
||||
}
|
||||
if d < 24*time.Hour {
|
||||
return fmt.Sprintf("%d hours ago", int(d.Hours()))
|
||||
}
|
||||
return fmt.Sprintf("%d days ago", int(d.Hours()/24))
|
||||
}
|
||||
|
||||
// formatActivityShort returns a short activity string for the list view.
// Under an hour the goal reads "active", between 1 and 4 hours "stale",
// beyond that "stuck".
func formatActivityShort(hours float64) string {
	if hours >= 4 {
		return fmt.Sprintf("stuck %.0fh", hours)
	}
	if hours >= 1 {
		return fmt.Sprintf("stale %.0fh", hours)
	}
	minutes := int(hours * 60)
	if minutes < 1 {
		return "active just now"
	}
	return fmt.Sprintf("active %dm ago", minutes)
}
|
||||
|
||||
// parsePriority converts a priority string (P0, P1, etc.) to an int.
// A bare number without the P prefix is accepted too; anything unparseable
// falls back to priority 2.
func parsePriority(s string) int {
	trimmed := strings.TrimPrefix(strings.ToUpper(s), "P")
	p, err := strconv.Atoi(trimmed)
	if err != nil {
		return 2 // Default to P2
	}
	return p
}
|
||||
|
||||
// isWispEpic reports whether the epic is a transient wisp molecule.
// These are operational/infrastructure beads, not strategic goals that need
// human attention. Detection criteria:
//   - ID contains "-wisp-" (molecule tracking beads)
//   - Title starts with "mol-" (molecule beads)
func isWispEpic(id, title string) bool {
	return strings.Contains(id, "-wisp-") || strings.HasPrefix(title, "mol-")
}
|
||||
|
||||
// epicRecord represents an epic from bd list output.
// The unexported dbPath travels with the record so later convoy lookups
// (getLinkedConvoys) can query the same database directly.
type epicRecord struct {
	ID string `json:"id"`
	Title string `json:"title"`
	Status string `json:"status"`
	Priority int `json:"priority"`
	UpdatedAt string `json:"updated_at"`
	Assignee string `json:"assignee"`
	// dbPath is the path to beads.db containing this epic (for direct queries)
	dbPath string
}
|
||||
|
||||
// collectEpicsFromAllRigs queries all rigs for epics and aggregates them.
// Goals are cross-rig strategic objectives, so we need to query each rig's beads.
// Best-effort: individual rig failures are skipped so one broken database
// doesn't hide every other rig's goals. Results are deduplicated by ID.
func collectEpicsFromAllRigs() ([]epicRecord, error) {
	var allEpics []epicRecord
	seen := make(map[string]bool) // Deduplicate by ID

	// Find the town root
	townRoot, err := workspace.FindFromCwdOrError()
	if err != nil {
		// Not in a Gas Town workspace, fall back to single query
		return queryEpicsInDir("")
	}

	// Also query town-level beads (for hq- prefixed epics)
	townBeadsDir := filepath.Join(townRoot, ".beads")
	if _, err := os.Stat(townBeadsDir); err == nil {
		epics, err := queryEpicsInDir(townRoot)
		if err == nil {
			for _, e := range epics {
				if !seen[e.ID] {
					seen[e.ID] = true
					allEpics = append(allEpics, e)
				}
			}
		}
	}

	// Find all rig directories (they have .beads/ subdirectories)
	entries, err := os.ReadDir(townRoot)
	if err != nil {
		return allEpics, nil // Return what we have
	}

	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		// Skip hidden directories and known non-rig directories
		name := entry.Name()
		if strings.HasPrefix(name, ".") || name == "plugins" || name == "docs" {
			continue
		}

		rigPath := filepath.Join(townRoot, name)
		rigBeadsDir := filepath.Join(rigPath, ".beads")

		// Check if this directory has a beads database
		if _, err := os.Stat(rigBeadsDir); os.IsNotExist(err) {
			continue
		}

		// Query this rig for epics
		epics, err := queryEpicsInDir(rigPath)
		if err != nil {
			// Log but continue - one rig failing shouldn't stop the whole query
			continue
		}

		for _, e := range epics {
			if !seen[e.ID] {
				seen[e.ID] = true
				allEpics = append(allEpics, e)
			}
		}
	}

	return allEpics, nil
}
|
||||
|
||||
// queryEpicsInDir queries epics directly from SQLite in the specified directory.
|
||||
// If dir is empty, uses current working directory.
|
||||
func queryEpicsInDir(dir string) ([]epicRecord, error) {
|
||||
beadsDir := dir
|
||||
if beadsDir == "" {
|
||||
var err error
|
||||
beadsDir, err = os.Getwd()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting working directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve redirects to find actual beads.db
|
||||
resolvedBeads := beads.ResolveBeadsDir(beadsDir)
|
||||
dbPath := filepath.Join(resolvedBeads, "beads.db")
|
||||
|
||||
// Check if database exists
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
return nil, nil // No database, no epics
|
||||
}
|
||||
|
||||
// Build SQL query for epics
|
||||
query := `SELECT id, title, status, priority, updated_at, assignee
|
||||
FROM issues
|
||||
WHERE issue_type = 'epic'`
|
||||
|
||||
if goalsStatus == "" || goalsStatus == "open" {
|
||||
query += ` AND status <> 'closed' AND status <> 'tombstone'`
|
||||
} else if goalsStatus != "all" {
|
||||
query += fmt.Sprintf(` AND status = '%s'`, strings.ReplaceAll(goalsStatus, "'", "''"))
|
||||
} else {
|
||||
// --all: exclude tombstones but include everything else
|
||||
query += ` AND status <> 'tombstone'`
|
||||
}
|
||||
|
||||
queryCmd := exec.Command("sqlite3", "-json", dbPath, query)
|
||||
var stdout bytes.Buffer
|
||||
queryCmd.Stdout = &stdout
|
||||
|
||||
if err := queryCmd.Run(); err != nil {
|
||||
// Database might be empty or have no epics - not an error
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Handle empty result (sqlite3 -json returns nothing for empty sets)
|
||||
if stdout.Len() == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var epics []epicRecord
|
||||
if err := json.Unmarshal(stdout.Bytes(), &epics); err != nil {
|
||||
return nil, fmt.Errorf("parsing epics: %w", err)
|
||||
}
|
||||
|
||||
// Set dbPath on each epic for direct queries later
|
||||
for i := range epics {
|
||||
epics[i].dbPath = dbPath
|
||||
}
|
||||
|
||||
return epics, nil
|
||||
}
|
||||
@@ -172,16 +172,14 @@ func runHandoff(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If subject/message provided, send handoff mail to self first
|
||||
// The mail is auto-hooked so the next session picks it up
|
||||
if handoffSubject != "" || handoffMessage != "" {
|
||||
beadID, err := sendHandoffMail(handoffSubject, handoffMessage)
|
||||
if err != nil {
|
||||
style.PrintWarning("could not send handoff mail: %v", err)
|
||||
// Continue anyway - the respawn is more important
|
||||
} else {
|
||||
fmt.Printf("%s Sent handoff mail %s (auto-hooked)\n", style.Bold.Render("📬"), beadID)
|
||||
}
|
||||
// Send handoff mail to self (defaults applied inside sendHandoffMail).
|
||||
// The mail is auto-hooked so the next session picks it up.
|
||||
beadID, err := sendHandoffMail(handoffSubject, handoffMessage)
|
||||
if err != nil {
|
||||
style.PrintWarning("could not send handoff mail: %v", err)
|
||||
// Continue anyway - the respawn is more important
|
||||
} else {
|
||||
fmt.Printf("%s Sent handoff mail %s (auto-hooked)\n", style.Bold.Render("📬"), beadID)
|
||||
}
|
||||
|
||||
// NOTE: reportAgentState("stopped") removed (gt-zecmc)
|
||||
@@ -204,14 +202,17 @@ func runHandoff(cmd *cobra.Command, args []string) error {
|
||||
_ = os.WriteFile(markerPath, []byte(currentSession), 0644)
|
||||
}
|
||||
|
||||
// Kill all processes in the pane before respawning to prevent orphan leaks
|
||||
// RespawnPane's -k flag only sends SIGHUP which Claude/Node may ignore
|
||||
if err := t.KillPaneProcesses(pane); err != nil {
|
||||
// Non-fatal but log the warning
|
||||
style.PrintWarning("could not kill pane processes: %v", err)
|
||||
}
|
||||
// NOTE: We intentionally do NOT kill pane processes before respawning (hq-bv7ef).
|
||||
// Previous approach (KillPaneProcessesExcluding) killed the pane's main process,
|
||||
// which caused the pane to close (remain-on-exit is off by default), making
|
||||
// RespawnPane fail because the target pane no longer exists.
|
||||
//
|
||||
// The respawn-pane -k flag handles killing atomically - it kills the old process
|
||||
// and starts the new one in a single operation without closing the pane.
|
||||
// If orphan processes remain (e.g., Claude ignoring SIGHUP), they will be cleaned
|
||||
// up when the new session starts or when the Witness runs periodic cleanup.
|
||||
|
||||
// Use exec to respawn the pane - this kills us and restarts
|
||||
// Use respawn-pane to atomically kill old process and start new one
|
||||
return t.RespawnPane(pane, restartCmd)
|
||||
}
|
||||
|
||||
@@ -377,9 +378,9 @@ func buildRestartCommand(sessionName string) (string, error) {
|
||||
gtRole := identity.GTRole()
|
||||
|
||||
// Build startup beacon for predecessor discovery via /resume
|
||||
// Use FormatStartupNudge instead of bare "gt prime" which confuses agents
|
||||
// Use FormatStartupBeacon instead of bare "gt prime" which confuses agents
|
||||
// The SessionStart hook handles context injection (gt prime --hook)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
beacon := session.FormatStartupBeacon(session.BeaconConfig{
|
||||
Recipient: identity.Address(),
|
||||
Sender: "self",
|
||||
Topic: "handoff",
|
||||
@@ -567,12 +568,10 @@ func handoffRemoteSession(t *tmux.Tmux, targetSession, restartCmd string) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// Kill all processes in the pane before respawning to prevent orphan leaks
|
||||
// RespawnPane's -k flag only sends SIGHUP which Claude/Node may ignore
|
||||
if err := t.KillPaneProcesses(targetPane); err != nil {
|
||||
// Non-fatal but log the warning
|
||||
style.PrintWarning("could not kill pane processes: %v", err)
|
||||
}
|
||||
// NOTE: We intentionally do NOT kill pane processes before respawning (hq-bv7ef).
|
||||
// Previous approach (KillPaneProcesses) killed the pane's main process, which caused
|
||||
// the pane to close (remain-on-exit is off by default), making RespawnPane fail.
|
||||
// The respawn-pane -k flag handles killing atomically without closing the pane.
|
||||
|
||||
// Clear scrollback history before respawn (resets copy-mode from [0/N] to [0/0])
|
||||
if err := t.ClearHistory(targetPane); err != nil {
|
||||
@@ -580,7 +579,7 @@ func handoffRemoteSession(t *tmux.Tmux, targetSession, restartCmd string) error
|
||||
style.PrintWarning("could not clear history: %v", err)
|
||||
}
|
||||
|
||||
// Respawn the remote session's pane
|
||||
// Respawn the remote session's pane - -k flag atomically kills old process and starts new one
|
||||
if err := t.RespawnPane(targetPane, restartCmd); err != nil {
|
||||
return fmt.Errorf("respawning pane: %w", err)
|
||||
}
|
||||
|
||||
@@ -5,13 +5,14 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/events"
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
|
||||
var hookCmd = &cobra.Command{
|
||||
@@ -147,6 +148,12 @@ func runHook(_ *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("detecting agent identity: %w", err)
|
||||
}
|
||||
|
||||
// Find town root (needed for cross-prefix bead resolution)
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding workspace: %w", err)
|
||||
}
|
||||
|
||||
// Find beads directory
|
||||
workDir, err := findLocalBeadsDir()
|
||||
if err != nil {
|
||||
@@ -183,15 +190,8 @@ func runHook(_ *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s Replacing completed bead %s...\n", style.Dim.Render("ℹ"), existing.ID)
|
||||
if !hookDryRun {
|
||||
if hasAttachment {
|
||||
// Close completed molecule bead (use bd close --force for pinned)
|
||||
closeArgs := []string{"close", existing.ID, "--force",
|
||||
"--reason=Auto-replaced by gt hook (molecule complete)"}
|
||||
if sessionID := runtime.SessionIDFromEnv(); sessionID != "" {
|
||||
closeArgs = append(closeArgs, "--session="+sessionID)
|
||||
}
|
||||
closeCmd := exec.Command("bd", closeArgs...)
|
||||
closeCmd.Stderr = os.Stderr
|
||||
if err := closeCmd.Run(); err != nil {
|
||||
// Close completed molecule bead (use force for pinned)
|
||||
if err := b.CloseForced(existing.ID, "Auto-replaced by gt hook (molecule complete)"); err != nil {
|
||||
return fmt.Errorf("closing completed bead %s: %w", existing.ID, err)
|
||||
}
|
||||
} else {
|
||||
@@ -232,8 +232,13 @@ func runHook(_ *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hook the bead using bd update (discovery-based approach)
|
||||
hookCmd := exec.Command("bd", "update", beadID, "--status=hooked", "--assignee="+agentID)
|
||||
// Hook the bead using bd update with cross-prefix routing.
|
||||
// The bead may be in a different beads database than the agent's local one
|
||||
// (e.g., hooking an hq-* bead from a rig worker). Use ResolveHookDir to
|
||||
// find the correct database directory based on the bead's prefix.
|
||||
// See: https://github.com/steveyegge/gastown/issues/gt-rphsv
|
||||
hookCmd := exec.Command("bd", "--no-daemon", "update", beadID, "--status=hooked", "--assignee="+agentID)
|
||||
hookCmd.Dir = beads.ResolveHookDir(townRoot, beadID, workDir)
|
||||
hookCmd.Stderr = os.Stderr
|
||||
if err := hookCmd.Run(); err != nil {
|
||||
return fmt.Errorf("hooking bead: %w", err)
|
||||
@@ -243,6 +248,12 @@ func runHook(_ *cobra.Command, args []string) error {
|
||||
fmt.Printf(" Use 'gt handoff' to restart with this work\n")
|
||||
fmt.Printf(" Use 'gt hook' to see hook status\n")
|
||||
|
||||
// Update agent bead's hook_bead slot for status queries.
|
||||
// This enables `gt hook status` to find cross-prefix hooked beads.
|
||||
// The agent bead has a hook_bead database field that tracks current work.
|
||||
townBeadsDir := filepath.Join(townRoot, ".beads")
|
||||
updateAgentHookBead(agentID, beadID, workDir, townBeadsDir)
|
||||
|
||||
// Log hook event to activity feed (non-fatal)
|
||||
if err := events.LogFeed(events.TypeHook, agentID, events.HookPayload(beadID)); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s Warning: failed to log hook event: %v\n", style.Dim.Render("⚠"), err)
|
||||
@@ -310,11 +321,30 @@ func runHookShow(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("listing hooked beads: %w", err)
|
||||
}
|
||||
|
||||
// If nothing found, try scanning all rigs for town-level roles
|
||||
if len(hookedBeads) == 0 && isTownLevelRole(target) {
|
||||
// If nothing found in local beads, also check town beads for hooked convoys.
|
||||
// Convoys (hq-cv-*) are stored in town beads (~/gt/.beads) and any agent
|
||||
// can hook them for convoy-driver mode.
|
||||
if len(hookedBeads) == 0 {
|
||||
townRoot, err := findTownRoot()
|
||||
if err == nil && townRoot != "" {
|
||||
hookedBeads = scanAllRigsForHookedBeads(townRoot, target)
|
||||
// Check town beads for hooked items
|
||||
townBeadsDir := filepath.Join(townRoot, ".beads")
|
||||
if _, err := os.Stat(townBeadsDir); err == nil {
|
||||
townBeads := beads.New(townBeadsDir)
|
||||
townHooked, err := townBeads.List(beads.ListOptions{
|
||||
Status: beads.StatusHooked,
|
||||
Assignee: target,
|
||||
Priority: -1,
|
||||
})
|
||||
if err == nil && len(townHooked) > 0 {
|
||||
hookedBeads = townHooked
|
||||
}
|
||||
}
|
||||
|
||||
// If still nothing found and town-level role, scan all rigs
|
||||
if len(hookedBeads) == 0 && isTownLevelRole(target) {
|
||||
hookedBeads = scanAllRigsForHookedBeads(townRoot, target)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -29,8 +29,8 @@ func setupHookTestTown(t *testing.T) (townRoot, polecatDir string) {
|
||||
|
||||
// Create routes.jsonl
|
||||
routes := []beads.Route{
|
||||
{Prefix: "hq-", Path: "."}, // Town-level beads
|
||||
{Prefix: "gt-", Path: "gastown/mayor/rig"}, // Gastown rig
|
||||
{Prefix: "hq-", Path: "."}, // Town-level beads
|
||||
{Prefix: "gt-", Path: "gastown/mayor/rig"}, // Gastown rig
|
||||
}
|
||||
if err := beads.WriteRoutes(townBeadsDir, routes); err != nil {
|
||||
t.Fatalf("write routes: %v", err)
|
||||
@@ -81,6 +81,8 @@ func initBeadsDB(t *testing.T, dir string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Note: initBeadsDBWithPrefix is defined in beads_routing_integration_test.go
|
||||
|
||||
// TestHookSlot_BasicHook verifies that a bead can be hooked to an agent.
|
||||
func TestHookSlot_BasicHook(t *testing.T) {
|
||||
// Skip if bd is not available
|
||||
@@ -486,3 +488,118 @@ func TestHookSlot_StatusTransitions(t *testing.T) {
|
||||
t.Errorf("final status = %s, want closed", closed.Status)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHookSlot_CrossPrefixHook verifies that beads with different prefixes can be hooked
|
||||
// using the correct database routing. This is the fix for issue gt-rphsv.
|
||||
func TestHookSlot_CrossPrefixHook(t *testing.T) {
|
||||
if _, err := exec.LookPath("bd"); err != nil {
|
||||
t.Skip("bd not installed, skipping test")
|
||||
}
|
||||
|
||||
townRoot, polecatDir := setupHookTestTown(t)
|
||||
|
||||
// Initialize beads in both town-level (hq- prefix) and rig-level (gt- prefix)
|
||||
// Note: bd init must be run from parent directory, not inside .beads
|
||||
initBeadsDBWithPrefix(t, townRoot, "hq")
|
||||
|
||||
rigDir := filepath.Join(polecatDir, "..", "..", "mayor", "rig")
|
||||
initBeadsDBWithPrefix(t, rigDir, "gt")
|
||||
|
||||
// Create beads instances for both databases
|
||||
townBeads := beads.New(townRoot) // Uses routes.jsonl to route to correct DB
|
||||
rigBeads := beads.New(rigDir)
|
||||
|
||||
// Create an hq-* bead in town beads
|
||||
townBeadsInstance := beads.New(townRoot)
|
||||
hqIssue, err := townBeadsInstance.Create(beads.CreateOptions{
|
||||
Title: "HQ task for cross-prefix test",
|
||||
Type: "task",
|
||||
Priority: 2,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("create hq bead: %v", err)
|
||||
}
|
||||
// The bead ID should have hq- prefix since we initialized town beads with that prefix
|
||||
t.Logf("Created HQ bead: %s", hqIssue.ID)
|
||||
|
||||
// Create a gt-* bead in rig beads
|
||||
gtIssue, err := rigBeads.Create(beads.CreateOptions{
|
||||
Title: "Rig task for cross-prefix test",
|
||||
Type: "task",
|
||||
Priority: 2,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("create rig bead: %v", err)
|
||||
}
|
||||
t.Logf("Created rig bead: %s", gtIssue.ID)
|
||||
|
||||
agentID := "gastown/polecats/toast"
|
||||
|
||||
// Test 1: Hook the HQ bead using ResolveHookDir (simulating runHook fix)
|
||||
hookDir := beads.ResolveHookDir(townRoot, hqIssue.ID, rigDir)
|
||||
t.Logf("ResolveHookDir(%s, %s, %s) = %s", townRoot, hqIssue.ID, rigDir, hookDir)
|
||||
|
||||
// Hook the HQ bead via bd command with correct directory routing
|
||||
hookCmd := exec.Command("bd", "--no-daemon", "update", hqIssue.ID, "--status=hooked", "--assignee="+agentID)
|
||||
hookCmd.Dir = hookDir
|
||||
if output, err := hookCmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("hook hq bead: %v\n%s", err, output)
|
||||
}
|
||||
|
||||
// Verify the HQ bead is hooked by querying town beads
|
||||
hookedHQ, err := townBeadsInstance.List(beads.ListOptions{
|
||||
Status: beads.StatusHooked,
|
||||
Assignee: agentID,
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("list hooked hq beads: %v", err)
|
||||
}
|
||||
|
||||
if len(hookedHQ) != 1 {
|
||||
t.Errorf("expected 1 hooked HQ bead, got %d", len(hookedHQ))
|
||||
}
|
||||
if len(hookedHQ) > 0 && hookedHQ[0].ID != hqIssue.ID {
|
||||
t.Errorf("hooked HQ bead ID = %s, want %s", hookedHQ[0].ID, hqIssue.ID)
|
||||
}
|
||||
|
||||
// Test 2: Verify rig beads are still queryable separately
|
||||
status := beads.StatusHooked
|
||||
if err := rigBeads.Update(gtIssue.ID, beads.UpdateOptions{
|
||||
Status: &status,
|
||||
Assignee: &agentID,
|
||||
}); err != nil {
|
||||
t.Fatalf("hook rig bead: %v", err)
|
||||
}
|
||||
|
||||
hookedRig, err := rigBeads.List(beads.ListOptions{
|
||||
Status: beads.StatusHooked,
|
||||
Assignee: agentID,
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("list hooked rig beads: %v", err)
|
||||
}
|
||||
|
||||
if len(hookedRig) != 1 {
|
||||
t.Errorf("expected 1 hooked rig bead, got %d", len(hookedRig))
|
||||
}
|
||||
if len(hookedRig) > 0 && hookedRig[0].ID != gtIssue.ID {
|
||||
t.Errorf("hooked rig bead ID = %s, want %s", hookedRig[0].ID, gtIssue.ID)
|
||||
}
|
||||
|
||||
// Verify the databases are separate
|
||||
t.Logf("HQ bead %s hooked in town DB, Rig bead %s hooked in rig DB", hqIssue.ID, gtIssue.ID)
|
||||
|
||||
// Verify the HQ bead is NOT in the rig database
|
||||
_, err = rigBeads.Show(hqIssue.ID)
|
||||
if err == nil {
|
||||
t.Log("Note: HQ bead found in rig DB - this may indicate routing is working via redirect")
|
||||
}
|
||||
|
||||
// Verify the rig bead is NOT in the town database
|
||||
_, err = townBeads.Show(gtIssue.ID)
|
||||
if err == nil {
|
||||
t.Log("Note: Rig bead found in town DB - this may indicate routing is working")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -135,8 +135,14 @@ func discoverHooks(townRoot string) ([]HookInfo, error) {
|
||||
agent string
|
||||
}{filepath.Join(rigPath, ".claude", "settings.json"), fmt.Sprintf("%s/rig", rigName)})
|
||||
|
||||
// Polecats
|
||||
// Polecats-level hooks (inherited by all polecats)
|
||||
polecatsDir := filepath.Join(rigPath, "polecats")
|
||||
locations = append(locations, struct {
|
||||
path string
|
||||
agent string
|
||||
}{filepath.Join(polecatsDir, ".claude", "settings.json"), fmt.Sprintf("%s/polecats", rigName)})
|
||||
|
||||
// Individual polecat hooks
|
||||
if polecats, err := os.ReadDir(polecatsDir); err == nil {
|
||||
for _, p := range polecats {
|
||||
if p.IsDir() && !strings.HasPrefix(p.Name(), ".") {
|
||||
@@ -148,11 +154,17 @@ func discoverHooks(townRoot string) ([]HookInfo, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Crew members
|
||||
// Crew-level hooks (inherited by all crew members)
|
||||
crewDir := filepath.Join(rigPath, "crew")
|
||||
locations = append(locations, struct {
|
||||
path string
|
||||
agent string
|
||||
}{filepath.Join(crewDir, ".claude", "settings.json"), fmt.Sprintf("%s/crew", rigName)})
|
||||
|
||||
// Individual crew member hooks
|
||||
if crew, err := os.ReadDir(crewDir); err == nil {
|
||||
for _, c := range crew {
|
||||
if c.IsDir() {
|
||||
if c.IsDir() && !strings.HasPrefix(c.Name(), ".") {
|
||||
locations = append(locations, struct {
|
||||
path string
|
||||
agent string
|
||||
|
||||
@@ -85,7 +85,7 @@ func runHooksInstall(cmd *cobra.Command, args []string) error {
|
||||
// Install to each target
|
||||
installed := 0
|
||||
for _, target := range targets {
|
||||
if err := installHookTo(target, hookName, hookDef, installDryRun); err != nil {
|
||||
if err := installHookTo(target, hookDef, installDryRun); err != nil {
|
||||
fmt.Printf("%s Failed to install to %s: %v\n", style.Error.Render("Error:"), target, err)
|
||||
continue
|
||||
}
|
||||
@@ -189,7 +189,7 @@ func determineTargets(townRoot, role string, allRigs bool, allowedRoles []string
|
||||
}
|
||||
|
||||
// installHookTo installs a hook to a specific worktree.
|
||||
func installHookTo(worktreePath, hookName string, hookDef HookDefinition, dryRun bool) error {
|
||||
func installHookTo(worktreePath string, hookDef HookDefinition, dryRun bool) error {
|
||||
settingsPath := filepath.Join(worktreePath, ".claude", "settings.json")
|
||||
|
||||
// Load existing settings or create new
|
||||
|
||||
@@ -133,3 +133,82 @@ func TestParseHooksFileEmptyHooks(t *testing.T) {
|
||||
t.Errorf("expected 0 hooks, got %d", len(hooks))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDiscoverHooksCrewLevel(t *testing.T) {
|
||||
// Create a temp directory structure simulating a Gas Town workspace
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create rig structure with crew-level and polecats-level settings
|
||||
rigName := "testrig"
|
||||
rigDir := filepath.Join(tmpDir, rigName)
|
||||
|
||||
// Create crew-level settings (inherited by all crew members)
|
||||
crewClaudeDir := filepath.Join(rigDir, "crew", ".claude")
|
||||
if err := os.MkdirAll(crewClaudeDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create crew/.claude dir: %v", err)
|
||||
}
|
||||
|
||||
crewSettings := ClaudeSettings{
|
||||
Hooks: map[string][]ClaudeHookMatcher{
|
||||
"SessionStart": {
|
||||
{
|
||||
Matcher: "",
|
||||
Hooks: []ClaudeHook{
|
||||
{Type: "command", Command: "crew-level-hook"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
crewData, _ := json.Marshal(crewSettings)
|
||||
if err := os.WriteFile(filepath.Join(crewClaudeDir, "settings.json"), crewData, 0644); err != nil {
|
||||
t.Fatalf("failed to write crew settings: %v", err)
|
||||
}
|
||||
|
||||
// Create polecats-level settings (inherited by all polecats)
|
||||
polecatsClaudeDir := filepath.Join(rigDir, "polecats", ".claude")
|
||||
if err := os.MkdirAll(polecatsClaudeDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create polecats/.claude dir: %v", err)
|
||||
}
|
||||
|
||||
polecatsSettings := ClaudeSettings{
|
||||
Hooks: map[string][]ClaudeHookMatcher{
|
||||
"PreToolUse": {
|
||||
{
|
||||
Matcher: "",
|
||||
Hooks: []ClaudeHook{
|
||||
{Type: "command", Command: "polecats-level-hook"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
polecatsData, _ := json.Marshal(polecatsSettings)
|
||||
if err := os.WriteFile(filepath.Join(polecatsClaudeDir, "settings.json"), polecatsData, 0644); err != nil {
|
||||
t.Fatalf("failed to write polecats settings: %v", err)
|
||||
}
|
||||
|
||||
// Discover hooks
|
||||
hooks, err := discoverHooks(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("discoverHooks failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify crew-level hook was discovered
|
||||
var foundCrewLevel, foundPolecatsLevel bool
|
||||
for _, h := range hooks {
|
||||
if h.Agent == "testrig/crew" && len(h.Commands) > 0 && h.Commands[0] == "crew-level-hook" {
|
||||
foundCrewLevel = true
|
||||
}
|
||||
if h.Agent == "testrig/polecats" && len(h.Commands) > 0 && h.Commands[0] == "polecats-level-hook" {
|
||||
foundPolecatsLevel = true
|
||||
}
|
||||
}
|
||||
|
||||
if !foundCrewLevel {
|
||||
t.Error("expected crew-level hook to be discovered (testrig/crew)")
|
||||
}
|
||||
if !foundPolecatsLevel {
|
||||
t.Error("expected polecats-level hook to be discovered (testrig/polecats)")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,9 +11,9 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/claude"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/deps"
|
||||
"github.com/steveyegge/gastown/internal/formula"
|
||||
"github.com/steveyegge/gastown/internal/shell"
|
||||
@@ -258,12 +258,6 @@ func runInstall(cmd *cobra.Command, args []string) error {
|
||||
// Town beads (hq- prefix) stores mayor mail, cross-rig coordination, and handoffs.
|
||||
// Rig beads are separate and have their own prefixes.
|
||||
if !installNoBeads {
|
||||
// Kill any orphaned bd daemons before initializing beads.
|
||||
// Stale daemons can interfere with fresh database creation.
|
||||
if killed, _, _ := beads.StopAllBdProcesses(false, true); killed > 0 {
|
||||
fmt.Printf(" ✓ Stopped %d orphaned bd daemon(s)\n", killed)
|
||||
}
|
||||
|
||||
if err := initTownBeads(absPath); err != nil {
|
||||
fmt.Printf(" %s Could not initialize town beads: %v\n", style.Dim.Render("⚠"), err)
|
||||
} else {
|
||||
@@ -414,11 +408,8 @@ func initTownBeads(townPath string) error {
|
||||
|
||||
// Configure custom types for Gas Town (agent, role, rig, convoy, slot).
|
||||
// These were extracted from beads core in v0.46.0 and now require explicit config.
|
||||
configCmd := exec.Command("bd", "config", "set", "types.custom", constants.BeadsCustomTypes)
|
||||
configCmd.Dir = townPath
|
||||
if configOutput, configErr := configCmd.CombinedOutput(); configErr != nil {
|
||||
// Non-fatal: older beads versions don't need this, newer ones do
|
||||
fmt.Printf(" %s Could not set custom types: %s\n", style.Dim.Render("⚠"), strings.TrimSpace(string(configOutput)))
|
||||
if err := beads.EnsureCustomTypes(beadsDir); err != nil {
|
||||
return fmt.Errorf("ensuring custom types: %w", err)
|
||||
}
|
||||
|
||||
// Configure allowed_prefixes for convoy beads (hq-cv-* IDs).
|
||||
|
||||
@@ -42,6 +42,10 @@ var (
|
||||
|
||||
// Clear flags
|
||||
mailClearAll bool
|
||||
|
||||
// Archive flags
|
||||
mailArchiveStale bool
|
||||
mailArchiveDryRun bool
|
||||
)
|
||||
|
||||
var mailCmd = &cobra.Command{
|
||||
@@ -196,16 +200,22 @@ Examples:
|
||||
}
|
||||
|
||||
var mailArchiveCmd = &cobra.Command{
|
||||
Use: "archive <message-id> [message-id...]",
|
||||
Use: "archive [message-id...]",
|
||||
Short: "Archive messages",
|
||||
Long: `Archive one or more messages.
|
||||
|
||||
Removes the messages from your inbox by closing them in beads.
|
||||
|
||||
Use --stale to archive messages sent before your current session started.
|
||||
|
||||
Examples:
|
||||
gt mail archive hq-abc123
|
||||
gt mail archive hq-abc123 hq-def456 hq-ghi789`,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
gt mail archive hq-abc123
|
||||
gt mail archive hq-abc123 hq-def456 hq-ghi789
|
||||
gt mail archive --stale
|
||||
gt mail archive --stale --dry-run`,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
RunE: runMailArchive,
|
||||
}
|
||||
|
||||
@@ -487,6 +497,10 @@ func init() {
|
||||
// Clear flags
|
||||
mailClearCmd.Flags().BoolVar(&mailClearAll, "all", false, "Clear all messages (default behavior)")
|
||||
|
||||
// Archive flags
|
||||
mailArchiveCmd.Flags().BoolVar(&mailArchiveStale, "stale", false, "Archive messages sent before session start")
|
||||
mailArchiveCmd.Flags().BoolVarP(&mailArchiveDryRun, "dry-run", "n", false, "Show what would be archived without archiving")
|
||||
|
||||
// Add subcommands
|
||||
mailCmd.AddCommand(mailSendCmd)
|
||||
mailCmd.AddCommand(mailInboxCmd)
|
||||
|
||||
25
internal/cmd/mail_archive_test.go
Normal file
25
internal/cmd/mail_archive_test.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/mail"
|
||||
)
|
||||
|
||||
func TestStaleMessagesForSession(t *testing.T) {
|
||||
sessionStart := time.Date(2026, 1, 24, 2, 0, 0, 0, time.UTC)
|
||||
messages := []*mail.Message{
|
||||
{ID: "msg-1", Subject: "Older", Timestamp: sessionStart.Add(-2 * time.Minute)},
|
||||
{ID: "msg-2", Subject: "Newer", Timestamp: sessionStart.Add(2 * time.Minute)},
|
||||
{ID: "msg-3", Subject: "Equal", Timestamp: sessionStart},
|
||||
}
|
||||
|
||||
stale := staleMessagesForSession(messages, sessionStart)
|
||||
if len(stale) != 1 {
|
||||
t.Fatalf("expected 1 stale message, got %d", len(stale))
|
||||
}
|
||||
if stale[0].Message.ID != "msg-1" {
|
||||
t.Fatalf("expected msg-1 stale, got %s", stale[0].Message.ID)
|
||||
}
|
||||
}
|
||||
@@ -189,6 +189,11 @@ func detectSenderFromCwd() string {
|
||||
}
|
||||
}
|
||||
|
||||
// If in the town's mayor directory
|
||||
if strings.Contains(cwd, "/mayor") {
|
||||
return "mayor"
|
||||
}
|
||||
|
||||
// Default to overseer (human)
|
||||
return "overseer"
|
||||
}
|
||||
|
||||
@@ -7,9 +7,11 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/mail"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
)
|
||||
|
||||
@@ -289,6 +291,23 @@ func runMailArchive(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if mailArchiveStale {
|
||||
if len(args) > 0 {
|
||||
return errors.New("--stale cannot be combined with message IDs")
|
||||
}
|
||||
return runMailArchiveStale(mailbox, address)
|
||||
}
|
||||
if len(args) == 0 {
|
||||
return errors.New("message ID required unless using --stale")
|
||||
}
|
||||
if mailArchiveDryRun {
|
||||
fmt.Printf("%s Would archive %d message(s)\n", style.Dim.Render("(dry-run)"), len(args))
|
||||
for _, msgID := range args {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(msgID))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Archive all specified messages
|
||||
archived := 0
|
||||
var errors []string
|
||||
@@ -318,6 +337,87 @@ func runMailArchive(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// staleMessage pairs an inbox message with the reason it was judged stale
// (the reason string comes from session.StaleReasonForTimes).
type staleMessage struct {
	Message *mail.Message
	Reason  string
}
|
||||
|
||||
func runMailArchiveStale(mailbox *mail.Mailbox, address string) error {
|
||||
identity, err := session.ParseAddress(address)
|
||||
if err != nil {
|
||||
return fmt.Errorf("determining session for %s: %w", address, err)
|
||||
}
|
||||
|
||||
sessionName := identity.SessionName()
|
||||
if sessionName == "" {
|
||||
return fmt.Errorf("could not determine session name for %s", address)
|
||||
}
|
||||
|
||||
sessionStart, err := session.SessionCreatedAt(sessionName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting session start time for %s: %w", sessionName, err)
|
||||
}
|
||||
|
||||
messages, err := mailbox.List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing messages: %w", err)
|
||||
}
|
||||
|
||||
staleMessages := staleMessagesForSession(messages, sessionStart)
|
||||
if mailArchiveDryRun {
|
||||
if len(staleMessages) == 0 {
|
||||
fmt.Printf("%s No stale messages found\n", style.Success.Render("✓"))
|
||||
return nil
|
||||
}
|
||||
fmt.Printf("%s Would archive %d stale message(s):\n", style.Dim.Render("(dry-run)"), len(staleMessages))
|
||||
for _, stale := range staleMessages {
|
||||
fmt.Printf(" %s %s\n", style.Dim.Render(stale.Message.ID), stale.Message.Subject)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(staleMessages) == 0 {
|
||||
fmt.Printf("%s No stale messages to archive\n", style.Success.Render("✓"))
|
||||
return nil
|
||||
}
|
||||
|
||||
archived := 0
|
||||
var errors []string
|
||||
for _, stale := range staleMessages {
|
||||
if err := mailbox.Delete(stale.Message.ID); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("%s: %v", stale.Message.ID, err))
|
||||
} else {
|
||||
archived++
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
fmt.Printf("%s Archived %d/%d stale messages\n", style.Bold.Render("⚠"), archived, len(staleMessages))
|
||||
for _, e := range errors {
|
||||
fmt.Printf(" Error: %s\n", e)
|
||||
}
|
||||
return fmt.Errorf("failed to archive %d stale messages", len(errors))
|
||||
}
|
||||
|
||||
if archived == 1 {
|
||||
fmt.Printf("%s Stale message archived\n", style.Bold.Render("✓"))
|
||||
} else {
|
||||
fmt.Printf("%s Archived %d stale messages\n", style.Bold.Render("✓"), archived)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func staleMessagesForSession(messages []*mail.Message, sessionStart time.Time) []staleMessage {
|
||||
var staleMessages []staleMessage
|
||||
for _, msg := range messages {
|
||||
stale, reason := session.StaleReasonForTimes(msg.Timestamp, sessionStart)
|
||||
if stale {
|
||||
staleMessages = append(staleMessages, staleMessage{Message: msg, Reason: reason})
|
||||
}
|
||||
}
|
||||
return staleMessages
|
||||
}
|
||||
|
||||
func runMailMarkRead(cmd *cobra.Command, args []string) error {
|
||||
// Determine which inbox
|
||||
address := detectSender()
|
||||
|
||||
@@ -188,7 +188,7 @@ func runMayorAttach(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Build startup beacon for context (like gt handoff does)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
beacon := session.FormatStartupBeacon(session.BeaconConfig{
|
||||
Recipient: "mayor",
|
||||
Sender: "human",
|
||||
Topic: "attach",
|
||||
@@ -200,6 +200,12 @@ func runMayorAttach(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
}
|
||||
|
||||
// Set remain-on-exit so the pane survives process death during respawn.
|
||||
// Without this, killing processes causes tmux to destroy the pane.
|
||||
if err := t.SetRemainOnExit(paneID, true); err != nil {
|
||||
style.PrintWarning("could not set remain-on-exit: %v", err)
|
||||
}
|
||||
|
||||
// Kill all processes in the pane before respawning to prevent orphan leaks
|
||||
// RespawnPane's -k flag only sends SIGHUP which Claude/Node may ignore
|
||||
if err := t.KillPaneProcesses(paneID); err != nil {
|
||||
@@ -207,6 +213,7 @@ func runMayorAttach(cmd *cobra.Command, args []string) error {
|
||||
style.PrintWarning("could not kill pane processes: %v", err)
|
||||
}
|
||||
|
||||
// Note: respawn-pane automatically resets remain-on-exit to off
|
||||
if err := t.RespawnPane(paneID, startupCmd); err != nil {
|
||||
return fmt.Errorf("restarting runtime: %w", err)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
@@ -91,10 +92,42 @@ case "$cmd" in
|
||||
esac
|
||||
exit 0
|
||||
`
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
bdScriptWindows := `@echo off
|
||||
setlocal enableextensions
|
||||
echo %*>>"%BD_LOG%"
|
||||
set "cmd=%1"
|
||||
set "sub=%2"
|
||||
if "%cmd%"=="--no-daemon" (
|
||||
set "cmd=%2"
|
||||
set "sub=%3"
|
||||
)
|
||||
if "%cmd%"=="--allow-stale" (
|
||||
set "cmd=%2"
|
||||
set "sub=%3"
|
||||
)
|
||||
if "%cmd%"=="show" (
|
||||
echo [{^"id^":^"gt-abc123^",^"title^":^"Bug to fix^",^"status^":^"open^",^"assignee^":^"^",^"description^":^"^"}]
|
||||
exit /b 0
|
||||
)
|
||||
if "%cmd%"=="formula" (
|
||||
echo {^"name^":^"mol-polecat-work^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%cmd%"=="cook" exit /b 0
|
||||
if "%cmd%"=="mol" (
|
||||
if "%sub%"=="wisp" (
|
||||
echo {^"new_epic_id^":^"gt-wisp-xyz^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%sub%"=="bond" (
|
||||
echo {^"root_id^":^"gt-wisp-xyz^"}
|
||||
exit /b 0
|
||||
)
|
||||
)
|
||||
if "%cmd%"=="update" exit /b 0
|
||||
exit /b 0
|
||||
`
|
||||
_ = writeBDStub(t, binDir, bdScript, bdScriptWindows)
|
||||
|
||||
t.Setenv("BD_LOG", logPath)
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
@@ -185,6 +218,9 @@ exit 0
|
||||
// - Compound resolution: base bead -> attached_molecule -> wisp
|
||||
// - gt hook/gt prime: read base bead, follow attached_molecule to show wisp steps
|
||||
func TestSlingFormulaOnBeadSetsAttachedMoleculeInBaseBead(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Windows batch script JSON output causes storeAttachedMoleculeInBead to fail silently")
|
||||
}
|
||||
townRoot := t.TempDir()
|
||||
|
||||
// Minimal workspace marker
|
||||
@@ -256,10 +292,42 @@ case "$cmd" in
|
||||
esac
|
||||
exit 0
|
||||
`
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
bdScriptWindows := `@echo off
|
||||
setlocal enableextensions
|
||||
echo %*>>"%BD_LOG%"
|
||||
set "cmd=%1"
|
||||
set "sub=%2"
|
||||
if "%cmd%"=="--no-daemon" (
|
||||
set "cmd=%2"
|
||||
set "sub=%3"
|
||||
)
|
||||
if "%cmd%"=="--allow-stale" (
|
||||
set "cmd=%2"
|
||||
set "sub=%3"
|
||||
)
|
||||
if "%cmd%"=="show" (
|
||||
echo [{^"id^":^"gt-abc123^",^"title^":^"Bug to fix^",^"status^":^"open^",^"assignee^":^"^",^"description^":^"^"}]
|
||||
exit /b 0
|
||||
)
|
||||
if "%cmd%"=="formula" (
|
||||
echo {^"name^":^"mol-polecat-work^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%cmd%"=="cook" exit /b 0
|
||||
if "%cmd%"=="mol" (
|
||||
if "%sub%"=="wisp" (
|
||||
echo {^"new_epic_id^":^"gt-wisp-xyz^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%sub%"=="bond" (
|
||||
echo {^"root_id^":^"gt-wisp-xyz^"}
|
||||
exit /b 0
|
||||
)
|
||||
)
|
||||
if "%cmd%"=="update" exit /b 0
|
||||
exit /b 0
|
||||
`
|
||||
_ = writeBDStub(t, binDir, bdScript, bdScriptWindows)
|
||||
|
||||
t.Setenv("BD_LOG", logPath)
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
@@ -416,9 +484,60 @@ esac
|
||||
exit 0
|
||||
`, townRoot, closesPath)
|
||||
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
bdScriptWindows := fmt.Sprintf(`@echo off
|
||||
setlocal enableextensions
|
||||
echo %%*>>"%s\bd.log"
|
||||
set "cmd=%%1"
|
||||
set "beadID=%%2"
|
||||
:strip_flags
|
||||
if "%%cmd%%"=="--no-daemon" (
|
||||
set "cmd=%%2"
|
||||
set "beadID=%%3"
|
||||
shift
|
||||
goto strip_flags
|
||||
)
|
||||
if "%%cmd%%"=="--allow-stale" (
|
||||
set "cmd=%%2"
|
||||
set "beadID=%%3"
|
||||
shift
|
||||
goto strip_flags
|
||||
)
|
||||
if "%%cmd%%"=="show" (
|
||||
if "%%beadID%%"=="gt-gastown-polecat-nux" (
|
||||
echo [{^"id^":^"gt-gastown-polecat-nux^",^"title^":^"Polecat nux^",^"status^":^"open^",^"hook_bead^":^"gt-abc123^",^"agent_state^":^"working^"}]
|
||||
exit /b 0
|
||||
)
|
||||
if "%%beadID%%"=="gt-abc123" (
|
||||
echo [{^"id^":^"gt-abc123^",^"title^":^"Bug to fix^",^"status^":^"hooked^",^"description^":^"attached_molecule: gt-wisp-xyz^"}]
|
||||
exit /b 0
|
||||
)
|
||||
if "%%beadID%%"=="gt-wisp-xyz" (
|
||||
echo [{^"id^":^"gt-wisp-xyz^",^"title^":^"mol-polecat-work^",^"status^":^"open^",^"ephemeral^":true}]
|
||||
exit /b 0
|
||||
)
|
||||
echo []
|
||||
exit /b 0
|
||||
)
|
||||
if "%%cmd%%"=="close" (
|
||||
echo %%beadID%%>>"%s"
|
||||
exit /b 0
|
||||
)
|
||||
if "%%cmd%%"=="agent" exit /b 0
|
||||
if "%%cmd%%"=="update" exit /b 0
|
||||
if "%%cmd%%"=="slot" exit /b 0
|
||||
exit /b 0
|
||||
`, townRoot, closesPath)
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
bdPath := filepath.Join(binDir, "bd.cmd")
|
||||
if err := os.WriteFile(bdPath, []byte(bdScriptWindows), 0644); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
} else {
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
|
||||
@@ -4,13 +4,13 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -184,11 +184,25 @@ func runMoleculeProgress(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Build set of closed issue IDs for dependency checking
|
||||
// Build set of closed issue IDs and collect open step IDs for dependency checking
|
||||
closedIDs := make(map[string]bool)
|
||||
var openStepIDs []string
|
||||
for _, child := range children {
|
||||
if child.Status == "closed" {
|
||||
closedIDs[child.ID] = true
|
||||
} else if child.Status == "open" {
|
||||
openStepIDs = append(openStepIDs, child.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch full details for open steps to get dependency info.
|
||||
// bd list doesn't return dependencies, but bd show does.
|
||||
var openStepsMap map[string]*beads.Issue
|
||||
if len(openStepIDs) > 0 {
|
||||
openStepsMap, err = b.ShowMultiple(openStepIDs)
|
||||
if err != nil {
|
||||
// Non-fatal: continue without dependency info (all open steps will be "ready")
|
||||
openStepsMap = make(map[string]*beads.Issue)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -202,16 +216,30 @@ func runMoleculeProgress(cmd *cobra.Command, args []string) error {
|
||||
case "in_progress":
|
||||
progress.InProgress++
|
||||
case "open":
|
||||
// Check if all dependencies are closed
|
||||
// Get full step info with dependencies
|
||||
step := openStepsMap[child.ID]
|
||||
|
||||
// Check if all dependencies are closed using Dependencies field
|
||||
// (from bd show), not DependsOn (which is empty from bd list).
|
||||
// Only "blocks" type dependencies block progress - ignore "parent-child".
|
||||
allDepsClosed := true
|
||||
for _, depID := range child.DependsOn {
|
||||
if !closedIDs[depID] {
|
||||
hasBlockingDeps := false
|
||||
var deps []beads.IssueDep
|
||||
if step != nil {
|
||||
deps = step.Dependencies
|
||||
}
|
||||
for _, dep := range deps {
|
||||
if dep.DependencyType != "blocks" {
|
||||
continue // Skip parent-child and other non-blocking relationships
|
||||
}
|
||||
hasBlockingDeps = true
|
||||
if !closedIDs[dep.ID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(child.DependsOn) == 0 || allDepsClosed {
|
||||
if !hasBlockingDeps || allDepsClosed {
|
||||
progress.ReadySteps = append(progress.ReadySteps, child.ID)
|
||||
} else {
|
||||
progress.BlockedSteps = append(progress.BlockedSteps, child.ID)
|
||||
@@ -303,6 +331,11 @@ func runMoleculeStatus(cmd *cobra.Command, args []string) error {
|
||||
// not the agent from the GT_ROLE env var (which might be different if
|
||||
// we cd'd into another rig's crew/polecat directory)
|
||||
roleCtx = detectRole(cwd, townRoot)
|
||||
if roleCtx.Role == RoleUnknown {
|
||||
// Fall back to GT_ROLE when cwd doesn't identify an agent
|
||||
// (e.g., at rig root like ~/gt/beads instead of ~/gt/beads/witness)
|
||||
roleCtx, _ = GetRoleWithContext(cwd, townRoot)
|
||||
}
|
||||
target = buildAgentIdentity(roleCtx)
|
||||
if target == "" {
|
||||
return fmt.Errorf("cannot determine agent identity (role: %s)", roleCtx.Role)
|
||||
@@ -339,8 +372,14 @@ func runMoleculeStatus(cmd *cobra.Command, args []string) error {
|
||||
// IMPORTANT: Don't use ParseAgentFieldsFromDescription - the description
|
||||
// field may contain stale data, causing the wrong issue to be hooked.
|
||||
if agentBead.HookBead != "" {
|
||||
// Fetch the bead on the hook
|
||||
hookBead, err = b.Show(agentBead.HookBead)
|
||||
// Fetch the bead on the hook, using cross-prefix database routing.
|
||||
// The hooked bead may be in a different database than the agent bead
|
||||
// (e.g., hq-* bead hooked by a rig worker). Use ResolveHookDir to
|
||||
// find the correct database directory based on the bead's prefix.
|
||||
// See: https://github.com/steveyegge/gastown/issues/gt-rphsv
|
||||
hookBeadDir := beads.ResolveHookDir(townRoot, agentBead.HookBead, workDir)
|
||||
hookBeadDB := beads.New(hookBeadDir)
|
||||
hookBead, err = hookBeadDB.Show(agentBead.HookBead)
|
||||
if err != nil {
|
||||
// Hook bead referenced but not found - report error but continue
|
||||
hookBead = nil
|
||||
@@ -405,6 +444,24 @@ func runMoleculeStatus(cmd *cobra.Command, args []string) error {
|
||||
hookedBeads = scanAllRigsForHookedBeads(townRoot, target)
|
||||
}
|
||||
|
||||
// For rig-level roles, also check town-level beads (hq-* prefix).
|
||||
// Agents can hook cross-prefix beads (e.g., crew worker taking an HQ task).
|
||||
// See: https://github.com/steveyegge/gastown/issues/gt-rphsv
|
||||
if len(hookedBeads) == 0 && !isTownLevelRole(target) {
|
||||
townBeadsDir := filepath.Join(townRoot, ".beads")
|
||||
if _, statErr := os.Stat(townBeadsDir); statErr == nil {
|
||||
townBeads := beads.New(townBeadsDir)
|
||||
townHooked, listErr := townBeads.List(beads.ListOptions{
|
||||
Status: beads.StatusHooked,
|
||||
Assignee: target,
|
||||
Priority: -1,
|
||||
})
|
||||
if listErr == nil && len(townHooked) > 0 {
|
||||
hookedBeads = townHooked
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
status.HasWork = len(hookedBeads) > 0
|
||||
|
||||
if len(hookedBeads) > 0 {
|
||||
@@ -509,11 +566,25 @@ func getMoleculeProgressInfo(b *beads.Beads, moleculeRootID string) (*MoleculePr
|
||||
}
|
||||
}
|
||||
|
||||
// Build set of closed issue IDs for dependency checking
|
||||
// Build set of closed issue IDs and collect open step IDs for dependency checking
|
||||
closedIDs := make(map[string]bool)
|
||||
var openStepIDs []string
|
||||
for _, child := range children {
|
||||
if child.Status == "closed" {
|
||||
closedIDs[child.ID] = true
|
||||
} else if child.Status == "open" {
|
||||
openStepIDs = append(openStepIDs, child.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch full details for open steps to get dependency info.
|
||||
// bd list doesn't return dependencies, but bd show does.
|
||||
var openStepsMap map[string]*beads.Issue
|
||||
if len(openStepIDs) > 0 {
|
||||
openStepsMap, err = b.ShowMultiple(openStepIDs)
|
||||
if err != nil {
|
||||
// Non-fatal: continue without dependency info (all open steps will be "ready")
|
||||
openStepsMap = make(map[string]*beads.Issue)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -527,16 +598,30 @@ func getMoleculeProgressInfo(b *beads.Beads, moleculeRootID string) (*MoleculePr
|
||||
case "in_progress":
|
||||
progress.InProgress++
|
||||
case "open":
|
||||
// Check if all dependencies are closed
|
||||
// Get full step info with dependencies
|
||||
step := openStepsMap[child.ID]
|
||||
|
||||
// Check if all dependencies are closed using Dependencies field
|
||||
// (from bd show), not DependsOn (which is empty from bd list).
|
||||
// Only "blocks" type dependencies block progress - ignore "parent-child".
|
||||
allDepsClosed := true
|
||||
for _, depID := range child.DependsOn {
|
||||
if !closedIDs[depID] {
|
||||
hasBlockingDeps := false
|
||||
var deps []beads.IssueDep
|
||||
if step != nil {
|
||||
deps = step.Dependencies
|
||||
}
|
||||
for _, dep := range deps {
|
||||
if dep.DependencyType != "blocks" {
|
||||
continue // Skip parent-child and other non-blocking relationships
|
||||
}
|
||||
hasBlockingDeps = true
|
||||
if !closedIDs[dep.ID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(child.DependsOn) == 0 || allDepsClosed {
|
||||
if !hasBlockingDeps || allDepsClosed {
|
||||
progress.ReadySteps = append(progress.ReadySteps, child.ID)
|
||||
} else {
|
||||
progress.BlockedSteps = append(progress.BlockedSteps, child.ID)
|
||||
@@ -704,6 +789,11 @@ func runMoleculeCurrent(cmd *cobra.Command, args []string) error {
|
||||
// not the agent from the GT_ROLE env var (which might be different if
|
||||
// we cd'd into another rig's crew/polecat directory)
|
||||
roleCtx = detectRole(cwd, townRoot)
|
||||
if roleCtx.Role == RoleUnknown {
|
||||
// Fall back to GT_ROLE when cwd doesn't identify an agent
|
||||
// (e.g., at rig root like ~/gt/beads instead of ~/gt/beads/witness)
|
||||
roleCtx, _ = GetRoleWithContext(cwd, townRoot)
|
||||
}
|
||||
target = buildAgentIdentity(roleCtx)
|
||||
if target == "" {
|
||||
return fmt.Errorf("cannot determine agent identity (role: %s)", roleCtx.Role)
|
||||
@@ -774,10 +864,10 @@ func runMoleculeCurrent(cmd *cobra.Command, args []string) error {
|
||||
|
||||
info.StepsTotal = len(children)
|
||||
|
||||
// Build set of closed issue IDs for dependency checking
|
||||
// Build set of closed issue IDs and collect open step IDs for dependency checking
|
||||
closedIDs := make(map[string]bool)
|
||||
var inProgressSteps []*beads.Issue
|
||||
var readySteps []*beads.Issue
|
||||
var openStepIDs []string
|
||||
|
||||
for _, child := range children {
|
||||
switch child.Status {
|
||||
@@ -786,23 +876,47 @@ func runMoleculeCurrent(cmd *cobra.Command, args []string) error {
|
||||
closedIDs[child.ID] = true
|
||||
case "in_progress":
|
||||
inProgressSteps = append(inProgressSteps, child)
|
||||
case "open":
|
||||
openStepIDs = append(openStepIDs, child.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch full details for open steps to get dependency info.
|
||||
// bd list doesn't return dependencies, but bd show does.
|
||||
var openStepsMap map[string]*beads.Issue
|
||||
if len(openStepIDs) > 0 {
|
||||
openStepsMap, _ = b.ShowMultiple(openStepIDs)
|
||||
if openStepsMap == nil {
|
||||
openStepsMap = make(map[string]*beads.Issue)
|
||||
}
|
||||
}
|
||||
|
||||
// Find ready steps (open with all deps closed)
|
||||
for _, child := range children {
|
||||
if child.Status == "open" {
|
||||
allDepsClosed := true
|
||||
for _, depID := range child.DependsOn {
|
||||
if !closedIDs[depID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
var readySteps []*beads.Issue
|
||||
for _, stepID := range openStepIDs {
|
||||
step := openStepsMap[stepID]
|
||||
if step == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check dependencies using Dependencies field (from bd show),
|
||||
// not DependsOn (which is empty from bd list).
|
||||
// Only "blocks" type dependencies block progress - ignore "parent-child".
|
||||
allDepsClosed := true
|
||||
hasBlockingDeps := false
|
||||
for _, dep := range step.Dependencies {
|
||||
if dep.DependencyType != "blocks" {
|
||||
continue // Skip parent-child and other non-blocking relationships
|
||||
}
|
||||
if len(child.DependsOn) == 0 || allDepsClosed {
|
||||
readySteps = append(readySteps, child)
|
||||
hasBlockingDeps = true
|
||||
if !closedIDs[dep.ID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !hasBlockingDeps || allDepsClosed {
|
||||
readySteps = append(readySteps, step)
|
||||
}
|
||||
}
|
||||
|
||||
// Determine current step and status
|
||||
@@ -873,13 +987,9 @@ func outputMoleculeCurrent(info MoleculeCurrentInfo) error {
|
||||
}
|
||||
|
||||
// getGitRootForMolStatus returns the git root for hook file lookup.
|
||||
// Uses cached value to avoid repeated git subprocess calls.
|
||||
func getGitRootForMolStatus() (string, error) {
|
||||
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
return git.RepoRoot()
|
||||
}
|
||||
|
||||
// isTownLevelRole returns true if the agent ID is a town-level role.
|
||||
|
||||
@@ -53,13 +53,13 @@ func init() {
|
||||
|
||||
// StepDoneResult is the result of a step done operation.
|
||||
type StepDoneResult struct {
|
||||
StepID string `json:"step_id"`
|
||||
MoleculeID string `json:"molecule_id"`
|
||||
StepClosed bool `json:"step_closed"`
|
||||
NextStepID string `json:"next_step_id,omitempty"`
|
||||
StepID string `json:"step_id"`
|
||||
MoleculeID string `json:"molecule_id"`
|
||||
StepClosed bool `json:"step_closed"`
|
||||
NextStepID string `json:"next_step_id,omitempty"`
|
||||
NextStepTitle string `json:"next_step_title,omitempty"`
|
||||
Complete bool `json:"complete"`
|
||||
Action string `json:"action"` // "continue", "done", "no_more_ready"
|
||||
Complete bool `json:"complete"`
|
||||
Action string `json:"action"` // "continue", "done", "no_more_ready"
|
||||
}
|
||||
|
||||
func runMoleculeStepDone(cmd *cobra.Command, args []string) error {
|
||||
@@ -162,9 +162,10 @@ func runMoleculeStepDone(cmd *cobra.Command, args []string) error {
|
||||
// extractMoleculeIDFromStep extracts the molecule ID from a step ID.
|
||||
// Step IDs have format: mol-id.N where N is the step number.
|
||||
// Examples:
|
||||
// gt-abc.1 -> gt-abc
|
||||
// gt-xyz.3 -> gt-xyz
|
||||
// bd-mol-abc.2 -> bd-mol-abc
|
||||
//
|
||||
// gt-abc.1 -> gt-abc
|
||||
// gt-xyz.3 -> gt-xyz
|
||||
// bd-mol-abc.2 -> bd-mol-abc
|
||||
func extractMoleculeIDFromStep(stepID string) string {
|
||||
// Find the last dot
|
||||
lastDot := strings.LastIndex(stepID, ".")
|
||||
@@ -205,11 +206,11 @@ func findNextReadyStep(b *beads.Beads, moleculeID string) (*beads.Issue, bool, e
|
||||
return nil, true, nil // No steps = complete
|
||||
}
|
||||
|
||||
// Build set of closed step IDs and collect open steps
|
||||
// Build set of closed step IDs and collect open step IDs
|
||||
// Note: "open" means not started. "in_progress" means someone's working on it.
|
||||
// We only consider "open" steps as candidates for the next step.
|
||||
closedIDs := make(map[string]bool)
|
||||
var openSteps []*beads.Issue
|
||||
var openStepIDs []string
|
||||
hasNonClosedSteps := false
|
||||
|
||||
for _, child := range children {
|
||||
@@ -217,7 +218,7 @@ func findNextReadyStep(b *beads.Beads, moleculeID string) (*beads.Issue, bool, e
|
||||
case "closed":
|
||||
closedIDs[child.ID] = true
|
||||
case "open":
|
||||
openSteps = append(openSteps, child)
|
||||
openStepIDs = append(openStepIDs, child.ID)
|
||||
hasNonClosedSteps = true
|
||||
default:
|
||||
// in_progress or other status - not closed, not available
|
||||
@@ -230,17 +231,42 @@ func findNextReadyStep(b *beads.Beads, moleculeID string) (*beads.Issue, bool, e
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
// No open steps to check
|
||||
if len(openStepIDs) == 0 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// Fetch full details for open steps to get dependency info.
|
||||
// bd list doesn't return dependencies, but bd show does.
|
||||
openStepsMap, err := b.ShowMultiple(openStepIDs)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("fetching step details: %w", err)
|
||||
}
|
||||
|
||||
// Find ready steps (open steps with all dependencies closed)
|
||||
for _, step := range openSteps {
|
||||
for _, stepID := range openStepIDs {
|
||||
step, ok := openStepsMap[stepID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check dependencies using the Dependencies field (from bd show),
|
||||
// not DependsOn (which is empty from bd list).
|
||||
// Only "blocks" type dependencies block progress - ignore "parent-child".
|
||||
allDepsClosed := true
|
||||
for _, depID := range step.DependsOn {
|
||||
if !closedIDs[depID] {
|
||||
hasBlockingDeps := false
|
||||
for _, dep := range step.Dependencies {
|
||||
if dep.DependencyType != "blocks" {
|
||||
continue // Skip parent-child and other non-blocking relationships
|
||||
}
|
||||
hasBlockingDeps = true
|
||||
if !closedIDs[dep.ID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(step.DependsOn) == 0 || allDepsClosed {
|
||||
if !hasBlockingDeps || allDepsClosed {
|
||||
return step, false, nil
|
||||
}
|
||||
}
|
||||
@@ -363,14 +389,26 @@ func handleMoleculeComplete(cwd, townRoot, moleculeID string, dryRun bool) error
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
fmt.Printf("[dry-run] Would close child steps of %s\n", moleculeID)
|
||||
fmt.Printf("[dry-run] Would unpin work for %s\n", agentID)
|
||||
fmt.Printf("[dry-run] Would send POLECAT_DONE to witness\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unpin the molecule bead (set status to open, will be closed by gt done or manually)
|
||||
// BUG FIX (gt-zbnr): Close child steps before unpinning/completing.
|
||||
// Deacon patrol molecules have child step wisps that were being orphaned
|
||||
// when the patrol completed. Now we cascade-close all descendants first.
|
||||
workDir, err := findLocalBeadsDir()
|
||||
if err == nil {
|
||||
b := beads.New(workDir)
|
||||
childrenClosed := closeDescendants(b, moleculeID)
|
||||
if childrenClosed > 0 {
|
||||
fmt.Printf("%s Closed %d child step issues\n", style.Bold.Render("✓"), childrenClosed)
|
||||
}
|
||||
}
|
||||
|
||||
// Unpin the molecule bead (set status to open, will be closed by gt done or manually)
|
||||
if workDir, err := findLocalBeadsDir(); err == nil {
|
||||
b := beads.New(workDir)
|
||||
pinnedBeads, err := b.List(beads.ListOptions{
|
||||
Status: beads.StatusPinned,
|
||||
|
||||
@@ -79,7 +79,10 @@ func TestExtractMoleculeIDFromStep(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// mockBeadsForStep extends mockBeads with parent filtering for step tests
|
||||
// mockBeadsForStep extends mockBeads with parent filtering for step tests.
|
||||
// It simulates the real bd behavior where:
|
||||
// - List() returns issues with DependsOn empty (bd list doesn't return deps)
|
||||
// - Show()/ShowMultiple() returns issues with Dependencies populated (bd show does)
|
||||
type mockBeadsForStep struct {
|
||||
issues map[string]*beads.Issue
|
||||
}
|
||||
@@ -101,6 +104,19 @@ func (m *mockBeadsForStep) Show(id string) (*beads.Issue, error) {
|
||||
return nil, beads.ErrNotFound
|
||||
}
|
||||
|
||||
// ShowMultiple simulates bd show with multiple IDs - returns full issue data including Dependencies
|
||||
func (m *mockBeadsForStep) ShowMultiple(ids []string) (map[string]*beads.Issue, error) {
|
||||
result := make(map[string]*beads.Issue)
|
||||
for _, id := range ids {
|
||||
if issue, ok := m.issues[id]; ok {
|
||||
result[id] = issue
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// List simulates bd list behavior - returns issues but with DependsOn EMPTY.
|
||||
// This is the key behavior that caused the bug: bd list doesn't return dependency info.
|
||||
func (m *mockBeadsForStep) List(opts beads.ListOptions) ([]*beads.Issue, error) {
|
||||
var result []*beads.Issue
|
||||
for _, issue := range m.issues {
|
||||
@@ -112,7 +128,11 @@ func (m *mockBeadsForStep) List(opts beads.ListOptions) ([]*beads.Issue, error)
|
||||
if opts.Status != "" && opts.Status != "all" && issue.Status != opts.Status {
|
||||
continue
|
||||
}
|
||||
result = append(result, issue)
|
||||
// CRITICAL: Simulate bd list behavior - DependsOn is NOT populated
|
||||
// Create a copy with empty DependsOn to simulate real bd list output
|
||||
issueCopy := *issue
|
||||
issueCopy.DependsOn = nil // bd list doesn't return this
|
||||
result = append(result, &issueCopy)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
@@ -128,19 +148,34 @@ func (m *mockBeadsForStep) Close(ids ...string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeStepIssue creates a test step issue
|
||||
// makeStepIssue creates a test step issue with both DependsOn and Dependencies set.
|
||||
// In real usage:
|
||||
// - bd list returns issues with DependsOn empty
|
||||
// - bd show returns issues with Dependencies populated (with DependencyType)
|
||||
// The mock simulates this: List() clears DependsOn, Show() returns the full issue.
|
||||
func makeStepIssue(id, title, parent, status string, dependsOn []string) *beads.Issue {
|
||||
return &beads.Issue{
|
||||
issue := &beads.Issue{
|
||||
ID: id,
|
||||
Title: title,
|
||||
Type: "task",
|
||||
Status: status,
|
||||
Priority: 2,
|
||||
Parent: parent,
|
||||
DependsOn: dependsOn,
|
||||
DependsOn: dependsOn, // This gets cleared by mock List() to simulate bd list
|
||||
CreatedAt: "2025-01-01T12:00:00Z",
|
||||
UpdatedAt: "2025-01-01T12:00:00Z",
|
||||
}
|
||||
// Also set Dependencies (what bd show returns) for proper testing.
|
||||
// Use "blocks" dependency type since that's what formula instantiation creates
|
||||
// for inter-step dependencies (vs "parent-child" for parent relationships).
|
||||
for _, depID := range dependsOn {
|
||||
issue.Dependencies = append(issue.Dependencies, beads.IssueDep{
|
||||
ID: depID,
|
||||
Title: "Dependency " + depID,
|
||||
DependencyType: "blocks", // Only "blocks" deps should block progress
|
||||
})
|
||||
}
|
||||
return issue
|
||||
}
|
||||
|
||||
func TestFindNextReadyStep(t *testing.T) {
|
||||
@@ -232,24 +267,22 @@ func TestFindNextReadyStep(t *testing.T) {
|
||||
m := newMockBeadsForStep()
|
||||
tt.setupFunc(m)
|
||||
|
||||
// Create a real Beads instance but we'll use our mock
|
||||
// For now, we test the logic by calling the actual function with mock data
|
||||
// This requires refactoring findNextReadyStep to accept an interface
|
||||
// For now, we'll test the logic inline
|
||||
// Test the FIXED algorithm that uses ShowMultiple for dependency info
|
||||
// (simulating the real findNextReadyStep behavior after the fix)
|
||||
|
||||
// Get children from mock
|
||||
// Get children from mock (DependsOn will be empty - simulating bd list)
|
||||
children, _ := m.List(beads.ListOptions{Parent: tt.moleculeID, Status: "all"})
|
||||
|
||||
// Build closed IDs set - only "open" steps are candidates
|
||||
// Build closed IDs set and collect open step IDs
|
||||
closedIDs := make(map[string]bool)
|
||||
var openSteps []*beads.Issue
|
||||
var openStepIDs []string
|
||||
hasNonClosedSteps := false
|
||||
for _, child := range children {
|
||||
switch child.Status {
|
||||
case "closed":
|
||||
closedIDs[child.ID] = true
|
||||
case "open":
|
||||
openSteps = append(openSteps, child)
|
||||
openStepIDs = append(openStepIDs, child.ID)
|
||||
hasNonClosedSteps = true
|
||||
default:
|
||||
// in_progress or other - not closed, not available
|
||||
@@ -268,17 +301,32 @@ func TestFindNextReadyStep(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
// Find ready step
|
||||
// Fetch full details for open steps (Dependencies will be populated)
|
||||
openStepsMap, _ := m.ShowMultiple(openStepIDs)
|
||||
|
||||
// Find ready step using Dependencies (not DependsOn!)
|
||||
// Only "blocks" type dependencies block progress - ignore "parent-child".
|
||||
var readyStep *beads.Issue
|
||||
for _, step := range openSteps {
|
||||
for _, stepID := range openStepIDs {
|
||||
step := openStepsMap[stepID]
|
||||
if step == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Use Dependencies (from bd show), NOT DependsOn (empty from bd list)
|
||||
allDepsClosed := true
|
||||
for _, depID := range step.DependsOn {
|
||||
if !closedIDs[depID] {
|
||||
hasBlockingDeps := false
|
||||
for _, dep := range step.Dependencies {
|
||||
if dep.DependencyType != "blocks" {
|
||||
continue // Skip parent-child and other non-blocking relationships
|
||||
}
|
||||
hasBlockingDeps = true
|
||||
if !closedIDs[dep.ID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(step.DependsOn) == 0 || allDepsClosed {
|
||||
if !hasBlockingDeps || allDepsClosed {
|
||||
readyStep = step
|
||||
break
|
||||
}
|
||||
@@ -372,18 +420,18 @@ func TestStepDoneScenarios(t *testing.T) {
|
||||
t.Fatalf("failed to close step: %v", err)
|
||||
}
|
||||
|
||||
// Now find next ready step
|
||||
// Now find next ready step using the FIXED algorithm
|
||||
children, _ := m.List(beads.ListOptions{Parent: moleculeID, Status: "all"})
|
||||
|
||||
closedIDs := make(map[string]bool)
|
||||
var openSteps []*beads.Issue
|
||||
var openStepIDs []string
|
||||
hasNonClosedSteps := false
|
||||
for _, child := range children {
|
||||
switch child.Status {
|
||||
case "closed":
|
||||
closedIDs[child.ID] = true
|
||||
case "open":
|
||||
openSteps = append(openSteps, child)
|
||||
openStepIDs = append(openStepIDs, child.ID)
|
||||
hasNonClosedSteps = true
|
||||
default:
|
||||
// in_progress or other - not closed, not available
|
||||
@@ -399,17 +447,32 @@ func TestStepDoneScenarios(t *testing.T) {
|
||||
if allComplete {
|
||||
action = "done"
|
||||
} else {
|
||||
// Find ready step
|
||||
// Fetch full details for open steps (Dependencies will be populated)
|
||||
openStepsMap, _ := m.ShowMultiple(openStepIDs)
|
||||
|
||||
// Find ready step using Dependencies (not DependsOn!)
|
||||
// Only "blocks" type dependencies block progress - ignore "parent-child".
|
||||
var readyStep *beads.Issue
|
||||
for _, step := range openSteps {
|
||||
for _, stepID := range openStepIDs {
|
||||
step := openStepsMap[stepID]
|
||||
if step == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Use Dependencies (from bd show), NOT DependsOn (empty from bd list)
|
||||
allDepsClosed := true
|
||||
for _, depID := range step.DependsOn {
|
||||
if !closedIDs[depID] {
|
||||
hasBlockingDeps := false
|
||||
for _, dep := range step.Dependencies {
|
||||
if dep.DependencyType != "blocks" {
|
||||
continue // Skip parent-child and other non-blocking relationships
|
||||
}
|
||||
hasBlockingDeps = true
|
||||
if !closedIDs[dep.ID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(step.DependsOn) == 0 || allDepsClosed {
|
||||
if !hasBlockingDeps || allDepsClosed {
|
||||
readyStep = step
|
||||
break
|
||||
}
|
||||
@@ -433,3 +496,224 @@ func TestStepDoneScenarios(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindNextReadyStepWithBdListBehavior tests the fix for the bug where
|
||||
// bd list doesn't return dependency info (DependsOn is always empty), but
|
||||
// bd show returns Dependencies. The old code checked DependsOn (always empty),
|
||||
// so all open steps looked "ready" even when blocked.
|
||||
//
|
||||
// This test simulates real bd behavior and verifies the fix works correctly.
|
||||
func TestFindNextReadyStepWithBdListBehavior(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
moleculeID string
|
||||
setupFunc func(*mockBeadsForStep)
|
||||
wantStepID string // Expected ready step ID, or "" if none ready
|
||||
wantComplete bool
|
||||
wantBlocked bool // True if all remaining steps are blocked (none ready)
|
||||
}{
|
||||
{
|
||||
name: "blocked step should NOT be ready - dependency not closed",
|
||||
moleculeID: "gt-mol",
|
||||
setupFunc: func(m *mockBeadsForStep) {
|
||||
// Step 1 is open (first step, no deps)
|
||||
m.addIssue(makeStepIssue("gt-mol.1", "Step 1", "gt-mol", "open", nil))
|
||||
// Step 2 depends on Step 1, which is NOT closed
|
||||
// BUG: Old code would mark Step 2 as ready because DependsOn is empty from bd list
|
||||
// FIX: New code uses Dependencies from bd show
|
||||
m.addIssue(makeStepIssue("gt-mol.2", "Step 2", "gt-mol", "open", []string{"gt-mol.1"}))
|
||||
},
|
||||
wantStepID: "gt-mol.1", // Only step 1 should be ready
|
||||
wantComplete: false,
|
||||
},
|
||||
{
|
||||
name: "step becomes ready when dependency closes",
|
||||
moleculeID: "gt-mol",
|
||||
setupFunc: func(m *mockBeadsForStep) {
|
||||
m.addIssue(makeStepIssue("gt-mol.1", "Step 1", "gt-mol", "closed", nil))
|
||||
m.addIssue(makeStepIssue("gt-mol.2", "Step 2", "gt-mol", "open", []string{"gt-mol.1"}))
|
||||
},
|
||||
wantStepID: "gt-mol.2", // Step 2 is ready now that step 1 is closed
|
||||
wantComplete: false,
|
||||
},
|
||||
{
|
||||
name: "multiple blocked steps - none ready",
|
||||
moleculeID: "gt-mol",
|
||||
setupFunc: func(m *mockBeadsForStep) {
|
||||
// Step 1 is in_progress (not closed)
|
||||
m.addIssue(makeStepIssue("gt-mol.1", "Step 1", "gt-mol", "in_progress", nil))
|
||||
// Steps 2 and 3 both depend on step 1
|
||||
m.addIssue(makeStepIssue("gt-mol.2", "Step 2", "gt-mol", "open", []string{"gt-mol.1"}))
|
||||
m.addIssue(makeStepIssue("gt-mol.3", "Step 3", "gt-mol", "open", []string{"gt-mol.1"}))
|
||||
},
|
||||
wantBlocked: true, // No open steps are ready (all blocked by step 1)
|
||||
wantComplete: false,
|
||||
},
|
||||
{
|
||||
name: "diamond dependency - synthesis blocked until both complete",
|
||||
moleculeID: "gt-mol",
|
||||
setupFunc: func(m *mockBeadsForStep) {
|
||||
m.addIssue(makeStepIssue("gt-mol.1", "Step A", "gt-mol", "closed", nil))
|
||||
m.addIssue(makeStepIssue("gt-mol.2", "Step B", "gt-mol", "open", nil))
|
||||
// Synthesis depends on BOTH A and B
|
||||
m.addIssue(makeStepIssue("gt-mol.3", "Synthesis", "gt-mol", "open", []string{"gt-mol.1", "gt-mol.2"}))
|
||||
},
|
||||
wantStepID: "gt-mol.2", // B is ready (no deps), synthesis is blocked
|
||||
wantComplete: false,
|
||||
},
|
||||
{
|
||||
name: "diamond dependency - synthesis ready when both complete",
|
||||
moleculeID: "gt-mol",
|
||||
setupFunc: func(m *mockBeadsForStep) {
|
||||
m.addIssue(makeStepIssue("gt-mol.1", "Step A", "gt-mol", "closed", nil))
|
||||
m.addIssue(makeStepIssue("gt-mol.2", "Step B", "gt-mol", "closed", nil))
|
||||
// Synthesis depends on BOTH A and B, both are now closed
|
||||
m.addIssue(makeStepIssue("gt-mol.3", "Synthesis", "gt-mol", "open", []string{"gt-mol.1", "gt-mol.2"}))
|
||||
},
|
||||
wantStepID: "gt-mol.3", // Synthesis is now ready
|
||||
wantComplete: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
m := newMockBeadsForStep()
|
||||
tt.setupFunc(m)
|
||||
|
||||
// Simulate the FIXED algorithm that uses ShowMultiple for dependency info
|
||||
// Step 1: List children (DependsOn will be empty - simulating bd list)
|
||||
children, _ := m.List(beads.ListOptions{Parent: tt.moleculeID, Status: "all"})
|
||||
|
||||
// Build closed IDs and collect open step IDs
|
||||
closedIDs := make(map[string]bool)
|
||||
var openStepIDs []string
|
||||
hasNonClosedSteps := false
|
||||
|
||||
for _, child := range children {
|
||||
switch child.Status {
|
||||
case "closed":
|
||||
closedIDs[child.ID] = true
|
||||
case "open":
|
||||
openStepIDs = append(openStepIDs, child.ID)
|
||||
hasNonClosedSteps = true
|
||||
default:
|
||||
hasNonClosedSteps = true
|
||||
}
|
||||
}
|
||||
|
||||
allComplete := !hasNonClosedSteps
|
||||
if allComplete != tt.wantComplete {
|
||||
t.Errorf("allComplete = %v, want %v", allComplete, tt.wantComplete)
|
||||
}
|
||||
|
||||
if tt.wantComplete {
|
||||
return
|
||||
}
|
||||
|
||||
// Step 2: Fetch full details for open steps (Dependencies will be populated)
|
||||
openStepsMap, _ := m.ShowMultiple(openStepIDs)
|
||||
|
||||
// Step 3: Find ready step using Dependencies (not DependsOn!)
|
||||
// Only "blocks" type dependencies block progress - ignore "parent-child".
|
||||
var readyStep *beads.Issue
|
||||
for _, stepID := range openStepIDs {
|
||||
step := openStepsMap[stepID]
|
||||
if step == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Use Dependencies (from bd show), NOT DependsOn (empty from bd list)
|
||||
allDepsClosed := true
|
||||
hasBlockingDeps := false
|
||||
for _, dep := range step.Dependencies {
|
||||
if dep.DependencyType != "blocks" {
|
||||
continue // Skip parent-child and other non-blocking relationships
|
||||
}
|
||||
hasBlockingDeps = true
|
||||
if !closedIDs[dep.ID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !hasBlockingDeps || allDepsClosed {
|
||||
readyStep = step
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Verify results
|
||||
if tt.wantBlocked {
|
||||
if readyStep != nil {
|
||||
t.Errorf("expected no ready steps (all blocked), got %s", readyStep.ID)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if tt.wantStepID == "" {
|
||||
if readyStep != nil {
|
||||
t.Errorf("expected no ready step, got %s", readyStep.ID)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if readyStep == nil {
|
||||
t.Errorf("expected ready step %s, got nil", tt.wantStepID)
|
||||
return
|
||||
}
|
||||
|
||||
if readyStep.ID != tt.wantStepID {
|
||||
t.Errorf("ready step = %s, want %s", readyStep.ID, tt.wantStepID)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestOldBuggyBehavior demonstrates what the old buggy code would have done.
|
||||
// With the old code, since DependsOn was always empty from bd list,
|
||||
// ALL open steps would appear "ready" regardless of actual dependencies.
|
||||
// This test verifies the bug exists when using the old approach.
|
||||
func TestOldBuggyBehavior(t *testing.T) {
|
||||
m := newMockBeadsForStep()
|
||||
|
||||
// Setup: Step 2 depends on Step 1, but Step 1 is NOT closed
|
||||
m.addIssue(makeStepIssue("gt-mol.1", "Step 1", "gt-mol", "open", nil))
|
||||
m.addIssue(makeStepIssue("gt-mol.2", "Step 2", "gt-mol", "open", []string{"gt-mol.1"}))
|
||||
|
||||
// Get children via List (simulates bd list - DependsOn is empty)
|
||||
children, _ := m.List(beads.ListOptions{Parent: "gt-mol", Status: "all"})
|
||||
|
||||
// OLD BUGGY CODE: Check DependsOn (which is empty from bd list)
|
||||
closedIDs := make(map[string]bool)
|
||||
var openSteps []*beads.Issue
|
||||
for _, child := range children {
|
||||
if child.Status == "closed" {
|
||||
closedIDs[child.ID] = true
|
||||
} else if child.Status == "open" {
|
||||
openSteps = append(openSteps, child)
|
||||
}
|
||||
}
|
||||
|
||||
// Count how many steps the OLD buggy code thinks are "ready"
|
||||
readyCount := 0
|
||||
for _, step := range openSteps {
|
||||
allDepsClosed := true
|
||||
for _, depID := range step.DependsOn { // BUG: This is always empty!
|
||||
if !closedIDs[depID] {
|
||||
allDepsClosed = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(step.DependsOn) == 0 || allDepsClosed { // Always true since DependsOn is empty
|
||||
readyCount++
|
||||
}
|
||||
}
|
||||
|
||||
// The bug: OLD code thinks BOTH steps are ready (2 ready)
|
||||
// Correct behavior: Only Step 1 should be ready (1 ready)
|
||||
if readyCount != 2 {
|
||||
t.Errorf("Expected old buggy code to mark 2 steps as ready, got %d", readyCount)
|
||||
}
|
||||
|
||||
t.Log("Old buggy behavior confirmed: both steps marked ready when only step 1 should be")
|
||||
}
|
||||
|
||||
@@ -83,8 +83,25 @@ func runNamepool(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("not in a rig directory")
|
||||
}
|
||||
|
||||
// Load pool
|
||||
pool := polecat.NewNamePool(rigPath, rigName)
|
||||
// Load settings for namepool config
|
||||
settingsPath := filepath.Join(rigPath, "settings", "config.json")
|
||||
var pool *polecat.NamePool
|
||||
|
||||
settings, err := config.LoadRigSettings(settingsPath)
|
||||
if err == nil && settings.Namepool != nil {
|
||||
// Use configured namepool settings
|
||||
pool = polecat.NewNamePoolWithConfig(
|
||||
rigPath,
|
||||
rigName,
|
||||
settings.Namepool.Style,
|
||||
settings.Namepool.Names,
|
||||
settings.Namepool.MaxBeforeNumbering,
|
||||
)
|
||||
} else {
|
||||
// Use defaults
|
||||
pool = polecat.NewNamePool(rigPath, rigName)
|
||||
}
|
||||
|
||||
if err := pool.Load(); err != nil {
|
||||
// Pool doesn't exist yet, show defaults
|
||||
fmt.Printf("Rig: %s\n", rigName)
|
||||
@@ -104,9 +121,8 @@ func runNamepool(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("In use: %s\n", strings.Join(activeNames, ", "))
|
||||
}
|
||||
|
||||
// Check if configured
|
||||
settingsPath := filepath.Join(rigPath, "settings", "config.json")
|
||||
if settings, err := config.LoadRigSettings(settingsPath); err == nil && settings.Namepool != nil {
|
||||
// Check if configured (already loaded above)
|
||||
if settings.Namepool != nil {
|
||||
fmt.Printf("(configured in settings/config.json)\n")
|
||||
}
|
||||
|
||||
|
||||
@@ -433,19 +433,10 @@ func runPluginRun(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s\n", style.Bold.Render("Instructions:"))
|
||||
fmt.Println(p.Instructions)
|
||||
|
||||
// Record the run
|
||||
recorder := plugin.NewRecorder(townRoot)
|
||||
beadID, err := recorder.RecordRun(plugin.PluginRunRecord{
|
||||
PluginName: p.Name,
|
||||
RigName: p.RigName,
|
||||
Result: plugin.ResultSuccess, // Manual runs are marked success
|
||||
Body: "Manual run via gt plugin run",
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to record run: %v\n", err)
|
||||
} else {
|
||||
fmt.Printf("\n%s Recorded run: %s\n", style.Dim.Render("●"), beadID)
|
||||
}
|
||||
// NOTE: We intentionally do NOT record a run here. This command only prints
|
||||
// instructions for an agent/user to execute - it doesn't actually run the plugin.
|
||||
// Recording "success" here would poison the cooldown gate, preventing real executions.
|
||||
// The actual execution (by whatever follows these instructions) should record the result.
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/steveyegge/gastown/internal/runtime"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/util"
|
||||
)
|
||||
|
||||
// Polecat command flags
|
||||
@@ -109,11 +110,11 @@ Examples:
|
||||
|
||||
var polecatSyncCmd = &cobra.Command{
|
||||
Use: "sync <rig>/<polecat>",
|
||||
Short: "Sync beads for a polecat",
|
||||
Short: "Sync beads for a polecat (deprecated with Dolt backend)",
|
||||
Long: `Sync beads for a polecat's worktree.
|
||||
|
||||
Runs 'bd sync' in the polecat's worktree to push local beads changes
|
||||
to the shared sync branch and pull remote changes.
|
||||
NOTE: With Dolt backend, beads changes are persisted immediately.
|
||||
This command is a no-op when using Dolt.
|
||||
|
||||
Use --all to sync all polecats in a rig.
|
||||
Use --from-main to only pull (no push).
|
||||
@@ -533,87 +534,9 @@ func runPolecatRemove(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
func runPolecatSync(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("rig or rig/polecat address required")
|
||||
}
|
||||
|
||||
// Parse address - could be "rig" or "rig/polecat"
|
||||
rigName, polecatName, err := parseAddress(args[0])
|
||||
if err != nil {
|
||||
// Might just be a rig name
|
||||
rigName = args[0]
|
||||
polecatName = ""
|
||||
}
|
||||
|
||||
mgr, _, err := getPolecatManager(rigName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get list of polecats to sync
|
||||
var polecatsToSync []string
|
||||
if polecatSyncAll || polecatName == "" {
|
||||
polecats, err := mgr.List()
|
||||
if err != nil {
|
||||
return fmt.Errorf("listing polecats: %w", err)
|
||||
}
|
||||
for _, p := range polecats {
|
||||
polecatsToSync = append(polecatsToSync, p.Name)
|
||||
}
|
||||
} else {
|
||||
polecatsToSync = []string{polecatName}
|
||||
}
|
||||
|
||||
if len(polecatsToSync) == 0 {
|
||||
fmt.Println("No polecats to sync.")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync each polecat
|
||||
var syncErrors []string
|
||||
for _, name := range polecatsToSync {
|
||||
// Get polecat to get correct clone path (handles old vs new structure)
|
||||
p, err := mgr.Get(name)
|
||||
if err != nil {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: %v", name, err))
|
||||
continue
|
||||
}
|
||||
|
||||
// Check directory exists
|
||||
if _, err := os.Stat(p.ClonePath); os.IsNotExist(err) {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: directory not found", name))
|
||||
continue
|
||||
}
|
||||
|
||||
// Build sync command
|
||||
syncArgs := []string{"sync"}
|
||||
if polecatSyncFromMain {
|
||||
syncArgs = append(syncArgs, "--from-main")
|
||||
}
|
||||
|
||||
fmt.Printf("Syncing %s/%s...\n", rigName, name)
|
||||
|
||||
syncCmd := exec.Command("bd", syncArgs...)
|
||||
syncCmd.Dir = p.ClonePath
|
||||
output, err := syncCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
syncErrors = append(syncErrors, fmt.Sprintf("%s: %v", name, err))
|
||||
if len(output) > 0 {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(string(output)))
|
||||
}
|
||||
} else {
|
||||
fmt.Printf(" %s\n", style.Success.Render("✓ synced"))
|
||||
}
|
||||
}
|
||||
|
||||
if len(syncErrors) > 0 {
|
||||
fmt.Printf("\n%s Some syncs failed:\n", style.Warning.Render("Warning:"))
|
||||
for _, e := range syncErrors {
|
||||
fmt.Printf(" - %s\n", e)
|
||||
}
|
||||
return fmt.Errorf("%d sync(s) failed", len(syncErrors))
|
||||
}
|
||||
|
||||
// With Dolt backend, beads changes are persisted immediately - no sync needed
|
||||
fmt.Println("Note: With Dolt backend, beads changes are persisted immediately.")
|
||||
fmt.Println("No sync step is required.")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1268,6 +1191,12 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("\n%s Nuked %d polecat(s).\n", style.SuccessPrefix, nuked)
|
||||
}
|
||||
|
||||
// Final cleanup: Kill any orphaned Claude processes that escaped the session termination.
|
||||
// This catches processes that called setsid() or were reparented during session shutdown.
|
||||
if !polecatNukeDryRun {
|
||||
cleanupOrphanedProcesses()
|
||||
}
|
||||
|
||||
if len(nukeErrors) > 0 {
|
||||
return fmt.Errorf("%d nuke(s) failed", len(nukeErrors))
|
||||
}
|
||||
@@ -1275,6 +1204,39 @@ func runPolecatNuke(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupOrphanedProcesses kills Claude processes that survived session termination.
|
||||
// Uses aggressive zombie detection via tmux session verification.
|
||||
func cleanupOrphanedProcesses() {
|
||||
results, err := util.CleanupZombieClaudeProcesses()
|
||||
if err != nil {
|
||||
// Non-fatal: log and continue
|
||||
fmt.Printf(" %s orphan cleanup check failed: %v\n", style.Dim.Render("○"), err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Report what was cleaned up
|
||||
var killed, escalated int
|
||||
for _, r := range results {
|
||||
switch r.Signal {
|
||||
case "SIGTERM", "SIGKILL":
|
||||
killed++
|
||||
case "UNKILLABLE":
|
||||
escalated++
|
||||
}
|
||||
}
|
||||
|
||||
if killed > 0 {
|
||||
fmt.Printf(" %s cleaned up %d orphaned process(es)\n", style.Success.Render("✓"), killed)
|
||||
}
|
||||
if escalated > 0 {
|
||||
fmt.Printf(" %s %d process(es) survived SIGKILL (unkillable)\n", style.Warning.Render("⚠"), escalated)
|
||||
}
|
||||
}
|
||||
|
||||
func runPolecatStale(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
mgr, r, err := getPolecatManager(rigName)
|
||||
|
||||
@@ -2,7 +2,6 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -12,6 +11,7 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/lock"
|
||||
"github.com/steveyegge/gastown/internal/state"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
@@ -340,29 +340,13 @@ func detectRole(cwd, townRoot string) RoleInfo {
|
||||
return ctx
|
||||
}
|
||||
|
||||
// runBdPrime runs `bd prime` and outputs the result.
|
||||
// This provides beads workflow context to the agent.
|
||||
// runBdPrime outputs beads workflow context directly.
|
||||
// This replaces the bd subprocess call to eliminate ~40ms startup overhead.
|
||||
func runBdPrime(workDir string) {
|
||||
cmd := exec.Command("bd", "prime")
|
||||
cmd.Dir = workDir
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Skip if bd prime fails (beads might not be available)
|
||||
// But log stderr if present for debugging
|
||||
if errMsg := strings.TrimSpace(stderr.String()); errMsg != "" {
|
||||
fmt.Fprintf(os.Stderr, "bd prime: %s\n", errMsg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
output := strings.TrimSpace(stdout.String())
|
||||
if output != "" {
|
||||
content := beads.GetPrimeContent(workDir)
|
||||
if content != "" {
|
||||
fmt.Println()
|
||||
fmt.Println(output)
|
||||
fmt.Println(content)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -433,6 +417,11 @@ func checkSlungWork(ctx RoleContext) bool {
|
||||
// Build the role announcement string
|
||||
roleAnnounce := buildRoleAnnouncement(ctx)
|
||||
|
||||
// Check for attached molecule FIRST - this determines the instruction flow
|
||||
// With formula-on-bead, the base bead is hooked with attached_molecule pointing to wisp.
|
||||
attachment := beads.ParseAttachmentFields(hookedBead)
|
||||
hasMolecule := attachment != nil && attachment.AttachedMolecule != ""
|
||||
|
||||
// Found hooked work! Display AUTONOMOUS MODE prominently
|
||||
fmt.Println()
|
||||
fmt.Printf("%s\n\n", style.Bold.Render("## 🚨 AUTONOMOUS WORK MODE 🚨"))
|
||||
@@ -447,14 +436,25 @@ func checkSlungWork(ctx RoleContext) bool {
|
||||
fmt.Println("a track record that proves autonomous execution works at scale.")
|
||||
fmt.Println()
|
||||
fmt.Println("1. Announce: \"" + roleAnnounce + "\" (ONE line, no elaboration)")
|
||||
fmt.Printf("2. Then IMMEDIATELY run: `bd show %s`\n", hookedBead.ID)
|
||||
fmt.Println("3. Begin execution - no waiting for user input")
|
||||
|
||||
// Instructions differ based on whether molecule is attached
|
||||
if hasMolecule {
|
||||
fmt.Println("2. This bead has an ATTACHED MOLECULE (formula workflow)")
|
||||
fmt.Println("3. Work through molecule steps in order - see CURRENT STEP below")
|
||||
fmt.Println("4. Close each step with `bd close <step-id>`, then check `bd ready`")
|
||||
} else {
|
||||
fmt.Printf("2. Then IMMEDIATELY run: `bd show %s`\n", hookedBead.ID)
|
||||
fmt.Println("3. Begin execution - no waiting for user input")
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("**DO NOT:**")
|
||||
fmt.Println("- Wait for user response after announcing")
|
||||
fmt.Println("- Ask clarifying questions")
|
||||
fmt.Println("- Describe what you're going to do")
|
||||
fmt.Println("- Check mail first (hook takes priority)")
|
||||
if hasMolecule {
|
||||
fmt.Println("- Skip molecule steps or work on the base bead directly")
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Show the hooked work details
|
||||
@@ -476,46 +476,47 @@ func checkSlungWork(ctx RoleContext) bool {
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Show bead preview using bd show
|
||||
fmt.Println("**Bead details:**")
|
||||
cmd := exec.Command("bd", "show", hookedBead.ID)
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
if errMsg := strings.TrimSpace(stderr.String()); errMsg != "" {
|
||||
fmt.Fprintf(os.Stderr, " bd show %s: %s\n", hookedBead.ID, errMsg)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, " bd show %s: %v\n", hookedBead.ID, err)
|
||||
}
|
||||
} else {
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
maxLines := 15
|
||||
if len(lines) > maxLines {
|
||||
lines = lines[:maxLines]
|
||||
lines = append(lines, "...")
|
||||
}
|
||||
for _, line := range lines {
|
||||
fmt.Printf(" %s\n", line)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Check for attached molecule and show execution prompt
|
||||
// This was missing for hooked beads (only worked for pinned beads).
|
||||
// With formula-on-bead, the base bead is hooked with attached_molecule pointing to wisp.
|
||||
attachment := beads.ParseAttachmentFields(hookedBead)
|
||||
if attachment != nil && attachment.AttachedMolecule != "" {
|
||||
fmt.Printf("%s\n\n", style.Bold.Render("## 🎯 ATTACHED MOLECULE"))
|
||||
fmt.Printf("Molecule: %s\n", attachment.AttachedMolecule)
|
||||
// If molecule attached, show molecule context prominently INSTEAD of bd show
|
||||
if hasMolecule {
|
||||
fmt.Printf("%s\n\n", style.Bold.Render("## 🧬 ATTACHED MOLECULE (FORMULA WORKFLOW)"))
|
||||
fmt.Printf("Molecule ID: %s\n", attachment.AttachedMolecule)
|
||||
if attachment.AttachedArgs != "" {
|
||||
fmt.Printf("\n%s\n", style.Bold.Render("📋 ARGS (use these to guide execution):"))
|
||||
fmt.Printf(" %s\n", attachment.AttachedArgs)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Show current step from molecule
|
||||
// Show current step from molecule - THIS IS THE PRIMARY INSTRUCTION
|
||||
showMoleculeExecutionPrompt(ctx.WorkDir, attachment.AttachedMolecule)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("%s\n", style.Bold.Render("⚠️ IMPORTANT: Follow the molecule steps above, NOT the base bead."))
|
||||
fmt.Println("The base bead is just a container. The molecule steps define your workflow.")
|
||||
} else {
|
||||
// No molecule - show bead preview using bd show
|
||||
fmt.Println("**Bead details:**")
|
||||
cmd := exec.Command("bd", "show", hookedBead.ID)
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
if errMsg := strings.TrimSpace(stderr.String()); errMsg != "" {
|
||||
fmt.Fprintf(os.Stderr, " bd show %s: %s\n", hookedBead.ID, errMsg)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, " bd show %s: %v\n", hookedBead.ID, err)
|
||||
}
|
||||
} else {
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
maxLines := 15
|
||||
if len(lines) > maxLines {
|
||||
lines = lines[:maxLines]
|
||||
lines = append(lines, "...")
|
||||
}
|
||||
for _, line := range lines {
|
||||
fmt.Printf(" %s\n", line)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
return true
|
||||
@@ -544,13 +545,9 @@ func buildRoleAnnouncement(ctx RoleContext) string {
|
||||
}
|
||||
|
||||
// getGitRoot returns the root of the current git repository.
|
||||
// Uses cached value to avoid repeated git subprocess calls.
|
||||
func getGitRoot() (string, error) {
|
||||
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
return git.RepoRoot()
|
||||
}
|
||||
|
||||
// getAgentIdentity returns the agent identity string for hook lookup.
|
||||
@@ -689,34 +686,20 @@ func ensureBeadsRedirect(ctx RoleContext) {
|
||||
|
||||
// checkPendingEscalations queries for open escalation beads and displays them prominently.
|
||||
// This is called on Mayor startup to surface issues needing human attention.
|
||||
// Uses beads package which leverages RPC when daemon is available.
|
||||
func checkPendingEscalations(ctx RoleContext) {
|
||||
// Query for open escalations using bd list with tag filter
|
||||
cmd := exec.Command("bd", "list", "--status=open", "--tag=escalation", "--json")
|
||||
cmd.Dir = ctx.WorkDir
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Query for open escalations using beads package (uses RPC when available)
|
||||
b := beads.New(ctx.WorkDir)
|
||||
escalations, err := b.List(beads.ListOptions{
|
||||
Status: "open",
|
||||
Label: "escalation",
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil || len(escalations) == 0 {
|
||||
// Silently skip - escalation check is best-effort
|
||||
return
|
||||
}
|
||||
|
||||
// Parse JSON output
|
||||
var escalations []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Priority int `json:"priority"`
|
||||
Description string `json:"description"`
|
||||
Created string `json:"created"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(stdout.Bytes(), &escalations); err != nil || len(escalations) == 0 {
|
||||
// No escalations or parse error
|
||||
return
|
||||
}
|
||||
|
||||
// Count by severity
|
||||
critical := 0
|
||||
high := 0
|
||||
|
||||
@@ -88,9 +88,9 @@ func showMoleculeExecutionPrompt(workDir, moleculeID string) {
|
||||
fmt.Println(style.Bold.Render("→ EXECUTE THIS STEP NOW."))
|
||||
fmt.Println()
|
||||
fmt.Println("When complete:")
|
||||
fmt.Printf(" 1. Close the step: bd close %s\n", step.ID)
|
||||
fmt.Println(" 2. Check for next step: bd ready")
|
||||
fmt.Println(" 3. Continue until molecule complete")
|
||||
fmt.Printf(" gt mol step done %s\n", step.ID)
|
||||
fmt.Println()
|
||||
fmt.Println("This closes the step and respawns your session with fresh context for the next step.")
|
||||
} else {
|
||||
// No next step - molecule may be complete
|
||||
fmt.Println(style.Bold.Render("✓ MOLECULE COMPLETE"))
|
||||
@@ -162,11 +162,10 @@ func outputMoleculeContext(ctx RoleContext) {
|
||||
showMoleculeProgress(b, rootID)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("**Molecule Work Loop:**")
|
||||
fmt.Println("1. Complete current step, then `bd close " + issue.ID + "`")
|
||||
fmt.Println("2. Check for next steps: `bd ready --parent " + rootID + "`")
|
||||
fmt.Println("3. Work on next ready step(s)")
|
||||
fmt.Println("4. When all steps done, run `gt done`")
|
||||
fmt.Println("**When step complete:**")
|
||||
fmt.Println(" `gt mol step done " + issue.ID + "`")
|
||||
fmt.Println()
|
||||
fmt.Println("This closes the step and respawns with fresh context for the next step.")
|
||||
break // Only show context for first molecule step found
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,6 +119,7 @@ func outputMayorContext(ctx RoleContext) {
|
||||
fmt.Println("- `gt status` - Show overall town status")
|
||||
fmt.Println("- `gt rig list` - List all rigs")
|
||||
fmt.Println("- `bd ready` - Issues ready to work")
|
||||
fmt.Println("- `bd tree <issue>` - View ancestry, siblings, dependencies")
|
||||
fmt.Println()
|
||||
fmt.Println("## Hookable Mail")
|
||||
fmt.Println("Mail can be hooked for ad-hoc instructions: `gt hook attach <mail-id>`")
|
||||
@@ -182,6 +183,7 @@ func outputPolecatContext(ctx RoleContext) {
|
||||
fmt.Println("## Key Commands")
|
||||
fmt.Println("- `gt mail inbox` - Check your inbox for work assignments")
|
||||
fmt.Println("- `bd show <issue>` - View your assigned issue")
|
||||
fmt.Println("- `bd tree <issue>` - View ancestry, siblings, dependencies")
|
||||
fmt.Println("- `bd close <issue>` - Mark issue complete")
|
||||
fmt.Println("- `gt done` - Signal work ready for merge")
|
||||
fmt.Println()
|
||||
@@ -206,6 +208,7 @@ func outputCrewContext(ctx RoleContext) {
|
||||
fmt.Println("- `gt mail inbox` - Check your inbox")
|
||||
fmt.Println("- `bd ready` - Available issues")
|
||||
fmt.Println("- `bd show <issue>` - View issue details")
|
||||
fmt.Println("- `bd tree <issue>` - View ancestry, siblings, dependencies")
|
||||
fmt.Println("- `bd close <issue>` - Mark issue complete")
|
||||
fmt.Println()
|
||||
fmt.Println("## Hookable Mail")
|
||||
|
||||
@@ -147,9 +147,9 @@ func runReady(cmd *cobra.Command, args []string) error {
|
||||
wg.Add(1)
|
||||
go func(r *rig.Rig) {
|
||||
defer wg.Done()
|
||||
// Use mayor/rig path where rig-level beads are stored
|
||||
rigBeadsPath := constants.RigMayorPath(r.Path)
|
||||
rigBeads := beads.New(rigBeadsPath)
|
||||
// Use rig root path where rig-level beads are stored
|
||||
// BeadsPath returns rig root; redirect system handles mayor/rig routing
|
||||
rigBeads := beads.New(r.BeadsPath())
|
||||
issues, err := rigBeads.Ready()
|
||||
|
||||
mu.Lock()
|
||||
@@ -159,10 +159,10 @@ func runReady(cmd *cobra.Command, args []string) error {
|
||||
src.Error = err.Error()
|
||||
} else {
|
||||
// Filter out formula scaffolds (gt-579)
|
||||
formulaNames := getFormulaNames(rigBeadsPath)
|
||||
formulaNames := getFormulaNames(r.BeadsPath())
|
||||
filtered := filterFormulaScaffolds(issues, formulaNames)
|
||||
// Defense-in-depth: also filter wisps that shouldn't appear in ready work
|
||||
wispIDs := getWispIDs(rigBeadsPath)
|
||||
wispIDs := getWispIDs(r.BeadsPath())
|
||||
src.Issues = filterWisps(filtered, wispIDs)
|
||||
}
|
||||
sources = append(sources, src)
|
||||
|
||||
@@ -505,6 +505,12 @@ func runRigRemove(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("loading rigs config: %w", err)
|
||||
}
|
||||
|
||||
// Get the rig's beads prefix before removing (needed for route cleanup)
|
||||
var beadsPrefix string
|
||||
if entry, ok := rigsConfig.Rigs[name]; ok && entry.BeadsConfig != nil {
|
||||
beadsPrefix = entry.BeadsConfig.Prefix
|
||||
}
|
||||
|
||||
// Create rig manager
|
||||
g := git.NewGit(townRoot)
|
||||
mgr := rig.NewManager(townRoot, rigsConfig, g)
|
||||
@@ -518,6 +524,14 @@ func runRigRemove(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("saving rigs config: %w", err)
|
||||
}
|
||||
|
||||
// Remove route from routes.jsonl (issue #899)
|
||||
if beadsPrefix != "" {
|
||||
if err := beads.RemoveRoute(townRoot, beadsPrefix+"-"); err != nil {
|
||||
// Non-fatal: log warning but continue
|
||||
fmt.Printf(" %s Could not remove route from routes.jsonl: %v\n", style.Warning.Render("!"), err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("%s Rig %s removed from registry\n", style.Success.Render("✓"), name)
|
||||
fmt.Printf("\nNote: Files at %s were NOT deleted.\n", filepath.Join(townRoot, name))
|
||||
fmt.Printf("To delete: %s\n", style.Dim.Render(fmt.Sprintf("rm -rf %s", filepath.Join(townRoot, name))))
|
||||
|
||||
@@ -66,6 +66,18 @@ func init() {
|
||||
func runRigDock(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
|
||||
// Check we're on main branch - docking on other branches won't persist
|
||||
branchCmd := exec.Command("git", "branch", "--show-current")
|
||||
branchOutput, err := branchCmd.Output()
|
||||
if err == nil {
|
||||
currentBranch := string(branchOutput)
|
||||
currentBranch = currentBranch[:len(currentBranch)-1] // trim newline
|
||||
if currentBranch != "main" && currentBranch != "master" {
|
||||
return fmt.Errorf("cannot dock: must be on main branch (currently on %s)\n"+
|
||||
"Docking on other branches won't persist. Run: git checkout main", currentBranch)
|
||||
}
|
||||
}
|
||||
|
||||
// Get rig
|
||||
_, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
@@ -144,21 +156,12 @@ func runRigDock(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("setting docked label: %w", err)
|
||||
}
|
||||
|
||||
// Sync beads to propagate to other clones
|
||||
fmt.Printf(" Syncing beads...\n")
|
||||
syncCmd := exec.Command("bd", "sync")
|
||||
syncCmd.Dir = r.BeadsPath()
|
||||
if output, err := syncCmd.CombinedOutput(); err != nil {
|
||||
fmt.Printf(" %s bd sync warning: %v\n%s", style.Warning.Render("!"), err, string(output))
|
||||
}
|
||||
|
||||
// Output
|
||||
fmt.Printf("%s Rig %s docked (global)\n", style.Success.Render("✓"), rigName)
|
||||
fmt.Printf(" Label added: %s\n", RigDockedLabel)
|
||||
for _, msg := range stoppedAgents {
|
||||
fmt.Printf(" %s\n", msg)
|
||||
}
|
||||
fmt.Printf(" Run '%s' to propagate to other clones\n", style.Dim.Render("bd sync"))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -166,6 +169,18 @@ func runRigDock(cmd *cobra.Command, args []string) error {
|
||||
func runRigUndock(cmd *cobra.Command, args []string) error {
|
||||
rigName := args[0]
|
||||
|
||||
// Check we're on main branch - undocking on other branches won't persist
|
||||
branchCmd := exec.Command("git", "branch", "--show-current")
|
||||
branchOutput, err := branchCmd.Output()
|
||||
if err == nil {
|
||||
currentBranch := string(branchOutput)
|
||||
currentBranch = currentBranch[:len(currentBranch)-1] // trim newline
|
||||
if currentBranch != "main" && currentBranch != "master" {
|
||||
return fmt.Errorf("cannot undock: must be on main branch (currently on %s)\n"+
|
||||
"Undocking on other branches won't persist. Run: git checkout main", currentBranch)
|
||||
}
|
||||
}
|
||||
|
||||
// Get rig and town root
|
||||
_, r, err := getRig(rigName)
|
||||
if err != nil {
|
||||
@@ -210,14 +225,6 @@ func runRigUndock(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("removing docked label: %w", err)
|
||||
}
|
||||
|
||||
// Sync beads to propagate to other clones
|
||||
fmt.Printf(" Syncing beads...\n")
|
||||
syncCmd := exec.Command("bd", "sync")
|
||||
syncCmd.Dir = r.BeadsPath()
|
||||
if output, err := syncCmd.CombinedOutput(); err != nil {
|
||||
fmt.Printf(" %s bd sync warning: %v\n%s", style.Warning.Render("!"), err, string(output))
|
||||
}
|
||||
|
||||
fmt.Printf("%s Rig %s undocked\n", style.Success.Render("✓"), rigName)
|
||||
fmt.Printf(" Label removed: %s\n", RigDockedLabel)
|
||||
fmt.Printf(" Daemon can now auto-restart agents\n")
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -138,13 +139,7 @@ func runRigQuickAdd(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
func findGitRoot(path string) (string, error) {
|
||||
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
|
||||
cmd.Dir = path
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
return git.RepoRootFrom(path)
|
||||
}
|
||||
|
||||
func findGitRemoteURL(gitRoot string) (string, error) {
|
||||
|
||||
@@ -8,7 +8,9 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/ui"
|
||||
"github.com/steveyegge/gastown/internal/version"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -25,11 +27,32 @@ across distributed teams of AI agents working on shared codebases.`,
|
||||
}
|
||||
|
||||
// Commands that don't require beads to be installed/checked.
|
||||
// These are basic utility commands that should work without beads.
|
||||
// NOTE: Gas Town has migrated to Dolt for beads storage. The bd version
|
||||
// check is obsolete. Exempt all common commands.
|
||||
var beadsExemptCommands = map[string]bool{
|
||||
"version": true,
|
||||
"help": true,
|
||||
"completion": true,
|
||||
"crew": true,
|
||||
"polecat": true,
|
||||
"witness": true,
|
||||
"refinery": true,
|
||||
"status": true,
|
||||
"mail": true,
|
||||
"hook": true,
|
||||
"prime": true,
|
||||
"nudge": true,
|
||||
"seance": true,
|
||||
"doctor": true,
|
||||
"dolt": true,
|
||||
"handoff": true,
|
||||
"costs": true,
|
||||
"feed": true,
|
||||
"rig": true,
|
||||
"config": true,
|
||||
"install": true,
|
||||
"tap": true,
|
||||
"dnd": true,
|
||||
}
|
||||
|
||||
// Commands exempt from the town root branch warning.
|
||||
@@ -45,9 +68,28 @@ var branchCheckExemptCommands = map[string]bool{
|
||||
|
||||
// persistentPreRun runs before every command.
|
||||
func persistentPreRun(cmd *cobra.Command, args []string) error {
|
||||
// Check if binary was built properly (via make build, not raw go build).
|
||||
// Raw go build produces unsigned binaries that macOS will kill.
|
||||
if BuiltProperly == "" {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: This binary was built with 'go build' directly.")
|
||||
fmt.Fprintln(os.Stderr, " Use 'make build' to create a properly signed binary.")
|
||||
if gtRoot := os.Getenv("GT_ROOT"); gtRoot != "" {
|
||||
fmt.Fprintf(os.Stderr, " Run from: %s\n", gtRoot)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Initialize CLI theme (dark/light mode support)
|
||||
initCLITheme()
|
||||
|
||||
// Get the root command name being run
|
||||
cmdName := cmd.Name()
|
||||
|
||||
// Check for stale binary (warning only, doesn't block)
|
||||
if !beadsExemptCommands[cmdName] {
|
||||
checkStaleBinaryWarning()
|
||||
}
|
||||
|
||||
// Check town root branch (warning only, non-blocking)
|
||||
if !branchCheckExemptCommands[cmdName] {
|
||||
warnIfTownRootOffMain()
|
||||
@@ -62,6 +104,22 @@ func persistentPreRun(cmd *cobra.Command, args []string) error {
|
||||
return CheckBeadsVersion()
|
||||
}
|
||||
|
||||
// initCLITheme initializes the CLI color theme based on settings and environment.
|
||||
func initCLITheme() {
|
||||
// Try to load town settings for CLITheme config
|
||||
var configTheme string
|
||||
if townRoot, err := workspace.FindFromCwd(); err == nil && townRoot != "" {
|
||||
settingsPath := config.TownSettingsPath(townRoot)
|
||||
if settings, err := config.LoadOrCreateTownSettings(settingsPath); err == nil {
|
||||
configTheme = settings.CLITheme
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize theme with config value (env var takes precedence inside InitTheme)
|
||||
ui.InitTheme(configTheme)
|
||||
ui.ApplyThemeMode()
|
||||
}
|
||||
|
||||
// warnIfTownRootOffMain prints a warning if the town root is not on main branch.
|
||||
// This is a non-blocking warning to help catch accidental branch switches.
|
||||
func warnIfTownRootOffMain() {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
@@ -163,6 +164,10 @@ func TestFindSessionLocation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSymlinkSessionToCurrentAccount(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("symlink tests require elevated privileges on Windows")
|
||||
}
|
||||
|
||||
t.Run("creates symlink for session in other account", func(t *testing.T) {
|
||||
townRoot, fakeHome, cleanup := setupSeanceTestEnv(t)
|
||||
defer cleanup()
|
||||
@@ -264,6 +269,10 @@ func TestSymlinkSessionToCurrentAccount(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCleanupOrphanedSessionSymlinks(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("symlink tests require elevated privileges on Windows")
|
||||
}
|
||||
|
||||
t.Run("removes orphaned symlinks", func(t *testing.T) {
|
||||
_, fakeHome, cleanup := setupSeanceTestEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -96,6 +96,9 @@ var (
|
||||
slingAccount string // --account: Claude Code account handle to use
|
||||
slingAgent string // --agent: override runtime agent for this sling/spawn
|
||||
slingNoConvoy bool // --no-convoy: skip auto-convoy creation
|
||||
slingNoMerge bool // --no-merge: skip merge queue on completion (for upstream PRs/human review)
|
||||
slingEpic string // --epic: link auto-created convoy to parent epic
|
||||
slingConvoy string // --convoy: add to existing convoy instead of creating new
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -113,6 +116,9 @@ func init() {
|
||||
slingCmd.Flags().StringVar(&slingAgent, "agent", "", "Override agent/runtime for this sling (e.g., claude, gemini, codex, or custom alias)")
|
||||
slingCmd.Flags().BoolVar(&slingNoConvoy, "no-convoy", false, "Skip auto-convoy creation for single-issue sling")
|
||||
slingCmd.Flags().BoolVar(&slingHookRawBead, "hook-raw-bead", false, "Hook raw bead without default formula (expert mode)")
|
||||
slingCmd.Flags().BoolVar(&slingNoMerge, "no-merge", false, "Skip merge queue on completion (keep work on feature branch for review)")
|
||||
slingCmd.Flags().StringVar(&slingEpic, "epic", "", "Link auto-created convoy to parent epic")
|
||||
slingCmd.Flags().StringVar(&slingConvoy, "convoy", "", "Add to existing convoy instead of creating new")
|
||||
|
||||
rootCmd.AddCommand(slingCmd)
|
||||
}
|
||||
@@ -131,11 +137,6 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
townBeadsDir := filepath.Join(townRoot, ".beads")
|
||||
|
||||
// --var is only for standalone formula mode, not formula-on-bead mode
|
||||
if slingOnTarget != "" && len(slingVars) > 0 {
|
||||
return fmt.Errorf("--var cannot be used with --on (formula-on-bead mode doesn't support variables)")
|
||||
}
|
||||
|
||||
// Batch mode detection: multiple beads with rig target
|
||||
// Pattern: gt sling gt-abc gt-def gt-ghi gastown
|
||||
// When len(args) > 2 and last arg is a rig, sling each bead to its own polecat
|
||||
@@ -191,8 +192,8 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
// Determine target agent (self or specified)
|
||||
var targetAgent string
|
||||
var targetPane string
|
||||
var hookWorkDir string // Working directory for running bd hook commands
|
||||
var hookSetAtomically bool // True if hook was set during polecat spawn (skip redundant update)
|
||||
var hookWorkDir string // Working directory for running bd hook commands
|
||||
var hookSetAtomically bool // True if hook was set during polecat spawn (skip redundant update)
|
||||
|
||||
if len(args) > 1 {
|
||||
target := args[1]
|
||||
@@ -376,16 +377,28 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Auto-convoy: check if issue is already tracked by a convoy
|
||||
// If not, create one for dashboard visibility (unless --no-convoy is set)
|
||||
if !slingNoConvoy && formulaName == "" {
|
||||
// Convoy handling: --convoy adds to existing, otherwise auto-create (unless --no-convoy)
|
||||
if slingConvoy != "" {
|
||||
// Use existing convoy specified by --convoy flag
|
||||
if slingDryRun {
|
||||
fmt.Printf("Would add to convoy %s\n", slingConvoy)
|
||||
fmt.Printf("Would add tracking relation to %s\n", beadID)
|
||||
} else {
|
||||
if err := addToExistingConvoy(slingConvoy, beadID); err != nil {
|
||||
return fmt.Errorf("adding to convoy: %w", err)
|
||||
}
|
||||
fmt.Printf("%s Added to convoy %s\n", style.Bold.Render("→"), slingConvoy)
|
||||
}
|
||||
} else if !slingNoConvoy && formulaName == "" {
|
||||
// Auto-convoy: check if issue is already tracked by a convoy
|
||||
// If not, create one for dashboard visibility
|
||||
existingConvoy := isTrackedByConvoy(beadID)
|
||||
if existingConvoy == "" {
|
||||
if slingDryRun {
|
||||
fmt.Printf("Would create convoy 'Work: %s'\n", info.Title)
|
||||
fmt.Printf("Would add tracking relation to %s\n", beadID)
|
||||
} else {
|
||||
convoyID, err := createAutoConvoy(beadID, info.Title)
|
||||
convoyID, err := createAutoConvoy(beadID, info.Title, slingEpic)
|
||||
if err != nil {
|
||||
// Log warning but don't fail - convoy is optional
|
||||
fmt.Printf("%s Could not create auto-convoy: %v\n", style.Dim.Render("Warning:"), err)
|
||||
@@ -434,7 +447,7 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
if formulaName != "" {
|
||||
fmt.Printf(" Instantiating formula %s...\n", formulaName)
|
||||
|
||||
result, err := InstantiateFormulaOnBead(formulaName, beadID, info.Title, hookWorkDir, townRoot, false)
|
||||
result, err := InstantiateFormulaOnBead(formulaName, beadID, info.Title, hookWorkDir, townRoot, false, slingVars)
|
||||
if err != nil {
|
||||
return fmt.Errorf("instantiating formula %s: %w", formulaName, err)
|
||||
}
|
||||
@@ -499,6 +512,15 @@ func runSling(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Store no_merge flag in bead (skips merge queue on completion)
|
||||
if slingNoMerge {
|
||||
if err := storeNoMergeInBead(beadID, true); err != nil {
|
||||
fmt.Printf("%s Could not store no_merge in bead: %v\n", style.Dim.Render("Warning:"), err)
|
||||
} else {
|
||||
fmt.Printf("%s No-merge mode enabled (work stays on feature branch)\n", style.Bold.Render("✓"))
|
||||
}
|
||||
}
|
||||
|
||||
// Record the attached molecule in the BASE bead's description.
|
||||
// This field points to the wisp (compound root) and enables:
|
||||
// - gt hook/gt prime: follow attached_molecule to show molecule steps
|
||||
|
||||
@@ -74,10 +74,38 @@ case "$cmd" in
|
||||
esac
|
||||
exit 0
|
||||
`
|
||||
bdPath := filepath.Join(binDir, "bd")
|
||||
if err := os.WriteFile(bdPath, []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
bdScriptWindows := `@echo off
|
||||
setlocal enableextensions
|
||||
echo CMD:%*>>"%BD_LOG%"
|
||||
set "cmd=%1"
|
||||
set "sub=%2"
|
||||
if "%cmd%"=="--no-daemon" (
|
||||
set "cmd=%2"
|
||||
set "sub=%3"
|
||||
)
|
||||
if "%cmd%"=="show" (
|
||||
echo [{^"title^":^"Fix bug ABC^",^"status^":^"open^",^"assignee^":^"^",^"description^":^"^"}]
|
||||
exit /b 0
|
||||
)
|
||||
if "%cmd%"=="formula" (
|
||||
echo {^"name^":^"mol-polecat-work^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%cmd%"=="cook" exit /b 0
|
||||
if "%cmd%"=="mol" (
|
||||
if "%sub%"=="wisp" (
|
||||
echo {^"new_epic_id^":^"gt-wisp-288^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%sub%"=="bond" (
|
||||
echo {^"root_id^":^"gt-wisp-288^"}
|
||||
exit /b 0
|
||||
)
|
||||
)
|
||||
if "%cmd%"=="update" exit /b 0
|
||||
exit /b 0
|
||||
`
|
||||
_ = writeBDStub(t, binDir, bdScript, bdScriptWindows)
|
||||
|
||||
t.Setenv("BD_LOG", logPath)
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
@@ -92,7 +120,8 @@ exit 0
|
||||
}
|
||||
|
||||
// Test the helper function directly
|
||||
result, err := InstantiateFormulaOnBead("mol-polecat-work", "gt-abc123", "Test Bug Fix", "", townRoot, false)
|
||||
extraVars := []string{"branch=polecat/furiosa/gt-abc123"}
|
||||
result, err := InstantiateFormulaOnBead("mol-polecat-work", "gt-abc123", "Test Bug Fix", "", townRoot, false, extraVars)
|
||||
if err != nil {
|
||||
t.Fatalf("InstantiateFormulaOnBead failed: %v", err)
|
||||
}
|
||||
@@ -117,6 +146,9 @@ exit 0
|
||||
if !strings.Contains(logContent, "mol wisp mol-polecat-work") {
|
||||
t.Errorf("mol wisp command not found in log:\n%s", logContent)
|
||||
}
|
||||
if !strings.Contains(logContent, "--var branch=polecat/furiosa/gt-abc123") {
|
||||
t.Errorf("extra vars not passed to wisp command:\n%s", logContent)
|
||||
}
|
||||
if !strings.Contains(logContent, "mol bond") {
|
||||
t.Errorf("mol bond command not found in log:\n%s", logContent)
|
||||
}
|
||||
@@ -160,9 +192,28 @@ case "$cmd" in
|
||||
esac
|
||||
exit 0
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(binDir, "bd"), []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
bdScriptWindows := `@echo off
|
||||
setlocal enableextensions
|
||||
echo CMD:%*>>"%BD_LOG%"
|
||||
set "cmd=%1"
|
||||
set "sub=%2"
|
||||
if "%cmd%"=="--no-daemon" (
|
||||
set "cmd=%2"
|
||||
set "sub=%3"
|
||||
)
|
||||
if "%cmd%"=="mol" (
|
||||
if "%sub%"=="wisp" (
|
||||
echo {^"new_epic_id^":^"gt-wisp-skip^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%sub%"=="bond" (
|
||||
echo {^"root_id^":^"gt-wisp-skip^"}
|
||||
exit /b 0
|
||||
)
|
||||
)
|
||||
exit /b 0
|
||||
`
|
||||
_ = writeBDStub(t, binDir, bdScript, bdScriptWindows)
|
||||
|
||||
t.Setenv("BD_LOG", logPath)
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
@@ -172,7 +223,7 @@ exit 0
|
||||
_ = os.Chdir(townRoot)
|
||||
|
||||
// Test with skipCook=true
|
||||
_, err := InstantiateFormulaOnBead("mol-polecat-work", "gt-test", "Test", "", townRoot, true)
|
||||
_, err := InstantiateFormulaOnBead("mol-polecat-work", "gt-test", "Test", "", townRoot, true, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("InstantiateFormulaOnBead failed: %v", err)
|
||||
}
|
||||
@@ -207,9 +258,11 @@ func TestCookFormula(t *testing.T) {
|
||||
echo "CMD:$*" >> "${BD_LOG}"
|
||||
exit 0
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(binDir, "bd"), []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd stub: %v", err)
|
||||
}
|
||||
bdScriptWindows := `@echo off
|
||||
echo CMD:%*>>"%BD_LOG%"
|
||||
exit /b 0
|
||||
`
|
||||
_ = writeBDStub(t, binDir, bdScript, bdScriptWindows)
|
||||
|
||||
t.Setenv("BD_LOG", logPath)
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
@@ -246,11 +299,11 @@ func TestSlingHookRawBeadFlag(t *testing.T) {
|
||||
// When formulaName is empty and target contains "/polecats/", mol-polecat-work should be applied.
|
||||
func TestAutoApplyLogic(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
formulaName string
|
||||
hookRawBead bool
|
||||
targetAgent string
|
||||
wantAutoApply bool
|
||||
name string
|
||||
formulaName string
|
||||
hookRawBead bool
|
||||
targetAgent string
|
||||
wantAutoApply bool
|
||||
}{
|
||||
{
|
||||
name: "bare bead to polecat - should auto-apply",
|
||||
@@ -336,9 +389,29 @@ case "$cmd" in
|
||||
esac
|
||||
exit 0
|
||||
`
|
||||
if err := os.WriteFile(filepath.Join(binDir, "bd"), []byte(bdScript), 0755); err != nil {
|
||||
t.Fatalf("write bd: %v", err)
|
||||
}
|
||||
bdScriptWindows := `@echo off
|
||||
setlocal enableextensions
|
||||
echo CMD:%*>>"%BD_LOG%"
|
||||
set "cmd=%1"
|
||||
set "sub=%2"
|
||||
if "%cmd%"=="--no-daemon" (
|
||||
set "cmd=%2"
|
||||
set "sub=%3"
|
||||
)
|
||||
if "%cmd%"=="cook" exit /b 0
|
||||
if "%cmd%"=="mol" (
|
||||
if "%sub%"=="wisp" (
|
||||
echo {^"new_epic_id^":^"gt-wisp-var^"}
|
||||
exit /b 0
|
||||
)
|
||||
if "%sub%"=="bond" (
|
||||
echo {^"root_id^":^"gt-wisp-var^"}
|
||||
exit /b 0
|
||||
)
|
||||
)
|
||||
exit /b 0
|
||||
`
|
||||
_ = writeBDStub(t, binDir, bdScript, bdScriptWindows)
|
||||
|
||||
t.Setenv("BD_LOG", logPath)
|
||||
t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
|
||||
@@ -347,7 +420,7 @@ exit 0
|
||||
t.Cleanup(func() { _ = os.Chdir(cwd) })
|
||||
_ = os.Chdir(townRoot)
|
||||
|
||||
_, err := InstantiateFormulaOnBead("mol-polecat-work", "gt-abc123", "My Cool Feature", "", townRoot, false)
|
||||
_, err := InstantiateFormulaOnBead("mol-polecat-work", "gt-abc123", "My Cool Feature", "", townRoot, false, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("InstantiateFormulaOnBead: %v", err)
|
||||
}
|
||||
|
||||
@@ -87,7 +87,7 @@ func runBatchSling(beadIDs []string, rigName string, townBeadsDir string) error
|
||||
if !slingNoConvoy {
|
||||
existingConvoy := isTrackedByConvoy(beadID)
|
||||
if existingConvoy == "" {
|
||||
convoyID, err := createAutoConvoy(beadID, info.Title)
|
||||
convoyID, err := createAutoConvoy(beadID, info.Title, slingEpic)
|
||||
if err != nil {
|
||||
fmt.Printf(" %s Could not create auto-convoy: %v\n", style.Dim.Render("Warning:"), err)
|
||||
} else {
|
||||
@@ -113,7 +113,7 @@ func runBatchSling(beadIDs []string, rigName string, townBeadsDir string) error
|
||||
beadToHook := beadID
|
||||
attachedMoleculeID := ""
|
||||
if formulaCooked {
|
||||
result, err := InstantiateFormulaOnBead(formulaName, beadID, info.Title, hookWorkDir, townRoot, true)
|
||||
result, err := InstantiateFormulaOnBead(formulaName, beadID, info.Title, hookWorkDir, townRoot, true, slingVars)
|
||||
if err != nil {
|
||||
fmt.Printf(" %s Could not apply formula: %v (hooking raw bead)\n", style.Dim.Render("Warning:"), err)
|
||||
} else {
|
||||
|
||||
@@ -3,6 +3,7 @@ package cmd
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base32"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
@@ -29,37 +30,40 @@ func isTrackedByConvoy(beadID string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Query town beads for any convoy that tracks this issue
|
||||
// Convoys use "tracks" dependency type: convoy -> tracked issue
|
||||
townBeads := filepath.Join(townRoot, ".beads")
|
||||
dbPath := filepath.Join(townBeads, "beads.db")
|
||||
// Use bd dep list to find what tracks this issue (direction=up)
|
||||
// Filter for open convoys in the results
|
||||
depCmd := exec.Command("bd", "--no-daemon", "dep", "list", beadID, "--direction=up", "--type=tracks", "--json")
|
||||
depCmd.Dir = townRoot
|
||||
|
||||
// Query dependencies where this bead is being tracked
|
||||
// Also check for external reference format: external:rig:issue-id
|
||||
query := fmt.Sprintf(`
|
||||
SELECT d.issue_id
|
||||
FROM dependencies d
|
||||
JOIN issues i ON d.issue_id = i.id
|
||||
WHERE d.type = 'tracks'
|
||||
AND i.issue_type = 'convoy'
|
||||
AND i.status = 'open'
|
||||
AND (d.depends_on_id = '%s' OR d.depends_on_id LIKE '%%:%s')
|
||||
LIMIT 1
|
||||
`, beadID, beadID)
|
||||
|
||||
queryCmd := exec.Command("sqlite3", dbPath, query)
|
||||
out, err := queryCmd.Output()
|
||||
out, err := depCmd.Output()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
convoyID := strings.TrimSpace(string(out))
|
||||
return convoyID
|
||||
// Parse results and find an open convoy
|
||||
var trackers []struct {
|
||||
ID string `json:"id"`
|
||||
IssueType string `json:"issue_type"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &trackers); err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Return the first open convoy that tracks this issue
|
||||
for _, tracker := range trackers {
|
||||
if tracker.IssueType == "convoy" && tracker.Status == "open" {
|
||||
return tracker.ID
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// createAutoConvoy creates an auto-convoy for a single issue and tracks it.
|
||||
// If epicID is provided, links the convoy to the parent epic.
|
||||
// Returns the created convoy ID.
|
||||
func createAutoConvoy(beadID, beadTitle string) (string, error) {
|
||||
func createAutoConvoy(beadID, beadTitle string, epicID string) (string, error) {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("finding town root: %w", err)
|
||||
@@ -67,6 +71,12 @@ func createAutoConvoy(beadID, beadTitle string) (string, error) {
|
||||
|
||||
townBeads := filepath.Join(townRoot, ".beads")
|
||||
|
||||
// Ensure custom types (including 'convoy') are registered in town beads.
|
||||
// This handles cases where install didn't complete or beads was initialized manually.
|
||||
if err := beads.EnsureCustomTypes(townBeads); err != nil {
|
||||
return "", fmt.Errorf("ensuring custom types: %w", err)
|
||||
}
|
||||
|
||||
// Generate convoy ID with hq-cv- prefix for visual distinction
|
||||
// The hq-cv- prefix is registered in routes during gt install
|
||||
convoyID := fmt.Sprintf("hq-cv-%s", slingGenerateShortID())
|
||||
@@ -74,6 +84,9 @@ func createAutoConvoy(beadID, beadTitle string) (string, error) {
|
||||
// Create convoy with title "Work: <issue-title>"
|
||||
convoyTitle := fmt.Sprintf("Work: %s", beadTitle)
|
||||
description := fmt.Sprintf("Auto-created convoy tracking %s", beadID)
|
||||
if epicID != "" {
|
||||
description += fmt.Sprintf("\nParent-Epic: %s", epicID)
|
||||
}
|
||||
|
||||
createArgs := []string{
|
||||
"create",
|
||||
@@ -106,9 +119,61 @@ func createAutoConvoy(beadID, beadTitle string) (string, error) {
|
||||
fmt.Printf("%s Could not add tracking relation: %v\n", style.Dim.Render("Warning:"), err)
|
||||
}
|
||||
|
||||
// Link convoy to parent epic if specified (Goals layer)
|
||||
if epicID != "" {
|
||||
epicDepArgs := []string{"--no-daemon", "dep", "add", convoyID, epicID, "--type=child_of"}
|
||||
epicDepCmd := exec.Command("bd", epicDepArgs...)
|
||||
epicDepCmd.Dir = townBeads
|
||||
epicDepCmd.Stderr = os.Stderr
|
||||
|
||||
if err := epicDepCmd.Run(); err != nil {
|
||||
// Epic link failed - log warning but continue
|
||||
fmt.Printf("%s Could not link convoy to epic: %v\n", style.Dim.Render("Warning:"), err)
|
||||
}
|
||||
}
|
||||
|
||||
return convoyID, nil
|
||||
}
|
||||
|
||||
// addToExistingConvoy adds a bead to an existing convoy by creating a tracking relation.
|
||||
// Returns an error if the convoy doesn't exist or the tracking relation fails.
|
||||
func addToExistingConvoy(convoyID, beadID string) error {
|
||||
townRoot, err := workspace.FindFromCwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("finding town root: %w", err)
|
||||
}
|
||||
|
||||
townBeads := filepath.Join(townRoot, ".beads")
|
||||
dbPath := filepath.Join(townBeads, "beads.db")
|
||||
|
||||
// Verify convoy exists and is open
|
||||
query := fmt.Sprintf(`
|
||||
SELECT id FROM issues
|
||||
WHERE id = '%s'
|
||||
AND issue_type = 'convoy'
|
||||
AND status = 'open'
|
||||
`, convoyID)
|
||||
|
||||
queryCmd := exec.Command("sqlite3", dbPath, query)
|
||||
out, err := queryCmd.Output()
|
||||
if err != nil || strings.TrimSpace(string(out)) == "" {
|
||||
return fmt.Errorf("convoy %s not found or not open", convoyID)
|
||||
}
|
||||
|
||||
// Add tracking relation: convoy tracks the issue
|
||||
trackBeadID := formatTrackBeadID(beadID)
|
||||
depArgs := []string{"--no-daemon", "dep", "add", convoyID, trackBeadID, "--type=tracks"}
|
||||
depCmd := exec.Command("bd", depArgs...)
|
||||
depCmd.Dir = townBeads
|
||||
depCmd.Stderr = os.Stderr
|
||||
|
||||
if err := depCmd.Run(); err != nil {
|
||||
return fmt.Errorf("adding tracking relation: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// formatTrackBeadID formats a bead ID for use in convoy tracking dependencies.
|
||||
// Cross-rig beads (non-hq- prefixed) are formatted as external references
|
||||
// so the bd tool can resolve them when running from HQ context.
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/constants"
|
||||
"github.com/steveyegge/gastown/internal/git"
|
||||
"github.com/steveyegge/gastown/internal/tmux"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -137,12 +138,18 @@ func storeDispatcherInBead(beadID, dispatcher string) error {
|
||||
}
|
||||
|
||||
// Get the bead to preserve existing description content
|
||||
showCmd := exec.Command("bd", "show", beadID, "--json")
|
||||
// Use --no-daemon for consistency with other sling operations (see h-3f96b)
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
out, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching bead: %w", err)
|
||||
}
|
||||
|
||||
// Handle bd --no-daemon exit 0 bug: empty stdout means not found
|
||||
if len(out) == 0 {
|
||||
return fmt.Errorf("bead not found")
|
||||
}
|
||||
|
||||
// Parse the bead
|
||||
var issues []beads.Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
@@ -165,8 +172,8 @@ func storeDispatcherInBead(beadID, dispatcher string) error {
|
||||
// Update the description
|
||||
newDesc := beads.SetAttachmentFields(issue, fields)
|
||||
|
||||
// Update the bead
|
||||
updateCmd := exec.Command("bd", "update", beadID, "--description="+newDesc)
|
||||
// Update the bead (use --no-daemon for consistency)
|
||||
updateCmd := exec.Command("bd", "--no-daemon", "update", beadID, "--description="+newDesc)
|
||||
updateCmd.Stderr = os.Stderr
|
||||
if err := updateCmd.Run(); err != nil {
|
||||
return fmt.Errorf("updating bead description: %w", err)
|
||||
@@ -190,12 +197,18 @@ func storeAttachedMoleculeInBead(beadID, moleculeID string) error {
|
||||
issue := &beads.Issue{}
|
||||
if logPath == "" {
|
||||
// Get the bead to preserve existing description content
|
||||
showCmd := exec.Command("bd", "show", beadID, "--json")
|
||||
// Use --no-daemon for consistency with other sling operations (see h-3f96b)
|
||||
showCmd := exec.Command("bd", "--no-daemon", "show", beadID, "--json", "--allow-stale")
|
||||
out, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching bead: %w", err)
|
||||
}
|
||||
|
||||
// Handle bd --no-daemon exit 0 bug: empty stdout means not found
|
||||
if len(out) == 0 {
|
||||
return fmt.Errorf("bead not found")
|
||||
}
|
||||
|
||||
// Parse the bead
|
||||
var issues []beads.Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
@@ -225,6 +238,53 @@ func storeAttachedMoleculeInBead(beadID, moleculeID string) error {
|
||||
_ = os.WriteFile(logPath, []byte(newDesc), 0644)
|
||||
}
|
||||
|
||||
// Update the bead (use --no-daemon for consistency)
|
||||
updateCmd := exec.Command("bd", "--no-daemon", "update", beadID, "--description="+newDesc)
|
||||
updateCmd.Stderr = os.Stderr
|
||||
if err := updateCmd.Run(); err != nil {
|
||||
return fmt.Errorf("updating bead description: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// storeNoMergeInBead sets the no_merge field in a bead's description.
|
||||
// When set, gt done will skip the merge queue and keep work on the feature branch.
|
||||
// This is useful for upstream contributions or when human review is needed before merge.
|
||||
func storeNoMergeInBead(beadID string, noMerge bool) error {
|
||||
if !noMerge {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the bead to preserve existing description content
|
||||
showCmd := exec.Command("bd", "show", beadID, "--json")
|
||||
out, err := showCmd.Output()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching bead: %w", err)
|
||||
}
|
||||
|
||||
// Parse the bead
|
||||
var issues []beads.Issue
|
||||
if err := json.Unmarshal(out, &issues); err != nil {
|
||||
return fmt.Errorf("parsing bead: %w", err)
|
||||
}
|
||||
if len(issues) == 0 {
|
||||
return fmt.Errorf("bead not found")
|
||||
}
|
||||
issue := &issues[0]
|
||||
|
||||
// Get or create attachment fields
|
||||
fields := beads.ParseAttachmentFields(issue)
|
||||
if fields == nil {
|
||||
fields = &beads.AttachmentFields{}
|
||||
}
|
||||
|
||||
// Set the no_merge flag
|
||||
fields.NoMerge = true
|
||||
|
||||
// Update the description
|
||||
newDesc := beads.SetAttachmentFields(issue, fields)
|
||||
|
||||
// Update the bead
|
||||
updateCmd := exec.Command("bd", "update", beadID, "--description="+newDesc)
|
||||
updateCmd.Stderr = os.Stderr
|
||||
@@ -319,13 +379,13 @@ func ensureAgentReady(sessionName string) error {
|
||||
}
|
||||
|
||||
// detectCloneRoot finds the root of the current git clone.
|
||||
// Uses cached value to avoid repeated git subprocess calls.
|
||||
func detectCloneRoot() (string, error) {
|
||||
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
|
||||
out, err := cmd.Output()
|
||||
root, err := git.RepoRoot()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("not in a git repository")
|
||||
}
|
||||
return strings.TrimSpace(string(out)), nil
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// detectActor returns the current agent's actor string for event logging.
|
||||
@@ -475,9 +535,10 @@ type FormulaOnBeadResult struct {
|
||||
// - hookWorkDir: working directory for bd commands (polecat's worktree)
|
||||
// - townRoot: the town root directory
|
||||
// - skipCook: if true, skip cooking (for batch mode optimization where cook happens once)
|
||||
// - extraVars: additional --var values supplied by the user
|
||||
//
|
||||
// Returns the wisp root ID which should be hooked.
|
||||
func InstantiateFormulaOnBead(formulaName, beadID, title, hookWorkDir, townRoot string, skipCook bool) (*FormulaOnBeadResult, error) {
|
||||
func InstantiateFormulaOnBead(formulaName, beadID, title, hookWorkDir, townRoot string, skipCook bool, extraVars []string) (*FormulaOnBeadResult, error) {
|
||||
// Route bd mutations (wisp/bond) to the correct beads context for the target bead.
|
||||
formulaWorkDir := beads.ResolveHookDir(townRoot, beadID, hookWorkDir)
|
||||
|
||||
@@ -494,7 +555,11 @@ func InstantiateFormulaOnBead(formulaName, beadID, title, hookWorkDir, townRoot
|
||||
// Step 2: Create wisp with feature and issue variables from bead
|
||||
featureVar := fmt.Sprintf("feature=%s", title)
|
||||
issueVar := fmt.Sprintf("issue=%s", beadID)
|
||||
wispArgs := []string{"--no-daemon", "mol", "wisp", formulaName, "--var", featureVar, "--var", issueVar, "--json"}
|
||||
wispArgs := []string{"--no-daemon", "mol", "wisp", formulaName, "--var", featureVar, "--var", issueVar}
|
||||
for _, variable := range extraVars {
|
||||
wispArgs = append(wispArgs, "--var", variable)
|
||||
}
|
||||
wispArgs = append(wispArgs, "--json")
|
||||
wispCmd := exec.Command("bd", wispArgs...)
|
||||
wispCmd.Dir = formulaWorkDir
|
||||
wispCmd.Env = append(os.Environ(), "GT_ROOT="+townRoot)
|
||||
|
||||
@@ -1034,3 +1034,107 @@ exit /b 0
|
||||
"Log output:\n%s\nAttached log:\n%s", string(logBytes), attachedLog)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSlingNoMergeFlag verifies that gt sling --no-merge stores the no_merge flag
// in the bead's description. This flag tells gt done to skip the merge queue
// and keep work on the feature branch for human review.
//
// The test replaces the real bd binary with a stub that records every
// invocation to a log file, runs sling with slingNoMerge=true, and then
// asserts that some recorded "update" invocation carries "no_merge" in
// its arguments (i.e. in the rewritten description).
func TestSlingNoMergeFlag(t *testing.T) {
	townRoot := t.TempDir()

	// Minimal workspace marker so workspace.FindFromCwd() succeeds.
	if err := os.MkdirAll(filepath.Join(townRoot, "mayor", "rig"), 0755); err != nil {
		t.Fatalf("mkdir mayor/rig: %v", err)
	}

	// Create stub bd that logs update commands
	binDir := filepath.Join(townRoot, "bin")
	if err := os.MkdirAll(binDir, 0755); err != nil {
		t.Fatalf("mkdir binDir: %v", err)
	}
	logPath := filepath.Join(townRoot, "bd.log")
	// POSIX stub: append every arg vector to $BD_LOG, answer "show" with a
	// minimal one-issue JSON array, and succeed silently on "update".
	bdScript := `#!/bin/sh
set -e
echo "ARGS:$*" >> "${BD_LOG}"
if [ "$1" = "--no-daemon" ]; then
shift
fi
cmd="$1"
shift || true
case "$cmd" in
show)
echo '[{"title":"Test issue","status":"open","assignee":"","description":""}]'
;;
update)
exit 0
;;
esac
exit 0
`
	// Windows batch equivalent of the POSIX stub above.
	bdScriptWindows := `@echo off
setlocal enableextensions
echo ARGS:%*>>"%BD_LOG%"
set "cmd=%1"
if "%cmd%"=="--no-daemon" set "cmd=%2"
if "%cmd%"=="show" (
echo [{"title":"Test issue","status":"open","assignee":"","description":""}]
exit /b 0
)
if "%cmd%"=="update" exit /b 0
exit /b 0
`
	_ = writeBDStub(t, binDir, bdScript, bdScriptWindows)

	// Put the stub first on PATH and run as mayor; the remaining vars are
	// cleared/pinned so ambient agent identity or tmux state can't leak in.
	t.Setenv("BD_LOG", logPath)
	t.Setenv("PATH", binDir+string(os.PathListSeparator)+os.Getenv("PATH"))
	t.Setenv(EnvGTRole, "mayor")
	t.Setenv("GT_CREW", "")
	t.Setenv("GT_POLECAT", "")
	t.Setenv("TMUX_PANE", "")
	t.Setenv("GT_TEST_NO_NUDGE", "1")

	// runSling resolves the workspace from the cwd, so chdir into the
	// marker directory and restore the original cwd on cleanup.
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatalf("getwd: %v", err)
	}
	t.Cleanup(func() { _ = os.Chdir(cwd) })
	if err := os.Chdir(filepath.Join(townRoot, "mayor", "rig")); err != nil {
		t.Fatalf("chdir: %v", err)
	}

	// Save and restore global flags
	prevDryRun := slingDryRun
	prevNoConvoy := slingNoConvoy
	prevNoMerge := slingNoMerge
	t.Cleanup(func() {
		slingDryRun = prevDryRun
		slingNoConvoy = prevNoConvoy
		slingNoMerge = prevNoMerge
	})

	slingDryRun = false
	slingNoConvoy = true
	slingNoMerge = true // This is what we're testing

	if err := runSling(nil, []string{"gt-test123"}); err != nil {
		t.Fatalf("runSling: %v", err)
	}

	logBytes, err := os.ReadFile(logPath)
	if err != nil {
		t.Fatalf("read bd log: %v", err)
	}

	// Look for update command that includes no_merge in description
	logLines := strings.Split(string(logBytes), "\n")
	foundNoMerge := false
	for _, line := range logLines {
		if strings.Contains(line, "update") && strings.Contains(line, "no_merge") {
			foundNoMerge = true
			break
		}
	}

	if !foundNoMerge {
		t.Errorf("--no-merge flag not stored in bead description\nLog:\n%s", string(logBytes))
	}
}
|
||||
|
||||
@@ -43,6 +43,7 @@ func init() {
|
||||
// StaleOutput represents the JSON output structure.
|
||||
type StaleOutput struct {
|
||||
Stale bool `json:"stale"`
|
||||
ForkBuild bool `json:"fork_build,omitempty"`
|
||||
BinaryCommit string `json:"binary_commit"`
|
||||
RepoCommit string `json:"repo_commit"`
|
||||
CommitsBehind int `json:"commits_behind,omitempty"`
|
||||
@@ -77,8 +78,9 @@ func runStale(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
// Quiet mode: just exit with appropriate code
|
||||
// Fork builds are treated as "fresh" (exit 1) since they're intentional
|
||||
if staleQuiet {
|
||||
if info.IsStale {
|
||||
if info.IsStale && !info.IsForkBuild {
|
||||
os.Exit(0)
|
||||
}
|
||||
os.Exit(1)
|
||||
@@ -87,6 +89,7 @@ func runStale(cmd *cobra.Command, args []string) error {
|
||||
// Build output
|
||||
output := StaleOutput{
|
||||
Stale: info.IsStale,
|
||||
ForkBuild: info.IsForkBuild,
|
||||
BinaryCommit: info.BinaryCommit,
|
||||
RepoCommit: info.RepoCommit,
|
||||
CommitsBehind: info.CommitsBehind,
|
||||
@@ -106,7 +109,11 @@ func outputStaleJSON(output StaleOutput) error {
|
||||
}
|
||||
|
||||
func outputStaleText(output StaleOutput) error {
|
||||
if output.Stale {
|
||||
if output.ForkBuild {
|
||||
fmt.Printf("%s Binary built from fork\n", style.Success.Render("✓"))
|
||||
fmt.Printf(" Commit: %s\n", version.ShortCommit(output.BinaryCommit))
|
||||
fmt.Printf(" %s\n", style.Dim.Render("(commit not in rig repo - likely built from local fork)"))
|
||||
} else if output.Stale {
|
||||
fmt.Printf("%s Binary is stale\n", style.Warning.Render("⚠"))
|
||||
fmt.Printf(" Binary: %s\n", version.ShortCommit(output.BinaryCommit))
|
||||
fmt.Printf(" Repo: %s\n", version.ShortCommit(output.RepoCommit))
|
||||
|
||||
@@ -174,6 +174,15 @@ func runStart(cmd *cobra.Command, args []string) error {
|
||||
|
||||
t := tmux.NewTmux()
|
||||
|
||||
// Clean up orphaned tmux sessions before starting new agents.
|
||||
// This prevents session name conflicts and resource accumulation from
|
||||
// zombie sessions (tmux alive but Claude dead).
|
||||
if cleaned, err := t.CleanupOrphanedSessions(); err != nil {
|
||||
fmt.Printf(" %s Could not clean orphaned sessions: %v\n", style.Dim.Render("○"), err)
|
||||
} else if cleaned > 0 {
|
||||
fmt.Printf(" %s Cleaned up %d orphaned session(s)\n", style.Bold.Render("✓"), cleaned)
|
||||
}
|
||||
|
||||
fmt.Printf("Starting Gas Town from %s\n\n", style.Dim.Render(townRoot))
|
||||
fmt.Println("Starting all agents in parallel...")
|
||||
fmt.Println()
|
||||
@@ -395,7 +404,7 @@ func startOrRestartCrewMember(t *tmux.Tmux, r *rig.Rig, crewName, townRoot strin
|
||||
// Agent has exited, restart it
|
||||
// Build startup beacon for predecessor discovery via /resume
|
||||
address := fmt.Sprintf("%s/crew/%s", r.Name, crewName)
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
beacon := session.FormatStartupBeacon(session.BeaconConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: "restart",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
@@ -189,10 +190,6 @@ func runStatusOnce(_ *cobra.Command, _ []string) error {
|
||||
return fmt.Errorf("not in a Gas Town workspace: %w", err)
|
||||
}
|
||||
|
||||
// Check bd daemon health and attempt restart if needed
|
||||
// This is non-blocking - if daemons can't be started, we show a warning but continue
|
||||
bdWarning := beads.EnsureBdDaemonHealth(townRoot)
|
||||
|
||||
// Load town config
|
||||
townConfigPath := constants.MayorTownPath(townRoot)
|
||||
townConfig, err := config.LoadTownConfig(townConfigPath)
|
||||
@@ -404,12 +401,6 @@ func runStatusOnce(_ *cobra.Command, _ []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Show bd daemon warning at the end if there were issues
|
||||
if bdWarning != "" {
|
||||
fmt.Printf("%s %s\n", style.Warning.Render("⚠"), bdWarning)
|
||||
fmt.Printf(" Run 'bd daemon killall && bd daemon start' to restart daemons\n")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -439,6 +430,55 @@ func outputStatusText(status TownStatus) error {
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Goals summary (top 3 stalest high-priority)
|
||||
goals, _ := collectFocusItems(status.Location)
|
||||
// Sort by score (highest first)
|
||||
sort.Slice(goals, func(i, j int) bool {
|
||||
return goals[i].Score > goals[j].Score
|
||||
})
|
||||
if len(goals) > 0 {
|
||||
fmt.Printf("%s (%d active)\n", style.Bold.Render("GOALS"), len(goals))
|
||||
// Show top 3
|
||||
showCount := 3
|
||||
if len(goals) < showCount {
|
||||
showCount = len(goals)
|
||||
}
|
||||
for i := 0; i < showCount; i++ {
|
||||
g := goals[i]
|
||||
var indicator string
|
||||
switch g.Staleness {
|
||||
case "stuck":
|
||||
indicator = style.Error.Render("🔴")
|
||||
case "stale":
|
||||
indicator = style.Warning.Render("🟡")
|
||||
default:
|
||||
indicator = style.Success.Render("🟢")
|
||||
}
|
||||
fmt.Printf(" %s P%d %s: %s\n", indicator, g.Priority, g.ID, truncateWithEllipsis(g.Title, 40))
|
||||
}
|
||||
if len(goals) > showCount {
|
||||
fmt.Printf(" %s\n", style.Dim.Render(fmt.Sprintf("... and %d more (gt focus)", len(goals)-showCount)))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Attention summary (blocked items, reviews)
|
||||
attention := collectAttentionSummary(status.Location)
|
||||
if attention.Total > 0 {
|
||||
fmt.Printf("%s (%d items)\n", style.Bold.Render("ATTENTION"), attention.Total)
|
||||
if attention.Blocked > 0 {
|
||||
fmt.Printf(" • %d blocked issue(s)\n", attention.Blocked)
|
||||
}
|
||||
if attention.Reviews > 0 {
|
||||
fmt.Printf(" • %d PR(s) awaiting review\n", attention.Reviews)
|
||||
}
|
||||
if attention.Stuck > 0 {
|
||||
fmt.Printf(" • %d stuck worker(s)\n", attention.Stuck)
|
||||
}
|
||||
fmt.Printf(" %s\n", style.Dim.Render("→ gt attention for details"))
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Role icons - uses centralized emojis from constants package
|
||||
roleIcons := map[string]string{
|
||||
constants.RoleMayor: constants.EmojiMayor,
|
||||
@@ -1232,3 +1272,36 @@ func getAgentHook(b *beads.Beads, role, agentAddress, roleType string) AgentHook
|
||||
|
||||
return hook
|
||||
}
|
||||
|
||||
// AttentionSummary holds counts of items needing attention for status display.
// It is populated by collectAttentionSummary and rendered in the ATTENTION
// section of the status output.
type AttentionSummary struct {
	Blocked   int // blocked issues
	Reviews   int // PRs awaiting review
	Stuck     int // stuck workers
	Decisions int // pending decision items
	Total     int // sum of all categories above
}
|
||||
|
||||
// collectAttentionSummary gathers counts of items needing attention.
|
||||
func collectAttentionSummary(townRoot string) AttentionSummary {
|
||||
summary := AttentionSummary{}
|
||||
|
||||
// Count blocked items (reuse logic from attention.go)
|
||||
blocked := collectBlockedItems(townRoot)
|
||||
summary.Blocked = len(blocked)
|
||||
|
||||
// Count reviews
|
||||
reviews := collectReviewItems(townRoot)
|
||||
summary.Reviews = len(reviews)
|
||||
|
||||
// Count stuck workers
|
||||
stuck := collectStuckWorkers(townRoot)
|
||||
summary.Stuck = len(stuck)
|
||||
|
||||
// Count decisions
|
||||
decisions := collectDecisionItems(townRoot)
|
||||
summary.Decisions = len(decisions)
|
||||
|
||||
summary.Total = summary.Blocked + summary.Reviews + summary.Stuck + summary.Decisions
|
||||
return summary
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -115,7 +116,7 @@ func runWorkerStatusLine(t *tmux.Tmux, session, rigName, polecat, crew, issue st
|
||||
// Priority 2: Fall back to GT_ISSUE env var or in_progress beads
|
||||
currentWork := issue
|
||||
if currentWork == "" && hookedWork == "" && session != "" {
|
||||
currentWork = getCurrentWork(t, session, 40)
|
||||
currentWork = getCurrentWork(t, session, identity, 40)
|
||||
}
|
||||
|
||||
// Show hooked work (takes precedence)
|
||||
@@ -320,6 +321,20 @@ func runMayorStatusLine(t *tmux.Tmux) error {
|
||||
return rigs[i].name < rigs[j].name
|
||||
})
|
||||
|
||||
// Truncate to max rigs if there are too many to display
|
||||
// Default: 5, configurable via GT_MAX_RIGS_DISPLAY env var (0 = unlimited)
|
||||
maxRigs := 5
|
||||
if envMax := os.Getenv("GT_MAX_RIGS_DISPLAY"); envMax != "" {
|
||||
if parsed, err := strconv.Atoi(envMax); err == nil && parsed >= 0 {
|
||||
maxRigs = parsed
|
||||
}
|
||||
}
|
||||
overflow := 0
|
||||
if maxRigs > 0 && len(rigs) > maxRigs {
|
||||
overflow = len(rigs) - maxRigs
|
||||
rigs = rigs[:maxRigs]
|
||||
}
|
||||
|
||||
// Build display with group separators
|
||||
var rigParts []string
|
||||
var lastGroup string
|
||||
@@ -366,6 +381,11 @@ func runMayorStatusLine(t *tmux.Tmux) error {
|
||||
rigParts = append(rigParts, led+space+rig.name)
|
||||
}
|
||||
|
||||
// Add overflow indicator if rigs were truncated
|
||||
if overflow > 0 {
|
||||
rigParts = append(rigParts, fmt.Sprintf("+%d", overflow))
|
||||
}
|
||||
|
||||
if len(rigParts) > 0 {
|
||||
parts = append(parts, strings.Join(rigParts, " "))
|
||||
}
|
||||
@@ -713,6 +733,12 @@ func getMailPreviewWithRoot(identity string, maxLen int, townRoot string) (int,
|
||||
// beadsDir should be the directory containing .beads (for rig-level) or
|
||||
// empty to use the town root (for town-level roles).
|
||||
func getHookedWork(identity string, maxLen int, beadsDir string) string {
|
||||
// Guard: identity must be non-empty to filter by assignee.
|
||||
// Without identity, the query would return ALL hooked beads regardless of assignee.
|
||||
if identity == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// If no beadsDir specified, use town root
|
||||
if beadsDir == "" {
|
||||
var err error
|
||||
@@ -743,9 +769,15 @@ func getHookedWork(identity string, maxLen int, beadsDir string) string {
|
||||
return display
|
||||
}
|
||||
|
||||
// getCurrentWork returns a truncated title of the first in_progress issue.
|
||||
// getCurrentWork returns a truncated title of the first in_progress issue assigned to this agent.
|
||||
// Uses the pane's working directory to find the beads.
|
||||
func getCurrentWork(t *tmux.Tmux, session string, maxLen int) string {
|
||||
func getCurrentWork(t *tmux.Tmux, session, identity string, maxLen int) string {
|
||||
// Guard: identity must be non-empty to filter by assignee.
|
||||
// Without identity, the query would return ALL in_progress beads regardless of assignee.
|
||||
if identity == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Get the pane's working directory
|
||||
workDir, err := t.GetPaneWorkDir(session)
|
||||
if err != nil || workDir == "" {
|
||||
@@ -758,10 +790,11 @@ func getCurrentWork(t *tmux.Tmux, session string, maxLen int) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Query beads for in_progress issues
|
||||
// Query beads for in_progress issues assigned to this agent
|
||||
b := beads.New(workDir)
|
||||
issues, err := b.List(beads.ListOptions{
|
||||
Status: "in_progress",
|
||||
Assignee: identity,
|
||||
Priority: -1,
|
||||
})
|
||||
if err != nil || len(issues) == 0 {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/muesli/termenv"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/config"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
@@ -14,11 +15,14 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
themeListFlag bool
|
||||
themeApplyFlag bool
|
||||
themeListFlag bool
|
||||
themeApplyFlag bool
|
||||
themeApplyAllFlag bool
|
||||
)
|
||||
|
||||
// Valid CLI theme modes
|
||||
var validCLIThemes = []string{"auto", "dark", "light"}
|
||||
|
||||
var themeCmd = &cobra.Command{
|
||||
Use: "theme [name]",
|
||||
GroupID: GroupConfig,
|
||||
@@ -43,12 +47,37 @@ var themeApplyCmd = &cobra.Command{
|
||||
|
||||
By default, only applies to sessions in the current rig.
|
||||
Use --all to apply to sessions across all rigs.`,
|
||||
RunE: runThemeApply,
|
||||
RunE: runThemeApply,
|
||||
}
|
||||
|
||||
var themeCLICmd = &cobra.Command{
|
||||
Use: "cli [mode]",
|
||||
Short: "View or set CLI color scheme (dark/light/auto)",
|
||||
Long: `Manage CLI output color scheme for Gas Town commands.
|
||||
|
||||
Without arguments, shows the current CLI theme mode and detection.
|
||||
With a mode argument, sets the CLI theme preference.
|
||||
|
||||
Modes:
|
||||
auto - Automatically detect terminal background (default)
|
||||
dark - Force dark mode colors (light text for dark backgrounds)
|
||||
light - Force light mode colors (dark text for light backgrounds)
|
||||
|
||||
The setting is stored in town settings (settings/config.json) and can
|
||||
be overridden per-session via the GT_THEME environment variable.
|
||||
|
||||
Examples:
|
||||
gt theme cli # Show current CLI theme
|
||||
gt theme cli dark # Set CLI theme to dark mode
|
||||
gt theme cli auto # Reset to auto-detection
|
||||
GT_THEME=light gt status # Override for a single command`,
|
||||
RunE: runThemeCLI,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(themeCmd)
|
||||
themeCmd.AddCommand(themeApplyCmd)
|
||||
themeCmd.AddCommand(themeCLICmd)
|
||||
themeCmd.Flags().BoolVarP(&themeListFlag, "list", "l", false, "List available themes")
|
||||
themeApplyCmd.Flags().BoolVarP(&themeApplyAllFlag, "all", "a", false, "Apply to all rigs, not just current")
|
||||
}
|
||||
@@ -362,3 +391,99 @@ func saveRigTheme(rigName, themeName string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runThemeCLI implements `gt theme cli [mode]`.
//
// With no arguments it reports the configured CLI theme mode, any GT_THEME
// environment override, the effective mode, and (in auto mode) the detected
// terminal background. With a mode argument ("auto", "dark", or "light") it
// validates the mode and persists it to the town settings file.
func runThemeCLI(cmd *cobra.Command, args []string) error {
	townRoot, err := workspace.FindFromCwd()
	if err != nil {
		return fmt.Errorf("finding workspace: %w", err)
	}
	if townRoot == "" {
		return fmt.Errorf("not in a Gas Town workspace")
	}

	settingsPath := config.TownSettingsPath(townRoot)

	// Show current theme
	if len(args) == 0 {
		settings, err := config.LoadOrCreateTownSettings(settingsPath)
		if err != nil {
			return fmt.Errorf("loading settings: %w", err)
		}

		// Determine effective mode: an unset setting means "auto".
		configValue := settings.CLITheme
		if configValue == "" {
			configValue = "auto"
		}

		// Check for env override: GT_THEME takes precedence over the
		// stored setting for this session.
		envValue := os.Getenv("GT_THEME")
		effectiveMode := configValue
		if envValue != "" {
			effectiveMode = strings.ToLower(envValue)
		}

		fmt.Printf("CLI Theme:\n")
		fmt.Printf(" Configured: %s\n", configValue)
		if envValue != "" {
			fmt.Printf(" Override: %s (via GT_THEME)\n", envValue)
		}
		fmt.Printf(" Effective: %s\n", effectiveMode)

		// Show detection result for auto mode
		if effectiveMode == "auto" {
			detected := "light"
			if detectTerminalBackground() {
				detected = "dark"
			}
			fmt.Printf(" Detected: %s background\n", detected)
		}

		return nil
	}

	// Set CLI theme (mode argument is case-insensitive)
	mode := strings.ToLower(args[0])
	if !isValidCLITheme(mode) {
		return fmt.Errorf("invalid CLI theme '%s' (valid: auto, dark, light)", mode)
	}

	// Load existing settings so other fields are preserved on save
	settings, err := config.LoadOrCreateTownSettings(settingsPath)
	if err != nil {
		return fmt.Errorf("loading settings: %w", err)
	}

	// Update CLITheme
	settings.CLITheme = mode

	// Save
	if err := config.SaveTownSettings(settingsPath, settings); err != nil {
		return fmt.Errorf("saving settings: %w", err)
	}

	fmt.Printf("CLI theme set to '%s'\n", mode)
	if mode == "auto" {
		fmt.Println("Colors will adapt to your terminal's background.")
	} else {
		fmt.Printf("Colors optimized for %s backgrounds.\n", mode)
	}

	return nil
}
|
||||
|
||||
// isValidCLITheme checks if a CLI theme mode is valid.
|
||||
func isValidCLITheme(mode string) bool {
|
||||
for _, valid := range validCLIThemes {
|
||||
if mode == valid {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// detectTerminalBackground returns true if terminal has dark background.
// Callers use this to resolve the "auto" CLI theme mode to dark or light.
func detectTerminalBackground() bool {
	// Use termenv for detection; it queries the terminal's reported
	// background color.
	return termenv.HasDarkBackground()
}
|
||||
|
||||
@@ -18,6 +18,9 @@ var (
|
||||
// Commit and Branch - the git revision the binary was built from (optional ldflag)
|
||||
Commit = ""
|
||||
Branch = ""
|
||||
// BuiltProperly is set to "1" by `make build`. If empty, the binary was built
|
||||
// with raw `go build` and is likely unsigned (will be killed on macOS).
|
||||
BuiltProperly = ""
|
||||
)
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
|
||||
208
internal/config/hooks_test.go
Normal file
208
internal/config/hooks_test.go
Normal file
@@ -0,0 +1,208 @@
|
||||
// Test Hook Configuration Validation
|
||||
//
|
||||
// These tests ensure Claude Code hook configurations are correct across Gas Town.
|
||||
// Specifically, they validate that:
|
||||
// - All SessionStart hooks with `gt prime` include the `--hook` flag
|
||||
// - The registry.toml includes all required roles for session-prime
|
||||
//
|
||||
// These tests exist because hook misconfiguration causes seance to fail
|
||||
// (predecessor sessions become undiscoverable).
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
)
|
||||
|
||||
// ClaudeSettings represents the structure of .claude/settings.json.
// Only the hooks section is modeled here; any other keys in the file are
// ignored by these validation tests.
type ClaudeSettings struct {
	// Hooks maps an event name (e.g. "SessionStart") to its hook entries.
	Hooks map[string][]HookEntry `json:"hooks"`
}

// HookEntry is one matcher group under an event: a matcher pattern plus the
// actions to run when it fires.
type HookEntry struct {
	Matcher string       `json:"matcher"` // matcher pattern for this entry
	Hooks   []HookAction `json:"hooks"`   // actions executed when matched
}

// HookAction is a single hook action; the tests only inspect Command.
type HookAction struct {
	Type    string `json:"type"`    // action type (presumably "command" — not asserted here)
	Command string `json:"command"` // command line the hook runs
}
|
||||
|
||||
// HookRegistry represents the structure of hooks/registry.toml, the
// town-level hook registry validated by these tests.
type HookRegistry struct {
	// Hooks maps a hook name (e.g. "session-prime") to its definition.
	Hooks map[string]RegistryHook `toml:"hooks"`
}

// RegistryHook is a single hook definition in registry.toml. The tests
// inspect Roles and Command; the remaining fields are decoded for
// completeness.
type RegistryHook struct {
	Description string   `toml:"description"` // human-readable summary
	Event       string   `toml:"event"`       // event that triggers the hook
	Matchers    []string `toml:"matchers"`    // matcher patterns
	Command     string   `toml:"command"`     // command line to run
	Roles       []string `toml:"roles"`       // roles the hook applies to
	Scope       string   `toml:"scope"`       // where the hook applies
	Enabled     bool     `toml:"enabled"`     // whether the hook is active
}
|
||||
|
||||
// findTownRoot walks up from cwd to find the Gas Town root.
|
||||
// We look for hooks/registry.toml as the unique marker (mayor/ exists at multiple levels).
|
||||
func findTownRoot() (string, error) {
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for {
|
||||
// hooks/registry.toml is unique to the town root
|
||||
registryPath := filepath.Join(dir, "hooks", "registry.toml")
|
||||
if _, err := os.Stat(registryPath); err == nil {
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
// TestSessionStartHooksHaveHookFlag ensures all SessionStart hooks with
// `gt prime` include the `--hook` flag. Without this flag, sessions won't
// emit session_start events and seance can't discover predecessor sessions.
//
// The test scans every .claude/settings.json under the town root and
// collects (rather than fails fast on) all offending files so a single run
// reports every misconfiguration.
func TestSessionStartHooksHaveHookFlag(t *testing.T) {
	townRoot, err := findTownRoot()
	if err != nil {
		t.Skip("Not running inside Gas Town directory structure")
	}

	var settingsFiles []string

	// Find every settings.json that lives inside a .claude directory.
	err = filepath.Walk(townRoot, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return nil // Skip inaccessible paths
		}
		if info.Name() == "settings.json" && strings.Contains(path, ".claude") {
			settingsFiles = append(settingsFiles, path)
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to walk directory: %v", err)
	}

	if len(settingsFiles) == 0 {
		t.Skip("No .claude/settings.json files found")
	}

	var failures []string

	for _, path := range settingsFiles {
		data, err := os.ReadFile(path)
		if err != nil {
			// Unreadable or unparseable files are logged and skipped,
			// not treated as failures — the test targets hook content.
			t.Logf("Warning: failed to read %s: %v", path, err)
			continue
		}

		var settings ClaudeSettings
		if err := json.Unmarshal(data, &settings); err != nil {
			t.Logf("Warning: failed to parse %s: %v", path, err)
			continue
		}

		sessionStartHooks, ok := settings.Hooks["SessionStart"]
		if !ok {
			continue // No SessionStart hooks in this file
		}

		for _, entry := range sessionStartHooks {
			for _, hook := range entry.Hooks {
				cmd := hook.Command
				// Check if command contains "gt prime" but not "--hook"
				if strings.Contains(cmd, "gt prime") && !strings.Contains(cmd, "--hook") {
					relPath, _ := filepath.Rel(townRoot, path)
					failures = append(failures, relPath)
				}
			}
		}
	}

	if len(failures) > 0 {
		t.Errorf("SessionStart hooks missing --hook flag in gt prime command:\n %s\n\n"+
			"The --hook flag is required for seance to discover predecessor sessions.\n"+
			"Fix by changing 'gt prime' to 'gt prime --hook' in these files.",
			strings.Join(failures, "\n "))
	}
}
|
||||
|
||||
// TestRegistrySessionPrimeIncludesAllRoles ensures the session-prime hook
// in registry.toml includes all worker roles that need session discovery,
// and that its command carries the required --hook flag.
func TestRegistrySessionPrimeIncludesAllRoles(t *testing.T) {
	townRoot, err := findTownRoot()
	if err != nil {
		t.Skip("Not running inside Gas Town directory structure")
	}

	registryPath := filepath.Join(townRoot, "hooks", "registry.toml")
	data, err := os.ReadFile(registryPath)
	if err != nil {
		t.Skipf("hooks/registry.toml not found: %v", err)
	}

	var registry HookRegistry
	if err := toml.Unmarshal(data, &registry); err != nil {
		t.Fatalf("Failed to parse registry.toml: %v", err)
	}

	sessionPrime, ok := registry.Hooks["session-prime"]
	if !ok {
		t.Fatal("session-prime hook not found in registry.toml")
	}

	// All roles that should be able to use seance
	requiredRoles := []string{"crew", "polecat", "witness", "refinery", "mayor", "deacon"}

	// Build a set of the roles actually configured for session-prime.
	roleSet := make(map[string]bool)
	for _, role := range sessionPrime.Roles {
		roleSet[role] = true
	}

	var missingRoles []string
	for _, role := range requiredRoles {
		if !roleSet[role] {
			missingRoles = append(missingRoles, role)
		}
	}

	if len(missingRoles) > 0 {
		t.Errorf("session-prime hook missing roles: %v\n\n"+
			"Current roles: %v\n"+
			"All roles need session-prime for seance to discover their predecessor sessions.",
			missingRoles, sessionPrime.Roles)
	}

	// Also verify the command has --hook
	if !strings.Contains(sessionPrime.Command, "--hook") {
		t.Errorf("session-prime command missing --hook flag:\n %s\n\n"+
			"The --hook flag is required for seance to discover predecessor sessions.",
			sessionPrime.Command)
	}
}
|
||||
|
||||
// TestPreCompactPrimeDoesNotNeedHookFlag documents that PreCompact hooks
// don't need --hook (session already started, ID already persisted).
// It is a documentation-only test: it never fails, it just records the
// rationale in the test log.
func TestPreCompactPrimeDoesNotNeedHookFlag(t *testing.T) {
	// This test documents the intentional difference:
	// - SessionStart: needs --hook to capture session ID from stdin
	// - PreCompact: session already running, ID already persisted
	//
	// If this test fails, it means someone added --hook to PreCompact
	// which is harmless but unnecessary.
	t.Log("PreCompact hooks don't need --hook (session ID already persisted at SessionStart)")
}
|
||||
@@ -1075,29 +1075,84 @@ func lookupAgentConfig(name string, townSettings *TownSettings, rigSettings *Rig
|
||||
}
|
||||
|
||||
// fillRuntimeDefaults fills in default values for empty RuntimeConfig fields.
|
||||
// It creates a deep copy to prevent mutation of the original config.
|
||||
//
|
||||
// Default behavior:
|
||||
// - Command defaults to "claude" if empty
|
||||
// - Args defaults to ["--dangerously-skip-permissions"] if nil
|
||||
// - Empty Args slice ([]string{}) means "no args" and is preserved as-is
|
||||
//
|
||||
// All fields are deep-copied: modifying the returned config will not affect
|
||||
// the input config, including nested structs and slices.
|
||||
func fillRuntimeDefaults(rc *RuntimeConfig) *RuntimeConfig {
|
||||
if rc == nil {
|
||||
return DefaultRuntimeConfig()
|
||||
}
|
||||
// Create a copy to avoid modifying the original
|
||||
|
||||
// Create result with scalar fields (strings are immutable in Go)
|
||||
result := &RuntimeConfig{
|
||||
Provider: rc.Provider,
|
||||
Command: rc.Command,
|
||||
Args: rc.Args,
|
||||
InitialPrompt: rc.InitialPrompt,
|
||||
PromptMode: rc.PromptMode,
|
||||
}
|
||||
// Copy Env map to avoid mutation and preserve agent-specific env vars
|
||||
|
||||
// Deep copy Args slice to avoid sharing backing array
|
||||
if rc.Args != nil {
|
||||
result.Args = make([]string, len(rc.Args))
|
||||
copy(result.Args, rc.Args)
|
||||
}
|
||||
|
||||
// Deep copy Env map
|
||||
if len(rc.Env) > 0 {
|
||||
result.Env = make(map[string]string, len(rc.Env))
|
||||
for k, v := range rc.Env {
|
||||
result.Env[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Deep copy nested structs (nil checks prevent panic on access)
|
||||
if rc.Session != nil {
|
||||
result.Session = &RuntimeSessionConfig{
|
||||
SessionIDEnv: rc.Session.SessionIDEnv,
|
||||
ConfigDirEnv: rc.Session.ConfigDirEnv,
|
||||
}
|
||||
}
|
||||
|
||||
if rc.Hooks != nil {
|
||||
result.Hooks = &RuntimeHooksConfig{
|
||||
Provider: rc.Hooks.Provider,
|
||||
Dir: rc.Hooks.Dir,
|
||||
SettingsFile: rc.Hooks.SettingsFile,
|
||||
}
|
||||
}
|
||||
|
||||
if rc.Tmux != nil {
|
||||
result.Tmux = &RuntimeTmuxConfig{
|
||||
ReadyPromptPrefix: rc.Tmux.ReadyPromptPrefix,
|
||||
ReadyDelayMs: rc.Tmux.ReadyDelayMs,
|
||||
}
|
||||
// Deep copy ProcessNames slice
|
||||
if rc.Tmux.ProcessNames != nil {
|
||||
result.Tmux.ProcessNames = make([]string, len(rc.Tmux.ProcessNames))
|
||||
copy(result.Tmux.ProcessNames, rc.Tmux.ProcessNames)
|
||||
}
|
||||
}
|
||||
|
||||
if rc.Instructions != nil {
|
||||
result.Instructions = &RuntimeInstructionsConfig{
|
||||
File: rc.Instructions.File,
|
||||
}
|
||||
}
|
||||
|
||||
// Apply defaults for required fields
|
||||
if result.Command == "" {
|
||||
result.Command = "claude"
|
||||
}
|
||||
if result.Args == nil {
|
||||
result.Args = []string{"--dangerously-skip-permissions"}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
|
||||
@@ -26,8 +26,11 @@ func skipIfAgentBinaryMissing(t *testing.T, agents ...string) {
|
||||
|
||||
// isClaudeCommand checks if a command is claude (either "claude" or a path ending in "/claude").
// This handles the case where resolveClaudePath returns the full path to the claude binary.
// Also handles Windows paths with .exe extension.
func isClaudeCommand(cmd string) bool {
	// Reduce the command to its bare name: strip any directory prefix,
	// then any extension (e.g. ".exe" on Windows builds). The previous
	// early `return cmd == "claude" || strings.HasSuffix(cmd, "/claude")`
	// line was a leftover from before this refactor and made the code
	// below unreachable, breaking "claude.exe" detection.
	base := filepath.Base(cmd)
	base = strings.TrimSuffix(base, filepath.Ext(base))
	return base == "claude"
}
|
||||
|
||||
func TestTownConfigRoundTrip(t *testing.T) {
|
||||
@@ -981,9 +984,9 @@ func TestBuildAgentStartupCommand(t *testing.T) {
|
||||
// New signature: (role, rig, townRoot, rigPath, prompt)
|
||||
cmd := BuildAgentStartupCommand("witness", "gastown", "", "", "")
|
||||
|
||||
// Should contain environment prefix and claude command
|
||||
// Should contain environment variables (via 'exec env') and claude command
|
||||
if !strings.Contains(cmd, "exec env") {
|
||||
t.Error("expected exec env in command")
|
||||
t.Error("expected 'exec env' in command")
|
||||
}
|
||||
if !strings.Contains(cmd, "GT_ROLE=witness") {
|
||||
t.Error("expected GT_ROLE=witness in command")
|
||||
@@ -2017,6 +2020,740 @@ func TestLookupAgentConfigWithRigSettings(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestFillRuntimeDefaults tests the fillRuntimeDefaults function comprehensively.
|
||||
func TestFillRuntimeDefaults(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("preserves all fields", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Provider: "codex",
|
||||
Command: "opencode",
|
||||
Args: []string{"-m", "gpt-5"},
|
||||
Env: map[string]string{"OPENCODE_PERMISSION": `{"*":"allow"}`},
|
||||
InitialPrompt: "test prompt",
|
||||
PromptMode: "none",
|
||||
Session: &RuntimeSessionConfig{
|
||||
SessionIDEnv: "OPENCODE_SESSION_ID",
|
||||
},
|
||||
Hooks: &RuntimeHooksConfig{
|
||||
Provider: "opencode",
|
||||
},
|
||||
Tmux: &RuntimeTmuxConfig{
|
||||
ProcessNames: []string{"opencode", "node"},
|
||||
},
|
||||
Instructions: &RuntimeInstructionsConfig{
|
||||
File: "OPENCODE.md",
|
||||
},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
if result.Provider != input.Provider {
|
||||
t.Errorf("Provider: got %q, want %q", result.Provider, input.Provider)
|
||||
}
|
||||
if result.Command != input.Command {
|
||||
t.Errorf("Command: got %q, want %q", result.Command, input.Command)
|
||||
}
|
||||
if len(result.Args) != len(input.Args) {
|
||||
t.Errorf("Args: got %v, want %v", result.Args, input.Args)
|
||||
}
|
||||
if result.Env["OPENCODE_PERMISSION"] != input.Env["OPENCODE_PERMISSION"] {
|
||||
t.Errorf("Env: got %v, want %v", result.Env, input.Env)
|
||||
}
|
||||
if result.InitialPrompt != input.InitialPrompt {
|
||||
t.Errorf("InitialPrompt: got %q, want %q", result.InitialPrompt, input.InitialPrompt)
|
||||
}
|
||||
if result.PromptMode != input.PromptMode {
|
||||
t.Errorf("PromptMode: got %q, want %q", result.PromptMode, input.PromptMode)
|
||||
}
|
||||
if result.Session == nil || result.Session.SessionIDEnv != input.Session.SessionIDEnv {
|
||||
t.Errorf("Session: got %+v, want %+v", result.Session, input.Session)
|
||||
}
|
||||
if result.Hooks == nil || result.Hooks.Provider != input.Hooks.Provider {
|
||||
t.Errorf("Hooks: got %+v, want %+v", result.Hooks, input.Hooks)
|
||||
}
|
||||
if result.Tmux == nil || len(result.Tmux.ProcessNames) != len(input.Tmux.ProcessNames) {
|
||||
t.Errorf("Tmux: got %+v, want %+v", result.Tmux, input.Tmux)
|
||||
}
|
||||
if result.Instructions == nil || result.Instructions.File != input.Instructions.File {
|
||||
t.Errorf("Instructions: got %+v, want %+v", result.Instructions, input.Instructions)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nil input returns defaults", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
result := fillRuntimeDefaults(nil)
|
||||
|
||||
if result == nil {
|
||||
t.Fatal("fillRuntimeDefaults(nil) returned nil")
|
||||
}
|
||||
if result.Command == "" {
|
||||
t.Error("Command should have default value")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty command defaults to claude", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "",
|
||||
Args: []string{"--custom-flag"},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Use isClaudeCommand to handle resolved paths (e.g., /opt/homebrew/bin/claude)
|
||||
if !isClaudeCommand(result.Command) {
|
||||
t.Errorf("Command: got %q, want claude or path ending in claude", result.Command)
|
||||
}
|
||||
// Args should be preserved, not overwritten
|
||||
if len(result.Args) != 1 || result.Args[0] != "--custom-flag" {
|
||||
t.Errorf("Args should be preserved: got %v", result.Args)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nil args defaults to skip-permissions", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "claude",
|
||||
Args: nil,
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
if result.Args == nil || len(result.Args) == 0 {
|
||||
t.Error("Args should have default value")
|
||||
}
|
||||
if result.Args[0] != "--dangerously-skip-permissions" {
|
||||
t.Errorf("Args: got %v, want [--dangerously-skip-permissions]", result.Args)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty args slice is preserved", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "claude",
|
||||
Args: []string{}, // Explicitly empty, not nil
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Empty slice means "no args", not "use defaults"
|
||||
// This is intentional per RuntimeConfig docs
|
||||
if result.Args == nil {
|
||||
t.Error("Empty Args slice should be preserved as empty, not nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("env map is copied not shared", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "opencode",
|
||||
Env: map[string]string{"KEY": "value"},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Modify result's env
|
||||
result.Env["NEW_KEY"] = "new_value"
|
||||
|
||||
// Original should be unchanged
|
||||
if _, ok := input.Env["NEW_KEY"]; ok {
|
||||
t.Error("Env map was not copied - modifications affect original")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("prompt_mode none is preserved for custom agents", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// This is the specific bug that was fixed - opencode needs prompt_mode: "none"
|
||||
// to prevent the startup beacon from being passed as an argument
|
||||
input := &RuntimeConfig{
|
||||
Provider: "opencode",
|
||||
Command: "opencode",
|
||||
Args: []string{"-m", "gpt-5"},
|
||||
PromptMode: "none",
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
if result.PromptMode != "none" {
|
||||
t.Errorf("PromptMode: got %q, want %q - custom prompt_mode was not preserved", result.PromptMode, "none")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("args slice is deep copied not shared", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "opencode",
|
||||
Args: []string{"original-arg"},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Modify result's args
|
||||
result.Args[0] = "modified-arg"
|
||||
|
||||
// Original should be unchanged
|
||||
if input.Args[0] != "original-arg" {
|
||||
t.Errorf("Args slice was not deep copied - modifications affect original: got %q, want %q",
|
||||
input.Args[0], "original-arg")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("session struct is deep copied", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "claude",
|
||||
Session: &RuntimeSessionConfig{
|
||||
SessionIDEnv: "ORIGINAL_SESSION_ID",
|
||||
ConfigDirEnv: "ORIGINAL_CONFIG_DIR",
|
||||
},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Modify result's session
|
||||
result.Session.SessionIDEnv = "MODIFIED_SESSION_ID"
|
||||
|
||||
// Original should be unchanged
|
||||
if input.Session.SessionIDEnv != "ORIGINAL_SESSION_ID" {
|
||||
t.Errorf("Session struct was not deep copied - modifications affect original: got %q, want %q",
|
||||
input.Session.SessionIDEnv, "ORIGINAL_SESSION_ID")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("hooks struct is deep copied", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "claude",
|
||||
Hooks: &RuntimeHooksConfig{
|
||||
Provider: "original-provider",
|
||||
Dir: "original-dir",
|
||||
SettingsFile: "original-file",
|
||||
},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Modify result's hooks
|
||||
result.Hooks.Provider = "modified-provider"
|
||||
|
||||
// Original should be unchanged
|
||||
if input.Hooks.Provider != "original-provider" {
|
||||
t.Errorf("Hooks struct was not deep copied - modifications affect original: got %q, want %q",
|
||||
input.Hooks.Provider, "original-provider")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("tmux struct and process_names are deep copied", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "opencode",
|
||||
Tmux: &RuntimeTmuxConfig{
|
||||
ProcessNames: []string{"original-process"},
|
||||
ReadyPromptPrefix: "original-prefix",
|
||||
ReadyDelayMs: 5000,
|
||||
},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Modify result's tmux
|
||||
result.Tmux.ProcessNames[0] = "modified-process"
|
||||
result.Tmux.ReadyPromptPrefix = "modified-prefix"
|
||||
|
||||
// Original should be unchanged
|
||||
if input.Tmux.ProcessNames[0] != "original-process" {
|
||||
t.Errorf("Tmux.ProcessNames was not deep copied - modifications affect original: got %q, want %q",
|
||||
input.Tmux.ProcessNames[0], "original-process")
|
||||
}
|
||||
if input.Tmux.ReadyPromptPrefix != "original-prefix" {
|
||||
t.Errorf("Tmux struct was not deep copied - modifications affect original: got %q, want %q",
|
||||
input.Tmux.ReadyPromptPrefix, "original-prefix")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("instructions struct is deep copied", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "opencode",
|
||||
Instructions: &RuntimeInstructionsConfig{
|
||||
File: "ORIGINAL.md",
|
||||
},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Modify result's instructions
|
||||
result.Instructions.File = "MODIFIED.md"
|
||||
|
||||
// Original should be unchanged
|
||||
if input.Instructions.File != "ORIGINAL.md" {
|
||||
t.Errorf("Instructions struct was not deep copied - modifications affect original: got %q, want %q",
|
||||
input.Instructions.File, "ORIGINAL.md")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nil nested structs remain nil", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
input := &RuntimeConfig{
|
||||
Command: "claude",
|
||||
// All nested structs left nil
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// Nil nested structs should remain nil (not get zero-value structs)
|
||||
if result.Session != nil {
|
||||
t.Error("Session should remain nil when input has nil Session")
|
||||
}
|
||||
if result.Hooks != nil {
|
||||
t.Error("Hooks should remain nil when input has nil Hooks")
|
||||
}
|
||||
if result.Tmux != nil {
|
||||
t.Error("Tmux should remain nil when input has nil Tmux")
|
||||
}
|
||||
if result.Instructions != nil {
|
||||
t.Error("Instructions should remain nil when input has nil Instructions")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("partial nested struct is copied without defaults", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// User defines partial Tmux config - only ProcessNames, no other fields
|
||||
input := &RuntimeConfig{
|
||||
Command: "opencode",
|
||||
Tmux: &RuntimeTmuxConfig{
|
||||
ProcessNames: []string{"opencode"},
|
||||
// ReadyPromptPrefix and ReadyDelayMs left at zero values
|
||||
},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(input)
|
||||
|
||||
// ProcessNames should be copied
|
||||
if len(result.Tmux.ProcessNames) != 1 || result.Tmux.ProcessNames[0] != "opencode" {
|
||||
t.Errorf("Tmux.ProcessNames not copied correctly: got %v", result.Tmux.ProcessNames)
|
||||
}
|
||||
// Zero values should remain zero (fillRuntimeDefaults doesn't fill nested defaults)
|
||||
if result.Tmux.ReadyDelayMs != 0 {
|
||||
t.Errorf("Tmux.ReadyDelayMs should be 0 (unfilled), got %d", result.Tmux.ReadyDelayMs)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestLookupAgentConfigPreservesCustomFields verifies that custom agents
|
||||
// have all their settings preserved through the lookup chain.
|
||||
func TestLookupAgentConfigPreservesCustomFields(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
townSettings := &TownSettings{
|
||||
Type: "town-settings",
|
||||
Version: 1,
|
||||
DefaultAgent: "claude",
|
||||
Agents: map[string]*RuntimeConfig{
|
||||
"opencode-mayor": {
|
||||
Command: "opencode",
|
||||
Args: []string{"-m", "gpt-5"},
|
||||
PromptMode: "none",
|
||||
Env: map[string]string{"OPENCODE_PERMISSION": `{"*":"allow"}`},
|
||||
Tmux: &RuntimeTmuxConfig{
|
||||
ProcessNames: []string{"opencode", "node"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
rc := lookupAgentConfig("opencode-mayor", townSettings, nil)
|
||||
|
||||
if rc == nil {
|
||||
t.Fatal("lookupAgentConfig returned nil for custom agent")
|
||||
}
|
||||
if rc.PromptMode != "none" {
|
||||
t.Errorf("PromptMode: got %q, want %q - setting was lost in lookup chain", rc.PromptMode, "none")
|
||||
}
|
||||
if rc.Command != "opencode" {
|
||||
t.Errorf("Command: got %q, want %q", rc.Command, "opencode")
|
||||
}
|
||||
if rc.Env["OPENCODE_PERMISSION"] != `{"*":"allow"}` {
|
||||
t.Errorf("Env was not preserved: got %v", rc.Env)
|
||||
}
|
||||
if rc.Tmux == nil || len(rc.Tmux.ProcessNames) != 2 {
|
||||
t.Errorf("Tmux.ProcessNames not preserved: got %+v", rc.Tmux)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildCommandWithPromptRespectsPromptModeNone verifies that when PromptMode
|
||||
// is "none", the prompt is not appended to the command.
|
||||
func TestBuildCommandWithPromptRespectsPromptModeNone(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
rc := &RuntimeConfig{
|
||||
Command: "opencode",
|
||||
Args: []string{"-m", "gpt-5"},
|
||||
PromptMode: "none",
|
||||
}
|
||||
|
||||
// Build command with a prompt that should be ignored
|
||||
cmd := rc.BuildCommandWithPrompt("This prompt should not appear")
|
||||
|
||||
if strings.Contains(cmd, "This prompt should not appear") {
|
||||
t.Errorf("prompt_mode=none should prevent prompt from being added, got: %s", cmd)
|
||||
}
|
||||
if !strings.HasPrefix(cmd, "opencode") {
|
||||
t.Errorf("Command should start with opencode, got: %s", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
// TestRoleAgentConfigWithCustomAgent tests role-based agent resolution with
// custom agents that have special settings like prompt_mode: "none".
//
// This test mirrors manual verification using settings/config.json:
//
//	{
//	  "type": "town-settings",
//	  "version": 1,
//	  "default_agent": "claude-opus",
//	  "agents": {
//	    "amp-yolo": {
//	      "command": "amp",
//	      "args": ["--dangerously-allow-all"]
//	    },
//	    "opencode-mayor": {
//	      "command": "opencode",
//	      "args": ["-m", "openai/gpt-5.2-codex"],
//	      "prompt_mode": "none",
//	      "process_names": ["opencode", "node"],
//	      "env": {
//	        "OPENCODE_PERMISSION": "{\"*\":\"allow\"}"
//	      }
//	    }
//	  },
//	  "role_agents": {
//	    "crew": "claude-sonnet",
//	    "deacon": "claude-haiku",
//	    "mayor": "opencode-mayor",
//	    "polecat": "claude-opus",
//	    "refinery": "claude-opus",
//	    "witness": "claude-sonnet"
//	  }
//	}
//
// Manual test procedure:
//  1. Set role_agents.mayor to each agent (claude, gemini, codex, cursor, auggie, amp, opencode)
//  2. Run: gt start
//  3. Verify mayor starts with correct agent config
//  4. Run: GT_NUKE_ACKNOWLEDGED=1 gt down --nuke
//  5. Repeat for all 7 built-in agents
func TestRoleAgentConfigWithCustomAgent(t *testing.T) {
	t.Parallel()

	// NOTE(review): the subtests below are deliberately NOT parallel — they
	// share the townRoot/rigPath fixtures written once here. Confirm before
	// adding t.Parallel() inside them.
	townRoot := t.TempDir()
	rigPath := filepath.Join(townRoot, "testrig")

	// Create town settings mirroring the manual test config
	townSettings := NewTownSettings()
	townSettings.DefaultAgent = "claude-opus"
	townSettings.RoleAgents = map[string]string{
		constants.RoleMayor:    "opencode-mayor",
		constants.RoleDeacon:   "claude-haiku",
		constants.RolePolecat:  "claude-opus",
		constants.RoleRefinery: "claude-opus",
		constants.RoleWitness:  "claude-sonnet",
		constants.RoleCrew:     "claude-sonnet",
	}
	townSettings.Agents = map[string]*RuntimeConfig{
		"opencode-mayor": {
			Command:    "opencode",
			Args:       []string{"-m", "openai/gpt-5.2-codex"},
			PromptMode: "none",
			Env:        map[string]string{"OPENCODE_PERMISSION": `{"*":"allow"}`},
			Tmux: &RuntimeTmuxConfig{
				ProcessNames: []string{"opencode", "node"},
			},
		},
		"amp-yolo": {
			Command: "amp",
			Args:    []string{"--dangerously-allow-all"},
		},
	}
	if err := SaveTownSettings(TownSettingsPath(townRoot), townSettings); err != nil {
		t.Fatalf("SaveTownSettings: %v", err)
	}

	// Create minimal rig settings
	rigSettings := NewRigSettings()
	if err := SaveRigSettings(RigSettingsPath(rigPath), rigSettings); err != nil {
		t.Fatalf("SaveRigSettings: %v", err)
	}

	// Test mayor role gets opencode-mayor with prompt_mode: none
	t.Run("mayor gets opencode-mayor config", func(t *testing.T) {
		rc := ResolveRoleAgentConfig(constants.RoleMayor, townRoot, rigPath)
		if rc == nil {
			t.Fatal("ResolveRoleAgentConfig returned nil for mayor")
		}
		if rc.Command != "opencode" {
			t.Errorf("Command: got %q, want %q", rc.Command, "opencode")
		}
		if rc.PromptMode != "none" {
			t.Errorf("PromptMode: got %q, want %q - critical for opencode", rc.PromptMode, "none")
		}
		if rc.Env["OPENCODE_PERMISSION"] != `{"*":"allow"}` {
			t.Errorf("Env not preserved: got %v", rc.Env)
		}

		// Verify startup beacon is NOT added to command
		cmd := rc.BuildCommandWithPrompt("[GAS TOWN] mayor <- human • cold-start")
		if strings.Contains(cmd, "GAS TOWN") {
			t.Errorf("prompt_mode=none should prevent beacon, got: %s", cmd)
		}
	})

	// Test other roles get their configured agents
	t.Run("deacon gets claude-haiku", func(t *testing.T) {
		rc := ResolveRoleAgentConfig(constants.RoleDeacon, townRoot, rigPath)
		if rc == nil {
			t.Fatal("ResolveRoleAgentConfig returned nil for deacon")
		}
		// claude-haiku is a built-in preset
		// NOTE(review): the second clause is redundant — strings.Contains
		// already covers exact equality with "claude".
		if !strings.Contains(rc.Command, "claude") && rc.Command != "claude" {
			t.Errorf("Command: got %q, want claude-based command", rc.Command)
		}
	})

	t.Run("polecat gets claude-opus", func(t *testing.T) {
		rc := ResolveRoleAgentConfig(constants.RolePolecat, townRoot, rigPath)
		if rc == nil {
			t.Fatal("ResolveRoleAgentConfig returned nil for polecat")
		}
		// Same redundant-clause note as the deacon subtest above applies here.
		if !strings.Contains(rc.Command, "claude") && rc.Command != "claude" {
			t.Errorf("Command: got %q, want claude-based command", rc.Command)
		}
	})
}
|
||||
|
||||
// TestMultipleAgentTypes tests that various built-in agent presets work correctly.
|
||||
// NOTE: Only these are actual built-in presets: claude, gemini, codex, cursor, auggie, amp, opencode.
|
||||
// Variants like "claude-opus", "claude-haiku", "claude-sonnet" are NOT built-in - they need
|
||||
// to be defined as custom agents in TownSettings.Agents if specific model selection is needed.
|
||||
func TestMultipleAgentTypes(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
agentName string
|
||||
expectCommand string
|
||||
isBuiltIn bool // true if this is an actual built-in preset
|
||||
}{
|
||||
{
|
||||
name: "claude built-in preset",
|
||||
agentName: "claude",
|
||||
expectCommand: "claude",
|
||||
isBuiltIn: true,
|
||||
},
|
||||
{
|
||||
name: "codex built-in preset",
|
||||
agentName: "codex",
|
||||
expectCommand: "codex",
|
||||
isBuiltIn: true,
|
||||
},
|
||||
{
|
||||
name: "gemini built-in preset",
|
||||
agentName: "gemini",
|
||||
expectCommand: "gemini",
|
||||
isBuiltIn: true,
|
||||
},
|
||||
{
|
||||
name: "amp built-in preset",
|
||||
agentName: "amp",
|
||||
expectCommand: "amp",
|
||||
isBuiltIn: true,
|
||||
},
|
||||
{
|
||||
name: "opencode built-in preset",
|
||||
agentName: "opencode",
|
||||
expectCommand: "opencode",
|
||||
isBuiltIn: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Skip if agent binary not installed (prevents flaky CI failures)
|
||||
skipIfAgentBinaryMissing(t, tc.agentName)
|
||||
|
||||
// Verify it's actually a built-in preset
|
||||
if tc.isBuiltIn {
|
||||
preset := GetAgentPresetByName(tc.agentName)
|
||||
if preset == nil {
|
||||
t.Errorf("%s should be a built-in preset but GetAgentPresetByName returned nil", tc.agentName)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
townRoot := t.TempDir()
|
||||
rigPath := filepath.Join(townRoot, "testrig")
|
||||
|
||||
townSettings := NewTownSettings()
|
||||
townSettings.DefaultAgent = "claude"
|
||||
townSettings.RoleAgents = map[string]string{
|
||||
constants.RoleMayor: tc.agentName,
|
||||
}
|
||||
if err := SaveTownSettings(TownSettingsPath(townRoot), townSettings); err != nil {
|
||||
t.Fatalf("SaveTownSettings: %v", err)
|
||||
}
|
||||
|
||||
rigSettings := NewRigSettings()
|
||||
if err := SaveRigSettings(RigSettingsPath(rigPath), rigSettings); err != nil {
|
||||
t.Fatalf("SaveRigSettings: %v", err)
|
||||
}
|
||||
|
||||
rc := ResolveRoleAgentConfig(constants.RoleMayor, townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatalf("ResolveRoleAgentConfig returned nil for %s", tc.agentName)
|
||||
}
|
||||
|
||||
// Allow path-based commands (e.g., /opt/homebrew/bin/claude)
|
||||
if !strings.Contains(rc.Command, tc.expectCommand) {
|
||||
t.Errorf("Command: got %q, want command containing %q", rc.Command, tc.expectCommand)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCustomClaudeVariants tests that Claude model variants (opus, sonnet, haiku) need
|
||||
// to be explicitly defined as custom agents since they are NOT built-in presets.
|
||||
func TestCustomClaudeVariants(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Verify that claude-opus/sonnet/haiku are NOT built-in presets
|
||||
variants := []string{"claude-opus", "claude-sonnet", "claude-haiku"}
|
||||
for _, variant := range variants {
|
||||
if preset := GetAgentPresetByName(variant); preset != nil {
|
||||
t.Errorf("%s should NOT be a built-in preset (only 'claude' is), but GetAgentPresetByName returned non-nil", variant)
|
||||
}
|
||||
}
|
||||
|
||||
// Test that custom claude variants work when explicitly defined
|
||||
townRoot := t.TempDir()
|
||||
rigPath := filepath.Join(townRoot, "testrig")
|
||||
|
||||
townSettings := NewTownSettings()
|
||||
townSettings.DefaultAgent = "claude"
|
||||
townSettings.RoleAgents = map[string]string{
|
||||
constants.RoleMayor: "claude-opus",
|
||||
constants.RoleDeacon: "claude-haiku",
|
||||
}
|
||||
// Define the custom variants
|
||||
townSettings.Agents = map[string]*RuntimeConfig{
|
||||
"claude-opus": {
|
||||
Command: "claude",
|
||||
Args: []string{"--model", "claude-opus-4", "--dangerously-skip-permissions"},
|
||||
},
|
||||
"claude-haiku": {
|
||||
Command: "claude",
|
||||
Args: []string{"--model", "claude-haiku-3", "--dangerously-skip-permissions"},
|
||||
},
|
||||
}
|
||||
if err := SaveTownSettings(TownSettingsPath(townRoot), townSettings); err != nil {
|
||||
t.Fatalf("SaveTownSettings: %v", err)
|
||||
}
|
||||
|
||||
rigSettings := NewRigSettings()
|
||||
if err := SaveRigSettings(RigSettingsPath(rigPath), rigSettings); err != nil {
|
||||
t.Fatalf("SaveRigSettings: %v", err)
|
||||
}
|
||||
|
||||
// Test claude-opus custom agent
|
||||
rc := ResolveRoleAgentConfig(constants.RoleMayor, townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatal("ResolveRoleAgentConfig returned nil for claude-opus")
|
||||
}
|
||||
if !strings.Contains(rc.Command, "claude") {
|
||||
t.Errorf("claude-opus Command: got %q, want claude", rc.Command)
|
||||
}
|
||||
foundModel := false
|
||||
for _, arg := range rc.Args {
|
||||
if arg == "claude-opus-4" {
|
||||
foundModel = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundModel {
|
||||
t.Errorf("claude-opus Args should contain model flag: got %v", rc.Args)
|
||||
}
|
||||
|
||||
// Test claude-haiku custom agent
|
||||
rc = ResolveRoleAgentConfig(constants.RoleDeacon, townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatal("ResolveRoleAgentConfig returned nil for claude-haiku")
|
||||
}
|
||||
foundModel = false
|
||||
for _, arg := range rc.Args {
|
||||
if arg == "claude-haiku-3" {
|
||||
foundModel = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundModel {
|
||||
t.Errorf("claude-haiku Args should contain model flag: got %v", rc.Args)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCustomAgentWithAmp tests custom agent configuration for amp.
|
||||
// This mirrors the manual test: amp-yolo started successfully with custom args.
|
||||
func TestCustomAgentWithAmp(t *testing.T) {
|
||||
skipIfAgentBinaryMissing(t, "amp")
|
||||
t.Parallel()
|
||||
|
||||
townRoot := t.TempDir()
|
||||
rigPath := filepath.Join(townRoot, "testrig")
|
||||
|
||||
townSettings := NewTownSettings()
|
||||
townSettings.DefaultAgent = "claude"
|
||||
townSettings.RoleAgents = map[string]string{
|
||||
constants.RoleMayor: "amp-yolo",
|
||||
}
|
||||
townSettings.Agents = map[string]*RuntimeConfig{
|
||||
"amp-yolo": {
|
||||
Command: "amp",
|
||||
Args: []string{"--dangerously-allow-all"},
|
||||
},
|
||||
}
|
||||
if err := SaveTownSettings(TownSettingsPath(townRoot), townSettings); err != nil {
|
||||
t.Fatalf("SaveTownSettings: %v", err)
|
||||
}
|
||||
|
||||
rigSettings := NewRigSettings()
|
||||
if err := SaveRigSettings(RigSettingsPath(rigPath), rigSettings); err != nil {
|
||||
t.Fatalf("SaveRigSettings: %v", err)
|
||||
}
|
||||
|
||||
rc := ResolveRoleAgentConfig(constants.RoleMayor, townRoot, rigPath)
|
||||
if rc == nil {
|
||||
t.Fatal("ResolveRoleAgentConfig returned nil for amp-yolo")
|
||||
}
|
||||
|
||||
if rc.Command != "amp" {
|
||||
t.Errorf("Command: got %q, want %q", rc.Command, "amp")
|
||||
}
|
||||
if len(rc.Args) != 1 || rc.Args[0] != "--dangerously-allow-all" {
|
||||
t.Errorf("Args: got %v, want [--dangerously-allow-all]", rc.Args)
|
||||
}
|
||||
|
||||
// Verify command generation
|
||||
cmd := rc.BuildCommand()
|
||||
if !strings.Contains(cmd, "amp") {
|
||||
t.Errorf("BuildCommand should contain amp, got: %s", cmd)
|
||||
}
|
||||
if !strings.Contains(cmd, "--dangerously-allow-all") {
|
||||
t.Errorf("BuildCommand should contain custom args, got: %s", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveRoleAgentConfig(t *testing.T) {
|
||||
skipIfAgentBinaryMissing(t, "gemini", "codex")
|
||||
t.Parallel()
|
||||
@@ -2654,8 +3391,9 @@ func TestBuildStartupCommandWithAgentOverride_IncludesGTRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should include GT_ROOT in export
|
||||
if !strings.Contains(cmd, "GT_ROOT="+townRoot) {
|
||||
t.Errorf("expected GT_ROOT=%s in command, got: %q", townRoot, cmd)
|
||||
expected := "GT_ROOT=" + ShellQuote(townRoot)
|
||||
if !strings.Contains(cmd, expected) {
|
||||
t.Errorf("expected %s in command, got: %q", expected, cmd)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2777,97 +3515,3 @@ func TestBuildStartupCommandWithAgentOverride_NoGTAgentWhenNoOverride(t *testing
|
||||
t.Errorf("expected no GT_AGENT in command when no override, got: %q", cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFillRuntimeDefaultsPreservesEnv(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
input *RuntimeConfig
|
||||
wantEnv map[string]string
|
||||
wantNil bool
|
||||
}{
|
||||
{
|
||||
name: "nil input returns default",
|
||||
input: nil,
|
||||
wantNil: false,
|
||||
},
|
||||
{
|
||||
name: "preserves Env map",
|
||||
input: &RuntimeConfig{
|
||||
Command: "test-cmd",
|
||||
Env: map[string]string{
|
||||
"TEST_VAR": "test-value",
|
||||
"JSON_VAR": `{"*":"allow"}`,
|
||||
},
|
||||
},
|
||||
wantEnv: map[string]string{
|
||||
"TEST_VAR": "test-value",
|
||||
"JSON_VAR": `{"*":"allow"}`,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil Env stays nil",
|
||||
input: &RuntimeConfig{
|
||||
Command: "test-cmd",
|
||||
Env: nil,
|
||||
},
|
||||
wantEnv: nil,
|
||||
},
|
||||
{
|
||||
name: "empty Env stays empty",
|
||||
input: &RuntimeConfig{
|
||||
Command: "test-cmd",
|
||||
Env: map[string]string{},
|
||||
},
|
||||
wantEnv: nil, // Empty map is treated as nil (not copied)
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := fillRuntimeDefaults(tt.input)
|
||||
if result == nil {
|
||||
if !tt.wantNil {
|
||||
t.Fatal("fillRuntimeDefaults returned nil unexpectedly")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if tt.wantEnv == nil {
|
||||
if result.Env != nil && len(result.Env) > 0 {
|
||||
t.Errorf("expected nil/empty Env, got %v", result.Env)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if len(result.Env) != len(tt.wantEnv) {
|
||||
t.Errorf("expected %d env vars, got %d", len(tt.wantEnv), len(result.Env))
|
||||
}
|
||||
for k, want := range tt.wantEnv {
|
||||
if got := result.Env[k]; got != want {
|
||||
t.Errorf("Env[%s] = %q, want %q", k, got, want)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFillRuntimeDefaultsEnvIsCopy(t *testing.T) {
|
||||
t.Parallel()
|
||||
original := &RuntimeConfig{
|
||||
Command: "test-cmd",
|
||||
Env: map[string]string{
|
||||
"ORIGINAL": "value",
|
||||
},
|
||||
}
|
||||
|
||||
result := fillRuntimeDefaults(original)
|
||||
|
||||
// Mutate the result
|
||||
result.Env["MUTATED"] = "yes"
|
||||
|
||||
// Original should be unchanged
|
||||
if _, exists := original.Env["MUTATED"]; exists {
|
||||
t.Error("Mutation of result.Env affected original config")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ func TestMain(m *testing.M) {
|
||||
"cursor-agent",
|
||||
"auggie",
|
||||
"amp",
|
||||
"opencode",
|
||||
}
|
||||
for _, name := range binaries {
|
||||
path := filepath.Join(stubDir, name)
|
||||
|
||||
@@ -39,6 +39,12 @@ type TownSettings struct {
|
||||
Type string `json:"type"` // "town-settings"
|
||||
Version int `json:"version"` // schema version
|
||||
|
||||
// CLITheme controls CLI output color scheme.
|
||||
// Values: "dark", "light", "auto" (default).
|
||||
// "auto" lets the terminal emulator's background color guide the choice.
|
||||
// Can be overridden by GT_THEME environment variable.
|
||||
CLITheme string `json:"cli_theme,omitempty"`
|
||||
|
||||
// DefaultAgent is the name of the agent preset to use by default.
|
||||
// Can be a built-in preset ("claude", "gemini", "codex", "cursor", "auggie", "amp")
|
||||
// or a custom agent name defined in settings/agents.json.
|
||||
@@ -157,10 +163,11 @@ type RigsConfig struct {
|
||||
|
||||
// RigEntry represents a single rig in the registry.
|
||||
type RigEntry struct {
|
||||
GitURL string `json:"git_url"`
|
||||
LocalRepo string `json:"local_repo,omitempty"`
|
||||
AddedAt time.Time `json:"added_at"`
|
||||
BeadsConfig *BeadsConfig `json:"beads,omitempty"`
|
||||
GitURL string `json:"git_url"`
|
||||
LocalRepo string `json:"local_repo,omitempty"`
|
||||
AddedAt time.Time `json:"added_at"`
|
||||
BeadsConfig *BeadsConfig `json:"beads,omitempty"`
|
||||
Crew *CrewRegistryConfig `json:"crew,omitempty"`
|
||||
}
|
||||
|
||||
// BeadsConfig represents beads configuration for a rig.
|
||||
@@ -169,6 +176,18 @@ type BeadsConfig struct {
|
||||
Prefix string `json:"prefix"` // issue prefix
|
||||
}
|
||||
|
||||
// CrewRegistryConfig represents crew configuration for a rig in rigs.json.
|
||||
// This enables cross-machine sync of crew member definitions.
|
||||
type CrewRegistryConfig struct {
|
||||
// Theme selects the naming theme for crew members (e.g., "mad-max", "minerals").
|
||||
// Used when displaying crew member names and for consistency across machines.
|
||||
Theme string `json:"theme,omitempty"`
|
||||
|
||||
// Members lists the crew member names to create on this rig.
|
||||
// Use `gt crew sync` to create missing members from this list.
|
||||
Members []string `json:"members,omitempty"`
|
||||
}
|
||||
|
||||
// CurrentTownVersion is the current schema version for TownConfig.
|
||||
// Version 2: Added Owner and PublicName fields for federation identity.
|
||||
const CurrentTownVersion = 2
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -373,7 +372,7 @@ func (m *Manager) Rename(oldName, newName string) error {
|
||||
}
|
||||
|
||||
// Pristine ensures a crew worker is up-to-date with remote.
|
||||
// It runs git pull --rebase and bd sync.
|
||||
// It runs git pull --rebase.
|
||||
func (m *Manager) Pristine(name string) (*PristineResult, error) {
|
||||
if err := validateCrewName(name); err != nil {
|
||||
return nil, err
|
||||
@@ -403,23 +402,12 @@ func (m *Manager) Pristine(name string) (*PristineResult, error) {
|
||||
result.Pulled = true
|
||||
}
|
||||
|
||||
// Run bd sync
|
||||
if err := m.runBdSync(crewPath); err != nil {
|
||||
result.SyncError = err.Error()
|
||||
} else {
|
||||
result.Synced = true
|
||||
}
|
||||
// Note: With Dolt backend, beads changes are persisted immediately - no sync needed
|
||||
result.Synced = true
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// runBdSync runs bd sync in the given directory.
|
||||
func (m *Manager) runBdSync(dir string) error {
|
||||
cmd := exec.Command("bd", "sync")
|
||||
cmd.Dir = dir
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// PristineResult captures the results of a pristine operation.
|
||||
type PristineResult struct {
|
||||
Name string `json:"name"`
|
||||
@@ -503,7 +491,7 @@ func (m *Manager) Start(name string, opts StartOptions) error {
|
||||
if topic == "" {
|
||||
topic = "start"
|
||||
}
|
||||
beacon := session.FormatStartupNudge(session.StartupNudgeConfig{
|
||||
beacon := session.FormatStartupBeacon(session.BeaconConfig{
|
||||
Recipient: address,
|
||||
Sender: "human",
|
||||
Topic: topic,
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -87,6 +88,7 @@ func (w *ConvoyWatcher) run() {
|
||||
func (w *ConvoyWatcher) watchActivity() error {
|
||||
cmd := exec.CommandContext(w.ctx, "bd", "activity", "--follow", "--town", "--json")
|
||||
cmd.Dir = w.townRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find bd executable
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
@@ -168,6 +170,7 @@ func (w *ConvoyWatcher) getTrackingConvoys(issueID string) []string {
|
||||
`, safeIssueID, safeIssueID)
|
||||
|
||||
queryCmd := exec.Command("sqlite3", "-json", dbPath, query)
|
||||
queryCmd.Env = os.Environ() // Inherit PATH to find sqlite3 executable
|
||||
var stdout bytes.Buffer
|
||||
queryCmd.Stdout = &stdout
|
||||
|
||||
@@ -200,6 +203,7 @@ func (w *ConvoyWatcher) checkConvoyCompletion(convoyID string) {
|
||||
strings.ReplaceAll(convoyID, "'", "''"))
|
||||
|
||||
queryCmd := exec.Command("sqlite3", "-json", dbPath, convoyQuery)
|
||||
queryCmd.Env = os.Environ() // Inherit PATH to find sqlite3 executable
|
||||
var stdout bytes.Buffer
|
||||
queryCmd.Stdout = &stdout
|
||||
|
||||
@@ -224,6 +228,7 @@ func (w *ConvoyWatcher) checkConvoyCompletion(convoyID string) {
|
||||
|
||||
checkCmd := exec.Command("gt", "convoy", "check", convoyID)
|
||||
checkCmd.Dir = w.townRoot
|
||||
checkCmd.Env = os.Environ() // Inherit PATH to find gt executable
|
||||
var checkStdout, checkStderr bytes.Buffer
|
||||
checkCmd.Stdout = &checkStdout
|
||||
checkCmd.Stderr = &checkStderr
|
||||
|
||||
@@ -46,6 +46,7 @@ type Daemon struct {
|
||||
cancel context.CancelFunc
|
||||
curator *feed.Curator
|
||||
convoyWatcher *ConvoyWatcher
|
||||
doltServer *DoltServerManager
|
||||
|
||||
// Mass death detection: track recent session deaths
|
||||
deathsMu sync.Mutex
|
||||
@@ -93,6 +94,15 @@ func New(config *Config) (*Daemon, error) {
|
||||
logger.Printf("Loaded patrol config from %s", PatrolConfigFile(config.TownRoot))
|
||||
}
|
||||
|
||||
// Initialize Dolt server manager if configured
|
||||
var doltServer *DoltServerManager
|
||||
if patrolConfig != nil && patrolConfig.Patrols != nil && patrolConfig.Patrols.DoltServer != nil {
|
||||
doltServer = NewDoltServerManager(config.TownRoot, patrolConfig.Patrols.DoltServer, logger.Printf)
|
||||
if doltServer.IsEnabled() {
|
||||
logger.Printf("Dolt server management enabled (port %d)", patrolConfig.Patrols.DoltServer.Port)
|
||||
}
|
||||
}
|
||||
|
||||
return &Daemon{
|
||||
config: config,
|
||||
patrolConfig: patrolConfig,
|
||||
@@ -100,6 +110,7 @@ func New(config *Config) (*Daemon, error) {
|
||||
logger: logger,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
doltServer: doltServer,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -219,6 +230,10 @@ func (d *Daemon) heartbeat(state *State) {
|
||||
|
||||
d.logger.Println("Heartbeat starting (recovery-focused)")
|
||||
|
||||
// 0. Ensure Dolt server is running (if configured)
|
||||
// This must happen before beads operations that depend on Dolt.
|
||||
d.ensureDoltServerRunning()
|
||||
|
||||
// 1. Ensure Deacon is running (restart if dead)
|
||||
// Check patrol config - can be disabled in mayor/daemon.json
|
||||
if IsPatrolEnabled(d.patrolConfig, "deacon") {
|
||||
@@ -292,6 +307,18 @@ func (d *Daemon) heartbeat(state *State) {
|
||||
d.logger.Printf("Heartbeat complete (#%d)", state.HeartbeatCount)
|
||||
}
|
||||
|
||||
// ensureDoltServerRunning ensures the Dolt SQL server is running if configured.
|
||||
// This provides the backend for beads database access in server mode.
|
||||
func (d *Daemon) ensureDoltServerRunning() {
|
||||
if d.doltServer == nil || !d.doltServer.IsEnabled() {
|
||||
return
|
||||
}
|
||||
|
||||
if err := d.doltServer.EnsureRunning(); err != nil {
|
||||
d.logger.Printf("Error ensuring Dolt server is running: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeaconRole is the role name for the Deacon's handoff bead.
|
||||
const DeaconRole = "deacon"
|
||||
|
||||
@@ -569,7 +596,7 @@ func (d *Daemon) isRigOperational(rigName string) (bool, string) {
|
||||
d.logger.Printf("Warning: no wisp config for %s - parked state may have been lost", rigName)
|
||||
}
|
||||
|
||||
// Check rig status - parked and docked rigs should not have agents auto-started
|
||||
// Check wisp layer first (local/ephemeral overrides)
|
||||
status := cfg.GetString("status")
|
||||
switch status {
|
||||
case "parked":
|
||||
@@ -578,6 +605,25 @@ func (d *Daemon) isRigOperational(rigName string) (bool, string) {
|
||||
return false, "rig is docked"
|
||||
}
|
||||
|
||||
// Check rig bead labels (global/synced docked status)
|
||||
// This is the persistent docked state set by 'gt rig dock'
|
||||
rigPath := filepath.Join(d.config.TownRoot, rigName)
|
||||
if rigCfg, err := rig.LoadRigConfig(rigPath); err == nil && rigCfg.Beads != nil {
|
||||
rigBeadID := fmt.Sprintf("%s-rig-%s", rigCfg.Beads.Prefix, rigName)
|
||||
rigBeadsDir := beads.ResolveBeadsDir(rigPath)
|
||||
bd := beads.NewWithBeadsDir(rigPath, rigBeadsDir)
|
||||
if issue, err := bd.Show(rigBeadID); err == nil {
|
||||
for _, label := range issue.Labels {
|
||||
if label == "status:docked" {
|
||||
return false, "rig is docked (global)"
|
||||
}
|
||||
if label == "status:parked" {
|
||||
return false, "rig is parked (global)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check auto_restart config
|
||||
// If explicitly blocked (nil), auto-restart is disabled
|
||||
if cfg.IsBlocked("auto_restart") {
|
||||
@@ -666,6 +712,15 @@ func (d *Daemon) shutdown(state *State) error { //nolint:unparam // error return
|
||||
d.logger.Println("Convoy watcher stopped")
|
||||
}
|
||||
|
||||
// Stop Dolt server if we're managing it
|
||||
if d.doltServer != nil && d.doltServer.IsEnabled() && !d.doltServer.IsExternal() {
|
||||
if err := d.doltServer.Stop(); err != nil {
|
||||
d.logger.Printf("Warning: failed to stop Dolt server: %v", err)
|
||||
} else {
|
||||
d.logger.Println("Dolt server stopped")
|
||||
}
|
||||
}
|
||||
|
||||
state.Running = false
|
||||
if err := SaveState(d.config.TownRoot, state); err != nil {
|
||||
d.logger.Printf("Warning: failed to save final state: %v", err)
|
||||
@@ -1101,6 +1156,7 @@ Manual intervention may be required.`,
|
||||
|
||||
cmd := exec.Command("gt", "mail", "send", witnessAddr, "-s", subject, "-m", body) //nolint:gosec // G204: args are constructed internally
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find gt executable
|
||||
if err := cmd.Run(); err != nil {
|
||||
d.logger.Printf("Warning: failed to notify witness of crashed polecat: %v", err)
|
||||
}
|
||||
|
||||
486
internal/daemon/dolt.go
Normal file
486
internal/daemon/dolt.go
Normal file
@@ -0,0 +1,486 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DoltServerConfig holds configuration for the Dolt SQL server.
|
||||
type DoltServerConfig struct {
|
||||
// Enabled controls whether the daemon manages a Dolt server.
|
||||
Enabled bool `json:"enabled"`
|
||||
|
||||
// External indicates the server is externally managed (daemon monitors only).
|
||||
External bool `json:"external,omitempty"`
|
||||
|
||||
// Port is the MySQL protocol port (default 3306).
|
||||
Port int `json:"port,omitempty"`
|
||||
|
||||
// Host is the bind address (default 127.0.0.1).
|
||||
Host string `json:"host,omitempty"`
|
||||
|
||||
// DataDir is the directory containing Dolt databases.
|
||||
// Each subdirectory becomes a database.
|
||||
DataDir string `json:"data_dir,omitempty"`
|
||||
|
||||
// LogFile is the path to the Dolt server log file.
|
||||
LogFile string `json:"log_file,omitempty"`
|
||||
|
||||
// AutoRestart controls whether to restart on crash.
|
||||
AutoRestart bool `json:"auto_restart,omitempty"`
|
||||
|
||||
// RestartDelay is the delay before restarting after crash.
|
||||
RestartDelay time.Duration `json:"restart_delay,omitempty"`
|
||||
}
|
||||
|
||||
// DefaultDoltServerConfig returns sensible defaults for Dolt server config.
|
||||
func DefaultDoltServerConfig(townRoot string) *DoltServerConfig {
|
||||
return &DoltServerConfig{
|
||||
Enabled: false, // Opt-in
|
||||
Port: 3306,
|
||||
Host: "127.0.0.1",
|
||||
DataDir: filepath.Join(townRoot, "dolt"),
|
||||
LogFile: filepath.Join(townRoot, "daemon", "dolt-server.log"),
|
||||
AutoRestart: true,
|
||||
RestartDelay: 5 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// DoltServerStatus represents the current status of the Dolt server.
|
||||
type DoltServerStatus struct {
|
||||
Running bool `json:"running"`
|
||||
PID int `json:"pid,omitempty"`
|
||||
Port int `json:"port,omitempty"`
|
||||
Host string `json:"host,omitempty"`
|
||||
StartedAt time.Time `json:"started_at,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
Databases []string `json:"databases,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// DoltServerManager manages the Dolt SQL server lifecycle.
|
||||
type DoltServerManager struct {
|
||||
config *DoltServerConfig
|
||||
townRoot string
|
||||
logger func(format string, v ...interface{})
|
||||
|
||||
mu sync.Mutex
|
||||
process *os.Process
|
||||
startedAt time.Time
|
||||
lastCheck time.Time
|
||||
}
|
||||
|
||||
// NewDoltServerManager creates a new Dolt server manager.
|
||||
func NewDoltServerManager(townRoot string, config *DoltServerConfig, logger func(format string, v ...interface{})) *DoltServerManager {
|
||||
if config == nil {
|
||||
config = DefaultDoltServerConfig(townRoot)
|
||||
}
|
||||
return &DoltServerManager{
|
||||
config: config,
|
||||
townRoot: townRoot,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// pidFile returns the path to the Dolt server PID file.
|
||||
func (m *DoltServerManager) pidFile() string {
|
||||
return filepath.Join(m.townRoot, "daemon", "dolt-server.pid")
|
||||
}
|
||||
|
||||
// IsEnabled returns whether Dolt server management is enabled.
|
||||
func (m *DoltServerManager) IsEnabled() bool {
|
||||
return m.config != nil && m.config.Enabled
|
||||
}
|
||||
|
||||
// IsExternal returns whether the Dolt server is externally managed.
|
||||
func (m *DoltServerManager) IsExternal() bool {
|
||||
return m.config != nil && m.config.External
|
||||
}
|
||||
|
||||
// Status returns the current status of the Dolt server.
|
||||
func (m *DoltServerManager) Status() *DoltServerStatus {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
status := &DoltServerStatus{
|
||||
Port: m.config.Port,
|
||||
Host: m.config.Host,
|
||||
}
|
||||
|
||||
// Check if process is running
|
||||
pid, running := m.isRunning()
|
||||
status.Running = running
|
||||
status.PID = pid
|
||||
|
||||
if running {
|
||||
status.StartedAt = m.startedAt
|
||||
|
||||
// Get version
|
||||
if version, err := m.getDoltVersion(); err == nil {
|
||||
status.Version = version
|
||||
}
|
||||
|
||||
// List databases
|
||||
if databases, err := m.listDatabases(); err == nil {
|
||||
status.Databases = databases
|
||||
}
|
||||
}
|
||||
|
||||
return status
|
||||
}
|
||||
|
||||
// isRunning checks if the Dolt server process is running.
|
||||
// Must be called with m.mu held.
|
||||
func (m *DoltServerManager) isRunning() (int, bool) {
|
||||
// First check our tracked process
|
||||
if m.process != nil {
|
||||
if isProcessAlive(m.process) {
|
||||
return m.process.Pid, true
|
||||
}
|
||||
// Process died, clear it
|
||||
m.process = nil
|
||||
}
|
||||
|
||||
// Check PID file
|
||||
data, err := os.ReadFile(m.pidFile())
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Verify process is alive and is dolt
|
||||
process, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
if !isProcessAlive(process) {
|
||||
// Process not running, clean up stale PID file
|
||||
_ = os.Remove(m.pidFile())
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Verify it's actually dolt sql-server
|
||||
if !isDoltSqlServer(pid) {
|
||||
_ = os.Remove(m.pidFile())
|
||||
return 0, false
|
||||
}
|
||||
|
||||
m.process = process
|
||||
return pid, true
|
||||
}
|
||||
|
||||
// isDoltSqlServer checks if a PID is actually a dolt sql-server process.
|
||||
func isDoltSqlServer(pid int) bool {
|
||||
cmd := exec.Command("ps", "-p", strconv.Itoa(pid), "-o", "command=")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
cmdline := strings.TrimSpace(string(output))
|
||||
return strings.Contains(cmdline, "dolt") && strings.Contains(cmdline, "sql-server")
|
||||
}
|
||||
|
||||
// EnsureRunning ensures the Dolt server is running.
|
||||
// If not running, starts it. If running but unhealthy, restarts it.
|
||||
func (m *DoltServerManager) EnsureRunning() error {
|
||||
if !m.IsEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.IsExternal() {
|
||||
// External mode: just check health, don't manage lifecycle
|
||||
return m.checkHealth()
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
pid, running := m.isRunning()
|
||||
if running {
|
||||
// Already running, check health
|
||||
m.lastCheck = time.Now()
|
||||
if err := m.checkHealthLocked(); err != nil {
|
||||
m.logger("Dolt server unhealthy: %v, restarting...", err)
|
||||
m.stopLocked()
|
||||
time.Sleep(m.config.RestartDelay)
|
||||
return m.startLocked()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Not running, start it
|
||||
if pid > 0 {
|
||||
m.logger("Dolt server PID %d is dead, cleaning up and restarting...", pid)
|
||||
}
|
||||
return m.startLocked()
|
||||
}
|
||||
|
||||
// Start starts the Dolt SQL server.
|
||||
func (m *DoltServerManager) Start() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.startLocked()
|
||||
}
|
||||
|
||||
// startLocked starts the Dolt server. Must be called with m.mu held.
|
||||
func (m *DoltServerManager) startLocked() error {
|
||||
// Ensure data directory exists
|
||||
if err := os.MkdirAll(m.config.DataDir, 0755); err != nil {
|
||||
return fmt.Errorf("creating data directory: %w", err)
|
||||
}
|
||||
|
||||
// Check if dolt is installed
|
||||
doltPath, err := exec.LookPath("dolt")
|
||||
if err != nil {
|
||||
return fmt.Errorf("dolt not found in PATH: %w", err)
|
||||
}
|
||||
|
||||
// Build command arguments
|
||||
args := []string{
|
||||
"sql-server",
|
||||
"--host", m.config.Host,
|
||||
"--port", strconv.Itoa(m.config.Port),
|
||||
"--data-dir", m.config.DataDir,
|
||||
}
|
||||
|
||||
// Open log file
|
||||
logFile, err := os.OpenFile(m.config.LogFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening log file: %w", err)
|
||||
}
|
||||
|
||||
// Start dolt sql-server as background process
|
||||
cmd := exec.Command(doltPath, args...)
|
||||
cmd.Dir = m.config.DataDir
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
|
||||
// Detach from this process group so it survives daemon restart
|
||||
setSysProcAttr(cmd)
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
logFile.Close()
|
||||
return fmt.Errorf("starting dolt sql-server: %w", err)
|
||||
}
|
||||
|
||||
// Don't wait for it - it's a long-running server
|
||||
go func() {
|
||||
_ = cmd.Wait()
|
||||
logFile.Close()
|
||||
}()
|
||||
|
||||
m.process = cmd.Process
|
||||
m.startedAt = time.Now()
|
||||
|
||||
// Write PID file
|
||||
if err := os.WriteFile(m.pidFile(), []byte(strconv.Itoa(cmd.Process.Pid)), 0644); err != nil {
|
||||
m.logger("Warning: failed to write PID file: %v", err)
|
||||
}
|
||||
|
||||
m.logger("Started Dolt SQL server (PID %d) on %s:%d", cmd.Process.Pid, m.config.Host, m.config.Port)
|
||||
|
||||
// Wait a moment for server to initialize
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// Verify it started successfully
|
||||
if err := m.checkHealthLocked(); err != nil {
|
||||
m.logger("Warning: Dolt server may not be healthy: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the Dolt SQL server.
|
||||
func (m *DoltServerManager) Stop() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.stopLocked()
|
||||
}
|
||||
|
||||
// stopLocked stops the Dolt server. Must be called with m.mu held.
|
||||
func (m *DoltServerManager) stopLocked() error {
|
||||
pid, running := m.isRunning()
|
||||
if !running {
|
||||
return nil
|
||||
}
|
||||
|
||||
m.logger("Stopping Dolt SQL server (PID %d)...", pid)
|
||||
|
||||
process, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return nil // Already gone
|
||||
}
|
||||
|
||||
// Send termination signal for graceful shutdown
|
||||
if err := sendTermSignal(process); err != nil {
|
||||
m.logger("Warning: failed to send termination signal: %v", err)
|
||||
}
|
||||
|
||||
// Wait for graceful shutdown (up to 5 seconds)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for i := 0; i < 50; i++ {
|
||||
if !isProcessAlive(process) {
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
m.logger("Dolt SQL server stopped gracefully")
|
||||
case <-time.After(5 * time.Second):
|
||||
// Force kill
|
||||
m.logger("Dolt SQL server did not stop gracefully, forcing termination")
|
||||
_ = sendKillSignal(process)
|
||||
}
|
||||
|
||||
// Clean up
|
||||
_ = os.Remove(m.pidFile())
|
||||
m.process = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkHealth checks if the Dolt server is healthy (can accept connections).
|
||||
func (m *DoltServerManager) checkHealth() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.checkHealthLocked()
|
||||
}
|
||||
|
||||
// checkHealthLocked checks health. Must be called with m.mu held.
|
||||
func (m *DoltServerManager) checkHealthLocked() error {
|
||||
// Try to connect via MySQL protocol
|
||||
// Use dolt sql -q to test connectivity
|
||||
cmd := exec.Command("dolt", "sql",
|
||||
"--host", m.config.Host,
|
||||
"--port", strconv.Itoa(m.config.Port),
|
||||
"--no-auto-commit",
|
||||
"-q", "SELECT 1",
|
||||
)
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("health check failed: %w (%s)", err, strings.TrimSpace(stderr.String()))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDoltVersion returns the Dolt server version.
|
||||
func (m *DoltServerManager) getDoltVersion() (string, error) {
|
||||
cmd := exec.Command("dolt", "version")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Parse "dolt version X.Y.Z"
|
||||
line := strings.TrimSpace(string(output))
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) >= 3 {
|
||||
return parts[2], nil
|
||||
}
|
||||
return line, nil
|
||||
}
|
||||
|
||||
// listDatabases returns the list of databases in the Dolt server.
|
||||
func (m *DoltServerManager) listDatabases() ([]string, error) {
|
||||
cmd := exec.Command("dolt", "sql",
|
||||
"--host", m.config.Host,
|
||||
"--port", strconv.Itoa(m.config.Port),
|
||||
"--no-auto-commit",
|
||||
"-q", "SHOW DATABASES",
|
||||
"--result-format", "json",
|
||||
)
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse JSON output
|
||||
var result struct {
|
||||
Rows []struct {
|
||||
Database string `json:"Database"`
|
||||
} `json:"rows"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(output, &result); err != nil {
|
||||
// Fall back to line parsing
|
||||
var databases []string
|
||||
for _, line := range strings.Split(string(output), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line != "" && line != "Database" && !strings.HasPrefix(line, "+") && !strings.HasPrefix(line, "|") {
|
||||
databases = append(databases, line)
|
||||
}
|
||||
}
|
||||
return databases, nil
|
||||
}
|
||||
|
||||
var databases []string
|
||||
for _, row := range result.Rows {
|
||||
if row.Database != "" && row.Database != "information_schema" {
|
||||
databases = append(databases, row.Database)
|
||||
}
|
||||
}
|
||||
return databases, nil
|
||||
}
|
||||
|
||||
// CountDoltServers returns the count of running dolt sql-server processes.
|
||||
func CountDoltServers() int {
|
||||
cmd := exec.Command("sh", "-c", "pgrep -f 'dolt sql-server' 2>/dev/null | wc -l")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
count, _ := strconv.Atoi(strings.TrimSpace(string(output)))
|
||||
return count
|
||||
}
|
||||
|
||||
// StopAllDoltServers stops all dolt sql-server processes.
|
||||
// Returns (killed, remaining).
|
||||
func StopAllDoltServers(force bool) (int, int) {
|
||||
before := CountDoltServers()
|
||||
if before == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
if force {
|
||||
_ = exec.Command("pkill", "-9", "-f", "dolt sql-server").Run()
|
||||
} else {
|
||||
_ = exec.Command("pkill", "-TERM", "-f", "dolt sql-server").Run()
|
||||
time.Sleep(2 * time.Second)
|
||||
if remaining := CountDoltServers(); remaining > 0 {
|
||||
_ = exec.Command("pkill", "-9", "-f", "dolt sql-server").Run()
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
after := CountDoltServers()
|
||||
killed := before - after
|
||||
if killed < 0 {
|
||||
killed = 0
|
||||
}
|
||||
return killed, after
|
||||
}
|
||||
@@ -40,6 +40,7 @@ func (d *Daemon) ProcessLifecycleRequests() {
|
||||
// Get mail for deacon identity (using gt mail, not bd mail)
|
||||
cmd := exec.Command("gt", "mail", "inbox", "--identity", "deacon/", "--json")
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find gt executable
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
@@ -394,20 +395,6 @@ func (d *Daemon) restartSession(sessionName, identity string) error {
|
||||
_ = d.tmux.AcceptBypassPermissionsWarning(sessionName)
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// GUPP: Gas Town Universal Propulsion Principle
|
||||
// Send startup nudge for predecessor discovery via /resume
|
||||
recipient := identityToBDActor(identity)
|
||||
_ = session.StartupNudge(d.tmux, sessionName, session.StartupNudgeConfig{
|
||||
Recipient: recipient,
|
||||
Sender: "deacon",
|
||||
Topic: "lifecycle-restart",
|
||||
}) // Non-fatal
|
||||
|
||||
// Send propulsion nudge to trigger autonomous execution.
|
||||
// Wait for beacon to be fully processed (needs to be separate prompt)
|
||||
time.Sleep(2 * time.Second)
|
||||
_ = d.tmux.NudgeSession(sessionName, session.PropulsionNudgeForRole(parsed.RoleType, workDir)) // Non-fatal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -463,6 +450,7 @@ func (d *Daemon) getNeedsPreSync(config *beads.RoleConfig, parsed *ParsedIdentit
|
||||
|
||||
// getStartCommand determines the startup command for an agent.
|
||||
// Uses role config if available, then role-based agent selection, then hardcoded defaults.
|
||||
// Includes beacon + role-specific instructions in the CLI prompt.
|
||||
func (d *Daemon) getStartCommand(roleConfig *beads.RoleConfig, parsed *ParsedIdentity) string {
|
||||
// If role config is available, use it
|
||||
if roleConfig != nil && roleConfig.StartCommand != "" {
|
||||
@@ -478,8 +466,22 @@ func (d *Daemon) getStartCommand(roleConfig *beads.RoleConfig, parsed *ParsedIde
|
||||
// Use role-based agent resolution for per-role model selection
|
||||
runtimeConfig := config.ResolveRoleAgentConfig(parsed.RoleType, d.config.TownRoot, rigPath)
|
||||
|
||||
// Build recipient for beacon
|
||||
recipient := identityToBDActor(parsed.RigName + "/" + parsed.RoleType)
|
||||
if parsed.AgentName != "" {
|
||||
recipient = identityToBDActor(parsed.RigName + "/" + parsed.RoleType + "/" + parsed.AgentName)
|
||||
}
|
||||
if parsed.RoleType == "deacon" || parsed.RoleType == "mayor" {
|
||||
recipient = parsed.RoleType
|
||||
}
|
||||
prompt := session.BuildStartupPrompt(session.BeaconConfig{
|
||||
Recipient: recipient,
|
||||
Sender: "daemon",
|
||||
Topic: "lifecycle-restart",
|
||||
}, "Check your hook and begin work.")
|
||||
|
||||
// Build default command using the role-resolved runtime config
|
||||
defaultCmd := "exec " + runtimeConfig.BuildCommand()
|
||||
defaultCmd := "exec " + runtimeConfig.BuildCommandWithPrompt(prompt)
|
||||
if runtimeConfig.Session != nil && runtimeConfig.Session.SessionIDEnv != "" {
|
||||
defaultCmd = config.PrependEnv(defaultCmd, map[string]string{"GT_SESSION_ID_ENV": runtimeConfig.Session.SessionIDEnv})
|
||||
}
|
||||
@@ -497,7 +499,7 @@ func (d *Daemon) getStartCommand(roleConfig *beads.RoleConfig, parsed *ParsedIde
|
||||
TownRoot: d.config.TownRoot,
|
||||
SessionIDEnv: sessionIDEnv,
|
||||
})
|
||||
return config.PrependEnv("exec "+runtimeConfig.BuildCommand(), envVars)
|
||||
return config.PrependEnv("exec "+runtimeConfig.BuildCommandWithPrompt(prompt), envVars)
|
||||
}
|
||||
|
||||
if parsed.RoleType == "crew" {
|
||||
@@ -512,7 +514,7 @@ func (d *Daemon) getStartCommand(roleConfig *beads.RoleConfig, parsed *ParsedIde
|
||||
TownRoot: d.config.TownRoot,
|
||||
SessionIDEnv: sessionIDEnv,
|
||||
})
|
||||
return config.PrependEnv("exec "+runtimeConfig.BuildCommand(), envVars)
|
||||
return config.PrependEnv("exec "+runtimeConfig.BuildCommandWithPrompt(prompt), envVars)
|
||||
}
|
||||
|
||||
return defaultCmd
|
||||
@@ -576,6 +578,7 @@ func (d *Daemon) syncWorkspace(workDir string) {
|
||||
fetchCmd := exec.Command("git", "fetch", "origin")
|
||||
fetchCmd.Dir = workDir
|
||||
fetchCmd.Stderr = &stderr
|
||||
fetchCmd.Env = os.Environ() // Inherit PATH to find git executable
|
||||
if err := fetchCmd.Run(); err != nil {
|
||||
errMsg := strings.TrimSpace(stderr.String())
|
||||
if errMsg == "" {
|
||||
@@ -592,6 +595,7 @@ func (d *Daemon) syncWorkspace(workDir string) {
|
||||
pullCmd := exec.Command("git", "pull", "--rebase", "origin", defaultBranch)
|
||||
pullCmd.Dir = workDir
|
||||
pullCmd.Stderr = &stderr
|
||||
pullCmd.Env = os.Environ() // Inherit PATH to find git executable
|
||||
if err := pullCmd.Run(); err != nil {
|
||||
errMsg := strings.TrimSpace(stderr.String())
|
||||
if errMsg == "" {
|
||||
@@ -601,21 +605,7 @@ func (d *Daemon) syncWorkspace(workDir string) {
|
||||
// Don't fail - agent can handle conflicts
|
||||
}
|
||||
|
||||
// Reset stderr buffer
|
||||
stderr.Reset()
|
||||
|
||||
// Sync beads
|
||||
bdCmd := exec.Command("bd", "sync")
|
||||
bdCmd.Dir = workDir
|
||||
bdCmd.Stderr = &stderr
|
||||
if err := bdCmd.Run(); err != nil {
|
||||
errMsg := strings.TrimSpace(stderr.String())
|
||||
if errMsg == "" {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
d.logger.Printf("Warning: bd sync failed in %s: %s", workDir, errMsg)
|
||||
// Don't fail - sync issues may be recoverable
|
||||
}
|
||||
// Note: With Dolt backend, beads changes are persisted immediately - no sync needed
|
||||
}
|
||||
|
||||
// closeMessage removes a lifecycle mail message after processing.
|
||||
@@ -625,6 +615,7 @@ func (d *Daemon) closeMessage(id string) error {
|
||||
// Use gt mail delete to actually remove the message
|
||||
cmd := exec.Command("gt", "mail", "delete", id)
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find gt executable
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
@@ -662,6 +653,7 @@ func (d *Daemon) getAgentBeadState(agentBeadID string) (string, error) {
|
||||
func (d *Daemon) getAgentBeadInfo(agentBeadID string) (*AgentBeadInfo, error) {
|
||||
cmd := exec.Command("bd", "show", agentBeadID, "--json")
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find bd executable
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
@@ -799,6 +791,7 @@ func (d *Daemon) checkRigGUPPViolations(rigName string) {
|
||||
// Pattern: <prefix>-<rig>-polecat-<name> (e.g., gt-gastown-polecat-Toast)
|
||||
cmd := exec.Command("bd", "list", "--type=agent", "--json")
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find bd executable
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
@@ -874,6 +867,7 @@ Action needed: Check if agent is alive and responsive. Consider restarting if st
|
||||
|
||||
cmd := exec.Command("gt", "mail", "send", witnessAddr, "-s", subject, "-m", body)
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find gt executable
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
d.logger.Printf("Warning: failed to notify witness of GUPP violation: %v", err)
|
||||
@@ -897,6 +891,7 @@ func (d *Daemon) checkOrphanedWork() {
|
||||
func (d *Daemon) checkRigOrphanedWork(rigName string) {
|
||||
cmd := exec.Command("bd", "list", "--type=agent", "--json")
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find bd executable
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
@@ -970,6 +965,7 @@ Action needed: Either restart the agent or reassign the work.`,
|
||||
|
||||
cmd := exec.Command("gt", "mail", "send", witnessAddr, "-s", subject, "-m", body)
|
||||
cmd.Dir = d.config.TownRoot
|
||||
cmd.Env = os.Environ() // Inherit PATH to find gt executable
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
d.logger.Printf("Warning: failed to notify witness of orphaned work: %v", err)
|
||||
|
||||
32
internal/daemon/proc_unix.go
Normal file
32
internal/daemon/proc_unix.go
Normal file
@@ -0,0 +1,32 @@
|
||||
//go:build unix
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// setSysProcAttr sets platform-specific process attributes.
|
||||
// On Unix, we detach from the process group so the server survives daemon restart.
|
||||
func setSysProcAttr(cmd *exec.Cmd) {
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{
|
||||
Setpgid: true,
|
||||
}
|
||||
}
|
||||
|
||||
// isProcessAlive checks if a process is still running.
|
||||
func isProcessAlive(p *os.Process) bool {
|
||||
return p.Signal(syscall.Signal(0)) == nil
|
||||
}
|
||||
|
||||
// sendTermSignal sends SIGTERM for graceful shutdown.
|
||||
func sendTermSignal(p *os.Process) error {
|
||||
return p.Signal(syscall.SIGTERM)
|
||||
}
|
||||
|
||||
// sendKillSignal sends SIGKILL for forced termination.
|
||||
func sendKillSignal(p *os.Process) error {
|
||||
return p.Signal(syscall.SIGKILL)
|
||||
}
|
||||
38
internal/daemon/proc_windows.go
Normal file
38
internal/daemon/proc_windows.go
Normal file
@@ -0,0 +1,38 @@
|
||||
//go:build windows
|
||||
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// setSysProcAttr sets platform-specific process attributes.
|
||||
// On Windows, no special attributes needed for process group detachment.
|
||||
func setSysProcAttr(cmd *exec.Cmd) {
|
||||
// No-op on Windows - process will run independently
|
||||
}
|
||||
|
||||
// isProcessAlive checks if a process is still running.
|
||||
// On Windows, we try to open the process with minimal access.
|
||||
func isProcessAlive(p *os.Process) bool {
|
||||
// On Windows, FindProcess always succeeds, and Signal(0) may not work.
|
||||
// The best we can do is try to signal and see if it fails.
|
||||
// A killed process will return an error.
|
||||
err := p.Signal(os.Signal(nil))
|
||||
// If err is nil or "not supported", process may still be alive
|
||||
// If err mentions "finished" or similar, process is dead
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// sendTermSignal sends a termination signal.
|
||||
// On Windows, there's no SIGTERM - we use Kill() directly.
|
||||
func sendTermSignal(p *os.Process) error {
|
||||
return p.Kill()
|
||||
}
|
||||
|
||||
// sendKillSignal sends a kill signal.
|
||||
// On Windows, Kill() is the only option.
|
||||
func sendKillSignal(p *os.Process) error {
|
||||
return p.Kill()
|
||||
}
|
||||
@@ -110,9 +110,10 @@ type PatrolConfig struct {
|
||||
|
||||
// PatrolsConfig holds configuration for all patrols.
|
||||
type PatrolsConfig struct {
|
||||
Refinery *PatrolConfig `json:"refinery,omitempty"`
|
||||
Witness *PatrolConfig `json:"witness,omitempty"`
|
||||
Deacon *PatrolConfig `json:"deacon,omitempty"`
|
||||
Refinery *PatrolConfig `json:"refinery,omitempty"`
|
||||
Witness *PatrolConfig `json:"witness,omitempty"`
|
||||
Deacon *PatrolConfig `json:"deacon,omitempty"`
|
||||
DoltServer *DoltServerConfig `json:"dolt_server,omitempty"`
|
||||
}
|
||||
|
||||
// DaemonPatrolConfig is the structure of mayor/daemon.json.
|
||||
|
||||
@@ -80,10 +80,11 @@ func (m *Manager) Start(agentOverride string) error {
|
||||
return fmt.Errorf("ensuring Claude settings: %w", err)
|
||||
}
|
||||
|
||||
// Build startup command with initial prompt for autonomous patrol.
|
||||
// The prompt triggers GUPP: deacon starts patrol immediately without waiting for input.
|
||||
// This prevents the agent from sitting idle at the prompt after SessionStart hooks run.
|
||||
initialPrompt := "I am Deacon. Start patrol: check gt hook, if empty create mol-deacon-patrol wisp and execute it."
|
||||
initialPrompt := session.BuildStartupPrompt(session.BeaconConfig{
|
||||
Recipient: "deacon",
|
||||
Sender: "daemon",
|
||||
Topic: "patrol",
|
||||
}, "I am Deacon. Start patrol: check gt hook, if empty create mol-deacon-patrol wisp and execute it.")
|
||||
startupCmd, err := config.BuildAgentStartupCommandWithAgentOverride("deacon", "", m.townRoot, "", initialPrompt, agentOverride)
|
||||
if err != nil {
|
||||
return fmt.Errorf("building startup command: %w", err)
|
||||
@@ -121,19 +122,6 @@ func (m *Manager) Start(agentOverride string) error {
|
||||
|
||||
time.Sleep(constants.ShutdownNotifyDelay)
|
||||
|
||||
// Inject startup nudge for predecessor discovery via /resume
|
||||
_ = session.StartupNudge(t, sessionID, session.StartupNudgeConfig{
|
||||
Recipient: "deacon",
|
||||
Sender: "daemon",
|
||||
Topic: "patrol",
|
||||
}) // Non-fatal
|
||||
|
||||
// GUPP: Gas Town Universal Propulsion Principle
|
||||
// Send the propulsion nudge to trigger autonomous patrol execution.
|
||||
// Wait for beacon to be fully processed (needs to be separate prompt)
|
||||
time.Sleep(2 * time.Second)
|
||||
_ = t.NudgeSession(sessionID, session.PropulsionNudgeForRole("deacon", deaconDir)) // Non-fatal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,213 +0,0 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BdDaemonCheck verifies that the bd (beads) daemon is running and healthy.
|
||||
// When the daemon fails to start, it surfaces the actual error (e.g., legacy
|
||||
// database detected, repo mismatch) and provides actionable fix commands.
|
||||
type BdDaemonCheck struct {
|
||||
FixableCheck
|
||||
}
|
||||
|
||||
// NewBdDaemonCheck creates a new bd daemon check.
|
||||
func NewBdDaemonCheck() *BdDaemonCheck {
|
||||
return &BdDaemonCheck{
|
||||
FixableCheck: FixableCheck{
|
||||
BaseCheck: BaseCheck{
|
||||
CheckName: "bd-daemon",
|
||||
CheckDescription: "Check if bd (beads) daemon is running",
|
||||
CheckCategory: CategoryInfrastructure,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run checks if the bd daemon is running and healthy.
|
||||
func (c *BdDaemonCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
// Check daemon status
|
||||
cmd := exec.Command("bd", "daemon", "status")
|
||||
cmd.Dir = ctx.TownRoot
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
output := strings.TrimSpace(stdout.String() + stderr.String())
|
||||
|
||||
// Check if daemon is running
|
||||
if err == nil && strings.Contains(output, "Daemon is running") {
|
||||
// Daemon is running, now check health
|
||||
healthCmd := exec.Command("bd", "daemon", "health")
|
||||
healthCmd.Dir = ctx.TownRoot
|
||||
var healthOut bytes.Buffer
|
||||
healthCmd.Stdout = &healthOut
|
||||
_ = healthCmd.Run() // Ignore error, health check is optional
|
||||
|
||||
healthOutput := healthOut.String()
|
||||
if strings.Contains(healthOutput, "HEALTHY") {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "bd daemon is running and healthy",
|
||||
}
|
||||
}
|
||||
|
||||
// Daemon running but unhealthy
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusWarning,
|
||||
Message: "bd daemon is running but may be unhealthy",
|
||||
Details: []string{strings.TrimSpace(healthOutput)},
|
||||
}
|
||||
}
|
||||
|
||||
// Daemon is not running - try to start it and capture any errors
|
||||
startErr := c.tryStartDaemon(ctx)
|
||||
if startErr != nil {
|
||||
// Parse the error to provide specific guidance
|
||||
return c.parseStartError(startErr)
|
||||
}
|
||||
|
||||
// Started successfully
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "bd daemon started successfully",
|
||||
}
|
||||
}
|
||||
|
||||
// tryStartDaemon attempts to start the bd daemon and returns any error output.
|
||||
func (c *BdDaemonCheck) tryStartDaemon(ctx *CheckContext) *startError {
|
||||
cmd := exec.Command("bd", "daemon", "start")
|
||||
cmd.Dir = ctx.TownRoot
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return &startError{
|
||||
output: strings.TrimSpace(stdout.String() + stderr.String()),
|
||||
exitCode: cmd.ProcessState.ExitCode(),
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// startError holds information about a failed daemon start.
|
||||
type startError struct {
|
||||
output string
|
||||
exitCode int
|
||||
}
|
||||
|
||||
// parseStartError analyzes the error output and returns a helpful CheckResult.
|
||||
func (c *BdDaemonCheck) parseStartError(err *startError) *CheckResult {
|
||||
output := err.output
|
||||
|
||||
// Check for legacy database error
|
||||
if strings.Contains(output, "LEGACY DATABASE DETECTED") {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusError,
|
||||
Message: "bd daemon failed: legacy database detected",
|
||||
Details: []string{
|
||||
"Database was created before bd version 0.17.5",
|
||||
"Missing repository fingerprint prevents daemon from starting",
|
||||
},
|
||||
FixHint: "Run 'bd migrate --update-repo-id' to add fingerprint",
|
||||
}
|
||||
}
|
||||
|
||||
// Check for database mismatch error
|
||||
if strings.Contains(output, "DATABASE MISMATCH DETECTED") {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusError,
|
||||
Message: "bd daemon failed: database belongs to different repository",
|
||||
Details: []string{
|
||||
"The .beads database was created for a different git repository",
|
||||
"This can happen if .beads was copied or if the git remote URL changed",
|
||||
},
|
||||
FixHint: "Run 'bd migrate --update-repo-id' if URL changed, or 'rm -rf .beads && bd init' for fresh start",
|
||||
}
|
||||
}
|
||||
|
||||
// Check for already running (not actually an error)
|
||||
if strings.Contains(output, "daemon already running") {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusOK,
|
||||
Message: "bd daemon is already running",
|
||||
}
|
||||
}
|
||||
|
||||
// Check for permission/lock errors
|
||||
if strings.Contains(output, "lock") || strings.Contains(output, "permission") {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusError,
|
||||
Message: "bd daemon failed: lock or permission issue",
|
||||
Details: []string{output},
|
||||
FixHint: "Check if another bd daemon is running, or remove .beads/daemon.lock",
|
||||
}
|
||||
}
|
||||
|
||||
// Check for database corruption
|
||||
if strings.Contains(output, "corrupt") || strings.Contains(output, "malformed") {
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusError,
|
||||
Message: "bd daemon failed: database may be corrupted",
|
||||
Details: []string{output},
|
||||
FixHint: "Run 'bd repair' or 'rm .beads/issues.db && bd sync --from-main'",
|
||||
}
|
||||
}
|
||||
|
||||
// Generic error with full output
|
||||
details := []string{output}
|
||||
if output == "" {
|
||||
details = []string{"No error output captured (exit code " + string(rune('0'+err.exitCode)) + ")"}
|
||||
}
|
||||
|
||||
return &CheckResult{
|
||||
Name: c.Name(),
|
||||
Status: StatusError,
|
||||
Message: "bd daemon failed to start",
|
||||
Details: details,
|
||||
FixHint: "Check 'bd daemon status' and logs in .beads/daemon.log",
|
||||
}
|
||||
}
|
||||
|
||||
// Fix attempts to start the bd daemon.
|
||||
func (c *BdDaemonCheck) Fix(ctx *CheckContext) error {
|
||||
// First check if it's a legacy database issue
|
||||
startErr := c.tryStartDaemon(ctx)
|
||||
if startErr == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If legacy database, run migrate first
|
||||
if strings.Contains(startErr.output, "LEGACY DATABASE") ||
|
||||
strings.Contains(startErr.output, "DATABASE MISMATCH") {
|
||||
|
||||
migrateCmd := exec.Command("bd", "migrate", "--update-repo-id", "--yes")
|
||||
migrateCmd.Dir = ctx.TownRoot
|
||||
if err := migrateCmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Try starting again
|
||||
startCmd := exec.Command("bd", "daemon", "start")
|
||||
startCmd.Dir = ctx.TownRoot
|
||||
return startCmd.Run()
|
||||
}
|
||||
|
||||
// For other errors, just try to start
|
||||
startCmd := exec.Command("bd", "daemon", "start")
|
||||
startCmd.Dir = ctx.TownRoot
|
||||
return startCmd.Run()
|
||||
}
|
||||
@@ -62,6 +62,7 @@ func (c *BeadsDatabaseCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
}
|
||||
|
||||
// If database file is empty but JSONL has content, this is the bug
|
||||
// Note: This check is for SQLite backend; Dolt backend doesn't use these files
|
||||
if dbErr == nil && dbInfo.Size() == 0 {
|
||||
if jsonlErr == nil && jsonlInfo.Size() > 0 {
|
||||
return &CheckResult{
|
||||
@@ -72,7 +73,7 @@ func (c *BeadsDatabaseCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
"This can cause 'table issues has no column named pinned' errors",
|
||||
"The database needs to be rebuilt from the JSONL file",
|
||||
},
|
||||
FixHint: "Run 'gt doctor --fix' or delete issues.db and run 'bd sync --from-main'",
|
||||
FixHint: "Run 'gt doctor --fix' or delete issues.db and run 'bd import'",
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -113,6 +114,7 @@ func (c *BeadsDatabaseCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
}
|
||||
|
||||
// Fix attempts to rebuild the database from JSONL.
|
||||
// Note: This fix is for SQLite backend. With Dolt backend, this is a no-op.
|
||||
func (c *BeadsDatabaseCheck) Fix(ctx *CheckContext) error {
|
||||
beadsDir := filepath.Join(ctx.TownRoot, ".beads")
|
||||
issuesDB := filepath.Join(beadsDir, "issues.db")
|
||||
@@ -128,8 +130,8 @@ func (c *BeadsDatabaseCheck) Fix(ctx *CheckContext) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Run bd sync to rebuild from JSONL
|
||||
cmd := exec.Command("bd", "sync", "--from-main")
|
||||
// Run bd import to rebuild from JSONL
|
||||
cmd := exec.Command("bd", "import")
|
||||
cmd.Dir = ctx.TownRoot
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
@@ -152,7 +154,7 @@ func (c *BeadsDatabaseCheck) Fix(ctx *CheckContext) error {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := exec.Command("bd", "sync", "--from-main")
|
||||
cmd := exec.Command("bd", "import")
|
||||
cmd.Dir = ctx.RigPath()
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
@@ -333,7 +333,12 @@ func TestNewRoleLabelCheck(t *testing.T) {
|
||||
func TestRoleLabelCheck_NoBeadsDir(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Inject empty mock to skip exec.LookPath("bd") check
|
||||
// (bd may not be installed on all CI platforms like Windows)
|
||||
mock := &mockBeadShower{beads: map[string]*beads.Issue{}}
|
||||
|
||||
check := NewRoleLabelCheck()
|
||||
check.beadShower = mock
|
||||
ctx := &CheckContext{TownRoot: tmpDir}
|
||||
|
||||
result := check.Run(ctx)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/boot"
|
||||
"github.com/steveyegge/gastown/internal/session"
|
||||
)
|
||||
|
||||
// BootHealthCheck verifies Boot watchdog health.
|
||||
@@ -63,9 +64,9 @@ func (c *BootHealthCheck) Run(ctx *CheckContext) *CheckResult {
|
||||
// Check 2: Session alive
|
||||
sessionAlive := b.IsSessionAlive()
|
||||
if sessionAlive {
|
||||
details = append(details, fmt.Sprintf("Session: %s (alive)", boot.SessionName))
|
||||
details = append(details, fmt.Sprintf("Session: %s (alive)", session.BootSessionName()))
|
||||
} else {
|
||||
details = append(details, fmt.Sprintf("Session: %s (not running)", boot.SessionName))
|
||||
details = append(details, fmt.Sprintf("Session: %s (not running)", session.BootSessionName()))
|
||||
}
|
||||
|
||||
// Check 3: Last execution status
|
||||
|
||||
@@ -335,8 +335,8 @@ func (c *ClaudeSettingsCheck) checkSettings(path, _ string) []string {
|
||||
// All templates should have:
|
||||
// 1. enabledPlugins
|
||||
// 2. PATH export in hooks
|
||||
// 3. Stop hook with gt costs record (for autonomous)
|
||||
// 4. gt nudge deacon session-started in SessionStart
|
||||
// 3. gt nudge deacon session-started in SessionStart
|
||||
// Note: Stop hook was removed (gt-quoj) - cost tracking is disabled
|
||||
|
||||
// Check enabledPlugins
|
||||
if _, ok := actual["enabledPlugins"]; !ok {
|
||||
@@ -359,10 +359,9 @@ func (c *ClaudeSettingsCheck) checkSettings(path, _ string) []string {
|
||||
missing = append(missing, "deacon nudge")
|
||||
}
|
||||
|
||||
// Check Stop hook exists with gt costs record (for all roles)
|
||||
if !c.hookHasPattern(hooks, "Stop", "gt costs record") {
|
||||
missing = append(missing, "Stop hook")
|
||||
}
|
||||
// Note: Stop hook with gt costs record was removed in gt-quoj.
|
||||
// Cost tracking is disabled - Claude Code doesn't expose session costs.
|
||||
// The Stop hook was causing 30s timeouts on session stop with no benefit.
|
||||
|
||||
return missing
|
||||
}
|
||||
|
||||
@@ -56,17 +56,6 @@ func createValidSettings(t *testing.T, path string) {
|
||||
},
|
||||
},
|
||||
},
|
||||
"Stop": []any{
|
||||
map[string]any{
|
||||
"matcher": "**",
|
||||
"hooks": []any{
|
||||
map[string]any{
|
||||
"type": "command",
|
||||
"command": "gt costs record --session $CLAUDE_SESSION_ID",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -106,17 +95,6 @@ func createStaleSettings(t *testing.T, path string, missingElements ...string) {
|
||||
},
|
||||
},
|
||||
},
|
||||
"Stop": []any{
|
||||
map[string]any{
|
||||
"matcher": "**",
|
||||
"hooks": []any{
|
||||
map[string]any{
|
||||
"type": "command",
|
||||
"command": "gt costs record --session $CLAUDE_SESSION_ID",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -156,9 +134,6 @@ func createStaleSettings(t *testing.T, path string, missingElements ...string) {
|
||||
}
|
||||
}
|
||||
hookObj["hooks"] = filtered
|
||||
case "Stop":
|
||||
hooks := settings["hooks"].(map[string]any)
|
||||
delete(hooks, "Stop")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -374,33 +349,6 @@ func TestClaudeSettingsCheck_MissingDeaconNudge(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestClaudeSettingsCheck_MissingStopHook(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create stale settings missing Stop hook (at correct location)
|
||||
mayorSettings := filepath.Join(tmpDir, "mayor", ".claude", "settings.json")
|
||||
createStaleSettings(t, mayorSettings, "Stop")
|
||||
|
||||
check := NewClaudeSettingsCheck()
|
||||
ctx := &CheckContext{TownRoot: tmpDir}
|
||||
|
||||
result := check.Run(ctx)
|
||||
|
||||
if result.Status != StatusError {
|
||||
t.Errorf("expected StatusError for missing Stop hook, got %v", result.Status)
|
||||
}
|
||||
found := false
|
||||
for _, d := range result.Details {
|
||||
if strings.Contains(d, "Stop hook") {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("expected details to mention Stop hook, got %v", result.Details)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClaudeSettingsCheck_WrongLocationWitness(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
rigName := "testrig"
|
||||
@@ -468,7 +416,7 @@ func TestClaudeSettingsCheck_MultipleStaleFiles(t *testing.T) {
|
||||
createStaleSettings(t, mayorSettings, "PATH")
|
||||
|
||||
deaconSettings := filepath.Join(tmpDir, "deacon", ".claude", "settings.json")
|
||||
createStaleSettings(t, deaconSettings, "Stop")
|
||||
createStaleSettings(t, deaconSettings, "deacon-nudge")
|
||||
|
||||
// Settings inside git repo (witness/rig/.claude/) are wrong location
|
||||
witnessWrong := filepath.Join(tmpDir, rigName, "witness", "rig", ".claude", "settings.json")
|
||||
@@ -1037,8 +985,7 @@ func TestClaudeSettingsCheck_TownRootSettingsWarnsInsteadOfKilling(t *testing.T)
|
||||
"env": {"PATH": "/usr/bin"},
|
||||
"enabledPlugins": ["claude-code-expert"],
|
||||
"hooks": {
|
||||
"SessionStart": [{"matcher": "", "hooks": [{"type": "command", "command": "gt prime"}]}],
|
||||
"Stop": [{"matcher": "", "hooks": [{"type": "command", "command": "gt handoff"}]}]
|
||||
"SessionStart": [{"matcher": "", "hooks": [{"type": "command", "command": "gt prime"}]}]
|
||||
}
|
||||
}`
|
||||
if err := os.WriteFile(staleTownRootSettings, []byte(settingsContent), 0644); err != nil {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user