diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index 8243c2bb..1e487de8 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -96,6 +96,7 @@ {"id":"gt-9a2.8","title":"CloudRunOutpost: Basic implementation","description":"## Overview\n\nBasic CloudRunOutpost implementation. Persistent connections and cost tracking are separate tasks.\n\n## Implementation\n\n```go\ntype CloudRunOutpost struct {\n name string\n project string\n region string\n service string\n maxWorkers int\n client *WorkClient\n workers map[string]*CloudRunWorker\n mu sync.RWMutex\n}\n\nfunc NewCloudRunOutpost(cfg OutpostConfig) (*CloudRunOutpost, error) {\n serviceURL := fmt.Sprintf(\n \"https://%s-%s.a.run.app\",\n cfg.Service, cfg.Region,\n )\n return \u0026CloudRunOutpost{\n name: cfg.Name,\n project: cfg.Project,\n region: cfg.Region,\n service: cfg.Service,\n maxWorkers: cfg.MaxWorkers,\n client: NewWorkClient(serviceURL),\n workers: make(map[string]*CloudRunWorker),\n }, nil\n}\n```\n\n## Spawn\n\n```go\nfunc (o *CloudRunOutpost) Spawn(issue string, cfg WorkerConfig) (Worker, error) {\n req := WorkRequest{\n IssueID: issue,\n Rig: RigConfig{URL: cfg.RigURL, Branch: cfg.GitBranch},\n Beads: BeadsConfig{URL: cfg.BeadsURL, Branch: \"beads-sync\"},\n Branch: \"polecat/\" + issue,\n }\n \n events, err := o.client.DispatchWork(context.Background(), req)\n if err != nil {\n return nil, err\n }\n \n worker := \u0026CloudRunWorker{\n id: uuid.New().String(),\n outpost: o.name,\n issue: issue,\n events: events,\n status: WorkerStatusWorking,\n }\n \n o.mu.Lock()\n o.workers[worker.id] = worker\n o.mu.Unlock()\n \n go worker.monitor()\n return worker, nil\n}\n```\n\n## CloudRunWorker\n\n```go\ntype CloudRunWorker struct {\n id string\n outpost string\n issue string\n status WorkerStatus\n events \u003c-chan WorkEvent\n logs []string\n}\n\nfunc (w *CloudRunWorker) Attach() error {\n return errors.New(\"Cloud Run workers do not support attach\")\n}\n\nfunc (w *CloudRunWorker) Logs() (io.Reader, error) {\n return strings.NewReader(strings.Join(w.logs, \"\\n\")), nil\n}\n```\n\n## Files\n\n- `internal/outpost/cloudrun.go`\n\n## Dependencies\n\nDepends on: gt-9a2.1 (interfaces), gt-9a2.12 (HTTP client)\nBlocks: gt-9a2.13 (persistent connections), gt-9a2.14 (cost tracking)","status":"open","priority":3,"issue_type":"task","created_at":"2025-12-16T18:03:06.803401-08:00","updated_at":"2025-12-16T18:15:39.752892-08:00","dependencies":[{"issue_id":"gt-9a2.8","depends_on_id":"gt-9a2","type":"parent-child","created_at":"2025-12-16T18:03:06.805524-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-9a2.8","depends_on_id":"gt-9a2.1","type":"blocks","created_at":"2025-12-16T18:03:46.081721-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-9a2.8","depends_on_id":"gt-9a2.12","type":"blocks","created_at":"2025-12-16T18:15:54.915831-08:00","created_by":"daemon","metadata":"{}"}]} {"id":"gt-9a2.9","title":"Outpost assignment policy: Smart work routing","description":"## Overview\n\nPolicy engine for deciding which outpost gets which work.\n\n## Policy Configuration\n\n```yaml\npolicy:\n # Default order of preference\n default_preference: [local, gce-burst, cloudrun-burst]\n \n # Rules applied in order\n rules:\n # Background work → Cloud Run (cheap)\n - condition: \"priority \u003e= P3\"\n prefer: cloudrun-burst\n \n # Long tasks → VM (persistent)\n - condition: \"estimated_duration \u003e 30m\"\n prefer: gce-burst\n \n # Specific epic → specific outpost\n - condition: \"epic == gt-abc\"\n prefer: 
local\n```\n\n## Implementation\n\n```go\ntype AssignmentPolicy struct {\n DefaultPreference []string\n Rules []PolicyRule\n}\n\ntype PolicyRule struct {\n Condition string // Simple expression\n Prefer string // Outpost name\n Require string // Must use this outpost\n}\n\nfunc (p *AssignmentPolicy) SelectOutpost(\n issue Issue, \n outposts map[string]Outpost,\n) Outpost {\n // Check rules in order\n for _, rule := range p.Rules {\n if rule.Matches(issue) {\n if op, ok := outposts[rule.Prefer]; ok {\n if op.ActiveWorkers() \u003c op.MaxWorkers() {\n return op\n }\n }\n }\n }\n \n // Fall back to default preference\n for _, name := range p.DefaultPreference {\n if op, ok := outposts[name]; ok {\n if op.ActiveWorkers() \u003c op.MaxWorkers() {\n return op\n }\n }\n }\n \n return nil // All outposts at capacity\n}\n```\n\n## Condition Language\n\nSimple expressions, not a full DSL:\n\n```\npriority \u003e= P3\npriority == P0\nestimated_duration \u003e 30m\nepic == gt-abc\ntype == bug\nlabel contains \"urgent\"\n```\n\n## Files\n\n- `internal/outpost/policy.go`\n- `internal/outpost/condition.go`\n\nDepends on: gt-9a2.3 (config)","status":"open","priority":3,"issue_type":"task","created_at":"2025-12-16T18:03:21.08101-08:00","updated_at":"2025-12-16T18:03:21.08101-08:00","dependencies":[{"issue_id":"gt-9a2.9","depends_on_id":"gt-9a2","type":"parent-child","created_at":"2025-12-16T18:03:21.083256-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-9a2.9","depends_on_id":"gt-9a2.3","type":"blocks","created_at":"2025-12-16T18:03:46.300288-08:00","created_by":"daemon","metadata":"{}"}]} {"id":"gt-9j9","title":"CLI: worker status reporting commands","description":"Worker status reporting CLI for polecats to report progress.\n\n## Commands\n\n### gt worker started\n```\ngt worker started \u003cissue-id\u003e [-m MESSAGE]\n```\nReports work started on issue.\n\n### gt worker progress\n```\ngt worker progress \u003cissue-id\u003e \u003c0-100\u003e [-m MESSAGE]\n```\nReports percentage complete.\n\n### gt worker blocked\n```\ngt worker blocked \u003cissue-id\u003e \u003creason\u003e [-m MESSAGE]\n```\nReports blocked status with reason.\n\n### gt worker completed\n```\ngt worker completed \u003cissue-id\u003e [-m MESSAGE]\n```\nReports task completion.\n\n### gt worker failed\n```\ngt worker failed \u003cissue-id\u003e \u003creason\u003e [-m MESSAGE]\n```\nReports task failure.\n\n## Implementation\nEach command sends mail to refinery with structured content:\n```go\ntype WorkerStatusReport struct {\n IssueID string\n Status string // started|progress|blocked|completed|failed\n Progress int // 0-100 for progress\n Reason string // for blocked/failed\n Message string // optional detail\n ReportedAt time.Time\n}\n```\n\n## Message Format\nSubject: \"[STATUS] \u003cissue-id\u003e: \u003cstatus\u003e\"\nBody: JSON-encoded WorkerStatusReport\n\n## Default Recipient\n```go\n// Determine from context\nfunc getDefaultRecipient() string {\n rig := os.Getenv(\"GT_RIG\")\n if rig != \"\" {\n return rig + \"/refinery\"\n }\n return \"refinery/\"\n}\n```\n\n## New File\ninternal/cmd/worker.go\n\n## PGT Reference\ngastown-py/src/gastown/cli/worker_cmd.py\n\n## Acceptance Criteria\n- [ ] All 5 commands implemented\n- [ ] Status sent as mail to refinery\n- [ ] Structured JSON body for parsing\n- [ ] Works from polecat session context","status":"open","priority":2,"issue_type":"task","created_at":"2025-12-16T14:47:52.795695-08:00","updated_at":"2025-12-16T16:05:26.715967-08:00"} 
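The gt-9j9 record above fixes the wire format (Subject: "[STATUS] <issue-id>: <status>", Body: JSON-encoded WorkerStatusReport) but stops short of the encoding step. A minimal sketch of that step, reusing the struct from the record; the json tags and the formatStatusMail helper are illustrative additions here, not part of the planned internal/cmd/worker.go:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// WorkerStatusReport mirrors the struct sketched in the gt-9j9 record.
// The json tags are an assumption; the record does not specify field names.
type WorkerStatusReport struct {
	IssueID    string    `json:"issue_id"`
	Status     string    `json:"status"`   // started|progress|blocked|completed|failed
	Progress   int       `json:"progress"` // 0-100, meaningful only for progress reports
	Reason     string    `json:"reason"`   // set for blocked/failed
	Message    string    `json:"message"`  // optional detail
	ReportedAt time.Time `json:"reported_at"`
}

// formatStatusMail is a hypothetical helper: it renders the subject line and
// JSON body per the message format in the record.
func formatStatusMail(r WorkerStatusReport) (string, []byte, error) {
	subject := fmt.Sprintf("[STATUS] %s: %s", r.IssueID, r.Status)
	body, err := json.Marshal(r)
	return subject, body, err
}

func main() {
	subject, body, err := formatStatusMail(WorkerStatusReport{
		IssueID:    "gt-9j9",
		Status:     "progress",
		Progress:   40,
		Message:    "worker.go commands scaffolded",
		ReportedAt: time.Now(),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(subject)
	fmt.Println(string(body))
}
```

Keeping the subject human-scannable while the body stays machine-parseable lets the refinery route on the "[STATUS]" prefix without decoding JSON.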
+{"id":"gt-9mb","title":"Recreate beads rigs with fresh clones","description":"## Problem\n\nBeads rigs have schema mismatches (missing thread_id column, etc.) from development iteration.\n\n## Tasks\n\n1. Shut down any active polecats\n2. Delete existing beads rigs: mayor/rig, refinery/rig, witness/rig, crew/*\n3. Re-clone from beads repo\n4. Run bd init in each new clone\n\n## Rigs to recreate\n\n- /Users/stevey/gt/beads/mayor/rig\n- /Users/stevey/gt/beads/refinery/rig\n- /Users/stevey/gt/beads/crew/* (if any)\n\n## Source\n\nClone from beads repo (need to confirm remote URL)","status":"closed","priority":0,"issue_type":"task","created_at":"2025-12-18T19:13:32.208448-08:00","updated_at":"2025-12-18T19:16:27.096311-08:00","closed_at":"2025-12-18T19:16:27.096311-08:00","close_reason":"Recreated all beads rigs: mayor/rig, refinery/rig, crew/main. Fresh clones from git@github.com:steveyegge/beads.git with bd init and hooks installed. 236 issues each."} {"id":"gt-a95","title":"Refinery background daemon mode","description":"Refinery currently only works in foreground mode. Background daemon is stubbed.\n\n## Current State\nmanager.go line 128-129:\n```go\n// Background mode: spawn a new process\n// For MVP, we just mark as running - actual daemon implementation in gt-ov2\nreturn nil\n```\n\n## Requirements\n\n### 1. Background Process Spawning\n```go\nfunc (m *Manager) Start(foreground bool) error {\n if !foreground {\n // Spawn gt refinery start --foreground as subprocess\n cmd := exec.Command(os.Args[0], \"refinery\", \"start\", m.rig.Name, \"--foreground\")\n cmd.Start() // Don't wait\n // Record PID\n }\n}\n```\n\n### 2. PID File Management\n- Write PID to .gastown/refinery.pid\n- Check PID validity on status\n- Clean up stale PID files\n\n### 3. Log Output\n- Redirect stdout/stderr to .gastown/refinery.log\n- Log rotation (optional for MVP)\n\n### 4. Graceful Shutdown\n- Handle SIGTERM/SIGINT\n- Complete current merge before exit\n- Update state to stopped\n\n### 5. 
Health Check\n- Process existence check via kill -0\n- Optional: heartbeat file with timestamp\n\n## Files to Modify\n- internal/refinery/manager.go: Start(), Status(), process spawning\n\n## Acceptance Criteria\n- [ ] gt refinery start \u003crig\u003e spawns background process\n- [ ] gt refinery status shows running with PID\n- [ ] gt refinery stop sends SIGTERM and waits\n- [ ] Logs written to .gastown/refinery.log\n- [ ] Survives terminal close","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-16T14:46:53.366619-08:00","updated_at":"2025-12-16T16:04:06.010761-08:00"} {"id":"gt-a9y","title":"File locking for concurrent access","description":"Add file locking for concurrent access safety.\n\n## At-Risk Files\n- .gastown/swarms.json (or per-swarm state.json)\n- .gastown/refinery.json\n- polecats/\u003cname\u003e/state.json\n- inbox.jsonl files\n\n## Go File Locking\nUse syscall.Flock for advisory locking:\n```go\ntype FileLock struct {\n file *os.File\n}\n\nfunc AcquireLock(path string, timeout time.Duration) (*FileLock, error) {\n f, err := os.OpenFile(path+\".lock\", os.O_CREATE|os.O_RDWR, 0644)\n if err != nil {\n return nil, err\n }\n // Use syscall.Flock with timeout\n}\n\nfunc (l *FileLock) Release() error\n```\n\n## Integration Pattern\n```go\nfunc (m *Manager) saveState(ref *Refinery) error {\n lock, err := AcquireLock(m.stateFile(), 5*time.Second)\n if err != nil {\n return fmt.Errorf(\"could not acquire lock: %w\", err)\n }\n defer lock.Release()\n \n // Read-modify-write cycle\n}\n```\n\n## New Package\ninternal/filelock/\n├── lock.go # FileLock, AcquireLock\n└── lock_test.go\n\n## Apply To\n- internal/refinery/manager.go: loadState/saveState\n- internal/cmd/swarm.go: SwarmStore\n- internal/mail/mailbox.go: Append, rewrite\n- internal/polecat/manager.go: state operations\n\n## Timeout Handling\nDefault 5 second timeout. Return error if lock not acquired.\n\n## Acceptance Criteria\n- [ ] Lock files created (.lock extension)\n- [ ] Timeout on lock contention\n- [ ] All state files protected\n- [ ] Locks released on error paths","status":"open","priority":2,"issue_type":"task","created_at":"2025-12-16T14:48:15.641938-08:00","updated_at":"2025-12-16T16:06:32.441426-08:00"} {"id":"gt-alx","title":"Swarm: ephemeral rig support","description":"PGT has ephemeral rigs for swarms - temporary worker groups that are destroyed after landing.\n\nMissing Features:\n- gt swarm init [--rig \u003cname\u003e|--git-url \u003curl\u003e] [--num-workers N]\n- gt swarm worker add/remove/list \u003crig-id\u003e\n- gt swarm rigs - List ephemeral rigs\n- gt swarm destroy \u003crig-id\u003e - Destroy ephemeral rig\n\nDirectory structure:\n\u003cworkspace\u003e/mayor/workers/\u003crig-id\u003e/\n├── rig.json (metadata)\n├── alice/ (git clone)\n├── bob/\n└── carol/\n\nPGT Reference: gastown-py/src/gastown/ephemeral.py\n\nNote: Beads issue gt-kmn.12 mentions this but implementation is missing.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-12-16T14:47:14.302762-08:00","updated_at":"2025-12-16T16:03:47.902849-08:00","closed_at":"2025-12-16T16:03:47.902849-08:00","close_reason":"Duplicate of gt-kmn.12 which has more detail"} @@ -105,6 +106,7 @@ {"id":"gt-b1g","title":"MVP Cutover: GGT replaces PGT for batch work","description":"When this is closed, stop using town and start using gt.\n\n## Acceptance Criteria\n\n1. gt spawn assigns issue to polecat and starts session\n2. gt spawn --epic spawns workers for all epic children\n3. gt session manages tmux lifecycle \n4. 
gt send / gt inbox work for mail\n5. Refinery processes merge queue with semantic merges\n6. Integration branches created and landed correctly\n7. gt stop --all halts all sessions\n8. One successful test batch completed end-to-end\n\n## What Must Work\n\n- Spawn polecat with issue assignment\n- Spawn workers for epic children\n- Session start/stop/attach\n- Mail send/inbox/read\n- Refinery merge loop (semantic)\n- Integration branch → main landing\n- Witness cleanup protocol\n- Emergency stop\n\n## What Can Be Deferred\n\n- Doctor checks (use PGT)\n- TUI dashboard\n- Plugin system\n- Federation\n- Ephemeral rigs\n- Detailed landing reports\n\n## Test Plan\n\n1. Create epic with 2 tasks, spawn 2 workers\n2. Verify polecats get assigned and sessions start\n3. Simulate task completion\n4. Verify Refinery merges to integration\n5. Verify landing to main\n6. Verify cleanup\n\n## Validation\n\nRun one real batch implementing GGT issues using GGT.\n\n## Note\n\nNo \"swarm IDs\" - just spawn workers for epic, let merge queue coordinate.","status":"open","priority":0,"issue_type":"epic","created_at":"2025-12-16T00:11:09.148751-08:00","updated_at":"2025-12-16T17:26:58.140535-08:00","dependencies":[{"issue_id":"gt-b1g","depends_on_id":"gt-u1j.19","type":"blocks","created_at":"2025-12-16T00:11:36.196292-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-kmn.4","type":"blocks","created_at":"2025-12-16T00:11:36.273483-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-kmn.6","type":"blocks","created_at":"2025-12-16T00:11:36.351097-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-kmn.7","type":"blocks","created_at":"2025-12-16T00:11:36.431641-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-u1j.22","type":"blocks","created_at":"2025-12-16T00:11:36.511124-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-ov2","type":"blocks","created_at":"2025-12-16T00:11:51.609649-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-rm3","type":"blocks","created_at":"2025-12-16T00:11:51.69062-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-u1j.6","type":"blocks","created_at":"2025-12-16T21:36:32.942855-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-b1g","depends_on_id":"gt-u1j.12","type":"blocks","created_at":"2025-12-16T21:36:35.053559-08:00","created_by":"daemon","metadata":"{}"}]} {"id":"gt-b3p","title":"Resource Beads: Leases, locks, and quotas","description":"Resource beads represent reserved resources. Types: vm, lock, slot, quota. Fields: holder, expires, renewable. 
Daemon monitors for expiry and manages contention.","status":"open","priority":2,"issue_type":"epic","created_at":"2025-12-18T18:08:12.745602-08:00","updated_at":"2025-12-18T18:08:12.745602-08:00"} {"id":"gt-bfd","title":"Keepalive signal from bd/gt commands","description":"Every bd and gt command should touch a keepalive file to signal 'agent is alive/working'.\n\n## Implementation\n\nTouch `\u003cworkspace\u003e/.gastown/keepalive.json`:\n```json\n{\"last_command\": \"bd show gt-99m\", \"timestamp\": \"2025-12-18T13:45:00Z\"}\n```\n\n## Usage by Daemon\n\n- Fresh (\u003c 2 min) → agent is working, skip heartbeat\n- Stale (2-5 min) → might be thinking, gentle poke\n- Very stale (\u003e 5 min) → likely idle, safe to interrupt\n\n## Benefits\n\n- Zero cost (just file I/O)\n- Works during long tool calls\n- Doesn't require agent cooperation\n- Foundation for smarter backoff strategies","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-18T14:19:26.241957-08:00","updated_at":"2025-12-18T14:19:26.241957-08:00","dependencies":[{"issue_id":"gt-bfd","depends_on_id":"gt-99m","type":"blocks","created_at":"2025-12-18T14:19:46.407664-08:00","created_by":"daemon"}]} +{"id":"gt-bzd","title":"beads: Stop searching upward when .beads found","description":"## Problem\n\nWhen running bd commands in a nested directory structure with multiple .beads directories, bd shows a confusing warning:\n\n```\n╔══════════════════════════════════════════════════════════════════════════╗\n║ WARNING: 2 beads databases detected in directory hierarchy ║\n╠══════════════════════════════════════════════════════════════════════════╣\n║ Multiple databases can cause confusion and database pollution. ║\n║ ║\n║ /Users/stevey/gt/gastown/.beads (261 issues) ║\n║ /Users/stevey/gt/.beads (21 issues) ║\n║ ║\n║ WARNING: Not using the closest database! Check your BEADS_DB setting. ║\n║ ║\n║ RECOMMENDED: Consolidate or remove unused databases to avoid confusion. ║\n╚══════════════════════════════════════════════════════════════════════════╝\n```\n\n## Why This Is Wrong\n\nIn Gas Town, nested .beads directories are **intentional and necessary**:\n- Town level: /Users/stevey/gt/.beads (mail, town-level issues)\n- Rig level: /Users/stevey/gt/gastown/.beads (gastown project issues)\n- Worker level: polecats have their own beads in worktrees\n\nThese are **unrelated** beads instances for different scopes. They should never be consolidated.\n\n## Expected Behavior\n\nWhen bd finds a .beads directory, it should:\n1. Use that directory (closest ancestor wins)\n2. **Stop searching upward** - do not look for parent .beads directories\n3. **No warning** about multiple databases\n\n## Current Behavior\n\nbd searches all the way up to root, finds all .beads directories, and warns about \"multiple databases\" even though they are separate, intentional instances.\n\n## Fix\n\nIn the database discovery code, stop the upward search as soon as a .beads directory is found. The first .beads found is the one to use, and parent directories are out of scope.\n\n## Note\n\nThis is a beads issue, filed here for tracking. 
Should be implemented in the beads codebase.","status":"open","priority":1,"issue_type":"bug","created_at":"2025-12-18T19:09:44.295743-08:00","updated_at":"2025-12-18T19:09:44.295743-08:00"} {"id":"gt-c92","title":"CLI: all command for batch polecat operations","description":"Batch operations across multiple polecats.\n\n## Commands\n\n### gt all start\n```\ngt all start [--awake-only] [\u003cspecs\u003e...]\n```\nStart sessions for multiple polecats.\n- --awake-only: Only start awake polecats\n- specs: Polecat names, rig/polecat patterns\n\n### gt all stop\n```\ngt all stop [\u003cspecs\u003e...] [--force]\n```\nStop sessions for multiple polecats.\n\n### gt all status\n```\ngt all status [\u003cspecs\u003e...] [--json]\n```\nShow status of multiple polecats.\n\n### gt all attach\n```\ngt all attach [\u003cspecs\u003e...]\n```\nAttach to multiple sessions in tmux panes/windows.\n\n### gt all run\n```\ngt all run \u003ccommand\u003e [\u003cspecs\u003e...]\n```\nRun command in multiple polecat sessions.\n\n## Spec Patterns\n- `Toast`: Specific polecat (in default/current rig)\n- `gastown/Toast`: Specific rig/polecat\n- `gastown/*`: All polecats in rig\n- `*`: All polecats everywhere\n\n## Implementation\n```go\nfunc expandSpecs(specs []string, awakeOnly bool) ([]*polecat.Polecat, error) {\n // Expand patterns to list of polecats\n}\n\nfunc runForAll(polecats []*polecat.Polecat, action func(*polecat.Polecat) error) error {\n // Run action for each, collect errors\n}\n```\n\n## New File\ninternal/cmd/all.go\n\n## PGT Reference\ngastown-py/src/gastown/cli/all_cmd.py\n\n## Acceptance Criteria\n- [ ] Pattern expansion works\n- [ ] Parallel execution where safe\n- [ ] Aggregate error reporting\n- [ ] --awake-only filter works","status":"open","priority":2,"issue_type":"task","created_at":"2025-12-16T14:48:12.411789-08:00","updated_at":"2025-12-16T16:05:47.255503-08:00"} {"id":"gt-caz","title":"Timed Beads: Scheduled recurring work","description":"## Summary\n\nTimed beads wake up periodically and get injected into the ready queue by the daemon.\n\n## Schema Extension\n\n```yaml\nid: gt-weekly-sync\ntype: task # or sentinel\nschedule: \"0 9 * * 1\" # cron: Monday 9am\n# OR\ninterval: 24h # every 24 hours\ntier: haiku # cheap model for routine checks\nnext_run: 2025-12-20T09:00:00Z\n```\n\n## Daemon Integration\n\nDaemon heartbeat loop:\n1. Check timed beads where `next_run \u003c= now`\n2. For each due bead:\n - Inject into ready queue (set status to open if needed)\n - Update `next_run` based on schedule/interval\n3. Witnesses pick up work via `bd ready`\n\n## Use Cases\n\n- Weekly team sync reminders\n- Daily health checks\n- Periodic cleanup tasks\n- Scheduled reports\n\n## Interaction with Pinned Beads\n\nA pinned bead can be timed - it wakes up periodically but never closes.\nThis is how you model \"background services\" in Gas Town.","status":"open","priority":2,"issue_type":"epic","created_at":"2025-12-18T18:07:39.665294-08:00","updated_at":"2025-12-18T18:07:39.665294-08:00"} {"id":"gt-cik","title":"Overseer Crew: User-managed persistent workspaces","description":"## Overview\n\nCrew workers are the overseer's (human's) personal workspaces within a rig. 
Unlike polecats which are witness-managed and ephemeral, crew workers are:\n\n- **Persistent**: Not auto-garbage-collected\n- **User-managed**: Overseer controls lifecycle\n- **Long-lived identities**: dave, emma, fred - recognizable names\n- **Gas Town integrated**: Mail, handoff mechanics work\n- **Tmux optional**: Can work in terminal directly\n\n## Directory Structure\n\n```\n\u003crig\u003e/\n polecats/ # Managed workers (witness controls)\n refinery/ # Merge queue processor\n witness/ # Pit boss\n crew/ # Overseer's personal workspaces\n dave/ # Full clone, persistent\n emma/ # Full clone, persistent\n fred/ # Full clone, persistent\n```\n\n## Key Differences from Polecats\n\n- Location: crew/ instead of polecats/\n- Lifecycle: User-managed, not witness-managed\n- Auto-cleanup: Never (polecats auto-cleanup on swarm land)\n- Issue assignment: Optional (polecats require it)\n- Tmux: Optional (polecats require it)\n- Mail \u0026 Handoff: Yes for both\n- Identity: Persistent (polecats are ephemeral)\n\n## CLI Commands\n\n- gt crew add \u003cname\u003e [--rig \u003crig\u003e] - Create crew workspace\n- gt crew list [--rig \u003crig\u003e] - List crew workspaces\n- gt crew at \u003crig\u003e/\u003cname\u003e - Attach to workspace (start session)\n- gt crew attach \u003cname\u003e - Attach (infer rig from cwd)\n- gt crew refresh \u003cname\u003e - Handoff + restart (context cycling)\n- gt crew remove \u003cname\u003e [--force] - Remove workspace\n- gt crew status [\u003cname\u003e] - Show workspace status\n\n## Design Notes\n\n- Crew workers use full git clones (not worktrees)\n- Optional beads integration via BEADS_DIR\n- Mail-to-self handoff works for context cycling\n- No witness monitoring or nudging\n- No automatic issue assignment required\n\n## Background\n\nUsers often maintain separate repo clones for serial agent work. This is tedious to set up manually. Crew workspaces bring these into Gas Town's infrastructure while keeping user control.","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-12-16T16:47:37.529887-08:00","updated_at":"2025-12-16T20:59:46.13518-08:00","closed_at":"2025-12-16T20:59:46.13518-08:00","close_reason":"All crew commands implemented and merged"} diff --git a/internal/cmd/crew.go b/internal/cmd/crew.go index e28d743a..5e21696f 100644 --- a/internal/cmd/crew.go +++ b/internal/cmd/crew.go @@ -150,6 +150,36 @@ Examples: RunE: runCrewStatus, } +var crewRenameCmd = &cobra.Command{ + Use: "rename <old-name> <new-name>", + Short: "Rename a crew workspace", + Long: `Rename a crew workspace. + +Kills any running session, renames the directory, and updates state. +The new session will use the new name (gt-<rig>-crew-<new-name>). + +Examples: + gt crew rename dave david # Rename dave to david + gt crew rename madmax max # Rename madmax to max`, + Args: cobra.ExactArgs(2), + RunE: runCrewRename, +} + +var crewPristineCmd = &cobra.Command{ + Use: "pristine [<name>]", + Short: "Sync crew workspaces with remote", + Long: `Ensure crew workspace(s) are up-to-date. + +Runs git pull and bd sync for the specified crew, or all crew workers. +Reports any uncommitted changes that may need attention. 
+ +Examples: + gt crew pristine # Pristine all crew workers + gt crew pristine dave # Pristine specific worker + gt crew pristine --json # JSON output`, + RunE: runCrewPristine, +} + func init() { // Add flags crewAddCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to create crew workspace in") @@ -170,6 +200,11 @@ func init() { crewStatusCmd.Flags().StringVar(&crewRig, "rig", "", "Filter by rig name") crewStatusCmd.Flags().BoolVar(&crewJSON, "json", false, "Output as JSON") + crewRenameCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use") + + crewPristineCmd.Flags().StringVar(&crewRig, "rig", "", "Filter by rig name") + crewPristineCmd.Flags().BoolVar(&crewJSON, "json", false, "Output as JSON") + // Add subcommands crewCmd.AddCommand(crewAddCmd) crewCmd.AddCommand(crewListCmd) @@ -177,6 +212,8 @@ func init() { crewCmd.AddCommand(crewRemoveCmd) crewCmd.AddCommand(crewRefreshCmd) crewCmd.AddCommand(crewStatusCmd) + crewCmd.AddCommand(crewRenameCmd) + crewCmd.AddCommand(crewPristineCmd) rootCmd.AddCommand(crewCmd) } @@ -819,3 +856,112 @@ func runCrewStatus(cmd *cobra.Command, args []string) error { return nil } + +func runCrewRename(cmd *cobra.Command, args []string) error { + oldName := args[0] + newName := args[1] + + crewMgr, r, err := getCrewManager(crewRig) + if err != nil { + return err + } + + // Kill any running session for the old name + t := tmux.NewTmux() + oldSessionID := crewSessionName(r.Name, oldName) + if hasSession, _ := t.HasSession(oldSessionID); hasSession { + if err := t.KillSession(oldSessionID); err != nil { + return fmt.Errorf("killing old session: %w", err) + } + fmt.Printf("Killed session %s\n", oldSessionID) + } + + // Perform the rename + if err := crewMgr.Rename(oldName, newName); err != nil { + if err == crew.ErrCrewNotFound { + return fmt.Errorf("crew workspace '%s' not found", oldName) + } + if err == crew.ErrCrewExists { + return fmt.Errorf("crew workspace '%s' already exists", newName) + } + return fmt.Errorf("renaming crew workspace: %w", err) + } + + fmt.Printf("%s Renamed crew workspace: %s/%s → %s/%s\n", + style.Bold.Render("✓"), r.Name, oldName, r.Name, newName) + fmt.Printf("New session will be: %s\n", style.Dim.Render(crewSessionName(r.Name, newName))) + + return nil +} + +func runCrewPristine(cmd *cobra.Command, args []string) error { + crewMgr, r, err := getCrewManager(crewRig) + if err != nil { + return err + } + + var workers []*crew.CrewWorker + + if len(args) > 0 { + // Specific worker + name := args[0] + worker, err := crewMgr.Get(name) + if err != nil { + if err == crew.ErrCrewNotFound { + return fmt.Errorf("crew workspace '%s' not found", name) + } + return fmt.Errorf("getting crew worker: %w", err) + } + workers = []*crew.CrewWorker{worker} + } else { + // All workers + workers, err = crewMgr.List() + if err != nil { + return fmt.Errorf("listing crew workers: %w", err) + } + } + + if len(workers) == 0 { + fmt.Println("No crew workspaces found.") + return nil + } + + var results []*crew.PristineResult + + for _, w := range workers { + result, err := crewMgr.Pristine(w.Name) + if err != nil { + return fmt.Errorf("pristine %s: %w", w.Name, err) + } + results = append(results, result) + } + + if crewJSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(results) + } + + // Text output + for _, result := range results { + fmt.Printf("%s %s/%s\n", style.Bold.Render("→"), r.Name, result.Name) + + if result.HadChanges { + fmt.Printf(" %s\n", style.Bold.Render("⚠ Has uncommitted changes")) + } + + if result.Pulled { 
+ fmt.Printf(" %s git pull\n", style.Dim.Render("✓")) + } else if result.PullError != "" { + fmt.Printf(" %s git pull: %s\n", style.Bold.Render("✗"), result.PullError) + } + + if result.Synced { + fmt.Printf(" %s bd sync\n", style.Dim.Render("✓")) + } else if result.SyncError != "" { + fmt.Printf(" %s bd sync: %s\n", style.Bold.Render("✗"), result.SyncError) + } + } + + return nil +} diff --git a/internal/cmd/status.go b/internal/cmd/status.go index 1ed69e58..358f8aae 100644 --- a/internal/cmd/status.go +++ b/internal/cmd/status.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/steveyegge/gastown/internal/config" + "github.com/steveyegge/gastown/internal/crew" "github.com/steveyegge/gastown/internal/git" "github.com/steveyegge/gastown/internal/rig" "github.com/steveyegge/gastown/internal/style" @@ -44,6 +45,8 @@ type RigStatus struct { Name string `json:"name"` Polecats []string `json:"polecats"` PolecatCount int `json:"polecat_count"` + Crews []string `json:"crews"` + CrewCount int `json:"crew_count"` HasWitness bool `json:"has_witness"` HasRefinery bool `json:"has_refinery"` } @@ -52,6 +55,7 @@ type RigStatus struct { type StatusSum struct { RigCount int `json:"rig_count"` PolecatCount int `json:"polecat_count"` + CrewCount int `json:"crew_count"` WitnessCount int `json:"witness_count"` RefineryCount int `json:"refinery_count"` } @@ -104,10 +108,22 @@ func runStatus(cmd *cobra.Command, args []string) error { HasWitness: r.HasWitness, HasRefinery: r.HasRefinery, } + + // Count crew workers + crewGit := git.NewGit(r.Path) + crewMgr := crew.NewManager(r, crewGit) + if workers, err := crewMgr.List(); err == nil { + for _, w := range workers { + rs.Crews = append(rs.Crews, w.Name) + } + rs.CrewCount = len(workers) + } + status.Rigs = append(status.Rigs, rs) // Update summary status.Summary.PolecatCount += len(r.Polecats) + status.Summary.CrewCount += rs.CrewCount if r.HasWitness { status.Summary.WitnessCount++ } @@ -139,6 +155,7 @@ func outputStatusText(status TownStatus) error { fmt.Printf("%s\n", style.Bold.Render("Summary")) fmt.Printf(" Rigs: %d\n", status.Summary.RigCount) fmt.Printf(" Polecats: %d\n", status.Summary.PolecatCount) + fmt.Printf(" Crews: %d\n", status.Summary.CrewCount) fmt.Printf(" Witnesses: %d\n", status.Summary.WitnessCount) fmt.Printf(" Refineries: %d\n", status.Summary.RefineryCount) @@ -158,6 +175,9 @@ func outputStatusText(status TownStatus) error { if r.HasRefinery { indicators += " 🏭" } + if r.CrewCount > 0 { + indicators += " 👤" + } fmt.Printf(" %s%s\n", style.Bold.Render(r.Name), indicators) @@ -166,6 +186,10 @@ func outputStatusText(status TownStatus) error { } else { fmt.Printf(" %s\n", style.Dim.Render("No polecats")) } + + if len(r.Crews) > 0 { + fmt.Printf(" Crews: %v\n", r.Crews) + } } return nil diff --git a/internal/crew/manager.go b/internal/crew/manager.go index e9927e65..a9a3ccce 100644 --- a/internal/crew/manager.go +++ b/internal/crew/manager.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "os/exec" "path/filepath" "time" @@ -274,3 +275,96 @@ func (m *Manager) loadState(name string) (*CrewWorker, error) { return &crew, nil } + +// Rename renames a crew worker from oldName to newName. 
+func (m *Manager) Rename(oldName, newName string) error { + if !m.exists(oldName) { + return ErrCrewNotFound + } + if m.exists(newName) { + return ErrCrewExists + } + + oldPath := m.crewDir(oldName) + newPath := m.crewDir(newName) + + // Rename directory + if err := os.Rename(oldPath, newPath); err != nil { + return fmt.Errorf("renaming crew dir: %w", err) + } + + // Update state file with new name and path + crew, err := m.loadState(newName) + if err != nil { + // Rollback on error + os.Rename(newPath, oldPath) + return fmt.Errorf("loading state: %w", err) + } + + crew.Name = newName + crew.ClonePath = newPath + crew.UpdatedAt = time.Now() + + if err := m.saveState(crew); err != nil { + // Rollback on error + os.Rename(newPath, oldPath) + return fmt.Errorf("saving state: %w", err) + } + + return nil +} + +// Pristine ensures a crew worker is up-to-date with remote. +// It runs git pull and bd sync. +func (m *Manager) Pristine(name string) (*PristineResult, error) { + if !m.exists(name) { + return nil, ErrCrewNotFound + } + + crewPath := m.crewDir(name) + crewGit := git.NewGit(crewPath) + + result := &PristineResult{ + Name: name, + } + + // Check for uncommitted changes + hasChanges, err := crewGit.HasUncommittedChanges() + if err != nil { + return nil, fmt.Errorf("checking changes: %w", err) + } + result.HadChanges = hasChanges + + // Pull latest (use origin and current branch) + if err := crewGit.Pull("origin", ""); err != nil { + result.PullError = err.Error() + } else { + result.Pulled = true + } + + // Run bd sync + if err := m.runBdSync(crewPath); err != nil { + result.SyncError = err.Error() + } else { + result.Synced = true + } + + return result, nil +} + +// runBdSync runs bd sync in the given directory. +func (m *Manager) runBdSync(dir string) error { + cmd := exec.Command("bd", "sync") + cmd.Dir = dir + return cmd.Run() +} + +// PristineResult captures the results of a pristine operation. +type PristineResult struct { + Name string `json:"name"` + HadChanges bool `json:"had_changes"` + Pulled bool `json:"pulled"` + PullError string `json:"pull_error,omitempty"` + Synced bool `json:"synced"` + SyncError string `json:"sync_error,omitempty"` +}
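
For reference, the --json path in runCrewPristine simply streams the PristineResult slice through an indented encoder. A standalone sketch of the shape a consumer would see, with the struct copied from the hunk above; the sample values are invented:

```go
package main

import (
	"encoding/json"
	"os"
)

// PristineResult is copied from the internal/crew/manager.go hunk above.
type PristineResult struct {
	Name       string `json:"name"`
	HadChanges bool   `json:"had_changes"`
	Pulled     bool   `json:"pulled"`
	PullError  string `json:"pull_error,omitempty"`
	Synced     bool   `json:"synced"`
	SyncError  string `json:"sync_error,omitempty"`
}

func main() {
	// Fabricated sample values; a real run collects these from crewMgr.Pristine.
	results := []*PristineResult{
		{Name: "dave", Pulled: true, Synced: true},
		{Name: "emma", HadChanges: true, Pulled: true, SyncError: "exit status 1"},
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ") // same encoder settings as runCrewPristine
	if err := enc.Encode(results); err != nil {
		panic(err)
	}
}
```

The omitempty tags keep pull_error and sync_error out of the happy-path output, so consumers can treat the mere presence of either key as a failure signal.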