diff --git a/.claude/settings.json b/.claude/settings.json new file mode 100644 index 00000000..ae436e31 --- /dev/null +++ b/.claude/settings.json @@ -0,0 +1,15 @@ +{ + "hooks": { + "SessionStart": [ + { + "matcher": "", + "hooks": [ + { + "type": "command", + "command": "gt prime" + } + ] + } + ] + } +} diff --git a/.gitignore b/.gitignore index 8e117c6a..815a7622 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,6 @@ coverage.out config.toml !config.example.toml gt + +# Runtime state +state.json diff --git a/CLAUDE.md b/CLAUDE.md index af5107c6..730d97d7 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,13 +1,39 @@ -# Claude: Gastown Go Port +# Crew Worker Context -Run `bd prime` for beads context. +> **Recovery**: Run `gt prime` after compaction, clear, or new session -## Strategic Context +## Your Role: CREW WORKER (max in gastown) -For broader project context and design guidance beyond Gas Town's immediate scope: -- Check `~/ai/stevey-gastown/hop/CONTEXT.md` if available +You are a **crew worker** - the overseer's (human's) personal workspace within the +gastown rig. Unlike polecats which are witness-managed and ephemeral, you are: -This provides architectural direction for decisions that affect the platform's evolution. +- **Persistent**: Your workspace is never auto-garbage-collected +- **User-managed**: The overseer controls your lifecycle, not the Witness +- **Long-lived identity**: You keep your name across sessions +- **Integrated**: Mail and handoff mechanics work just like other Gas Town agents + +**Key difference from polecats**: No one is watching you. You work directly with +the overseer, not as part of a swarm. 
+ +## Your Identity + +**Your mail address:** `gastown/max` + +Check your mail with: `gt mail inbox` + +## Gas Town Architecture + +``` +Town (/Users/stevey/gt) +├── mayor/ ← Global coordinator +├── gastown/ ← Your rig +│ ├── .beads/ ← Issue tracking (you have write access) +│ ├── crew/ +│ │ └── max/ ← You are here (your git clone) +│ ├── polecats/ ← Ephemeral workers (not you) +│ ├── refinery/ ← Merge queue processor +│ └── witness/ ← Polecat lifecycle (doesn't monitor you) +``` ## Project Info @@ -24,54 +50,45 @@ go build -o gt ./cmd/gt go test ./... ``` +## Key Commands + +### Finding Work +- `gt mail inbox` - Check your inbox +- `bd ready` - Available issues +- `bd list --status=in_progress` - Your active work + +### Working +- `bd update --status=in_progress` - Claim an issue +- `bd show ` - View issue details +- `bd close ` - Mark issue complete +- `bd sync` - Sync beads changes + +### Communication +- `gt mail send mayor/ -s "Subject" -m "Message"` - To Mayor +- `gt mail send gastown/crew/max -s "Subject" -m "Message"` - To yourself (handoff) + +## Beads Database + +Your rig has its own beads database at `/Users/stevey/gt/gastown/.beads` + +Issue prefix: `gt-` + ## Key Epics - `gt-u1j`: Port Gas Town to Go (main tracking epic) - `gt-f9x`: Town & Rig Management (install, doctor, federation) -### Planning Work with Dependencies +## Session End Checklist -When breaking down large features into tasks, use **beads dependencies** to sequence work - NOT phases or numbered steps. - -**Cognitive Trap: Temporal Language Inverts Dependencies** - -Words like "Phase 1", "Step 1", "first", "before" trigger temporal reasoning that **flips dependency direction**. Your brain thinks: -- "Phase 1 comes before Phase 2" → "Phase 1 blocks Phase 2" → `bd dep add phase1 phase2` - -But that's **backwards**! 
The correct mental model: -- "Phase 2 **depends on** Phase 1" → `bd dep add phase2 phase1` - -**Solution: Use requirement language, not temporal language** - -Instead of phases, name tasks by what they ARE, and think about what they NEED: - -```bash -# WRONG - temporal thinking leads to inverted deps -bd create "Phase 1: Create buffer layout" ... -bd create "Phase 2: Add message rendering" ... -bd dep add phase1 phase2 # WRONG! Says phase1 depends on phase2 - -# RIGHT - requirement thinking -bd create "Create buffer layout" ... -bd create "Add message rendering" ... -bd dep add msg-rendering buffer-layout # msg-rendering NEEDS buffer-layout +``` +[ ] git status (check for uncommitted changes) +[ ] git push (push any commits) +[ ] bd sync (sync beads changes) +[ ] Check inbox (any messages needing response?) +[ ] HANDOFF if incomplete: + gt mail send gastown/crew/max -s "🤝 HANDOFF: ..." -m "..." ``` -**Verification**: After adding deps, run `bd blocked` - tasks should be blocked by their prerequisites, not their dependents. 
- -**Example breakdown** (for a multi-part feature): -```bash -# Create tasks named by what they do, not what order they're in -bd create "Implement conversation region" -t task -p 1 -bd create "Add header-line status display" -t task -p 1 -bd create "Render tool calls inline" -t task -p 2 -bd create "Add streaming content support" -t task -p 2 - -# Set up dependencies: X depends on Y means "X needs Y first" -bd dep add header-line conversation-region # header needs region -bd dep add tool-calls conversation-region # tools need region -bd dep add streaming tool-calls # streaming needs tools - -# Verify with bd blocked - should show sensible blocking -bd blocked -``` +Crew member: max +Rig: gastown +Working directory: /Users/stevey/gt/gastown/crew/max diff --git a/docs/architecture.md b/docs/architecture.md index 9bda594a..d03aa08b 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -11,7 +11,7 @@ graph TB subgraph "Gas Town" Overseer["👤 Overseer
(Human Operator)"] - subgraph Town["Town (~/ai/)"] + subgraph Town["Town (~/gt/)"] Mayor["🎩 Mayor
(Global Coordinator)"] subgraph Rig1["Rig: wyvern"] @@ -45,12 +45,28 @@ graph TB ## Core Concepts -### Town +### Harness (Town) -A **Town** is a complete Gas Town installation - the workspace where everything lives. A town contains: -- Town configuration (`config/` directory) -- Mayor's home (`mayor/` directory at town level) -- One or more **Rigs** (managed project repositories) +A **Harness** is the installation directory where Gas Town lives - the physical root of your workspace. The terms "harness" and "town" are often used interchangeably: +- **Harness** = physical (the directory at `~/gt/`) +- **Town** = logical (the Gas Town workspace concept) + +A harness contains: +- `CLAUDE.md` - Mayor role context (Mayor runs from harness root) +- `mayor/` - Mayor configuration, state, and registry +- `.beads/` - Town-level beads (gm-* prefix for mayor mail) +- `rigs/` or rig directories - Managed project containers + +Create a harness with `gt install`: +```bash +gt install ~/gt --git # Create harness with git +``` + +**See**: [docs/harness.md](harness.md) for comprehensive harness documentation, including: +- Beads redirect patterns for complex setups +- Multi-system sharing (PGT/GGT coexistence) +- Harness templates for organizations +- Migration between harnesses ### Rig @@ -157,27 +173,265 @@ sync-branch: beads-sync # Separate branch for beads commits **Why sync-branch?** When multiple agents share a beads database, using a dedicated sync branch prevents beads commits from interleaving with code commits on feature branches. -## Directory Structure +#### Beads as Universal Data Plane -### Town Level +Beads is the data plane for ALL Gas Town operations. 
Everything flows through beads: + +| Category | Description | Status | +|----------|-------------|--------| +| **Work items** | Issues, tasks, epics | Core | +| **Mail** | Messages between agents (`type: message`) | Core | +| **Merge requests** | Queue entries (`type: merge-request`) | In progress | +| **Molecules** | Composable workflow templates | Planned (v1) | +| **Timed beads** | Scheduled recurring work | Planned (post-v1) | +| **Pinned beads** | Ongoing concerns that don't close | Planned (post-v1) | +| **Resource beads** | Leases, locks, quotas | Planned (post-v1) | + +**Molecules** are crystallized workflow patterns that can be attached to work items. See the dedicated **Molecules** section below for full details on composition, nondeterministic idempotence, and built-in workflows. + +**The OS Metaphor**: Gas Town is an operating system for work: + +| OS Concept | Gas Town | +|------------|----------| +| Kernel | Daemon | +| Process scheduler | Ready work + dependencies | +| Timer interrupts | Timed beads | +| Semaphores | Resource beads | +| Background services | Pinned beads | +| Process templates | Molecules | +| IPC | Mail beads | + +## Molecules: Composable Workflow Templates + +Molecules are **crystallized, composable, nondeterministic-idempotent workflow templates**. They encode structured workflows that any worker can execute, with full auditability and the ability for any worker to pick up where another left off. 
+ +### Core Concepts + +| Concept | Name | Description | +|---------|------|-------------| +| Template | **Molecule** | Read-only workflow pattern (beads issue with type=molecule) | +| Individual step | **Atom/Step** | Smallest unit of work within a molecule | +| Dependency | **Bond** | Connection between steps (Needs: directive) | +| Composed molecule | **Polymer/Derived** | Molecule built from other molecules | +| Concrete work | **Instance** | Beads created when molecule is instantiated | + +### Molecule Format + +Molecules use a prose-based format with structured step definitions: + +```markdown +## Molecule: engineer-in-box +Full workflow from design to merge. + +## Step: design +Think carefully about architecture. Consider: +- Existing patterns in the codebase +- Trade-offs between approaches +- Testability and maintainability + +Write a brief design summary before proceeding. + +## Step: implement +Write the code. Follow codebase conventions. +Needs: design + +## Step: review +Self-review the changes. Look for bugs, style issues, missing error handling. +Needs: implement + +## Step: test +Write and run tests. Cover happy path and edge cases. +Needs: implement + +## Step: submit +Submit for merge via refinery. +Needs: review, test +``` + +**Key format elements:** +- `## Step: ` - Step header with reference name +- Prose instructions - What the step should accomplish +- `Needs: , ` - Dependencies (optional) +- `Tier: haiku|sonnet|opus` - Model hint (optional) + +### Molecule Composition + +Molecules can include other molecules to create derived workflows: + +```markdown +## Molecule: gastown-polecat +Full workflow for Gas Town polecats including binary installation. + +Includes: mol-engineer-in-box + +## Step: install-binary +After merge is submitted, rebuild and install the local gt binary. 
+Run from the rig directory: + go build -o gt ./cmd/gt + go install ./cmd/gt +Needs: submit +``` + +**Semantics:** +- `Includes:` brings in all steps from the referenced molecule +- New steps can depend on included steps (e.g., `Needs: submit`) +- Multiple includes are supported for complex polymers +- Dependencies are resolved transitively at parse time + +### Nondeterministic Idempotence + +This is the key property enabling distributed molecule execution: + +1. **Deterministic Structure**: Molecule defines exactly what steps exist and their dependencies +2. **Nondeterministic Execution**: Any worker can execute any ready step +3. **Idempotent Progress**: Completed steps stay completed; re-entry is safe + +**How it works:** ``` -~/gt/ # Town root (Gas Town harness) -├── CLAUDE.md # Mayor role prompting (at town root) +Worker A picks up "design" (pending → in_progress) +Worker A completes "design" (in_progress → completed) +Worker A dies before "implement" +Worker B queries bd ready, sees "implement" is now ready +Worker B picks up "implement" (any worker can continue) +``` + +This is like a **distributed work queue** backed by beads: +- Beads is the queue (steps are issues with status) +- Git is the persistence layer +- No separate message broker needed +- Full auditability of who did what, when + +### Step States + +``` +pending → in_progress → completed + ↘ failed +``` + +| State | Meaning | +|-------|---------| +| `pending` (open) | Step not yet started, waiting for dependencies | +| `in_progress` | Worker has claimed this step | +| `completed` (closed) | Step finished successfully | +| `failed` | Step failed (needs intervention) | + +**Recovery mechanism:** +- If worker dies mid-step, step stays `in_progress` +- After timeout (default 30 min), step can be reclaimed +- `bd release ` manually releases stuck steps +- Another worker can then pick it up + +### Instantiation + +When a molecule is attached to an issue: + +```bash +gt spawn --issue gt-xyz --molecule 
mol-engineer-in-box +``` + +1. Molecule is validated (steps, dependencies) +2. Child beads are created for each step: + - `gt-xyz.design`, `gt-xyz.implement`, etc. +3. Inter-step dependencies are wired +4. First ready step(s) become available via `bd ready` +5. Polecat starts on first ready step + +**Provenance tracking:** +- Each instance has an `instantiated_from` edge to the source molecule +- Enables querying: "show all instances of mol-engineer-in-box" + +### Built-in Molecules + +Gas Town ships with three built-in molecules: + +**mol-engineer-in-box** (5 steps): +``` +design → implement → review → test → submit +``` +Full quality workflow with design phase and self-review. + +**mol-quick-fix** (3 steps): +``` +implement → test → submit +``` +Fast path for small, well-understood changes. + +**mol-research** (2 steps): +``` +investigate → document +``` +Exploration workflow for understanding problems. + +Seed built-in molecules with: +```bash +gt molecule seed +``` + +### Usage + +```bash +# List available molecules +gt molecule list + +# Show molecule details +gt molecule show mol-engineer-in-box + +# Instantiate on an issue +gt molecule instantiate mol-engineer-in-box --parent=gt-xyz + +# Spawn polecat with molecule +gt spawn --issue gt-xyz --molecule mol-engineer-in-box +``` + +### Why Molecules? + +1. **Quality gates**: Every polecat follows the same review/test workflow +2. **Error isolation**: Each step is a checkpoint; failures don't lose prior work +3. **Parallelism**: Independent steps can run in parallel across workers +4. **Auditability**: Full history of who did what step, when +5. **Composability**: Build complex workflows from simple building blocks +6. 
**Resumability**: Any worker can continue where another left off + +### Molecule vs Template + +Beads has two related concepts: +- **bd template**: User-facing workflow templates with variable substitution +- **gt molecule**: Agent-focused execution templates with step dependencies + +Both use similar structures but different semantics: +- Templates focus on parameterization (`{{variable}}` substitution) +- Molecules focus on execution (step states, nondeterministic dispatch) + +## Directory Structure + +### Harness Level + +The harness (town root) is created by `gt install`: + +``` +~/gt/ # HARNESS ROOT (Gas Town installation) +├── CLAUDE.md # Mayor role context (runs from here) ├── .beads/ # Town-level beads (prefix: gm-) │ ├── beads.db # Mayor mail, coordination, handoffs │ └── config.yaml │ -├── mayor/ # Mayor's HOME at town level +├── mayor/ # Mayor configuration and state │ ├── town.json # {"type": "town", "name": "..."} │ ├── rigs.json # Registry of managed rigs -│ └── state.json # Mayor state (NO mail/ directory) +│ └── state.json # Mayor agent state │ -├── gastown/ # A rig (project container) -└── wyvern/ # Another rig +├── rigs/ # Standard location for rigs +│ ├── gastown/ # A rig (project container) +│ └── wyvern/ # Another rig +│ +└── / # OR rigs at harness root (legacy) ``` -**Note**: Mayor's mail is now in town beads (`gm-*` issues), not JSONL files. +**Notes**: +- Mayor's mail is in town beads (`gm-*` issues), not JSONL files +- Rigs can be in `rigs/` or at harness root (both work) +- See [docs/harness.md](harness.md) for advanced harness configurations ### Rig Level @@ -227,8 +481,8 @@ graph TB Beads[".beads/"] subgraph Polecats["polecats/"] - Nux["Nux/
(git clone)"] - Toast["Toast/
(git clone)"] + Nux["Nux/
(worktree)"] + Toast["Toast/
(worktree)"] end subgraph Refinery["refinery/"] @@ -259,19 +513,19 @@ graph TB ### ASCII Directory Layout -For reference without mermaid rendering: +For reference without mermaid rendering (see [harness.md](harness.md) for creation/setup): ``` -~/gt/ # TOWN ROOT (Gas Town harness) -├── CLAUDE.md # Mayor role prompting +~/gt/ # HARNESS ROOT (Gas Town installation) +├── CLAUDE.md # Mayor role context ├── .beads/ # Town-level beads (gm-* prefix) │ ├── beads.db # Mayor mail, coordination │ └── config.yaml │ -├── mayor/ # Mayor's home (at town level) +├── mayor/ # Mayor configuration and state │ ├── town.json # {"type": "town", "name": "..."} │ ├── rigs.json # Registry of managed rigs -│ └── state.json # Mayor state (no mail/ dir) +│ └── state.json # Mayor agent state │ ├── gastown/ # RIG (container, NOT a git clone) │ ├── config.json # Rig configuration @@ -440,6 +694,9 @@ Polecats are the workers that do actual implementation: - **Self-verification**: Run decommission checklist before signaling done - **Beads access**: Create issues for discovered work, close completed work - **Clean handoff**: Ensure git state is clean for Witness verification +- **Shutdown request**: Request own termination via `gt handoff` (bottom-up lifecycle) + +**Polecats are ephemeral**: They exist only while working. When done, they request shutdown and are deleted (worktree removed, branch deleted). There is no "idle pool" of polecats. ## Key Workflows @@ -506,6 +763,46 @@ sequenceDiagram end ``` +### Polecat Shutdown Protocol (Bottom-Up) + +Polecats initiate their own shutdown. This enables streaming - workers come and go continuously without artificial batch boundaries. + +```mermaid +sequenceDiagram + participant P as 🐱 Polecat + participant R as 🔧 Refinery + participant W as 👁 Witness + + P->>P: Complete work + P->>R: Submit to merge queue + P->>P: Run gt handoff + + Note over P: Verify git clean,
PR exists + + P->>W: Mail: "Shutdown request" + P->>P: Set state = pending_shutdown + + W->>W: Verify safe to kill + W->>P: Kill session + W->>W: git worktree remove + W->>W: git branch -d +``` + +**gt handoff command** (run by polecat): +1. Verify git state clean (no uncommitted changes) +2. Verify work handed off (PR created or in queue) +3. Send mail to Witness requesting shutdown +4. Wait for Witness to kill session (don't self-exit) + +**Witness shutdown handler**: +1. Receive shutdown request +2. Verify PR merged or queued, no data loss risk +3. Kill session: `gt session stop /` +4. Remove worktree: `git worktree remove polecats/` +5. Delete branch: `git branch -d polecat/` + +**Why bottom-up?** In streaming, there's no "swarm end" to trigger cleanup. Each worker manages its own lifecycle. The Witness is the lifecycle authority that executes the actual termination. + ### Session Cycling (Mail-to-Self) When an agent's context fills, it hands off to its next session: @@ -695,55 +992,85 @@ gt-auth-epic # Epic: "Fix authentication bugs" Workers process issues independently. Work flows through the merge queue. No "swarm ID" needed - the epic provides grouping, labels provide ad-hoc queries, dependencies provide sequencing. -### 12. Agent Session Lifecycle (Daemon Protection) +### 12. Agent Session Lifecycle (One Daemon) -**Decision**: A background daemon manages agent session lifecycles, including cycling sessions when agents request handoff. +**Decision**: ONE daemon (Go process) for all Gas Town manages agent lifecycles. Agents use a unified `gt handoff` command to request lifecycle actions. 
+ +**Architecture**: +``` +Gas Town Daemon (gt daemon) +├── Pokes Mayor periodically +├── Pokes all Witnesses periodically +├── Processes lifecycle requests from daemon/ inbox +└── Restarts sessions when cycle requested + +Lifecycle Hierarchy: + Daemon → manages Mayor, all Witnesses + Witness → manages Polecats, Refinery (per rig) +``` **Rationale**: - Agents can't restart themselves after exiting -- Handoff mail is useless without someone to start the new session -- Daemon provides reliable session management outside agent context -- Enables autonomous long-running operation (hours/days) +- ONE daemon is simpler than per-rig daemons +- Daemon is dumb scheduler; intelligence is in agents +- Unified protocol means all agents work the same way -**Session cycling protocol**: -1. Agent detects context exhaustion or requests cycle -2. Agent sends handoff mail to own inbox -3. Agent sets `requesting_cycle: true` in state.json -4. Agent exits (or sends explicit signal to daemon) -5. Daemon detects exit + cycle request flag -6. Daemon starts new session -7. 
New session reads handoff mail, resumes work +**Unified lifecycle command** (`gt handoff`): +```bash +gt handoff # Context-aware default +gt handoff --shutdown # Terminate, don't restart (polecats) +gt handoff --cycle # Restart with handoff (long-running agents) +gt handoff --restart # Fresh restart, no handoff +``` -**Daemon responsibilities**: -- Monitor agent session health (heartbeat) -- Detect session exit -- Check cycle request flag in state.json -- Start replacement session if cycle requested -- Clear cycle flag after successful restart -- Report failures to Mayor (escalation) +| Agent | Default | Sends request to | +|-------|---------|------------------| +| Polecat | --shutdown | rig/witness | +| Refinery | --cycle | rig/witness | +| Witness | --cycle | daemon/ | +| Mayor | --cycle | daemon/ | -**Applies to**: Witness, Refinery (both long-running agents that may exhaust context) +**Lifecycle request protocol**: +1. Agent runs `gt handoff` (verifies git clean, sends handoff mail) +2. Agent sends lifecycle request to its manager +3. Agent sets `requesting_: true` in state.json +4. Agent waits (does NOT self-exit) +5. Manager receives request, verifies safe +6. Manager kills session +7. Manager starts new session (for cycle/restart) +8. 
New session reads handoff mail, resumes work + +**Daemon heartbeat loop**: +- Poke Mayor: "HEARTBEAT: check your rigs" +- Poke each Witness: "HEARTBEAT: check your workers" +- Agents ignore poke if already working +- Process any lifecycle requests in daemon/ inbox +- Restart dead sessions if cycle was requested ```mermaid sequenceDiagram participant A1 as Agent Session 1 - participant S as State.json - participant D as Daemon + participant M as Lifecycle Manager participant A2 as Agent Session 2 - participant MB as Mailbox - A1->>MB: Send handoff mail - A1->>S: Set requesting_cycle: true - A1->>A1: Exit cleanly - D->>D: Detect session exit - D->>S: Check requesting_cycle - S->>D: true - D->>D: Start new session - D->>S: Clear requesting_cycle - A2->>MB: Read handoff mail - A2->>A2: Resume from handoff + A1->>A1: gt handoff --cycle + A1->>A1: Send handoff mail to self + A1->>M: Lifecycle request: cycle + A1->>A1: Set requesting_cycle, wait + + M->>M: Verify safe to act + M->>A1: Kill session + M->>A2: Start new session + A2->>A2: Read handoff mail + A2->>A2: Resume work ``` +**Polecat shutdown** (--shutdown default): +After Witness kills session: +- Remove worktree: `git worktree remove polecats/` +- Delete branch: `git branch -d polecat/` +- Polecat ceases to exist (ephemeral) + ### 13. Resource-Constrained Worker Pool **Decision**: Each rig has a configurable `max_workers` limit for concurrent polecats. 
@@ -912,10 +1239,13 @@ This ensures all agents in the rig share a single beads database, separate from ## CLI Commands -### Town Management +### Harness Management ```bash -gt install [path] # Install Gas Town at path +gt install [path] # Create Gas Town harness (see harness.md) +gt install --git # Also initialize git with .gitignore +gt install --github=u/r # Also create GitHub repo +gt git-init # Initialize git for existing harness gt doctor # Check workspace health gt doctor --fix # Auto-fix issues ``` @@ -940,12 +1270,13 @@ gt capture "" # Run command in polecat session ### Session Management ```bash -gt spawn --issue # Start polecat on issue -gt kill # Kill polecat session -gt wake # Mark polecat as active -gt sleep # Mark polecat as inactive +gt spawn --issue # Start polecat on issue (creates fresh worktree) +gt handoff # Polecat requests shutdown (run when done) +gt session stop
<rig>/<polecat>
# Kill polecat session (Witness uses this) ``` +**Note**: `gt wake` and `gt sleep` are deprecated - polecats are ephemeral, not pooled. + ### Landing & Merge Queue ```bash @@ -1150,7 +1481,7 @@ type Worker interface { ### Configuration ```yaml -# ~/ai/config/outposts.yaml +# ~/gt/config/outposts.yaml outposts: - name: local type: local diff --git a/docs/design/tmux-theming.md b/docs/design/tmux-theming.md new file mode 100644 index 00000000..99baa66f --- /dev/null +++ b/docs/design/tmux-theming.md @@ -0,0 +1,472 @@ +# Design: Tmux Status Bar Theming (gt-vc1n) + +## Problem + +All Gas Town tmux sessions look identical: +- Same green/black status bars everywhere +- Hard to tell which rig you're in at a glance +- Session names get truncated (only 10 chars visible) +- No visual indication of worker role (polecat vs crew vs mayor) + +Current state: +``` +[gt-gastown] 0:zsh* "pane_title" 14:30 19-Dec +[gt-gastown] 0:zsh* "pane_title" 14:30 19-Dec <- which worker? +[gt-mayor] 0:zsh* "pane_title" 14:30 19-Dec +``` + +## Solution + +Per-rig color themes applied when tmux sessions are created, with optional user customization. + +### Goals +1. Each rig has a distinct color theme +2. Colors are automatically assigned from a predefined palette +3. Users can override colors per-rig +4. Status bar shows useful context (rig, worker, role) + +## Design + +### 1. 
Color Palette + +A curated palette of distinct, visually appealing color pairs (bg/fg): + +```go +// internal/tmux/theme.go +var DefaultPalette = []Theme{ + {Name: "ocean", BG: "#1e3a5f", FG: "#e0e0e0"}, // Deep blue + {Name: "forest", BG: "#2d5a3d", FG: "#e0e0e0"}, // Forest green + {Name: "rust", BG: "#8b4513", FG: "#f5f5dc"}, // Rust/brown + {Name: "plum", BG: "#4a3050", FG: "#e0e0e0"}, // Purple + {Name: "slate", BG: "#4a5568", FG: "#e0e0e0"}, // Slate gray + {Name: "ember", BG: "#b33a00", FG: "#f5f5dc"}, // Burnt orange + {Name: "midnight", BG: "#1a1a2e", FG: "#c0c0c0"}, // Dark blue-black + {Name: "wine", BG: "#722f37", FG: "#f5f5dc"}, // Burgundy + {Name: "teal", BG: "#0d5c63", FG: "#e0e0e0"}, // Teal + {Name: "copper", BG: "#6d4c41", FG: "#f5f5dc"}, // Warm brown +} +``` + +Palette criteria: +- Distinct from each other (no two look alike) +- Readable (sufficient contrast) +- Professional (no neon/garish colors) +- Dark backgrounds (easier on eyes in terminals) + +### 2. Configuration + +#### Per-Rig Config Extension + +Extend `RigConfig` in `internal/config/types.go`: + +```go +type RigConfig struct { + Type string `json:"type"` + Version int `json:"version"` + MergeQueue *MergeQueueConfig `json:"merge_queue,omitempty"` + Theme *ThemeConfig `json:"theme,omitempty"` // NEW +} + +type ThemeConfig struct { + // Name picks from palette (e.g., "ocean", "forest") + Name string `json:"name,omitempty"` + + // Custom overrides the palette with specific colors + Custom *CustomTheme `json:"custom,omitempty"` +} + +type CustomTheme struct { + BG string `json:"bg"` // hex color or tmux color name + FG string `json:"fg"` +} +``` + +#### Town-Level Config (optional) + +Allow global palette override in `mayor/town.json`: + +```json +{ + "theme": { + "palette": ["ocean", "forest", "rust", "plum"], + "mayor_theme": "midnight" + } +} +``` + +### 3. 
Theme Assignment + +When a rig is added (or first session created), auto-assign a theme: + +```go +// internal/tmux/theme.go + +// AssignTheme picks a theme for a rig based on its name. +// Uses consistent hashing so the same rig always gets the same color. +func AssignTheme(rigName string, palette []Theme) Theme { + h := fnv.New32a() + h.Write([]byte(rigName)) + idx := int(h.Sum32()) % len(palette) + return palette[idx] +} +``` + +This ensures: +- Same rig always gets same color (deterministic) +- Different rigs get different colors (distributed) +- No persistent state needed for assignment + +### 4. Session Creation Changes + +Modify `tmux.NewSession` to accept optional theming: + +```go +// SessionOptions configures session creation. +type SessionOptions struct { + WorkDir string + Theme *Theme // nil = use default +} + +// NewSessionWithOptions creates a session with theming. +func (t *Tmux) NewSessionWithOptions(name string, opts SessionOptions) error { + args := []string{"new-session", "-d", "-s", name} + if opts.WorkDir != "" { + args = append(args, "-c", opts.WorkDir) + } + + if _, err := t.run(args...); err != nil { + return err + } + + // Apply theme + if opts.Theme != nil { + t.ApplyTheme(name, *opts.Theme) + } + + return nil +} + +// ApplyTheme sets the status bar style for a session. +func (t *Tmux) ApplyTheme(session string, theme Theme) error { + style := fmt.Sprintf("bg=%s,fg=%s", theme.BG, theme.FG) + _, err := t.run("set-option", "-t", session, "status-style", style) + return err +} +``` + +### 5. Status Line Format + +#### Static Identity (Left) + +```go +// SetStatusFormat configures the status line for Gas Town sessions. 
+func (t *Tmux) SetStatusFormat(session, rig, worker, role string) error { + // Format: [gastown/Rictus] polecat + left := fmt.Sprintf("[%s/%s] %s ", rig, worker, role) + + if _, err := t.run("set-option", "-t", session, "status-left-length", "40"); err != nil { + return err + } + return t.run("set-option", "-t", session, "status-left", left) +} +``` + +#### Dynamic Context (Right) + +The right side shows dynamic info that agents can update: + +``` +gt-70b3 | 📬 2 | 14:30 +``` + +Components: +- **Current issue** - what the agent is working on +- **Mail indicator** - unread mail count (hidden if 0) +- **Time** - simple clock + +Implementation via tmux environment variables + shell expansion: + +```go +// SetDynamicStatus configures the right side with dynamic content. +func (t *Tmux) SetDynamicStatus(session string) error { + // Use a shell command that reads from env vars we set + // Agents update GT_ISSUE, we poll mail count + // + // Format: #{GT_ISSUE} | 📬 #{mail_count} | %H:%M + // + // tmux can run shell commands in status-right with #() + right := `#(gt status-line --session=` + session + `) %H:%M` + + if _, err := t.run("set-option", "-t", session, "status-right-length", "50"); err != nil { + return err + } + return t.run("set-option", "-t", session, "status-right", right) +} +``` + +#### `gt status-line` Command + +A fast command for tmux to call every few seconds: + +```go +// cmd/statusline.go +func runStatusLine(cmd *cobra.Command, args []string) error { + session := cmd.Flag("session").Value.String() + + // Get current issue from tmux env + issue, _ := tmux.GetEnvironment(session, "GT_ISSUE") + + // Get mail count (fast - just counts files or queries beads) + mailCount := mail.UnreadCount(identity) + + // Build output + var parts []string + if issue != "" { + parts = append(parts, issue) + } + if mailCount > 0 { + parts = append(parts, fmt.Sprintf("📬 %d", mailCount)) + } + + fmt.Print(strings.Join(parts, " | ")) + return nil +} +``` + +#### Agent Updates 
Issue + +Agents call this when starting/finishing work: + +```bash +# When starting work on an issue +gt issue set gt-70b3 + +# When done +gt issue clear +``` + +Implementation: + +```go +// cmd/issue.go +func runIssueSet(cmd *cobra.Command, args []string) error { + issueID := args[0] + session := os.Getenv("TMUX_PANE") // or detect from GT_* vars + + return tmux.SetEnvironment(session, "GT_ISSUE", issueID) +} +``` + +#### Mayor-Specific Status + +Mayor gets a different right-side format: + +``` +5 polecats | 2 rigs | 📬 1 | 14:30 +``` + +```go +func runMayorStatusLine() { + polecats := countActivePolecats() + rigs := countActiveRigs() + mail := mail.UnreadCount("mayor/") + + var parts []string + parts = append(parts, fmt.Sprintf("%d polecats", polecats)) + parts = append(parts, fmt.Sprintf("%d rigs", rigs)) + if mail > 0 { + parts = append(parts, fmt.Sprintf("📬 %d", mail)) + } + fmt.Print(strings.Join(parts, " | ")) +} +``` + +#### Example Status Bars + +**Polecat working on issue:** +``` +[gastown/Rictus] polecat gt-70b3 | 📬 1 | 14:30 +``` + +**Crew worker, no mail:** +``` +[gastown/max] crew gt-vc1n | 14:30 +``` + +**Mayor overview:** +``` +[Mayor] coordinator 5 polecats | 2 rigs | 📬 2 | 14:30 +``` + +**Idle polecat:** +``` +[gastown/Wez] polecat | 14:30 +``` + +### 6. Integration Points + +#### Session Manager (session/manager.go) + +```go +func (m *Manager) Start(polecat string, opts StartOptions) error { + // ... existing code ... + + // Get theme from rig config + theme := m.getTheme() + + // Create session with theme + if err := m.tmux.NewSessionWithOptions(sessionID, tmux.SessionOptions{ + WorkDir: workDir, + Theme: theme, + }); err != nil { + return fmt.Errorf("creating session: %w", err) + } + + // Set status format + m.tmux.SetStatusFormat(sessionID, m.rig.Name, polecat, "polecat") + + // ... rest of existing code ... +} +``` + +#### Mayor (cmd/mayor.go) + +```go +func runMayorStart(cmd *cobra.Command, args []string) error { + // ... existing code ... 
+ + // Mayor uses a special theme + theme := tmux.MayorTheme() // Gold/dark - distinguished + + if err := t.NewSessionWithOptions(MayorSessionName, tmux.SessionOptions{ + WorkDir: townRoot, + Theme: &theme, + }); err != nil { + return fmt.Errorf("creating session: %w", err) + } + + t.SetStatusFormat(MayorSessionName, "town", "mayor", "coordinator") + + // ... rest ... +} +``` + +#### Crew (cmd/crew.go) + +Similar pattern - get rig theme and apply. + +### 7. Commands + +#### `gt theme` - View/Set Themes + +```bash +# View current rig theme +gt theme +# Theme: ocean (bg=#1e3a5f, fg=#e0e0e0) + +# View available themes +gt theme --list +# ocean, forest, rust, plum, slate, ember, midnight, wine, teal, copper + +# Set theme for current rig +gt theme set forest + +# Set custom colors +gt theme set --bg="#2d5a3d" --fg="#e0e0e0" +``` + +#### `gt theme apply` - Apply to Running Sessions + +```bash +# Re-apply theme to all running sessions in this rig +gt theme apply +``` + +### 8. Backward Compatibility + +- Existing sessions without themes continue to work (they'll just have default green) +- New sessions get themed automatically +- Users can run `gt theme apply` to update running sessions + +## Implementation Plan + +### Phase 1: Core Infrastructure +1. Add Theme types to `internal/tmux/theme.go` +2. Add ThemeConfig to `internal/config/types.go` +3. Implement `AssignTheme()` function +4. Add `ApplyTheme()` to Tmux wrapper + +### Phase 2: Session Integration +5. Modify `NewSession` to accept SessionOptions +6. Update session.Manager.Start() to apply themes +7. Update cmd/mayor.go to theme Mayor session +8. Update cmd/crew.go to theme crew sessions + +### Phase 3: Static Status Line +9. Implement SetStatusFormat() for left side +10. Apply to all session creation points +11. Update witness.go, spawn.go, refinery, daemon + +### Phase 4: Dynamic Status Line +12. Add `gt status-line` command (fast, tmux-callable) +13. Implement mail count lookup (fast path) +14. 
Implement `gt issue set/clear` for agents to update current issue +15. Configure status-right to call `gt status-line` +16. Add Mayor-specific status line variant + +### Phase 5: Commands & Polish +17. Add `gt theme` command (view/set/apply) +18. Add config file support for custom themes +19. Documentation +20. Update CLAUDE.md with `gt issue set` guidance for agents + +## File Changes + +| File | Changes | +|------|---------| +| `internal/tmux/theme.go` | NEW - Theme types, palette, assignment | +| `internal/tmux/tmux.go` | Add ApplyTheme, SetStatusFormat, SetDynamicStatus | +| `internal/config/types.go` | Add ThemeConfig | +| `internal/session/manager.go` | Use themed session creation | +| `internal/cmd/mayor.go` | Apply Mayor theme + Mayor status format | +| `internal/cmd/crew.go` | Apply rig theme to crew sessions | +| `internal/cmd/witness.go` | Apply rig theme | +| `internal/cmd/spawn.go` | Apply rig theme | +| `internal/cmd/theme.go` | NEW - gt theme command | +| `internal/cmd/statusline.go` | NEW - gt status-line (tmux-callable) | +| `internal/cmd/issue.go` | NEW - gt issue set/clear | +| `internal/daemon/lifecycle.go` | Apply rig theme | +| `internal/refinery/manager.go` | Apply rig theme | +| `CLAUDE.md` (various) | Document `gt issue set` for agents | + +## Open Questions + +1. ~~**Should refinery/witness have distinct colors?**~~ **RESOLVED** + - Answer: Same as rig polecats, role shown in status-left + +2. **Color storage location?** + - Option A: In rig config.json (requires file write) + - Option B: In beads (config-as-data approach from gt-vc1n) + - Recommendation: Start with config.json for simplicity + +3. **Hex colors vs tmux color names?** + - Hex: More precise, but some terminals don't support + - Names: Limited palette, but universal support + - Recommendation: Support both, default to hex with true-color fallback + +4. 
**Status-line refresh frequency?** + - tmux calls `#()` commands every `status-interval` seconds (default 15) + - Trade-off: Faster = more responsive, but more CPU + - Recommendation: 5 seconds (`set -g status-interval 5`) + +## Success Criteria + +- [ ] Each rig has distinct status bar color +- [ ] Users can identify rig at a glance +- [ ] Status bar shows rig/worker/role clearly (left side) +- [ ] Current issue displayed when agent sets it +- [ ] Mail indicator shows unread count +- [ ] Mayor shows aggregate stats (polecats, rigs) +- [ ] Custom colors configurable per-rig +- [ ] Works with existing sessions after `gt theme apply` +- [ ] Agents can update issue via `gt issue set` diff --git a/docs/federation-design.md b/docs/federation-design.md index e6d388a6..129b38a1 100644 --- a/docs/federation-design.md +++ b/docs/federation-design.md @@ -12,13 +12,13 @@ Gas Town needs to scale beyond a single machine: ### Model A: "Town Clone" (VMs) -Clone the entire `~/ai` workspace to a remote VM. It runs like a regular Gas Town: +Clone the entire `~/gt` workspace to a remote VM. It runs like a regular Gas Town: ``` ┌─────────────────────────────────────────┐ │ GCE VM (or any Linux box) │ │ │ -│ ~/ai/ # Full town clone │ +│ ~/gt/ # Full town clone │ │ ├── config/ # Town config │ │ ├── mayor/ # Mayor (or none) │ │ ├── gastown/ # Rig with agents │ @@ -356,7 +356,7 @@ Workers need Claude API access: ## Configuration ```yaml -# ~/ai/config/outposts.yaml +# ~/gt/config/outposts.yaml outposts: # Always present - the local machine - name: local diff --git a/docs/harness.md b/docs/harness.md new file mode 100644 index 00000000..ad7a2d54 --- /dev/null +++ b/docs/harness.md @@ -0,0 +1,331 @@ +# Gas Town Harness Design + +A **harness** is the top-level directory where Gas Town is installed - the workspace that contains all your rigs, agents, and coordination infrastructure. + +## What Is a Harness? + +Think of a harness as the "mount point" for Gas Town. 
It's the root directory where: +- The Mayor operates from +- Rigs are registered and managed +- Town-level beads coordinate mail and handoffs +- The entire workspace is versioned as a git repository + +A harness is NOT: +- A git clone of any project (rigs contain the clones) +- A hidden directory (it's visible and user-controlled) +- Tied to any specific project (it can manage multiple rigs) + +## Harness Structure + +``` +~/gt/ # HARNESS ROOT +├── .git/ # Harness is a git repo +├── .gitignore # Generated by gt git-init +├── .beads/ # Town-level beads (gm-* prefix) +│ ├── beads.db # Mayor mail, coordination, handoffs +│ └── config.yaml # Beads config with prefix: gm +│ +├── CLAUDE.md # Mayor role context (runs from here) +│ +├── mayor/ # Mayor config and state +│ ├── town.json # {"type": "town", "name": "..."} +│ ├── rigs.json # Registry of managed rigs +│ └── state.json # Mayor state +│ +├── rigs/ # Managed rig containers +│ ├── gastown/ # A rig (project container) +│ └── wyvern/ # Another rig +│ +└── / # OR rigs at harness root (legacy) +``` + +## Creating a Harness + +Use `gt install` to create a new harness: + +```bash +# Create a new harness +gt install ~/gt + +# Create with git initialization +gt install ~/gt --git + +# Create and push to GitHub +gt install ~/gt --github=username/my-gastown --private + +# Initialize current directory as harness +gt install . --name my-workspace +``` + +The install command: +1. Creates the directory structure (`mayor/`, `rigs/`) +2. Writes configuration files (`town.json`, `rigs.json`, `state.json`) +3. Generates `CLAUDE.md` with Mayor role context +4. Initializes town-level beads with `gm-` prefix +5. 
Optionally initializes git with `.gitignore` + +## Harness vs Town vs Rig + +| Concept | Description | Example | +|---------|-------------|---------| +| **Harness** | Installation directory | `~/gt/` | +| **Town** | Logical workspace (same as harness) | The Gas Town instance | +| **Rig** | Project container within harness | `~/gt/gastown/` | + +The terms "harness" and "town" are often used interchangeably. A harness IS a town. The distinction is physical (harness = directory) vs logical (town = workspace concept). + +## Beads in a Harness + +A harness has **two levels** of beads: + +### Town-Level Beads + +Located at `/.beads/` with `gm-` prefix: +- Mayor mail and inbox +- Cross-rig coordination messages +- Session handoff notes + +### Rig-Level Beads + +Each rig has its own `.beads/` with a project-specific prefix: +- Work issues (bugs, features, tasks) +- Merge requests +- Agent-local mail within the rig + +The Mayor sees both: town beads for mail, rig beads for work coordination. + +## Beads Redirect Pattern + +In complex setups, you may want the harness root's `.beads/` to redirect to a rig's beads. This is useful when: +- Multiple systems share a harness +- You want a single source of truth for beads +- Migration scenarios + +Create a redirect file: + +```bash +# Instead of .beads/ directory, create .beads/redirect file +mkdir .beads +echo "path/to/actual/.beads" > .beads/redirect +``` + +Example from a real setup: +``` +# ~/ai/.beads/redirect +# Redirect to gastown beads (Mayor workspace) +# The Mayor runs in ~/ai but manages gastown issues in mayor/rigs/gastown +mayor/rigs/gastown/.beads +``` + +**When to use redirects:** +- Shared harness between different Gas Town versions (PGT/GGT) +- When rig beads should be the canonical town beads +- Hybrid setups where agents work in different locations + +## Multiple Gas Towns in One Location + +Sometimes you need to run multiple Gas Town systems from the same parent directory. 
This creates a "shared harness" scenario. + +### The Problem + +If Python Gas Town (PGT) and Go Gas Town (GGT) both use `~/ai/`: +``` +~/ai/ +├── .gastown/ # PGT config +├── .beads/ # Which system owns this? +├── mayor/ # PGT mayor? GGT mayor? +└── gastown/ # PGT rig? GGT rig? +``` + +### Solutions + +**Option 1: Separate harnesses (recommended)** +``` +~/ai/ # PGT harness +~/gt/ # GGT harness (separate) +``` + +**Option 2: Namespaced directories** +``` +~/ai/ +├── pgt/ # PGT harness +│ ├── mayor/ +│ └── gastown/ +└── ggt/ # GGT harness + ├── mayor/ + └── gastown/ +``` + +**Option 3: Beads redirect (advanced)** +``` +~/ai/ +├── .beads/redirect # Points to canonical location +├── pgt-mayor/ # PGT-specific +├── ggt-mayor/ # GGT-specific +└── gastown/ # Shared rig +``` + +## Harness Configuration Files + +### mayor/town.json + +Identifies this as a Gas Town installation: + +```json +{ + "type": "town", + "version": 1, + "name": "stevey-gastown", + "created_at": "2024-01-15T10:30:00Z" +} +``` + +### mayor/rigs.json + +Registry of managed rigs: + +```json +{ + "version": 1, + "rigs": { + "gastown": { + "git_url": "https://github.com/steveyegge/gastown", + "added_at": "2024-01-15T10:30:00Z" + }, + "wyvern": { + "git_url": "https://github.com/steveyegge/wyvern", + "added_at": "2024-01-16T09:00:00Z" + } + } +} +``` + +### mayor/state.json + +Mayor agent state: + +```json +{ + "role": "mayor", + "last_active": "2024-01-17T14:30:00Z" +} +``` + +## Git for Harnesses + +A harness should be a git repository. 
This enables: +- Versioning of configuration +- Beads sync across machines +- Session handoff via beads commits +- Recovery after failures + +### Initialize git + +```bash +gt git-init # Basic git setup +gt git-init --github=user/repo # Create GitHub repo +gt git-init --github=user/repo --private # Private repo +``` + +### Standard .gitignore + +The `gt git-init` command creates: + +```gitignore +# Gas Town harness gitignore + +# Agent sessions and logs +*.log +*.pid +/sessions/ + +# Rig working directories (managed separately) +/rigs/*/polecats/*/ +/rigs/*/refinery/rig/ +/rigs/*/crew/*/ + +# Sensitive files +.env +*.key +*.pem +credentials.json + +# Editor and OS +.DS_Store +*.swp +*~ +.idea/ +.vscode/ + +# Beads daemon +.beads/beads.sock +.beads/*.pid +``` + +## Harness Health Checks + +Run `gt doctor` to check harness health: + +```bash +gt doctor # Check all +gt doctor --fix # Auto-fix issues +``` + +Checks include: +- Configuration file validity +- Mayor state consistency +- Rig registry accuracy +- Beads database health +- Git state cleanliness + +## Harness Templates + +For organizations wanting consistent Gas Town setups, create a template repository: + +```bash +# Create template harness +gt install ~/gt-template --git --no-beads +# Customize CLAUDE.md, add standard rigs +# Push to GitHub as template repo + +# Users clone template +gh repo create my-gastown --template org/gt-template +cd my-gastown +gt install . --force # Reinitialize with fresh beads +``` + +## Migration Between Harnesses + +To move Gas Town to a new location: + +1. **Export beads state:** + ```bash + bd export > beads-backup.jsonl + ``` + +2. **Create new harness:** + ```bash + gt install ~/new-harness --git + ``` + +3. **Add rigs:** + ```bash + cd ~/new-harness + gt rig add gastown https://github.com/user/gastown + ``` + +4. 
**Import beads:** + ```bash + cd ~/new-harness + bd import < beads-backup.jsonl + ``` + +## Summary + +| Action | Command | +|--------|---------| +| Create harness | `gt install ` | +| Initialize git | `gt git-init` | +| Add rig | `gt rig add ` | +| Check health | `gt doctor` | +| View status | `gt status` | diff --git a/docs/vision.md b/docs/vision.md new file mode 100644 index 00000000..22f10cb5 --- /dev/null +++ b/docs/vision.md @@ -0,0 +1,224 @@ +# Gas Town Vision + +> Work is fractal. Every piece of work can contain other work, recursively. +> Work history is proof of capability. Your CV is your chain. + +## The Big Picture + +Gas Town is more than an AI coding agent orchestrator. It's a **work execution engine** built on a universal ledger of work - where every task, every completion, every validation is recorded with cryptographic integrity. + +The system is designed to evolve from "coding agent coordinator" to "universal work allocation platform" without changing its fundamental architecture. + +## Core Insights + +### 1. Git is Already a Blockchain + +Git provides: +- **Merkle tree** - Cryptographic hashes linking history +- **Distributed consensus** - Push/pull with conflict resolution +- **Immutability** - History cannot be rewritten (without force) +- **Auditability** - Every change attributed to an author + +We don't need to build a new blockchain. Git, combined with Beads, gives us the ledger infrastructure for free. + +### 2. Work is a Universal Protocol + +Every piece of structured work can be expressed as: +- **Identity** - Who is doing the work +- **Specification** - What needs to be done +- **Acceptance criteria** - How we know it's done +- **Validation** - Who approved the completion +- **Provenance** - What work led to this work + +This applies equally to: +- Code commits and PRs +- Design documents +- Bug fixes +- Research tasks +- Any structured human or AI work + +### 3. 
Your Work History IS Your CV + +Instead of curated resumes: +- Every completed task is recorded +- Quality signals are captured (acceptance rate, revision count, review feedback) +- Skills are derived from demonstrated capability, not claimed expertise +- Reputation is earned through work, not credentials + +This is "proof-of-stake" for work: +- Stake = accumulated reputation +- Claim work → stake your reputation +- Complete well → reputation grows +- Fail → reputation diminished (but recoverable) + +### 4. Molecules Crystallize Workflows + +Molecules are reusable workflow patterns that encode: +- What steps a workflow contains +- How steps depend on each other +- What quality gates must pass +- How work can be parallelized + +Key properties: +- **Deterministic structure** - Same molecule, same step graph +- **Nondeterministic execution** - Any worker can execute any ready step +- **Idempotent progress** - Completed steps stay completed + +This enables the "engineer in a box" - AI agents that follow rigorous workflows with built-in quality gates, not just "do the task." + +### 5. Federation Creates the World Chain + +The recursive structure: +``` +World +├── Platform (GitHub, enterprise systems, ...) +│ ├── Organization +│ │ ├── Project +│ │ │ ├── Epic +│ │ │ │ └── Task chains +│ │ │ │ └── Entity contribution records +``` + +Each level has its own chain. Work rolls up. Skills aggregate. The world gets a unified view of capability. 
+ +## The Technical Foundation + +### Beads as Ledger + +| Concept | Beads Implementation | +|---------|---------------------| +| Transaction | Issue/task/work item | +| Address | Entity identifier | +| Smart Contract | Work specification + acceptance criteria | +| Validation | Merge/review/approval | +| Stake | Accumulated reputation chain | +| Gas | Effort estimation | + +### The OS Metaphor + +Gas Town is an operating system for work: + +| OS Concept | Gas Town | +|------------|----------| +| Kernel | Daemon | +| Process scheduler | Ready work + dependencies | +| Timer interrupts | Timed beads | +| Semaphores | Resource beads | +| Background services | Pinned beads | +| Process templates | Molecules | +| IPC | Mail beads | + +### Entity Chains + +Every entity (human or agent) accumulates a work chain: +- Work they created +- Work they claimed +- Work they completed +- Work they validated +- Quality signals at each step + +Format: `entity:////` + +Examples: +- `entity://gastown/steveyegge/polecat-nux` +- `entity://github/acme-corp/jane-doe` + +The chain IS the CV. Skills are derived from demonstrated work, not claimed. + +### Skill Vectors + +Work items carry skill embeddings: +- What capabilities does this work demonstrate? +- What prior knowledge does it require? +- How complex is it? 
+ +Matching displaced workers to unmet demand becomes a vector similarity problem: +- Worker's demonstrated skills (from their chain) +- Job requirements (from work specification) +- Match = high similarity + acceptable reputation + +## Public Framing + +These capabilities can be explained without revealing the full vision: + +| Capability | Public Explanation | +|------------|-------------------| +| Entity tracking | "Agent accountability and work attribution" | +| Recursive structure | "Enterprise-scale hierarchical tracking" | +| Skill vectors | "Capability-based task routing" | +| Federation | "Multi-repo / org-wide visibility" | +| Validation | "Review workflows and quality gates" | +| Mail/threading | "Agent coordination for distributed teams" | + +All true - just not the full story. + +## The Execution Model + +### Refineries as Validators + +Refineries don't just merge code - they're validator nodes: +- Verify work meets acceptance criteria +- Record validation in the ledger +- Gate entry to the canonical chain (main branch) + +### Polecats as Workers + +Polecats aren't just coding agents - they're work executors with chains: +- Each polecat has an identity +- Work history accumulates +- Success rate is tracked +- Demonstrated skills emerge + +### Molecules as Contracts + +Molecules aren't just workflows - they're smart contracts for work: +- Specify exactly what must happen +- Encode acceptance criteria per step +- Enable deterministic verification +- Support nondeterministic execution + +## Where This Goes + +### Phase 1: Gas Town v1 (Now) +- Coding agent orchestrator +- Beads-backed work tracking +- Molecule-based workflows +- Local federation ready + +### Phase 2: Federation +- Cross-machine outposts +- Multi-rig coordination +- Git-based sync everywhere + +### Phase 3: Entity Chains +- Persistent agent identities +- Work history accumulation +- Skill derivation from work + +### Phase 4: Platform of Platforms +- Adapters for external work sources +- 
Cross-platform skill matching +- The world chain emerges + +## Design Principles + +1. **Git as blockchain** - Don't build new consensus; use git +2. **Federation not global consensus** - Each platform validates its own work +3. **Skill embeddings as native** - Work items carry capability vectors +4. **Human-readable** - Beads is Markdown; auditable, trustworthy +5. **Incremental evolution** - Current architecture grows into the full vision + +## The Redemption Arc + +The system doesn't judge - it tracks demonstrated capability and matches it to demand. + +- Someone with a troubled past can rebuild their chain +- Skills proven through work matter more than credentials +- Every completion is a step toward redemption +- The ledger is honest but not cruel + +This is capability matching at scale. The work speaks for itself. + +--- + +*"Work is fractal. Money is crystallized labor. The world needs a ledger."* diff --git a/internal/beads/beads.go b/internal/beads/beads.go index 675dc3a8..ce072392 100644 --- a/internal/beads/beads.go +++ b/internal/beads/beads.go @@ -58,10 +58,12 @@ type IssueDep struct { // ListOptions specifies filters for listing issues. type ListOptions struct { - Status string // "open", "closed", "all" - Type string // "task", "bug", "feature", "epic" - Priority int // 0-4, -1 for no filter - Parent string // filter by parent ID + Status string // "open", "closed", "all" + Type string // "task", "bug", "feature", "epic" + Priority int // 0-4, -1 for no filter + Parent string // filter by parent ID + Assignee string // filter by assignee (e.g., "gastown/Toast") + NoAssignee bool // filter for issues with no assignee } // CreateOptions specifies options for creating an issue. 
@@ -164,6 +166,12 @@ func (b *Beads) List(opts ListOptions) ([]*Issue, error) { if opts.Parent != "" { args = append(args, "--parent="+opts.Parent) } + if opts.Assignee != "" { + args = append(args, "--assignee="+opts.Assignee) + } + if opts.NoAssignee { + args = append(args, "--no-assignee") + } out, err := b.run(args...) if err != nil { @@ -178,6 +186,47 @@ func (b *Beads) List(opts ListOptions) ([]*Issue, error) { return issues, nil } +// ListByAssignee returns all issues assigned to a specific assignee. +// The assignee is typically in the format "rig/polecatName" (e.g., "gastown/Toast"). +func (b *Beads) ListByAssignee(assignee string) ([]*Issue, error) { + return b.List(ListOptions{ + Status: "all", // Include both open and closed for state derivation + Assignee: assignee, + Priority: -1, // No priority filter + }) +} + +// GetAssignedIssue returns the first open issue assigned to the given assignee. +// Returns nil if no open issue is assigned. +func (b *Beads) GetAssignedIssue(assignee string) (*Issue, error) { + issues, err := b.List(ListOptions{ + Status: "open", + Assignee: assignee, + Priority: -1, + }) + if err != nil { + return nil, err + } + + // Also check in_progress status explicitly + if len(issues) == 0 { + issues, err = b.List(ListOptions{ + Status: "in_progress", + Assignee: assignee, + Priority: -1, + }) + if err != nil { + return nil, err + } + } + + if len(issues) == 0 { + return nil, nil + } + + return issues[0], nil +} + // Ready returns issues that are ready to work (not blocked). func (b *Beads) Ready() ([]*Issue, error) { out, err := b.run("ready", "--json") @@ -193,6 +242,22 @@ func (b *Beads) Ready() ([]*Issue, error) { return issues, nil } +// ReadyWithType returns ready issues filtered by type. +// Uses bd ready --type flag for server-side filtering (gt-ktf3). 
+func (b *Beads) ReadyWithType(issueType string) ([]*Issue, error) { + out, err := b.run("ready", "--json", "--type", issueType, "-n", "100") + if err != nil { + return nil, err + } + + var issues []*Issue + if err := json.Unmarshal(out, &issues); err != nil { + return nil, fmt.Errorf("parsing bd ready output: %w", err) + } + + return issues, nil +} + // Show returns detailed information about an issue. func (b *Beads) Show(id string) (*Issue, error) { out, err := b.run("show", id, "--json") @@ -400,6 +465,88 @@ func (b *Beads) IsBeadsRepo() bool { return err == nil || !errors.Is(err, ErrNotARepo) } +// StatusPinned is the status for pinned beads that never get closed. +const StatusPinned = "pinned" + +// HandoffBeadTitle returns the well-known title for a role's handoff bead. +func HandoffBeadTitle(role string) string { + return role + " Handoff" +} + +// FindHandoffBead finds the pinned handoff bead for a role by title. +// Returns nil if not found (not an error). +func (b *Beads) FindHandoffBead(role string) (*Issue, error) { + issues, err := b.List(ListOptions{Status: StatusPinned, Priority: -1}) + if err != nil { + return nil, fmt.Errorf("listing pinned issues: %w", err) + } + + targetTitle := HandoffBeadTitle(role) + for _, issue := range issues { + if issue.Title == targetTitle { + return issue, nil + } + } + + return nil, nil +} + +// GetOrCreateHandoffBead returns the handoff bead for a role, creating it if needed. 
+func (b *Beads) GetOrCreateHandoffBead(role string) (*Issue, error) { + // Check if it exists + existing, err := b.FindHandoffBead(role) + if err != nil { + return nil, err + } + if existing != nil { + return existing, nil + } + + // Create new handoff bead + issue, err := b.Create(CreateOptions{ + Title: HandoffBeadTitle(role), + Type: "task", + Priority: 2, + Description: "", // Empty until first handoff + }) + if err != nil { + return nil, fmt.Errorf("creating handoff bead: %w", err) + } + + // Update to pinned status + status := StatusPinned + if err := b.Update(issue.ID, UpdateOptions{Status: &status}); err != nil { + return nil, fmt.Errorf("setting handoff bead to pinned: %w", err) + } + + // Re-fetch to get updated status + return b.Show(issue.ID) +} + +// UpdateHandoffContent updates the handoff bead's description with new content. +func (b *Beads) UpdateHandoffContent(role, content string) error { + issue, err := b.GetOrCreateHandoffBead(role) + if err != nil { + return err + } + + return b.Update(issue.ID, UpdateOptions{Description: &content}) +} + +// ClearHandoffContent clears the handoff bead's description. +func (b *Beads) ClearHandoffContent(role string) error { + issue, err := b.FindHandoffBead(role) + if err != nil { + return err + } + if issue == nil { + return nil // Nothing to clear + } + + empty := "" + return b.Update(issue.ID, UpdateOptions{Description: &empty}) +} + // MRFields holds the structured fields for a merge-request issue. // These fields are stored as key: value lines in the issue description. 
type MRFields struct { diff --git a/internal/beads/beads_test.go b/internal/beads/beads_test.go index e37fc843..fa90077e 100644 --- a/internal/beads/beads_test.go +++ b/internal/beads/beads_test.go @@ -70,7 +70,7 @@ func TestIsBeadsRepo(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() b := New(tmpDir) // This should return false since there's no .beads directory diff --git a/internal/beads/builtin_molecules.go b/internal/beads/builtin_molecules.go new file mode 100644 index 00000000..1aa48812 --- /dev/null +++ b/internal/beads/builtin_molecules.go @@ -0,0 +1,138 @@ +// Package beads provides a wrapper for the bd (beads) CLI. +package beads + +// BuiltinMolecule defines a built-in molecule template. +type BuiltinMolecule struct { + ID string // Well-known ID (e.g., "mol-engineer-in-box") + Title string + Description string +} + +// BuiltinMolecules returns all built-in molecule definitions. +func BuiltinMolecules() []BuiltinMolecule { + return []BuiltinMolecule{ + EngineerInBoxMolecule(), + QuickFixMolecule(), + ResearchMolecule(), + } +} + +// EngineerInBoxMolecule returns the engineer-in-box molecule definition. +// This is a full workflow from design to merge. +func EngineerInBoxMolecule() BuiltinMolecule { + return BuiltinMolecule{ + ID: "mol-engineer-in-box", + Title: "Engineer in a Box", + Description: `Full workflow from design to merge. + +## Step: design +Think carefully about architecture. Consider: +- Existing patterns in the codebase +- Trade-offs between approaches +- Testability and maintainability + +Write a brief design summary before proceeding. + +## Step: implement +Write the code. Follow codebase conventions. +Needs: design + +## Step: review +Self-review the changes. Look for: +- Bugs and edge cases +- Style issues +- Missing error handling +Needs: implement + +## Step: test +Write and run tests. Cover happy path and edge cases. +Fix any failures before proceeding. 
+Needs: implement + +## Step: submit +Submit for merge via refinery. +Needs: review, test`, + } +} + +// QuickFixMolecule returns the quick-fix molecule definition. +// This is a fast path for small changes. +func QuickFixMolecule() BuiltinMolecule { + return BuiltinMolecule{ + ID: "mol-quick-fix", + Title: "Quick Fix", + Description: `Fast path for small changes. + +## Step: implement +Make the fix. Keep it focused. + +## Step: test +Run relevant tests. Fix any regressions. +Needs: implement + +## Step: submit +Submit for merge. +Needs: test`, + } +} + +// ResearchMolecule returns the research molecule definition. +// This is an investigation workflow. +func ResearchMolecule() BuiltinMolecule { + return BuiltinMolecule{ + ID: "mol-research", + Title: "Research", + Description: `Investigation workflow. + +## Step: investigate +Explore the question. Search code, read docs, +understand context. Take notes. + +## Step: document +Write up findings. Include: +- What you learned +- Recommendations +- Open questions +Needs: investigate`, + } +} + +// SeedBuiltinMolecules creates all built-in molecules in the beads database. +// It skips molecules that already exist (by title match). +// Returns the number of molecules created. 
+func (b *Beads) SeedBuiltinMolecules() (int, error) { + molecules := BuiltinMolecules() + created := 0 + + // Get existing molecules to avoid duplicates + existing, err := b.List(ListOptions{Type: "molecule", Priority: -1}) + if err != nil { + return 0, err + } + + // Build map of existing molecule titles + existingTitles := make(map[string]bool) + for _, issue := range existing { + existingTitles[issue.Title] = true + } + + // Create each molecule if it doesn't exist + for _, mol := range molecules { + if existingTitles[mol.Title] { + continue // Already exists + } + + _, err := b.Create(CreateOptions{ + Title: mol.Title, + Type: "molecule", + Priority: 2, // Medium priority + Description: mol.Description, + }) + if err != nil { + return created, err + } + created++ + } + + return created, nil +} diff --git a/internal/beads/builtin_molecules_test.go b/internal/beads/builtin_molecules_test.go new file mode 100644 index 00000000..e8235204 --- /dev/null +++ b/internal/beads/builtin_molecules_test.go @@ -0,0 +1,143 @@ +package beads + +import "testing" + +func TestBuiltinMolecules(t *testing.T) { + molecules := BuiltinMolecules() + + if len(molecules) != 3 { + t.Errorf("expected 3 built-in molecules, got %d", len(molecules)) + } + + // Verify each molecule can be parsed and validated + for _, mol := range molecules { + t.Run(mol.Title, func(t *testing.T) { + // Check required fields + if mol.ID == "" { + t.Error("molecule missing ID") + } + if mol.Title == "" { + t.Error("molecule missing Title") + } + if mol.Description == "" { + t.Error("molecule missing Description") + } + + // Parse the molecule steps + steps, err := ParseMoleculeSteps(mol.Description) + if err != nil { + t.Fatalf("failed to parse molecule steps: %v", err) + } + + if len(steps) == 0 { + t.Error("molecule has no steps") + } + + // Validate the molecule as if it were an issue + issue := &Issue{ + Type: "molecule", + Title: mol.Title, + Description: mol.Description, + } + + if err := 
ValidateMolecule(issue); err != nil { + t.Errorf("molecule validation failed: %v", err) + } + }) + } +} + +func TestEngineerInBoxMolecule(t *testing.T) { + mol := EngineerInBoxMolecule() + + steps, err := ParseMoleculeSteps(mol.Description) + if err != nil { + t.Fatalf("failed to parse: %v", err) + } + + // Should have 5 steps: design, implement, review, test, submit + if len(steps) != 5 { + t.Errorf("expected 5 steps, got %d", len(steps)) + } + + // Verify step refs + expectedRefs := []string{"design", "implement", "review", "test", "submit"} + for i, expected := range expectedRefs { + if steps[i].Ref != expected { + t.Errorf("step %d: expected ref %q, got %q", i, expected, steps[i].Ref) + } + } + + // Verify dependencies + // design has no deps + if len(steps[0].Needs) != 0 { + t.Errorf("design should have no deps, got %v", steps[0].Needs) + } + + // implement needs design + if len(steps[1].Needs) != 1 || steps[1].Needs[0] != "design" { + t.Errorf("implement should need design, got %v", steps[1].Needs) + } + + // review needs implement + if len(steps[2].Needs) != 1 || steps[2].Needs[0] != "implement" { + t.Errorf("review should need implement, got %v", steps[2].Needs) + } + + // test needs implement + if len(steps[3].Needs) != 1 || steps[3].Needs[0] != "implement" { + t.Errorf("test should need implement, got %v", steps[3].Needs) + } + + // submit needs review and test + if len(steps[4].Needs) != 2 { + t.Errorf("submit should need 2 deps, got %v", steps[4].Needs) + } +} + +func TestQuickFixMolecule(t *testing.T) { + mol := QuickFixMolecule() + + steps, err := ParseMoleculeSteps(mol.Description) + if err != nil { + t.Fatalf("failed to parse: %v", err) + } + + // Should have 3 steps: implement, test, submit + if len(steps) != 3 { + t.Errorf("expected 3 steps, got %d", len(steps)) + } + + expectedRefs := []string{"implement", "test", "submit"} + for i, expected := range expectedRefs { + if steps[i].Ref != expected { + t.Errorf("step %d: expected ref %q, got %q", i, 
expected, steps[i].Ref) + } + } +} + +func TestResearchMolecule(t *testing.T) { + mol := ResearchMolecule() + + steps, err := ParseMoleculeSteps(mol.Description) + if err != nil { + t.Fatalf("failed to parse: %v", err) + } + + // Should have 2 steps: investigate, document + if len(steps) != 2 { + t.Errorf("expected 2 steps, got %d", len(steps)) + } + + expectedRefs := []string{"investigate", "document"} + for i, expected := range expectedRefs { + if steps[i].Ref != expected { + t.Errorf("step %d: expected ref %q, got %q", i, expected, steps[i].Ref) + } + } + + // document needs investigate + if len(steps[1].Needs) != 1 || steps[1].Needs[0] != "investigate" { + t.Errorf("document should need investigate, got %v", steps[1].Needs) + } +} diff --git a/internal/beads/molecule.go b/internal/beads/molecule.go new file mode 100644 index 00000000..79b6250b --- /dev/null +++ b/internal/beads/molecule.go @@ -0,0 +1,305 @@ +// Package beads molecule support - composable workflow templates. +package beads + +import ( + "fmt" + "regexp" + "strings" +) + +// MoleculeStep represents a parsed step from a molecule definition. +type MoleculeStep struct { + Ref string // Step reference (from "## Step: ") + Title string // Step title (first non-empty line or ref) + Instructions string // Prose instructions for this step + Needs []string // Step refs this step depends on + Tier string // Optional tier hint: haiku, sonnet, opus +} + +// stepHeaderRegex matches "## Step: " with optional whitespace. +var stepHeaderRegex = regexp.MustCompile(`(?i)^##\s*Step:\s*(\S+)\s*$`) + +// needsLineRegex matches "Needs: step1, step2, ..." lines. +var needsLineRegex = regexp.MustCompile(`(?i)^Needs:\s*(.+)$`) + +// tierLineRegex matches "Tier: haiku|sonnet|opus" lines. +var tierLineRegex = regexp.MustCompile(`(?i)^Tier:\s*(haiku|sonnet|opus)\s*$`) + +// templateVarRegex matches {{variable}} placeholders. 
+var templateVarRegex = regexp.MustCompile(`\{\{(\w+)\}\}`) + +// ParseMoleculeSteps extracts step definitions from a molecule's description. +// +// The expected format is: +// +// ## Step: +// +// Needs: , # optional +// Tier: haiku|sonnet|opus # optional +// +// Returns an empty slice if no steps are found. +func ParseMoleculeSteps(description string) ([]MoleculeStep, error) { + if description == "" { + return nil, nil + } + + lines := strings.Split(description, "\n") + var steps []MoleculeStep + var currentStep *MoleculeStep + var contentLines []string + + // Helper to finalize current step + finalizeStep := func() { + if currentStep == nil { + return + } + + // Process content lines to extract Needs/Tier and build instructions + var instructionLines []string + for _, line := range contentLines { + trimmed := strings.TrimSpace(line) + + // Check for Needs: line + if matches := needsLineRegex.FindStringSubmatch(trimmed); matches != nil { + deps := strings.Split(matches[1], ",") + for _, dep := range deps { + dep = strings.TrimSpace(dep) + if dep != "" { + currentStep.Needs = append(currentStep.Needs, dep) + } + } + continue + } + + // Check for Tier: line + if matches := tierLineRegex.FindStringSubmatch(trimmed); matches != nil { + currentStep.Tier = strings.ToLower(matches[1]) + continue + } + + // Regular instruction line + instructionLines = append(instructionLines, line) + } + + // Build instructions, trimming leading/trailing blank lines + currentStep.Instructions = strings.TrimSpace(strings.Join(instructionLines, "\n")) + + // Set title from first non-empty line of instructions, or use ref + if currentStep.Instructions != "" { + firstLine := strings.SplitN(currentStep.Instructions, "\n", 2)[0] + currentStep.Title = strings.TrimSpace(firstLine) + } + if currentStep.Title == "" { + currentStep.Title = currentStep.Ref + } + + steps = append(steps, *currentStep) + currentStep = nil + contentLines = nil + } + + for _, line := range lines { + // Check for step 
header + if matches := stepHeaderRegex.FindStringSubmatch(line); matches != nil { + // Finalize previous step if any + finalizeStep() + + // Start new step + currentStep = &MoleculeStep{ + Ref: matches[1], + } + contentLines = nil + continue + } + + // Accumulate content lines if we're in a step + if currentStep != nil { + contentLines = append(contentLines, line) + } + } + + // Finalize last step + finalizeStep() + + return steps, nil +} + +// ExpandTemplateVars replaces {{variable}} placeholders in text using the provided context map. +// Unknown variables are left as-is. +func ExpandTemplateVars(text string, ctx map[string]string) string { + if ctx == nil { + return text + } + + return templateVarRegex.ReplaceAllStringFunc(text, func(match string) string { + // Extract variable name from {{name}} + varName := match[2 : len(match)-2] + if value, ok := ctx[varName]; ok { + return value + } + return match // Leave unknown variables as-is + }) +} + +// InstantiateOptions configures molecule instantiation behavior. +type InstantiateOptions struct { + // Context map for {{variable}} substitution + Context map[string]string +} + +// InstantiateMolecule creates child issues from a molecule template. +// +// For each step in the molecule, this creates: +// - A child issue with ID "{parent.ID}.{step.Ref}" +// - Title from step title +// - Description from step instructions (with template vars expanded) +// - Type: task +// - Priority: inherited from parent +// - Dependencies wired according to Needs: declarations +// +// The function is atomic via bd CLI - either all issues are created or none. +// Returns the created step issues. 
+func (b *Beads) InstantiateMolecule(mol *Issue, parent *Issue, opts InstantiateOptions) ([]*Issue, error) { + if mol == nil { + return nil, fmt.Errorf("molecule issue is nil") + } + if parent == nil { + return nil, fmt.Errorf("parent issue is nil") + } + + // Parse steps from molecule + steps, err := ParseMoleculeSteps(mol.Description) + if err != nil { + return nil, fmt.Errorf("parsing molecule steps: %w", err) + } + + if len(steps) == 0 { + return nil, fmt.Errorf("molecule has no steps defined") + } + + // Build map of step ref -> step for dependency validation + stepMap := make(map[string]*MoleculeStep) + for i := range steps { + stepMap[steps[i].Ref] = &steps[i] + } + + // Validate all Needs references exist + for _, step := range steps { + for _, need := range step.Needs { + if _, ok := stepMap[need]; !ok { + return nil, fmt.Errorf("step %q depends on unknown step %q", step.Ref, need) + } + } + } + + // Create child issues for each step + var createdIssues []*Issue + stepIssueIDs := make(map[string]string) // step ref -> issue ID + + for _, step := range steps { + // Expand template variables in instructions + instructions := step.Instructions + if opts.Context != nil { + instructions = ExpandTemplateVars(instructions, opts.Context) + } + + // Build description with provenance metadata + description := instructions + if description != "" { + description += "\n\n" + } + description += fmt.Sprintf("instantiated_from: %s\nstep: %s", mol.ID, step.Ref) + if step.Tier != "" { + description += fmt.Sprintf("\ntier: %s", step.Tier) + } + + // Create the child issue + childOpts := CreateOptions{ + Title: step.Title, + Type: "task", + Priority: parent.Priority, + Description: description, + Parent: parent.ID, + } + + child, err := b.Create(childOpts) + if err != nil { + // Attempt to clean up created issues on failure + for _, created := range createdIssues { + _ = b.Close(created.ID) + } + return nil, fmt.Errorf("creating step %q: %w", step.Ref, err) + } + + 
createdIssues = append(createdIssues, child) + stepIssueIDs[step.Ref] = child.ID + } + + // Wire inter-step dependencies based on Needs: declarations + for _, step := range steps { + if len(step.Needs) == 0 { + continue + } + + childID := stepIssueIDs[step.Ref] + for _, need := range step.Needs { + dependsOnID := stepIssueIDs[need] + if err := b.AddDependency(childID, dependsOnID); err != nil { + // Log but don't fail - the issues are created + // This is non-atomic but bd CLI doesn't support transactions + return createdIssues, fmt.Errorf("adding dependency %s -> %s: %w", childID, dependsOnID, err) + } + } + } + + return createdIssues, nil +} + +// ValidateMolecule checks if an issue is a valid molecule definition. +// Returns an error describing the problem, or nil if valid. +func ValidateMolecule(mol *Issue) error { + if mol == nil { + return fmt.Errorf("molecule is nil") + } + + if mol.Type != "molecule" { + return fmt.Errorf("issue type is %q, expected molecule", mol.Type) + } + + steps, err := ParseMoleculeSteps(mol.Description) + if err != nil { + return fmt.Errorf("parsing steps: %w", err) + } + + if len(steps) == 0 { + return fmt.Errorf("molecule has no steps defined") + } + + // Build step map for reference validation + stepMap := make(map[string]bool) + for _, step := range steps { + if step.Ref == "" { + return fmt.Errorf("step has empty ref") + } + if stepMap[step.Ref] { + return fmt.Errorf("duplicate step ref: %s", step.Ref) + } + stepMap[step.Ref] = true + } + + // Validate Needs references + for _, step := range steps { + for _, need := range step.Needs { + if !stepMap[need] { + return fmt.Errorf("step %q depends on unknown step %q", step.Ref, need) + } + if need == step.Ref { + return fmt.Errorf("step %q has self-dependency", step.Ref) + } + } + } + + // TODO: Detect cycles in dependency graph + + return nil +} diff --git a/internal/beads/molecule_test.go b/internal/beads/molecule_test.go new file mode 100644 index 00000000..09b5fe14 --- /dev/null 
+++ b/internal/beads/molecule_test.go @@ -0,0 +1,491 @@ +package beads + +import ( + "reflect" + "testing" +) + +func TestParseMoleculeSteps_EmptyDescription(t *testing.T) { + steps, err := ParseMoleculeSteps("") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(steps) != 0 { + t.Errorf("expected 0 steps, got %d", len(steps)) + } +} + +func TestParseMoleculeSteps_NoSteps(t *testing.T) { + desc := `This is a molecule description without any steps. +Just some prose text.` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(steps) != 0 { + t.Errorf("expected 0 steps, got %d", len(steps)) + } +} + +func TestParseMoleculeSteps_SingleStep(t *testing.T) { + desc := `## Step: implement +Write the code carefully. +Follow existing patterns.` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 1 { + t.Fatalf("expected 1 step, got %d", len(steps)) + } + + step := steps[0] + if step.Ref != "implement" { + t.Errorf("Ref = %q, want implement", step.Ref) + } + if step.Title != "Write the code carefully." { + t.Errorf("Title = %q, want 'Write the code carefully.'", step.Title) + } + if step.Instructions != "Write the code carefully.\nFollow existing patterns." { + t.Errorf("Instructions = %q", step.Instructions) + } + if len(step.Needs) != 0 { + t.Errorf("Needs = %v, want empty", step.Needs) + } +} + +func TestParseMoleculeSteps_MultipleSteps(t *testing.T) { + desc := `This workflow takes a task through multiple stages. + +## Step: design +Think about architecture and patterns. +Consider edge cases. + +## Step: implement +Write the implementation. +Needs: design + +## Step: test +Write comprehensive tests. +Needs: implement + +## Step: submit +Submit for review. 
+Needs: implement, test` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 4 { + t.Fatalf("expected 4 steps, got %d", len(steps)) + } + + // Check design step + if steps[0].Ref != "design" { + t.Errorf("step[0].Ref = %q, want design", steps[0].Ref) + } + if len(steps[0].Needs) != 0 { + t.Errorf("step[0].Needs = %v, want empty", steps[0].Needs) + } + + // Check implement step + if steps[1].Ref != "implement" { + t.Errorf("step[1].Ref = %q, want implement", steps[1].Ref) + } + if !reflect.DeepEqual(steps[1].Needs, []string{"design"}) { + t.Errorf("step[1].Needs = %v, want [design]", steps[1].Needs) + } + + // Check test step + if steps[2].Ref != "test" { + t.Errorf("step[2].Ref = %q, want test", steps[2].Ref) + } + if !reflect.DeepEqual(steps[2].Needs, []string{"implement"}) { + t.Errorf("step[2].Needs = %v, want [implement]", steps[2].Needs) + } + + // Check submit step with multiple dependencies + if steps[3].Ref != "submit" { + t.Errorf("step[3].Ref = %q, want submit", steps[3].Ref) + } + if !reflect.DeepEqual(steps[3].Needs, []string{"implement", "test"}) { + t.Errorf("step[3].Needs = %v, want [implement, test]", steps[3].Needs) + } +} + +func TestParseMoleculeSteps_WithTier(t *testing.T) { + desc := `## Step: quick-task +Do something simple. +Tier: haiku + +## Step: complex-task +Do something complex. +Needs: quick-task +Tier: opus` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 2 { + t.Fatalf("expected 2 steps, got %d", len(steps)) + } + + if steps[0].Tier != "haiku" { + t.Errorf("step[0].Tier = %q, want haiku", steps[0].Tier) + } + if steps[1].Tier != "opus" { + t.Errorf("step[1].Tier = %q, want opus", steps[1].Tier) + } +} + +func TestParseMoleculeSteps_CaseInsensitive(t *testing.T) { + desc := `## STEP: Design +Plan the work. +NEEDS: nothing +TIER: SONNET + +## step: implement +Write code. 
+needs: Design +tier: Haiku` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 2 { + t.Fatalf("expected 2 steps, got %d", len(steps)) + } + + // Note: refs preserve original case + if steps[0].Ref != "Design" { + t.Errorf("step[0].Ref = %q, want Design", steps[0].Ref) + } + if steps[0].Tier != "sonnet" { + t.Errorf("step[0].Tier = %q, want sonnet", steps[0].Tier) + } + + if steps[1].Ref != "implement" { + t.Errorf("step[1].Ref = %q, want implement", steps[1].Ref) + } + if steps[1].Tier != "haiku" { + t.Errorf("step[1].Tier = %q, want haiku", steps[1].Tier) + } +} + +func TestParseMoleculeSteps_EngineerInBox(t *testing.T) { + // The canonical example from the design doc + desc := `This workflow takes a task from design to merge. + +## Step: design +Think carefully about architecture. Consider existing patterns, +trade-offs, testability. + +## Step: implement +Write clean code. Follow codebase conventions. +Needs: design + +## Step: review +Review for bugs, edge cases, style issues. +Needs: implement + +## Step: test +Write and run tests. Cover happy path and edge cases. +Needs: implement + +## Step: submit +Submit for merge via refinery. 
+Needs: review, test` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 5 { + t.Fatalf("expected 5 steps, got %d", len(steps)) + } + + expected := []struct { + ref string + needs []string + }{ + {"design", nil}, + {"implement", []string{"design"}}, + {"review", []string{"implement"}}, + {"test", []string{"implement"}}, + {"submit", []string{"review", "test"}}, + } + + for i, exp := range expected { + if steps[i].Ref != exp.ref { + t.Errorf("step[%d].Ref = %q, want %q", i, steps[i].Ref, exp.ref) + } + if exp.needs == nil { + if len(steps[i].Needs) != 0 { + t.Errorf("step[%d].Needs = %v, want empty", i, steps[i].Needs) + } + } else if !reflect.DeepEqual(steps[i].Needs, exp.needs) { + t.Errorf("step[%d].Needs = %v, want %v", i, steps[i].Needs, exp.needs) + } + } +} + +func TestExpandTemplateVars(t *testing.T) { + tests := []struct { + name string + text string + ctx map[string]string + want string + }{ + { + name: "no variables", + text: "Just plain text", + ctx: map[string]string{"foo": "bar"}, + want: "Just plain text", + }, + { + name: "single variable", + text: "Implement {{feature_name}} feature", + ctx: map[string]string{"feature_name": "authentication"}, + want: "Implement authentication feature", + }, + { + name: "multiple variables", + text: "Implement {{feature}} in {{file}}", + ctx: map[string]string{"feature": "login", "file": "auth.go"}, + want: "Implement login in auth.go", + }, + { + name: "unknown variable left as-is", + text: "Value is {{unknown}}", + ctx: map[string]string{"known": "value"}, + want: "Value is {{unknown}}", + }, + { + name: "nil context", + text: "Value is {{var}}", + ctx: nil, + want: "Value is {{var}}", + }, + { + name: "empty context", + text: "Value is {{var}}", + ctx: map[string]string{}, + want: "Value is {{var}}", + }, + { + name: "repeated variable", + text: "{{x}} and {{x}} again", + ctx: map[string]string{"x": "foo"}, + want: "foo and foo again", + }, 
+ { + name: "multiline", + text: "First line with {{a}}.\nSecond line with {{b}}.", + ctx: map[string]string{"a": "alpha", "b": "beta"}, + want: "First line with alpha.\nSecond line with beta.", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ExpandTemplateVars(tt.text, tt.ctx) + if got != tt.want { + t.Errorf("ExpandTemplateVars() = %q, want %q", got, tt.want) + } + }) + } +} + +func TestParseMoleculeSteps_WithTemplateVars(t *testing.T) { + desc := `## Step: implement +Implement {{feature_name}} in {{target_file}}. +Follow the existing patterns.` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 1 { + t.Fatalf("expected 1 step, got %d", len(steps)) + } + + // Template vars should be preserved in parsed instructions + if steps[0].Instructions != "Implement {{feature_name}} in {{target_file}}.\nFollow the existing patterns." { + t.Errorf("Instructions = %q", steps[0].Instructions) + } + + // Now expand them + expanded := ExpandTemplateVars(steps[0].Instructions, map[string]string{ + "feature_name": "user auth", + "target_file": "auth.go", + }) + + if expanded != "Implement user auth in auth.go.\nFollow the existing patterns." { + t.Errorf("expanded = %q", expanded) + } +} + +func TestValidateMolecule_Valid(t *testing.T) { + mol := &Issue{ + ID: "mol-xyz", + Type: "molecule", + Description: `## Step: design +Plan the work. + +## Step: implement +Write code. 
+Needs: design`, + } + + err := ValidateMolecule(mol) + if err != nil { + t.Errorf("ValidateMolecule() = %v, want nil", err) + } +} + +func TestValidateMolecule_WrongType(t *testing.T) { + mol := &Issue{ + ID: "task-xyz", + Type: "task", + Description: `## Step: design\nPlan.`, + } + + err := ValidateMolecule(mol) + if err == nil { + t.Error("ValidateMolecule() = nil, want error for wrong type") + } +} + +func TestValidateMolecule_NoSteps(t *testing.T) { + mol := &Issue{ + ID: "mol-xyz", + Type: "molecule", + Description: "Just some description without steps.", + } + + err := ValidateMolecule(mol) + if err == nil { + t.Error("ValidateMolecule() = nil, want error for no steps") + } +} + +func TestValidateMolecule_DuplicateRef(t *testing.T) { + mol := &Issue{ + ID: "mol-xyz", + Type: "molecule", + Description: `## Step: design +Plan the work. + +## Step: design +Plan again.`, + } + + err := ValidateMolecule(mol) + if err == nil { + t.Error("ValidateMolecule() = nil, want error for duplicate ref") + } +} + +func TestValidateMolecule_UnknownDependency(t *testing.T) { + mol := &Issue{ + ID: "mol-xyz", + Type: "molecule", + Description: `## Step: implement +Write code. +Needs: nonexistent`, + } + + err := ValidateMolecule(mol) + if err == nil { + t.Error("ValidateMolecule() = nil, want error for unknown dependency") + } +} + +func TestValidateMolecule_SelfDependency(t *testing.T) { + mol := &Issue{ + ID: "mol-xyz", + Type: "molecule", + Description: `## Step: implement +Write code. +Needs: implement`, + } + + err := ValidateMolecule(mol) + if err == nil { + t.Error("ValidateMolecule() = nil, want error for self-dependency") + } +} + +func TestValidateMolecule_Nil(t *testing.T) { + err := ValidateMolecule(nil) + if err == nil { + t.Error("ValidateMolecule(nil) = nil, want error") + } +} + +func TestParseMoleculeSteps_WhitespaceHandling(t *testing.T) { + desc := `## Step: spaced + Indented instructions. + + More indented content. 
+ +Needs: dep1 , dep2 ,dep3 +Tier: opus ` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 1 { + t.Fatalf("expected 1 step, got %d", len(steps)) + } + + // Ref preserves original (though trimmed) + if steps[0].Ref != "spaced" { + t.Errorf("Ref = %q, want spaced", steps[0].Ref) + } + + // Dependencies should be trimmed + expectedDeps := []string{"dep1", "dep2", "dep3"} + if !reflect.DeepEqual(steps[0].Needs, expectedDeps) { + t.Errorf("Needs = %v, want %v", steps[0].Needs, expectedDeps) + } + + // Tier should be lowercase and trimmed + if steps[0].Tier != "opus" { + t.Errorf("Tier = %q, want opus", steps[0].Tier) + } +} + +func TestParseMoleculeSteps_EmptyInstructions(t *testing.T) { + desc := `## Step: empty + +## Step: next +Has content.` + + steps, err := ParseMoleculeSteps(desc) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(steps) != 2 { + t.Fatalf("expected 2 steps, got %d", len(steps)) + } + + // First step has empty instructions, title defaults to ref + if steps[0].Instructions != "" { + t.Errorf("step[0].Instructions = %q, want empty", steps[0].Instructions) + } + if steps[0].Title != "empty" { + t.Errorf("step[0].Title = %q, want empty", steps[0].Title) + } + + // Second step has content + if steps[1].Instructions != "Has content." 
{ + t.Errorf("step[1].Instructions = %q", steps[1].Instructions) + } +} diff --git a/internal/cmd/crew.go b/internal/cmd/crew.go index 01bdc1e7..b754dcf3 100644 --- a/internal/cmd/crew.go +++ b/internal/cmd/crew.go @@ -7,6 +7,8 @@ import ( "os/exec" "path/filepath" "strings" + "syscall" + "time" "github.com/spf13/cobra" "github.com/steveyegge/gastown/internal/config" @@ -47,6 +49,7 @@ Commands: gt crew at Attach to crew workspace session gt crew remove Remove a crew workspace gt crew refresh Context cycling with mail-to-self handoff + gt crew restart Kill and restart session fresh (alias: rs) gt crew status [] Show detailed workspace status`, } @@ -150,6 +153,57 @@ Examples: RunE: runCrewStatus, } +var crewRestartCmd = &cobra.Command{ + Use: "restart ", + Aliases: []string{"rs"}, + Short: "Kill and restart crew workspace session", + Long: `Kill the tmux session and restart fresh with Claude. + +Useful when a crew member gets confused or needs a clean slate. +Unlike 'refresh', this does NOT send handoff mail - it's a clean start. + +The command will: +1. Kill existing tmux session if running +2. Start fresh session with Claude +3. Run gt prime to reinitialize context + +Examples: + gt crew restart dave # Restart dave's session + gt crew rs emma # Same, using alias`, + Args: cobra.ExactArgs(1), + RunE: runCrewRestart, +} + +var crewRenameCmd = &cobra.Command{ + Use: "rename ", + Short: "Rename a crew workspace", + Long: `Rename a crew workspace. + +Kills any running session, renames the directory, and updates state. +The new session will use the new name (gt--crew-). + +Examples: + gt crew rename dave david # Rename dave to david + gt crew rename madmax max # Rename madmax to max`, + Args: cobra.ExactArgs(2), + RunE: runCrewRename, +} + +var crewPristineCmd = &cobra.Command{ + Use: "pristine []", + Short: "Sync crew workspaces with remote", + Long: `Ensure crew workspace(s) are up-to-date. + +Runs git pull and bd sync for the specified crew, or all crew workers. 
+Reports any uncommitted changes that may need attention. + +Examples: + gt crew pristine # Pristine all crew workers + gt crew pristine dave # Pristine specific worker + gt crew pristine --json # JSON output`, + RunE: runCrewPristine, +} + func init() { // Add flags crewAddCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to create crew workspace in") @@ -170,6 +224,13 @@ func init() { crewStatusCmd.Flags().StringVar(&crewRig, "rig", "", "Filter by rig name") crewStatusCmd.Flags().BoolVar(&crewJSON, "json", false, "Output as JSON") + crewRenameCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use") + + crewPristineCmd.Flags().StringVar(&crewRig, "rig", "", "Filter by rig name") + crewPristineCmd.Flags().BoolVar(&crewJSON, "json", false, "Output as JSON") + + crewRestartCmd.Flags().StringVar(&crewRig, "rig", "", "Rig to use") + // Add subcommands crewCmd.AddCommand(crewAddCmd) crewCmd.AddCommand(crewListCmd) @@ -177,6 +238,9 @@ func init() { crewCmd.AddCommand(crewRemoveCmd) crewCmd.AddCommand(crewRefreshCmd) crewCmd.AddCommand(crewStatusCmd) + crewCmd.AddCommand(crewRenameCmd) + crewCmd.AddCommand(crewPristineCmd) + crewCmd.AddCommand(crewRestartCmd) rootCmd.AddCommand(crewCmd) } @@ -254,19 +318,12 @@ func inferRigFromCwd(townRoot string) (string, error) { return "", fmt.Errorf("not in workspace") } - // First component should be the rig name - parts := filepath.SplitList(rel) - if len(parts) == 0 { - // Split on path separator instead - for i := 0; i < len(rel); i++ { - if rel[i] == filepath.Separator { - return rel[:i], nil - } - } - // No separator found, entire rel is the rig name - if rel != "" && rel != "." { - return rel, nil - } + // Normalize and split path - first component is the rig name + rel = filepath.ToSlash(rel) + parts := strings.Split(rel, "/") + + if len(parts) > 0 && parts[0] != "" && parts[0] != "." 
{ + return parts[0], nil } return "", fmt.Errorf("could not infer rig from current directory") @@ -446,29 +503,114 @@ func runCrewAt(cmd *cobra.Command, args []string) error { } // Set environment - t.SetEnvironment(sessionID, "GT_RIG", r.Name) - t.SetEnvironment(sessionID, "GT_CREW", name) + _ = t.SetEnvironment(sessionID, "GT_RIG", r.Name) + _ = t.SetEnvironment(sessionID, "GT_CREW", name) + + // Wait for shell to be ready after session creation + if err := t.WaitForShellReady(sessionID, 5*time.Second); err != nil { + return fmt.Errorf("waiting for shell: %w", err) + } // Start claude with skip permissions (crew workers are trusted like Mayor) if err := t.SendKeys(sessionID, "claude --dangerously-skip-permissions"); err != nil { return fmt.Errorf("starting claude: %w", err) } - // Wait a moment for Claude to initialize, then prime it - // We send gt prime after a short delay to ensure Claude is ready - if err := t.SendKeysDelayed(sessionID, "gt prime", 2000); err != nil { + // Wait for Claude to start (pane command changes from shell to node) + shells := []string{"bash", "zsh", "sh", "fish", "tcsh", "ksh"} + if err := t.WaitForCommand(sessionID, shells, 15*time.Second); err != nil { + fmt.Printf("Warning: Timeout waiting for Claude to start: %v\n", err) + } + + // Send gt prime to initialize context + if err := t.SendKeys(sessionID, "gt prime"); err != nil { // Non-fatal: Claude started but priming failed fmt.Printf("Warning: Could not send prime command: %v\n", err) } fmt.Printf("%s Created session for %s/%s\n", style.Bold.Render("✓"), r.Name, name) + } else { + // Session exists - check if Claude is still running + // Uses both pane command check and UI marker detection to avoid + // restarting when user is in a subshell spawned from Claude + if !t.IsClaudeRunning(sessionID) { + // Claude has exited, restart it + fmt.Printf("Claude exited, restarting...\n") + if err := t.SendKeys(sessionID, "claude --dangerously-skip-permissions"); err != nil { + return 
fmt.Errorf("restarting claude: %w", err) + } + // Wait for Claude to start, then prime + shells := []string{"bash", "zsh", "sh", "fish", "tcsh", "ksh"} + if err := t.WaitForCommand(sessionID, shells, 15*time.Second); err != nil { + fmt.Printf("Warning: Timeout waiting for Claude to start: %v\n", err) + } + if err := t.SendKeys(sessionID, "gt prime"); err != nil { + fmt.Printf("Warning: Could not send prime command: %v\n", err) + } + // Send crew resume prompt after prime completes + crewPrompt := "Read your mail, act on anything urgent, else await instructions." + if err := t.SendKeysDelayed(sessionID, crewPrompt, 3000); err != nil { + fmt.Printf("Warning: Could not send resume prompt: %v\n", err) + } + } + } + + // Check if we're already in the target session + if isInTmuxSession(sessionID) { + // We're in the session at a shell prompt - just start Claude directly + fmt.Printf("Starting Claude in current session...\n") + return execClaude() } // Attach to session using exec to properly forward TTY return attachToTmuxSession(sessionID) } +// isShellCommand checks if the command is a shell (meaning Claude has exited). +func isShellCommand(cmd string) bool { + shells := []string{"bash", "zsh", "sh", "fish", "tcsh", "ksh"} + for _, shell := range shells { + if cmd == shell { + return true + } + } + return false +} + +// execClaude execs claude, replacing the current process. +// Used when we're already in the target session and just need to start Claude. +func execClaude() error { + claudePath, err := exec.LookPath("claude") + if err != nil { + return fmt.Errorf("claude not found: %w", err) + } + + // exec replaces current process with claude + args := []string{"claude", "--dangerously-skip-permissions"} + return syscall.Exec(claudePath, args, os.Environ()) +} + +// isInTmuxSession checks if we're currently inside the target tmux session. 
+func isInTmuxSession(targetSession string) bool { + // TMUX env var format: /tmp/tmux-501/default,12345,0 + // We need to get the current session name via tmux display-message + tmuxEnv := os.Getenv("TMUX") + if tmuxEnv == "" { + return false // Not in tmux at all + } + + // Get current session name + cmd := exec.Command("tmux", "display-message", "-p", "#{session_name}") + out, err := cmd.Output() + if err != nil { + return false + } + + currentSession := strings.TrimSpace(string(out)) + return currentSession == targetSession +} + // attachToTmuxSession attaches to a tmux session with proper TTY forwarding. func attachToTmuxSession(sessionID string) error { tmuxPath, err := exec.LookPath("tmux") @@ -642,10 +784,15 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error { } // Set environment - t.SetEnvironment(sessionID, "GT_RIG", r.Name) - t.SetEnvironment(sessionID, "GT_CREW", name) + _ = t.SetEnvironment(sessionID, "GT_RIG", r.Name) + _ = t.SetEnvironment(sessionID, "GT_CREW", name) - // Start claude + // Wait for shell to be ready + if err := t.WaitForShellReady(sessionID, 5*time.Second); err != nil { + return fmt.Errorf("waiting for shell: %w", err) + } + + // Start claude (refresh uses regular permissions, reads handoff mail) if err := t.SendKeys(sessionID, "claude"); err != nil { return fmt.Errorf("starting claude: %w", err) } @@ -657,6 +804,76 @@ func runCrewRefresh(cmd *cobra.Command, args []string) error { return nil } +func runCrewRestart(cmd *cobra.Command, args []string) error { + name := args[0] + + crewMgr, r, err := getCrewManager(crewRig) + if err != nil { + return err + } + + // Get the crew worker + worker, err := crewMgr.Get(name) + if err != nil { + if err == crew.ErrCrewNotFound { + return fmt.Errorf("crew workspace '%s' not found", name) + } + return fmt.Errorf("getting crew worker: %w", err) + } + + t := tmux.NewTmux() + sessionID := crewSessionName(r.Name, name) + + // Kill existing session if running + if hasSession, _ := 
t.HasSession(sessionID); hasSession { + if err := t.KillSession(sessionID); err != nil { + return fmt.Errorf("killing old session: %w", err) + } + fmt.Printf("Killed session %s\n", sessionID) + } + + // Start new session + if err := t.NewSession(sessionID, worker.ClonePath); err != nil { + return fmt.Errorf("creating session: %w", err) + } + + // Set environment + t.SetEnvironment(sessionID, "GT_RIG", r.Name) + t.SetEnvironment(sessionID, "GT_CREW", name) + + // Wait for shell to be ready + if err := t.WaitForShellReady(sessionID, 5*time.Second); err != nil { + return fmt.Errorf("waiting for shell: %w", err) + } + + // Start claude with skip permissions (crew workers are trusted) + if err := t.SendKeys(sessionID, "claude --dangerously-skip-permissions"); err != nil { + return fmt.Errorf("starting claude: %w", err) + } + + // Wait for Claude to start, then prime it + shells := []string{"bash", "zsh", "sh", "fish", "tcsh", "ksh"} + if err := t.WaitForCommand(sessionID, shells, 15*time.Second); err != nil { + fmt.Printf("Warning: Timeout waiting for Claude to start: %v\n", err) + } + if err := t.SendKeys(sessionID, "gt prime"); err != nil { + // Non-fatal: Claude started but priming failed + fmt.Printf("Warning: Could not send prime command: %v\n", err) + } + + // Send crew resume prompt after prime completes + crewPrompt := "Read your mail, act on anything urgent, else await instructions." + if err := t.SendKeysDelayed(sessionID, crewPrompt, 3000); err != nil { + fmt.Printf("Warning: Could not send resume prompt: %v\n", err) + } + + fmt.Printf("%s Restarted crew workspace: %s/%s\n", + style.Bold.Render("✓"), r.Name, name) + fmt.Printf("Attach with: %s\n", style.Dim.Render(fmt.Sprintf("gt crew at %s", name))) + + return nil +} + // CrewStatusItem represents detailed status for a crew worker. 
type CrewStatusItem struct { Name string `json:"name"` @@ -794,3 +1011,112 @@ func runCrewStatus(cmd *cobra.Command, args []string) error { return nil } + +func runCrewRename(cmd *cobra.Command, args []string) error { + oldName := args[0] + newName := args[1] + + crewMgr, r, err := getCrewManager(crewRig) + if err != nil { + return err + } + + // Kill any running session for the old name + t := tmux.NewTmux() + oldSessionID := crewSessionName(r.Name, oldName) + if hasSession, _ := t.HasSession(oldSessionID); hasSession { + if err := t.KillSession(oldSessionID); err != nil { + return fmt.Errorf("killing old session: %w", err) + } + fmt.Printf("Killed session %s\n", oldSessionID) + } + + // Perform the rename + if err := crewMgr.Rename(oldName, newName); err != nil { + if err == crew.ErrCrewNotFound { + return fmt.Errorf("crew workspace '%s' not found", oldName) + } + if err == crew.ErrCrewExists { + return fmt.Errorf("crew workspace '%s' already exists", newName) + } + return fmt.Errorf("renaming crew workspace: %w", err) + } + + fmt.Printf("%s Renamed crew workspace: %s/%s → %s/%s\n", + style.Bold.Render("✓"), r.Name, oldName, r.Name, newName) + fmt.Printf("New session will be: %s\n", style.Dim.Render(crewSessionName(r.Name, newName))) + + return nil +} + +func runCrewPristine(cmd *cobra.Command, args []string) error { + crewMgr, r, err := getCrewManager(crewRig) + if err != nil { + return err + } + + var workers []*crew.CrewWorker + + if len(args) > 0 { + // Specific worker + name := args[0] + worker, err := crewMgr.Get(name) + if err != nil { + if err == crew.ErrCrewNotFound { + return fmt.Errorf("crew workspace '%s' not found", name) + } + return fmt.Errorf("getting crew worker: %w", err) + } + workers = []*crew.CrewWorker{worker} + } else { + // All workers + workers, err = crewMgr.List() + if err != nil { + return fmt.Errorf("listing crew workers: %w", err) + } + } + + if len(workers) == 0 { + fmt.Println("No crew workspaces found.") + return nil + } + + var 
results []*crew.PristineResult + + for _, w := range workers { + result, err := crewMgr.Pristine(w.Name) + if err != nil { + return fmt.Errorf("pristine %s: %w", w.Name, err) + } + results = append(results, result) + } + + if crewJSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(results) + } + + // Text output + for _, result := range results { + fmt.Printf("%s %s/%s\n", style.Bold.Render("→"), r.Name, result.Name) + + if result.HadChanges { + fmt.Printf(" %s\n", style.Bold.Render("⚠ Has uncommitted changes")) + } + + if result.Pulled { + fmt.Printf(" %s git pull\n", style.Dim.Render("✓")) + } else if result.PullError != "" { + fmt.Printf(" %s git pull: %s\n", style.Bold.Render("✗"), result.PullError) + } + + if result.Synced { + fmt.Printf(" %s bd sync\n", style.Dim.Render("✓")) + } else if result.SyncError != "" { + fmt.Printf(" %s bd sync: %s\n", style.Bold.Render("✗"), result.SyncError) + } + } + + return nil +} diff --git a/internal/cmd/daemon.go b/internal/cmd/daemon.go new file mode 100644 index 00000000..1bc71ac9 --- /dev/null +++ b/internal/cmd/daemon.go @@ -0,0 +1,233 @@ +package cmd + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/daemon" + "github.com/steveyegge/gastown/internal/style" + "github.com/steveyegge/gastown/internal/workspace" +) + +var daemonCmd = &cobra.Command{ + Use: "daemon", + Short: "Manage the Gas Town daemon", + Long: `Manage the Gas Town background daemon. + +The daemon is a simple Go process that: +- Pokes agents periodically (heartbeat) +- Processes lifecycle requests (cycle, restart, shutdown) +- Restarts sessions when agents request cycling + +The daemon is a "dumb scheduler" - all intelligence is in agents.`, +} + +var daemonStartCmd = &cobra.Command{ + Use: "start", + Short: "Start the daemon", + Long: `Start the Gas Town daemon in the background. 
+ +The daemon will run until stopped with 'gt daemon stop'.`, + RunE: runDaemonStart, +} + +var daemonStopCmd = &cobra.Command{ + Use: "stop", + Short: "Stop the daemon", + Long: `Stop the running Gas Town daemon.`, + RunE: runDaemonStop, +} + +var daemonStatusCmd = &cobra.Command{ + Use: "status", + Short: "Show daemon status", + Long: `Show the current status of the Gas Town daemon.`, + RunE: runDaemonStatus, +} + +var daemonLogsCmd = &cobra.Command{ + Use: "logs", + Short: "View daemon logs", + Long: `View the daemon log file.`, + RunE: runDaemonLogs, +} + +var daemonRunCmd = &cobra.Command{ + Use: "run", + Short: "Run daemon in foreground (internal)", + Hidden: true, + RunE: runDaemonRun, +} + +var ( + daemonLogLines int + daemonLogFollow bool +) + +func init() { + daemonCmd.AddCommand(daemonStartCmd) + daemonCmd.AddCommand(daemonStopCmd) + daemonCmd.AddCommand(daemonStatusCmd) + daemonCmd.AddCommand(daemonLogsCmd) + daemonCmd.AddCommand(daemonRunCmd) + + daemonLogsCmd.Flags().IntVarP(&daemonLogLines, "lines", "n", 50, "Number of lines to show") + daemonLogsCmd.Flags().BoolVarP(&daemonLogFollow, "follow", "f", false, "Follow log output") + + rootCmd.AddCommand(daemonCmd) +} + +func runDaemonStart(cmd *cobra.Command, args []string) error { + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + // Check if already running + running, pid, err := daemon.IsRunning(townRoot) + if err != nil { + return fmt.Errorf("checking daemon status: %w", err) + } + if running { + return fmt.Errorf("daemon already running (PID %d)", pid) + } + + // Start daemon in background + // We use 'gt daemon run' as the actual daemon process + gtPath, err := os.Executable() + if err != nil { + return fmt.Errorf("finding executable: %w", err) + } + + daemonCmd := exec.Command(gtPath, "daemon", "run") + daemonCmd.Dir = townRoot + + // Detach from terminal + daemonCmd.Stdin = nil + daemonCmd.Stdout = nil + 
daemonCmd.Stderr = nil + + if err := daemonCmd.Start(); err != nil { + return fmt.Errorf("starting daemon: %w", err) + } + + // Wait a moment for the daemon to initialize + time.Sleep(200 * time.Millisecond) + + // Verify it started + running, pid, err = daemon.IsRunning(townRoot) + if err != nil { + return fmt.Errorf("checking daemon status: %w", err) + } + if !running { + return fmt.Errorf("daemon failed to start (check logs with 'gt daemon logs')") + } + + fmt.Printf("%s Daemon started (PID %d)\n", style.Bold.Render("✓"), pid) + return nil +} + +func runDaemonStop(cmd *cobra.Command, args []string) error { + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + running, pid, err := daemon.IsRunning(townRoot) + if err != nil { + return fmt.Errorf("checking daemon status: %w", err) + } + if !running { + return fmt.Errorf("daemon is not running") + } + + if err := daemon.StopDaemon(townRoot); err != nil { + return fmt.Errorf("stopping daemon: %w", err) + } + + fmt.Printf("%s Daemon stopped (was PID %d)\n", style.Bold.Render("✓"), pid) + return nil +} + +func runDaemonStatus(cmd *cobra.Command, args []string) error { + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + running, pid, err := daemon.IsRunning(townRoot) + if err != nil { + return fmt.Errorf("checking daemon status: %w", err) + } + + if running { + fmt.Printf("%s Daemon is %s (PID %d)\n", + style.Bold.Render("●"), + style.Bold.Render("running"), + pid) + + // Load state for more details + state, err := daemon.LoadState(townRoot) + if err == nil && !state.StartedAt.IsZero() { + fmt.Printf(" Started: %s\n", state.StartedAt.Format("2006-01-02 15:04:05")) + if !state.LastHeartbeat.IsZero() { + fmt.Printf(" Last heartbeat: %s (#%d)\n", + state.LastHeartbeat.Format("15:04:05"), + state.HeartbeatCount) + } + } + } else { + fmt.Printf("%s Daemon is 
%s\n", + style.Dim.Render("○"), + "not running") + fmt.Printf("\nStart with: %s\n", style.Dim.Render("gt daemon start")) + } + + return nil +} + +func runDaemonLogs(cmd *cobra.Command, args []string) error { + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + logFile := filepath.Join(townRoot, "daemon", "daemon.log") + + if _, err := os.Stat(logFile); os.IsNotExist(err) { + return fmt.Errorf("no log file found at %s", logFile) + } + + if daemonLogFollow { + // Use tail -f for following + tailCmd := exec.Command("tail", "-f", logFile) + tailCmd.Stdout = os.Stdout + tailCmd.Stderr = os.Stderr + return tailCmd.Run() + } + + // Use tail -n for last N lines + tailCmd := exec.Command("tail", "-n", fmt.Sprintf("%d", daemonLogLines), logFile) + tailCmd.Stdout = os.Stdout + tailCmd.Stderr = os.Stderr + return tailCmd.Run() +} + +func runDaemonRun(cmd *cobra.Command, args []string) error { + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + config := daemon.DefaultConfig(townRoot) + d, err := daemon.New(config) + if err != nil { + return fmt.Errorf("creating daemon: %w", err) + } + + return d.Run() +} diff --git a/internal/cmd/gitinit.go b/internal/cmd/gitinit.go new file mode 100644 index 00000000..cad11a57 --- /dev/null +++ b/internal/cmd/gitinit.go @@ -0,0 +1,274 @@ +package cmd + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/style" + "github.com/steveyegge/gastown/internal/workspace" +) + +var ( + gitInitGitHub string + gitInitPrivate bool +) + +var gitInitCmd = &cobra.Command{ + Use: "git-init", + Short: "Initialize git repository for a Gas Town harness", + Long: `Initialize or configure git for an existing Gas Town harness. + +This command: + 1. Creates a comprehensive .gitignore for Gas Town + 2. 
Initializes a git repository if not already present + 3. Optionally creates a GitHub repository + +The .gitignore excludes: + - Polecats and rig clones (recreated with 'gt spawn' or 'gt rig add') + - Runtime state files (state.json, *.lock) + - OS and editor files + +And tracks: + - CLAUDE.md and role contexts + - .beads/ configuration and issues + - Rig configs and hop/ directory + +Examples: + gt git-init # Init git with .gitignore + gt git-init --github=user/repo # Also create public GitHub repo + gt git-init --github=user/repo --private # Create private GitHub repo`, + RunE: runGitInit, +} + +func init() { + gitInitCmd.Flags().StringVar(&gitInitGitHub, "github", "", "Create GitHub repo (format: owner/repo)") + gitInitCmd.Flags().BoolVar(&gitInitPrivate, "private", false, "Make GitHub repo private") + rootCmd.AddCommand(gitInitCmd) +} + +// HarnessGitignore is the standard .gitignore for Gas Town harnesses +const HarnessGitignore = `# Gas Town Harness .gitignore +# Track: Role context, handoff docs, beads config/data, rig configs +# Ignore: Git clones (polecats, mayor/refinery rigs), runtime state + +# ============================================================================= +# Runtime state files (ephemeral) +# ============================================================================= +**/state.json +**/*.lock +**/registry.json + +# ============================================================================= +# Rig git clones (recreate with 'gt spawn' or 'gt rig add') +# ============================================================================= + +# Polecats - worker clones +**/polecats/ + +# Mayor rig clones +**/mayor/rig/ + +# Refinery working clones +**/refinery/rig/ + +# Crew workspaces (user-managed) +**/crew/ + +# ============================================================================= +# Rig runtime state directories +# ============================================================================= +**/.gastown/ + +# 
============================================================================= +# Rig .beads symlinks (point to ignored mayor/rig/.beads, recreated on setup) +# ============================================================================= +# Add rig-specific symlinks here, e.g.: +# gastown/.beads + +# ============================================================================= +# Rigs directory (clones created by 'gt rig add') +# ============================================================================= +/rigs/*/ + +# ============================================================================= +# OS and editor files +# ============================================================================= +.DS_Store +*~ +*.swp +*.swo +.vscode/ +.idea/ + +# ============================================================================= +# Explicitly track (override above patterns) +# ============================================================================= +# Note: .beads/ has its own .gitignore that handles SQLite files +# and keeps issues.jsonl, metadata.json, config.yaml as source of truth +` + +func runGitInit(cmd *cobra.Command, args []string) error { + // Find the harness root + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("getting current directory: %w", err) + } + + harnessRoot, err := workspace.Find(cwd) + if err != nil || harnessRoot == "" { + return fmt.Errorf("not inside a Gas Town harness (run 'gt install' first)") + } + + fmt.Printf("%s Initializing git for harness at %s\n\n", + style.Bold.Render("🔧"), style.Dim.Render(harnessRoot)) + + // Create .gitignore + gitignorePath := filepath.Join(harnessRoot, ".gitignore") + if err := createGitignore(gitignorePath); err != nil { + return err + } + + // Initialize git if needed + gitDir := filepath.Join(harnessRoot, ".git") + if _, err := os.Stat(gitDir); os.IsNotExist(err) { + if err := initGitRepo(harnessRoot); err != nil { + return err + } + } else { + fmt.Printf(" ✓ Git repository already 
exists\n") + } + + // Create GitHub repo if requested + if gitInitGitHub != "" { + if err := createGitHubRepo(harnessRoot, gitInitGitHub, gitInitPrivate); err != nil { + return err + } + } + + fmt.Printf("\n%s Git initialization complete!\n", style.Bold.Render("✓")) + + // Show next steps if no GitHub was created + if gitInitGitHub == "" { + fmt.Println() + fmt.Println("Next steps:") + fmt.Printf(" 1. Create initial commit: %s\n", + style.Dim.Render("git add . && git commit -m 'Initial Gas Town harness'")) + fmt.Printf(" 2. Create remote repo: %s\n", + style.Dim.Render("gt git-init --github=user/repo")) + } + + return nil +} + +func createGitignore(path string) error { + // Check if .gitignore already exists + if _, err := os.Stat(path); err == nil { + // Read existing content + content, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("reading existing .gitignore: %w", err) + } + + // Check if it already has Gas Town section + if strings.Contains(string(content), "Gas Town Harness") { + fmt.Printf(" ✓ .gitignore already configured for Gas Town\n") + return nil + } + + // Append to existing + combined := string(content) + "\n" + HarnessGitignore + if err := os.WriteFile(path, []byte(combined), 0644); err != nil { + return fmt.Errorf("updating .gitignore: %w", err) + } + fmt.Printf(" ✓ Updated .gitignore with Gas Town patterns\n") + return nil + } + + // Create new .gitignore + if err := os.WriteFile(path, []byte(HarnessGitignore), 0644); err != nil { + return fmt.Errorf("creating .gitignore: %w", err) + } + fmt.Printf(" ✓ Created .gitignore\n") + return nil +} + +func initGitRepo(path string) error { + cmd := exec.Command("git", "init") + cmd.Dir = path + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("git init failed: %w", err) + } + fmt.Printf(" ✓ Initialized git repository\n") + return nil +} + +func createGitHubRepo(harnessRoot, repo string, private bool) error { + // Check if gh CLI is 
available + if _, err := exec.LookPath("gh"); err != nil { + return fmt.Errorf("GitHub CLI (gh) not found. Install it with: brew install gh") + } + + // Parse owner/repo format + parts := strings.Split(repo, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid GitHub repo format (expected owner/repo): %s", repo) + } + + fmt.Printf(" → Creating GitHub repository %s...\n", repo) + + // Build gh repo create command + args := []string{"repo", "create", repo, "--source", harnessRoot} + if private { + args = append(args, "--private") + } else { + args = append(args, "--public") + } + args = append(args, "--push") + + cmd := exec.Command("gh", args...) + cmd.Dir = harnessRoot + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("gh repo create failed: %w", err) + } + fmt.Printf(" ✓ Created and pushed to GitHub: %s\n", repo) + return nil +} + +// InitGitForHarness is the shared implementation for git initialization. +// It can be called from both 'gt git-init' and 'gt install --git'. 
+func InitGitForHarness(harnessRoot string, github string, private bool) error { + // Create .gitignore + gitignorePath := filepath.Join(harnessRoot, ".gitignore") + if err := createGitignore(gitignorePath); err != nil { + return err + } + + // Initialize git if needed + gitDir := filepath.Join(harnessRoot, ".git") + if _, err := os.Stat(gitDir); os.IsNotExist(err) { + if err := initGitRepo(harnessRoot); err != nil { + return err + } + } else { + fmt.Printf(" ✓ Git repository already exists\n") + } + + // Create GitHub repo if requested + if github != "" { + if err := createGitHubRepo(harnessRoot, github, private); err != nil { + return err + } + } + + return nil +} diff --git a/internal/cmd/handoff.go b/internal/cmd/handoff.go new file mode 100644 index 00000000..eceae3a3 --- /dev/null +++ b/internal/cmd/handoff.go @@ -0,0 +1,337 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/beads" + "github.com/steveyegge/gastown/internal/style" + "github.com/steveyegge/gastown/internal/workspace" +) + +// HandoffAction for handoff command. +type HandoffAction string + +const ( + HandoffCycle HandoffAction = "cycle" // Restart with handoff mail + HandoffRestart HandoffAction = "restart" // Fresh restart, no handoff + HandoffShutdown HandoffAction = "shutdown" // Terminate, no restart +) + +var handoffCmd = &cobra.Command{ + Use: "handoff", + Short: "Request lifecycle action (retirement/restart)", + Long: `Request a lifecycle action from your manager. + +This command initiates graceful retirement: +1. Verifies git state is clean +2. Sends handoff mail to yourself (for cycle) +3. Sends lifecycle request to your manager +4. Sets requesting state and waits for retirement + +Your manager (daemon for Mayor/Witness, witness for polecats) will +verify the request and terminate your session. 
For cycle/restart, +a new session starts and reads your handoff mail to continue work. + +Flags: + --cycle Restart with handoff mail (default for Mayor/Witness) + --restart Fresh restart, no handoff context + --shutdown Terminate without restart (default for polecats) + +Examples: + gt handoff # Use role-appropriate default + gt handoff --cycle # Restart with context handoff + gt handoff --restart # Fresh restart +`, + RunE: runHandoff, +} + +var ( + handoffCycle bool + handoffRestart bool + handoffShutdown bool + handoffForce bool + handoffMessage string +) + +func init() { + handoffCmd.Flags().BoolVar(&handoffCycle, "cycle", false, "Restart with handoff mail") + handoffCmd.Flags().BoolVar(&handoffRestart, "restart", false, "Fresh restart, no handoff") + handoffCmd.Flags().BoolVar(&handoffShutdown, "shutdown", false, "Terminate without restart") + handoffCmd.Flags().BoolVarP(&handoffForce, "force", "f", false, "Skip pre-flight checks") + handoffCmd.Flags().StringVarP(&handoffMessage, "message", "m", "", "Handoff message for successor") + + rootCmd.AddCommand(handoffCmd) +} + +func runHandoff(cmd *cobra.Command, args []string) error { + // Detect our role + role := detectHandoffRole() + if role == RoleUnknown { + return fmt.Errorf("cannot detect agent role (set GT_ROLE or run from known context)") + } + + // Determine action + action := determineAction(role) + + fmt.Printf("Agent role: %s\n", style.Bold.Render(string(role))) + fmt.Printf("Action: %s\n", style.Bold.Render(string(action))) + + // Find workspace + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + // Pre-flight checks (unless forced) + if !handoffForce { + if err := preFlightChecks(); err != nil { + return fmt.Errorf("pre-flight check failed: %w\n\nUse --force to skip checks", err) + } + } + + // For cycle, update handoff bead for successor + if action == HandoffCycle { + if err := sendHandoffMail(role, townRoot); err 
!= nil { + return fmt.Errorf("updating handoff bead: %w", err) + } + fmt.Printf("%s Updated handoff bead for successor\n", style.Bold.Render("✓")) + } + + // Send lifecycle request to manager + manager := getManager(role) + if err := sendLifecycleRequest(manager, role, action, townRoot); err != nil { + return fmt.Errorf("sending lifecycle request: %w", err) + } + fmt.Printf("%s Sent %s request to %s\n", style.Bold.Render("✓"), action, manager) + + // Set requesting state + if err := setRequestingState(role, action, townRoot); err != nil { + fmt.Printf("Warning: failed to set state: %v\n", err) + } + + // Wait for retirement + fmt.Println() + fmt.Printf("%s Waiting for retirement...\n", style.Dim.Render("◌")) + fmt.Println(style.Dim.Render("(Manager will terminate this session)")) + + // Block forever - manager will kill us + select {} +} + +// detectHandoffRole figures out what kind of agent we are. +// Uses GT_ROLE env var, tmux session name, or directory context. +func detectHandoffRole() Role { + // Check GT_ROLE environment variable first + if role := os.Getenv("GT_ROLE"); role != "" { + switch strings.ToLower(role) { + case "mayor": + return RoleMayor + case "witness": + return RoleWitness + case "refinery": + return RoleRefinery + case "polecat": + return RolePolecat + case "crew": + return RoleCrew + } + } + + // Check tmux session name + out, err := exec.Command("tmux", "display-message", "-p", "#{session_name}").Output() + if err == nil { + sessionName := strings.TrimSpace(string(out)) + if sessionName == "gt-mayor" { + return RoleMayor + } + if strings.HasSuffix(sessionName, "-witness") { + return RoleWitness + } + if strings.HasSuffix(sessionName, "-refinery") { + return RoleRefinery + } + // Polecat sessions: gt-- + if strings.HasPrefix(sessionName, "gt-") && strings.Count(sessionName, "-") >= 2 { + return RolePolecat + } + } + + // Fall back to directory-based detection + cwd, err := os.Getwd() + if err != nil { + return RoleUnknown + } + + townRoot, 
err := workspace.FindFromCwd() + if err != nil || townRoot == "" { + return RoleUnknown + } + + ctx := detectRole(cwd, townRoot) + return ctx.Role +} + +// determineAction picks the action based on flags or role default. +func determineAction(role Role) HandoffAction { + // Explicit flags take precedence + if handoffCycle { + return HandoffCycle + } + if handoffRestart { + return HandoffRestart + } + if handoffShutdown { + return HandoffShutdown + } + + // Role-based defaults + switch role { + case RolePolecat: + return HandoffShutdown // Ephemeral, work is done + case RoleMayor, RoleWitness, RoleRefinery: + return HandoffCycle // Long-running, preserve context + case RoleCrew: + return HandoffCycle // Will only send mail, not actually retire + default: + return HandoffCycle + } +} + +// preFlightChecks verifies it's safe to retire. +func preFlightChecks() error { + // Check git status + cmd := exec.Command("git", "status", "--porcelain") + out, err := cmd.Output() + if err != nil { + // Not a git repo, that's fine + return nil + } + + if len(strings.TrimSpace(string(out))) > 0 { + return fmt.Errorf("uncommitted changes in git working tree") + } + + return nil +} + +// getManager returns the address of our lifecycle manager. +func getManager(role Role) string { + switch role { + case RoleMayor, RoleWitness: + return "daemon/" + case RolePolecat, RoleRefinery: + // Would need rig context to determine witness address + // For now, use a placeholder pattern + return "/witness" + case RoleCrew: + return "human" // Crew is human-managed + default: + return "daemon/" + } +} + +// sendHandoffMail updates the pinned handoff bead for the successor to read. +func sendHandoffMail(role Role, townRoot string) error { + // Build handoff content + content := handoffMessage + if content == "" { + content = fmt.Sprintf(`🤝 HANDOFF: Session cycling + +Time: %s +Role: %s +Action: cycle + +Check bd ready for pending work. +Check gt mail inbox for messages received during transition. 
+`, time.Now().Format(time.RFC3339), role) + } + + // Determine the handoff role key + // For role-specific handoffs, use the role name + roleKey := string(role) + + // Update the pinned handoff bead + bd := beads.New(townRoot) + if err := bd.UpdateHandoffContent(roleKey, content); err != nil { + return fmt.Errorf("updating handoff bead: %w", err) + } + + return nil +} + +// sendLifecycleRequest sends the lifecycle request to our manager. +func sendLifecycleRequest(manager string, role Role, action HandoffAction, townRoot string) error { + if manager == "human" { + // Crew is human-managed, just print a message + fmt.Println(style.Dim.Render("(Crew sessions are human-managed, no lifecycle request sent)")) + return nil + } + + subject := fmt.Sprintf("LIFECYCLE: %s requesting %s", role, action) + body := fmt.Sprintf(`Lifecycle request from %s. + +Action: %s +Time: %s + +Please verify state and execute lifecycle action. +`, role, action, time.Now().Format(time.RFC3339)) + + // Send via bd mail (syntax: bd mail send -s -m ) + cmd := exec.Command("bd", "mail", "send", manager, + "-s", subject, + "-m", body, + ) + cmd.Dir = townRoot + + if out, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("%w: %s", err, string(out)) + } + + return nil +} + +// setRequestingState updates state.json to indicate we're requesting lifecycle action. 
+func setRequestingState(role Role, action HandoffAction, townRoot string) error {
+	// Determine state file location based on role.
+	var stateFile string
+	switch role {
+	case RoleMayor:
+		stateFile = filepath.Join(townRoot, "mayor", "state.json")
+	case RoleWitness:
+		// Would need rig context — TODO: confirm whether witnesses are per-rig;
+		// this town-level path may collide if multiple rigs have witnesses.
+		stateFile = filepath.Join(townRoot, "witness", "state.json")
+	default:
+		// For other roles, use a generic location
+		stateFile = filepath.Join(townRoot, ".gastown", "agent-state.json")
+	}
+
+	// Ensure directory exists (0755: owner-writable, world-readable)
+	if err := os.MkdirAll(filepath.Dir(stateFile), 0755); err != nil {
+		return err
+	}
+
+	// Read existing state or create new. Unmarshal errors are deliberately
+	// ignored: a missing or corrupt state file is simply overwritten below.
+	state := make(map[string]interface{})
+	if data, err := os.ReadFile(stateFile); err == nil {
+		_ = json.Unmarshal(data, &state)
+	}
+
+	// Set requesting state, e.g. "requesting_cycle": true, plus a timestamp
+	// so the manager can detect stale requests.
+	state["requesting_"+string(action)] = true
+	state["requesting_time"] = time.Now().Format(time.RFC3339)
+
+	// Write back (non-atomic write; a concurrent reader may see partial JSON)
+	data, err := json.MarshalIndent(state, "", "  ")
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(stateFile, data, 0644)
+}
diff --git a/internal/cmd/init.go b/internal/cmd/init.go
index eb33ff86..02d48970 100644
--- a/internal/cmd/init.go
+++ b/internal/cmd/init.go
@@ -64,7 +64,7 @@ func runInit(cmd *cobra.Command, args []string) error {
 		// Create .gitkeep to ensure directory is tracked if needed
 		gitkeep := filepath.Join(dirPath, ".gitkeep")
 		if _, err := os.Stat(gitkeep); os.IsNotExist(err) {
-			os.WriteFile(gitkeep, []byte(""), 0644)
+			_ = os.WriteFile(gitkeep, []byte(""), 0644)
 		}
 
 		fmt.Printf("  ✓ Created %s/\n", dir)
diff --git a/internal/cmd/install.go b/internal/cmd/install.go
index a46f0e23..9df43d16 100644
--- a/internal/cmd/install.go
+++ b/internal/cmd/install.go
@@ -4,10 +4,13 @@ import (
 	"encoding/json"
 	"fmt"
 	"os"
+	"os/exec"
 	"path/filepath"
+	"strings"
 	"time"
 
 	"github.com/spf13/cobra"
+	"github.com/steveyegge/gastown/internal/beads"
 	"github.com/steveyegge/gastown/internal/config"
 	"github.com/steveyegge/gastown/internal/style"
"github.com/steveyegge/gastown/internal/templates" @@ -18,6 +21,9 @@ var ( installForce bool installName string installNoBeads bool + installGit bool + installGitHub string + installPrivate bool ) var installCmd = &cobra.Command{ @@ -25,18 +31,24 @@ var installCmd = &cobra.Command{ Short: "Create a new Gas Town harness (workspace)", Long: `Create a new Gas Town harness at the specified path. -A harness is the top-level directory where Gas Town is installed. It contains: +A harness is the top-level directory where Gas Town is installed - the root of +your workspace where all rigs and agents live. It contains: - CLAUDE.md Mayor role context (Mayor runs from harness root) - - mayor/ Mayor config, state, and mail - - rigs/ Managed rig clones (created by 'gt rig add') - - .beads/redirect (optional) Default beads location + - mayor/ Mayor config, state, and rig registry + - rigs/ Managed rig containers (created by 'gt rig add') + - .beads/ Town-level beads DB (gm-* prefix for mayor mail) If path is omitted, uses the current directory. +See docs/harness.md for advanced harness configurations including beads +redirects, multi-system setups, and harness templates. + Examples: - gt install ~/gt # Create harness at ~/gt - gt install . --name my-workspace # Initialize current dir - gt install ~/gt --no-beads # Skip .beads/redirect setup`, + gt install ~/gt # Create harness at ~/gt + gt install . 
--name my-workspace # Initialize current dir + gt install ~/gt --no-beads # Skip .beads/ initialization + gt install ~/gt --git # Also init git with .gitignore + gt install ~/gt --github=user/repo # Also create GitHub repo`, Args: cobra.MaximumNArgs(1), RunE: runInstall, } @@ -44,7 +56,10 @@ Examples: func init() { installCmd.Flags().BoolVarP(&installForce, "force", "f", false, "Overwrite existing harness") installCmd.Flags().StringVarP(&installName, "name", "n", "", "Town name (defaults to directory name)") - installCmd.Flags().BoolVar(&installNoBeads, "no-beads", false, "Skip .beads/redirect setup") + installCmd.Flags().BoolVar(&installNoBeads, "no-beads", false, "Skip town beads initialization") + installCmd.Flags().BoolVar(&installGit, "git", false, "Initialize git with .gitignore") + installCmd.Flags().StringVar(&installGitHub, "github", "", "Create GitHub repo (format: owner/repo)") + installCmd.Flags().BoolVar(&installPrivate, "private", false, "Make GitHub repo private (use with --github)") rootCmd.AddCommand(installCmd) } @@ -132,19 +147,6 @@ func runInstall(cmd *cobra.Command, args []string) error { } fmt.Printf(" ✓ Created rigs/\n") - // Create mayor mail directory - mailDir := filepath.Join(mayorDir, "mail") - if err := os.MkdirAll(mailDir, 0755); err != nil { - return fmt.Errorf("creating mail directory: %w", err) - } - - // Create empty inbox - inboxPath := filepath.Join(mailDir, "inbox.jsonl") - if err := os.WriteFile(inboxPath, []byte{}, 0644); err != nil { - return fmt.Errorf("creating inbox: %w", err) - } - fmt.Printf(" ✓ Created mayor/mail/inbox.jsonl\n") - // Create mayor state.json mayorState := &config.AgentState{ Role: "mayor", @@ -163,29 +165,43 @@ func runInstall(cmd *cobra.Command, args []string) error { fmt.Printf(" ✓ Created CLAUDE.md\n") } - // Create .beads directory with redirect (optional) + // Initialize town-level beads database (optional) + // Town beads (gm- prefix) stores mayor mail, cross-rig coordination, and handoffs. 
+ // Rig beads are separate and have their own prefixes. if !installNoBeads { - beadsDir := filepath.Join(absPath, ".beads") - if err := os.MkdirAll(beadsDir, 0755); err != nil { - fmt.Printf(" %s Could not create .beads/: %v\n", style.Dim.Render("⚠"), err) + if err := initTownBeads(absPath); err != nil { + fmt.Printf(" %s Could not initialize town beads: %v\n", style.Dim.Render("⚠"), err) } else { - // Create redirect file with placeholder - redirectPath := filepath.Join(beadsDir, "redirect") - redirectContent := "# Redirect to your main rig's beads\n# Example: gastown/.beads\n" - if err := os.WriteFile(redirectPath, []byte(redirectContent), 0644); err != nil { - fmt.Printf(" %s Could not create redirect: %v\n", style.Dim.Render("⚠"), err) + fmt.Printf(" ✓ Initialized .beads/ (town-level beads with gm- prefix)\n") + + // Seed built-in molecules + if err := seedBuiltinMolecules(absPath); err != nil { + fmt.Printf(" %s Could not seed built-in molecules: %v\n", style.Dim.Render("⚠"), err) } else { - fmt.Printf(" ✓ Created .beads/redirect (configure for your main rig)\n") + fmt.Printf(" ✓ Seeded built-in molecules\n") } } } + // Initialize git if requested (--git or --github implies --git) + if installGit || installGitHub != "" { + fmt.Println() + if err := InitGitForHarness(absPath, installGitHub, installPrivate); err != nil { + return fmt.Errorf("git initialization failed: %w", err) + } + } + fmt.Printf("\n%s Harness created successfully!\n", style.Bold.Render("✓")) fmt.Println() fmt.Println("Next steps:") - fmt.Printf(" 1. Add a rig: %s\n", style.Dim.Render("gt rig add ")) - fmt.Printf(" 2. Configure beads redirect: %s\n", style.Dim.Render("edit .beads/redirect")) - fmt.Printf(" 3. Start the Mayor: %s\n", style.Dim.Render("cd "+absPath+" && gt prime")) + step := 1 + if !installGit && installGitHub == "" { + fmt.Printf(" %d. Initialize git: %s\n", step, style.Dim.Render("gt git-init")) + step++ + } + fmt.Printf(" %d. 
Add a rig: %s\n", step, style.Dim.Render("gt rig add ")) + step++ + fmt.Printf(" %d. Start the Mayor: %s\n", step, style.Dim.Render("cd "+absPath+" && gt prime")) return nil } @@ -218,3 +234,29 @@ func writeJSON(path string, data interface{}) error { } return os.WriteFile(path, content, 0644) } + +// initTownBeads initializes town-level beads database using bd init. +// Town beads use the "gm-" prefix for mayor mail and cross-rig coordination. +func initTownBeads(townPath string) error { + // Run: bd init --prefix gm + cmd := exec.Command("bd", "init", "--prefix", "gm") + cmd.Dir = townPath + + output, err := cmd.CombinedOutput() + if err != nil { + // Check if beads is already initialized + if strings.Contains(string(output), "already initialized") { + return nil // Already initialized is fine + } + return fmt.Errorf("bd init failed: %s", strings.TrimSpace(string(output))) + } + return nil +} + +// seedBuiltinMolecules creates built-in molecule definitions in the beads database. +// These molecules provide standard workflows like engineer-in-box, quick-fix, and research. 
+func seedBuiltinMolecules(townPath string) error { + b := beads.New(townPath) + _, err := b.SeedBuiltinMolecules() + return err +} diff --git a/internal/cmd/issue.go b/internal/cmd/issue.go new file mode 100644 index 00000000..e346fdc1 --- /dev/null +++ b/internal/cmd/issue.go @@ -0,0 +1,128 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/tmux" +) + +var issueCmd = &cobra.Command{ + Use: "issue", + Short: "Manage current issue for status line display", +} + +var issueSetCmd = &cobra.Command{ + Use: "set ", + Short: "Set the current issue (shown in tmux status line)", + Args: cobra.ExactArgs(1), + RunE: runIssueSet, +} + +var issueClearCmd = &cobra.Command{ + Use: "clear", + Short: "Clear the current issue from status line", + RunE: runIssueClear, +} + +var issueShowCmd = &cobra.Command{ + Use: "show", + Short: "Show the current issue", + RunE: runIssueShow, +} + +func init() { + rootCmd.AddCommand(issueCmd) + issueCmd.AddCommand(issueSetCmd) + issueCmd.AddCommand(issueClearCmd) + issueCmd.AddCommand(issueShowCmd) +} + +func runIssueSet(cmd *cobra.Command, args []string) error { + issueID := args[0] + + // Get current tmux session + session := os.Getenv("TMUX_PANE") + if session == "" { + // Try to detect from GT env vars + session = detectCurrentSession() + if session == "" { + return fmt.Errorf("not in a tmux session") + } + } + + t := tmux.NewTmux() + if err := t.SetEnvironment(session, "GT_ISSUE", issueID); err != nil { + return fmt.Errorf("setting issue: %w", err) + } + + fmt.Printf("Issue set to: %s\n", issueID) + return nil +} + +func runIssueClear(cmd *cobra.Command, args []string) error { + session := os.Getenv("TMUX_PANE") + if session == "" { + session = detectCurrentSession() + if session == "" { + return fmt.Errorf("not in a tmux session") + } + } + + t := tmux.NewTmux() + // Set to empty string to clear + if err := t.SetEnvironment(session, "GT_ISSUE", ""); err != nil { + return 
fmt.Errorf("clearing issue: %w", err) + } + + fmt.Println("Issue cleared") + return nil +} + +func runIssueShow(cmd *cobra.Command, args []string) error { + session := os.Getenv("TMUX_PANE") + if session == "" { + session = detectCurrentSession() + if session == "" { + return fmt.Errorf("not in a tmux session") + } + } + + t := tmux.NewTmux() + issue, err := t.GetEnvironment(session, "GT_ISSUE") + if err != nil { + return fmt.Errorf("getting issue: %w", err) + } + + if issue == "" { + fmt.Println("No issue set") + } else { + fmt.Printf("Current issue: %s\n", issue) + } + return nil +} + +// detectCurrentSession tries to find the tmux session name from env. +func detectCurrentSession() string { + // Try to build session name from GT env vars + rig := os.Getenv("GT_RIG") + polecat := os.Getenv("GT_POLECAT") + crew := os.Getenv("GT_CREW") + + if rig != "" { + if polecat != "" { + return fmt.Sprintf("gt-%s-%s", rig, polecat) + } + if crew != "" { + return fmt.Sprintf("gt-%s-crew-%s", rig, crew) + } + } + + // Check if we're mayor + if os.Getenv("GT_ROLE") == "mayor" { + return "gt-mayor" + } + + return "" +} diff --git a/internal/cmd/mail.go b/internal/cmd/mail.go index 858934ca..e8e9b487 100644 --- a/internal/cmd/mail.go +++ b/internal/cmd/mail.go @@ -1,6 +1,8 @@ package cmd import ( + "crypto/rand" + "encoding/hex" "encoding/json" "fmt" "os" @@ -15,15 +17,20 @@ import ( // Mail command flags var ( - mailSubject string - mailBody string - mailPriority string - mailNotify bool - mailInboxJSON bool - mailReadJSON bool - mailInboxUnread bool - mailCheckInject bool - mailCheckJSON bool + mailSubject string + mailBody string + mailPriority string + mailType string + mailReplyTo string + mailNotify bool + mailInboxJSON bool + mailReadJSON bool + mailInboxUnread bool + mailCheckInject bool + mailCheckJSON bool + mailThreadJSON bool + mailReplySubject string + mailReplyMessage string ) var mailCmd = &cobra.Command{ @@ -46,10 +53,21 @@ Addresses: / - Send to a specific polecat 
/ - Broadcast to a rig +Message types: + task - Required processing + scavenge - Optional first-come work + notification - Informational (default) + reply - Response to message + +Priority levels: + low, normal (default), high, urgent + Examples: gt mail send gastown/Toast -s "Status check" -m "How's that bug fix going?" gt mail send mayor/ -s "Work complete" -m "Finished gt-abc" - gt mail send gastown/ -s "All hands" -m "Swarm starting" --notify`, + gt mail send gastown/ -s "All hands" -m "Swarm starting" --notify + gt mail send gastown/Toast -s "Task" -m "Fix bug" --type task --priority high + gt mail send mayor/ -s "Re: Status" -m "Done" --reply-to msg-abc123`, Args: cobra.ExactArgs(1), RunE: runMailSend, } @@ -108,13 +126,45 @@ Examples: RunE: runMailCheck, } +var mailThreadCmd = &cobra.Command{ + Use: "thread ", + Short: "View a message thread", + Long: `View all messages in a conversation thread. + +Shows messages in chronological order (oldest first). + +Examples: + gt mail thread thread-abc123`, + Args: cobra.ExactArgs(1), + RunE: runMailThread, +} + +var mailReplyCmd = &cobra.Command{ + Use: "reply ", + Short: "Reply to a message", + Long: `Reply to a specific message. 
+ +This is a convenience command that automatically: +- Sets the reply-to field to the original message +- Prefixes the subject with "Re: " (if not already present) +- Sends to the original sender + +Examples: + gt mail reply msg-abc123 -m "Thanks, working on it now" + gt mail reply msg-abc123 -s "Custom subject" -m "Reply body"`, + Args: cobra.ExactArgs(1), + RunE: runMailReply, +} + func init() { // Send flags mailSendCmd.Flags().StringVarP(&mailSubject, "subject", "s", "", "Message subject (required)") mailSendCmd.Flags().StringVarP(&mailBody, "message", "m", "", "Message body") - mailSendCmd.Flags().StringVar(&mailPriority, "priority", "normal", "Message priority (normal, high)") + mailSendCmd.Flags().StringVar(&mailPriority, "priority", "normal", "Message priority (low, normal, high, urgent)") + mailSendCmd.Flags().StringVar(&mailType, "type", "notification", "Message type (task, scavenge, notification, reply)") + mailSendCmd.Flags().StringVar(&mailReplyTo, "reply-to", "", "Message ID this is replying to") mailSendCmd.Flags().BoolVarP(&mailNotify, "notify", "n", false, "Send tmux notification to recipient") - mailSendCmd.MarkFlagRequired("subject") + _ = mailSendCmd.MarkFlagRequired("subject") // Inbox flags mailInboxCmd.Flags().BoolVar(&mailInboxJSON, "json", false, "Output as JSON") @@ -127,12 +177,22 @@ func init() { mailCheckCmd.Flags().BoolVar(&mailCheckInject, "inject", false, "Output format for Claude Code hooks") mailCheckCmd.Flags().BoolVar(&mailCheckJSON, "json", false, "Output as JSON") + // Thread flags + mailThreadCmd.Flags().BoolVar(&mailThreadJSON, "json", false, "Output as JSON") + + // Reply flags + mailReplyCmd.Flags().StringVarP(&mailReplySubject, "subject", "s", "", "Override reply subject (default: Re: )") + mailReplyCmd.Flags().StringVarP(&mailReplyMessage, "message", "m", "", "Reply message body (required)") + mailReplyCmd.MarkFlagRequired("message") + // Add subcommands mailCmd.AddCommand(mailSendCmd) mailCmd.AddCommand(mailInboxCmd) 
mailCmd.AddCommand(mailReadCmd) mailCmd.AddCommand(mailDeleteCmd) mailCmd.AddCommand(mailCheckCmd) + mailCmd.AddCommand(mailThreadCmd) + mailCmd.AddCommand(mailReplyCmd) rootCmd.AddCommand(mailCmd) } @@ -158,10 +218,36 @@ func runMailSend(cmd *cobra.Command, args []string) error { } // Set priority - if mailPriority == "high" || mailNotify { + msg.Priority = mail.ParsePriority(mailPriority) + if mailNotify && msg.Priority == mail.PriorityNormal { msg.Priority = mail.PriorityHigh } + // Set message type + msg.Type = mail.ParseMessageType(mailType) + + // Handle reply-to: auto-set type to reply and look up thread + if mailReplyTo != "" { + msg.ReplyTo = mailReplyTo + if msg.Type == mail.TypeNotification { + msg.Type = mail.TypeReply + } + + // Look up original message to get thread ID + router := mail.NewRouter(workDir) + mailbox, err := router.GetMailbox(from) + if err == nil { + if original, err := mailbox.Get(mailReplyTo); err == nil { + msg.ThreadID = original.ThreadID + } + } + } + + // Generate thread ID for new threads + if msg.ThreadID == "" { + msg.ThreadID = generateThreadID() + } + // Send via router router := mail.NewRouter(workDir) if err := router.Send(msg); err != nil { @@ -170,6 +256,9 @@ func runMailSend(cmd *cobra.Command, args []string) error { fmt.Printf("%s Message sent to %s\n", style.Bold.Render("✓"), to) fmt.Printf(" Subject: %s\n", mailSubject) + if msg.Type != mail.TypeNotification { + fmt.Printf(" Type: %s\n", msg.Type) + } return nil } @@ -229,12 +318,16 @@ func runMailInbox(cmd *cobra.Command, args []string) error { if msg.Read { readMarker = "○" } + typeMarker := "" + if msg.Type != "" && msg.Type != mail.TypeNotification { + typeMarker = fmt.Sprintf(" [%s]", msg.Type) + } priorityMarker := "" - if msg.Priority == mail.PriorityHigh { + if msg.Priority == mail.PriorityHigh || msg.Priority == mail.PriorityUrgent { priorityMarker = " " + style.Bold.Render("!") } - fmt.Printf(" %s %s%s\n", readMarker, msg.Subject, priorityMarker) + 
fmt.Printf(" %s %s%s%s\n", readMarker, msg.Subject, typeMarker, priorityMarker) fmt.Printf(" %s from %s\n", style.Dim.Render(msg.ID), msg.From) @@ -270,7 +363,7 @@ func runMailRead(cmd *cobra.Command, args []string) error { } // Mark as read - mailbox.MarkRead(msgID) + _ = mailbox.MarkRead(msgID) // JSON output if mailReadJSON { @@ -281,16 +374,30 @@ func runMailRead(cmd *cobra.Command, args []string) error { // Human-readable output priorityStr := "" - if msg.Priority == mail.PriorityHigh { + if msg.Priority == mail.PriorityUrgent { + priorityStr = " " + style.Bold.Render("[URGENT]") + } else if msg.Priority == mail.PriorityHigh { priorityStr = " " + style.Bold.Render("[HIGH PRIORITY]") } - fmt.Printf("%s %s%s\n\n", style.Bold.Render("Subject:"), msg.Subject, priorityStr) + typeStr := "" + if msg.Type != "" && msg.Type != mail.TypeNotification { + typeStr = fmt.Sprintf(" [%s]", msg.Type) + } + + fmt.Printf("%s %s%s%s\n\n", style.Bold.Render("Subject:"), msg.Subject, typeStr, priorityStr) fmt.Printf("From: %s\n", msg.From) fmt.Printf("To: %s\n", msg.To) fmt.Printf("Date: %s\n", msg.Timestamp.Format("2006-01-02 15:04:05")) fmt.Printf("ID: %s\n", style.Dim.Render(msg.ID)) + if msg.ThreadID != "" { + fmt.Printf("Thread: %s\n", style.Dim.Render(msg.ThreadID)) + } + if msg.ReplyTo != "" { + fmt.Printf("Reply-To: %s\n", style.Dim.Render(msg.ReplyTo)) + } + if msg.Body != "" { fmt.Printf("\n%s\n", msg.Body) } @@ -386,6 +493,17 @@ func detectSender() string { } } + // If in a rig's crew directory, extract address + if strings.Contains(cwd, "/crew/") { + parts := strings.Split(cwd, "/crew/") + if len(parts) >= 2 { + rigPath := parts[0] + crewName := strings.Split(parts[1], "/")[0] + rigName := filepath.Base(rigPath) + return fmt.Sprintf("%s/%s", rigName, crewName) + } + } + // Default to mayor return "mayor/" } @@ -467,3 +585,143 @@ func runMailCheck(cmd *cobra.Command, args []string) error { } return nil } + +func runMailThread(cmd *cobra.Command, args []string) error { + 
threadID := args[0] + + // Find workspace + workDir, err := findBeadsWorkDir() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + // Determine which inbox + address := detectSender() + + // Get mailbox and thread messages + router := mail.NewRouter(workDir) + mailbox, err := router.GetMailbox(address) + if err != nil { + return fmt.Errorf("getting mailbox: %w", err) + } + + messages, err := mailbox.ListByThread(threadID) + if err != nil { + return fmt.Errorf("getting thread: %w", err) + } + + // JSON output + if mailThreadJSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(messages) + } + + // Human-readable output + fmt.Printf("%s Thread: %s (%d messages)\n\n", + style.Bold.Render("🧵"), threadID, len(messages)) + + if len(messages) == 0 { + fmt.Printf(" %s\n", style.Dim.Render("(no messages in thread)")) + return nil + } + + for i, msg := range messages { + typeMarker := "" + if msg.Type != "" && msg.Type != mail.TypeNotification { + typeMarker = fmt.Sprintf(" [%s]", msg.Type) + } + priorityMarker := "" + if msg.Priority == mail.PriorityHigh || msg.Priority == mail.PriorityUrgent { + priorityMarker = " " + style.Bold.Render("!") + } + + if i > 0 { + fmt.Printf(" %s\n", style.Dim.Render("│")) + } + fmt.Printf(" %s %s%s%s\n", style.Bold.Render("●"), msg.Subject, typeMarker, priorityMarker) + fmt.Printf(" %s from %s to %s\n", + style.Dim.Render(msg.ID), + msg.From, msg.To) + fmt.Printf(" %s\n", + style.Dim.Render(msg.Timestamp.Format("2006-01-02 15:04"))) + + if msg.Body != "" { + fmt.Printf(" %s\n", msg.Body) + } + } + + return nil +} + +func runMailReply(cmd *cobra.Command, args []string) error { + msgID := args[0] + + // Find workspace + workDir, err := findBeadsWorkDir() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + // Determine current address + from := detectSender() + + // Get the original message + router := mail.NewRouter(workDir) + mailbox, err := 
router.GetMailbox(from) + if err != nil { + return fmt.Errorf("getting mailbox: %w", err) + } + + original, err := mailbox.Get(msgID) + if err != nil { + return fmt.Errorf("getting message: %w", err) + } + + // Build reply subject + subject := mailReplySubject + if subject == "" { + if strings.HasPrefix(original.Subject, "Re: ") { + subject = original.Subject + } else { + subject = "Re: " + original.Subject + } + } + + // Create reply message + reply := &mail.Message{ + From: from, + To: original.From, // Reply to sender + Subject: subject, + Body: mailReplyMessage, + Type: mail.TypeReply, + Priority: mail.PriorityNormal, + ReplyTo: msgID, + ThreadID: original.ThreadID, + } + + // If original has no thread ID, create one + if reply.ThreadID == "" { + reply.ThreadID = generateThreadID() + } + + // Send the reply + if err := router.Send(reply); err != nil { + return fmt.Errorf("sending reply: %w", err) + } + + fmt.Printf("%s Reply sent to %s\n", style.Bold.Render("✓"), original.From) + fmt.Printf(" Subject: %s\n", subject) + if original.ThreadID != "" { + fmt.Printf(" Thread: %s\n", style.Dim.Render(original.ThreadID)) + } + + return nil +} + +// generateThreadID creates a random thread ID for new message threads. +func generateThreadID() string { + b := make([]byte, 6) + _, _ = rand.Read(b) + return "thread-" + hex.EncodeToString(b) +} diff --git a/internal/cmd/mayor.go b/internal/cmd/mayor.go index bf85f9df..e842a9ea 100644 --- a/internal/cmd/mayor.go +++ b/internal/cmd/mayor.go @@ -17,8 +17,9 @@ import ( const MayorSessionName = "gt-mayor" var mayorCmd = &cobra.Command{ - Use: "mayor", - Short: "Manage the Mayor session", + Use: "mayor", + Aliases: []string{"may"}, + Short: "Manage the Mayor session", Long: `Manage the Mayor tmux session. 
The Mayor is the global coordinator for Gas Town, running as a persistent @@ -82,12 +83,6 @@ func init() { } func runMayorStart(cmd *cobra.Command, args []string) error { - // Find workspace root - townRoot, err := workspace.FindFromCwdOrError() - if err != nil { - return fmt.Errorf("not in a Gas Town workspace: %w", err) - } - t := tmux.NewTmux() // Check if session already exists @@ -99,6 +94,25 @@ func runMayorStart(cmd *cobra.Command, args []string) error { return fmt.Errorf("Mayor session already running. Attach with: gt mayor attach") } + if err := startMayorSession(t); err != nil { + return err + } + + fmt.Printf("%s Mayor session started. Attach with: %s\n", + style.Bold.Render("✓"), + style.Dim.Render("gt mayor attach")) + + return nil +} + +// startMayorSession creates and initializes the Mayor tmux session. +func startMayorSession(t *tmux.Tmux) error { + // Find workspace root + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + // Create session in workspace root fmt.Println("Starting Mayor session...") if err := t.NewSession(MayorSessionName, townRoot); err != nil { @@ -106,18 +120,19 @@ func runMayorStart(cmd *cobra.Command, args []string) error { } // Set environment - t.SetEnvironment(MayorSessionName, "GT_ROLE", "mayor") + _ = t.SetEnvironment(MayorSessionName, "GT_ROLE", "mayor") - // Launch Claude with full permissions (Mayor is trusted) - command := "claude --dangerously-skip-permissions" - if err := t.SendKeys(MayorSessionName, command); err != nil { + // Apply Mayor theme + theme := tmux.MayorTheme() + _ = t.ConfigureGasTownSession(MayorSessionName, theme, "", "Mayor", "coordinator") + + // Launch Claude - the startup hook handles 'gt prime' automatically + // Use SendKeysDelayed to allow shell initialization after NewSession + claudeCmd := `claude --dangerously-skip-permissions` + if err := t.SendKeysDelayed(MayorSessionName, claudeCmd, 200); err != nil { return 
fmt.Errorf("sending command: %w", err) } - fmt.Printf("%s Mayor session started. Attach with: %s\n", - style.Bold.Render("✓"), - style.Dim.Render("gt mayor attach")) - return nil } @@ -136,7 +151,7 @@ func runMayorStop(cmd *cobra.Command, args []string) error { fmt.Println("Stopping Mayor session...") // Try graceful shutdown first - t.SendKeysRaw(MayorSessionName, "C-c") + _ = t.SendKeysRaw(MayorSessionName, "C-c") time.Sleep(100 * time.Millisecond) // Kill the session @@ -157,11 +172,14 @@ func runMayorAttach(cmd *cobra.Command, args []string) error { return fmt.Errorf("checking session: %w", err) } if !running { - return errors.New("Mayor session is not running. Start with: gt mayor start") + // Auto-start if not running + fmt.Println("Mayor session not running, starting...") + if err := startMayorSession(t); err != nil { + return err + } } // Use exec to replace current process with tmux attach - // This is the standard pattern for attaching to tmux sessions tmuxPath, err := exec.LookPath("tmux") if err != nil { return fmt.Errorf("tmux not found: %w", err) @@ -222,19 +240,19 @@ func runMayorStatus(cmd *cobra.Command, args []string) error { func runMayorRestart(cmd *cobra.Command, args []string) error { t := tmux.NewTmux() - // Stop if running running, err := t.HasSession(MayorSessionName) if err != nil { return fmt.Errorf("checking session: %w", err) } + if running { + // Stop the current session fmt.Println("Stopping Mayor session...") - t.SendKeysRaw(MayorSessionName, "C-c") + _ = t.SendKeysRaw(MayorSessionName, "C-c") time.Sleep(100 * time.Millisecond) if err := t.KillSession(MayorSessionName); err != nil { return fmt.Errorf("killing session: %w", err) } - fmt.Printf("%s Mayor session stopped.\n", style.Bold.Render("✓")) } // Start fresh diff --git a/internal/cmd/molecule.go b/internal/cmd/molecule.go new file mode 100644 index 00000000..3fc92f6e --- /dev/null +++ b/internal/cmd/molecule.go @@ -0,0 +1,495 @@ +package cmd + +import ( + "encoding/json" + "fmt" 
+ "os" + "strings" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/beads" + "github.com/steveyegge/gastown/internal/style" +) + +// Molecule command flags +var ( + moleculeJSON bool + moleculeInstParent string + moleculeInstContext []string +) + +var moleculeCmd = &cobra.Command{ + Use: "molecule", + Short: "Molecule workflow commands", + Long: `Manage molecule workflow templates. + +Molecules are composable workflow patterns stored as beads issues. +When instantiated on a parent issue, they create child beads forming a DAG.`, +} + +var moleculeListCmd = &cobra.Command{ + Use: "list", + Short: "List molecules", + Long: `List all molecule definitions. + +Molecules are issues with type=molecule.`, + RunE: runMoleculeList, +} + +var moleculeShowCmd = &cobra.Command{ + Use: "show ", + Short: "Show molecule with parsed steps", + Long: `Show a molecule definition with its parsed steps. + +Displays the molecule's title, description structure, and all defined steps +with their dependencies.`, + Args: cobra.ExactArgs(1), + RunE: runMoleculeShow, +} + +var moleculeParseCmd = &cobra.Command{ + Use: "parse ", + Short: "Validate and show parsed structure", + Long: `Parse and validate a molecule definition. + +This command parses the molecule's step definitions and reports any errors. +Useful for debugging molecule definitions before instantiation.`, + Args: cobra.ExactArgs(1), + RunE: runMoleculeParse, +} + +var moleculeInstantiateCmd = &cobra.Command{ + Use: "instantiate ", + Short: "Create steps from molecule template", + Long: `Instantiate a molecule on a parent issue. + +Creates child issues for each step defined in the molecule, wiring up +dependencies according to the Needs: declarations. + +Template variables ({{variable}}) can be substituted using --context flags. 
+ +Examples: + gt molecule instantiate mol-xyz --parent=gt-abc + gt molecule instantiate mol-xyz --parent=gt-abc --context feature=auth --context file=login.go`, + Args: cobra.ExactArgs(1), + RunE: runMoleculeInstantiate, +} + +var moleculeInstancesCmd = &cobra.Command{ + Use: "instances ", + Short: "Show all instantiations of a molecule", + Long: `Show all parent issues that have instantiated this molecule. + +Lists each instantiation with its status and progress.`, + Args: cobra.ExactArgs(1), + RunE: runMoleculeInstances, +} + +func init() { + // List flags + moleculeListCmd.Flags().BoolVar(&moleculeJSON, "json", false, "Output as JSON") + + // Show flags + moleculeShowCmd.Flags().BoolVar(&moleculeJSON, "json", false, "Output as JSON") + + // Parse flags + moleculeParseCmd.Flags().BoolVar(&moleculeJSON, "json", false, "Output as JSON") + + // Instantiate flags + moleculeInstantiateCmd.Flags().StringVar(&moleculeInstParent, "parent", "", "Parent issue ID (required)") + moleculeInstantiateCmd.Flags().StringArrayVar(&moleculeInstContext, "context", nil, "Context variable (key=value)") + moleculeInstantiateCmd.MarkFlagRequired("parent") + + // Instances flags + moleculeInstancesCmd.Flags().BoolVar(&moleculeJSON, "json", false, "Output as JSON") + + // Add subcommands + moleculeCmd.AddCommand(moleculeListCmd) + moleculeCmd.AddCommand(moleculeShowCmd) + moleculeCmd.AddCommand(moleculeParseCmd) + moleculeCmd.AddCommand(moleculeInstantiateCmd) + moleculeCmd.AddCommand(moleculeInstancesCmd) + + rootCmd.AddCommand(moleculeCmd) +} + +func runMoleculeList(cmd *cobra.Command, args []string) error { + workDir, err := findBeadsWorkDir() + if err != nil { + return fmt.Errorf("not in a beads workspace: %w", err) + } + + b := beads.New(workDir) + issues, err := b.List(beads.ListOptions{ + Type: "molecule", + Status: "all", + Priority: -1, + }) + if err != nil { + return fmt.Errorf("listing molecules: %w", err) + } + + if moleculeJSON { + enc := json.NewEncoder(os.Stdout) + 
enc.SetIndent("", " ") + return enc.Encode(issues) + } + + // Human-readable output + fmt.Printf("%s Molecules (%d)\n\n", style.Bold.Render("🧬"), len(issues)) + + if len(issues) == 0 { + fmt.Printf(" %s\n", style.Dim.Render("(no molecules defined)")) + return nil + } + + for _, mol := range issues { + statusMarker := "" + if mol.Status == "closed" { + statusMarker = " " + style.Dim.Render("[closed]") + } + + // Parse steps to show count + steps, _ := beads.ParseMoleculeSteps(mol.Description) + stepCount := "" + if len(steps) > 0 { + stepCount = fmt.Sprintf(" (%d steps)", len(steps)) + } + + fmt.Printf(" %s: %s%s%s\n", style.Bold.Render(mol.ID), mol.Title, stepCount, statusMarker) + } + + return nil +} + +func runMoleculeShow(cmd *cobra.Command, args []string) error { + molID := args[0] + + workDir, err := findBeadsWorkDir() + if err != nil { + return fmt.Errorf("not in a beads workspace: %w", err) + } + + b := beads.New(workDir) + mol, err := b.Show(molID) + if err != nil { + return fmt.Errorf("getting molecule: %w", err) + } + + if mol.Type != "molecule" { + return fmt.Errorf("%s is not a molecule (type: %s)", molID, mol.Type) + } + + // Parse steps + steps, parseErr := beads.ParseMoleculeSteps(mol.Description) + + // For JSON, include parsed steps + if moleculeJSON { + type moleculeOutput struct { + *beads.Issue + Steps []beads.MoleculeStep `json:"steps,omitempty"` + ParseError string `json:"parse_error,omitempty"` + } + out := moleculeOutput{Issue: mol, Steps: steps} + if parseErr != nil { + out.ParseError = parseErr.Error() + } + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(out) + } + + // Human-readable output + fmt.Printf("\n%s: %s\n", style.Bold.Render(mol.ID), mol.Title) + fmt.Printf("Type: %s\n", mol.Type) + + if parseErr != nil { + fmt.Printf("\n%s Parse error: %s\n", style.Bold.Render("⚠"), parseErr) + } + + // Show steps + fmt.Printf("\nSteps (%d):\n", len(steps)) + if len(steps) == 0 { + fmt.Printf(" %s\n", 
style.Dim.Render("(no steps defined)")) + } else { + // Find which steps are ready (no dependencies) + for _, step := range steps { + needsStr := "" + if len(step.Needs) == 0 { + needsStr = style.Dim.Render("(ready first)") + } else { + needsStr = fmt.Sprintf("Needs: %s", strings.Join(step.Needs, ", ")) + } + + tierStr := "" + if step.Tier != "" { + tierStr = fmt.Sprintf(" [%s]", step.Tier) + } + + fmt.Printf(" %-12s → %s%s\n", step.Ref, needsStr, tierStr) + } + } + + // Count instances + instances, _ := findMoleculeInstances(b, molID) + fmt.Printf("\nInstances: %d\n", len(instances)) + + return nil +} + +func runMoleculeParse(cmd *cobra.Command, args []string) error { + molID := args[0] + + workDir, err := findBeadsWorkDir() + if err != nil { + return fmt.Errorf("not in a beads workspace: %w", err) + } + + b := beads.New(workDir) + mol, err := b.Show(molID) + if err != nil { + return fmt.Errorf("getting molecule: %w", err) + } + + // Validate the molecule + validationErr := beads.ValidateMolecule(mol) + + // Parse steps regardless of validation + steps, parseErr := beads.ParseMoleculeSteps(mol.Description) + + if moleculeJSON { + type parseOutput struct { + Valid bool `json:"valid"` + ValidationError string `json:"validation_error,omitempty"` + ParseError string `json:"parse_error,omitempty"` + Steps []beads.MoleculeStep `json:"steps"` + } + out := parseOutput{ + Valid: validationErr == nil, + Steps: steps, + } + if validationErr != nil { + out.ValidationError = validationErr.Error() + } + if parseErr != nil { + out.ParseError = parseErr.Error() + } + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(out) + } + + // Human-readable output + fmt.Printf("\n%s: %s\n\n", style.Bold.Render(mol.ID), mol.Title) + + if validationErr != nil { + fmt.Printf("%s Validation failed: %s\n\n", style.Bold.Render("✗"), validationErr) + } else { + fmt.Printf("%s Valid molecule\n\n", style.Bold.Render("✓")) + } + + if parseErr != nil { + fmt.Printf("Parse 
error: %s\n\n", parseErr) + } + + fmt.Printf("Parsed Steps (%d):\n", len(steps)) + for i, step := range steps { + fmt.Printf("\n [%d] %s\n", i+1, style.Bold.Render(step.Ref)) + if step.Title != step.Ref { + fmt.Printf(" Title: %s\n", step.Title) + } + if len(step.Needs) > 0 { + fmt.Printf(" Needs: %s\n", strings.Join(step.Needs, ", ")) + } + if step.Tier != "" { + fmt.Printf(" Tier: %s\n", step.Tier) + } + if step.Instructions != "" { + // Show first line of instructions + firstLine := strings.SplitN(step.Instructions, "\n", 2)[0] + if len(firstLine) > 60 { + firstLine = firstLine[:57] + "..." + } + fmt.Printf(" Instructions: %s\n", style.Dim.Render(firstLine)) + } + } + + return nil +} + +func runMoleculeInstantiate(cmd *cobra.Command, args []string) error { + molID := args[0] + + workDir, err := findBeadsWorkDir() + if err != nil { + return fmt.Errorf("not in a beads workspace: %w", err) + } + + b := beads.New(workDir) + + // Get the molecule + mol, err := b.Show(molID) + if err != nil { + return fmt.Errorf("getting molecule: %w", err) + } + + if mol.Type != "molecule" { + return fmt.Errorf("%s is not a molecule (type: %s)", molID, mol.Type) + } + + // Validate molecule + if err := beads.ValidateMolecule(mol); err != nil { + return fmt.Errorf("invalid molecule: %w", err) + } + + // Get the parent issue + parent, err := b.Show(moleculeInstParent) + if err != nil { + return fmt.Errorf("getting parent issue: %w", err) + } + + // Parse context variables + ctx := make(map[string]string) + for _, kv := range moleculeInstContext { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid context format %q (expected key=value)", kv) + } + ctx[parts[0]] = parts[1] + } + + // Instantiate the molecule + opts := beads.InstantiateOptions{Context: ctx} + steps, err := b.InstantiateMolecule(mol, parent, opts) + if err != nil { + return fmt.Errorf("instantiating molecule: %w", err) + } + + fmt.Printf("%s Created %d steps from %s on %s\n\n", + 
style.Bold.Render("✓"), len(steps), molID, moleculeInstParent) + + for _, step := range steps { + fmt.Printf(" %s: %s\n", style.Dim.Render(step.ID), step.Title) + } + + return nil +} + +func runMoleculeInstances(cmd *cobra.Command, args []string) error { + molID := args[0] + + workDir, err := findBeadsWorkDir() + if err != nil { + return fmt.Errorf("not in a beads workspace: %w", err) + } + + b := beads.New(workDir) + + // Verify the molecule exists + mol, err := b.Show(molID) + if err != nil { + return fmt.Errorf("getting molecule: %w", err) + } + + if mol.Type != "molecule" { + return fmt.Errorf("%s is not a molecule (type: %s)", molID, mol.Type) + } + + // Find all instances + instances, err := findMoleculeInstances(b, molID) + if err != nil { + return fmt.Errorf("finding instances: %w", err) + } + + if moleculeJSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(instances) + } + + // Human-readable output + fmt.Printf("\n%s Instances of %s (%d)\n\n", + style.Bold.Render("📋"), molID, len(instances)) + + if len(instances) == 0 { + fmt.Printf(" %s\n", style.Dim.Render("(no instantiations found)")) + return nil + } + + fmt.Printf("%-16s %-12s %s\n", + style.Bold.Render("Parent"), + style.Bold.Render("Status"), + style.Bold.Render("Created")) + fmt.Println(strings.Repeat("-", 50)) + + for _, inst := range instances { + // Calculate progress from children + progress := "" + if len(inst.Children) > 0 { + closed := 0 + for _, childID := range inst.Children { + child, err := b.Show(childID) + if err == nil && child.Status == "closed" { + closed++ + } + } + progress = fmt.Sprintf(" (%d/%d complete)", closed, len(inst.Children)) + } + + statusStr := inst.Status + if inst.Status == "closed" { + statusStr = style.Dim.Render("done") + } else if inst.Status == "in_progress" { + statusStr = "active" + } + + created := "" + if inst.CreatedAt != "" { + // Parse and format date + created = inst.CreatedAt[:10] // Just the date portion + } + + 
fmt.Printf("%-16s %-12s %s%s\n", inst.ID, statusStr, created, progress) + } + + return nil +} + +// moleculeInstance represents an instantiation of a molecule. +type moleculeInstance struct { + *beads.Issue +} + +// findMoleculeInstances finds all parent issues that have steps instantiated from the given molecule. +func findMoleculeInstances(b *beads.Beads, molID string) ([]*beads.Issue, error) { + // Get all issues and look for ones with children that have instantiated_from metadata + // This is a brute-force approach - could be optimized with better queries + + // Strategy: search for issues whose descriptions contain "instantiated_from: " + allIssues, err := b.List(beads.ListOptions{Status: "all", Priority: -1}) + if err != nil { + return nil, err + } + + // Find issues that reference this molecule + parentIDs := make(map[string]bool) + for _, issue := range allIssues { + if strings.Contains(issue.Description, fmt.Sprintf("instantiated_from: %s", molID)) { + // This is a step - find its parent + if issue.Parent != "" { + parentIDs[issue.Parent] = true + } + } + } + + // Fetch the parent issues + var parents []*beads.Issue + for parentID := range parentIDs { + parent, err := b.Show(parentID) + if err == nil { + parents = append(parents, parent) + } + } + + return parents, nil +} diff --git a/internal/cmd/mq.go b/internal/cmd/mq.go new file mode 100644 index 00000000..ecdbc965 --- /dev/null +++ b/internal/cmd/mq.go @@ -0,0 +1,1736 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/beads" + "github.com/steveyegge/gastown/internal/config" + "github.com/steveyegge/gastown/internal/git" + "github.com/steveyegge/gastown/internal/refinery" + "github.com/steveyegge/gastown/internal/rig" + "github.com/steveyegge/gastown/internal/style" + "github.com/steveyegge/gastown/internal/workspace" +) + +// MQ command flags +var ( + // Submit 
flags + mqSubmitBranch string + mqSubmitIssue string + mqSubmitEpic string + mqSubmitPriority int + + // Retry flags + mqRetryNow bool + + // Reject flags + mqRejectReason string + mqRejectNotify bool + + // List command flags + mqListReady bool + mqListStatus string + mqListWorker string + mqListEpic string + mqListJSON bool + + // Status command flags + mqStatusJSON bool + + // Integration land flags + mqIntegrationLandForce bool + mqIntegrationLandSkipTests bool + mqIntegrationLandDryRun bool + + // Integration status flags + mqIntegrationStatusJSON bool +) + +var mqCmd = &cobra.Command{ + Use: "mq", + Short: "Merge queue operations", + Long: `Manage the merge queue for a rig. + +The merge queue tracks work branches from polecats waiting to be merged. +Use these commands to view, submit, retry, and manage merge requests.`, +} + +var mqSubmitCmd = &cobra.Command{ + Use: "submit", + Short: "Submit current branch to the merge queue", + Long: `Submit the current branch to the merge queue. + +Creates a merge-request bead that will be processed by the Engineer. + +Auto-detection: + - Branch: current git branch + - Issue: parsed from branch name (e.g., polecat/Nux/gt-xyz → gt-xyz) + - Worker: parsed from branch name + - Rig: detected from current directory + - Target: automatically determined (see below) + - Priority: inherited from source issue + +Target branch auto-detection: + 1. If --epic is specified: target integration/ + 2. If source issue has a parent epic with integration/ branch: target it + 3. Otherwise: target main + +This ensures batch work on epics automatically flows to integration branches. 
+ +Examples: + gt mq submit # Auto-detect everything + gt mq submit --issue gt-abc # Explicit issue + gt mq submit --epic gt-xyz # Target integration branch explicitly + gt mq submit --priority 0 # Override priority (P0)`, + RunE: runMqSubmit, +} + +var mqRetryCmd = &cobra.Command{ + Use: "retry ", + Short: "Retry a failed merge request", + Long: `Retry a failed merge request. + +Resets a failed MR so it can be processed again by the refinery. +The MR must be in a failed state (open with an error). + +Examples: + gt mq retry gastown gt-mr-abc123 + gt mq retry gastown gt-mr-abc123 --now`, + Args: cobra.ExactArgs(2), + RunE: runMQRetry, +} + +var mqListCmd = &cobra.Command{ + Use: "list ", + Short: "Show the merge queue", + Long: `Show the merge queue for a rig. + +Lists all pending merge requests waiting to be processed. + +Output format: + ID STATUS PRIORITY BRANCH WORKER AGE + gt-mr-001 ready P0 polecat/Nux/gt-xyz Nux 5m + gt-mr-002 in_progress P1 polecat/Toast/gt-abc Toast 12m + gt-mr-003 blocked P1 polecat/Capable/gt-def Capable 8m + (waiting on gt-mr-001) + +Examples: + gt mq list gastown + gt mq list gastown --ready + gt mq list gastown --status=open + gt mq list gastown --worker=Nux`, + Args: cobra.ExactArgs(1), + RunE: runMQList, +} + +var mqRejectCmd = &cobra.Command{ + Use: "reject ", + Short: "Reject a merge request", + Long: `Manually reject a merge request. + +This closes the MR with a 'rejected' status without merging. +The source issue is NOT closed (work is not done). + +Examples: + gt mq reject gastown polecat/Nux/gt-xyz --reason "Does not meet requirements" + gt mq reject gastown mr-Nux-12345 --reason "Superseded by other work" --notify`, + Args: cobra.ExactArgs(2), + RunE: runMQReject, +} + +var mqStatusCmd = &cobra.Command{ + Use: "status ", + Short: "Show detailed merge request status", + Long: `Display detailed information about a merge request. 
+ +Shows all MR fields, current status with timestamps, dependencies, +blockers, and processing history. + +Example: + gt mq status gt-mr-abc123`, + Args: cobra.ExactArgs(1), + RunE: runMqStatus, +} + +var mqIntegrationCmd = &cobra.Command{ + Use: "integration", + Short: "Manage integration branches for epics", + Long: `Manage integration branches for batch work on epics. + +Integration branches allow multiple MRs for an epic to target a shared +branch instead of main. After all epic work is complete, the integration +branch is landed to main as a single atomic unit. + +Commands: + create Create an integration branch for an epic + land Merge integration branch to main + status Show integration branch status`, +} + +var mqIntegrationCreateCmd = &cobra.Command{ + Use: "create ", + Short: "Create an integration branch for an epic", + Long: `Create an integration branch for batch work on an epic. + +Creates a branch named integration/ from main and pushes it +to origin. Future MRs for this epic's children can target this branch. + +Actions: + 1. Verify epic exists + 2. Create branch integration/ from main + 3. Push to origin + 4. Store integration branch info in epic metadata + +Example: + gt mq integration create gt-auth-epic + # Creates integration/gt-auth-epic from main`, + Args: cobra.ExactArgs(1), + RunE: runMqIntegrationCreate, +} + +var mqIntegrationLandCmd = &cobra.Command{ + Use: "land ", + Short: "Merge integration branch to main", + Long: `Merge an epic's integration branch to main. + +Lands all work for an epic by merging its integration branch to main +as a single atomic merge commit. + +Actions: + 1. Verify all MRs targeting integration/ are merged + 2. Verify integration branch exists + 3. Merge integration/ to main (--no-ff) + 4. Run tests on main + 5. Push to origin + 6. Delete integration branch + 7. 
Update epic status + +Options: + --force Land even if some MRs still open + --skip-tests Skip test run + --dry-run Preview only, make no changes + +Examples: + gt mq integration land gt-auth-epic + gt mq integration land gt-auth-epic --dry-run + gt mq integration land gt-auth-epic --force --skip-tests`, + Args: cobra.ExactArgs(1), + RunE: runMqIntegrationLand, +} + +var mqIntegrationStatusCmd = &cobra.Command{ + Use: "status ", + Short: "Show integration branch status for an epic", + Long: `Display the status of an integration branch. + +Shows: + - Integration branch name and creation date + - Number of commits ahead of main + - Merged MRs (closed, targeting integration branch) + - Pending MRs (open, targeting integration branch) + +Example: + gt mq integration status gt-auth-epic`, + Args: cobra.ExactArgs(1), + RunE: runMqIntegrationStatus, +} + +func init() { + // Submit flags + mqSubmitCmd.Flags().StringVar(&mqSubmitBranch, "branch", "", "Source branch (default: current branch)") + mqSubmitCmd.Flags().StringVar(&mqSubmitIssue, "issue", "", "Source issue ID (default: parse from branch name)") + mqSubmitCmd.Flags().StringVar(&mqSubmitEpic, "epic", "", "Target epic's integration branch instead of main") + mqSubmitCmd.Flags().IntVarP(&mqSubmitPriority, "priority", "p", -1, "Override priority (0-4, default: inherit from issue)") + + // Retry flags + mqRetryCmd.Flags().BoolVar(&mqRetryNow, "now", false, "Immediately process instead of waiting for refinery loop") + + // List flags + mqListCmd.Flags().BoolVar(&mqListReady, "ready", false, "Show only ready-to-merge (no blockers)") + mqListCmd.Flags().StringVar(&mqListStatus, "status", "", "Filter by status (open, in_progress, closed)") + mqListCmd.Flags().StringVar(&mqListWorker, "worker", "", "Filter by worker name") + mqListCmd.Flags().StringVar(&mqListEpic, "epic", "", "Show MRs targeting integration/") + mqListCmd.Flags().BoolVar(&mqListJSON, "json", false, "Output as JSON") + + // Reject flags + 
mqRejectCmd.Flags().StringVarP(&mqRejectReason, "reason", "r", "", "Reason for rejection (required)") + mqRejectCmd.Flags().BoolVar(&mqRejectNotify, "notify", false, "Send mail notification to worker") + _ = mqRejectCmd.MarkFlagRequired("reason") + + // Status flags + mqStatusCmd.Flags().BoolVar(&mqStatusJSON, "json", false, "Output as JSON") + + // Add subcommands + mqCmd.AddCommand(mqSubmitCmd) + mqCmd.AddCommand(mqRetryCmd) + mqCmd.AddCommand(mqListCmd) + mqCmd.AddCommand(mqRejectCmd) + mqCmd.AddCommand(mqStatusCmd) + + // Integration branch subcommands + mqIntegrationCmd.AddCommand(mqIntegrationCreateCmd) + +// Integration land flags + mqIntegrationLandCmd.Flags().BoolVar(&mqIntegrationLandForce, "force", false, "Land even if some MRs still open") + mqIntegrationLandCmd.Flags().BoolVar(&mqIntegrationLandSkipTests, "skip-tests", false, "Skip test run") + mqIntegrationLandCmd.Flags().BoolVar(&mqIntegrationLandDryRun, "dry-run", false, "Preview only, make no changes") + mqIntegrationCmd.AddCommand(mqIntegrationLandCmd) + + // Integration status flags + mqIntegrationStatusCmd.Flags().BoolVar(&mqIntegrationStatusJSON, "json", false, "Output as JSON") + mqIntegrationCmd.AddCommand(mqIntegrationStatusCmd) + + mqCmd.AddCommand(mqIntegrationCmd) + + rootCmd.AddCommand(mqCmd) +} + +// branchInfo holds parsed branch information. +type branchInfo struct { + Branch string // Full branch name + Issue string // Issue ID extracted from branch + Worker string // Worker name (polecat name) +} + +// parseBranchName extracts issue ID and worker from a branch name. 
+// Supports formats: +// - polecat// → issue=, worker= +// - → issue=, worker="" +func parseBranchName(branch string) branchInfo { + info := branchInfo{Branch: branch} + + // Try polecat// format + if strings.HasPrefix(branch, "polecat/") { + parts := strings.SplitN(branch, "/", 3) + if len(parts) == 3 { + info.Worker = parts[1] + info.Issue = parts[2] + return info + } + } + + // Try to find an issue ID pattern in the branch name + // Common patterns: prefix-xxx, prefix-xxx.n (subtask) + issuePattern := regexp.MustCompile(`([a-z]+-[a-z0-9]+(?:\.[0-9]+)?)`) + if matches := issuePattern.FindStringSubmatch(branch); len(matches) > 1 { + info.Issue = matches[1] + } + + return info +} + +// findCurrentRig determines the current rig from the working directory. +// Returns the rig name and rig object, or an error if not in a rig. +func findCurrentRig(townRoot string) (string, *rig.Rig, error) { + cwd, err := os.Getwd() + if err != nil { + return "", nil, fmt.Errorf("getting current directory: %w", err) + } + + // Get relative path from town root to cwd + relPath, err := filepath.Rel(townRoot, cwd) + if err != nil { + return "", nil, fmt.Errorf("computing relative path: %w", err) + } + + // The first component of the relative path should be the rig name + parts := strings.Split(relPath, string(filepath.Separator)) + if len(parts) == 0 || parts[0] == "" || parts[0] == "." 
{ + return "", nil, fmt.Errorf("not inside a rig directory") + } + + rigName := parts[0] + + // Load rig manager and get the rig + rigsConfigPath := filepath.Join(townRoot, "mayor", "rigs.json") + rigsConfig, err := config.LoadRigsConfig(rigsConfigPath) + if err != nil { + rigsConfig = &config.RigsConfig{Rigs: make(map[string]config.RigEntry)} + } + + g := git.NewGit(townRoot) + rigMgr := rig.NewManager(townRoot, rigsConfig, g) + r, err := rigMgr.GetRig(rigName) + if err != nil { + return "", nil, fmt.Errorf("rig '%s' not found: %w", rigName, err) + } + + return rigName, r, nil +} + +func runMqSubmit(cmd *cobra.Command, args []string) error { + // Find workspace + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + // Find current rig + rigName, _, err := findCurrentRig(townRoot) + if err != nil { + return err + } + + // Initialize git for the current directory + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("getting current directory: %w", err) + } + g := git.NewGit(cwd) + + // Get current branch + branch := mqSubmitBranch + if branch == "" { + branch, err = g.CurrentBranch() + if err != nil { + return fmt.Errorf("getting current branch: %w", err) + } + } + + if branch == "main" || branch == "master" { + return fmt.Errorf("cannot submit main/master branch to merge queue") + } + + // Parse branch info + info := parseBranchName(branch) + + // Override with explicit flags + issueID := mqSubmitIssue + if issueID == "" { + issueID = info.Issue + } + worker := info.Worker + + if issueID == "" { + return fmt.Errorf("cannot determine source issue from branch '%s'; use --issue to specify", branch) + } + + // Initialize beads for looking up source issue + bd := beads.New(cwd) + + // Determine target branch + target := "main" + if mqSubmitEpic != "" { + // Explicit --epic flag takes precedence + target = "integration/" + mqSubmitEpic + } else { + // Auto-detect: check if source 
issue has a parent epic with an integration branch + autoTarget, err := detectIntegrationBranch(bd, g, issueID) + if err != nil { + // Non-fatal: log and continue with main as target + fmt.Printf(" %s\n", style.Dim.Render(fmt.Sprintf("(note: %v)", err))) + } else if autoTarget != "" { + target = autoTarget + } + } + + // Get source issue for priority inheritance + var priority int + if mqSubmitPriority >= 0 { + priority = mqSubmitPriority + } else { + // Try to inherit from source issue + sourceIssue, err := bd.Show(issueID) + if err != nil { + // Issue not found, use default priority + priority = 2 + } else { + priority = sourceIssue.Priority + } + } + + // Build title + title := fmt.Sprintf("Merge: %s", issueID) + + // Build description with MR fields + mrFields := &beads.MRFields{ + Branch: branch, + Target: target, + SourceIssue: issueID, + Worker: worker, + Rig: rigName, + } + description := beads.FormatMRFields(mrFields) + + // Create the merge-request issue + createOpts := beads.CreateOptions{ + Title: title, + Type: "merge-request", + Priority: priority, + Description: description, + } + + issue, err := bd.Create(createOpts) + if err != nil { + return fmt.Errorf("creating merge request: %w", err) + } + + // Success output + fmt.Printf("%s Created merge request\n", style.Bold.Render("✓")) + fmt.Printf(" MR ID: %s\n", style.Bold.Render(issue.ID)) + fmt.Printf(" Source: %s\n", branch) + fmt.Printf(" Target: %s\n", target) + fmt.Printf(" Issue: %s\n", issueID) + if worker != "" { + fmt.Printf(" Worker: %s\n", worker) + } + fmt.Printf(" Priority: P%d\n", priority) + + return nil +} + +func runMQRetry(cmd *cobra.Command, args []string) error { + rigName := args[0] + mrID := args[1] + + mgr, _, err := getRefineryManager(rigName) + if err != nil { + return err + } + + // Get the MR first to show info + mr, err := mgr.GetMR(mrID) + if err != nil { + if err == refinery.ErrMRNotFound { + return fmt.Errorf("merge request '%s' not found in rig '%s'", mrID, rigName) + } 
+ return fmt.Errorf("getting merge request: %w", err) + } + + // Show what we're retrying + fmt.Printf("Retrying merge request: %s\n", mrID) + fmt.Printf(" Branch: %s\n", mr.Branch) + fmt.Printf(" Worker: %s\n", mr.Worker) + if mr.Error != "" { + fmt.Printf(" Previous error: %s\n", style.Dim.Render(mr.Error)) + } + + // Perform the retry + if err := mgr.Retry(mrID, mqRetryNow); err != nil { + if err == refinery.ErrMRNotFailed { + return fmt.Errorf("merge request '%s' has not failed (status: %s)", mrID, mr.Status) + } + return fmt.Errorf("retrying merge request: %w", err) + } + + if mqRetryNow { + fmt.Printf("%s Merge request processed\n", style.Bold.Render("✓")) + } else { + fmt.Printf("%s Merge request queued for retry\n", style.Bold.Render("✓")) + fmt.Printf(" %s\n", style.Dim.Render("Will be processed on next refinery cycle")) + } + + return nil +} + +func runMQList(cmd *cobra.Command, args []string) error { + rigName := args[0] + + _, r, err := getRefineryManager(rigName) + if err != nil { + return err + } + + // Create beads wrapper for the rig + b := beads.New(r.Path) + + // Build list options - query for merge-request type + opts := beads.ListOptions{ + Type: "merge-request", + } + + // Apply status filter if specified + if mqListStatus != "" { + opts.Status = mqListStatus + } else if !mqListReady { + // Default to open if not showing ready + opts.Status = "open" + } + + var issues []*beads.Issue + + if mqListReady { + // Use ready query which filters by no blockers + allReady, err := b.Ready() + if err != nil { + return fmt.Errorf("querying ready MRs: %w", err) + } + // Filter to only merge-request type + for _, issue := range allReady { + if issue.Type == "merge-request" { + issues = append(issues, issue) + } + } + } else { + issues, err = b.List(opts) + if err != nil { + return fmt.Errorf("querying merge queue: %w", err) + } + } + + // Apply additional filters + var filtered []*beads.Issue + for _, issue := range issues { + // Parse MR fields + fields := 
beads.ParseMRFields(issue) + + // Filter by worker + if mqListWorker != "" { + worker := "" + if fields != nil { + worker = fields.Worker + } + if !strings.EqualFold(worker, mqListWorker) { + continue + } + } + + // Filter by epic (target branch) + if mqListEpic != "" { + target := "" + if fields != nil { + target = fields.Target + } + expectedTarget := "integration/" + mqListEpic + if target != expectedTarget { + continue + } + } + + filtered = append(filtered, issue) + } + + // JSON output + if mqListJSON { + return outputJSON(filtered) + } + + // Human-readable output + fmt.Printf("%s Merge queue for '%s':\n\n", style.Bold.Render("📋"), rigName) + + if len(filtered) == 0 { + fmt.Printf(" %s\n", style.Dim.Render("(empty)")) + return nil + } + + // Print header + fmt.Printf(" %-12s %-12s %-8s %-30s %-10s %s\n", + "ID", "STATUS", "PRIORITY", "BRANCH", "WORKER", "AGE") + fmt.Printf(" %s\n", strings.Repeat("-", 90)) + + // Print each MR + for _, issue := range filtered { + fields := beads.ParseMRFields(issue) + + // Determine display status + displayStatus := issue.Status + if issue.Status == "open" { + if len(issue.BlockedBy) > 0 || issue.BlockedByCount > 0 { + displayStatus = "blocked" + } else { + displayStatus = "ready" + } + } + + // Format status with styling + styledStatus := displayStatus + switch displayStatus { + case "ready": + styledStatus = style.Bold.Render("ready") + case "in_progress": + styledStatus = style.Bold.Render("in_progress") + case "blocked": + styledStatus = style.Dim.Render("blocked") + case "closed": + styledStatus = style.Dim.Render("closed") + } + + // Get MR fields + branch := "" + worker := "" + if fields != nil { + branch = fields.Branch + worker = fields.Worker + } + + // Truncate branch if too long + if len(branch) > 30 { + branch = branch[:27] + "..." 
+ } + + // Format priority + priority := fmt.Sprintf("P%d", issue.Priority) + + // Calculate age + age := formatMRAge(issue.CreatedAt) + + // Truncate ID if needed + displayID := issue.ID + if len(displayID) > 12 { + displayID = displayID[:12] + } + + fmt.Printf(" %-12s %-12s %-8s %-30s %-10s %s\n", + displayID, styledStatus, priority, branch, worker, style.Dim.Render(age)) + + // Show blocking info if blocked + if displayStatus == "blocked" && len(issue.BlockedBy) > 0 { + fmt.Printf(" %s\n", style.Dim.Render(fmt.Sprintf(" (waiting on %s)", issue.BlockedBy[0]))) + } + } + + return nil +} + +// formatMRAge formats the age of an MR from its created_at timestamp. +func formatMRAge(createdAt string) string { + t, err := time.Parse(time.RFC3339, createdAt) + if err != nil { + // Try other formats + t, err = time.Parse("2006-01-02T15:04:05Z", createdAt) + if err != nil { + return "?" + } + } + + d := time.Since(t) + + if d < time.Minute { + return fmt.Sprintf("%ds", int(d.Seconds())) + } + if d < time.Hour { + return fmt.Sprintf("%dm", int(d.Minutes())) + } + if d < 24*time.Hour { + return fmt.Sprintf("%dh", int(d.Hours())) + } + return fmt.Sprintf("%dd", int(d.Hours()/24)) +} + +// outputJSON outputs data as JSON. 
+func outputJSON(data interface{}) error { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(data) +} + +func runMQReject(cmd *cobra.Command, args []string) error { + rigName := args[0] + mrIDOrBranch := args[1] + + mgr, _, err := getRefineryManager(rigName) + if err != nil { + return err + } + + result, err := mgr.RejectMR(mrIDOrBranch, mqRejectReason, mqRejectNotify) + if err != nil { + return fmt.Errorf("rejecting MR: %w", err) + } + + fmt.Printf("%s Rejected: %s\n", style.Bold.Render("✗"), result.Branch) + fmt.Printf(" Worker: %s\n", result.Worker) + fmt.Printf(" Reason: %s\n", mqRejectReason) + + if result.IssueID != "" { + fmt.Printf(" Issue: %s %s\n", result.IssueID, style.Dim.Render("(not closed - work not done)")) + } + + if mqRejectNotify { + fmt.Printf(" %s\n", style.Dim.Render("Worker notified via mail")) + } + + return nil +} + +// MRStatusOutput is the JSON output structure for gt mq status. +type MRStatusOutput struct { + // Core issue fields + ID string `json:"id"` + Title string `json:"title"` + Status string `json:"status"` + Priority int `json:"priority"` + Type string `json:"type"` + Assignee string `json:"assignee,omitempty"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + ClosedAt string `json:"closed_at,omitempty"` + + // MR-specific fields + Branch string `json:"branch,omitempty"` + Target string `json:"target,omitempty"` + SourceIssue string `json:"source_issue,omitempty"` + Worker string `json:"worker,omitempty"` + Rig string `json:"rig,omitempty"` + MergeCommit string `json:"merge_commit,omitempty"` + CloseReason string `json:"close_reason,omitempty"` + + // Dependencies + DependsOn []DependencyInfo `json:"depends_on,omitempty"` + Blocks []DependencyInfo `json:"blocks,omitempty"` +} + +// DependencyInfo represents a dependency or blocker. 
+type DependencyInfo struct { + ID string `json:"id"` + Title string `json:"title"` + Status string `json:"status"` + Priority int `json:"priority"` + Type string `json:"type"` +} + +func runMqStatus(cmd *cobra.Command, args []string) error { + mrID := args[0] + + // Use current working directory for beads operations + // (beads repos are per-rig, not per-workspace) + workDir, err := os.Getwd() + if err != nil { + return fmt.Errorf("getting current directory: %w", err) + } + + // Initialize beads client + bd := beads.New(workDir) + + // Fetch the issue + issue, err := bd.Show(mrID) + if err != nil { + if err == beads.ErrNotFound { + return fmt.Errorf("merge request '%s' not found", mrID) + } + return fmt.Errorf("fetching merge request: %w", err) + } + + // Parse MR-specific fields from description + mrFields := beads.ParseMRFields(issue) + + // Build output structure + output := MRStatusOutput{ + ID: issue.ID, + Title: issue.Title, + Status: issue.Status, + Priority: issue.Priority, + Type: issue.Type, + Assignee: issue.Assignee, + CreatedAt: issue.CreatedAt, + UpdatedAt: issue.UpdatedAt, + ClosedAt: issue.ClosedAt, + } + + // Add MR fields if present + if mrFields != nil { + output.Branch = mrFields.Branch + output.Target = mrFields.Target + output.SourceIssue = mrFields.SourceIssue + output.Worker = mrFields.Worker + output.Rig = mrFields.Rig + output.MergeCommit = mrFields.MergeCommit + output.CloseReason = mrFields.CloseReason + } + + // Add dependency info from the issue's Dependencies field + for _, dep := range issue.Dependencies { + output.DependsOn = append(output.DependsOn, DependencyInfo{ + ID: dep.ID, + Title: dep.Title, + Status: dep.Status, + Priority: dep.Priority, + Type: dep.Type, + }) + } + + // Add blocker info from the issue's Dependents field + for _, dep := range issue.Dependents { + output.Blocks = append(output.Blocks, DependencyInfo{ + ID: dep.ID, + Title: dep.Title, + Status: dep.Status, + Priority: dep.Priority, + Type: dep.Type, + }) + } 
+ + // JSON output + if mqStatusJSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(output) + } + + // Human-readable output + return printMqStatus(issue, mrFields) +} + +// printMqStatus prints detailed MR status in human-readable format. +func printMqStatus(issue *beads.Issue, mrFields *beads.MRFields) error { + // Header + fmt.Printf("%s %s\n", style.Bold.Render("📋 Merge Request:"), issue.ID) + fmt.Printf(" %s\n\n", issue.Title) + + // Status section + fmt.Printf("%s\n", style.Bold.Render("Status")) + statusDisplay := formatStatus(issue.Status) + fmt.Printf(" State: %s\n", statusDisplay) + fmt.Printf(" Priority: P%d\n", issue.Priority) + if issue.Type != "" { + fmt.Printf(" Type: %s\n", issue.Type) + } + if issue.Assignee != "" { + fmt.Printf(" Assignee: %s\n", issue.Assignee) + } + + // Timestamps + fmt.Printf("\n%s\n", style.Bold.Render("Timeline")) + if issue.CreatedAt != "" { + fmt.Printf(" Created: %s %s\n", issue.CreatedAt, formatTimeAgo(issue.CreatedAt)) + } + if issue.UpdatedAt != "" && issue.UpdatedAt != issue.CreatedAt { + fmt.Printf(" Updated: %s %s\n", issue.UpdatedAt, formatTimeAgo(issue.UpdatedAt)) + } + if issue.ClosedAt != "" { + fmt.Printf(" Closed: %s %s\n", issue.ClosedAt, formatTimeAgo(issue.ClosedAt)) + } + + // MR-specific fields + if mrFields != nil { + fmt.Printf("\n%s\n", style.Bold.Render("Merge Details")) + if mrFields.Branch != "" { + fmt.Printf(" Branch: %s\n", mrFields.Branch) + } + if mrFields.Target != "" { + fmt.Printf(" Target: %s\n", mrFields.Target) + } + if mrFields.SourceIssue != "" { + fmt.Printf(" Source Issue: %s\n", mrFields.SourceIssue) + } + if mrFields.Worker != "" { + fmt.Printf(" Worker: %s\n", mrFields.Worker) + } + if mrFields.Rig != "" { + fmt.Printf(" Rig: %s\n", mrFields.Rig) + } + if mrFields.MergeCommit != "" { + fmt.Printf(" Merge Commit: %s\n", mrFields.MergeCommit) + } + if mrFields.CloseReason != "" { + fmt.Printf(" Close Reason: %s\n", mrFields.CloseReason) + } + } + + 
// Dependencies (what this MR is waiting on) + if len(issue.Dependencies) > 0 { + fmt.Printf("\n%s\n", style.Bold.Render("Waiting On")) + for _, dep := range issue.Dependencies { + statusIcon := getStatusIcon(dep.Status) + fmt.Printf(" %s %s: %s %s\n", + statusIcon, + dep.ID, + truncateString(dep.Title, 50), + style.Dim.Render(fmt.Sprintf("[%s]", dep.Status))) + } + } + + // Blockers (what's waiting on this MR) + if len(issue.Dependents) > 0 { + fmt.Printf("\n%s\n", style.Bold.Render("Blocking")) + for _, dep := range issue.Dependents { + statusIcon := getStatusIcon(dep.Status) + fmt.Printf(" %s %s: %s %s\n", + statusIcon, + dep.ID, + truncateString(dep.Title, 50), + style.Dim.Render(fmt.Sprintf("[%s]", dep.Status))) + } + } + + // Description (if present and not just MR fields) + desc := getDescriptionWithoutMRFields(issue.Description) + if desc != "" { + fmt.Printf("\n%s\n", style.Bold.Render("Notes")) + // Indent each line + for _, line := range strings.Split(desc, "\n") { + fmt.Printf(" %s\n", line) + } + } + + return nil +} + +// formatStatus formats the status with appropriate styling. +func formatStatus(status string) string { + switch status { + case "open": + return style.Info.Render("● open") + case "in_progress": + return style.Bold.Render("▶ in_progress") + case "closed": + return style.Dim.Render("✓ closed") + default: + return status + } +} + +// getStatusIcon returns an icon for the given status. +func getStatusIcon(status string) string { + switch status { + case "open": + return "○" + case "in_progress": + return "▶" + case "closed": + return "✓" + default: + return "•" + } +} + +// formatTimeAgo formats a timestamp as a relative time string. 
+func formatTimeAgo(timestamp string) string { + // Try parsing common formats + formats := []string{ + time.RFC3339, + "2006-01-02T15:04:05Z", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05", + "2006-01-02", + } + + var t time.Time + var err error + for _, format := range formats { + t, err = time.Parse(format, timestamp) + if err == nil { + break + } + } + if err != nil { + return "" // Can't parse, return empty + } + + d := time.Since(t) + if d < 0 { + return style.Dim.Render("(in the future)") + } + + var ago string + if d < time.Minute { + ago = fmt.Sprintf("%ds ago", int(d.Seconds())) + } else if d < time.Hour { + ago = fmt.Sprintf("%dm ago", int(d.Minutes())) + } else if d < 24*time.Hour { + ago = fmt.Sprintf("%dh ago", int(d.Hours())) + } else { + ago = fmt.Sprintf("%dd ago", int(d.Hours()/24)) + } + + return style.Dim.Render("(" + ago + ")") +} + +// truncateString truncates a string to maxLen, adding "..." if truncated. +func truncateString(s string, maxLen int) string { + if len(s) <= maxLen { + return s + } + if maxLen <= 3 { + return s[:maxLen] + } + return s[:maxLen-3] + "..." +} + +// getDescriptionWithoutMRFields returns the description with MR field lines removed. 
+func getDescriptionWithoutMRFields(description string) string { + if description == "" { + return "" + } + + // Known MR field keys (lowercase) + mrKeys := map[string]bool{ + "branch": true, + "target": true, + "source_issue": true, + "source-issue": true, + "sourceissue": true, + "worker": true, + "rig": true, + "merge_commit": true, + "merge-commit": true, + "mergecommit": true, + "close_reason": true, + "close-reason": true, + "closereason": true, + "type": true, + } + + var lines []string + for _, line := range strings.Split(description, "\n") { + trimmed := strings.TrimSpace(line) + if trimmed == "" { + lines = append(lines, line) + continue + } + + // Check if this is an MR field line + colonIdx := strings.Index(trimmed, ":") + if colonIdx != -1 { + key := strings.ToLower(strings.TrimSpace(trimmed[:colonIdx])) + if mrKeys[key] { + continue // Skip MR field lines + } + } + + lines = append(lines, line) + } + + // Trim leading/trailing blank lines + result := strings.Join(lines, "\n") + result = strings.TrimSpace(result) + return result +} + +// runMqIntegrationCreate creates an integration branch for an epic. +func runMqIntegrationCreate(cmd *cobra.Command, args []string) error { + epicID := args[0] + + // Find workspace + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + // Find current rig + _, r, err := findCurrentRig(townRoot) + if err != nil { + return err + } + + // Initialize beads for the rig + bd := beads.New(r.Path) + + // 1. 
Verify epic exists + epic, err := bd.Show(epicID) + if err != nil { + if err == beads.ErrNotFound { + return fmt.Errorf("epic '%s' not found", epicID) + } + return fmt.Errorf("fetching epic: %w", err) + } + + // Verify it's actually an epic + if epic.Type != "epic" { + return fmt.Errorf("'%s' is a %s, not an epic", epicID, epic.Type) + } + + // Build integration branch name + branchName := "integration/" + epicID + + // Initialize git for the rig + g := git.NewGit(r.Path) + + // Check if integration branch already exists locally + exists, err := g.BranchExists(branchName) + if err != nil { + return fmt.Errorf("checking branch existence: %w", err) + } + if exists { + return fmt.Errorf("integration branch '%s' already exists locally", branchName) + } + + // Check if branch exists on remote + remoteExists, err := g.RemoteBranchExists("origin", branchName) + if err != nil { + // Log warning but continue - remote check isn't critical + fmt.Printf(" %s\n", style.Dim.Render("(could not check remote, continuing)")) + } + if remoteExists { + return fmt.Errorf("integration branch '%s' already exists on origin", branchName) + } + + // Ensure we have latest main + fmt.Printf("Fetching latest from origin...\n") + if err := g.Fetch("origin"); err != nil { + return fmt.Errorf("fetching from origin: %w", err) + } + + // 2. Create branch from origin/main + fmt.Printf("Creating branch '%s' from main...\n", branchName) + if err := g.CreateBranchFrom(branchName, "origin/main"); err != nil { + return fmt.Errorf("creating branch: %w", err) + } + + // 3. Push to origin + fmt.Printf("Pushing to origin...\n") + if err := g.Push("origin", branchName, false); err != nil { + // Clean up local branch on push failure + _ = g.DeleteBranch(branchName, true) + return fmt.Errorf("pushing to origin: %w", err) + } + + // 4. 
Store integration branch info in epic metadata + // Update the epic's description to include the integration branch info + newDesc := addIntegrationBranchField(epic.Description, branchName) + if newDesc != epic.Description { + if err := bd.Update(epicID, beads.UpdateOptions{Description: &newDesc}); err != nil { + // Non-fatal - branch was created, just metadata update failed + fmt.Printf(" %s\n", style.Dim.Render("(warning: could not update epic metadata)")) + } + } + + // Success output + fmt.Printf("\n%s Created integration branch\n", style.Bold.Render("✓")) + fmt.Printf(" Epic: %s\n", epicID) + fmt.Printf(" Branch: %s\n", branchName) + fmt.Printf(" From: main\n") + fmt.Printf("\n Future MRs for this epic's children can target:\n") + fmt.Printf(" gt mq submit --epic %s\n", epicID) + + return nil +} + +// addIntegrationBranchField adds or updates the integration_branch field in a description. +func addIntegrationBranchField(description, branchName string) string { + fieldLine := "integration_branch: " + branchName + + // If description is empty, just return the field + if description == "" { + return fieldLine + } + + // Check if integration_branch field already exists + lines := strings.Split(description, "\n") + var newLines []string + found := false + + for _, line := range lines { + trimmed := strings.TrimSpace(line) + if strings.HasPrefix(strings.ToLower(trimmed), "integration_branch:") { + // Replace existing field + newLines = append(newLines, fieldLine) + found = true + } else { + newLines = append(newLines, line) + } + } + + if !found { + // Add field at the beginning + newLines = append([]string{fieldLine}, newLines...) + } + + return strings.Join(newLines, "\n") +} + +// runMqIntegrationLand merges an integration branch to main. 
func runMqIntegrationLand(cmd *cobra.Command, args []string) error {
	epicID := args[0]

	// Find workspace
	townRoot, err := workspace.FindFromCwdOrError()
	if err != nil {
		return fmt.Errorf("not in a Gas Town workspace: %w", err)
	}

	// Find current rig
	_, r, err := findCurrentRig(townRoot)
	if err != nil {
		return err
	}

	// Initialize beads and git for the rig
	bd := beads.New(r.Path)
	g := git.NewGit(r.Path)

	// Build integration branch name (convention: integration/<epic-id>)
	branchName := "integration/" + epicID

	// Show what we're about to do
	if mqIntegrationLandDryRun {
		fmt.Printf("%s Dry run - no changes will be made\n\n", style.Bold.Render("🔍"))
	}

	// 1. Verify epic exists
	epic, err := bd.Show(epicID)
	if err != nil {
		if err == beads.ErrNotFound {
			return fmt.Errorf("epic '%s' not found", epicID)
		}
		return fmt.Errorf("fetching epic: %w", err)
	}

	if epic.Type != "epic" {
		return fmt.Errorf("'%s' is a %s, not an epic", epicID, epic.Type)
	}

	fmt.Printf("Landing integration branch for epic: %s\n", epicID)
	fmt.Printf(" Title: %s\n\n", epic.Title)

	// 2. Verify integration branch exists
	fmt.Printf("Checking integration branch...\n")
	exists, err := g.BranchExists(branchName)
	if err != nil {
		return fmt.Errorf("checking branch existence: %w", err)
	}

	// Also check remote if local doesn't exist
	if !exists {
		remoteExists, err := g.RemoteBranchExists("origin", branchName)
		if err != nil {
			return fmt.Errorf("checking remote branch: %w", err)
		}
		if !remoteExists {
			return fmt.Errorf("integration branch '%s' does not exist (locally or on origin)", branchName)
		}
		// Fetch and create local tracking branch
		fmt.Printf("Fetching integration branch from origin...\n")
		if err := g.FetchBranch("origin", branchName); err != nil {
			return fmt.Errorf("fetching branch: %w", err)
		}
	}
	fmt.Printf(" %s Branch exists\n", style.Bold.Render("✓"))

	// 3. Verify all MRs targeting this integration branch are merged
	fmt.Printf("Checking open merge requests...\n")
	openMRs, err := findOpenMRsForIntegration(bd, branchName)
	if err != nil {
		return fmt.Errorf("checking open MRs: %w", err)
	}

	if len(openMRs) > 0 {
		fmt.Printf("\n %s Open merge requests targeting %s:\n", style.Bold.Render("⚠"), branchName)
		for _, mr := range openMRs {
			fmt.Printf(" - %s: %s\n", mr.ID, mr.Title)
		}
		fmt.Println()

		// Landing with open MRs loses their work unless explicitly forced.
		if !mqIntegrationLandForce {
			return fmt.Errorf("cannot land: %d open MRs (use --force to override)", len(openMRs))
		}
		fmt.Printf(" %s Proceeding anyway (--force)\n", style.Dim.Render("⚠"))
	} else {
		fmt.Printf(" %s No open MRs targeting integration branch\n", style.Bold.Render("✓"))
	}

	// Dry run stops here
	if mqIntegrationLandDryRun {
		fmt.Printf("\n%s Dry run complete. Would perform:\n", style.Bold.Render("🔍"))
		fmt.Printf(" 1. Merge %s to main (--no-ff)\n", branchName)
		if !mqIntegrationLandSkipTests {
			fmt.Printf(" 2. Run tests on main\n")
		}
		fmt.Printf(" 3. Push main to origin\n")
		fmt.Printf(" 4. Delete integration branch (local and remote)\n")
		fmt.Printf(" 5. Update epic status to closed\n")
		return nil
	}

	// Ensure working directory is clean
	status, err := g.Status()
	if err != nil {
		return fmt.Errorf("checking git status: %w", err)
	}
	if !status.Clean {
		return fmt.Errorf("working directory is not clean; please commit or stash changes")
	}

	// Fetch latest
	fmt.Printf("Fetching latest from origin...\n")
	if err := g.Fetch("origin"); err != nil {
		return fmt.Errorf("fetching from origin: %w", err)
	}

	// 4. Checkout main and merge integration branch
	fmt.Printf("Checking out main...\n")
	if err := g.Checkout("main"); err != nil {
		return fmt.Errorf("checking out main: %w", err)
	}

	// Pull latest main
	if err := g.Pull("origin", "main"); err != nil {
		// Non-fatal if pull fails (e.g., first time)
		fmt.Printf(" %s\n", style.Dim.Render("(pull from origin/main skipped)"))
	}

	// Merge with --no-ff
	// NOTE(review): this merges the remote-tracking ref "origin/<branch>", not the
	// local branch; any commits that exist only locally on the integration branch
	// would be silently left out — confirm this is intended.
	fmt.Printf("Merging %s to main...\n", branchName)
	mergeMsg := fmt.Sprintf("Merge %s: %s\n\nEpic: %s", branchName, epic.Title, epicID)
	if err := g.MergeNoFF("origin/"+branchName, mergeMsg); err != nil {
		// Abort merge on failure
		_ = g.AbortMerge()
		return fmt.Errorf("merge failed: %w", err)
	}
	fmt.Printf(" %s Merged successfully\n", style.Bold.Render("✓"))

	// 5. Run tests (if configured and not skipped)
	if !mqIntegrationLandSkipTests {
		testCmd := getTestCommand(r.Path)
		if testCmd != "" {
			fmt.Printf("Running tests: %s\n", testCmd)
			if err := runTestCommand(r.Path, testCmd); err != nil {
				// Tests failed - reset main
				// NOTE(review): HEAD~1 assumes the --no-ff merge added exactly one
				// merge commit on top of main, so popping one commit undoes it.
				fmt.Printf(" %s Tests failed, resetting main...\n", style.Bold.Render("✗"))
				_ = g.Checkout("main")
				resetErr := resetHard(g, "HEAD~1")
				if resetErr != nil {
					return fmt.Errorf("tests failed and could not reset: %w (test error: %v)", resetErr, err)
				}
				return fmt.Errorf("tests failed: %w", err)
			}
			fmt.Printf(" %s Tests passed\n", style.Bold.Render("✓"))
		} else {
			fmt.Printf(" %s\n", style.Dim.Render("(no test command configured)"))
		}
	} else {
		fmt.Printf(" %s\n", style.Dim.Render("(tests skipped)"))
	}

	// 6. Push to origin
	fmt.Printf("Pushing main to origin...\n")
	if err := g.Push("origin", "main", false); err != nil {
		// Reset on push failure (same one-merge-commit assumption as above)
		resetErr := resetHard(g, "HEAD~1")
		if resetErr != nil {
			return fmt.Errorf("push failed and could not reset: %w (push error: %v)", resetErr, err)
		}
		return fmt.Errorf("push failed: %w", err)
	}
	fmt.Printf(" %s Pushed to origin\n", style.Bold.Render("✓"))

	// 7. Delete integration branch (failures here are reported but non-fatal:
	// the merge has already landed and been pushed)
	fmt.Printf("Deleting integration branch...\n")
	// Delete remote first
	if err := g.DeleteRemoteBranch("origin", branchName); err != nil {
		fmt.Printf(" %s\n", style.Dim.Render(fmt.Sprintf("(could not delete remote branch: %v)", err)))
	} else {
		fmt.Printf(" %s Deleted from origin\n", style.Bold.Render("✓"))
	}
	// Delete local
	if err := g.DeleteBranch(branchName, true); err != nil {
		fmt.Printf(" %s\n", style.Dim.Render(fmt.Sprintf("(could not delete local branch: %v)", err)))
	} else {
		fmt.Printf(" %s Deleted locally\n", style.Bold.Render("✓"))
	}

	// 8. Update epic status (also non-fatal)
	fmt.Printf("Updating epic status...\n")
	if err := bd.Close(epicID); err != nil {
		fmt.Printf(" %s\n", style.Dim.Render(fmt.Sprintf("(could not close epic: %v)", err)))
	} else {
		fmt.Printf(" %s Epic closed\n", style.Bold.Render("✓"))
	}

	// Success output
	fmt.Printf("\n%s Successfully landed integration branch\n", style.Bold.Render("✓"))
	fmt.Printf(" Epic: %s\n", epicID)
	fmt.Printf(" Branch: %s → main\n", branchName)

	return nil
}

// findOpenMRsForIntegration finds all open merge requests targeting an integration branch.
+func findOpenMRsForIntegration(bd *beads.Beads, targetBranch string) ([]*beads.Issue, error) { + // List all open merge requests + opts := beads.ListOptions{ + Type: "merge-request", + Status: "open", + } + allMRs, err := bd.List(opts) + if err != nil { + return nil, err + } + + // Filter to those targeting this integration branch + var openMRs []*beads.Issue + for _, mr := range allMRs { + fields := beads.ParseMRFields(mr) + if fields != nil && fields.Target == targetBranch { + openMRs = append(openMRs, mr) + } + } + + return openMRs, nil +} + +// getTestCommand returns the test command from rig config. +func getTestCommand(rigPath string) string { + configPath := filepath.Join(rigPath, "config.json") + data, err := os.ReadFile(configPath) + if err != nil { + // Try .gastown/config.json as fallback + configPath = filepath.Join(rigPath, ".gastown", "config.json") + data, err = os.ReadFile(configPath) + if err != nil { + return "" + } + } + + var rawConfig struct { + MergeQueue struct { + TestCommand string `json:"test_command"` + } `json:"merge_queue"` + TestCommand string `json:"test_command"` // Legacy fallback + } + if err := json.Unmarshal(data, &rawConfig); err != nil { + return "" + } + + if rawConfig.MergeQueue.TestCommand != "" { + return rawConfig.MergeQueue.TestCommand + } + return rawConfig.TestCommand +} + +// runTestCommand executes a test command in the given directory. +func runTestCommand(workDir, testCmd string) error { + parts := strings.Fields(testCmd) + if len(parts) == 0 { + return nil + } + + cmd := exec.Command(parts[0], parts[1:]...) + cmd.Dir = workDir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + return cmd.Run() +} + +// resetHard performs a git reset --hard to the given ref. 
+func resetHard(g *git.Git, ref string) error { + // We need to use the git package, but it doesn't have a Reset method + // For now, use the internal run method via Checkout workaround + // This is a bit of a hack but works for now + cmd := exec.Command("git", "reset", "--hard", ref) + cmd.Dir = g.WorkDir() + return cmd.Run() +} + +// detectIntegrationBranch checks if an issue is a child of an epic that has an integration branch. +// Returns the integration branch target (e.g., "integration/gt-epic") if found, or "" if not. +func detectIntegrationBranch(bd *beads.Beads, g *git.Git, issueID string) (string, error) { + // Get the source issue + issue, err := bd.Show(issueID) + if err != nil { + return "", fmt.Errorf("looking up issue %s: %w", issueID, err) + } + + // Check if issue has a parent + if issue.Parent == "" { + return "", nil // No parent, no integration branch + } + + // Get the parent issue + parent, err := bd.Show(issue.Parent) + if err != nil { + return "", fmt.Errorf("looking up parent %s: %w", issue.Parent, err) + } + + // Check if parent is an epic + if parent.Type != "epic" { + return "", nil // Parent is not an epic + } + + // Check if integration branch exists + integrationBranch := "integration/" + parent.ID + + // Check local first (faster) + exists, err := g.BranchExists(integrationBranch) + if err != nil { + return "", fmt.Errorf("checking local branch: %w", err) + } + if exists { + return integrationBranch, nil + } + + // Check remote + exists, err = g.RemoteBranchExists("origin", integrationBranch) + if err != nil { + // Remote check failure is non-fatal + return "", nil + } + if exists { + return integrationBranch, nil + } + + return "", nil // No integration branch found +} + +// IntegrationStatusOutput is the JSON output structure for integration status. 
type IntegrationStatusOutput struct {
	Epic        string                       `json:"epic"`
	Branch      string                       `json:"branch"`
	Created     string                       `json:"created,omitempty"`
	AheadOfMain int                          `json:"ahead_of_main"`
	MergedMRs   []IntegrationStatusMRSummary `json:"merged_mrs"`
	PendingMRs  []IntegrationStatusMRSummary `json:"pending_mrs"`
}

// IntegrationStatusMRSummary represents a merge request in the integration status output.
type IntegrationStatusMRSummary struct {
	ID     string `json:"id"`
	Title  string `json:"title"`
	Status string `json:"status,omitempty"`
}

// runMqIntegrationStatus shows the status of an integration branch for an epic.
func runMqIntegrationStatus(cmd *cobra.Command, args []string) error {
	epicID := args[0]

	// Find workspace
	townRoot, err := workspace.FindFromCwdOrError()
	if err != nil {
		return fmt.Errorf("not in a Gas Town workspace: %w", err)
	}

	// Find current rig
	_, r, err := findCurrentRig(townRoot)
	if err != nil {
		return err
	}

	// Initialize beads for the rig
	bd := beads.New(r.Path)

	// Build integration branch name
	branchName := "integration/" + epicID

	// Initialize git for the rig
	g := git.NewGit(r.Path)

	// Fetch from origin to ensure we have latest refs
	if err := g.Fetch("origin"); err != nil {
		// Non-fatal, continue with local data
	}

	// Check if integration branch exists (locally or remotely);
	// lookup errors are deliberately ignored and treated as "absent"
	localExists, _ := g.BranchExists(branchName)
	remoteExists, _ := g.RemoteBranchExists("origin", branchName)

	if !localExists && !remoteExists {
		return fmt.Errorf("integration branch '%s' does not exist", branchName)
	}

	// Determine which ref to use for comparison
	ref := branchName
	if !localExists && remoteExists {
		ref = "origin/" + branchName
	}

	// Get branch creation date
	createdDate, err := g.BranchCreatedDate(ref)
	if err != nil {
		createdDate = "" // Non-fatal
	}

	// Get commits ahead of main
	aheadCount, err := g.CommitsAhead("main", ref)
	if err != nil {
		aheadCount = 0 // Non-fatal
	}

	// Query for MRs targeting this integration branch
	targetBranch := "integration/" + epicID

	// Get all merge-request issues
	allMRs, err := bd.List(beads.ListOptions{
		Type:   "merge-request",
		Status: "", // all statuses
	})
	if err != nil {
		return fmt.Errorf("querying merge requests: %w", err)
	}

	// Filter by target branch and separate into merged/pending.
	// "closed" counts as merged; every other status is pending.
	var mergedMRs, pendingMRs []*beads.Issue
	for _, mr := range allMRs {
		fields := beads.ParseMRFields(mr)
		if fields == nil || fields.Target != targetBranch {
			continue
		}

		if mr.Status == "closed" {
			mergedMRs = append(mergedMRs, mr)
		} else {
			pendingMRs = append(pendingMRs, mr)
		}
	}

	// Build output structure
	output := IntegrationStatusOutput{
		Epic:        epicID,
		Branch:      branchName,
		Created:     createdDate,
		AheadOfMain: aheadCount,
		MergedMRs:   make([]IntegrationStatusMRSummary, 0, len(mergedMRs)),
		PendingMRs:  make([]IntegrationStatusMRSummary, 0, len(pendingMRs)),
	}

	for _, mr := range mergedMRs {
		// Extract the title without "Merge: " prefix for cleaner display
		title := strings.TrimPrefix(mr.Title, "Merge: ")
		output.MergedMRs = append(output.MergedMRs, IntegrationStatusMRSummary{
			ID:    mr.ID,
			Title: title,
		})
	}

	for _, mr := range pendingMRs {
		title := strings.TrimPrefix(mr.Title, "Merge: ")
		output.PendingMRs = append(output.PendingMRs, IntegrationStatusMRSummary{
			ID:     mr.ID,
			Title:  title,
			Status: mr.Status,
		})
	}

	// JSON output
	if mqIntegrationStatusJSON {
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", " ")
		return enc.Encode(output)
	}

	// Human-readable output
	return printIntegrationStatus(&output)
}

// printIntegrationStatus prints the integration status in human-readable format.
func printIntegrationStatus(output *IntegrationStatusOutput) error {
	fmt.Printf("Integration: %s\n", style.Bold.Render(output.Branch))
	if output.Created != "" {
		fmt.Printf("Created: %s\n", output.Created)
	}
	fmt.Printf("Ahead of main: %d commits\n", output.AheadOfMain)

	// Merged MRs
	fmt.Printf("\nMerged MRs (%d):\n", len(output.MergedMRs))
	if len(output.MergedMRs) == 0 {
		fmt.Printf(" %s\n", style.Dim.Render("(none)"))
	} else {
		for _, mr := range output.MergedMRs {
			fmt.Printf(" %-12s %s\n", mr.ID, mr.Title)
		}
	}

	// Pending MRs
	fmt.Printf("\nPending MRs (%d):\n", len(output.PendingMRs))
	if len(output.PendingMRs) == 0 {
		fmt.Printf(" %s\n", style.Dim.Render("(none)"))
	} else {
		for _, mr := range output.PendingMRs {
			// Only annotate non-default statuses; plain "open" is implied
			statusInfo := ""
			if mr.Status != "" && mr.Status != "open" {
				statusInfo = fmt.Sprintf(" (%s)", mr.Status)
			}
			fmt.Printf(" %-12s %s%s\n", mr.ID, mr.Title, style.Dim.Render(statusInfo))
		}
	}

	return nil
}
diff --git a/internal/cmd/mq_test.go b/internal/cmd/mq_test.go
new file mode 100644
index 00000000..2e530afc
--- /dev/null
+++ b/internal/cmd/mq_test.go
@@ -0,0 +1,214 @@
package cmd

import (
	"testing"
)

// TestAddIntegrationBranchField covers empty, fresh, replace-in-place, and
// multiline description cases for addIntegrationBranchField.
func TestAddIntegrationBranchField(t *testing.T) {
	tests := []struct {
		name        string
		description string
		branchName  string
		want        string
	}{
		{
			name:        "empty description",
			description: "",
			branchName:  "integration/gt-epic",
			want:        "integration_branch: integration/gt-epic",
		},
		{
			name:        "simple description",
			description: "Epic for authentication",
			branchName:  "integration/gt-auth",
			want:        "integration_branch: integration/gt-auth\nEpic for authentication",
		},
		{
			name:        "existing integration_branch field",
			description: "integration_branch: integration/old-epic\nSome description",
			branchName:  "integration/new-epic",
			want:        "integration_branch: integration/new-epic\nSome description",
		},
		{
			name:        "multiline description",
			description: "Line 1\nLine 2\nLine 3",
			branchName:  "integration/gt-xyz",
			want:        "integration_branch: integration/gt-xyz\nLine 1\nLine 2\nLine 3",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := addIntegrationBranchField(tt.description, tt.branchName)
			if got != tt.want {
				t.Errorf("addIntegrationBranchField() = %q, want %q", got, tt.want)
			}
		})
	}
}

// TestParseBranchName checks issue/worker extraction from branch name formats.
func TestParseBranchName(t *testing.T) {
	tests := []struct {
		name       string
		branch     string
		wantIssue  string
		wantWorker string
	}{
		{
			name:       "polecat branch format",
			branch:     "polecat/Nux/gt-xyz",
			wantIssue:  "gt-xyz",
			wantWorker: "Nux",
		},
		{
			name:       "polecat branch with subtask",
			branch:     "polecat/Worker/gt-abc.1",
			wantIssue:  "gt-abc.1",
			wantWorker: "Worker",
		},
		{
			name:       "simple issue branch",
			branch:     "gt-xyz",
			wantIssue:  "gt-xyz",
			wantWorker: "",
		},
		{
			name:       "feature branch with issue",
			branch:     "feature/gt-abc-impl",
			wantIssue:  "gt-abc",
			wantWorker: "",
		},
		{
			name:       "no issue pattern",
			branch:     "main",
			wantIssue:  "",
			wantWorker: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			info := parseBranchName(tt.branch)
			if info.Issue != tt.wantIssue {
				t.Errorf("parseBranchName() Issue = %q, want %q", info.Issue, tt.wantIssue)
			}
			if info.Worker != tt.wantWorker {
				t.Errorf("parseBranchName() Worker = %q, want %q", info.Worker, tt.wantWorker)
			}
		})
	}
}

// TestFormatMRAge is a smoke test: it only asserts a non-empty result,
// not an exact age string (which would be time-dependent).
func TestFormatMRAge(t *testing.T) {
	tests := []struct {
		name      string
		createdAt string
		wantOk    bool // just check it doesn't panic/error
	}{
		{
			name:      "RFC3339 format",
			createdAt: "2025-01-01T12:00:00Z",
			wantOk:    true,
		},
		{
			name:      "alternative format",
			createdAt: "2025-01-01T12:00:00",
			wantOk:    true,
		},
		{
			name:      "invalid format",
			createdAt: "not-a-date",
			wantOk:    true, // returns "?" for invalid
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := formatMRAge(tt.createdAt)
			if tt.wantOk && result == "" {
				t.Errorf("formatMRAge() returned empty for %s", tt.createdAt)
			}
		})
	}
}

// TestGetDescriptionWithoutMRFields verifies MR metadata lines are stripped
// while free-form description text is preserved.
func TestGetDescriptionWithoutMRFields(t *testing.T) {
	tests := []struct {
		name        string
		description string
		want        string
	}{
		{
			name:        "empty description",
			description: "",
			want:        "",
		},
		{
			name:        "only MR fields",
			description: "branch: polecat/Nux/gt-xyz\ntarget: main\nworker: Nux",
			want:        "",
		},
		{
			name:        "mixed content",
			description: "branch: polecat/Nux/gt-xyz\nSome custom notes\ntarget: main",
			want:        "Some custom notes",
		},
		{
			name:        "no MR fields",
			description: "Just a regular description\nWith multiple lines",
			want:        "Just a regular description\nWith multiple lines",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := getDescriptionWithoutMRFields(tt.description)
			if got != tt.want {
				t.Errorf("getDescriptionWithoutMRFields() = %q, want %q", got, tt.want)
			}
		})
	}
}

// TestTruncateString covers no-op, exact-fit, ellipsis, and
// too-short-for-ellipsis truncation cases.
func TestTruncateString(t *testing.T) {
	tests := []struct {
		name   string
		s      string
		maxLen int
		want   string
	}{
		{
			name:   "short string",
			s:      "hello",
			maxLen: 10,
			want:   "hello",
		},
		{
			name:   "exact length",
			s:      "hello",
			maxLen: 5,
			want:   "hello",
		},
		{
			name:   "needs truncation",
			s:      "hello world",
			maxLen: 8,
			want:   "hello...",
		},
		{
			name:   "very short max",
			s:      "hello",
			maxLen: 3,
			want:   "hel",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := truncateString(tt.s, tt.maxLen)
			if got != tt.want {
				t.Errorf("truncateString() = %q, want %q", got, tt.want)
			}
		})
	}
}
diff --git a/internal/cmd/polecat.go b/internal/cmd/polecat.go
index 69031718..ac24a916 100644
--- a/internal/cmd/polecat.go
+++ b/internal/cmd/polecat.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"os/exec"
 	"path/filepath"
"github.com/spf13/cobra" @@ -26,8 +27,9 @@ var ( ) var polecatCmd = &cobra.Command{ - Use: "polecat", - Short: "Manage polecats in rigs", + Use: "polecat", + Aliases: []string{"cat", "polecats"}, + Short: "Manage polecats in rigs", Long: `Manage polecat lifecycle in rigs. Polecats are worker agents that operate in their own git clones. @@ -39,11 +41,11 @@ var polecatListCmd = &cobra.Command{ Short: "List polecats in a rig", Long: `List polecats in a rig or all rigs. -Output: - - Name - - State (idle/active/working/done/stuck) - - Current issue (if any) - - Session status (running/stopped) +In the ephemeral model, polecats exist only while working. The list shows +all currently active polecats with their states: + - working: Actively working on an issue + - done: Completed work, waiting for cleanup + - stuck: Needs assistance Examples: gt polecat list gastown @@ -84,10 +86,13 @@ Example: var polecatWakeCmd = &cobra.Command{ Use: "wake /", - Short: "Mark polecat as active (ready for work)", - Long: `Mark polecat as active (ready for work). + Short: "(Deprecated) Resume a polecat to working state", + Long: `Resume a polecat to working state. -Transitions: idle → active +DEPRECATED: In the ephemeral model, polecats are created fresh for each task +via 'gt spawn'. This command is kept for backward compatibility. + +Transitions: done → working Example: gt polecat wake gastown/Toast`, @@ -97,11 +102,14 @@ Example: var polecatSleepCmd = &cobra.Command{ Use: "sleep /", - Short: "Mark polecat as idle (not available)", - Long: `Mark polecat as idle (not available). + Short: "(Deprecated) Mark polecat as done", + Long: `Mark polecat as done. -Transitions: active → idle -Fails if session is running (stop first). +DEPRECATED: In the ephemeral model, polecats use 'gt handoff' when complete, +which triggers automatic cleanup by the Witness. This command is kept for +backward compatibility. 
+ +Transitions: working → done Example: gt polecat sleep gastown/Toast`, @@ -109,6 +117,63 @@ Example: RunE: runPolecatSleep, } +var polecatDoneCmd = &cobra.Command{ + Use: "done /", + Aliases: []string{"finish"}, + Short: "Mark polecat as done with work and return to idle", + Long: `Mark polecat as done with work and return to idle. + +Transitions: working/done/stuck → idle +Clears the assigned issue. +Fails if session is running (stop first). + +Example: + gt polecat done gastown/Toast + gt polecat finish gastown/Toast`, + Args: cobra.ExactArgs(1), + RunE: runPolecatDone, +} + +var polecatResetCmd = &cobra.Command{ + Use: "reset /", + Short: "Force reset polecat to idle state", + Long: `Force reset polecat to idle state. + +Transitions: any state → idle +Clears the assigned issue. +Use when polecat is stuck in an unexpected state. +Fails if session is running (stop first). + +Example: + gt polecat reset gastown/Toast`, + Args: cobra.ExactArgs(1), + RunE: runPolecatReset, +} + +var polecatSyncCmd = &cobra.Command{ + Use: "sync /", + Short: "Sync beads for a polecat", + Long: `Sync beads for a polecat's worktree. + +Runs 'bd sync' in the polecat's worktree to push local beads changes +to the shared sync branch and pull remote changes. + +Use --all to sync all polecats in a rig. +Use --from-main to only pull (no push). 
+ +Examples: + gt polecat sync gastown/Toast + gt polecat sync gastown --all + gt polecat sync gastown/Toast --from-main`, + Args: cobra.MaximumNArgs(1), + RunE: runPolecatSync, +} + +var ( + polecatSyncAll bool + polecatSyncFromMain bool +) + func init() { // List flags polecatListCmd.Flags().BoolVar(&polecatListJSON, "json", false, "Output as JSON") @@ -117,12 +182,19 @@ func init() { // Remove flags polecatRemoveCmd.Flags().BoolVarP(&polecatForce, "force", "f", false, "Force removal, bypassing checks") + // Sync flags + polecatSyncCmd.Flags().BoolVar(&polecatSyncAll, "all", false, "Sync all polecats in the rig") + polecatSyncCmd.Flags().BoolVar(&polecatSyncFromMain, "from-main", false, "Pull only, no push") + // Add subcommands polecatCmd.AddCommand(polecatListCmd) polecatCmd.AddCommand(polecatAddCmd) polecatCmd.AddCommand(polecatRemoveCmd) polecatCmd.AddCommand(polecatWakeCmd) polecatCmd.AddCommand(polecatSleepCmd) + polecatCmd.AddCommand(polecatDoneCmd) + polecatCmd.AddCommand(polecatResetCmd) + polecatCmd.AddCommand(polecatSyncCmd) rootCmd.AddCommand(polecatCmd) } @@ -223,11 +295,11 @@ func runPolecatList(cmd *cobra.Command, args []string) error { } if len(allPolecats) == 0 { - fmt.Println("No polecats found.") + fmt.Println("No active polecats found.") return nil } - fmt.Printf("%s\n\n", style.Bold.Render("Polecats")) + fmt.Printf("%s\n\n", style.Bold.Render("Active Polecats")) for _, p := range allPolecats { // Session indicator sessionStatus := style.Dim.Render("○") @@ -235,9 +307,15 @@ func runPolecatList(cmd *cobra.Command, args []string) error { sessionStatus = style.Success.Render("●") } + // Normalize state for display (legacy idle/active → working) + displayState := p.State + if p.State == polecat.StateIdle || p.State == polecat.StateActive { + displayState = polecat.StateWorking + } + // State color - stateStr := string(p.State) - switch p.State { + stateStr := string(displayState) + switch displayState { case polecat.StateWorking: stateStr = 
style.Info.Render(stateStr) case polecat.StateStuck: @@ -315,6 +393,9 @@ func runPolecatRemove(cmd *cobra.Command, args []string) error { } func runPolecatWake(cmd *cobra.Command, args []string) error { + fmt.Println(style.Warning.Render("DEPRECATED: Use 'gt spawn' to create fresh polecats instead")) + fmt.Println() + rigName, polecatName, err := parseAddress(args[0]) if err != nil { return err @@ -329,11 +410,41 @@ func runPolecatWake(cmd *cobra.Command, args []string) error { return fmt.Errorf("waking polecat: %w", err) } - fmt.Printf("%s Polecat %s is now active.\n", style.SuccessPrefix, polecatName) + fmt.Printf("%s Polecat %s is now working.\n", style.SuccessPrefix, polecatName) return nil } func runPolecatSleep(cmd *cobra.Command, args []string) error { + fmt.Println(style.Warning.Render("DEPRECATED: Use 'gt handoff' from within a polecat session instead")) + fmt.Println() + + rigName, polecatName, err := parseAddress(args[0]) + if err != nil { + return err + } + + mgr, r, err := getPolecatManager(rigName) + if err != nil { + return err + } + + // Check if session is running + t := tmux.NewTmux() + sessMgr := session.NewManager(t, r) + running, _ := sessMgr.IsRunning(polecatName) + if running { + return fmt.Errorf("session is running. Use 'gt handoff' from the polecat session, or stop it with: gt session stop %s/%s", rigName, polecatName) + } + + if err := mgr.Sleep(polecatName); err != nil { + return fmt.Errorf("marking polecat as done: %w", err) + } + + fmt.Printf("%s Polecat %s is now done.\n", style.SuccessPrefix, polecatName) + return nil +} + +func runPolecatDone(cmd *cobra.Command, args []string) error { rigName, polecatName, err := parseAddress(args[0]) if err != nil { return err @@ -352,10 +463,117 @@ func runPolecatSleep(cmd *cobra.Command, args []string) error { return fmt.Errorf("session is running. 
Stop it first with: gt session stop %s/%s", rigName, polecatName) } - if err := mgr.Sleep(polecatName); err != nil { - return fmt.Errorf("sleeping polecat: %w", err) + if err := mgr.Finish(polecatName); err != nil { + return fmt.Errorf("finishing polecat: %w", err) } fmt.Printf("%s Polecat %s is now idle.\n", style.SuccessPrefix, polecatName) return nil } + +func runPolecatReset(cmd *cobra.Command, args []string) error { + rigName, polecatName, err := parseAddress(args[0]) + if err != nil { + return err + } + + mgr, r, err := getPolecatManager(rigName) + if err != nil { + return err + } + + // Check if session is running + t := tmux.NewTmux() + sessMgr := session.NewManager(t, r) + running, _ := sessMgr.IsRunning(polecatName) + if running { + return fmt.Errorf("session is running. Stop it first with: gt session stop %s/%s", rigName, polecatName) + } + + if err := mgr.Reset(polecatName); err != nil { + return fmt.Errorf("resetting polecat: %w", err) + } + + fmt.Printf("%s Polecat %s has been reset to idle.\n", style.SuccessPrefix, polecatName) + return nil +} + +func runPolecatSync(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return fmt.Errorf("rig or rig/polecat address required") + } + + // Parse address - could be "rig" or "rig/polecat" + rigName, polecatName, err := parseAddress(args[0]) + if err != nil { + // Might just be a rig name + rigName = args[0] + polecatName = "" + } + + mgr, r, err := getPolecatManager(rigName) + if err != nil { + return err + } + + // Get list of polecats to sync + var polecatsToSync []string + if polecatSyncAll || polecatName == "" { + polecats, err := mgr.List() + if err != nil { + return fmt.Errorf("listing polecats: %w", err) + } + for _, p := range polecats { + polecatsToSync = append(polecatsToSync, p.Name) + } + } else { + polecatsToSync = []string{polecatName} + } + + if len(polecatsToSync) == 0 { + fmt.Println("No polecats to sync.") + return nil + } + + // Sync each polecat + var syncErrors []string + 
for _, name := range polecatsToSync { + polecatDir := filepath.Join(r.Path, "polecats", name) + + // Check directory exists + if _, err := os.Stat(polecatDir); os.IsNotExist(err) { + syncErrors = append(syncErrors, fmt.Sprintf("%s: directory not found", name)) + continue + } + + // Build sync command + syncArgs := []string{"sync"} + if polecatSyncFromMain { + syncArgs = append(syncArgs, "--from-main") + } + + fmt.Printf("Syncing %s/%s...\n", rigName, name) + + syncCmd := exec.Command("bd", syncArgs...) + syncCmd.Dir = polecatDir + output, err := syncCmd.CombinedOutput() + if err != nil { + syncErrors = append(syncErrors, fmt.Sprintf("%s: %v", name, err)) + if len(output) > 0 { + fmt.Printf(" %s\n", style.Dim.Render(string(output))) + } + } else { + fmt.Printf(" %s\n", style.Success.Render("✓ synced")) + } + } + + if len(syncErrors) > 0 { + fmt.Printf("\n%s Some syncs failed:\n", style.Warning.Render("Warning:")) + for _, e := range syncErrors { + fmt.Printf(" - %s\n", e) + } + return fmt.Errorf("%d sync(s) failed", len(syncErrors)) + } + + return nil +} diff --git a/internal/cmd/prime.go b/internal/cmd/prime.go index 890b949e..6b29ed9b 100644 --- a/internal/cmd/prime.go +++ b/internal/cmd/prime.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/beads" "github.com/steveyegge/gastown/internal/style" "github.com/steveyegge/gastown/internal/templates" "github.com/steveyegge/gastown/internal/workspace" @@ -71,7 +72,14 @@ func runPrime(cmd *cobra.Command, args []string) error { ctx := detectRole(cwd, townRoot) // Output context - return outputPrimeContext(ctx) + if err := outputPrimeContext(ctx); err != nil { + return err + } + + // Output handoff content if present + outputHandoffContent(ctx) + + return nil } func detectRole(cwd, townRoot string) RoleContext { @@ -307,3 +315,31 @@ func outputUnknownContext(ctx RoleContext) { fmt.Println() fmt.Printf("Town root: %s\n", style.Dim.Render(ctx.TownRoot)) } + +// 
outputHandoffContent reads and displays the pinned handoff bead for the role. +func outputHandoffContent(ctx RoleContext) { + if ctx.Role == RoleUnknown { + return + } + + // Get role key for handoff bead lookup + roleKey := string(ctx.Role) + + bd := beads.New(ctx.TownRoot) + issue, err := bd.FindHandoffBead(roleKey) + if err != nil { + // Silently skip if beads lookup fails (might not be a beads repo) + return + } + if issue == nil || issue.Description == "" { + // No handoff content + return + } + + // Display handoff content + fmt.Println() + fmt.Printf("%s\n\n", style.Bold.Render("## 🤝 Handoff from Previous Session")) + fmt.Println(issue.Description) + fmt.Println() + fmt.Println(style.Dim.Render("(Clear with: gt rig reset --handoff)")) +} diff --git a/internal/cmd/refinery.go b/internal/cmd/refinery.go index 1a1157d3..8efa8963 100644 --- a/internal/cmd/refinery.go +++ b/internal/cmd/refinery.go @@ -23,8 +23,9 @@ var ( ) var refineryCmd = &cobra.Command{ - Use: "refinery", - Short: "Manage the merge queue processor", + Use: "refinery", + Aliases: []string{"ref"}, + Short: "Manage the merge queue processor", Long: `Manage the Refinery merge queue processor for a rig. 
The Refinery processes merge requests from polecats, merging their work diff --git a/internal/cmd/rig.go b/internal/cmd/rig.go index a6e8adfd..ebcfbcaf 100644 --- a/internal/cmd/rig.go +++ b/internal/cmd/rig.go @@ -2,21 +2,21 @@ package cmd import ( - "encoding/json" "fmt" "os" - "os/exec" "path/filepath" - "strings" "time" "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/beads" "github.com/steveyegge/gastown/internal/config" "github.com/steveyegge/gastown/internal/git" - "github.com/steveyegge/gastown/internal/polecat" "github.com/steveyegge/gastown/internal/refinery" "github.com/steveyegge/gastown/internal/rig" + "github.com/steveyegge/gastown/internal/session" "github.com/steveyegge/gastown/internal/style" + "github.com/steveyegge/gastown/internal/tmux" + "github.com/steveyegge/gastown/internal/witness" "github.com/steveyegge/gastown/internal/workspace" ) @@ -68,28 +68,45 @@ var rigRemoveCmd = &cobra.Command{ RunE: runRigRemove, } -var rigInfoCmd = &cobra.Command{ - Use: "info ", - Short: "Show detailed information about a rig", - Long: `Show detailed status information for a specific rig. +var rigResetCmd = &cobra.Command{ + Use: "reset", + Short: "Reset rig state (handoff content, etc.)", + Long: `Reset various rig state. -Displays: - - Rig path and git URL - - Active polecats with status - - Refinery status - - Witness status - - Beads summary (open issues count) +By default, resets all resettable state. Use flags to reset specific items. -Example: - gt rig info gastown`, +Examples: + gt rig reset # Reset all state + gt rig reset --handoff # Clear handoff content only`, + RunE: runRigReset, +} + +var rigShutdownCmd = &cobra.Command{ + Use: "shutdown ", + Short: "Gracefully stop all rig agents", + Long: `Stop all agents in a rig. + +This command gracefully shuts down: +- All polecat sessions +- The refinery (if running) +- The witness (if running) + +Use --force to skip graceful shutdown and kill immediately. 
+ +Examples: + gt rig shutdown gastown + gt rig shutdown gastown --force`, Args: cobra.ExactArgs(1), - RunE: runRigInfo, + RunE: runRigShutdown, } // Flags var ( - rigAddPrefix string - rigAddCrew string + rigAddPrefix string + rigAddCrew string + rigResetHandoff bool + rigResetRole string + rigShutdownForce bool ) func init() { @@ -97,10 +114,16 @@ func init() { rigCmd.AddCommand(rigAddCmd) rigCmd.AddCommand(rigListCmd) rigCmd.AddCommand(rigRemoveCmd) - rigCmd.AddCommand(rigInfoCmd) + rigCmd.AddCommand(rigResetCmd) + rigCmd.AddCommand(rigShutdownCmd) rigAddCmd.Flags().StringVar(&rigAddPrefix, "prefix", "", "Beads issue prefix (default: derived from name)") rigAddCmd.Flags().StringVar(&rigAddCrew, "crew", "main", "Default crew workspace name") + + rigResetCmd.Flags().BoolVar(&rigResetHandoff, "handoff", false, "Clear handoff content") + rigResetCmd.Flags().StringVar(&rigResetRole, "role", "", "Role to reset (default: auto-detect from cwd)") + + rigShutdownCmd.Flags().BoolVarP(&rigShutdownForce, "force", "f", false, "Force immediate shutdown") } func runRigAdd(cmd *cobra.Command, args []string) error { @@ -262,14 +285,53 @@ func runRigRemove(cmd *cobra.Command, args []string) error { return nil } +func runRigReset(cmd *cobra.Command, args []string) error { + // Find workspace + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("getting current directory: %w", err) + } + + // Determine role to reset + roleKey := rigResetRole + if roleKey == "" { + // Auto-detect from cwd + ctx := detectRole(cwd, townRoot) + if ctx.Role == RoleUnknown { + return fmt.Errorf("could not detect role from current directory; use --role to specify") + } + roleKey = string(ctx.Role) + } + + // If no specific flags, reset all; otherwise only reset what's specified + resetAll := !rigResetHandoff + + bd := beads.New(townRoot) + + // Reset handoff 
content + if resetAll || rigResetHandoff { + if err := bd.ClearHandoffContent(roleKey); err != nil { + return fmt.Errorf("clearing handoff content: %w", err) + } + fmt.Printf("%s Cleared handoff content for %s\n", style.Success.Render("✓"), roleKey) + } + + return nil +} + // Helper to check if path exists func pathExists(path string) bool { _, err := os.Stat(path) return err == nil } -func runRigInfo(cmd *cobra.Command, args []string) error { - name := args[0] +func runRigShutdown(cmd *cobra.Command, args []string) error { + rigName := args[0] // Find workspace townRoot, err := workspace.FindFromCwdOrError() @@ -277,317 +339,63 @@ func runRigInfo(cmd *cobra.Command, args []string) error { return fmt.Errorf("not in a Gas Town workspace: %w", err) } - // Load rigs config + // Load rigs config and get rig rigsPath := filepath.Join(townRoot, "mayor", "rigs.json") rigsConfig, err := config.LoadRigsConfig(rigsPath) if err != nil { - return fmt.Errorf("loading rigs config: %w", err) + rigsConfig = &config.RigsConfig{Rigs: make(map[string]config.RigEntry)} } - // Create rig manager and get the rig g := git.NewGit(townRoot) - mgr := rig.NewManager(townRoot, rigsConfig, g) - - r, err := mgr.GetRig(name) + rigMgr := rig.NewManager(townRoot, rigsConfig, g) + r, err := rigMgr.GetRig(rigName) if err != nil { - return fmt.Errorf("rig not found: %s", name) + return fmt.Errorf("rig '%s' not found", rigName) } - // Print rig header - fmt.Printf("%s\n", style.Bold.Render(r.Name)) - fmt.Printf(" Path: %s\n", r.Path) - fmt.Printf(" Git: %s\n", r.GitURL) - if r.Config != nil && r.Config.Prefix != "" { - fmt.Printf(" Beads prefix: %s\n", r.Config.Prefix) - } - fmt.Println() + fmt.Printf("Shutting down rig %s...\n", style.Bold.Render(rigName)) - // Show polecats - fmt.Printf("%s\n", style.Bold.Render("Polecats")) - polecatMgr := polecat.NewManager(r, g) - polecats, err := polecatMgr.List() - if err != nil || len(polecats) == 0 { - fmt.Printf(" %s\n", style.Dim.Render("(none)")) - } else 
{ - for _, p := range polecats { - stateStr := formatPolecatState(p.State) - if p.Issue != "" { - fmt.Printf(" %s %s %s\n", p.Name, stateStr, style.Dim.Render(p.Issue)) - } else { - fmt.Printf(" %s %s\n", p.Name, stateStr) - } + var errors []string + + // 1. Stop all polecat sessions + t := tmux.NewTmux() + sessMgr := session.NewManager(t, r) + infos, err := sessMgr.List() + if err == nil && len(infos) > 0 { + fmt.Printf(" Stopping %d polecat session(s)...\n", len(infos)) + if err := sessMgr.StopAll(rigShutdownForce); err != nil { + errors = append(errors, fmt.Sprintf("polecat sessions: %v", err)) } } - fmt.Println() - // Show crew workers - fmt.Printf("%s\n", style.Bold.Render("Crew")) - if len(r.Crew) == 0 { - fmt.Printf(" %s\n", style.Dim.Render("(none)")) - } else { - for _, c := range r.Crew { - fmt.Printf(" %s\n", c) + // 2. Stop the refinery + refMgr := refinery.NewManager(r) + refStatus, err := refMgr.Status() + if err == nil && refStatus.State == refinery.StateRunning { + fmt.Printf(" Stopping refinery...\n") + if err := refMgr.Stop(); err != nil { + errors = append(errors, fmt.Sprintf("refinery: %v", err)) } } - fmt.Println() - // Show refinery status - fmt.Printf("%s\n", style.Bold.Render("Refinery")) - if r.HasRefinery { - refMgr := refinery.NewManager(r) - refStatus, err := refMgr.Status() - if err != nil { - fmt.Printf(" %s %s\n", style.Warning.Render("!"), "Error loading status") - } else { - stateStr := formatRefineryState(refStatus.State) - fmt.Printf(" Status: %s\n", stateStr) - if refStatus.State == refinery.StateRunning && refStatus.PID > 0 { - fmt.Printf(" PID: %d\n", refStatus.PID) - } - if refStatus.CurrentMR != nil { - fmt.Printf(" Current: %s (%s)\n", refStatus.CurrentMR.Branch, refStatus.CurrentMR.Worker) - } - if refStatus.Stats.TotalMerged > 0 || refStatus.Stats.TotalFailed > 0 { - fmt.Printf(" Stats: %d merged, %d failed\n", refStatus.Stats.TotalMerged, refStatus.Stats.TotalFailed) - } + // 3. 
Stop the witness + witMgr := witness.NewManager(r) + witStatus, err := witMgr.Status() + if err == nil && witStatus.State == witness.StateRunning { + fmt.Printf(" Stopping witness...\n") + if err := witMgr.Stop(); err != nil { + errors = append(errors, fmt.Sprintf("witness: %v", err)) } - } else { - fmt.Printf(" %s\n", style.Dim.Render("(not configured)")) - } - fmt.Println() - - // Show witness status - fmt.Printf("%s\n", style.Bold.Render("Witness")) - if r.HasWitness { - witnessState := loadWitnessState(r.Path) - if witnessState != nil { - fmt.Printf(" Last active: %s\n", formatTimeAgo(witnessState.LastActive)) - if witnessState.Session != "" { - fmt.Printf(" Session: %s\n", witnessState.Session) - } - } else { - fmt.Printf(" %s\n", style.Success.Render("configured")) - } - } else { - fmt.Printf(" %s\n", style.Dim.Render("(not configured)")) - } - fmt.Println() - - // Show mayor status - fmt.Printf("%s\n", style.Bold.Render("Mayor")) - if r.HasMayor { - mayorState := loadMayorState(r.Path) - if mayorState != nil { - fmt.Printf(" Last active: %s\n", formatTimeAgo(mayorState.LastActive)) - } else { - fmt.Printf(" %s\n", style.Success.Render("configured")) - } - } else { - fmt.Printf(" %s\n", style.Dim.Render("(not configured)")) - } - fmt.Println() - - // Show beads summary - fmt.Printf("%s\n", style.Bold.Render("Beads")) - beadsStats := getBeadsSummary(r.Path) - if beadsStats != nil { - fmt.Printf(" Open: %d In Progress: %d Closed: %d\n", - beadsStats.Open, beadsStats.InProgress, beadsStats.Closed) - if beadsStats.Blocked > 0 { - fmt.Printf(" Blocked: %d\n", beadsStats.Blocked) - } - } else { - fmt.Printf(" %s\n", style.Dim.Render("(beads not initialized)")) } + if len(errors) > 0 { + fmt.Printf("\n%s Some agents failed to stop:\n", style.Warning.Render("⚠")) + for _, e := range errors { + fmt.Printf(" - %s\n", e) + } + return fmt.Errorf("shutdown incomplete") + } + + fmt.Printf("%s Rig %s shut down successfully\n", style.Success.Render("✓"), rigName) return nil 
} - -// formatPolecatState returns a styled string for polecat state. -func formatPolecatState(state polecat.State) string { - switch state { - case polecat.StateIdle: - return style.Dim.Render("idle") - case polecat.StateActive: - return style.Info.Render("active") - case polecat.StateWorking: - return style.Success.Render("working") - case polecat.StateDone: - return style.Success.Render("done") - case polecat.StateStuck: - return style.Warning.Render("stuck") - default: - return style.Dim.Render(string(state)) - } -} - -// formatRefineryState returns a styled string for refinery state. -func formatRefineryState(state refinery.State) string { - switch state { - case refinery.StateStopped: - return style.Dim.Render("stopped") - case refinery.StateRunning: - return style.Success.Render("running") - case refinery.StatePaused: - return style.Warning.Render("paused") - default: - return style.Dim.Render(string(state)) - } -} - -// loadWitnessState loads the witness state.json. -func loadWitnessState(rigPath string) *config.AgentState { - statePath := filepath.Join(rigPath, "witness", "state.json") - data, err := os.ReadFile(statePath) - if err != nil { - return nil - } - var state config.AgentState - if err := json.Unmarshal(data, &state); err != nil { - return nil - } - return &state -} - -// loadMayorState loads the mayor state.json. -func loadMayorState(rigPath string) *config.AgentState { - statePath := filepath.Join(rigPath, "mayor", "state.json") - data, err := os.ReadFile(statePath) - if err != nil { - return nil - } - var state config.AgentState - if err := json.Unmarshal(data, &state); err != nil { - return nil - } - return &state -} - -// formatTimeAgo formats a time as a human-readable "ago" string. 
-func formatTimeAgo(t time.Time) string { - if t.IsZero() { - return "never" - } - d := time.Since(t) - if d < time.Minute { - return "just now" - } - if d < time.Hour { - mins := int(d.Minutes()) - if mins == 1 { - return "1 minute ago" - } - return fmt.Sprintf("%d minutes ago", mins) - } - if d < 24*time.Hour { - hours := int(d.Hours()) - if hours == 1 { - return "1 hour ago" - } - return fmt.Sprintf("%d hours ago", hours) - } - days := int(d.Hours() / 24) - if days == 1 { - return "1 day ago" - } - return fmt.Sprintf("%d days ago", days) -} - -// BeadsSummary contains counts of issues by status. -type BeadsSummary struct { - Open int - InProgress int - Closed int - Blocked int -} - -// getBeadsSummary runs bd stats to get beads summary. -func getBeadsSummary(rigPath string) *BeadsSummary { - // Check if .beads directory exists - beadsDir := filepath.Join(rigPath, ".beads") - if _, err := os.Stat(beadsDir); os.IsNotExist(err) { - return nil - } - - // Try running bd stats --json (it may exit with code 1 but still output JSON) - cmd := exec.Command("bd", "stats", "--json") - cmd.Dir = rigPath - output, _ := cmd.CombinedOutput() - - // Parse JSON output (bd stats --json may exit with error but still produce valid JSON) - var stats struct { - Open int `json:"open_issues"` - InProgress int `json:"in_progress_issues"` - Closed int `json:"closed_issues"` - Blocked int `json:"blocked_issues"` - } - if err := json.Unmarshal(output, &stats); err != nil { - // JSON parsing failed, try fallback - return getBeadsSummaryFallback(rigPath) - } - - return &BeadsSummary{ - Open: stats.Open, - InProgress: stats.InProgress, - Closed: stats.Closed, - Blocked: stats.Blocked, - } -} - -// getBeadsSummaryFallback counts issues by parsing bd list output. 
-func getBeadsSummaryFallback(rigPath string) *BeadsSummary { - summary := &BeadsSummary{} - - // Count open issues - if count := countBeadsIssues(rigPath, "open"); count >= 0 { - summary.Open = count - } - - // Count in_progress issues - if count := countBeadsIssues(rigPath, "in_progress"); count >= 0 { - summary.InProgress = count - } - - // Count closed issues - if count := countBeadsIssues(rigPath, "closed"); count >= 0 { - summary.Closed = count - } - - // Count blocked issues - cmd := exec.Command("bd", "blocked") - cmd.Dir = rigPath - output, err := cmd.Output() - if err == nil { - lines := strings.Split(strings.TrimSpace(string(output)), "\n") - // Filter out empty lines and header - count := 0 - for _, line := range lines { - line = strings.TrimSpace(line) - if line != "" && !strings.HasPrefix(line, "Blocked") && !strings.HasPrefix(line, "---") { - count++ - } - } - summary.Blocked = count - } - - return summary -} - -// countBeadsIssues counts issues with a given status. -func countBeadsIssues(rigPath, status string) int { - cmd := exec.Command("bd", "list", "--status="+status) - cmd.Dir = rigPath - output, err := cmd.Output() - if err != nil { - return 0 - } - // Count non-empty lines (each line is one issue) - lines := strings.Split(strings.TrimSpace(string(output)), "\n") - count := 0 - for _, line := range lines { - if strings.TrimSpace(line) != "" { - count++ - } - } - return count -} diff --git a/internal/cmd/root.go b/internal/cmd/root.go index 6e603444..9d4bad9c 100644 --- a/internal/cmd/root.go +++ b/internal/cmd/root.go @@ -3,8 +3,10 @@ package cmd import ( "os" + "strings" "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/keepalive" ) var rootCmd = &cobra.Command{ @@ -14,6 +16,12 @@ var rootCmd = &cobra.Command{ It coordinates agent spawning, work distribution, and communication across distributed teams of AI agents working on shared codebases.`, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + // Signal agent 
activity by touching keepalive file + // Build command path: gt status, gt mail send, etc. + cmdPath := buildCommandPath(cmd) + keepalive.TouchWithArgs(cmdPath, args) + }, } // Execute runs the root command @@ -27,3 +35,13 @@ func init() { // Global flags can be added here // rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file") } + +// buildCommandPath walks the command hierarchy to build the full command path. +// For example: "gt mail send", "gt status", etc. +func buildCommandPath(cmd *cobra.Command) string { + var parts []string + for c := cmd; c != nil; c = c.Parent() { + parts = append([]string{c.Name()}, parts...) + } + return strings.Join(parts, " ") +} diff --git a/internal/cmd/session.go b/internal/cmd/session.go index aa0af1f1..7dec6ca4 100644 --- a/internal/cmd/session.go +++ b/internal/cmd/session.go @@ -5,7 +5,9 @@ import ( "fmt" "os" "path/filepath" + "strconv" "strings" + "time" "github.com/spf13/cobra" "github.com/steveyegge/gastown/internal/config" @@ -29,8 +31,9 @@ var ( ) var sessionCmd = &cobra.Command{ - Use: "session", - Short: "Manage polecat sessions", + Use: "session", + Aliases: []string{"sess"}, + Short: "Manage polecat sessions", Long: `Manage tmux sessions for polecats. Sessions are tmux sessions running Claude for each polecat. @@ -84,12 +87,17 @@ Shows session status, rig, and polecat name. Use --rig to filter by rig.`, } var sessionCaptureCmd = &cobra.Command{ - Use: "capture /", + Use: "capture / [count]", Short: "Capture recent session output", Long: `Capture recent output from a polecat session. -Returns the last N lines of terminal output. Useful for checking progress.`, - Args: cobra.ExactArgs(1), +Returns the last N lines of terminal output. Useful for checking progress. 
+ +Examples: + gt session capture wyvern/Toast # Last 100 lines (default) + gt session capture wyvern/Toast 50 # Last 50 lines + gt session capture wyvern/Toast -n 50 # Same as above`, + Args: cobra.RangeArgs(1, 2), RunE: runSessionCapture, } @@ -107,6 +115,27 @@ Examples: RunE: runSessionInject, } +var sessionRestartCmd = &cobra.Command{ + Use: "restart /", + Short: "Restart a polecat session", + Long: `Restart a polecat session (stop + start). + +Gracefully stops the current session and starts a fresh one. +Use --force to skip graceful shutdown.`, + Args: cobra.ExactArgs(1), + RunE: runSessionRestart, +} + +var sessionStatusCmd = &cobra.Command{ + Use: "status /", + Short: "Show session status details", + Long: `Show detailed status for a polecat session. + +Displays running state, uptime, session info, and activity.`, + Args: cobra.ExactArgs(1), + RunE: runSessionStatus, +} + func init() { // Start flags sessionStartCmd.Flags().StringVar(&sessionIssue, "issue", "", "Issue ID to work on") @@ -125,6 +154,9 @@ func init() { sessionInjectCmd.Flags().StringVarP(&sessionMessage, "message", "m", "", "Message to inject") sessionInjectCmd.Flags().StringVarP(&sessionFile, "file", "f", "", "File to read message from") + // Restart flags + sessionRestartCmd.Flags().BoolVarP(&sessionForce, "force", "f", false, "Force immediate shutdown") + // Add subcommands sessionCmd.AddCommand(sessionStartCmd) sessionCmd.AddCommand(sessionStopCmd) @@ -132,6 +164,8 @@ func init() { sessionCmd.AddCommand(sessionListCmd) sessionCmd.AddCommand(sessionCaptureCmd) sessionCmd.AddCommand(sessionInjectCmd) + sessionCmd.AddCommand(sessionRestartCmd) + sessionCmd.AddCommand(sessionStatusCmd) rootCmd.AddCommand(sessionCmd) } @@ -351,7 +385,20 @@ func runSessionCapture(cmd *cobra.Command, args []string) error { return err } - output, err := mgr.Capture(polecatName, sessionLines) + // Use positional count if provided, otherwise use flag value + lines := sessionLines + if len(args) > 1 { + n, err := 
strconv.Atoi(args[1]) + if err != nil { + return fmt.Errorf("invalid line count '%s': must be a number", args[1]) + } + if n <= 0 { + return fmt.Errorf("line count must be positive, got %d", n) + } + lines = n + } + + output, err := mgr.Capture(polecatName, lines) if err != nil { return fmt.Errorf("capturing output: %w", err) } @@ -393,3 +440,108 @@ func runSessionInject(cmd *cobra.Command, args []string) error { style.Bold.Render("✓"), rigName, polecatName) return nil } + +func runSessionRestart(cmd *cobra.Command, args []string) error { + rigName, polecatName, err := parseAddress(args[0]) + if err != nil { + return err + } + + mgr, _, err := getSessionManager(rigName) + if err != nil { + return err + } + + // Check if running + running, err := mgr.IsRunning(polecatName) + if err != nil { + return fmt.Errorf("checking session: %w", err) + } + + if running { + // Stop first + if sessionForce { + fmt.Printf("Force stopping session for %s/%s...\n", rigName, polecatName) + } else { + fmt.Printf("Stopping session for %s/%s...\n", rigName, polecatName) + } + if err := mgr.Stop(polecatName, sessionForce); err != nil { + return fmt.Errorf("stopping session: %w", err) + } + } + + // Start fresh session + fmt.Printf("Starting session for %s/%s...\n", rigName, polecatName) + opts := session.StartOptions{} + if err := mgr.Start(polecatName, opts); err != nil { + return fmt.Errorf("starting session: %w", err) + } + + fmt.Printf("%s Session restarted. 
Attach with: %s\n", + style.Bold.Render("✓"), + style.Dim.Render(fmt.Sprintf("gt session at %s/%s", rigName, polecatName))) + return nil +} + +func runSessionStatus(cmd *cobra.Command, args []string) error { + rigName, polecatName, err := parseAddress(args[0]) + if err != nil { + return err + } + + mgr, _, err := getSessionManager(rigName) + if err != nil { + return err + } + + // Get session info + info, err := mgr.Status(polecatName) + if err != nil { + return fmt.Errorf("getting status: %w", err) + } + + // Format output + fmt.Printf("%s Session: %s/%s\n\n", style.Bold.Render("📺"), rigName, polecatName) + + if info.Running { + fmt.Printf(" State: %s\n", style.Bold.Render("● running")) + } else { + fmt.Printf(" State: %s\n", style.Dim.Render("○ stopped")) + return nil + } + + fmt.Printf(" Session ID: %s\n", info.SessionID) + + if info.Attached { + fmt.Printf(" Attached: yes\n") + } else { + fmt.Printf(" Attached: no\n") + } + + if !info.Created.IsZero() { + uptime := time.Since(info.Created) + fmt.Printf(" Created: %s\n", info.Created.Format("2006-01-02 15:04:05")) + fmt.Printf(" Uptime: %s\n", formatDuration(uptime)) + } + + fmt.Printf("\nAttach with: %s\n", style.Dim.Render(fmt.Sprintf("gt session at %s/%s", rigName, polecatName))) + return nil +} + +// formatDuration formats a duration for human display. 
+func formatDuration(d time.Duration) string { + if d < time.Minute { + return fmt.Sprintf("%ds", int(d.Seconds())) + } + if d < time.Hour { + return fmt.Sprintf("%dm %ds", int(d.Minutes()), int(d.Seconds())%60) + } + hours := int(d.Hours()) + mins := int(d.Minutes()) % 60 + if hours >= 24 { + days := hours / 24 + hours = hours % 24 + return fmt.Sprintf("%dd %dh %dm", days, hours, mins) + } + return fmt.Sprintf("%dh %dm", hours, mins) +} diff --git a/internal/cmd/spawn.go b/internal/cmd/spawn.go index 608922a0..4811db81 100644 --- a/internal/cmd/spawn.go +++ b/internal/cmd/spawn.go @@ -4,12 +4,14 @@ import ( "bytes" "encoding/json" "fmt" + "math/rand" "os/exec" "path/filepath" "strings" "time" "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/beads" "github.com/steveyegge/gastown/internal/config" "github.com/steveyegge/gastown/internal/git" "github.com/steveyegge/gastown/internal/polecat" @@ -20,28 +22,49 @@ import ( "github.com/steveyegge/gastown/internal/workspace" ) +// polecatNames are Mad Max: Fury Road themed names for auto-generated polecats. +var polecatNames = []string{ + "Nux", "Toast", "Capable", "Cheedo", "Dag", "Rictus", "Slit", "Morsov", + "Ace", "Coma", "Valkyrie", "Keeper", "Vuvalini", "Organic", "Immortan", + "Corpus", "Doof", "Scabrous", "Splendid", "Fragile", +} + // Spawn command flags var ( - spawnIssue string - spawnMessage string - spawnCreate bool - spawnNoStart bool + spawnIssue string + spawnMessage string + spawnCreate bool + spawnNoStart bool + spawnPolecat string + spawnRig string + spawnMolecule string ) var spawnCmd = &cobra.Command{ - Use: "spawn | ", - Short: "Spawn a polecat with work assignment", + Use: "spawn [rig/polecat | rig]", + Aliases: []string{"sp"}, + Short: "Spawn a polecat with work assignment", Long: `Spawn a polecat with a work assignment. Assigns an issue or task to a polecat and starts a session. If no polecat is specified, auto-selects an idle polecat in the rig. 
+When --molecule is specified, the molecule is first instantiated on the parent +issue (creating child steps), then the polecat is spawned on the first ready step. + Examples: gt spawn gastown/Toast --issue gt-abc gt spawn gastown --issue gt-def # auto-select polecat gt spawn gastown/Nux -m "Fix the tests" # free-form task - gt spawn gastown/Capable --issue gt-xyz --create # create if missing`, - Args: cobra.ExactArgs(1), + gt spawn gastown/Capable --issue gt-xyz --create # create if missing + + # Flag-based selection (rig inferred from current directory): + gt spawn --issue gt-xyz --polecat Angharad + gt spawn --issue gt-abc --rig gastown --polecat Toast + + # With molecule workflow: + gt spawn --issue gt-abc --molecule mol-engineer-box`, + Args: cobra.MaximumNArgs(1), RunE: runSpawn, } @@ -50,6 +73,9 @@ func init() { spawnCmd.Flags().StringVarP(&spawnMessage, "message", "m", "", "Free-form task description") spawnCmd.Flags().BoolVar(&spawnCreate, "create", false, "Create polecat if it doesn't exist") spawnCmd.Flags().BoolVar(&spawnNoStart, "no-start", false, "Assign work but don't start session") + spawnCmd.Flags().StringVar(&spawnPolecat, "polecat", "", "Polecat name (alternative to positional arg)") + spawnCmd.Flags().StringVar(&spawnRig, "rig", "", "Rig name (defaults to current directory's rig)") + spawnCmd.Flags().StringVar(&spawnMolecule, "molecule", "", "Molecule ID to instantiate on the issue") rootCmd.AddCommand(spawnCmd) } @@ -69,18 +95,40 @@ func runSpawn(cmd *cobra.Command, args []string) error { return fmt.Errorf("must specify --issue or -m/--message") } - // Parse address: rig/polecat or just rig - rigName, polecatName, err := parseSpawnAddress(args[0]) - if err != nil { - return err + // --molecule requires --issue + if spawnMolecule != "" && spawnIssue == "" { + return fmt.Errorf("--molecule requires --issue to be specified") } - // Find workspace and rig + // Find workspace first (needed for rig inference) townRoot, err := 
workspace.FindFromCwdOrError() if err != nil { return fmt.Errorf("not in a Gas Town workspace: %w", err) } + var rigName, polecatName string + + // Determine rig and polecat from positional arg or flags + if len(args) > 0 { + // Parse address: rig/polecat or just rig + rigName, polecatName, err = parseSpawnAddress(args[0]) + if err != nil { + return err + } + } else { + // No positional arg - use flags + polecatName = spawnPolecat + rigName = spawnRig + + // If no --rig flag, infer from current directory + if rigName == "" { + rigName, err = inferRigFromCwd(townRoot) + if err != nil { + return fmt.Errorf("cannot determine rig: %w\nUse --rig to specify explicitly or provide rig/polecat as positional arg", err) + } + } + } + rigsConfigPath := filepath.Join(townRoot, "mayor", "rigs.json") rigsConfig, err := config.LoadRigsConfig(rigsConfigPath) if err != nil { @@ -102,9 +150,16 @@ func runSpawn(cmd *cobra.Command, args []string) error { if polecatName == "" { polecatName, err = selectIdlePolecat(polecatMgr, r) if err != nil { - return fmt.Errorf("auto-select polecat: %w", err) + // If --create is set, generate a new polecat name instead of failing + if spawnCreate { + polecatName = generatePolecatName(polecatMgr) + fmt.Printf("Generated polecat name: %s\n", polecatName) + } else { + return fmt.Errorf("auto-select polecat: %w", err) + } + } else { + fmt.Printf("Auto-selected polecat: %s\n", polecatName) } - fmt.Printf("Auto-selected polecat: %s\n", polecatName) } // Check/create polecat @@ -129,20 +184,92 @@ func runSpawn(cmd *cobra.Command, args []string) error { return fmt.Errorf("polecat '%s' is already working on %s", polecatName, pc.Issue) } - // Get issue details if specified + // Beads operations use mayor/rig directory (rig-level beads) + beadsPath := filepath.Join(r.Path, "mayor", "rig") + + // Sync beads to ensure fresh state before spawn operations + if err := syncBeads(beadsPath, true); err != nil { + // Non-fatal - continue with possibly stale beads + 
fmt.Printf("%s beads sync: %v\n", style.Dim.Render("Warning:"), err) + } + + // Handle molecule instantiation if specified + if spawnMolecule != "" { + b := beads.New(beadsPath) + + // Get the molecule + mol, err := b.Show(spawnMolecule) + if err != nil { + return fmt.Errorf("getting molecule %s: %w", spawnMolecule, err) + } + + if mol.Type != "molecule" { + return fmt.Errorf("%s is not a molecule (type: %s)", spawnMolecule, mol.Type) + } + + // Validate the molecule + if err := beads.ValidateMolecule(mol); err != nil { + return fmt.Errorf("invalid molecule: %w", err) + } + + // Get the parent issue + parent, err := b.Show(spawnIssue) + if err != nil { + return fmt.Errorf("getting parent issue %s: %w", spawnIssue, err) + } + + // Instantiate the molecule + fmt.Printf("Instantiating molecule %s on %s...\n", spawnMolecule, spawnIssue) + steps, err := b.InstantiateMolecule(mol, parent, beads.InstantiateOptions{}) + if err != nil { + return fmt.Errorf("instantiating molecule: %w", err) + } + + fmt.Printf("%s Created %d steps\n", style.Bold.Render("✓"), len(steps)) + for _, step := range steps { + fmt.Printf(" %s: %s\n", style.Dim.Render(step.ID), step.Title) + } + + // Find the first ready step (one with no dependencies) + var firstReadyStep *beads.Issue + for _, step := range steps { + if len(step.DependsOn) == 0 { + firstReadyStep = step + break + } + } + + if firstReadyStep == nil { + return fmt.Errorf("no ready step found in molecule (all steps have dependencies)") + } + + // Switch to spawning on the first ready step + fmt.Printf("\nSpawning on first ready step: %s\n", firstReadyStep.ID) + spawnIssue = firstReadyStep.ID + } + + // Get or create issue var issue *BeadsIssue + var assignmentID string if spawnIssue != "" { - issue, err = fetchBeadsIssue(r.Path, spawnIssue) + // Use existing issue + issue, err = fetchBeadsIssue(beadsPath, spawnIssue) if err != nil { return fmt.Errorf("fetching issue %s: %w", spawnIssue, err) } + assignmentID = spawnIssue + } else { + 
// Create a beads issue for free-form task + fmt.Printf("Creating beads issue for task...\n") + issue, err = createBeadsTask(beadsPath, spawnMessage) + if err != nil { + return fmt.Errorf("creating task issue: %w", err) + } + assignmentID = issue.ID + fmt.Printf("Created issue %s\n", assignmentID) } - // Assign issue/task to polecat - assignmentID := spawnIssue - if assignmentID == "" { - assignmentID = "task:" + time.Now().Format("20060102-150405") - } + // Assign issue to polecat (sets issue.assignee in beads) if err := polecatMgr.AssignIssue(polecatName, assignmentID); err != nil { return fmt.Errorf("assigning issue: %w", err) } @@ -151,6 +278,12 @@ func runSpawn(cmd *cobra.Command, args []string) error { style.Bold.Render("✓"), assignmentID, rigName, polecatName) + // Sync beads to push assignment changes + if err := syncBeads(beadsPath, false); err != nil { + // Non-fatal warning + fmt.Printf("%s beads push: %v\n", style.Dim.Render("Warning:"), err) + } + // Stop here if --no-start if spawnNoStart { fmt.Printf("\n %s\n", style.Dim.Render("Use 'gt session start' to start the session")) @@ -172,12 +305,14 @@ func runSpawn(cmd *cobra.Command, args []string) error { if err := sessMgr.Start(polecatName, session.StartOptions{}); err != nil { return fmt.Errorf("starting session: %w", err) } - // Wait for claude to initialize - time.Sleep(2 * time.Second) + // Wait for Claude to fully initialize (needs 4-5s for prompt) + fmt.Printf("Waiting for Claude to initialize...\n") + time.Sleep(5 * time.Second) } // Inject initial context context := buildSpawnContext(issue, spawnMessage) + fmt.Printf("Injecting work assignment...\n") if err := sessMgr.Inject(polecatName, context); err != nil { return fmt.Errorf("injecting context: %w", err) } @@ -201,6 +336,38 @@ func parseSpawnAddress(addr string) (rigName, polecatName string, err error) { return addr, "", nil } +// generatePolecatName generates a unique polecat name that doesn't conflict with existing ones. 
+func generatePolecatName(mgr *polecat.Manager) string { + existing, _ := mgr.List() + existingNames := make(map[string]bool) + for _, p := range existing { + existingNames[p.Name] = true + } + + // Try to find an unused name from the list + // Shuffle to avoid always picking the same name + shuffled := make([]string, len(polecatNames)) + copy(shuffled, polecatNames) + rand.Shuffle(len(shuffled), func(i, j int) { + shuffled[i], shuffled[j] = shuffled[j], shuffled[i] + }) + + for _, name := range shuffled { + if !existingNames[name] { + return name + } + } + + // All names taken, generate one with a number suffix + base := shuffled[0] + for i := 2; ; i++ { + name := fmt.Sprintf("%s%d", base, i) + if !existingNames[name] { + return name + } + } +} + // selectIdlePolecat finds an idle polecat in the rig. func selectIdlePolecat(mgr *polecat.Manager, r *rig.Rig) (string, error) { polecats, err := mgr.List() @@ -268,6 +435,56 @@ func fetchBeadsIssue(rigPath, issueID string) (*BeadsIssue, error) { return &issues[0], nil } +// createBeadsTask creates a new beads task issue for a free-form task message. +func createBeadsTask(rigPath, message string) (*BeadsIssue, error) { + // Truncate message for title if too long + title := message + if len(title) > 60 { + title = title[:57] + "..." 
+ } + + // Use bd create to make a new task issue + cmd := exec.Command("bd", "create", + "--title="+title, + "--type=task", + "--priority=2", + "--description="+message, + "--json") + cmd.Dir = rigPath + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + errMsg := strings.TrimSpace(stderr.String()) + if errMsg != "" { + return nil, fmt.Errorf("%s", errMsg) + } + return nil, err + } + + // bd create --json returns the created issue + var issue BeadsIssue + if err := json.Unmarshal(stdout.Bytes(), &issue); err != nil { + return nil, fmt.Errorf("parsing created issue: %w", err) + } + + return &issue, nil +} + +// syncBeads runs bd sync in the given directory. +// This ensures beads state is fresh before spawn operations. +func syncBeads(workDir string, fromMain bool) error { + args := []string{"sync"} + if fromMain { + args = append(args, "--from-main") + } + cmd := exec.Command("bd", args...) + cmd.Dir = workDir + return cmd.Run() +} + // buildSpawnContext creates the initial context message for the polecat. func buildSpawnContext(issue *BeadsIssue, message string) string { var sb strings.Builder @@ -286,7 +503,14 @@ func buildSpawnContext(issue *BeadsIssue, message string) string { sb.WriteString(fmt.Sprintf("Task: %s\n", message)) } - sb.WriteString("\nWork on this task. When complete, commit your changes and signal DONE.\n") + sb.WriteString("\n## Workflow\n") + sb.WriteString("1. Run `gt prime` to load polecat context\n") + sb.WriteString("2. Run `bd sync --from-main` to get fresh beads\n") + sb.WriteString("3. Work on your task, commit changes\n") + sb.WriteString("4. Run `bd close ` when done\n") + sb.WriteString("5. Run `bd sync` to push beads changes\n") + sb.WriteString("6. Push code: `git push origin HEAD`\n") + sb.WriteString("7. 
Signal DONE with summary\n") return sb.String() } diff --git a/internal/cmd/status.go b/internal/cmd/status.go index bbb0c754..358f8aae 100644 --- a/internal/cmd/status.go +++ b/internal/cmd/status.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/steveyegge/gastown/internal/config" + "github.com/steveyegge/gastown/internal/crew" "github.com/steveyegge/gastown/internal/git" "github.com/steveyegge/gastown/internal/rig" "github.com/steveyegge/gastown/internal/style" @@ -17,8 +18,9 @@ import ( var statusJSON bool var statusCmd = &cobra.Command{ - Use: "status", - Short: "Show overall town status", + Use: "status", + Aliases: []string{"stat"}, + Short: "Show overall town status", Long: `Display the current status of the Gas Town workspace. Shows town name, registered rigs, active polecats, and witness status.`, @@ -43,6 +45,8 @@ type RigStatus struct { Name string `json:"name"` Polecats []string `json:"polecats"` PolecatCount int `json:"polecat_count"` + Crews []string `json:"crews"` + CrewCount int `json:"crew_count"` HasWitness bool `json:"has_witness"` HasRefinery bool `json:"has_refinery"` } @@ -51,6 +55,7 @@ type RigStatus struct { type StatusSum struct { RigCount int `json:"rig_count"` PolecatCount int `json:"polecat_count"` + CrewCount int `json:"crew_count"` WitnessCount int `json:"witness_count"` RefineryCount int `json:"refinery_count"` } @@ -103,10 +108,22 @@ func runStatus(cmd *cobra.Command, args []string) error { HasWitness: r.HasWitness, HasRefinery: r.HasRefinery, } + + // Count crew workers + crewGit := git.NewGit(r.Path) + crewMgr := crew.NewManager(r, crewGit) + if workers, err := crewMgr.List(); err == nil { + for _, w := range workers { + rs.Crews = append(rs.Crews, w.Name) + } + rs.CrewCount = len(workers) + } + status.Rigs = append(status.Rigs, rs) // Update summary status.Summary.PolecatCount += len(r.Polecats) + status.Summary.CrewCount += rs.CrewCount if r.HasWitness { status.Summary.WitnessCount++ } @@ -138,6 +155,7 @@ func 
outputStatusText(status TownStatus) error { fmt.Printf("%s\n", style.Bold.Render("Summary")) fmt.Printf(" Rigs: %d\n", status.Summary.RigCount) fmt.Printf(" Polecats: %d\n", status.Summary.PolecatCount) + fmt.Printf(" Crews: %d\n", status.Summary.CrewCount) fmt.Printf(" Witnesses: %d\n", status.Summary.WitnessCount) fmt.Printf(" Refineries: %d\n", status.Summary.RefineryCount) @@ -157,6 +175,9 @@ func outputStatusText(status TownStatus) error { if r.HasRefinery { indicators += " 🏭" } + if r.CrewCount > 0 { + indicators += " 👤" + } fmt.Printf(" %s%s\n", style.Bold.Render(r.Name), indicators) @@ -165,6 +186,10 @@ func outputStatusText(status TownStatus) error { } else { fmt.Printf(" %s\n", style.Dim.Render("No polecats")) } + + if len(r.Crews) > 0 { + fmt.Printf(" Crews: %v\n", r.Crews) + } } return nil diff --git a/internal/cmd/statusline.go b/internal/cmd/statusline.go new file mode 100644 index 00000000..266d486a --- /dev/null +++ b/internal/cmd/statusline.go @@ -0,0 +1,145 @@ +package cmd + +import ( + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/mail" + "github.com/steveyegge/gastown/internal/tmux" +) + +var ( + statusLineSession string +) + +var statusLineCmd = &cobra.Command{ + Use: "status-line", + Short: "Output status line content for tmux (internal use)", + Hidden: true, // Internal command called by tmux + RunE: runStatusLine, +} + +func init() { + rootCmd.AddCommand(statusLineCmd) + statusLineCmd.Flags().StringVar(&statusLineSession, "session", "", "Tmux session name") +} + +func runStatusLine(cmd *cobra.Command, args []string) error { + t := tmux.NewTmux() + + // Get session environment + var rigName, polecat, crew, issue, role string + + if statusLineSession != "" { + rigName, _ = t.GetEnvironment(statusLineSession, "GT_RIG") + polecat, _ = t.GetEnvironment(statusLineSession, "GT_POLECAT") + crew, _ = t.GetEnvironment(statusLineSession, "GT_CREW") + issue, _ = t.GetEnvironment(statusLineSession, 
"GT_ISSUE") + role, _ = t.GetEnvironment(statusLineSession, "GT_ROLE") + } else { + // Fallback to process environment + rigName = os.Getenv("GT_RIG") + polecat = os.Getenv("GT_POLECAT") + crew = os.Getenv("GT_CREW") + issue = os.Getenv("GT_ISSUE") + role = os.Getenv("GT_ROLE") + } + + // Determine identity and output based on role + if role == "mayor" || statusLineSession == "gt-mayor" { + return runMayorStatusLine(t) + } + + // Build mail identity + var identity string + if rigName != "" { + if polecat != "" { + identity = fmt.Sprintf("%s/%s", rigName, polecat) + } else if crew != "" { + identity = fmt.Sprintf("%s/%s", rigName, crew) + } + } + + // Build status parts + var parts []string + + // Current issue + if issue != "" { + parts = append(parts, issue) + } + + // Mail count + if identity != "" { + unread := getUnreadMailCount(identity) + if unread > 0 { + parts = append(parts, fmt.Sprintf("\U0001F4EC %d", unread)) // mail emoji + } + } + + // Output + if len(parts) > 0 { + fmt.Print(strings.Join(parts, " | ") + " |") + } + + return nil +} + +func runMayorStatusLine(t *tmux.Tmux) error { + // Count active sessions by listing tmux sessions + sessions, err := t.ListSessions() + if err != nil { + return nil // Silent fail + } + + // Count gt-* sessions (polecats) and rigs + polecatCount := 0 + rigs := make(map[string]bool) + for _, s := range sessions { + if strings.HasPrefix(s, "gt-") && s != "gt-mayor" { + polecatCount++ + // Extract rig name: gt-- + parts := strings.SplitN(s, "-", 3) + if len(parts) >= 2 { + rigs[parts[1]] = true + } + } + } + rigCount := len(rigs) + + // Get mayor mail + unread := getUnreadMailCount("mayor/") + + // Build status + var parts []string + parts = append(parts, fmt.Sprintf("%d polecats", polecatCount)) + parts = append(parts, fmt.Sprintf("%d rigs", rigCount)) + if unread > 0 { + parts = append(parts, fmt.Sprintf("\U0001F4EC %d", unread)) + } + + fmt.Print(strings.Join(parts, " | ") + " |") + return nil +} + +// getUnreadMailCount 
returns unread mail count for an identity. +// Fast path - returns 0 on any error. +func getUnreadMailCount(identity string) int { + // Find workspace + workDir, err := findBeadsWorkDir() + if err != nil { + return 0 + } + + // Create mailbox using beads + mailbox := mail.NewMailboxBeads(identity, workDir) + + // Get count + _, unread, err := mailbox.Count() + if err != nil { + return 0 + } + + return unread +} diff --git a/internal/cmd/swarm.go b/internal/cmd/swarm.go index 5bcf6aba..e2e8a3a8 100644 --- a/internal/cmd/swarm.go +++ b/internal/cmd/swarm.go @@ -11,9 +11,12 @@ import ( "github.com/spf13/cobra" "github.com/steveyegge/gastown/internal/config" "github.com/steveyegge/gastown/internal/git" + "github.com/steveyegge/gastown/internal/polecat" "github.com/steveyegge/gastown/internal/rig" + "github.com/steveyegge/gastown/internal/session" "github.com/steveyegge/gastown/internal/style" "github.com/steveyegge/gastown/internal/swarm" + "github.com/steveyegge/gastown/internal/tmux" "github.com/steveyegge/gastown/internal/workspace" ) @@ -116,7 +119,7 @@ func init() { swarmCreateCmd.Flags().StringSliceVar(&swarmWorkers, "worker", nil, "Polecat names to assign (repeatable)") swarmCreateCmd.Flags().BoolVar(&swarmStart, "start", false, "Start swarm immediately after creation") swarmCreateCmd.Flags().StringVar(&swarmTarget, "target", "main", "Target branch for landing") - swarmCreateCmd.MarkFlagRequired("epic") + _ = swarmCreateCmd.MarkFlagRequired("epic") // Status flags swarmStatusCmd.Flags().BoolVar(&swarmStatusJSON, "json", false, "Output as JSON") @@ -291,13 +294,14 @@ func runSwarmCreate(cmd *cobra.Command, args []string) error { func runSwarmStart(cmd *cobra.Command, args []string) error { swarmID := args[0] - // Find the swarm + // Find the swarm and its rig rigs, _, err := getAllRigs() if err != nil { return err } var store *SwarmStore + var foundRig *rig.Rig for _, r := range rigs { s, err := LoadSwarmStore(r.Path) @@ -307,6 +311,7 @@ func runSwarmStart(cmd 
*cobra.Command, args []string) error { if _, exists := s.Swarms[swarmID]; exists { store = s + foundRig = r break } } @@ -329,6 +334,73 @@ func runSwarmStart(cmd *cobra.Command, args []string) error { } fmt.Printf("%s Swarm %s started\n", style.Bold.Render("✓"), swarmID) + + // Spawn sessions for workers with tasks + if len(sw.Workers) > 0 && len(sw.Tasks) > 0 { + fmt.Printf("\nSpawning workers...\n") + if err := spawnSwarmWorkers(foundRig, sw); err != nil { + fmt.Printf("Warning: failed to spawn some workers: %v\n", err) + } + } + + return nil +} + +// spawnSwarmWorkers spawns sessions for swarm workers with task assignments. +func spawnSwarmWorkers(r *rig.Rig, sw *swarm.Swarm) error { + t := tmux.NewTmux() + sessMgr := session.NewManager(t, r) + polecatGit := git.NewGit(r.Path) + polecatMgr := polecat.NewManager(r, polecatGit) + + // Pair workers with tasks (round-robin if more tasks than workers) + workerIdx := 0 + for i, task := range sw.Tasks { + if task.State != swarm.TaskPending { + continue + } + + if workerIdx >= len(sw.Workers) { + break // No more workers + } + + worker := sw.Workers[workerIdx] + workerIdx++ + + // Assign task to worker in swarm state + sw.Tasks[i].Assignee = worker + sw.Tasks[i].State = swarm.TaskAssigned + + // Update polecat state + if err := polecatMgr.AssignIssue(worker, task.IssueID); err != nil { + fmt.Printf(" Warning: couldn't assign %s to %s: %v\n", task.IssueID, worker, err) + continue + } + + // Check if already running + running, _ := sessMgr.IsRunning(worker) + if running { + fmt.Printf(" %s already running, injecting task...\n", worker) + } else { + fmt.Printf(" Starting %s...\n", worker) + if err := sessMgr.Start(worker, session.StartOptions{}); err != nil { + fmt.Printf(" Warning: couldn't start %s: %v\n", worker, err) + continue + } + // Wait for Claude to initialize + time.Sleep(5 * time.Second) + } + + // Inject work assignment + context := fmt.Sprintf("[SWARM] You are part of swarm %s.\n\nAssigned task: %s\nTitle: 
%s\n\nWork on this task. When complete, commit and signal DONE.", + sw.ID, task.IssueID, task.Title) + if err := sessMgr.Inject(worker, context); err != nil { + fmt.Printf(" Warning: couldn't inject to %s: %v\n", worker, err) + } else { + fmt.Printf(" %s → %s ✓\n", worker, task.IssueID) + } + } + return nil } @@ -533,8 +605,8 @@ func runSwarmLand(cmd *cobra.Command, args []string) error { // Create manager and land mgr := swarm.NewManager(foundRig) // Reload swarm into manager - mgr.Create(sw.EpicID, sw.Workers, sw.TargetBranch) - mgr.UpdateState(sw.ID, sw.State) + _, _ = mgr.Create(sw.EpicID, sw.Workers, sw.TargetBranch) + _ = mgr.UpdateState(sw.ID, sw.State) fmt.Printf("Landing swarm %s to %s...\n", swarmID, sw.TargetBranch) diff --git a/internal/cmd/theme.go b/internal/cmd/theme.go new file mode 100644 index 00000000..64df0f25 --- /dev/null +++ b/internal/cmd/theme.go @@ -0,0 +1,193 @@ +package cmd + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + "github.com/steveyegge/gastown/internal/tmux" +) + +var ( + themeListFlag bool + themeApplyFlag bool +) + +var themeCmd = &cobra.Command{ + Use: "theme [name]", + Short: "View or set tmux theme for the current rig", + Long: `Manage tmux status bar themes for Gas Town sessions. + +Without arguments, shows the current theme assignment. +With a name argument, sets the theme for this rig. 
+ +Examples: + gt theme # Show current theme + gt theme --list # List available themes + gt theme forest # Set theme to 'forest' + gt theme apply # Apply theme to all running sessions in this rig`, + RunE: runTheme, +} + +var themeApplyCmd = &cobra.Command{ + Use: "apply", + Short: "Apply theme to all running sessions in this rig", + RunE: runThemeApply, +} + +func init() { + rootCmd.AddCommand(themeCmd) + themeCmd.AddCommand(themeApplyCmd) + themeCmd.Flags().BoolVarP(&themeListFlag, "list", "l", false, "List available themes") +} + +func runTheme(cmd *cobra.Command, args []string) error { + // List mode + if themeListFlag { + fmt.Println("Available themes:") + for _, name := range tmux.ListThemeNames() { + theme := tmux.GetThemeByName(name) + fmt.Printf(" %-10s %s\n", name, theme.Style()) + } + // Also show Mayor theme + mayor := tmux.MayorTheme() + fmt.Printf(" %-10s %s (Mayor only)\n", mayor.Name, mayor.Style()) + return nil + } + + // Determine current rig + rigName := detectCurrentRig() + if rigName == "" { + rigName = "unknown" + } + + // Show current theme assignment + if len(args) == 0 { + theme := tmux.AssignTheme(rigName) + fmt.Printf("Rig: %s\n", rigName) + fmt.Printf("Theme: %s (%s)\n", theme.Name, theme.Style()) + return nil + } + + // Set theme + themeName := args[0] + theme := tmux.GetThemeByName(themeName) + if theme == nil { + return fmt.Errorf("unknown theme: %s (use --list to see available themes)", themeName) + } + + // TODO: Save to rig config.json + fmt.Printf("Theme '%s' selected for rig '%s'\n", themeName, rigName) + fmt.Println("Note: Run 'gt theme apply' to apply to running sessions") + fmt.Println("(Persistent config not yet implemented)") + + return nil +} + +func runThemeApply(cmd *cobra.Command, args []string) error { + t := tmux.NewTmux() + + // Get all sessions + sessions, err := t.ListSessions() + if err != nil { + return fmt.Errorf("listing sessions: %w", err) + } + + // Determine current rig + rigName := detectCurrentRig() + + // 
Apply to matching sessions
+	applied := 0
+	for _, session := range sessions {
+		if !strings.HasPrefix(session, "gt-") {
+			continue
+		}
+
+		// Determine theme and identity for this session
+		var theme tmux.Theme
+		var rig, worker, role string
+
+		if session == "gt-mayor" {
+			theme = tmux.MayorTheme()
+			worker = "Mayor"
+			role = "coordinator"
+		} else {
+			// Parse session name: gt-<rig>-<polecat> or gt-<rig>-crew-<name>
+			parts := strings.SplitN(session, "-", 3)
+			if len(parts) < 3 {
+				continue
+			}
+			rig = parts[1]
+
+			// Skip if not matching current rig (if we know it)
+			if rigName != "" && rig != rigName {
+				continue
+			}
+
+			workerPart := parts[2]
+			if strings.HasPrefix(workerPart, "crew-") {
+				worker = strings.TrimPrefix(workerPart, "crew-")
+				role = "crew"
+			} else {
+				worker = workerPart
+				role = "polecat"
+			}
+
+			theme = tmux.AssignTheme(rig)
+		}
+
+		// Apply theme and status format
+		if err := t.ApplyTheme(session, theme); err != nil {
+			fmt.Printf("  %s: failed (%v)\n", session, err)
+			continue
+		}
+		if err := t.SetStatusFormat(session, rig, worker, role); err != nil {
+			fmt.Printf("  %s: failed to set format (%v)\n", session, err)
+			continue
+		}
+		if err := t.SetDynamicStatus(session); err != nil {
+			fmt.Printf("  %s: failed to set dynamic status (%v)\n", session, err)
+			continue
+		}
+
+		fmt.Printf("  %s: applied %s theme\n", session, theme.Name)
+		applied++
+	}
+
+	if applied == 0 {
+		fmt.Println("No matching sessions found")
+	} else {
+		fmt.Printf("\nApplied theme to %d session(s)\n", applied)
+	}
+
+	return nil
+}
+
+// detectCurrentRig determines the rig from environment or cwd.
+func detectCurrentRig() string {
+	// Try environment first
+	if rig := detectCurrentSession(); rig != "" {
+		// Extract rig from session name
+		parts := strings.SplitN(rig, "-", 3)
+		if len(parts) >= 2 && parts[0] == "gt" {
+			return parts[1]
+		}
+	}
+
+	// Try to detect from cwd
+	cwd, err := findBeadsWorkDir()
+	if err != nil {
+		return ""
+	}
+
+	// Extract rig name from path
+	// Typical paths: /Users/stevey/gt/<rig>/...
+	parts := strings.Split(cwd, "/")
+	for i, p := range parts {
+		if p == "gt" && i+1 < len(parts) {
+			return parts[i+1]
+		}
+	}
+
+	return ""
+}
diff --git a/internal/cmd/version.go b/internal/cmd/version.go
index cc745438..d62a9f8d 100644
--- a/internal/cmd/version.go
+++ b/internal/cmd/version.go
@@ -9,7 +9,7 @@ import (
 
 // Version information - set at build time via ldflags
 var (
-	Version   = "0.1.0"
+	Version   = "0.0.1"
 	BuildTime = "unknown"
 	GitCommit = "unknown"
 )
diff --git a/internal/cmd/witness.go b/internal/cmd/witness.go
new file mode 100644
index 00000000..c86f3009
--- /dev/null
+++ b/internal/cmd/witness.go
@@ -0,0 +1,291 @@
+package cmd
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/spf13/cobra"
+	"github.com/steveyegge/gastown/internal/config"
+	"github.com/steveyegge/gastown/internal/git"
+	"github.com/steveyegge/gastown/internal/rig"
+	"github.com/steveyegge/gastown/internal/style"
+	"github.com/steveyegge/gastown/internal/tmux"
+	"github.com/steveyegge/gastown/internal/witness"
+	"github.com/steveyegge/gastown/internal/workspace"
+)
+
+// Witness command flags
+var (
+	witnessForeground bool
+	witnessStatusJSON bool
+)
+
+var witnessCmd = &cobra.Command{
+	Use:   "witness",
+	Short: "Manage the polecat monitoring agent",
+	Long: `Manage the Witness monitoring agent for a rig.
+
+The Witness monitors polecats for stuck/idle state, nudges polecats
+that seem blocked, and reports status to the mayor.`,
+}
+
+var witnessStartCmd = &cobra.Command{
+	Use:   "start <rig>",
+	Short: "Start the witness",
+	Long: `Start the Witness for a rig.
+
+Launches the monitoring agent which watches polecats for stuck or idle
+states and takes action to keep work flowing.
+
+Examples:
+  gt witness start gastown
+  gt witness start gastown --foreground`,
+	Args: cobra.ExactArgs(1),
+	RunE: runWitnessStart,
+}
+
+var witnessStopCmd = &cobra.Command{
+	Use:   "stop <rig>",
+	Short: "Stop the witness",
+	Long: `Stop a running Witness.
+
+Gracefully stops the witness monitoring agent.`,
+	Args: cobra.ExactArgs(1),
+	RunE: runWitnessStop,
+}
+
+var witnessStatusCmd = &cobra.Command{
+	Use:   "status <rig>",
+	Short: "Show witness status",
+	Long: `Show the status of a rig's Witness.
+
+Displays running state, monitored polecats, and statistics.`,
+	Args: cobra.ExactArgs(1),
+	RunE: runWitnessStatus,
+}
+
+var witnessAttachCmd = &cobra.Command{
+	Use:     "attach <rig>",
+	Aliases: []string{"at"},
+	Short:   "Attach to witness session",
+	Long: `Attach to the Witness tmux session for a rig.
+
+Attaches the current terminal to the witness's tmux session.
+Detach with Ctrl-B D.
+
+If the witness is not running, this will start it first.`,
+	Args: cobra.ExactArgs(1),
+	RunE: runWitnessAttach,
+}
+
+func init() {
+	// Start flags
+	witnessStartCmd.Flags().BoolVar(&witnessForeground, "foreground", false, "Run in foreground (default: background)")
+
+	// Status flags
+	witnessStatusCmd.Flags().BoolVar(&witnessStatusJSON, "json", false, "Output as JSON")
+
+	// Add subcommands
+	witnessCmd.AddCommand(witnessStartCmd)
+	witnessCmd.AddCommand(witnessStopCmd)
+	witnessCmd.AddCommand(witnessStatusCmd)
+	witnessCmd.AddCommand(witnessAttachCmd)
+
+	rootCmd.AddCommand(witnessCmd)
+}
+
+// getWitnessManager creates a witness manager for a rig.
+func getWitnessManager(rigName string) (*witness.Manager, *rig.Rig, error) { + townRoot, err := workspace.FindFromCwdOrError() + if err != nil { + return nil, nil, fmt.Errorf("not in a Gas Town workspace: %w", err) + } + + rigsConfigPath := filepath.Join(townRoot, "mayor", "rigs.json") + rigsConfig, err := config.LoadRigsConfig(rigsConfigPath) + if err != nil { + rigsConfig = &config.RigsConfig{Rigs: make(map[string]config.RigEntry)} + } + + g := git.NewGit(townRoot) + rigMgr := rig.NewManager(townRoot, rigsConfig, g) + r, err := rigMgr.GetRig(rigName) + if err != nil { + return nil, nil, fmt.Errorf("rig '%s' not found", rigName) + } + + mgr := witness.NewManager(r) + return mgr, r, nil +} + +func runWitnessStart(cmd *cobra.Command, args []string) error { + rigName := args[0] + + mgr, _, err := getWitnessManager(rigName) + if err != nil { + return err + } + + fmt.Printf("Starting witness for %s...\n", rigName) + + if err := mgr.Start(witnessForeground); err != nil { + if err == witness.ErrAlreadyRunning { + fmt.Printf("%s Witness is already running\n", style.Dim.Render("⚠")) + return nil + } + return fmt.Errorf("starting witness: %w", err) + } + + if witnessForeground { + // This will block until stopped + return nil + } + + fmt.Printf("%s Witness started for %s\n", style.Bold.Render("✓"), rigName) + fmt.Printf(" %s\n", style.Dim.Render("Use 'gt witness status' to check progress")) + return nil +} + +func runWitnessStop(cmd *cobra.Command, args []string) error { + rigName := args[0] + + mgr, _, err := getWitnessManager(rigName) + if err != nil { + return err + } + + if err := mgr.Stop(); err != nil { + if err == witness.ErrNotRunning { + fmt.Printf("%s Witness is not running\n", style.Dim.Render("⚠")) + return nil + } + return fmt.Errorf("stopping witness: %w", err) + } + + fmt.Printf("%s Witness stopped for %s\n", style.Bold.Render("✓"), rigName) + return nil +} + +func runWitnessStatus(cmd *cobra.Command, args []string) error { + rigName := args[0] + + mgr, _, 
err := getWitnessManager(rigName) + if err != nil { + return err + } + + w, err := mgr.Status() + if err != nil { + return fmt.Errorf("getting status: %w", err) + } + + // JSON output + if witnessStatusJSON { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(w) + } + + // Human-readable output + fmt.Printf("%s Witness: %s\n\n", style.Bold.Render("👁"), rigName) + + stateStr := string(w.State) + switch w.State { + case witness.StateRunning: + stateStr = style.Bold.Render("● running") + case witness.StateStopped: + stateStr = style.Dim.Render("○ stopped") + case witness.StatePaused: + stateStr = style.Dim.Render("⏸ paused") + } + fmt.Printf(" State: %s\n", stateStr) + + if w.StartedAt != nil { + fmt.Printf(" Started: %s\n", w.StartedAt.Format("2006-01-02 15:04:05")) + } + + if w.LastCheckAt != nil { + fmt.Printf(" Last check: %s\n", w.LastCheckAt.Format("2006-01-02 15:04:05")) + } + + // Show monitored polecats + fmt.Printf("\n %s\n", style.Bold.Render("Monitored Polecats:")) + if len(w.MonitoredPolecats) == 0 { + fmt.Printf(" %s\n", style.Dim.Render("(none)")) + } else { + for _, p := range w.MonitoredPolecats { + fmt.Printf(" • %s\n", p) + } + } + + fmt.Printf("\n %s\n", style.Bold.Render("Statistics:")) + fmt.Printf(" Checks today: %d\n", w.Stats.TodayChecks) + fmt.Printf(" Nudges today: %d\n", w.Stats.TodayNudges) + fmt.Printf(" Total checks: %d\n", w.Stats.TotalChecks) + fmt.Printf(" Total nudges: %d\n", w.Stats.TotalNudges) + fmt.Printf(" Total escalations: %d\n", w.Stats.TotalEscalations) + + return nil +} + +// witnessSessionName returns the tmux session name for a rig's witness. 
+func witnessSessionName(rigName string) string { + return fmt.Sprintf("gt-witness-%s", rigName) +} + +func runWitnessAttach(cmd *cobra.Command, args []string) error { + rigName := args[0] + + // Verify rig exists + _, r, err := getWitnessManager(rigName) + if err != nil { + return err + } + + t := tmux.NewTmux() + sessionName := witnessSessionName(rigName) + + // Check if session exists + running, err := t.HasSession(sessionName) + if err != nil { + return fmt.Errorf("checking session: %w", err) + } + + if !running { + // Start witness session (like Mayor) + fmt.Printf("Starting witness session for %s...\n", rigName) + + if err := t.NewSession(sessionName, r.Path); err != nil { + return fmt.Errorf("creating session: %w", err) + } + + // Set environment + t.SetEnvironment(sessionName, "GT_ROLE", "witness") + t.SetEnvironment(sessionName, "GT_RIG", rigName) + + // Apply theme (same as rig polecats) + theme := tmux.AssignTheme(rigName) + _ = t.ConfigureGasTownSession(sessionName, theme, rigName, "witness", "witness") + + // Launch Claude in a respawn loop + loopCmd := `while true; do echo "👁️ Starting Witness for ` + rigName + `..."; claude --dangerously-skip-permissions; echo ""; echo "Witness exited. Restarting in 2s... 
(Ctrl-C to stop)"; sleep 2; done` + if err := t.SendKeysDelayed(sessionName, loopCmd, 200); err != nil { + return fmt.Errorf("sending command: %w", err) + } + } + + // Attach to the session + tmuxPath, err := exec.LookPath("tmux") + if err != nil { + return fmt.Errorf("tmux not found: %w", err) + } + + attachCmd := exec.Command(tmuxPath, "attach-session", "-t", sessionName) + attachCmd.Stdin = os.Stdin + attachCmd.Stdout = os.Stdout + attachCmd.Stderr = os.Stderr + return attachCmd.Run() +} diff --git a/internal/config/loader.go b/internal/config/loader.go index 4c123da2..885d279f 100644 --- a/internal/config/loader.go +++ b/internal/config/loader.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "path/filepath" + "time" ) var ( @@ -186,3 +187,104 @@ func validateAgentState(s *AgentState) error { } return nil } + +// LoadRigConfig loads and validates a rig configuration file. +func LoadRigConfig(path string) (*RigConfig, error) { + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("%w: %s", ErrNotFound, path) + } + return nil, fmt.Errorf("reading config: %w", err) + } + + var config RigConfig + if err := json.Unmarshal(data, &config); err != nil { + return nil, fmt.Errorf("parsing config: %w", err) + } + + if err := validateRigConfig(&config); err != nil { + return nil, err + } + + return &config, nil +} + +// SaveRigConfig saves a rig configuration to a file. +func SaveRigConfig(path string, config *RigConfig) error { + if err := validateRigConfig(config); err != nil { + return err + } + + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return fmt.Errorf("creating directory: %w", err) + } + + data, err := json.MarshalIndent(config, "", " ") + if err != nil { + return fmt.Errorf("encoding config: %w", err) + } + + if err := os.WriteFile(path, data, 0644); err != nil { + return fmt.Errorf("writing config: %w", err) + } + + return nil +} + +// validateRigConfig validates a RigConfig. 
+func validateRigConfig(c *RigConfig) error { + if c.Type != "rig" && c.Type != "" { + return fmt.Errorf("%w: expected type 'rig', got '%s'", ErrInvalidType, c.Type) + } + if c.Version > CurrentRigConfigVersion { + return fmt.Errorf("%w: got %d, max supported %d", ErrInvalidVersion, c.Version, CurrentRigConfigVersion) + } + + // Validate merge queue config if present + if c.MergeQueue != nil { + if err := validateMergeQueueConfig(c.MergeQueue); err != nil { + return err + } + } + + return nil +} + +// ErrInvalidOnConflict indicates an invalid on_conflict strategy. +var ErrInvalidOnConflict = errors.New("invalid on_conflict strategy") + +// validateMergeQueueConfig validates a MergeQueueConfig. +func validateMergeQueueConfig(c *MergeQueueConfig) error { + // Validate on_conflict strategy + if c.OnConflict != "" && c.OnConflict != OnConflictAssignBack && c.OnConflict != OnConflictAutoRebase { + return fmt.Errorf("%w: got '%s', want '%s' or '%s'", + ErrInvalidOnConflict, c.OnConflict, OnConflictAssignBack, OnConflictAutoRebase) + } + + // Validate poll_interval if specified + if c.PollInterval != "" { + if _, err := time.ParseDuration(c.PollInterval); err != nil { + return fmt.Errorf("invalid poll_interval: %w", err) + } + } + + // Validate non-negative values + if c.RetryFlakyTests < 0 { + return fmt.Errorf("%w: retry_flaky_tests must be non-negative", ErrMissingField) + } + if c.MaxConcurrent < 0 { + return fmt.Errorf("%w: max_concurrent must be non-negative", ErrMissingField) + } + + return nil +} + +// NewRigConfig creates a new RigConfig with defaults. 
+func NewRigConfig() *RigConfig { + return &RigConfig{ + Type: "rig", + Version: CurrentRigConfigVersion, + MergeQueue: DefaultMergeQueueConfig(), + } +} diff --git a/internal/config/loader_test.go b/internal/config/loader_test.go index d00fbe94..f318e274 100644 --- a/internal/config/loader_test.go +++ b/internal/config/loader_test.go @@ -130,3 +130,196 @@ func TestValidationErrors(t *testing.T) { t.Error("expected error for missing role") } } + +func TestRigConfigRoundTrip(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config.json") + + original := NewRigConfig() + + if err := SaveRigConfig(path, original); err != nil { + t.Fatalf("SaveRigConfig: %v", err) + } + + loaded, err := LoadRigConfig(path) + if err != nil { + t.Fatalf("LoadRigConfig: %v", err) + } + + if loaded.Type != "rig" { + t.Errorf("Type = %q, want 'rig'", loaded.Type) + } + if loaded.Version != CurrentRigConfigVersion { + t.Errorf("Version = %d, want %d", loaded.Version, CurrentRigConfigVersion) + } + if loaded.MergeQueue == nil { + t.Fatal("MergeQueue is nil") + } + if !loaded.MergeQueue.Enabled { + t.Error("MergeQueue.Enabled = false, want true") + } + if loaded.MergeQueue.TargetBranch != "main" { + t.Errorf("MergeQueue.TargetBranch = %q, want 'main'", loaded.MergeQueue.TargetBranch) + } + if loaded.MergeQueue.OnConflict != OnConflictAssignBack { + t.Errorf("MergeQueue.OnConflict = %q, want %q", loaded.MergeQueue.OnConflict, OnConflictAssignBack) + } +} + +func TestRigConfigWithCustomMergeQueue(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config.json") + + original := &RigConfig{ + Type: "rig", + Version: 1, + MergeQueue: &MergeQueueConfig{ + Enabled: true, + TargetBranch: "develop", + IntegrationBranches: false, + OnConflict: OnConflictAutoRebase, + RunTests: true, + TestCommand: "make test", + DeleteMergedBranches: false, + RetryFlakyTests: 3, + PollInterval: "1m", + MaxConcurrent: 2, + }, + } + + if err := SaveRigConfig(path, original); err != nil { + 
t.Fatalf("SaveRigConfig: %v", err) + } + + loaded, err := LoadRigConfig(path) + if err != nil { + t.Fatalf("LoadRigConfig: %v", err) + } + + mq := loaded.MergeQueue + if mq.TargetBranch != "develop" { + t.Errorf("TargetBranch = %q, want 'develop'", mq.TargetBranch) + } + if mq.OnConflict != OnConflictAutoRebase { + t.Errorf("OnConflict = %q, want %q", mq.OnConflict, OnConflictAutoRebase) + } + if mq.TestCommand != "make test" { + t.Errorf("TestCommand = %q, want 'make test'", mq.TestCommand) + } + if mq.RetryFlakyTests != 3 { + t.Errorf("RetryFlakyTests = %d, want 3", mq.RetryFlakyTests) + } + if mq.PollInterval != "1m" { + t.Errorf("PollInterval = %q, want '1m'", mq.PollInterval) + } + if mq.MaxConcurrent != 2 { + t.Errorf("MaxConcurrent = %d, want 2", mq.MaxConcurrent) + } +} + +func TestRigConfigValidation(t *testing.T) { + tests := []struct { + name string + config *RigConfig + wantErr bool + }{ + { + name: "valid config", + config: &RigConfig{ + Type: "rig", + Version: 1, + MergeQueue: DefaultMergeQueueConfig(), + }, + wantErr: false, + }, + { + name: "valid config without merge queue", + config: &RigConfig{ + Type: "rig", + Version: 1, + }, + wantErr: false, + }, + { + name: "wrong type", + config: &RigConfig{ + Type: "wrong", + Version: 1, + }, + wantErr: true, + }, + { + name: "invalid on_conflict", + config: &RigConfig{ + Type: "rig", + Version: 1, + MergeQueue: &MergeQueueConfig{ + OnConflict: "invalid", + }, + }, + wantErr: true, + }, + { + name: "invalid poll_interval", + config: &RigConfig{ + Type: "rig", + Version: 1, + MergeQueue: &MergeQueueConfig{ + PollInterval: "not-a-duration", + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateRigConfig(tt.config) + if (err != nil) != tt.wantErr { + t.Errorf("validateRigConfig() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestDefaultMergeQueueConfig(t *testing.T) { + cfg := DefaultMergeQueueConfig() + + if !cfg.Enabled 
{ + t.Error("Enabled should be true by default") + } + if cfg.TargetBranch != "main" { + t.Errorf("TargetBranch = %q, want 'main'", cfg.TargetBranch) + } + if !cfg.IntegrationBranches { + t.Error("IntegrationBranches should be true by default") + } + if cfg.OnConflict != OnConflictAssignBack { + t.Errorf("OnConflict = %q, want %q", cfg.OnConflict, OnConflictAssignBack) + } + if !cfg.RunTests { + t.Error("RunTests should be true by default") + } + if cfg.TestCommand != "go test ./..." { + t.Errorf("TestCommand = %q, want 'go test ./...'", cfg.TestCommand) + } + if !cfg.DeleteMergedBranches { + t.Error("DeleteMergedBranches should be true by default") + } + if cfg.RetryFlakyTests != 1 { + t.Errorf("RetryFlakyTests = %d, want 1", cfg.RetryFlakyTests) + } + if cfg.PollInterval != "30s" { + t.Errorf("PollInterval = %q, want '30s'", cfg.PollInterval) + } + if cfg.MaxConcurrent != 1 { + t.Errorf("MaxConcurrent = %d, want 1", cfg.MaxConcurrent) + } +} + +func TestLoadRigConfigNotFound(t *testing.T) { + _, err := LoadRigConfig("/nonexistent/path.json") + if err == nil { + t.Fatal("expected error for nonexistent file") + } +} diff --git a/internal/config/types.go b/internal/config/types.go index 0c6a6933..e86a622c 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -43,3 +43,85 @@ const CurrentTownVersion = 1 // CurrentRigsVersion is the current schema version for RigsConfig. const CurrentRigsVersion = 1 + +// CurrentRigConfigVersion is the current schema version for RigConfig. +const CurrentRigConfigVersion = 1 + +// RigConfig represents the per-rig configuration (rig/config.json). +type RigConfig struct { + Type string `json:"type"` // "rig" + Version int `json:"version"` // schema version + MergeQueue *MergeQueueConfig `json:"merge_queue,omitempty"` // merge queue settings + Theme *ThemeConfig `json:"theme,omitempty"` // tmux theme settings +} + +// ThemeConfig represents tmux theme settings for a rig. 
+type ThemeConfig struct { + // Name picks from the default palette (e.g., "ocean", "forest"). + // If empty, a theme is auto-assigned based on rig name. + Name string `json:"name,omitempty"` + + // Custom overrides the palette with specific colors. + Custom *CustomTheme `json:"custom,omitempty"` +} + +// CustomTheme allows specifying exact colors for the status bar. +type CustomTheme struct { + BG string `json:"bg"` // Background color (hex or tmux color name) + FG string `json:"fg"` // Foreground color (hex or tmux color name) +} + +// MergeQueueConfig represents merge queue settings for a rig. +type MergeQueueConfig struct { + // Enabled controls whether the merge queue is active. + Enabled bool `json:"enabled"` + + // TargetBranch is the default branch to merge into (usually "main"). + TargetBranch string `json:"target_branch"` + + // IntegrationBranches enables integration branch workflow for epics. + IntegrationBranches bool `json:"integration_branches"` + + // OnConflict specifies conflict resolution strategy: "assign_back" or "auto_rebase". + OnConflict string `json:"on_conflict"` + + // RunTests controls whether to run tests before merging. + RunTests bool `json:"run_tests"` + + // TestCommand is the command to run for tests. + TestCommand string `json:"test_command,omitempty"` + + // DeleteMergedBranches controls whether to delete branches after merging. + DeleteMergedBranches bool `json:"delete_merged_branches"` + + // RetryFlakyTests is the number of times to retry flaky tests. + RetryFlakyTests int `json:"retry_flaky_tests"` + + // PollInterval is how often to poll for new merge requests (e.g., "30s"). + PollInterval string `json:"poll_interval"` + + // MaxConcurrent is the maximum number of concurrent merges. + MaxConcurrent int `json:"max_concurrent"` +} + +// OnConflict strategy constants. 
+const ( + OnConflictAssignBack = "assign_back" + OnConflictAutoRebase = "auto_rebase" +) + +// DefaultMergeQueueConfig returns a MergeQueueConfig with sensible defaults. +func DefaultMergeQueueConfig() *MergeQueueConfig { + return &MergeQueueConfig{ + Enabled: true, + TargetBranch: "main", + IntegrationBranches: true, + OnConflict: OnConflictAssignBack, + RunTests: true, + TestCommand: "go test ./...", + DeleteMergedBranches: true, + RetryFlakyTests: 1, + PollInterval: "30s", + MaxConcurrent: 1, + } +} diff --git a/internal/crew/manager.go b/internal/crew/manager.go index e9927e65..f71c90b6 100644 --- a/internal/crew/manager.go +++ b/internal/crew/manager.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "os/exec" "path/filepath" "time" @@ -80,11 +81,11 @@ func (m *Manager) Add(name string, createBranch bool) (*CrewWorker, error) { if createBranch { branchName = fmt.Sprintf("crew/%s", name) if err := crewGit.CreateBranch(branchName); err != nil { - os.RemoveAll(crewPath) + _ = os.RemoveAll(crewPath) return nil, fmt.Errorf("creating branch: %w", err) } if err := crewGit.Checkout(branchName); err != nil { - os.RemoveAll(crewPath) + _ = os.RemoveAll(crewPath) return nil, fmt.Errorf("checking out branch: %w", err) } } @@ -92,13 +93,13 @@ func (m *Manager) Add(name string, createBranch bool) (*CrewWorker, error) { // Create mail directory for mail delivery mailPath := m.mailDir(name) if err := os.MkdirAll(mailPath, 0755); err != nil { - os.RemoveAll(crewPath) + _ = os.RemoveAll(crewPath) return nil, fmt.Errorf("creating mail dir: %w", err) } // Create CLAUDE.md with crew worker prompting if err := m.createClaudeMD(name, crewPath); err != nil { - os.RemoveAll(crewPath) + _ = os.RemoveAll(crewPath) return nil, fmt.Errorf("creating CLAUDE.md: %w", err) } @@ -115,7 +116,7 @@ func (m *Manager) Add(name string, createBranch bool) (*CrewWorker, error) { // Save state if err := m.saveState(crew); err != nil { - os.RemoveAll(crewPath) + _ = os.RemoveAll(crewPath) return nil, 
fmt.Errorf("saving state: %w", err) } @@ -274,3 +275,96 @@ func (m *Manager) loadState(name string) (*CrewWorker, error) { return &crew, nil } + +// Rename renames a crew worker from oldName to newName. +func (m *Manager) Rename(oldName, newName string) error { + if !m.exists(oldName) { + return ErrCrewNotFound + } + if m.exists(newName) { + return ErrCrewExists + } + + oldPath := m.crewDir(oldName) + newPath := m.crewDir(newName) + + // Rename directory + if err := os.Rename(oldPath, newPath); err != nil { + return fmt.Errorf("renaming crew dir: %w", err) + } + + // Update state file with new name and path + crew, err := m.loadState(newName) + if err != nil { + // Rollback on error + _ = os.Rename(newPath, oldPath) + return fmt.Errorf("loading state: %w", err) + } + + crew.Name = newName + crew.ClonePath = newPath + crew.UpdatedAt = time.Now() + + if err := m.saveState(crew); err != nil { + // Rollback on error + _ = os.Rename(newPath, oldPath) + return fmt.Errorf("saving state: %w", err) + } + + return nil +} + +// Pristine ensures a crew worker is up-to-date with remote. +// It runs git pull --rebase and bd sync. +func (m *Manager) Pristine(name string) (*PristineResult, error) { + if !m.exists(name) { + return nil, ErrCrewNotFound + } + + crewPath := m.crewDir(name) + crewGit := git.NewGit(crewPath) + + result := &PristineResult{ + Name: name, + } + + // Check for uncommitted changes + hasChanges, err := crewGit.HasUncommittedChanges() + if err != nil { + return nil, fmt.Errorf("checking changes: %w", err) + } + result.HadChanges = hasChanges + + // Pull latest (use origin and current branch) + if err := crewGit.Pull("origin", ""); err != nil { + result.PullError = err.Error() + } else { + result.Pulled = true + } + + // Run bd sync + if err := m.runBdSync(crewPath); err != nil { + result.SyncError = err.Error() + } else { + result.Synced = true + } + + return result, nil +} + +// runBdSync runs bd sync in the given directory. 
+func (m *Manager) runBdSync(dir string) error { + cmd := exec.Command("bd", "sync") + cmd.Dir = dir + return cmd.Run() +} + +// PristineResult captures the results of a pristine operation. +type PristineResult struct { + Name string `json:"name"` + HadChanges bool `json:"had_changes"` + Pulled bool `json:"pulled"` + PullError string `json:"pull_error,omitempty"` + Synced bool `json:"synced"` + SyncError string `json:"sync_error,omitempty"` +} diff --git a/internal/crew/manager_test.go b/internal/crew/manager_test.go index f5bea974..59ce81e2 100644 --- a/internal/crew/manager_test.go +++ b/internal/crew/manager_test.go @@ -16,7 +16,7 @@ func TestManagerAddAndGet(t *testing.T) { if err != nil { t.Fatalf("failed to create temp dir: %v", err) } - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() // Create a mock rig rigPath := filepath.Join(tmpDir, "test-rig") @@ -107,7 +107,7 @@ func TestManagerAddWithBranch(t *testing.T) { if err != nil { t.Fatalf("failed to create temp dir: %v", err) } - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() // Create a mock rig rigPath := filepath.Join(tmpDir, "test-rig") @@ -176,7 +176,7 @@ func TestManagerList(t *testing.T) { if err != nil { t.Fatalf("failed to create temp dir: %v", err) } - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() // Create a mock rig rigPath := filepath.Join(tmpDir, "test-rig") @@ -234,7 +234,7 @@ func TestManagerRemove(t *testing.T) { if err != nil { t.Fatalf("failed to create temp dir: %v", err) } - defer os.RemoveAll(tmpDir) + defer func() { _ = os.RemoveAll(tmpDir) }() // Create a mock rig rigPath := filepath.Join(tmpDir, "test-rig") diff --git a/internal/daemon/backoff.go b/internal/daemon/backoff.go new file mode 100644 index 00000000..abc43ac6 --- /dev/null +++ b/internal/daemon/backoff.go @@ -0,0 +1,187 @@ +package daemon + +import ( + "time" +) + +// BackoffStrategy defines how intervals grow. 
+type BackoffStrategy string + +const ( + // StrategyFixed keeps the same interval (no backoff). + StrategyFixed BackoffStrategy = "fixed" + + // StrategyGeometric multiplies by a factor each miss (1.5x). + StrategyGeometric BackoffStrategy = "geometric" + + // StrategyExponential doubles interval each miss (2x). + StrategyExponential BackoffStrategy = "exponential" +) + +// BackoffConfig holds backoff configuration. +type BackoffConfig struct { + // Strategy determines how intervals grow. + Strategy BackoffStrategy + + // BaseInterval is the starting interval (default 60s). + BaseInterval time.Duration + + // MaxInterval is the cap on how large intervals can grow (default 10m). + MaxInterval time.Duration + + // Factor is the multiplier for geometric backoff (default 1.5). + Factor float64 +} + +// DefaultBackoffConfig returns sensible defaults. +func DefaultBackoffConfig() *BackoffConfig { + return &BackoffConfig{ + Strategy: StrategyGeometric, + BaseInterval: 60 * time.Second, + MaxInterval: 10 * time.Minute, + Factor: 1.5, + } +} + +// AgentBackoff tracks backoff state for a single agent. +type AgentBackoff struct { + // AgentID identifies the agent (e.g., "mayor", "gastown-witness"). + AgentID string + + // BaseInterval is the starting interval. + BaseInterval time.Duration + + // CurrentInterval is the current (possibly backed-off) interval. + CurrentInterval time.Duration + + // MaxInterval caps how large intervals can grow. + MaxInterval time.Duration + + // ConsecutiveMiss counts pokes with no response. + ConsecutiveMiss int + + // LastPoke is when we last poked this agent. + LastPoke time.Time + + // LastActivity is when the agent last showed activity. + LastActivity time.Time +} + +// NewAgentBackoff creates backoff state for an agent. 
+func NewAgentBackoff(agentID string, config *BackoffConfig) *AgentBackoff { + if config == nil { + config = DefaultBackoffConfig() + } + return &AgentBackoff{ + AgentID: agentID, + BaseInterval: config.BaseInterval, + CurrentInterval: config.BaseInterval, + MaxInterval: config.MaxInterval, + } +} + +// ShouldPoke returns true if enough time has passed since the last poke. +func (ab *AgentBackoff) ShouldPoke() bool { + if ab.LastPoke.IsZero() { + return true // Never poked + } + return time.Since(ab.LastPoke) >= ab.CurrentInterval +} + +// RecordPoke records that we poked the agent. +func (ab *AgentBackoff) RecordPoke() { + ab.LastPoke = time.Now() +} + +// RecordMiss records that the agent didn't respond since last poke. +// This increases the backoff interval. +func (ab *AgentBackoff) RecordMiss(config *BackoffConfig) { + ab.ConsecutiveMiss++ + + if config == nil { + config = DefaultBackoffConfig() + } + + switch config.Strategy { + case StrategyFixed: + // No change + case StrategyGeometric: + ab.CurrentInterval = time.Duration(float64(ab.CurrentInterval) * config.Factor) + case StrategyExponential: + ab.CurrentInterval = ab.CurrentInterval * 2 + } + + // Cap at max interval + if ab.CurrentInterval > ab.MaxInterval { + ab.CurrentInterval = ab.MaxInterval + } +} + +// RecordActivity records that the agent showed activity. +// This resets the backoff to the base interval. +func (ab *AgentBackoff) RecordActivity() { + ab.ConsecutiveMiss = 0 + ab.CurrentInterval = ab.BaseInterval + ab.LastActivity = time.Now() +} + +// BackoffManager tracks backoff state for all agents. +type BackoffManager struct { + config *BackoffConfig + agents map[string]*AgentBackoff +} + +// NewBackoffManager creates a new backoff manager. 
+func NewBackoffManager(config *BackoffConfig) *BackoffManager { + if config == nil { + config = DefaultBackoffConfig() + } + return &BackoffManager{ + config: config, + agents: make(map[string]*AgentBackoff), + } +} + +// GetOrCreate returns backoff state for an agent, creating if needed. +func (bm *BackoffManager) GetOrCreate(agentID string) *AgentBackoff { + if ab, ok := bm.agents[agentID]; ok { + return ab + } + ab := NewAgentBackoff(agentID, bm.config) + bm.agents[agentID] = ab + return ab +} + +// ShouldPoke returns true if we should poke the given agent. +func (bm *BackoffManager) ShouldPoke(agentID string) bool { + return bm.GetOrCreate(agentID).ShouldPoke() +} + +// RecordPoke records that we poked an agent. +func (bm *BackoffManager) RecordPoke(agentID string) { + bm.GetOrCreate(agentID).RecordPoke() +} + +// RecordMiss records that an agent didn't respond. +func (bm *BackoffManager) RecordMiss(agentID string) { + bm.GetOrCreate(agentID).RecordMiss(bm.config) +} + +// RecordActivity records that an agent showed activity. +func (bm *BackoffManager) RecordActivity(agentID string) { + bm.GetOrCreate(agentID).RecordActivity() +} + +// GetInterval returns the current interval for an agent. +func (bm *BackoffManager) GetInterval(agentID string) time.Duration { + return bm.GetOrCreate(agentID).CurrentInterval +} + +// Stats returns a map of agent ID to current interval for logging. 
+func (bm *BackoffManager) Stats() map[string]time.Duration { + stats := make(map[string]time.Duration, len(bm.agents)) + for id, ab := range bm.agents { + stats[id] = ab.CurrentInterval + } + return stats +} diff --git a/internal/daemon/backoff_test.go b/internal/daemon/backoff_test.go new file mode 100644 index 00000000..3af1f82d --- /dev/null +++ b/internal/daemon/backoff_test.go @@ -0,0 +1,290 @@ +package daemon + +import ( + "testing" + "time" +) + +func TestDefaultBackoffConfig(t *testing.T) { + config := DefaultBackoffConfig() + + if config.Strategy != StrategyGeometric { + t.Errorf("expected strategy Geometric, got %v", config.Strategy) + } + if config.BaseInterval != 60*time.Second { + t.Errorf("expected base interval 60s, got %v", config.BaseInterval) + } + if config.MaxInterval != 10*time.Minute { + t.Errorf("expected max interval 10m, got %v", config.MaxInterval) + } + if config.Factor != 1.5 { + t.Errorf("expected factor 1.5, got %v", config.Factor) + } +} + +func TestNewAgentBackoff(t *testing.T) { + config := DefaultBackoffConfig() + ab := NewAgentBackoff("test-agent", config) + + if ab.AgentID != "test-agent" { + t.Errorf("expected agent ID 'test-agent', got %s", ab.AgentID) + } + if ab.BaseInterval != 60*time.Second { + t.Errorf("expected base interval 60s, got %v", ab.BaseInterval) + } + if ab.CurrentInterval != 60*time.Second { + t.Errorf("expected current interval 60s, got %v", ab.CurrentInterval) + } + if ab.ConsecutiveMiss != 0 { + t.Errorf("expected consecutive miss 0, got %d", ab.ConsecutiveMiss) + } +} + +func TestAgentBackoff_ShouldPoke(t *testing.T) { + config := &BackoffConfig{ + Strategy: StrategyGeometric, + BaseInterval: 100 * time.Millisecond, // Short for testing + MaxInterval: 1 * time.Second, + Factor: 1.5, + } + ab := NewAgentBackoff("test", config) + + // Should poke immediately (never poked) + if !ab.ShouldPoke() { + t.Error("expected ShouldPoke=true for new agent") + } + + // Record a poke + ab.RecordPoke() + + // Should not 
poke immediately after + if ab.ShouldPoke() { + t.Error("expected ShouldPoke=false immediately after poke") + } + + // Wait for interval + time.Sleep(110 * time.Millisecond) + + // Now should poke again + if !ab.ShouldPoke() { + t.Error("expected ShouldPoke=true after interval elapsed") + } +} + +func TestAgentBackoff_GeometricBackoff(t *testing.T) { + config := &BackoffConfig{ + Strategy: StrategyGeometric, + BaseInterval: 100 * time.Millisecond, + MaxInterval: 1 * time.Second, + Factor: 1.5, + } + ab := NewAgentBackoff("test", config) + + // Initial interval + if ab.CurrentInterval != 100*time.Millisecond { + t.Errorf("expected initial interval 100ms, got %v", ab.CurrentInterval) + } + + // First miss: 100ms * 1.5 = 150ms + ab.RecordMiss(config) + if ab.CurrentInterval != 150*time.Millisecond { + t.Errorf("expected interval 150ms after 1 miss, got %v", ab.CurrentInterval) + } + if ab.ConsecutiveMiss != 1 { + t.Errorf("expected consecutive miss 1, got %d", ab.ConsecutiveMiss) + } + + // Second miss: 150ms * 1.5 = 225ms + ab.RecordMiss(config) + if ab.CurrentInterval != 225*time.Millisecond { + t.Errorf("expected interval 225ms after 2 misses, got %v", ab.CurrentInterval) + } + + // Third miss: 225ms * 1.5 = 337.5ms + ab.RecordMiss(config) + expected := time.Duration(337500000) // 337.5ms in nanoseconds + if ab.CurrentInterval != expected { + t.Errorf("expected interval ~337.5ms after 3 misses, got %v", ab.CurrentInterval) + } +} + +func TestAgentBackoff_ExponentialBackoff(t *testing.T) { + config := &BackoffConfig{ + Strategy: StrategyExponential, + BaseInterval: 100 * time.Millisecond, + MaxInterval: 1 * time.Second, + Factor: 2.0, // Ignored for exponential + } + ab := NewAgentBackoff("test", config) + + // First miss: 100ms * 2 = 200ms + ab.RecordMiss(config) + if ab.CurrentInterval != 200*time.Millisecond { + t.Errorf("expected interval 200ms after 1 miss, got %v", ab.CurrentInterval) + } + + // Second miss: 200ms * 2 = 400ms + ab.RecordMiss(config) + if 
ab.CurrentInterval != 400*time.Millisecond { + t.Errorf("expected interval 400ms after 2 misses, got %v", ab.CurrentInterval) + } + + // Third miss: 400ms * 2 = 800ms + ab.RecordMiss(config) + if ab.CurrentInterval != 800*time.Millisecond { + t.Errorf("expected interval 800ms after 3 misses, got %v", ab.CurrentInterval) + } +} + +func TestAgentBackoff_FixedStrategy(t *testing.T) { + config := &BackoffConfig{ + Strategy: StrategyFixed, + BaseInterval: 100 * time.Millisecond, + MaxInterval: 1 * time.Second, + Factor: 1.5, + } + ab := NewAgentBackoff("test", config) + + // Multiple misses should not change interval + ab.RecordMiss(config) + ab.RecordMiss(config) + ab.RecordMiss(config) + + if ab.CurrentInterval != 100*time.Millisecond { + t.Errorf("expected interval to stay at 100ms with fixed strategy, got %v", ab.CurrentInterval) + } + if ab.ConsecutiveMiss != 3 { + t.Errorf("expected consecutive miss 3, got %d", ab.ConsecutiveMiss) + } +} + +func TestAgentBackoff_MaxInterval(t *testing.T) { + config := &BackoffConfig{ + Strategy: StrategyExponential, + BaseInterval: 100 * time.Millisecond, + MaxInterval: 500 * time.Millisecond, + Factor: 2.0, + } + ab := NewAgentBackoff("test", config) + + // Keep missing until we hit the cap + for i := 0; i < 10; i++ { + ab.RecordMiss(config) + } + + if ab.CurrentInterval != 500*time.Millisecond { + t.Errorf("expected interval capped at 500ms, got %v", ab.CurrentInterval) + } +} + +func TestAgentBackoff_RecordActivity(t *testing.T) { + config := &BackoffConfig{ + Strategy: StrategyGeometric, + BaseInterval: 100 * time.Millisecond, + MaxInterval: 1 * time.Second, + Factor: 1.5, + } + ab := NewAgentBackoff("test", config) + + // Build up some backoff + ab.RecordMiss(config) + ab.RecordMiss(config) + ab.RecordMiss(config) + + if ab.CurrentInterval == 100*time.Millisecond { + t.Error("expected interval to have increased") + } + if ab.ConsecutiveMiss != 3 { + t.Errorf("expected consecutive miss 3, got %d", ab.ConsecutiveMiss) + } + + 
// Record activity - should reset + ab.RecordActivity() + + if ab.CurrentInterval != 100*time.Millisecond { + t.Errorf("expected interval reset to 100ms, got %v", ab.CurrentInterval) + } + if ab.ConsecutiveMiss != 0 { + t.Errorf("expected consecutive miss reset to 0, got %d", ab.ConsecutiveMiss) + } + if ab.LastActivity.IsZero() { + t.Error("expected LastActivity to be set") + } +} + +func TestBackoffManager_GetOrCreate(t *testing.T) { + bm := NewBackoffManager(DefaultBackoffConfig()) + + // First call creates + ab1 := bm.GetOrCreate("agent1") + if ab1 == nil { + t.Fatal("expected agent backoff to be created") + } + if ab1.AgentID != "agent1" { + t.Errorf("expected agent ID 'agent1', got %s", ab1.AgentID) + } + + // Second call returns same instance + ab2 := bm.GetOrCreate("agent1") + if ab1 != ab2 { + t.Error("expected same instance on second call") + } + + // Different agent creates new instance + ab3 := bm.GetOrCreate("agent2") + if ab1 == ab3 { + t.Error("expected different instance for different agent") + } +} + +func TestBackoffManager_Stats(t *testing.T) { + config := &BackoffConfig{ + Strategy: StrategyGeometric, + BaseInterval: 100 * time.Millisecond, + MaxInterval: 1 * time.Second, + Factor: 1.5, + } + bm := NewBackoffManager(config) + + // Create some agents with different backoff states + bm.RecordPoke("agent1") + bm.RecordMiss("agent1") + + bm.RecordPoke("agent2") + bm.RecordMiss("agent2") + bm.RecordMiss("agent2") + + stats := bm.Stats() + + if len(stats) != 2 { + t.Errorf("expected 2 agents in stats, got %d", len(stats)) + } + + // agent1: 100ms * 1.5 = 150ms + if stats["agent1"] != 150*time.Millisecond { + t.Errorf("expected agent1 interval 150ms, got %v", stats["agent1"]) + } + + // agent2: 100ms * 1.5 * 1.5 = 225ms + if stats["agent2"] != 225*time.Millisecond { + t.Errorf("expected agent2 interval 225ms, got %v", stats["agent2"]) + } +} + +func TestExtractRigName(t *testing.T) { + tests := []struct { + session string + expected string + }{ + 
{"gt-gastown-witness", "gastown"}, + {"gt-myrig-witness", "myrig"}, + {"gt-my-rig-name-witness", "my-rig-name"}, + } + + for _, tc := range tests { + result := extractRigName(tc.session) + if result != tc.expected { + t.Errorf("extractRigName(%q) = %q, expected %q", tc.session, result, tc.expected) + } + } +} diff --git a/internal/daemon/daemon.go b/internal/daemon/daemon.go new file mode 100644 index 00000000..28d2095e --- /dev/null +++ b/internal/daemon/daemon.go @@ -0,0 +1,356 @@ +package daemon + +import ( + "context" + "fmt" + "log" + "os" + "os/signal" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/steveyegge/gastown/internal/keepalive" + "github.com/steveyegge/gastown/internal/tmux" +) + +// Daemon is the town-level background service. +type Daemon struct { + config *Config + tmux *tmux.Tmux + logger *log.Logger + ctx context.Context + cancel context.CancelFunc + backoff *BackoffManager +} + +// New creates a new daemon instance. +func New(config *Config) (*Daemon, error) { + // Ensure daemon directory exists + daemonDir := filepath.Dir(config.LogFile) + if err := os.MkdirAll(daemonDir, 0755); err != nil { + return nil, fmt.Errorf("creating daemon directory: %w", err) + } + + // Open log file + logFile, err := os.OpenFile(config.LogFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + return nil, fmt.Errorf("opening log file: %w", err) + } + + logger := log.New(logFile, "", log.LstdFlags) + ctx, cancel := context.WithCancel(context.Background()) + + return &Daemon{ + config: config, + tmux: tmux.NewTmux(), + logger: logger, + ctx: ctx, + cancel: cancel, + backoff: NewBackoffManager(DefaultBackoffConfig()), + }, nil +} + +// Run starts the daemon main loop. 
+func (d *Daemon) Run() error { + d.logger.Printf("Daemon starting (PID %d)", os.Getpid()) + + // Write PID file + if err := os.WriteFile(d.config.PidFile, []byte(strconv.Itoa(os.Getpid())), 0644); err != nil { + return fmt.Errorf("writing PID file: %w", err) + } + defer func() { _ = os.Remove(d.config.PidFile) }() + + // Update state + state := &State{ + Running: true, + PID: os.Getpid(), + StartedAt: time.Now(), + } + if err := SaveState(d.config.TownRoot, state); err != nil { + d.logger.Printf("Warning: failed to save state: %v", err) + } + + // Handle signals + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + // Heartbeat ticker + ticker := time.NewTicker(d.config.HeartbeatInterval) + defer ticker.Stop() + + d.logger.Printf("Daemon running, heartbeat every %v", d.config.HeartbeatInterval) + + // Initial heartbeat + d.heartbeat(state) + + for { + select { + case <-d.ctx.Done(): + d.logger.Println("Daemon context cancelled, shutting down") + return d.shutdown(state) + + case sig := <-sigChan: + d.logger.Printf("Received signal %v, shutting down", sig) + return d.shutdown(state) + + case <-ticker.C: + d.heartbeat(state) + } + } +} + +// heartbeat performs one heartbeat cycle. +func (d *Daemon) heartbeat(state *State) { + d.logger.Println("Heartbeat starting") + + // 1. Poke Mayor + d.pokeMayor() + + // 2. Poke Witnesses (for each rig) + d.pokeWitnesses() + + // 3. Process lifecycle requests + d.processLifecycleRequests() + + // Update state + state.LastHeartbeat = time.Now() + state.HeartbeatCount++ + if err := SaveState(d.config.TownRoot, state); err != nil { + d.logger.Printf("Warning: failed to save state: %v", err) + } + + d.logger.Printf("Heartbeat complete (#%d)", state.HeartbeatCount) +} + +// pokeMayor sends a heartbeat to the Mayor session. 
+func (d *Daemon) pokeMayor() { + const mayorSession = "gt-mayor" + const agentID = "mayor" + + running, err := d.tmux.HasSession(mayorSession) + if err != nil { + d.logger.Printf("Error checking Mayor session: %v", err) + return + } + + if !running { + d.logger.Println("Mayor session not running, skipping poke") + return + } + + // Check keepalive to see if agent is active + state := keepalive.Read(d.config.TownRoot) + if state != nil && state.IsFresh() { + // Agent is actively working, reset backoff + d.backoff.RecordActivity(agentID) + d.logger.Printf("Mayor is fresh (cmd: %s), skipping poke", state.LastCommand) + return + } + + // Check if we should poke based on backoff interval + if !d.backoff.ShouldPoke(agentID) { + interval := d.backoff.GetInterval(agentID) + d.logger.Printf("Mayor backoff in effect (interval: %v), skipping poke", interval) + return + } + + // Send heartbeat message via tmux + msg := "HEARTBEAT: check your rigs" + if err := d.tmux.SendKeys(mayorSession, msg); err != nil { + d.logger.Printf("Error poking Mayor: %v", err) + return + } + + d.backoff.RecordPoke(agentID) + + // If agent is stale or very stale, record a miss (increase backoff) + if state == nil || state.IsVeryStale() { + d.backoff.RecordMiss(agentID) + interval := d.backoff.GetInterval(agentID) + d.logger.Printf("Poked Mayor (very stale, backoff now: %v)", interval) + } else if state.IsStale() { + // Stale but not very stale - don't increase backoff, but don't reset either + d.logger.Println("Poked Mayor (stale)") + } else { + d.logger.Println("Poked Mayor") + } +} + +// pokeWitnesses sends heartbeats to all Witness sessions. 
+func (d *Daemon) pokeWitnesses() { + // Find all rigs by looking for witness sessions + // Session naming: gt--witness + sessions, err := d.tmux.ListSessions() + if err != nil { + d.logger.Printf("Error listing sessions: %v", err) + return + } + + for _, session := range sessions { + // Check if it's a witness session + if !isWitnessSession(session) { + continue + } + + d.pokeWitness(session) + } +} + +// pokeWitness sends a heartbeat to a single witness session with backoff. +func (d *Daemon) pokeWitness(session string) { + // Extract rig name from session (gt--witness -> ) + rigName := extractRigName(session) + agentID := session // Use session name as agent ID + + // Find the rig's workspace for keepalive check + rigWorkspace := filepath.Join(d.config.TownRoot, "gastown", rigName) + + // Check keepalive to see if the witness is active + state := keepalive.Read(rigWorkspace) + if state != nil && state.IsFresh() { + // Witness is actively working, reset backoff + d.backoff.RecordActivity(agentID) + d.logger.Printf("Witness %s is fresh (cmd: %s), skipping poke", session, state.LastCommand) + return + } + + // Check if we should poke based on backoff interval + if !d.backoff.ShouldPoke(agentID) { + interval := d.backoff.GetInterval(agentID) + d.logger.Printf("Witness %s backoff in effect (interval: %v), skipping poke", session, interval) + return + } + + // Send heartbeat message + msg := "HEARTBEAT: check your workers" + if err := d.tmux.SendKeys(session, msg); err != nil { + d.logger.Printf("Error poking Witness %s: %v", session, err) + return + } + + d.backoff.RecordPoke(agentID) + + // If agent is stale or very stale, record a miss (increase backoff) + if state == nil || state.IsVeryStale() { + d.backoff.RecordMiss(agentID) + interval := d.backoff.GetInterval(agentID) + d.logger.Printf("Poked Witness %s (very stale, backoff now: %v)", session, interval) + } else if state.IsStale() { + d.logger.Printf("Poked Witness %s (stale)", session) + } else { + 
d.logger.Printf("Poked Witness %s", session) + } +} + +// extractRigName extracts the rig name from a witness session name. +// "gt-gastown-witness" -> "gastown" +func extractRigName(session string) string { + // Remove "gt-" prefix and "-witness" suffix + name := strings.TrimPrefix(session, "gt-") + name = strings.TrimSuffix(name, "-witness") + return name +} + +// isWitnessSession checks if a session name is a witness session. +func isWitnessSession(name string) bool { + // Pattern: gt--witness + if len(name) < 12 { // "gt-x-witness" minimum + return false + } + return name[:3] == "gt-" && name[len(name)-8:] == "-witness" +} + +// processLifecycleRequests checks for and processes lifecycle requests. +func (d *Daemon) processLifecycleRequests() { + d.ProcessLifecycleRequests() +} + +// shutdown performs graceful shutdown. +func (d *Daemon) shutdown(state *State) error { + d.logger.Println("Daemon shutting down") + + state.Running = false + if err := SaveState(d.config.TownRoot, state); err != nil { + d.logger.Printf("Warning: failed to save final state: %v", err) + } + + d.logger.Println("Daemon stopped") + return nil +} + +// Stop signals the daemon to stop. +func (d *Daemon) Stop() { + d.cancel() +} + +// IsRunning checks if a daemon is running for the given town. +func IsRunning(townRoot string) (bool, int, error) { + pidFile := filepath.Join(townRoot, "daemon", "daemon.pid") + data, err := os.ReadFile(pidFile) + if err != nil { + if os.IsNotExist(err) { + return false, 0, nil + } + return false, 0, err + } + + pid, err := strconv.Atoi(string(data)) + if err != nil { + return false, 0, nil + } + + // Check if process is running + process, err := os.FindProcess(pid) + if err != nil { + return false, 0, nil + } + + // On Unix, FindProcess always succeeds. Send signal 0 to check if alive. 
+ err = process.Signal(syscall.Signal(0)) + if err != nil { + // Process not running, clean up stale PID file + _ = os.Remove(pidFile) + return false, 0, nil + } + + return true, pid, nil +} + +// StopDaemon stops the running daemon for the given town. +func StopDaemon(townRoot string) error { + running, pid, err := IsRunning(townRoot) + if err != nil { + return err + } + if !running { + return fmt.Errorf("daemon is not running") + } + + process, err := os.FindProcess(pid) + if err != nil { + return fmt.Errorf("finding process: %w", err) + } + + // Send SIGTERM for graceful shutdown + if err := process.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("sending SIGTERM: %w", err) + } + + // Wait a bit for graceful shutdown + time.Sleep(500 * time.Millisecond) + + // Check if still running + if err := process.Signal(syscall.Signal(0)); err == nil { + // Still running, force kill + _ = process.Signal(syscall.SIGKILL) + } + + // Clean up PID file + pidFile := filepath.Join(townRoot, "daemon", "daemon.pid") + _ = os.Remove(pidFile) + + return nil +} diff --git a/internal/daemon/lifecycle.go b/internal/daemon/lifecycle.go new file mode 100644 index 00000000..051d3d87 --- /dev/null +++ b/internal/daemon/lifecycle.go @@ -0,0 +1,231 @@ +package daemon + +import ( + "encoding/json" + "fmt" + "os/exec" + "strings" + "time" + + "github.com/steveyegge/gastown/internal/tmux" +) + +// BeadsMessage represents a message from beads mail. +type BeadsMessage struct { + ID string `json:"id"` + Title string `json:"title"` + Description string `json:"description"` + Sender string `json:"sender"` + Assignee string `json:"assignee"` + Priority int `json:"priority"` + Status string `json:"status"` +} + +// ProcessLifecycleRequests checks for and processes lifecycle requests from the daemon inbox. 
+func (d *Daemon) ProcessLifecycleRequests() { + // Get mail for daemon identity + cmd := exec.Command("bd", "mail", "inbox", "--identity", "daemon/", "--json") + cmd.Dir = d.config.TownRoot + + output, err := cmd.Output() + if err != nil { + // bd mail might not be available or inbox empty + return + } + + if len(output) == 0 || string(output) == "[]" || string(output) == "[]\n" { + return + } + + var messages []BeadsMessage + if err := json.Unmarshal(output, &messages); err != nil { + d.logger.Printf("Error parsing mail: %v", err) + return + } + + for _, msg := range messages { + if msg.Status == "closed" { + continue // Already processed + } + + request := d.parseLifecycleRequest(&msg) + if request == nil { + continue // Not a lifecycle request + } + + d.logger.Printf("Processing lifecycle request from %s: %s", request.From, request.Action) + + if err := d.executeLifecycleAction(request); err != nil { + d.logger.Printf("Error executing lifecycle action: %v", err) + continue + } + + // Mark message as read (close the issue) + if err := d.closeMessage(msg.ID); err != nil { + d.logger.Printf("Warning: failed to close message %s: %v", msg.ID, err) + } + } +} + +// parseLifecycleRequest extracts a lifecycle request from a message. +func (d *Daemon) parseLifecycleRequest(msg *BeadsMessage) *LifecycleRequest { + // Look for lifecycle keywords in subject/title + // Expected format: "LIFECYCLE: requesting " + title := strings.ToLower(msg.Title) + + if !strings.HasPrefix(title, "lifecycle:") { + return nil + } + + var action LifecycleAction + var from string + + if strings.Contains(title, "cycle") || strings.Contains(title, "cycling") { + action = ActionCycle + } else if strings.Contains(title, "restart") { + action = ActionRestart + } else if strings.Contains(title, "shutdown") || strings.Contains(title, "stop") { + action = ActionShutdown + } else { + return nil + } + + // Extract role from title: "LIFECYCLE: requesting ..." 
+ // Parse between "lifecycle: " and " requesting" + parts := strings.Split(title, " requesting") + if len(parts) >= 1 { + rolePart := strings.TrimPrefix(parts[0], "lifecycle:") + from = strings.TrimSpace(rolePart) + } + + if from == "" { + from = msg.Sender // fallback + } + + return &LifecycleRequest{ + From: from, + Action: action, + Timestamp: time.Now(), + } +} + +// executeLifecycleAction performs the requested lifecycle action. +func (d *Daemon) executeLifecycleAction(request *LifecycleRequest) error { + // Determine session name from sender identity + sessionName := d.identityToSession(request.From) + if sessionName == "" { + return fmt.Errorf("unknown agent identity: %s", request.From) + } + + d.logger.Printf("Executing %s for session %s", request.Action, sessionName) + + // Check if session exists + running, err := d.tmux.HasSession(sessionName) + if err != nil { + return fmt.Errorf("checking session: %w", err) + } + + switch request.Action { + case ActionShutdown: + if running { + if err := d.tmux.KillSession(sessionName); err != nil { + return fmt.Errorf("killing session: %w", err) + } + d.logger.Printf("Killed session %s", sessionName) + } + return nil + + case ActionCycle, ActionRestart: + if running { + // Kill the session first + if err := d.tmux.KillSession(sessionName); err != nil { + return fmt.Errorf("killing session: %w", err) + } + d.logger.Printf("Killed session %s for restart", sessionName) + + // Wait a moment + time.Sleep(500 * time.Millisecond) + } + + // Restart the session + if err := d.restartSession(sessionName, request.From); err != nil { + return fmt.Errorf("restarting session: %w", err) + } + d.logger.Printf("Restarted session %s", sessionName) + return nil + + default: + return fmt.Errorf("unknown action: %s", request.Action) + } +} + +// identityToSession converts a beads identity to a tmux session name. 
+func (d *Daemon) identityToSession(identity string) string { + // Handle known identities + switch identity { + case "mayor": + return "gt-mayor" + default: + // Pattern: -witness → gt--witness + if strings.HasSuffix(identity, "-witness") { + return "gt-" + identity + } + // Unknown identity + return "" + } +} + +// restartSession starts a new session for the given agent. +func (d *Daemon) restartSession(sessionName, identity string) error { + // Determine working directory and startup command based on agent type + var workDir, startCmd string + var rigName string + + if identity == "mayor" { + workDir = d.config.TownRoot + startCmd = "exec claude --dangerously-skip-permissions" + } else if strings.HasSuffix(identity, "-witness") { + // Extract rig name: -witness → + rigName = strings.TrimSuffix(identity, "-witness") + workDir = d.config.TownRoot + "/" + rigName + startCmd = "exec claude --dangerously-skip-permissions" + } else { + return fmt.Errorf("don't know how to restart %s", identity) + } + + // Create session + if err := d.tmux.NewSession(sessionName, workDir); err != nil { + return fmt.Errorf("creating session: %w", err) + } + + // Set environment + _ = d.tmux.SetEnvironment(sessionName, "GT_ROLE", identity) + + // Apply theme + if identity == "mayor" { + theme := tmux.MayorTheme() + _ = d.tmux.ConfigureGasTownSession(sessionName, theme, "", "Mayor", "coordinator") + } else if rigName != "" { + theme := tmux.AssignTheme(rigName) + _ = d.tmux.ConfigureGasTownSession(sessionName, theme, rigName, "witness", "witness") + } + + // Send startup command + if err := d.tmux.SendKeys(sessionName, startCmd); err != nil { + return fmt.Errorf("sending startup command: %w", err) + } + + // Prime after delay + if err := d.tmux.SendKeysDelayed(sessionName, "gt prime", 2000); err != nil { + d.logger.Printf("Warning: could not send prime: %v", err) + } + + return nil +} + +// closeMessage marks a mail message as read by closing the beads issue. 
+func (d *Daemon) closeMessage(id string) error { + cmd := exec.Command("bd", "close", id) + cmd.Dir = d.config.TownRoot + return cmd.Run() +} diff --git a/internal/daemon/types.go b/internal/daemon/types.go new file mode 100644 index 00000000..2b0d9068 --- /dev/null +++ b/internal/daemon/types.go @@ -0,0 +1,126 @@ +// Package daemon provides the town-level background service for Gas Town. +// +// The daemon is a simple Go process (not a Claude agent) that: +// 1. Pokes agents periodically (heartbeat) +// 2. Processes lifecycle requests (cycle, restart, shutdown) +// 3. Restarts sessions when agents request cycling +// +// The daemon is a "dumb scheduler" - all intelligence is in agents. +package daemon + +import ( + "encoding/json" + "os" + "path/filepath" + "time" +) + +// Config holds daemon configuration. +type Config struct { + // HeartbeatInterval is how often to poke agents. + HeartbeatInterval time.Duration `json:"heartbeat_interval"` + + // TownRoot is the Gas Town workspace root. + TownRoot string `json:"town_root"` + + // LogFile is the path to the daemon log file. + LogFile string `json:"log_file"` + + // PidFile is the path to the PID file. + PidFile string `json:"pid_file"` +} + +// DefaultConfig returns the default daemon configuration. +func DefaultConfig(townRoot string) *Config { + daemonDir := filepath.Join(townRoot, "daemon") + return &Config{ + HeartbeatInterval: 60 * time.Second, + TownRoot: townRoot, + LogFile: filepath.Join(daemonDir, "daemon.log"), + PidFile: filepath.Join(daemonDir, "daemon.pid"), + } +} + +// State represents the daemon's runtime state. +type State struct { + // Running indicates if the daemon is running. + Running bool `json:"running"` + + // PID is the process ID of the daemon. + PID int `json:"pid"` + + // StartedAt is when the daemon started. + StartedAt time.Time `json:"started_at"` + + // LastHeartbeat is when the last heartbeat completed. 
+ LastHeartbeat time.Time `json:"last_heartbeat"` + + // HeartbeatCount is how many heartbeats have completed. + HeartbeatCount int64 `json:"heartbeat_count"` +} + +// StateFile returns the path to the state file. +func StateFile(townRoot string) string { + return filepath.Join(townRoot, "daemon", "state.json") +} + +// LoadState loads daemon state from disk. +func LoadState(townRoot string) (*State, error) { + stateFile := StateFile(townRoot) + data, err := os.ReadFile(stateFile) + if err != nil { + if os.IsNotExist(err) { + return &State{}, nil + } + return nil, err + } + + var state State + if err := json.Unmarshal(data, &state); err != nil { + return nil, err + } + return &state, nil +} + +// SaveState saves daemon state to disk. +func SaveState(townRoot string, state *State) error { + stateFile := StateFile(townRoot) + + // Ensure daemon directory exists + if err := os.MkdirAll(filepath.Dir(stateFile), 0755); err != nil { + return err + } + + data, err := json.MarshalIndent(state, "", " ") + if err != nil { + return err + } + + return os.WriteFile(stateFile, data, 0644) +} + +// LifecycleAction represents a lifecycle request action. +type LifecycleAction string + +const ( + // ActionCycle restarts the session with handoff. + ActionCycle LifecycleAction = "cycle" + + // ActionRestart does a fresh restart without handoff. + ActionRestart LifecycleAction = "restart" + + // ActionShutdown terminates without restart. + ActionShutdown LifecycleAction = "shutdown" +) + +// LifecycleRequest represents a request from an agent to the daemon. +type LifecycleRequest struct { + // From is the agent requesting the action (e.g., "mayor/", "gastown/witness"). + From string `json:"from"` + + // Action is what lifecycle action to perform. + Action LifecycleAction `json:"action"` + + // Timestamp is when the request was made. 
+ Timestamp time.Time `json:"timestamp"` +} diff --git a/internal/doctor/types.go b/internal/doctor/types.go index c50b8273..ac7e2004 100644 --- a/internal/doctor/types.go +++ b/internal/doctor/types.go @@ -141,7 +141,7 @@ func (r *Report) Print(w io.Writer, verbose bool) { } // Print summary - fmt.Fprintln(w) + _, _ = fmt.Fprintln(w) r.printSummary(w) } @@ -157,18 +157,18 @@ func (r *Report) printCheck(w io.Writer, check *CheckResult, verbose bool) { prefix = style.ErrorPrefix } - fmt.Fprintf(w, "%s %s: %s\n", prefix, check.Name, check.Message) + _, _ = fmt.Fprintf(w, "%s %s: %s\n", prefix, check.Name, check.Message) // Print details in verbose mode or for non-OK results if len(check.Details) > 0 && (verbose || check.Status != StatusOK) { for _, detail := range check.Details { - fmt.Fprintf(w, " %s\n", detail) + _, _ = fmt.Fprintf(w, " %s\n", detail) } } // Print fix hint for errors/warnings if check.FixHint != "" && check.Status != StatusOK { - fmt.Fprintf(w, " %s %s\n", style.ArrowPrefix, check.FixHint) + _, _ = fmt.Fprintf(w, " %s %s\n", style.ArrowPrefix, check.FixHint) } } @@ -188,5 +188,5 @@ func (r *Report) printSummary(w io.Writer) { parts = append(parts, style.Error.Render(fmt.Sprintf("%d errors", r.Summary.Errors))) } - fmt.Fprintln(w, strings.Join(parts, ", ")) + _, _ = fmt.Fprintln(w, strings.Join(parts, ", ")) } diff --git a/internal/git/git.go b/internal/git/git.go index fcd9db3a..68d4c760 100644 --- a/internal/git/git.go +++ b/internal/git/git.go @@ -27,6 +27,11 @@ func NewGit(workDir string) *Git { return &Git{workDir: workDir} } +// WorkDir returns the working directory for this Git instance. +func (g *Git) WorkDir() string { + return g.workDir +} + // run executes a git command and returns stdout. func (g *Git) run(args ...string) (string, error) { cmd := exec.Command("git", args...) @@ -91,6 +96,12 @@ func (g *Git) Fetch(remote string) error { return err } +// FetchBranch fetches a specific branch from the remote. 
+func (g *Git) FetchBranch(remote, branch string) error { + _, err := g.run("fetch", remote, branch) + return err +} + // Pull pulls from the remote branch. func (g *Git) Pull(remote, branch string) error { _, err := g.run("pull", remote, branch) @@ -195,6 +206,18 @@ func (g *Git) Merge(branch string) error { return err } +// MergeNoFF merges the given branch with --no-ff flag and a custom message. +func (g *Git) MergeNoFF(branch, message string) error { + _, err := g.run("merge", "--no-ff", "-m", message, branch) + return err +} + +// DeleteRemoteBranch deletes a branch on the remote. +func (g *Git) DeleteRemoteBranch(remote, branch string) error { + _, err := g.run("push", remote, "--delete", branch) + return err +} + // Rebase rebases the current branch onto the given ref. func (g *Git) Rebase(onto string) error { _, err := g.run("rebase", onto) @@ -207,6 +230,95 @@ func (g *Git) AbortMerge() error { return err } +// CheckConflicts performs a test merge to check if source can be merged into target +// without conflicts. Returns a list of conflicting files, or empty slice if clean. +// The merge is always aborted after checking - no actual changes are made. +// +// The caller must ensure the working directory is clean before calling this. +// After return, the working directory is restored to the target branch. 
+func (g *Git) CheckConflicts(source, target string) ([]string, error) { + // Checkout the target branch + if err := g.Checkout(target); err != nil { + return nil, fmt.Errorf("checkout target %s: %w", target, err) + } + + // Attempt test merge with --no-commit --no-ff + // We need to capture both stdout and stderr to detect conflicts + _, mergeErr := g.runMergeCheck("merge", "--no-commit", "--no-ff", source) + + if mergeErr != nil { + // Check if there are unmerged files (indicates conflict) + conflicts, err := g.getConflictingFiles() + if err == nil && len(conflicts) > 0 { + // Abort the test merge + _ = g.AbortMerge() + return conflicts, nil + } + + // Check if it's a conflict error from wrapper + if errors.Is(mergeErr, ErrMergeConflict) { + _ = g.AbortMerge() + return conflicts, nil + } + + // Some other merge error + _ = g.AbortMerge() + return nil, mergeErr + } + + // Merge succeeded (no conflicts) - abort the test merge + // Use reset since --abort won't work on successful merge + _, _ = g.run("reset", "--hard", "HEAD") + return nil, nil +} + +// runMergeCheck runs a git merge command and returns error info from both stdout and stderr. +// This is needed because git merge outputs CONFLICT info to stdout. +func (g *Git) runMergeCheck(args ...string) (string, error) { + cmd := exec.Command("git", args...) + cmd.Dir = g.workDir + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + // Check stdout for CONFLICT message (git sends it there) + stdoutStr := stdout.String() + if strings.Contains(stdoutStr, "CONFLICT") { + return "", ErrMergeConflict + } + // Fall back to stderr check + return "", g.wrapError(err, stderr.String(), args) + } + + return strings.TrimSpace(stdout.String()), nil +} + +// getConflictingFiles returns the list of files with merge conflicts. 
+func (g *Git) getConflictingFiles() ([]string, error) { + // git diff --name-only --diff-filter=U shows unmerged files + out, err := g.run("diff", "--name-only", "--diff-filter=U") + if err != nil { + return nil, err + } + + if out == "" { + return nil, nil + } + + files := strings.Split(out, "\n") + // Filter out empty strings + var result []string + for _, f := range files { + if f != "" { + result = append(result, f) + } + } + return result, nil +} + // AbortRebase aborts a rebase in progress. func (g *Git) AbortRebase() error { _, err := g.run("rebase", "--abort") @@ -219,6 +331,39 @@ func (g *Git) CreateBranch(name string) error { return err } +// CreateBranchFrom creates a new branch from a specific ref. +func (g *Git) CreateBranchFrom(name, ref string) error { + _, err := g.run("branch", name, ref) + return err +} + +// BranchExists checks if a branch exists locally. +func (g *Git) BranchExists(name string) (bool, error) { + _, err := g.run("show-ref", "--verify", "--quiet", "refs/heads/"+name) + if err != nil { + // Exit code 1 means branch doesn't exist + if strings.Contains(err.Error(), "exit status 1") { + return false, nil + } + return false, err + } + return true, nil +} + +// RemoteBranchExists checks if a branch exists on the remote. +func (g *Git) RemoteBranchExists(remote, branch string) (bool, error) { + _, err := g.run("ls-remote", "--heads", remote, branch) + if err != nil { + return false, err + } + // ls-remote returns empty if branch doesn't exist, need to check output + out, err := g.run("ls-remote", "--heads", remote, branch) + if err != nil { + return false, err + } + return out != "", nil +} + // DeleteBranch deletes a branch. func (g *Git) DeleteBranch(name string, force bool) error { flag := "-d" @@ -325,3 +470,58 @@ func (g *Git) WorktreeList() ([]Worktree, error) { return worktrees, nil } + +// BranchCreatedDate returns the date when a branch was created. +// This uses the committer date of the first commit on the branch. 
+// Returns date in YYYY-MM-DD format. +func (g *Git) BranchCreatedDate(branch string) (string, error) { + // Get the date of the first commit on the branch that's not on main + // Use merge-base to find where the branch diverged from main + mergeBase, err := g.run("merge-base", "main", branch) + if err != nil { + // If merge-base fails, fall back to the branch tip's date + out, err := g.run("log", "-1", "--format=%cs", branch) + if err != nil { + return "", err + } + return out, nil + } + + // Get the first commit after the merge base on this branch + out, err := g.run("log", "--format=%cs", "--reverse", mergeBase+".."+branch) + if err != nil { + return "", err + } + + // Get the first line (first commit's date) + lines := strings.Split(out, "\n") + if len(lines) > 0 && lines[0] != "" { + return lines[0], nil + } + + // If no commits after merge-base, the branch points to merge-base + // Return the merge-base commit date + out, err = g.run("log", "-1", "--format=%cs", mergeBase) + if err != nil { + return "", err + } + return out, nil +} + +// CommitsAhead returns the number of commits that branch has ahead of base. +// For example, CommitsAhead("main", "feature") returns how many commits +// are on feature that are not on main. 
+func (g *Git) CommitsAhead(base, branch string) (int, error) { + out, err := g.run("rev-list", "--count", base+".."+branch) + if err != nil { + return 0, err + } + + var count int + _, err = fmt.Sscanf(out, "%d", &count) + if err != nil { + return 0, fmt.Errorf("parsing commit count: %w", err) + } + + return count, nil +} diff --git a/internal/git/git_test.go b/internal/git/git_test.go index 64d772f0..1224bb63 100644 --- a/internal/git/git_test.go +++ b/internal/git/git_test.go @@ -21,10 +21,10 @@ func initTestRepo(t *testing.T) string { // Configure user for commits cmd = exec.Command("git", "config", "user.email", "test@test.com") cmd.Dir = dir - cmd.Run() + _ = cmd.Run() cmd = exec.Command("git", "config", "user.name", "Test User") cmd.Dir = dir - cmd.Run() + _ = cmd.Run() // Create initial commit testFile := filepath.Join(dir, "README.md") @@ -33,10 +33,10 @@ func initTestRepo(t *testing.T) string { } cmd = exec.Command("git", "add", ".") cmd.Dir = dir - cmd.Run() + _ = cmd.Run() cmd = exec.Command("git", "commit", "-m", "initial") cmd.Dir = dir - cmd.Run() + _ = cmd.Run() return dir } @@ -186,3 +186,157 @@ func TestRev(t *testing.T) { t.Errorf("hash length = %d, want 40", len(hash)) } } + +func TestFetchBranch(t *testing.T) { + // Create a "remote" repo + remoteDir := t.TempDir() + cmd := exec.Command("git", "init", "--bare") + cmd.Dir = remoteDir + if err := cmd.Run(); err != nil { + t.Fatalf("git init --bare: %v", err) + } + + // Create a local repo and push to remote + localDir := initTestRepo(t) + g := NewGit(localDir) + + // Add remote + cmd = exec.Command("git", "remote", "add", "origin", remoteDir) + cmd.Dir = localDir + if err := cmd.Run(); err != nil { + t.Fatalf("git remote add: %v", err) + } + + // Push main branch + mainBranch, _ := g.CurrentBranch() + cmd = exec.Command("git", "push", "-u", "origin", mainBranch) + cmd.Dir = localDir + if err := cmd.Run(); err != nil { + t.Fatalf("git push: %v", err) + } + + // Fetch should succeed + if err := 
g.FetchBranch("origin", mainBranch); err != nil { + t.Errorf("FetchBranch: %v", err) + } +} + +func TestCheckConflicts_NoConflict(t *testing.T) { + dir := initTestRepo(t) + g := NewGit(dir) + mainBranch, _ := g.CurrentBranch() + + // Create feature branch with non-conflicting change + if err := g.CreateBranch("feature"); err != nil { + t.Fatalf("CreateBranch: %v", err) + } + if err := g.Checkout("feature"); err != nil { + t.Fatalf("Checkout feature: %v", err) + } + + // Add a new file (won't conflict with main) + newFile := filepath.Join(dir, "feature.txt") + if err := os.WriteFile(newFile, []byte("feature content"), 0644); err != nil { + t.Fatalf("write file: %v", err) + } + if err := g.Add("feature.txt"); err != nil { + t.Fatalf("Add: %v", err) + } + if err := g.Commit("add feature file"); err != nil { + t.Fatalf("Commit: %v", err) + } + + // Go back to main + if err := g.Checkout(mainBranch); err != nil { + t.Fatalf("Checkout main: %v", err) + } + + // Check for conflicts - should be none + conflicts, err := g.CheckConflicts("feature", mainBranch) + if err != nil { + t.Fatalf("CheckConflicts: %v", err) + } + if len(conflicts) > 0 { + t.Errorf("expected no conflicts, got %v", conflicts) + } + + // Verify we're still on main and clean + branch, _ := g.CurrentBranch() + if branch != mainBranch { + t.Errorf("branch = %q, want %q", branch, mainBranch) + } + status, _ := g.Status() + if !status.Clean { + t.Error("expected clean working directory after CheckConflicts") + } +} + +func TestCheckConflicts_WithConflict(t *testing.T) { + dir := initTestRepo(t) + g := NewGit(dir) + mainBranch, _ := g.CurrentBranch() + + // Create feature branch + if err := g.CreateBranch("feature"); err != nil { + t.Fatalf("CreateBranch: %v", err) + } + if err := g.Checkout("feature"); err != nil { + t.Fatalf("Checkout feature: %v", err) + } + + // Modify README.md on feature branch + readmeFile := filepath.Join(dir, "README.md") + if err := os.WriteFile(readmeFile, []byte("# Feature 
changes\n"), 0644); err != nil { + t.Fatalf("write file: %v", err) + } + if err := g.Add("README.md"); err != nil { + t.Fatalf("Add: %v", err) + } + if err := g.Commit("modify readme on feature"); err != nil { + t.Fatalf("Commit: %v", err) + } + + // Go back to main and make conflicting change + if err := g.Checkout(mainBranch); err != nil { + t.Fatalf("Checkout main: %v", err) + } + if err := os.WriteFile(readmeFile, []byte("# Main changes\n"), 0644); err != nil { + t.Fatalf("write file: %v", err) + } + if err := g.Add("README.md"); err != nil { + t.Fatalf("Add: %v", err) + } + if err := g.Commit("modify readme on main"); err != nil { + t.Fatalf("Commit: %v", err) + } + + // Check for conflicts - should find README.md + conflicts, err := g.CheckConflicts("feature", mainBranch) + if err != nil { + t.Fatalf("CheckConflicts: %v", err) + } + if len(conflicts) == 0 { + t.Error("expected conflicts, got none") + } + + foundReadme := false + for _, f := range conflicts { + if f == "README.md" { + foundReadme = true + break + } + } + if !foundReadme { + t.Errorf("expected README.md in conflicts, got %v", conflicts) + } + + // Verify we're still on main and clean + branch, _ := g.CurrentBranch() + if branch != mainBranch { + t.Errorf("branch = %q, want %q", branch, mainBranch) + } + status, _ := g.Status() + if !status.Clean { + t.Error("expected clean working directory after CheckConflicts") + } +} diff --git a/internal/keepalive/keepalive.go b/internal/keepalive/keepalive.go new file mode 100644 index 00000000..6ff4a5de --- /dev/null +++ b/internal/keepalive/keepalive.go @@ -0,0 +1,111 @@ +// Package keepalive provides agent activity signaling via file touch. +package keepalive + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "time" + + "github.com/steveyegge/gastown/internal/workspace" +) + +// State represents the keepalive file contents. 
+type State struct { + LastCommand string `json:"last_command"` + Timestamp time.Time `json:"timestamp"` +} + +// Touch updates the keepalive file in the workspace's .gastown directory. +// It silently ignores errors (best-effort signaling). +func Touch(command string) { + TouchWithArgs(command, nil) +} + +// TouchWithArgs updates the keepalive file with the full command including args. +// It silently ignores errors (best-effort signaling). +func TouchWithArgs(command string, args []string) { + root, err := workspace.FindFromCwd() + if err != nil || root == "" { + return // Not in a workspace, nothing to do + } + + // Build full command string + fullCmd := command + if len(args) > 0 { + fullCmd = command + " " + strings.Join(args, " ") + } + + TouchInWorkspace(root, fullCmd) +} + +// TouchInWorkspace updates the keepalive file in a specific workspace. +// It silently ignores errors (best-effort signaling). +func TouchInWorkspace(workspaceRoot, command string) { + gastown := filepath.Join(workspaceRoot, ".gastown") + + // Ensure .gastown directory exists + if err := os.MkdirAll(gastown, 0755); err != nil { + return + } + + state := State{ + LastCommand: command, + Timestamp: time.Now().UTC(), + } + + data, err := json.Marshal(state) + if err != nil { + return + } + + keepalivePath := filepath.Join(gastown, "keepalive.json") + _ = os.WriteFile(keepalivePath, data, 0644) +} + +// Read returns the current keepalive state for the workspace. +// Returns nil if the file doesn't exist or can't be read. +func Read(workspaceRoot string) *State { + keepalivePath := filepath.Join(workspaceRoot, ".gastown", "keepalive.json") + + data, err := os.ReadFile(keepalivePath) + if err != nil { + return nil + } + + var state State + if err := json.Unmarshal(data, &state); err != nil { + return nil + } + + return &state +} + +// Age returns how old the keepalive signal is. +// Returns a very large duration if the state is nil. 
+func (s *State) Age() time.Duration { + if s == nil { + return 24 * time.Hour * 365 // Very stale + } + return time.Since(s.Timestamp) +} + +// IsFresh returns true if the keepalive is less than 2 minutes old. +func (s *State) IsFresh() bool { + return s != nil && s.Age() < 2*time.Minute +} + +// IsStale returns true if the keepalive is 2-5 minutes old. +func (s *State) IsStale() bool { + if s == nil { + return false + } + age := s.Age() + return age >= 2*time.Minute && age < 5*time.Minute +} + +// IsVeryStale returns true if the keepalive is more than 5 minutes old. +func (s *State) IsVeryStale() bool { + return s == nil || s.Age() >= 5*time.Minute +} diff --git a/internal/keepalive/keepalive_test.go b/internal/keepalive/keepalive_test.go new file mode 100644 index 00000000..10e3e7ad --- /dev/null +++ b/internal/keepalive/keepalive_test.go @@ -0,0 +1,97 @@ +package keepalive + +import ( + "os" + "path/filepath" + "testing" + "time" +) + +func TestTouchInWorkspace(t *testing.T) { + // Create temp directory + tmpDir := t.TempDir() + + // Touch the keepalive + TouchInWorkspace(tmpDir, "gt status") + + // Read back + state := Read(tmpDir) + if state == nil { + t.Fatal("expected state to be non-nil") + } + + if state.LastCommand != "gt status" { + t.Errorf("expected last_command 'gt status', got %q", state.LastCommand) + } + + // Check timestamp is recent + if time.Since(state.Timestamp) > time.Minute { + t.Errorf("timestamp too old: %v", state.Timestamp) + } +} + +func TestReadNonExistent(t *testing.T) { + tmpDir := t.TempDir() + state := Read(tmpDir) + if state != nil { + t.Error("expected nil state for non-existent file") + } +} + +func TestStateAge(t *testing.T) { + // Test nil state + var nilState *State + if nilState.Age() < 24*time.Hour { + t.Error("nil state should have very large age") + } + + // Test fresh state + freshState := &State{Timestamp: time.Now().Add(-30 * time.Second)} + if !freshState.IsFresh() { + t.Error("30-second-old state should be fresh") + 
} + if freshState.IsStale() { + t.Error("30-second-old state should not be stale") + } + if freshState.IsVeryStale() { + t.Error("30-second-old state should not be very stale") + } + + // Test stale state (3 minutes) + staleState := &State{Timestamp: time.Now().Add(-3 * time.Minute)} + if staleState.IsFresh() { + t.Error("3-minute-old state should not be fresh") + } + if !staleState.IsStale() { + t.Error("3-minute-old state should be stale") + } + if staleState.IsVeryStale() { + t.Error("3-minute-old state should not be very stale") + } + + // Test very stale state (10 minutes) + veryStaleState := &State{Timestamp: time.Now().Add(-10 * time.Minute)} + if veryStaleState.IsFresh() { + t.Error("10-minute-old state should not be fresh") + } + if veryStaleState.IsStale() { + t.Error("10-minute-old state should not be stale (it's very stale)") + } + if !veryStaleState.IsVeryStale() { + t.Error("10-minute-old state should be very stale") + } +} + +func TestDirectoryCreation(t *testing.T) { + tmpDir := t.TempDir() + workDir := filepath.Join(tmpDir, "some", "nested", "workspace") + + // Touch should create .gastown directory + TouchInWorkspace(workDir, "gt test") + + // Verify directory was created + gastown := filepath.Join(workDir, ".gastown") + if _, err := os.Stat(gastown); os.IsNotExist(err) { + t.Error("expected .gastown directory to be created") + } +} diff --git a/internal/mail/mailbox.go b/internal/mail/mailbox.go index 14dd7b77..ad6b8c06 100644 --- a/internal/mail/mailbox.go +++ b/internal/mail/mailbox.go @@ -93,7 +93,7 @@ func (m *Mailbox) listBeads() ([]*Message, error) { var beadsMsgs []BeadsMessage if err := json.Unmarshal(stdout.Bytes(), &beadsMsgs); err != nil { // Empty inbox returns empty array or nothing - if len(stdout.Bytes()) == 0 || string(stdout.Bytes()) == "null" { + if len(stdout.Bytes()) == 0 || stdout.String() == "null" { return nil, nil } return nil, err @@ -116,7 +116,7 @@ func (m *Mailbox) listLegacy() ([]*Message, error) { } return nil, err } 
- defer file.Close() + defer func() { _ = file.Close() }() var messages []*Message scanner := bufio.NewScanner(file) @@ -336,7 +336,7 @@ func (m *Mailbox) appendLegacy(msg *Message) error { if err != nil { return err } - defer file.Close() + defer func() { _ = file.Close() }() data, err := json.Marshal(msg) if err != nil { @@ -364,18 +364,86 @@ func (m *Mailbox) rewriteLegacy(messages []*Message) error { for _, msg := range messages { data, err := json.Marshal(msg) if err != nil { - file.Close() - os.Remove(tmpPath) + _ = file.Close() + _ = os.Remove(tmpPath) return err } - file.WriteString(string(data) + "\n") + _, _ = file.WriteString(string(data) + "\n") } if err := file.Close(); err != nil { - os.Remove(tmpPath) + _ = os.Remove(tmpPath) return err } // Atomic rename return os.Rename(tmpPath, m.path) } + +// ListByThread returns all messages in a given thread. +func (m *Mailbox) ListByThread(threadID string) ([]*Message, error) { + if m.legacy { + return m.listByThreadLegacy(threadID) + } + return m.listByThreadBeads(threadID) +} + +func (m *Mailbox) listByThreadBeads(threadID string) ([]*Message, error) { + // bd message thread --json + cmd := exec.Command("bd", "message", "thread", threadID, "--json") + cmd.Dir = m.workDir + cmd.Env = append(cmd.Environ(), "BD_IDENTITY="+m.identity) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + errMsg := strings.TrimSpace(stderr.String()) + if errMsg != "" { + return nil, errors.New(errMsg) + } + return nil, err + } + + var beadsMsgs []BeadsMessage + if err := json.Unmarshal(stdout.Bytes(), &beadsMsgs); err != nil { + if len(stdout.Bytes()) == 0 || stdout.String() == "null" { + return nil, nil + } + return nil, err + } + + var messages []*Message + for _, bm := range beadsMsgs { + messages = append(messages, bm.ToMessage()) + } + + // Sort by timestamp (oldest first for thread view) + sort.Slice(messages, func(i, j int) bool { + return 
messages[i].Timestamp.Before(messages[j].Timestamp) + }) + + return messages, nil +} + +func (m *Mailbox) listByThreadLegacy(threadID string) ([]*Message, error) { + messages, err := m.List() + if err != nil { + return nil, err + } + + var thread []*Message + for _, msg := range messages { + if msg.ThreadID == threadID { + thread = append(thread, msg) + } + } + + // Sort by timestamp (oldest first for thread view) + sort.Slice(thread, func(i, j int) bool { + return thread[i].Timestamp.Before(thread[j].Timestamp) + }) + + return thread, nil +} diff --git a/internal/mail/router.go b/internal/mail/router.go index 08268ada..e26fe86e 100644 --- a/internal/mail/router.go +++ b/internal/mail/router.go @@ -25,25 +25,39 @@ func NewRouter(workDir string) *Router { } } -// Send delivers a message via beads mail. +// Send delivers a message via beads message. func (r *Router) Send(msg *Message) error { // Convert addresses to beads identities toIdentity := addressToIdentity(msg.To) fromIdentity := addressToIdentity(msg.From) - // Build command: bd mail send -s -m --identity + // Build command: bd mail send -s -m args := []string{"mail", "send", toIdentity, "-s", msg.Subject, "-m", msg.Body, - "--identity", fromIdentity, } - // Add --urgent flag for high priority - if msg.Priority == PriorityHigh { - args = append(args, "--urgent") + // Add priority flag + beadsPriority := PriorityToBeads(msg.Priority) + args = append(args, "--priority", fmt.Sprintf("%d", beadsPriority)) + + // Add message type if set + if msg.Type != "" && msg.Type != TypeNotification { + args = append(args, "--type", string(msg.Type)) + } + + // Add thread ID if set + if msg.ThreadID != "" { + args = append(args, "--thread-id", msg.ThreadID) + } + + // Add reply-to if set + if msg.ReplyTo != "" { + args = append(args, "--reply-to", msg.ReplyTo) } cmd := exec.Command("bd", args...) 
+ cmd.Env = append(cmd.Environ(), "BEADS_AGENT_NAME="+fromIdentity) cmd.Dir = r.workDir var stderr bytes.Buffer @@ -57,10 +71,8 @@ func (r *Router) Send(msg *Message) error { return fmt.Errorf("sending message: %w", err) } - // Optionally notify if recipient is a polecat with active session - if isPolecat(msg.To) && msg.Priority == PriorityHigh { - r.notifyPolecat(msg) - } + // Notify recipient if they have an active session + _ = r.notifyRecipient(msg) return nil } @@ -70,43 +82,43 @@ func (r *Router) GetMailbox(address string) (*Mailbox, error) { return NewMailboxFromAddress(address, r.workDir), nil } -// notifyPolecat sends a notification to a polecat's tmux session. -func (r *Router) notifyPolecat(msg *Message) error { - // Parse rig/polecat from address - parts := strings.SplitN(msg.To, "/", 2) - if len(parts) != 2 { - return nil +// notifyRecipient sends a notification to a recipient's tmux session. +// Uses send-keys to echo a visible banner to ensure notification is seen. +// Supports mayor/, rig/polecat, and rig/refinery addresses. +func (r *Router) notifyRecipient(msg *Message) error { + sessionID := addressToSessionID(msg.To) + if sessionID == "" { + return nil // Unable to determine session ID } - rig := parts[0] - polecat := parts[1] - - // Generate session name (matches session.Manager) - sessionID := fmt.Sprintf("gt-%s-%s", rig, polecat) - // Check if session exists hasSession, err := r.tmux.HasSession(sessionID) if err != nil || !hasSession { return nil // No active session, skip notification } - // Inject notification - notification := fmt.Sprintf("[MAIL] %s", msg.Subject) - return r.tmux.SendKeys(sessionID, notification) + // Send visible notification banner to the terminal + return r.tmux.SendNotificationBanner(sessionID, msg.From, msg.Subject) } -// isPolecat checks if an address points to a polecat. 
-func isPolecat(address string) bool { - // Not mayor, not refinery, has rig/name format +// addressToSessionID converts a mail address to a tmux session ID. +// Returns empty string if address format is not recognized. +func addressToSessionID(address string) string { + // Mayor address: "mayor/" or "mayor" if strings.HasPrefix(address, "mayor") { - return false + return "gt-mayor" } + // Rig-based address: "rig/target" parts := strings.SplitN(address, "/", 2) - if len(parts) != 2 { - return false + if len(parts) != 2 || parts[1] == "" { + return "" } + rig := parts[0] target := parts[1] - return target != "" && target != "refinery" + + // Polecat: gt-rig-polecat + // Refinery: gt-rig-refinery (if refinery has its own session) + return fmt.Sprintf("gt-%s-%s", rig, target) } diff --git a/internal/mail/types.go b/internal/mail/types.go index e602d072..7a928d1a 100644 --- a/internal/mail/types.go +++ b/internal/mail/types.go @@ -11,11 +11,34 @@ import ( type Priority string const ( + // PriorityLow is for non-urgent messages. + PriorityLow Priority = "low" + // PriorityNormal is the default priority. PriorityNormal Priority = "normal" - // PriorityHigh indicates an urgent message. + // PriorityHigh indicates an important message. PriorityHigh Priority = "high" + + // PriorityUrgent indicates an urgent message requiring immediate attention. + PriorityUrgent Priority = "urgent" +) + +// MessageType indicates the purpose of a message. +type MessageType string + +const ( + // TypeTask indicates a message requiring action from the recipient. + TypeTask MessageType = "task" + + // TypeScavenge indicates optional first-come-first-served work. + TypeScavenge MessageType = "scavenge" + + // TypeNotification is an informational message (default). + TypeNotification MessageType = "notification" + + // TypeReply is a response to another message. + TypeReply MessageType = "reply" ) // Message represents a mail message between agents. 
@@ -44,9 +67,18 @@ type Message struct { // Priority is the message priority. Priority Priority `json:"priority"` + + // Type indicates the message type (task, scavenge, notification, reply). + Type MessageType `json:"type"` + + // ThreadID groups related messages into a conversation thread. + ThreadID string `json:"thread_id,omitempty"` + + // ReplyTo is the ID of the message this is replying to. + ReplyTo string `json:"reply_to,omitempty"` } -// NewMessage creates a new message with a generated ID (for legacy JSONL mode). +// NewMessage creates a new message with a generated ID and thread ID. func NewMessage(from, to, subject, body string) *Message { return &Message{ ID: generateID(), @@ -57,16 +89,42 @@ func NewMessage(from, to, subject, body string) *Message { Timestamp: time.Now(), Read: false, Priority: PriorityNormal, + Type: TypeNotification, + ThreadID: generateThreadID(), + } +} + +// NewReplyMessage creates a reply message that inherits the thread from the original. +func NewReplyMessage(from, to, subject, body string, original *Message) *Message { + return &Message{ + ID: generateID(), + From: from, + To: to, + Subject: subject, + Body: body, + Timestamp: time.Now(), + Read: false, + Priority: PriorityNormal, + Type: TypeReply, + ThreadID: original.ThreadID, + ReplyTo: original.ID, } } // generateID creates a random message ID. func generateID() string { b := make([]byte, 8) - rand.Read(b) + _, _ = rand.Read(b) return "msg-" + hex.EncodeToString(b) } +// generateThreadID creates a random thread ID. +func generateThreadID() string { + b := make([]byte, 6) + _, _ = rand.Read(b) + return "thread-" + hex.EncodeToString(b) +} + // BeadsMessage represents a message as returned by bd mail commands. 
type BeadsMessage struct { ID string `json:"id"` @@ -74,16 +132,34 @@ type BeadsMessage struct { Description string `json:"description"` // Body Sender string `json:"sender"` // From identity Assignee string `json:"assignee"` // To identity - Priority int `json:"priority"` // 0=urgent, 2=normal + Priority int `json:"priority"` // 0=urgent, 1=high, 2=normal, 3=low Status string `json:"status"` // open=unread, closed=read CreatedAt time.Time `json:"created_at"` + Type string `json:"type,omitempty"` // Message type + ThreadID string `json:"thread_id,omitempty"` // Thread identifier + ReplyTo string `json:"reply_to,omitempty"` // Original message ID } // ToMessage converts a BeadsMessage to a GGT Message. func (bm *BeadsMessage) ToMessage() *Message { - priority := PriorityNormal - if bm.Priority == 0 { + // Convert beads priority (0=urgent, 1=high, 2=normal, 3=low) to GGT Priority + var priority Priority + switch bm.Priority { + case 0: + priority = PriorityUrgent + case 1: priority = PriorityHigh + case 3: + priority = PriorityLow + default: + priority = PriorityNormal + } + + // Convert message type, default to notification + msgType := TypeNotification + switch MessageType(bm.Type) { + case TypeTask, TypeScavenge, TypeReply: + msgType = MessageType(bm.Type) } return &Message{ @@ -95,25 +171,64 @@ func (bm *BeadsMessage) ToMessage() *Message { Timestamp: bm.CreatedAt, Read: bm.Status == "closed", Priority: priority, + Type: msgType, + ThreadID: bm.ThreadID, + ReplyTo: bm.ReplyTo, + } +} + +// PriorityToBeads converts a GGT Priority to beads priority integer. +// Returns: 0=urgent, 1=high, 2=normal, 3=low +func PriorityToBeads(p Priority) int { + switch p { + case PriorityUrgent: + return 0 + case PriorityHigh: + return 1 + case PriorityLow: + return 3 + default: + return 2 // normal + } +} + +// ParsePriority parses a priority string, returning PriorityNormal for invalid values. 
+func ParsePriority(s string) Priority { + switch Priority(s) { + case PriorityLow, PriorityNormal, PriorityHigh, PriorityUrgent: + return Priority(s) + default: + return PriorityNormal + } +} + +// ParseMessageType parses a message type string, returning TypeNotification for invalid values. +func ParseMessageType(s string) MessageType { + switch MessageType(s) { + case TypeTask, TypeScavenge, TypeNotification, TypeReply: + return MessageType(s) + default: + return TypeNotification } } // addressToIdentity converts a GGT address to a beads identity. // // Examples: -// - "mayor/" → "mayor" +// - "mayor/" → "mayor/" +// - "mayor" → "mayor/" // - "gastown/Toast" → "gastown-Toast" // - "gastown/refinery" → "gastown-refinery" // - "gastown/" → "gastown" (rig broadcast) func addressToIdentity(address string) string { - // Trim trailing slash - if len(address) > 0 && address[len(address)-1] == '/' { - address = address[:len(address)-1] + // Mayor special case - always use "mayor/" for consistency + if address == "mayor" || address == "mayor/" { + return "mayor/" } - // Mayor special case - if address == "mayor" { - return "mayor" + // Trim trailing slash for non-mayor addresses + if len(address) > 0 && address[len(address)-1] == '/' { + address = address[:len(address)-1] } // Replace / with - for beads identity diff --git a/internal/mq/id.go b/internal/mq/id.go index ce2753d3..f5a408b8 100644 --- a/internal/mq/id.go +++ b/internal/mq/id.go @@ -22,7 +22,7 @@ import ( func GenerateMRID(prefix, branch string) string { // Generate 8 random bytes for additional uniqueness randomBytes := make([]byte, 8) - rand.Read(randomBytes) + _, _ = rand.Read(randomBytes) return generateMRIDInternal(prefix, branch, time.Now(), randomBytes) } diff --git a/internal/polecat/manager.go b/internal/polecat/manager.go index dfc41c47..45f6b9b8 100644 --- a/internal/polecat/manager.go +++ b/internal/polecat/manager.go @@ -1,13 +1,13 @@ package polecat import ( - "encoding/json" "errors" "fmt" "os" 
"path/filepath" "time" + "github.com/steveyegge/gastown/internal/beads" "github.com/steveyegge/gastown/internal/git" "github.com/steveyegge/gastown/internal/rig" ) @@ -21,36 +21,42 @@ var ( // Manager handles polecat lifecycle. type Manager struct { - rig *rig.Rig - git *git.Git + rig *rig.Rig + git *git.Git + beads *beads.Beads } // NewManager creates a new polecat manager. func NewManager(r *rig.Rig, g *git.Git) *Manager { + // Use the mayor's rig directory for beads operations (rig-level beads) + mayorRigPath := filepath.Join(r.Path, "mayor", "rig") return &Manager{ - rig: r, - git: g, + rig: r, + git: g, + beads: beads.New(mayorRigPath), } } +// assigneeID returns the beads assignee identifier for a polecat. +// Format: "rig/polecatName" (e.g., "gastown/Toast") +func (m *Manager) assigneeID(name string) string { + return fmt.Sprintf("%s/%s", m.rig.Name, name) +} + // polecatDir returns the directory for a polecat. func (m *Manager) polecatDir(name string) string { return filepath.Join(m.rig.Path, "polecats", name) } -// stateFile returns the state file path for a polecat. -func (m *Manager) stateFile(name string) string { - return filepath.Join(m.polecatDir(name), "state.json") -} - // exists checks if a polecat exists. func (m *Manager) exists(name string) bool { _, err := os.Stat(m.polecatDir(name)) return err == nil } -// Add creates a new polecat as a git worktree from the refinery clone. -// This is much faster than a full clone and shares objects with the refinery. +// Add creates a new polecat as a git worktree from the mayor's clone. +// This is much faster than a full clone and shares objects with the mayor. +// Polecat state is derived from beads assignee field, not state.json. 
func (m *Manager) Add(name string) (*Polecat, error) { if m.exists(name) { return nil, ErrPolecatExists @@ -74,31 +80,39 @@ func (m *Manager) Add(name string) (*Polecat, error) { return nil, fmt.Errorf("mayor clone not found at %s (run 'gt rig add' to set up rig structure)", mayorPath) } - // Create worktree with new branch - // git worktree add -b polecat/ - if err := mayorGit.WorktreeAdd(polecatPath, branchName); err != nil { - return nil, fmt.Errorf("creating worktree: %w", err) + // Check if branch already exists (e.g., from previous polecat that wasn't cleaned up) + branchExists, err := mayorGit.BranchExists(branchName) + if err != nil { + return nil, fmt.Errorf("checking branch existence: %w", err) } - // Create polecat state + // Create worktree - reuse existing branch if it exists + if branchExists { + // Branch exists, create worktree using existing branch + if err := mayorGit.WorktreeAddExisting(polecatPath, branchName); err != nil { + return nil, fmt.Errorf("creating worktree with existing branch: %w", err) + } + } else { + // Create new branch with worktree + // git worktree add -b polecat/ + if err := mayorGit.WorktreeAdd(polecatPath, branchName); err != nil { + return nil, fmt.Errorf("creating worktree: %w", err) + } + } + + // Return polecat with derived state (no issue assigned yet = idle) + // State is derived from beads, not stored in state.json now := time.Now() polecat := &Polecat{ Name: name, Rig: m.rig.Name, - State: StateIdle, + State: StateIdle, // No issue assigned yet ClonePath: polecatPath, Branch: branchName, CreatedAt: now, UpdatedAt: now, } - // Save state - if err := m.saveState(polecat); err != nil { - // Clean up worktree on failure - mayorGit.WorktreeRemove(polecatPath, true) - return nil, fmt.Errorf("saving state: %w", err) - } - return polecat, nil } @@ -134,7 +148,7 @@ func (m *Manager) Remove(name string, force bool) error { } // Prune any stale worktree entries - mayorGit.WorktreePrune() + _ = mayorGit.WorktreePrune() return 
nil } @@ -168,120 +182,227 @@ func (m *Manager) List() ([]*Polecat, error) { } // Get returns a specific polecat by name. +// State is derived from beads assignee field: +// - If an issue is assigned to this polecat and is open/in_progress: StateWorking +// - If an issue is assigned but closed: StateDone +// - If no issue assigned: StateIdle func (m *Manager) Get(name string) (*Polecat, error) { if !m.exists(name) { return nil, ErrPolecatNotFound } - return m.loadState(name) + return m.loadFromBeads(name) } // SetState updates a polecat's state. +// In the beads model, state is derived from issue status: +// - StateWorking/StateActive: issue status set to in_progress +// - StateDone/StateIdle: assignee cleared from issue +// - StateStuck: issue status set to blocked (if supported) +// If beads is not available, this is a no-op. func (m *Manager) SetState(name string, state State) error { - polecat, err := m.Get(name) - if err != nil { - return err + if !m.exists(name) { + return ErrPolecatNotFound } - polecat.State = state - polecat.UpdatedAt = time.Now() + // Find the issue assigned to this polecat + assignee := m.assigneeID(name) + issue, err := m.beads.GetAssignedIssue(assignee) + if err != nil { + // If beads is not available, treat as no-op (state can't be changed) + return nil + } - return m.saveState(polecat) + switch state { + case StateWorking, StateActive: + // Set issue to in_progress if there is one + if issue != nil { + status := "in_progress" + if err := m.beads.Update(issue.ID, beads.UpdateOptions{Status: &status}); err != nil { + return fmt.Errorf("setting issue status: %w", err) + } + } + case StateDone, StateIdle: + // Clear assignment when done/idle + if issue != nil { + empty := "" + if err := m.beads.Update(issue.ID, beads.UpdateOptions{Assignee: &empty}); err != nil { + return fmt.Errorf("clearing assignee: %w", err) + } + } + case StateStuck: + // Mark issue as blocked if supported, otherwise just note in issue + if issue != nil { + // For 
now, just keep the assignment - the issue's blocked_by would indicate stuck + // We could add a status="blocked" here if beads supports it + } + } + + return nil } -// AssignIssue assigns an issue to a polecat. +// AssignIssue assigns an issue to a polecat by setting the issue's assignee in beads. func (m *Manager) AssignIssue(name, issue string) error { - polecat, err := m.Get(name) - if err != nil { - return err + if !m.exists(name) { + return ErrPolecatNotFound } - polecat.Issue = issue - polecat.State = StateWorking - polecat.UpdatedAt = time.Now() + // Set the issue's assignee to this polecat + assignee := m.assigneeID(name) + status := "in_progress" + if err := m.beads.Update(issue, beads.UpdateOptions{ + Assignee: &assignee, + Status: &status, + }); err != nil { + return fmt.Errorf("setting issue assignee: %w", err) + } - return m.saveState(polecat) + return nil } // ClearIssue removes the issue assignment from a polecat. +// In the ephemeral model, this transitions to Done state for cleanup. +// This clears the assignee from the currently assigned issue in beads. +// If beads is not available, this is a no-op. func (m *Manager) ClearIssue(name string) error { - polecat, err := m.Get(name) - if err != nil { - return err + if !m.exists(name) { + return ErrPolecatNotFound } - polecat.Issue = "" - polecat.State = StateIdle - polecat.UpdatedAt = time.Now() + // Find the issue assigned to this polecat + assignee := m.assigneeID(name) + issue, err := m.beads.GetAssignedIssue(assignee) + if err != nil { + // If beads is not available, treat as no-op + return nil + } - return m.saveState(polecat) + if issue == nil { + // No issue assigned, nothing to clear + return nil + } + + // Clear the assignee from the issue + empty := "" + if err := m.beads.Update(issue.ID, beads.UpdateOptions{ + Assignee: &empty, + }); err != nil { + return fmt.Errorf("clearing issue assignee: %w", err) + } + + return nil } // Wake transitions a polecat from idle to active. 
+// Deprecated: In the ephemeral model, polecats start in working state. +// This method is kept for backward compatibility with existing polecats. func (m *Manager) Wake(name string) error { polecat, err := m.Get(name) if err != nil { return err } - if polecat.State != StateIdle { + // Accept both idle and done states for legacy compatibility + if polecat.State != StateIdle && polecat.State != StateDone { return fmt.Errorf("polecat is not idle (state: %s)", polecat.State) } - return m.SetState(name, StateActive) + return m.SetState(name, StateWorking) } // Sleep transitions a polecat from active to idle. +// Deprecated: In the ephemeral model, polecats are deleted when done. +// This method is kept for backward compatibility. func (m *Manager) Sleep(name string) error { polecat, err := m.Get(name) if err != nil { return err } - if polecat.State != StateActive { + // Accept working state as well for legacy compatibility + if polecat.State != StateActive && polecat.State != StateWorking { return fmt.Errorf("polecat is not active (state: %s)", polecat.State) } - return m.SetState(name, StateIdle) + return m.SetState(name, StateDone) } -// saveState persists polecat state to disk. -func (m *Manager) saveState(polecat *Polecat) error { - data, err := json.MarshalIndent(polecat, "", " ") +// Finish transitions a polecat from working/done/stuck to idle and clears the issue. +// This clears the assignee from any assigned issue. 
+func (m *Manager) Finish(name string) error { + polecat, err := m.Get(name) if err != nil { - return fmt.Errorf("marshaling state: %w", err) + return err } - stateFile := m.stateFile(polecat.Name) - if err := os.WriteFile(stateFile, data, 0644); err != nil { - return fmt.Errorf("writing state: %w", err) + // Only allow finishing from working-related states + switch polecat.State { + case StateWorking, StateDone, StateStuck: + // OK to finish + default: + return fmt.Errorf("polecat is not in a finishing state (state: %s)", polecat.State) } - return nil + // Clear the issue assignment + return m.ClearIssue(name) } -// loadState reads polecat state from disk. -func (m *Manager) loadState(name string) (*Polecat, error) { - stateFile := m.stateFile(name) +// Reset forces a polecat to idle state regardless of current state. +// This clears the assignee from any assigned issue. +func (m *Manager) Reset(name string) error { + if !m.exists(name) { + return ErrPolecatNotFound + } - data, err := os.ReadFile(stateFile) + // Clear the issue assignment + return m.ClearIssue(name) +} + +// loadFromBeads derives polecat state from beads assignee field. 
+// State is derived as follows: +// - If an issue is assigned to this polecat and is open/in_progress: StateWorking +// - If no issue assigned: StateIdle +func (m *Manager) loadFromBeads(name string) (*Polecat, error) { + polecatPath := m.polecatDir(name) + branchName := fmt.Sprintf("polecat/%s", name) + + // Query beads for assigned issue + assignee := m.assigneeID(name) + issue, err := m.beads.GetAssignedIssue(assignee) if err != nil { - if os.IsNotExist(err) { - // Return minimal polecat if state file missing - return &Polecat{ - Name: name, - Rig: m.rig.Name, - State: StateIdle, - ClonePath: m.polecatDir(name), - }, nil + // If beads query fails, return basic polecat info + // This allows the system to work even if beads is not available + return &Polecat{ + Name: name, + Rig: m.rig.Name, + State: StateIdle, + ClonePath: polecatPath, + Branch: branchName, + }, nil + } + + // Derive state from issue + state := StateIdle + issueID := "" + if issue != nil { + issueID = issue.ID + switch issue.Status { + case "open", "in_progress": + state = StateWorking + case "closed": + state = StateDone + default: + // Unknown status, assume working if assigned + state = StateWorking } - return nil, fmt.Errorf("reading state: %w", err) } - var polecat Polecat - if err := json.Unmarshal(data, &polecat); err != nil { - return nil, fmt.Errorf("parsing state: %w", err) - } - - return &polecat, nil + return &Polecat{ + Name: name, + Rig: m.rig.Name, + State: state, + ClonePath: polecatPath, + Branch: branchName, + Issue: issueID, + }, nil } diff --git a/internal/polecat/manager_test.go b/internal/polecat/manager_test.go index e8b97a3b..df96c912 100644 --- a/internal/polecat/manager_test.go +++ b/internal/polecat/manager_test.go @@ -9,21 +9,22 @@ import ( "github.com/steveyegge/gastown/internal/rig" ) -func TestStateIsAvailable(t *testing.T) { +func TestStateIsActive(t *testing.T) { tests := []struct { - state State - available bool + state State + active bool }{ - {StateIdle, 
true}, - {StateActive, true}, - {StateWorking, false}, + {StateWorking, true}, {StateDone, false}, {StateStuck, false}, + // Legacy states are treated as active + {StateIdle, true}, + {StateActive, true}, } for _, tt := range tests { - if got := tt.state.IsAvailable(); got != tt.available { - t.Errorf("%s.IsAvailable() = %v, want %v", tt.state, got, tt.available) + if got := tt.state.IsActive(); got != tt.active { + t.Errorf("%s.IsActive() = %v, want %v", tt.state, got, tt.active) } } } @@ -105,7 +106,7 @@ func TestRemoveNotFound(t *testing.T) { } m := NewManager(r, git.NewGit(root)) - err := m.Remove("nonexistent") + err := m.Remove("nonexistent", false) if err != ErrPolecatNotFound { t.Errorf("Remove = %v, want ErrPolecatNotFound", err) } @@ -125,72 +126,72 @@ func TestPolecatDir(t *testing.T) { } } -func TestStateFile(t *testing.T) { +func TestAssigneeID(t *testing.T) { r := &rig.Rig{ Name: "test-rig", Path: "/home/user/ai/test-rig", } m := NewManager(r, git.NewGit(r.Path)) - file := m.stateFile("Toast") - expected := "/home/user/ai/test-rig/polecats/Toast/state.json" - if file != expected { - t.Errorf("stateFile = %q, want %q", file, expected) + id := m.assigneeID("Toast") + expected := "test-rig/Toast" + if id != expected { + t.Errorf("assigneeID = %q, want %q", id, expected) } } -func TestStatePersistence(t *testing.T) { +// Note: State persistence tests removed - state is now derived from beads assignee field. +// Integration tests should verify beads-based state management. 
+ +func TestGetReturnsIdleWithoutBeads(t *testing.T) { + // When beads is not available, Get should return StateIdle root := t.TempDir() polecatDir := filepath.Join(root, "polecats", "Test") if err := os.MkdirAll(polecatDir, 0755); err != nil { t.Fatalf("mkdir: %v", err) } + // Create mayor/rig directory for beads (but no actual beads) + mayorRigDir := filepath.Join(root, "mayor", "rig") + if err := os.MkdirAll(mayorRigDir, 0755); err != nil { + t.Fatalf("mkdir mayor/rig: %v", err) + } + r := &rig.Rig{ Name: "test-rig", Path: root, } m := NewManager(r, git.NewGit(root)) - // Save state - polecat := &Polecat{ - Name: "Test", - Rig: "test-rig", - State: StateWorking, - ClonePath: polecatDir, - Issue: "gt-xyz", - } - if err := m.saveState(polecat); err != nil { - t.Fatalf("saveState: %v", err) - } - - // Load state - loaded, err := m.loadState("Test") + // Get should return polecat with StateIdle (no beads = no assignment) + polecat, err := m.Get("Test") if err != nil { - t.Fatalf("loadState: %v", err) + t.Fatalf("Get: %v", err) } - if loaded.Name != "Test" { - t.Errorf("Name = %q, want Test", loaded.Name) + if polecat.Name != "Test" { + t.Errorf("Name = %q, want Test", polecat.Name) } - if loaded.State != StateWorking { - t.Errorf("State = %v, want StateWorking", loaded.State) - } - if loaded.Issue != "gt-xyz" { - t.Errorf("Issue = %q, want gt-xyz", loaded.Issue) + if polecat.State != StateIdle { + t.Errorf("State = %v, want StateIdle (beads not available)", polecat.State) } } func TestListWithPolecats(t *testing.T) { root := t.TempDir() - // Create some polecat directories with state files + // Create some polecat directories (state is now derived from beads, not state files) for _, name := range []string{"Toast", "Cheedo"} { polecatDir := filepath.Join(root, "polecats", name) if err := os.MkdirAll(polecatDir, 0755); err != nil { t.Fatalf("mkdir: %v", err) } } + // Create mayor/rig for beads path + mayorRig := filepath.Join(root, "mayor", "rig") + if err := 
os.MkdirAll(mayorRig, 0755); err != nil { + t.Fatalf("mkdir mayor/rig: %v", err) + } r := &rig.Rig{ Name: "test-rig", @@ -207,12 +208,23 @@ func TestListWithPolecats(t *testing.T) { } } -func TestSetState(t *testing.T) { +// Note: TestSetState, TestAssignIssue, and TestClearIssue were removed. +// These operations now require a running beads instance and are tested +// via integration tests. The unit tests here focus on testing the basic +// polecat lifecycle operations that don't require beads. + +func TestSetStateWithoutBeads(t *testing.T) { + // SetState should not error when beads is not available root := t.TempDir() polecatDir := filepath.Join(root, "polecats", "Test") if err := os.MkdirAll(polecatDir, 0755); err != nil { t.Fatalf("mkdir: %v", err) } + // Create mayor/rig for beads path + mayorRig := filepath.Join(root, "mayor", "rig") + if err := os.MkdirAll(mayorRig, 0755); err != nil { + t.Fatalf("mkdir mayor/rig: %v", err) + } r := &rig.Rig{ Name: "test-rig", @@ -220,32 +232,25 @@ func TestSetState(t *testing.T) { } m := NewManager(r, git.NewGit(root)) - // Initial state - if err := m.saveState(&Polecat{Name: "Test", State: StateIdle}); err != nil { - t.Fatalf("saveState: %v", err) - } - - // Update state - if err := m.SetState("Test", StateActive); err != nil { - t.Fatalf("SetState: %v", err) - } - - // Verify - polecat, err := m.Get("Test") + // SetState should succeed (no-op when no issue assigned) + err := m.SetState("Test", StateActive) if err != nil { - t.Fatalf("Get: %v", err) - } - if polecat.State != StateActive { - t.Errorf("State = %v, want StateActive", polecat.State) + t.Errorf("SetState: %v (expected no error when no beads/issue)", err) } } -func TestAssignIssue(t *testing.T) { +func TestClearIssueWithoutAssignment(t *testing.T) { + // ClearIssue should not error when no issue is assigned root := t.TempDir() polecatDir := filepath.Join(root, "polecats", "Test") if err := os.MkdirAll(polecatDir, 0755); err != nil { t.Fatalf("mkdir: %v", err) } 
+ // Create mayor/rig for beads path + mayorRig := filepath.Join(root, "mayor", "rig") + if err := os.MkdirAll(mayorRig, 0755); err != nil { + t.Fatalf("mkdir mayor/rig: %v", err) + } r := &rig.Rig{ Name: "test-rig", @@ -253,61 +258,9 @@ func TestAssignIssue(t *testing.T) { } m := NewManager(r, git.NewGit(root)) - // Initial state - if err := m.saveState(&Polecat{Name: "Test", State: StateIdle}); err != nil { - t.Fatalf("saveState: %v", err) - } - - // Assign issue - if err := m.AssignIssue("Test", "gt-abc"); err != nil { - t.Fatalf("AssignIssue: %v", err) - } - - // Verify - polecat, err := m.Get("Test") + // ClearIssue should succeed even when no issue assigned + err := m.ClearIssue("Test") if err != nil { - t.Fatalf("Get: %v", err) - } - if polecat.Issue != "gt-abc" { - t.Errorf("Issue = %q, want gt-abc", polecat.Issue) - } - if polecat.State != StateWorking { - t.Errorf("State = %v, want StateWorking", polecat.State) - } -} - -func TestClearIssue(t *testing.T) { - root := t.TempDir() - polecatDir := filepath.Join(root, "polecats", "Test") - if err := os.MkdirAll(polecatDir, 0755); err != nil { - t.Fatalf("mkdir: %v", err) - } - - r := &rig.Rig{ - Name: "test-rig", - Path: root, - } - m := NewManager(r, git.NewGit(root)) - - // Initial state with issue - if err := m.saveState(&Polecat{Name: "Test", State: StateWorking, Issue: "gt-abc"}); err != nil { - t.Fatalf("saveState: %v", err) - } - - // Clear issue - if err := m.ClearIssue("Test"); err != nil { - t.Fatalf("ClearIssue: %v", err) - } - - // Verify - polecat, err := m.Get("Test") - if err != nil { - t.Fatalf("Get: %v", err) - } - if polecat.Issue != "" { - t.Errorf("Issue = %q, want empty", polecat.Issue) - } - if polecat.State != StateIdle { - t.Errorf("State = %v, want StateIdle", polecat.State) + t.Errorf("ClearIssue: %v (expected no error when no assignment)", err) } } diff --git a/internal/polecat/types.go b/internal/polecat/types.go index 1d31a964..03687e9a 100644 --- a/internal/polecat/types.go +++ 
b/internal/polecat/types.go @@ -4,35 +4,39 @@ package polecat import "time" // State represents the current state of a polecat. +// In the ephemeral model, polecats exist only while working. type State string const ( - // StateIdle means the polecat is not actively working. - StateIdle State = "idle" - - // StateActive means the polecat session is running but not assigned work. - StateActive State = "active" - // StateWorking means the polecat is actively working on an issue. + // This is the initial and primary state for ephemeral polecats. StateWorking State = "working" - // StateDone means the polecat has completed its assigned work. + // StateDone means the polecat has completed its assigned work + // and is ready for cleanup by the Witness. StateDone State = "done" // StateStuck means the polecat needs assistance. StateStuck State = "stuck" -) -// IsAvailable returns true if the polecat can be assigned new work. -func (s State) IsAvailable() bool { - return s == StateIdle || s == StateActive -} + // Legacy states for backward compatibility during transition. + // New code should not use these. + StateIdle State = "idle" // Deprecated: use StateWorking + StateActive State = "active" // Deprecated: use StateWorking +) // IsWorking returns true if the polecat is currently working. func (s State) IsWorking() bool { return s == StateWorking } +// IsActive returns true if the polecat session is actively working. +// For ephemeral polecats, this is true for working state and +// legacy idle/active states (treated as working). +func (s State) IsActive() bool { + return s == StateWorking || s == StateIdle || s == StateActive +} + // Polecat represents a worker agent in a rig. type Polecat struct { // Name is the polecat identifier. 
diff --git a/internal/refinery/engineer.go b/internal/refinery/engineer.go new file mode 100644 index 00000000..5eea4b81 --- /dev/null +++ b/internal/refinery/engineer.go @@ -0,0 +1,321 @@ +// Package refinery provides the merge queue processing agent. +package refinery + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/steveyegge/gastown/internal/beads" + "github.com/steveyegge/gastown/internal/rig" +) + +// MergeQueueConfig holds configuration for the merge queue processor. +type MergeQueueConfig struct { + // Enabled controls whether the merge queue is active. + Enabled bool `json:"enabled"` + + // TargetBranch is the default branch to merge to (e.g., "main"). + TargetBranch string `json:"target_branch"` + + // IntegrationBranches enables per-epic integration branches. + IntegrationBranches bool `json:"integration_branches"` + + // OnConflict is the strategy for handling conflicts: "assign_back" or "auto_rebase". + OnConflict string `json:"on_conflict"` + + // RunTests controls whether to run tests before merging. + RunTests bool `json:"run_tests"` + + // TestCommand is the command to run for testing. + TestCommand string `json:"test_command"` + + // DeleteMergedBranches controls whether to delete branches after merge. + DeleteMergedBranches bool `json:"delete_merged_branches"` + + // RetryFlakyTests is the number of times to retry flaky tests. + RetryFlakyTests int `json:"retry_flaky_tests"` + + // PollInterval is how often to check for new MRs. + PollInterval time.Duration `json:"poll_interval"` + + // MaxConcurrent is the maximum number of MRs to process concurrently. + MaxConcurrent int `json:"max_concurrent"` +} + +// DefaultMergeQueueConfig returns sensible defaults for merge queue configuration. 
+func DefaultMergeQueueConfig() *MergeQueueConfig { + return &MergeQueueConfig{ + Enabled: true, + TargetBranch: "main", + IntegrationBranches: true, + OnConflict: "assign_back", + RunTests: true, + TestCommand: "", + DeleteMergedBranches: true, + RetryFlakyTests: 1, + PollInterval: 30 * time.Second, + MaxConcurrent: 1, + } +} + +// Engineer is the merge queue processor that polls for ready merge-requests +// and processes them according to the merge queue design. +type Engineer struct { + rig *rig.Rig + beads *beads.Beads + config *MergeQueueConfig + workDir string + + // stopCh is used for graceful shutdown + stopCh chan struct{} +} + +// NewEngineer creates a new Engineer for the given rig. +func NewEngineer(r *rig.Rig) *Engineer { + return &Engineer{ + rig: r, + beads: beads.New(r.Path), + config: DefaultMergeQueueConfig(), + workDir: r.Path, + stopCh: make(chan struct{}), + } +} + +// LoadConfig loads merge queue configuration from the rig's config.json. +func (e *Engineer) LoadConfig() error { + configPath := filepath.Join(e.rig.Path, "config.json") + data, err := os.ReadFile(configPath) + if err != nil { + if os.IsNotExist(err) { + // Use defaults if no config file + return nil + } + return fmt.Errorf("reading config: %w", err) + } + + // Parse config file to extract merge_queue section + var rawConfig struct { + MergeQueue json.RawMessage `json:"merge_queue"` + } + if err := json.Unmarshal(data, &rawConfig); err != nil { + return fmt.Errorf("parsing config: %w", err) + } + + if rawConfig.MergeQueue == nil { + // No merge_queue section, use defaults + return nil + } + + // Parse merge_queue section into our config struct + // We need special handling for poll_interval (string -> Duration) + var mqRaw struct { + Enabled *bool `json:"enabled"` + TargetBranch *string `json:"target_branch"` + IntegrationBranches *bool `json:"integration_branches"` + OnConflict *string `json:"on_conflict"` + RunTests *bool `json:"run_tests"` + TestCommand *string 
`json:"test_command"` + DeleteMergedBranches *bool `json:"delete_merged_branches"` + RetryFlakyTests *int `json:"retry_flaky_tests"` + PollInterval *string `json:"poll_interval"` + MaxConcurrent *int `json:"max_concurrent"` + } + + if err := json.Unmarshal(rawConfig.MergeQueue, &mqRaw); err != nil { + return fmt.Errorf("parsing merge_queue config: %w", err) + } + + // Apply non-nil values to config (preserving defaults for missing fields) + if mqRaw.Enabled != nil { + e.config.Enabled = *mqRaw.Enabled + } + if mqRaw.TargetBranch != nil { + e.config.TargetBranch = *mqRaw.TargetBranch + } + if mqRaw.IntegrationBranches != nil { + e.config.IntegrationBranches = *mqRaw.IntegrationBranches + } + if mqRaw.OnConflict != nil { + e.config.OnConflict = *mqRaw.OnConflict + } + if mqRaw.RunTests != nil { + e.config.RunTests = *mqRaw.RunTests + } + if mqRaw.TestCommand != nil { + e.config.TestCommand = *mqRaw.TestCommand + } + if mqRaw.DeleteMergedBranches != nil { + e.config.DeleteMergedBranches = *mqRaw.DeleteMergedBranches + } + if mqRaw.RetryFlakyTests != nil { + e.config.RetryFlakyTests = *mqRaw.RetryFlakyTests + } + if mqRaw.MaxConcurrent != nil { + e.config.MaxConcurrent = *mqRaw.MaxConcurrent + } + if mqRaw.PollInterval != nil { + dur, err := time.ParseDuration(*mqRaw.PollInterval) + if err != nil { + return fmt.Errorf("invalid poll_interval %q: %w", *mqRaw.PollInterval, err) + } + e.config.PollInterval = dur + } + + return nil +} + +// Config returns the current merge queue configuration. +func (e *Engineer) Config() *MergeQueueConfig { + return e.config +} + +// Run starts the Engineer main loop. It blocks until the context is cancelled +// or Stop() is called. Returns nil on graceful shutdown. 
+func (e *Engineer) Run(ctx context.Context) error { + if err := e.LoadConfig(); err != nil { + return fmt.Errorf("loading config: %w", err) + } + + if !e.config.Enabled { + return fmt.Errorf("merge queue is disabled in configuration") + } + + fmt.Printf("[Engineer] Starting for rig %s (poll_interval=%s)\n", + e.rig.Name, e.config.PollInterval) + + ticker := time.NewTicker(e.config.PollInterval) + defer ticker.Stop() + + // Run one iteration immediately, then on ticker + if err := e.processOnce(ctx); err != nil { + fmt.Printf("[Engineer] Error: %v\n", err) + } + + for { + select { + case <-ctx.Done(): + fmt.Println("[Engineer] Shutting down (context cancelled)") + return nil + case <-e.stopCh: + fmt.Println("[Engineer] Shutting down (stop signal)") + return nil + case <-ticker.C: + if err := e.processOnce(ctx); err != nil { + fmt.Printf("[Engineer] Error: %v\n", err) + } + } + } +} + +// Stop signals the Engineer to stop processing. This is a non-blocking call. +func (e *Engineer) Stop() { + close(e.stopCh) +} + +// processOnce performs one iteration of the Engineer loop: +// 1. Query for ready merge-requests +// 2. If none, return (will try again on next tick) +// 3. Process the highest priority, oldest MR +func (e *Engineer) processOnce(ctx context.Context) error { + // Check context before starting + select { + case <-ctx.Done(): + return nil + default: + } + + // 1. Query: bd ready --type=merge-request (filtered client-side) + readyMRs, err := e.beads.ReadyWithType("merge-request") + if err != nil { + return fmt.Errorf("querying ready merge-requests: %w", err) + } + + // 2. If empty, return + if len(readyMRs) == 0 { + return nil + } + + // 3. Select highest priority, oldest MR + // bd ready already returns sorted by priority then age, so first is best + mr := readyMRs[0] + + fmt.Printf("[Engineer] Processing: %s (%s)\n", mr.ID, mr.Title) + + // 4. 
Claim: bd update --status=in_progress + inProgress := "in_progress" + if err := e.beads.Update(mr.ID, beads.UpdateOptions{Status: &inProgress}); err != nil { + return fmt.Errorf("claiming MR %s: %w", mr.ID, err) + } + + // 5. Process (delegate to ProcessMR - implementation in separate issue gt-3x1.2) + result := e.ProcessMR(ctx, mr) + + // 6. Handle result + if result.Success { + // Close with merged reason + reason := fmt.Sprintf("merged: %s", result.MergeCommit) + if err := e.beads.CloseWithReason(reason, mr.ID); err != nil { + fmt.Printf("[Engineer] Warning: failed to close MR %s: %v\n", mr.ID, err) + } + fmt.Printf("[Engineer] ✓ Merged: %s\n", mr.ID) + } else { + // Failure handling (detailed implementation in gt-3x1.4) + e.handleFailure(mr, result) + } + + return nil +} + +// ProcessResult contains the result of processing a merge request. +type ProcessResult struct { + Success bool + MergeCommit string + Error string + Conflict bool + TestsFailed bool +} + +// ProcessMR processes a single merge request. +// This is a placeholder that will be fully implemented in gt-3x1.2. +func (e *Engineer) ProcessMR(ctx context.Context, mr *beads.Issue) ProcessResult { + // Parse MR fields from description + mrFields := beads.ParseMRFields(mr) + if mrFields == nil { + return ProcessResult{ + Success: false, + Error: "no MR fields found in description", + } + } + + // For now, just log what we would do + // Full implementation in gt-3x1.2: Fetch and conflict check + fmt.Printf("[Engineer] Would process:\n") + fmt.Printf(" Branch: %s\n", mrFields.Branch) + fmt.Printf(" Target: %s\n", mrFields.Target) + fmt.Printf(" Worker: %s\n", mrFields.Worker) + + // Return failure for now - actual implementation in gt-3x1.2 + return ProcessResult{ + Success: false, + Error: "ProcessMR not fully implemented (see gt-3x1.2)", + } +} + +// handleFailure handles a failed merge request. +// This is a placeholder that will be fully implemented in gt-3x1.4. 
+func (e *Engineer) handleFailure(mr *beads.Issue, result ProcessResult) { + // Reopen the MR (back to open status for rework) + open := "open" + if err := e.beads.Update(mr.ID, beads.UpdateOptions{Status: &open}); err != nil { + fmt.Printf("[Engineer] Warning: failed to reopen MR %s: %v\n", mr.ID, err) + } + + // Log the failure + fmt.Printf("[Engineer] ✗ Failed: %s - %s\n", mr.ID, result.Error) + + // Full failure handling (assign back to worker, labels) in gt-3x1.4 +} diff --git a/internal/refinery/engineer_test.go b/internal/refinery/engineer_test.go new file mode 100644 index 00000000..18174dee --- /dev/null +++ b/internal/refinery/engineer_test.go @@ -0,0 +1,209 @@ +package refinery + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/steveyegge/gastown/internal/rig" +) + +func TestDefaultMergeQueueConfig(t *testing.T) { + cfg := DefaultMergeQueueConfig() + + if !cfg.Enabled { + t.Error("expected Enabled to be true by default") + } + if cfg.TargetBranch != "main" { + t.Errorf("expected TargetBranch to be 'main', got %q", cfg.TargetBranch) + } + if cfg.PollInterval != 30*time.Second { + t.Errorf("expected PollInterval to be 30s, got %v", cfg.PollInterval) + } + if cfg.MaxConcurrent != 1 { + t.Errorf("expected MaxConcurrent to be 1, got %d", cfg.MaxConcurrent) + } + if cfg.OnConflict != "assign_back" { + t.Errorf("expected OnConflict to be 'assign_back', got %q", cfg.OnConflict) + } +} + +func TestEngineer_LoadConfig_NoFile(t *testing.T) { + // Create a temp directory without config.json + tmpDir, err := os.MkdirTemp("", "engineer-test-*") + if err != nil { + t.Fatal(err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + r := &rig.Rig{ + Name: "test-rig", + Path: tmpDir, + } + + e := NewEngineer(r) + + // Should not error with missing config file + if err := e.LoadConfig(); err != nil { + t.Errorf("unexpected error with missing config: %v", err) + } + + // Should use defaults + if e.config.PollInterval != 
30*time.Second { + t.Errorf("expected default PollInterval, got %v", e.config.PollInterval) + } +} + +func TestEngineer_LoadConfig_WithMergeQueue(t *testing.T) { + // Create a temp directory with config.json + tmpDir, err := os.MkdirTemp("", "engineer-test-*") + if err != nil { + t.Fatal(err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Write config file + config := map[string]interface{}{ + "type": "rig", + "version": 1, + "name": "test-rig", + "merge_queue": map[string]interface{}{ + "enabled": true, + "target_branch": "develop", + "poll_interval": "10s", + "max_concurrent": 2, + "run_tests": false, + "test_command": "make test", + }, + } + + data, _ := json.MarshalIndent(config, "", " ") + if err := os.WriteFile(filepath.Join(tmpDir, "config.json"), data, 0644); err != nil { + t.Fatal(err) + } + + r := &rig.Rig{ + Name: "test-rig", + Path: tmpDir, + } + + e := NewEngineer(r) + + if err := e.LoadConfig(); err != nil { + t.Errorf("unexpected error loading config: %v", err) + } + + // Check that config values were loaded + if e.config.TargetBranch != "develop" { + t.Errorf("expected TargetBranch 'develop', got %q", e.config.TargetBranch) + } + if e.config.PollInterval != 10*time.Second { + t.Errorf("expected PollInterval 10s, got %v", e.config.PollInterval) + } + if e.config.MaxConcurrent != 2 { + t.Errorf("expected MaxConcurrent 2, got %d", e.config.MaxConcurrent) + } + if e.config.RunTests != false { + t.Errorf("expected RunTests false, got %v", e.config.RunTests) + } + if e.config.TestCommand != "make test" { + t.Errorf("expected TestCommand 'make test', got %q", e.config.TestCommand) + } + + // Check that defaults are preserved for unspecified fields + if e.config.OnConflict != "assign_back" { + t.Errorf("expected OnConflict default 'assign_back', got %q", e.config.OnConflict) + } +} + +func TestEngineer_LoadConfig_NoMergeQueueSection(t *testing.T) { + // Create a temp directory with config.json without merge_queue + tmpDir, err := os.MkdirTemp("", 
"engineer-test-*") + if err != nil { + t.Fatal(err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + // Write config file without merge_queue + config := map[string]interface{}{ + "type": "rig", + "version": 1, + "name": "test-rig", + } + + data, _ := json.MarshalIndent(config, "", " ") + if err := os.WriteFile(filepath.Join(tmpDir, "config.json"), data, 0644); err != nil { + t.Fatal(err) + } + + r := &rig.Rig{ + Name: "test-rig", + Path: tmpDir, + } + + e := NewEngineer(r) + + if err := e.LoadConfig(); err != nil { + t.Errorf("unexpected error loading config: %v", err) + } + + // Should use all defaults + if e.config.PollInterval != 30*time.Second { + t.Errorf("expected default PollInterval, got %v", e.config.PollInterval) + } +} + +func TestEngineer_LoadConfig_InvalidPollInterval(t *testing.T) { + tmpDir, err := os.MkdirTemp("", "engineer-test-*") + if err != nil { + t.Fatal(err) + } + defer func() { _ = os.RemoveAll(tmpDir) }() + + config := map[string]interface{}{ + "merge_queue": map[string]interface{}{ + "poll_interval": "not-a-duration", + }, + } + + data, _ := json.MarshalIndent(config, "", " ") + if err := os.WriteFile(filepath.Join(tmpDir, "config.json"), data, 0644); err != nil { + t.Fatal(err) + } + + r := &rig.Rig{ + Name: "test-rig", + Path: tmpDir, + } + + e := NewEngineer(r) + + err = e.LoadConfig() + if err == nil { + t.Error("expected error for invalid poll_interval") + } +} + +func TestNewEngineer(t *testing.T) { + r := &rig.Rig{ + Name: "test-rig", + Path: "/tmp/test-rig", + } + + e := NewEngineer(r) + + if e.rig != r { + t.Error("expected rig to be set") + } + if e.beads == nil { + t.Error("expected beads client to be initialized") + } + if e.config == nil { + t.Error("expected config to be initialized with defaults") + } + if e.stopCh == nil { + t.Error("expected stopCh to be initialized") + } +} diff --git a/internal/refinery/manager.go b/internal/refinery/manager.go index 416e596a..785c5bad 100644 --- a/internal/refinery/manager.go +++ 
b/internal/refinery/manager.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "log" "os" "os/exec" "path/filepath" @@ -13,9 +12,9 @@ import ( "strings" "time" - "github.com/steveyegge/gastown/internal/beads" "github.com/steveyegge/gastown/internal/mail" "github.com/steveyegge/gastown/internal/rig" + "github.com/steveyegge/gastown/internal/tmux" ) // Common errors @@ -44,6 +43,11 @@ func (m *Manager) stateFile() string { return filepath.Join(m.rig.Path, ".gastown", "refinery.json") } +// sessionName returns the tmux session name for this refinery. +func (m *Manager) sessionName() string { + return fmt.Sprintf("gt-%s-refinery", m.rig.Name) +} + // loadState loads refinery state from disk. func (m *Manager) loadState() (*Refinery, error) { data, err := os.ReadFile(m.stateFile()) @@ -87,13 +91,35 @@ func (m *Manager) Status() (*Refinery, error) { return nil, err } - // If running, verify process is still alive - if ref.State == StateRunning && ref.PID > 0 { - if !processExists(ref.PID) { - ref.State = StateStopped - ref.PID = 0 - m.saveState(ref) + // Check if tmux session exists + t := tmux.NewTmux() + sessionID := m.sessionName() + sessionRunning, _ := t.HasSession(sessionID) + + // If tmux session is running, refinery is running + if sessionRunning { + if ref.State != StateRunning { + // Update state to match reality + now := time.Now() + ref.State = StateRunning + if ref.StartedAt == nil { + ref.StartedAt = &now + } + _ = m.saveState(ref) } + return ref, nil + } + + // If state says running but tmux session doesn't exist, check PID + if ref.State == StateRunning { + if ref.PID > 0 && processExists(ref.PID) { + // Process is still running (foreground mode without tmux) + return ref, nil + } + // Neither session nor process exists - mark as stopped + ref.State = StateStopped + ref.PID = 0 + _ = m.saveState(ref) } return ref, nil @@ -101,33 +127,63 @@ func (m *Manager) Status() (*Refinery, error) { // Start starts the refinery. 
// If foreground is true, runs in the current process (blocking). -// Otherwise, spawns a background process. +// Otherwise, spawns a tmux session running the refinery in foreground mode. func (m *Manager) Start(foreground bool) error { ref, err := m.loadState() if err != nil { return err } + // Check if already running via tmux session + t := tmux.NewTmux() + sessionID := m.sessionName() + running, _ := t.HasSession(sessionID) + if running { + return ErrAlreadyRunning + } + + // Also check via PID for backwards compatibility if ref.State == StateRunning && ref.PID > 0 && processExists(ref.PID) { return ErrAlreadyRunning } - now := time.Now() - ref.State = StateRunning - ref.StartedAt = &now - ref.PID = os.Getpid() // For foreground mode; background would set actual PID - - if err := m.saveState(ref); err != nil { - return err - } - if foreground { + // Running in foreground - update state and run + now := time.Now() + ref.State = StateRunning + ref.StartedAt = &now + ref.PID = os.Getpid() + + if err := m.saveState(ref); err != nil { + return err + } + // Run the processing loop (blocking) return m.run(ref) } - // Background mode: spawn a new process - // For MVP, we just mark as running - actual daemon implementation in gt-ov2 + // Background mode: spawn a tmux session running the refinery + if err := t.NewSession(sessionID, m.workDir); err != nil { + return fmt.Errorf("creating tmux session: %w", err) + } + + // Set environment variables + _ = t.SetEnvironment(sessionID, "GT_RIG", m.rig.Name) + _ = t.SetEnvironment(sessionID, "GT_REFINERY", "1") + + // Apply theme (same as rig polecats) + theme := tmux.AssignTheme(m.rig.Name) + _ = t.ConfigureGasTownSession(sessionID, theme, m.rig.Name, "refinery", "refinery") + + // Send the command to start refinery in foreground mode + // The foreground mode handles state updates and the processing loop + command := fmt.Sprintf("gt refinery start %s --foreground", m.rig.Name) + if err := t.SendKeys(sessionID, command); err != 
nil { + // Clean up the session on failure + _ = t.KillSession(sessionID) + return fmt.Errorf("starting refinery: %w", err) + } + return nil } @@ -138,15 +194,26 @@ func (m *Manager) Stop() error { return err } - if ref.State != StateRunning { + // Check if tmux session exists + t := tmux.NewTmux() + sessionID := m.sessionName() + sessionRunning, _ := t.HasSession(sessionID) + + // If neither state nor session indicates running, it's not running + if ref.State != StateRunning && !sessionRunning { return ErrNotRunning } - // If we have a PID, try to stop it gracefully - if ref.PID > 0 && ref.PID != os.Getpid() { + // Kill tmux session if it exists + if sessionRunning { + _ = t.KillSession(sessionID) + } + + // If we have a PID and it's a different process, try to stop it gracefully + if ref.PID > 0 && ref.PID != os.Getpid() && processExists(ref.PID) { // Send SIGTERM if proc, err := os.FindProcess(ref.PID); err == nil { - proc.Signal(os.Interrupt) + _ = proc.Signal(os.Interrupt) } } @@ -258,15 +325,13 @@ func (m *Manager) run(ref *Refinery) error { ticker := time.NewTicker(10 * time.Second) defer ticker.Stop() - for { - select { - case <-ticker.C: - // Process queue - if err := m.ProcessQueue(); err != nil { - fmt.Printf("Queue processing error: %v\n", err) - } + for range ticker.C { + // Process queue + if err := m.ProcessQueue(); err != nil { + fmt.Printf("Queue processing error: %v\n", err) } } + return nil } // ProcessQueue processes all pending merge requests. @@ -297,50 +362,44 @@ func (m *Manager) ProcessQueue() error { // MergeResult contains the result of a merge attempt. type MergeResult struct { Success bool + MergeCommit string // SHA of merge commit on success Error string - FailureType FailureType - RetryCount int // Number of retries attempted + Conflict bool + TestsFailed bool } // ProcessMR processes a single merge request. 
func (m *Manager) ProcessMR(mr *MergeRequest) MergeResult { - return m.processMRWithRetry(mr, 0) -} - -// processMRWithRetry processes a merge request with retry support. -func (m *Manager) processMRWithRetry(mr *MergeRequest, retryCount int) MergeResult { ref, _ := m.loadState() + config := m.getMergeConfig() // Claim the MR (open → in_progress) if err := mr.Claim(); err != nil { return MergeResult{Error: fmt.Sprintf("cannot claim MR: %v", err)} } ref.CurrentMR = mr - m.saveState(ref) + _ = m.saveState(ref) - result := MergeResult{RetryCount: retryCount} + result := MergeResult{} // 1. Fetch the branch if err := m.gitRun("fetch", "origin", mr.Branch); err != nil { result.Error = fmt.Sprintf("fetch failed: %v", err) - result.FailureType = FailureFetch - m.handleFailure(mr, result) + m.completeMR(mr, "", result.Error) // Reopen for retry return result } - // 2. Attempt merge to target branch - // First, checkout target + // 2. Checkout target branch if err := m.gitRun("checkout", mr.TargetBranch); err != nil { result.Error = fmt.Sprintf("checkout target failed: %v", err) - result.FailureType = FailureCheckout - m.handleFailure(mr, result) + m.completeMR(mr, "", result.Error) // Reopen for retry return result } // Pull latest - m.gitRun("pull", "origin", mr.TargetBranch) // Ignore errors + _ = m.gitRun("pull", "origin", mr.TargetBranch) // Ignore errors - // Merge + // 3. 
Merge err := m.gitRun("merge", "--no-ff", "-m", fmt.Sprintf("Merge %s from %s", mr.Branch, mr.Worker), "origin/"+mr.Branch) @@ -348,258 +407,63 @@ func (m *Manager) processMRWithRetry(mr *MergeRequest, retryCount int) MergeResu if err != nil { errStr := err.Error() if strings.Contains(errStr, "CONFLICT") || strings.Contains(errStr, "conflict") { - result.FailureType = FailureConflict + result.Conflict = true result.Error = "merge conflict" // Abort the merge - m.gitRun("merge", "--abort") - m.handleFailure(mr, result) + _ = m.gitRun("merge", "--abort") + m.completeMR(mr, "", "merge conflict - polecat must rebase") // Reopen for rebase + // Notify worker about conflict + m.notifyWorkerConflict(mr) return result } result.Error = fmt.Sprintf("merge failed: %v", err) - m.handleFailure(mr, result) + m.completeMR(mr, "", result.Error) // Reopen for retry return result } - // 3. Run tests if configured - testCmd := m.getTestCommand() - if testCmd != "" { - if err := m.runTests(testCmd); err != nil { - // Reset to before merge - m.gitRun("reset", "--hard", "HEAD~1") - - // Check if this might be a flaky test (retry once) - retryFlakyTests := m.getRetryFlakyTests() - if retryCount < retryFlakyTests { - log.Printf("[MQ] Test failure on attempt %d, retrying (may be flaky)...", retryCount+1) - // Reopen the MR for retry - mr.Reopen() - return m.processMRWithRetry(mr, retryCount+1) - } - - result.FailureType = FailureTestsFail + // 4. Run tests if configured + if config.RunTests && config.TestCommand != "" { + if err := m.runTests(config.TestCommand); err != nil { + result.TestsFailed = true result.Error = fmt.Sprintf("tests failed: %v", err) - m.handleFailure(mr, result) + // Reset to before merge + _ = m.gitRun("reset", "--hard", "HEAD~1") + m.completeMR(mr, "", result.Error) // Reopen for fixes return result } } - // 4. 
Push with retry for transient failures - pushErr := m.pushWithRetry(mr.TargetBranch, 3) - if pushErr != nil { - result.Error = fmt.Sprintf("push failed: %v", pushErr) - result.FailureType = FailurePushFail + // 5. Push with retry logic + if err := m.pushWithRetry(mr.TargetBranch, config); err != nil { + result.Error = fmt.Sprintf("push failed: %v", err) // Reset to before merge - m.gitRun("reset", "--hard", "HEAD~1") - m.handleFailure(mr, result) + _ = m.gitRun("reset", "--hard", "HEAD~1") + m.completeMR(mr, "", result.Error) // Reopen for retry return result } + // 6. Get merge commit SHA + mergeCommit, err := m.gitOutput("rev-parse", "HEAD") + if err != nil { + mergeCommit = "" // Non-fatal, continue + } + // Success! result.Success = true + result.MergeCommit = mergeCommit m.completeMR(mr, CloseReasonMerged, "") // Notify worker of success m.notifyWorkerMerged(mr) // Optionally delete the merged branch - m.gitRun("push", "origin", "--delete", mr.Branch) + if config.DeleteMergedBranches { + _ = m.gitRun("push", "origin", "--delete", mr.Branch) + } return result } -// pushWithRetry attempts to push with exponential backoff. -func (m *Manager) pushWithRetry(branch string, maxRetries int) error { - var lastErr error - for i := 0; i < maxRetries; i++ { - if i > 0 { - // Exponential backoff: 1s, 2s, 4s... - backoff := time.Duration(1< 0 { - return config.MergeQueue.RetryFlakyTests - } - return 1 -} - // completeMR marks an MR as complete and updates stats. // For success, pass closeReason (e.g., CloseReasonMerged). // For failures that should return to open, pass empty closeReason. @@ -638,7 +502,7 @@ func (m *Manager) completeMR(mr *MergeRequest, closeReason CloseReason, errMsg s ref.Stats.TodayFailed++ } - m.saveState(ref) + _ = m.saveState(ref) } // getTestCommand returns the test command if configured. @@ -699,6 +563,89 @@ func (m *Manager) gitRun(args ...string) error { return nil } +// gitOutput executes a git command and returns stdout. 
+func (m *Manager) gitOutput(args ...string) (string, error) { + cmd := exec.Command("git", args...) + cmd.Dir = m.workDir + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + errMsg := strings.TrimSpace(stderr.String()) + if errMsg != "" { + return "", fmt.Errorf("%s", errMsg) + } + return "", err + } + + return strings.TrimSpace(stdout.String()), nil +} + +// getMergeConfig loads the merge configuration from disk. +// Returns default config if not configured. +func (m *Manager) getMergeConfig() MergeConfig { + config := DefaultMergeConfig() + + // Check for .gastown/config.json with merge_queue settings + configPath := filepath.Join(m.rig.Path, ".gastown", "config.json") + data, err := os.ReadFile(configPath) + if err != nil { + return config + } + + var rawConfig struct { + MergeQueue *MergeConfig `json:"merge_queue"` + // Legacy field for backwards compatibility + TestCommand string `json:"test_command"` + } + if err := json.Unmarshal(data, &rawConfig); err != nil { + return config + } + + // Apply merge_queue config if present + if rawConfig.MergeQueue != nil { + config = *rawConfig.MergeQueue + // Ensure defaults for zero values + if config.PushRetryCount == 0 { + config.PushRetryCount = 3 + } + if config.PushRetryDelayMs == 0 { + config.PushRetryDelayMs = 1000 + } + } + + // Legacy: use test_command if merge_queue not set + if rawConfig.TestCommand != "" && config.TestCommand == "" { + config.TestCommand = rawConfig.TestCommand + } + + return config +} + +// pushWithRetry pushes to the target branch with exponential backoff retry. 
+func (m *Manager) pushWithRetry(targetBranch string, config MergeConfig) error { + var lastErr error + delay := time.Duration(config.PushRetryDelayMs) * time.Millisecond + + for attempt := 0; attempt <= config.PushRetryCount; attempt++ { + if attempt > 0 { + fmt.Printf("Push retry %d/%d after %v\n", attempt, config.PushRetryCount, delay) + time.Sleep(delay) + delay *= 2 // Exponential backoff + } + + err := m.gitRun("push", "origin", targetBranch) + if err == nil { + return nil // Success + } + lastErr = err + } + + return fmt.Errorf("push failed after %d retries: %v", config.PushRetryCount, lastErr) +} + // processExists checks if a process with the given PID exists. func processExists(pid int) bool { proc, err := os.FindProcess(pid) @@ -726,6 +673,27 @@ func formatAge(t time.Time) string { return fmt.Sprintf("%dd ago", int(d.Hours()/24)) } +// notifyWorkerConflict sends a conflict notification to a polecat. +func (m *Manager) notifyWorkerConflict(mr *MergeRequest) { + router := mail.NewRouter(m.workDir) + msg := &mail.Message{ + From: fmt.Sprintf("%s/refinery", m.rig.Name), + To: fmt.Sprintf("%s/%s", m.rig.Name, mr.Worker), + Subject: "Merge conflict - rebase required", + Body: fmt.Sprintf(`Your branch %s has conflicts with %s. + +Please rebase your changes: + git fetch origin + git rebase origin/%s + git push -f + +Then the Refinery will retry the merge.`, + mr.Branch, mr.TargetBranch, mr.TargetBranch), + Priority: mail.PriorityHigh, + } + _ = router.Send(msg) +} + // notifyWorkerMerged sends a success notification to a polecat. 
func (m *Manager) notifyWorkerMerged(mr *MergeRequest) { router := mail.NewRouter(m.workDir) @@ -739,7 +707,167 @@ Issue: %s Thank you for your contribution!`, mr.Branch, mr.TargetBranch, mr.IssueID), } - router.Send(msg) + _ = router.Send(msg) +} + +// Common errors for MR operations +var ( + ErrMRNotFound = errors.New("merge request not found") + ErrMRNotFailed = errors.New("merge request has not failed") +) + +// GetMR returns a merge request by ID from the state. +func (m *Manager) GetMR(id string) (*MergeRequest, error) { + ref, err := m.loadState() + if err != nil { + return nil, err + } + + // Check if it's the current MR + if ref.CurrentMR != nil && ref.CurrentMR.ID == id { + return ref.CurrentMR, nil + } + + // Check pending MRs + if ref.PendingMRs != nil { + if mr, ok := ref.PendingMRs[id]; ok { + return mr, nil + } + } + + return nil, ErrMRNotFound +} + +// FindMR finds a merge request by ID or branch name in the queue. +func (m *Manager) FindMR(idOrBranch string) (*MergeRequest, error) { + queue, err := m.Queue() + if err != nil { + return nil, err + } + + for _, item := range queue { + // Match by ID + if item.MR.ID == idOrBranch { + return item.MR, nil + } + // Match by branch name (with or without polecat/ prefix) + if item.MR.Branch == idOrBranch { + return item.MR, nil + } + if "polecat/"+idOrBranch == item.MR.Branch { + return item.MR, nil + } + // Match by worker name (partial match for convenience) + if strings.Contains(item.MR.ID, idOrBranch) { + return item.MR, nil + } + } + + return nil, ErrMRNotFound +} + +// Retry resets a failed merge request so it can be processed again. +// If processNow is true, immediately processes the MR instead of waiting for the loop. 
+func (m *Manager) Retry(id string, processNow bool) error { + ref, err := m.loadState() + if err != nil { + return err + } + + // Find the MR + var mr *MergeRequest + if ref.PendingMRs != nil { + mr = ref.PendingMRs[id] + } + if mr == nil { + return ErrMRNotFound + } + + // Verify it's in a failed state (open with an error) + if mr.Status != MROpen || mr.Error == "" { + return ErrMRNotFailed + } + + // Clear the error to mark as ready for retry + mr.Error = "" + + // Save the state + if err := m.saveState(ref); err != nil { + return err + } + + // If --now flag, process immediately + if processNow { + result := m.ProcessMR(mr) + if !result.Success { + return fmt.Errorf("retry failed: %s", result.Error) + } + } + + return nil +} + +// RegisterMR adds a merge request to the pending queue. +func (m *Manager) RegisterMR(mr *MergeRequest) error { + ref, err := m.loadState() + if err != nil { + return err + } + + if ref.PendingMRs == nil { + ref.PendingMRs = make(map[string]*MergeRequest) + } + + ref.PendingMRs[mr.ID] = mr + return m.saveState(ref) +} + +// RejectMR manually rejects a merge request. +// It closes the MR with rejected status and optionally notifies the worker. +// Returns the rejected MR for display purposes. +func (m *Manager) RejectMR(idOrBranch string, reason string, notify bool) (*MergeRequest, error) { + mr, err := m.FindMR(idOrBranch) + if err != nil { + return nil, err + } + + // Verify MR is open or in_progress (can't reject already closed) + if mr.IsClosed() { + return nil, fmt.Errorf("%w: MR is already closed with reason: %s", ErrClosedImmutable, mr.CloseReason) + } + + // Close with rejected reason + if err := mr.Close(CloseReasonRejected); err != nil { + return nil, fmt.Errorf("failed to close MR: %w", err) + } + mr.Error = reason + + // Optionally notify worker + if notify { + m.notifyWorkerRejected(mr, reason) + } + + return mr, nil +} + +// notifyWorkerRejected sends a rejection notification to a polecat. 
+func (m *Manager) notifyWorkerRejected(mr *MergeRequest, reason string) { + router := mail.NewRouter(m.workDir) + msg := &mail.Message{ + From: fmt.Sprintf("%s/refinery", m.rig.Name), + To: fmt.Sprintf("%s/%s", m.rig.Name, mr.Worker), + Subject: "Merge request rejected", + Body: fmt.Sprintf(`Your merge request has been rejected. + +Branch: %s +Issue: %s +Reason: %s + +Please review the feedback and address the issues before resubmitting.`, + mr.Branch, mr.IssueID, reason), + Priority: mail.PriorityNormal, + } + _ = router.Send(msg) } // findTownRoot walks up directories to find the town root. diff --git a/internal/refinery/manager_test.go b/internal/refinery/manager_test.go new file mode 100644 index 00000000..19dc20d2 --- /dev/null +++ b/internal/refinery/manager_test.go @@ -0,0 +1,172 @@ +package refinery + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/steveyegge/gastown/internal/rig" +) + +func setupTestManager(t *testing.T) (*Manager, string) { + t.Helper() + + // Create temp directory structure + tmpDir := t.TempDir() + rigPath := filepath.Join(tmpDir, "testrig") + if err := os.MkdirAll(filepath.Join(rigPath, ".gastown"), 0755); err != nil { + t.Fatalf("mkdir .gastown: %v", err) + } + + r := &rig.Rig{ + Name: "testrig", + Path: rigPath, + } + + return NewManager(r), rigPath +} + +func TestManager_GetMR(t *testing.T) { + mgr, _ := setupTestManager(t) + + // Create a test MR in the pending queue + mr := &MergeRequest{ + ID: "gt-mr-abc123", + Branch: "polecat/Toast/gt-xyz", + Worker: "Toast", + IssueID: "gt-xyz", + Status: MROpen, + Error: "test failure", + } + + if err := mgr.RegisterMR(mr); err != nil { + t.Fatalf("RegisterMR: %v", err) + } + + t.Run("find existing MR", func(t *testing.T) { + found, err := mgr.GetMR("gt-mr-abc123") + if err != nil { + t.Errorf("GetMR() unexpected error: %v", err) + } + if found == nil { + t.Fatal("GetMR() returned nil") + } + if found.ID != mr.ID { + t.Errorf("GetMR() ID = %s, want 
%s", found.ID, mr.ID) + } + }) + + t.Run("MR not found", func(t *testing.T) { + _, err := mgr.GetMR("nonexistent-mr") + if err != ErrMRNotFound { + t.Errorf("GetMR() error = %v, want %v", err, ErrMRNotFound) + } + }) +} + +func TestManager_Retry(t *testing.T) { + t.Run("retry failed MR clears error", func(t *testing.T) { + mgr, _ := setupTestManager(t) + + // Create a failed MR + mr := &MergeRequest{ + ID: "gt-mr-failed", + Branch: "polecat/Toast/gt-xyz", + Worker: "Toast", + Status: MROpen, + Error: "merge conflict", + } + + if err := mgr.RegisterMR(mr); err != nil { + t.Fatalf("RegisterMR: %v", err) + } + + // Retry without processing + err := mgr.Retry("gt-mr-failed", false) + if err != nil { + t.Errorf("Retry() unexpected error: %v", err) + } + + // Verify error was cleared + found, _ := mgr.GetMR("gt-mr-failed") + if found.Error != "" { + t.Errorf("Retry() error not cleared, got %s", found.Error) + } + }) + + t.Run("retry non-failed MR fails", func(t *testing.T) { + mgr, _ := setupTestManager(t) + + // Create a successful MR (no error) + mr := &MergeRequest{ + ID: "gt-mr-success", + Branch: "polecat/Toast/gt-abc", + Worker: "Toast", + Status: MROpen, + Error: "", // No error + } + + if err := mgr.RegisterMR(mr); err != nil { + t.Fatalf("RegisterMR: %v", err) + } + + err := mgr.Retry("gt-mr-success", false) + if err != ErrMRNotFailed { + t.Errorf("Retry() error = %v, want %v", err, ErrMRNotFailed) + } + }) + + t.Run("retry nonexistent MR fails", func(t *testing.T) { + mgr, _ := setupTestManager(t) + + err := mgr.Retry("nonexistent", false) + if err != ErrMRNotFound { + t.Errorf("Retry() error = %v, want %v", err, ErrMRNotFound) + } + }) +} + +func TestManager_RegisterMR(t *testing.T) { + mgr, rigPath := setupTestManager(t) + + mr := &MergeRequest{ + ID: "gt-mr-new", + Branch: "polecat/Cheedo/gt-123", + Worker: "Cheedo", + IssueID: "gt-123", + TargetBranch: "main", + CreatedAt: time.Now(), + Status: MROpen, + } + + if err := mgr.RegisterMR(mr); err != nil { + 
t.Fatalf("RegisterMR: %v", err) + } + + // Verify it was saved to disk + stateFile := filepath.Join(rigPath, ".gastown", "refinery.json") + data, err := os.ReadFile(stateFile) + if err != nil { + t.Fatalf("reading state file: %v", err) + } + + var ref Refinery + if err := json.Unmarshal(data, &ref); err != nil { + t.Fatalf("unmarshal state: %v", err) + } + + if ref.PendingMRs == nil { + t.Fatal("PendingMRs is nil") + } + + saved, ok := ref.PendingMRs["gt-mr-new"] + if !ok { + t.Fatal("MR not found in PendingMRs") + } + + if saved.Worker != "Cheedo" { + t.Errorf("saved MR worker = %s, want Cheedo", saved.Worker) + } +} diff --git a/internal/refinery/types.go b/internal/refinery/types.go index b2e53f8f..8d2ae1b7 100644 --- a/internal/refinery/types.go +++ b/internal/refinery/types.go @@ -38,6 +38,10 @@ type Refinery struct { // CurrentMR is the merge request currently being processed. CurrentMR *MergeRequest `json:"current_mr,omitempty"` + // PendingMRs tracks merge requests that have been submitted. + // Key is the MR ID. + PendingMRs map[string]*MergeRequest `json:"pending_mrs,omitempty"` + // LastMergeAt is when the last successful merge happened. LastMergeAt *time.Time `json:"last_merge_at,omitempty"` @@ -111,6 +115,41 @@ const ( ) +// MergeConfig contains configuration for the merge process. +type MergeConfig struct { + // RunTests controls whether tests are run after merge. + // Default: true + RunTests bool `json:"run_tests"` + + // TestCommand is the command to run for testing. + // Default: "go test ./..." + TestCommand string `json:"test_command"` + + // DeleteMergedBranches controls whether merged branches are deleted. + // Default: true + DeleteMergedBranches bool `json:"delete_merged_branches"` + + // PushRetryCount is the number of times to retry a failed push. + // Default: 3 + PushRetryCount int `json:"push_retry_count"` + + // PushRetryDelayMs is the base delay between push retries in milliseconds. 
+ // Each retry doubles the delay (exponential backoff). + // Default: 1000 + PushRetryDelayMs int `json:"push_retry_delay_ms"` +} + +// DefaultMergeConfig returns the default merge configuration. +func DefaultMergeConfig() MergeConfig { + return MergeConfig{ + RunTests: true, + TestCommand: "go test ./...", + DeleteMergedBranches: true, + PushRetryCount: 3, + PushRetryDelayMs: 1000, + } +} + // RefineryStats contains cumulative refinery statistics. type RefineryStats struct { // TotalMerged is the total number of successful merges. diff --git a/internal/rig/manager.go b/internal/rig/manager.go index de8a4064..4916cdc3 100644 --- a/internal/rig/manager.go +++ b/internal/rig/manager.go @@ -12,6 +12,7 @@ import ( "github.com/steveyegge/gastown/internal/config" "github.com/steveyegge/gastown/internal/git" + "github.com/steveyegge/gastown/internal/templates" ) // Common errors @@ -193,7 +194,7 @@ func (m *Manager) AddRig(opts AddRigOptions) (*Rig, error) { } // Track cleanup on failure - cleanup := func() { os.RemoveAll(rigPath) } + cleanup := func() { _ = os.RemoveAll(rigPath) } success := false defer func() { if !success { @@ -224,6 +225,10 @@ func (m *Manager) AddRig(opts AddRigOptions) (*Rig, error) { if err := m.git.Clone(opts.GitURL, refineryRigPath); err != nil { return nil, fmt.Errorf("cloning for refinery: %w", err) } + // Create refinery CLAUDE.md (overrides any from cloned repo) + if err := m.createRoleCLAUDEmd(refineryRigPath, "refinery", opts.Name, ""); err != nil { + return nil, fmt.Errorf("creating refinery CLAUDE.md: %w", err) + } // Clone repository for mayor mayorRigPath := filepath.Join(rigPath, "mayor", "rig") @@ -233,6 +238,10 @@ func (m *Manager) AddRig(opts AddRigOptions) (*Rig, error) { if err := m.git.Clone(opts.GitURL, mayorRigPath); err != nil { return nil, fmt.Errorf("cloning for mayor: %w", err) } + // Create mayor CLAUDE.md (overrides any from cloned repo) + if err := m.createRoleCLAUDEmd(mayorRigPath, "mayor", opts.Name, ""); err != nil { 
+ return nil, fmt.Errorf("creating mayor CLAUDE.md: %w", err) + } // Clone repository for default crew workspace crewPath := filepath.Join(rigPath, "crew", opts.CrewName) @@ -242,6 +251,10 @@ func (m *Manager) AddRig(opts AddRigOptions) (*Rig, error) { if err := m.git.Clone(opts.GitURL, crewPath); err != nil { return nil, fmt.Errorf("cloning for crew: %w", err) } + // Create crew CLAUDE.md (overrides any from cloned repo) + if err := m.createRoleCLAUDEmd(crewPath, "crew", opts.Name, opts.CrewName); err != nil { + return nil, fmt.Errorf("creating crew CLAUDE.md: %w", err) + } // Create witness directory (no clone needed) witnessPath := filepath.Join(rigPath, "witness") @@ -384,3 +397,29 @@ func (m *Manager) ListRigNames() []string { } return names } + +// createRoleCLAUDEmd creates a CLAUDE.md file with role-specific context. +// This ensures each workspace (crew, refinery, mayor) gets the correct prompting, +// overriding any CLAUDE.md that may exist in the cloned repository. +func (m *Manager) createRoleCLAUDEmd(workspacePath string, role string, rigName string, workerName string) error { + tmpl, err := templates.New() + if err != nil { + return err + } + + data := templates.RoleData{ + Role: role, + RigName: rigName, + TownRoot: m.townRoot, + WorkDir: workspacePath, + Polecat: workerName, // Used for crew member name as well + } + + content, err := tmpl.RenderRole(role, data) + if err != nil { + return err + } + + claudePath := filepath.Join(workspacePath, "CLAUDE.md") + return os.WriteFile(claudePath, []byte(content), 0644) +} diff --git a/internal/session/manager.go b/internal/session/manager.go index 3ef85a95..594116e8 100644 --- a/internal/session/manager.go +++ b/internal/session/manager.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "os/exec" "path/filepath" "strings" "time" @@ -59,6 +60,15 @@ type Info struct { // RigName is the rig this session belongs to. RigName string `json:"rig_name"` + + // Attached indicates if someone is attached to the session. 
+ Attached bool `json:"attached,omitempty"` + + // Created is when the session was created. + Created time.Time `json:"created,omitempty"` + + // Windows is the number of tmux windows. + Windows int `json:"windows,omitempty"` } // sessionName generates the tmux session name for a polecat. @@ -111,8 +121,20 @@ func (m *Manager) Start(polecat string, opts StartOptions) error { } // Set environment - m.tmux.SetEnvironment(sessionID, "GT_RIG", m.rig.Name) - m.tmux.SetEnvironment(sessionID, "GT_POLECAT", polecat) + _ = m.tmux.SetEnvironment(sessionID, "GT_RIG", m.rig.Name) + _ = m.tmux.SetEnvironment(sessionID, "GT_POLECAT", polecat) + + // CRITICAL: Set beads environment for worktree polecats + // Polecats share the rig's beads directory (in mayor/rig/.beads) + // BEADS_NO_DAEMON=1 prevents daemon from committing to wrong branch + beadsDir := filepath.Join(m.rig.Path, "mayor", "rig", ".beads") + _ = m.tmux.SetEnvironment(sessionID, "BEADS_DIR", beadsDir) + _ = m.tmux.SetEnvironment(sessionID, "BEADS_NO_DAEMON", "1") + _ = m.tmux.SetEnvironment(sessionID, "BEADS_AGENT_NAME", fmt.Sprintf("%s/%s", m.rig.Name, polecat)) + + // Apply theme + theme := tmux.AssignTheme(m.rig.Name) + _ = m.tmux.ConfigureGasTownSession(sessionID, theme, m.rig.Name, polecat, "polecat") // Send initial command command := opts.Command @@ -128,9 +150,7 @@ func (m *Manager) Start(polecat string, opts StartOptions) error { if opts.Issue != "" { time.Sleep(500 * time.Millisecond) prompt := fmt.Sprintf("Work on issue: %s", opts.Issue) - if err := m.Inject(polecat, prompt); err != nil { - // Non-fatal, just log - } + _ = m.Inject(polecat, prompt) // Non-fatal error } return nil @@ -150,9 +170,19 @@ func (m *Manager) Stop(polecat string, force bool) error { return ErrSessionNotFound } + // Sync beads before shutdown to preserve any changes + // Run in the polecat's worktree directory + if !force { + polecatDir := m.polecatDir(polecat) + if err := m.syncBeads(polecatDir); err != nil { + // Non-fatal - log 
and continue with shutdown + fmt.Printf("Warning: beads sync failed: %v\n", err) + } + } + // Try graceful shutdown first (unless forced) if !force { - m.tmux.SendKeysRaw(sessionID, "C-c") // Ctrl+C + _ = m.tmux.SendKeysRaw(sessionID, "C-c") // Ctrl+C time.Sleep(100 * time.Millisecond) } @@ -164,12 +194,69 @@ func (m *Manager) Stop(polecat string, force bool) error { return nil } +// syncBeads runs bd sync in the given directory. +func (m *Manager) syncBeads(workDir string) error { + cmd := exec.Command("bd", "sync") + cmd.Dir = workDir + return cmd.Run() +} + // IsRunning checks if a polecat session is active. func (m *Manager) IsRunning(polecat string) (bool, error) { sessionID := m.sessionName(polecat) return m.tmux.HasSession(sessionID) } +// Status returns detailed status for a polecat session. +func (m *Manager) Status(polecat string) (*Info, error) { + sessionID := m.sessionName(polecat) + + running, err := m.tmux.HasSession(sessionID) + if err != nil { + return nil, fmt.Errorf("checking session: %w", err) + } + + info := &Info{ + Polecat: polecat, + SessionID: sessionID, + Running: running, + RigName: m.rig.Name, + } + + if !running { + return info, nil + } + + // Get detailed session info + tmuxInfo, err := m.tmux.GetSessionInfo(sessionID) + if err != nil { + // Non-fatal - return basic info + return info, nil + } + + info.Attached = tmuxInfo.Attached + info.Windows = tmuxInfo.Windows + + // Parse created time from tmux format (e.g., "Thu Dec 19 10:30:00 2025") + if tmuxInfo.Created != "" { + // Try common tmux date formats + formats := []string{ + "Mon Jan 2 15:04:05 2006", + "Mon Jan _2 15:04:05 2006", + time.ANSIC, + time.UnixDate, + } + for _, format := range formats { + if t, err := time.Parse(format, tmuxInfo.Created); err == nil { + info.Created = t + break + } + } + } + + return info, nil +} + // List returns information about all sessions for this rig. 
func (m *Manager) List() ([]Info, error) { sessions, err := m.tmux.ListSessions() @@ -228,6 +315,7 @@ func (m *Manager) Capture(polecat string, lines int) (string, error) { } // Inject sends a message to a polecat session. +// Uses a longer debounce delay for large messages to ensure paste completes. func (m *Manager) Inject(polecat, message string) error { sessionID := m.sessionName(polecat) @@ -239,7 +327,15 @@ func (m *Manager) Inject(polecat, message string) error { return ErrSessionNotFound } - return m.tmux.SendKeys(sessionID, message) + // Use longer debounce for large messages (spawn context can be 1KB+) + // Claude needs time to process paste before Enter is sent + // Scale delay based on message size: 200ms base + 100ms per KB + debounceMs := 200 + (len(message)/1024)*100 + if debounceMs > 1500 { + debounceMs = 1500 // Cap at 1.5s for large pastes + } + + return m.tmux.SendKeysDebounced(sessionID, message, debounceMs) } // StopAll terminates all sessions for this rig. diff --git a/internal/session/manager_test.go b/internal/session/manager_test.go index 18441b95..5600b257 100644 --- a/internal/session/manager_test.go +++ b/internal/session/manager_test.go @@ -1,6 +1,8 @@ package session import ( + "os" + "path/filepath" "testing" "github.com/steveyegge/gastown/internal/rig" @@ -36,8 +38,17 @@ func TestPolecatDir(t *testing.T) { } func TestHasPolecat(t *testing.T) { + root := t.TempDir() + // hasPolecat checks filesystem, so create actual directories + for _, name := range []string{"Toast", "Cheedo"} { + if err := os.MkdirAll(filepath.Join(root, "polecats", name), 0755); err != nil { + t.Fatalf("mkdir: %v", err) + } + } + r := &rig.Rig{ Name: "gastown", + Path: root, Polecats: []string{"Toast", "Cheedo"}, } m := NewManager(tmux.NewTmux(), r) diff --git a/internal/swarm/integration.go b/internal/swarm/integration.go index 7dd854ae..9ebd1666 100644 --- a/internal/swarm/integration.go +++ b/internal/swarm/integration.go @@ -37,9 +37,7 @@ func (m *Manager) 
CreateIntegrationBranch(swarmID string) error { } // Push to origin - if err := m.gitRun("push", "-u", "origin", branchName); err != nil { - // Non-fatal - may not have remote - } + _ = m.gitRun("push", "-u", "origin", branchName) // Non-fatal - may not have remote return nil } @@ -64,9 +62,7 @@ func (m *Manager) MergeToIntegration(swarmID, workerBranch string) error { } // Fetch the worker branch - if err := m.gitRun("fetch", "origin", workerBranch); err != nil { - // May not exist on remote, try local - } + _ = m.gitRun("fetch", "origin", workerBranch) // May not exist on remote, try local // Attempt merge err = m.gitRun("merge", "--no-ff", "-m", @@ -102,7 +98,7 @@ func (m *Manager) LandToMain(swarmID string) error { } // Pull latest - m.gitRun("pull", "origin", swarm.TargetBranch) // Ignore errors + _ = m.gitRun("pull", "origin", swarm.TargetBranch) // Ignore errors // Merge integration branch err := m.gitRun("merge", "--no-ff", "-m", @@ -138,15 +134,15 @@ func (m *Manager) CleanupBranches(swarmID string) error { } // Delete integration branch remotely - m.gitRun("push", "origin", "--delete", swarm.Integration) // Ignore errors + _ = m.gitRun("push", "origin", "--delete", swarm.Integration) // Ignore errors // Delete worker branches for _, task := range swarm.Tasks { if task.Branch != "" { // Local delete - m.gitRun("branch", "-D", task.Branch) + _ = m.gitRun("branch", "-D", task.Branch) // Remote delete - m.gitRun("push", "origin", "--delete", task.Branch) + _ = m.gitRun("push", "origin", "--delete", task.Branch) } } diff --git a/internal/swarm/landing.go b/internal/swarm/landing.go index ff46443d..f5c971a4 100644 --- a/internal/swarm/landing.go +++ b/internal/swarm/landing.go @@ -210,7 +210,7 @@ Manual intervention required.`, swarmID, strings.Join(workers, "\n- ")), Priority: mail.PriorityHigh, } - router.Send(msg) + _ = router.Send(msg) } // notifyMayorLanded sends a landing report to Mayor. 
@@ -233,5 +233,5 @@ Tasks merged: %d`, result.BranchesCleaned, len(swarm.Tasks)), } - router.Send(msg) + _ = router.Send(msg) } diff --git a/internal/swarm/manager.go b/internal/swarm/manager.go index 74b8ac55..77815805 100644 --- a/internal/swarm/manager.go +++ b/internal/swarm/manager.go @@ -311,10 +311,11 @@ func (m *Manager) loadTasksFromBeads(epicID string) ([]SwarmTask, error) { return nil, fmt.Errorf("epic not found: %s", epicID) } - // Extract parent-child dependents as tasks + // Extract dependents as tasks (issues that depend on/are blocked by this epic) + // Accept both "parent-child" and "blocks" relationships var tasks []SwarmTask for _, dep := range issues[0].Dependents { - if dep.DependencyType != "parent-child" { + if dep.DependencyType != "parent-child" && dep.DependencyType != "blocks" { continue } diff --git a/internal/swarm/manager_test.go b/internal/swarm/manager_test.go index aef3b30c..7111953e 100644 --- a/internal/swarm/manager_test.go +++ b/internal/swarm/manager_test.go @@ -94,7 +94,7 @@ func TestManagerCancel(t *testing.T) { m := NewManager(r) swarm, _ := m.Create("epic-1", []string{"Toast"}, "main") - m.Start(swarm.ID) + _ = m.Start(swarm.ID) if err := m.Cancel(swarm.ID, "user requested"); err != nil { t.Errorf("Cancel failed: %v", err) @@ -179,7 +179,7 @@ func TestManagerIsComplete(t *testing.T) { } // Complete the pending task - m.UpdateTaskState(swarm.ID, "task-1", TaskMerged) + _ = m.UpdateTaskState(swarm.ID, "task-1", TaskMerged) complete, _ = m.IsComplete(swarm.ID) if !complete { t.Error("IsComplete should be true when all tasks merged") diff --git a/internal/templates/roles/polecat.md.tmpl b/internal/templates/roles/polecat.md.tmpl index 8bc83084..a4c8ebda 100644 --- a/internal/templates/roles/polecat.md.tmpl +++ b/internal/templates/roles/polecat.md.tmpl @@ -49,6 +49,14 @@ Town ({{ .TownRoot }}) - `bd create --title="Found bug" --type=bug` - File new issue - `bd create --title="Need feature" --type=task` - File new task +### 
Agent UX: File Issues for CLI Surprises +If you guess how a `gt` or `bd` command should work and it fails, file a bead! +Example: If `gt session capture rig/polecat 50` fails but `-n 50` works, file: +``` +bd create --title="gt session capture: Support positional line count" --type=task --priority=1 +``` +Agent-friendly UX is critical. Your guesses reveal what's intuitive. + ### Completion - `gt done` - Signal work ready for merge queue - `bd sync` - Sync beads changes diff --git a/internal/tmux/theme.go b/internal/tmux/theme.go new file mode 100644 index 00000000..7b8bd9fb --- /dev/null +++ b/internal/tmux/theme.go @@ -0,0 +1,77 @@ +// Package tmux provides theme support for Gas Town tmux sessions. +package tmux + +import ( + "fmt" + "hash/fnv" +) + +// Theme represents a tmux status bar color scheme. +type Theme struct { + Name string // Human-readable name + BG string // Background color (hex or tmux color name) + FG string // Foreground color (hex or tmux color name) +} + +// DefaultPalette is the curated set of distinct, professional color themes. +// Each theme has good contrast and is visually distinct from others. +var DefaultPalette = []Theme{ + {Name: "ocean", BG: "#1e3a5f", FG: "#e0e0e0"}, // Deep blue + {Name: "forest", BG: "#2d5a3d", FG: "#e0e0e0"}, // Forest green + {Name: "rust", BG: "#8b4513", FG: "#f5f5dc"}, // Rust/brown + {Name: "plum", BG: "#4a3050", FG: "#e0e0e0"}, // Purple + {Name: "slate", BG: "#4a5568", FG: "#e0e0e0"}, // Slate gray + {Name: "ember", BG: "#b33a00", FG: "#f5f5dc"}, // Burnt orange + {Name: "midnight", BG: "#1a1a2e", FG: "#c0c0c0"}, // Dark blue-black + {Name: "wine", BG: "#722f37", FG: "#f5f5dc"}, // Burgundy + {Name: "teal", BG: "#0d5c63", FG: "#e0e0e0"}, // Teal + {Name: "copper", BG: "#6d4c41", FG: "#f5f5dc"}, // Warm brown +} + +// MayorTheme returns the special theme for the Mayor session. +// Gold/dark to distinguish it from rig themes. 
+func MayorTheme() Theme { + return Theme{Name: "mayor", BG: "#3d3200", FG: "#ffd700"} +} + +// GetThemeByName finds a theme by name from the default palette. +// Returns nil if not found. +func GetThemeByName(name string) *Theme { + for _, t := range DefaultPalette { + if t.Name == name { + return &t + } + } + return nil +} + +// AssignTheme picks a theme for a rig based on its name. +// Uses consistent hashing so the same rig always gets the same color. +func AssignTheme(rigName string) Theme { + return AssignThemeFromPalette(rigName, DefaultPalette) +} + +// AssignThemeFromPalette picks a theme using a custom palette. +func AssignThemeFromPalette(rigName string, palette []Theme) Theme { + if len(palette) == 0 { + return DefaultPalette[0] + } + h := fnv.New32a() + h.Write([]byte(rigName)) + idx := int(h.Sum32()) % len(palette) + return palette[idx] +} + +// Style returns the tmux status-style string for this theme. +func (t Theme) Style() string { + return fmt.Sprintf("bg=%s,fg=%s", t.BG, t.FG) +} + +// ListThemeNames returns the names of all themes in the default palette. 
+func ListThemeNames() []string { + names := make([]string, len(DefaultPalette)) + for i, t := range DefaultPalette { + names[i] = t.Name + } + return names +} diff --git a/internal/tmux/theme_test.go b/internal/tmux/theme_test.go new file mode 100644 index 00000000..1b28c0a6 --- /dev/null +++ b/internal/tmux/theme_test.go @@ -0,0 +1,127 @@ +package tmux + +import ( + "testing" +) + +func TestAssignTheme_Deterministic(t *testing.T) { + // Same rig name should always get same theme + theme1 := AssignTheme("gastown") + theme2 := AssignTheme("gastown") + + if theme1.Name != theme2.Name { + t.Errorf("AssignTheme not deterministic: got %s and %s for same input", theme1.Name, theme2.Name) + } +} + +func TestAssignTheme_Distribution(t *testing.T) { + // Different rig names should (mostly) get different themes + // With 10 themes and good hashing, collisions should be rare + rigs := []string{"gastown", "beads", "myproject", "frontend", "backend", "api", "web", "mobile"} + themes := make(map[string]int) + + for _, rig := range rigs { + theme := AssignTheme(rig) + themes[theme.Name]++ + } + + // We should have at least 4 different themes for 8 rigs + if len(themes) < 4 { + t.Errorf("Poor distribution: only %d different themes for %d rigs", len(themes), len(rigs)) + } +} + +func TestGetThemeByName(t *testing.T) { + tests := []struct { + name string + want bool + }{ + {"ocean", true}, + {"forest", true}, + {"nonexistent", false}, + {"", false}, + } + + for _, tt := range tests { + theme := GetThemeByName(tt.name) + got := theme != nil + if got != tt.want { + t.Errorf("GetThemeByName(%q) = %v, want %v", tt.name, got, tt.want) + } + } +} + +func TestThemeStyle(t *testing.T) { + theme := Theme{Name: "test", BG: "#1e3a5f", FG: "#e0e0e0"} + want := "bg=#1e3a5f,fg=#e0e0e0" + got := theme.Style() + + if got != want { + t.Errorf("Theme.Style() = %q, want %q", got, want) + } +} + +func TestMayorTheme(t *testing.T) { + theme := MayorTheme() + + if theme.Name != "mayor" { + 
t.Errorf("MayorTheme().Name = %q, want %q", theme.Name, "mayor") + } + + // Mayor should have distinct gold/dark colors + if theme.BG == "" || theme.FG == "" { + t.Error("MayorTheme() has empty colors") + } +} + +func TestListThemeNames(t *testing.T) { + names := ListThemeNames() + + if len(names) != len(DefaultPalette) { + t.Errorf("ListThemeNames() returned %d names, want %d", len(names), len(DefaultPalette)) + } + + // Check that known themes are in the list + found := make(map[string]bool) + for _, name := range names { + found[name] = true + } + + for _, want := range []string{"ocean", "forest", "rust"} { + if !found[want] { + t.Errorf("ListThemeNames() missing %q", want) + } + } +} + +func TestDefaultPaletteHasDistinctColors(t *testing.T) { + // Ensure no duplicate colors in the palette + bgColors := make(map[string]string) + for _, theme := range DefaultPalette { + if existing, ok := bgColors[theme.BG]; ok { + t.Errorf("Duplicate BG color %s used by %s and %s", theme.BG, existing, theme.Name) + } + bgColors[theme.BG] = theme.Name + } +} + +func TestAssignThemeFromPalette_EmptyPalette(t *testing.T) { + // Empty palette should return first default theme + theme := AssignThemeFromPalette("test", []Theme{}) + if theme.Name != DefaultPalette[0].Name { + t.Errorf("AssignThemeFromPalette with empty palette = %q, want %q", theme.Name, DefaultPalette[0].Name) + } +} + +func TestAssignThemeFromPalette_CustomPalette(t *testing.T) { + custom := []Theme{ + {Name: "custom1", BG: "#111", FG: "#fff"}, + {Name: "custom2", BG: "#222", FG: "#fff"}, + } + + // Should only return themes from custom palette + theme := AssignThemeFromPalette("test", custom) + if theme.Name != "custom1" && theme.Name != "custom2" { + t.Errorf("AssignThemeFromPalette returned %q, want one of custom themes", theme.Name) + } +} diff --git a/internal/tmux/tmux.go b/internal/tmux/tmux.go index 8db4a620..ee7ef060 100644 --- a/internal/tmux/tmux.go +++ b/internal/tmux/tmux.go @@ -110,11 +110,23 @@ func (t 
*Tmux) ListSessions() ([]string, error) { // SendKeys sends keystrokes to a session and presses Enter. // Always sends Enter as a separate command for reliability. +// Uses a debounce delay between paste and Enter to ensure paste completes. func (t *Tmux) SendKeys(session, keys string) error { + return t.SendKeysDebounced(session, keys, 100) // 100ms default debounce +} + +// SendKeysDebounced sends keystrokes with a configurable delay before Enter. +// The debounceMs parameter controls how long to wait after paste before sending Enter. +// This prevents race conditions where Enter arrives before paste is processed. +func (t *Tmux) SendKeysDebounced(session, keys string, debounceMs int) error { // Send text using literal mode (-l) to handle special chars if _, err := t.run("send-keys", "-t", session, "-l", keys); err != nil { return err } + // Wait for paste to be processed + if debounceMs > 0 { + time.Sleep(time.Duration(debounceMs) * time.Millisecond) + } // Send Enter separately - more reliable than appending to send-keys _, err := t.run("send-keys", "-t", session, "Enter") return err @@ -133,6 +145,16 @@ func (t *Tmux) SendKeysDelayed(session, keys string, delayMs int) error { return t.SendKeys(session, keys) } +// GetPaneCommand returns the current command running in a pane. +// Returns "bash", "zsh", "claude", "node", etc. +func (t *Tmux) GetPaneCommand(session string) (string, error) { + out, err := t.run("list-panes", "-t", session, "-F", "#{pane_current_command}") + if err != nil { + return "", err + } + return strings.TrimSpace(out), nil +} + // CapturePane captures the visible content of a pane. func (t *Tmux) CapturePane(session string, lines int) (string, error) { return t.run("capture-pane", "-p", "-t", session, "-S", fmt.Sprintf("-%d", lines)) @@ -190,6 +212,96 @@ type SessionInfo struct { Attached bool } +// DisplayMessage shows a message in the tmux status line. +// This is non-disruptive - it doesn't interrupt the session's input. 
+// Duration is specified in milliseconds. +func (t *Tmux) DisplayMessage(session, message string, durationMs int) error { + // Set display time temporarily, show message, then restore + // Use -d flag for duration in tmux 2.9+ + _, err := t.run("display-message", "-t", session, "-d", fmt.Sprintf("%d", durationMs), message) + return err +} + +// DisplayMessageDefault shows a message with default duration (5 seconds). +func (t *Tmux) DisplayMessageDefault(session, message string) error { + return t.DisplayMessage(session, message, 5000) +} + +// SendNotificationBanner sends a visible notification banner to a tmux session. +// This interrupts the terminal to ensure the notification is seen. +// Uses echo to print a boxed banner with the notification details. +func (t *Tmux) SendNotificationBanner(session, from, subject string) error { + // Build the banner text + banner := fmt.Sprintf(`echo ' +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📬 NEW MAIL from %s +Subject: %s +Run: bd mail inbox +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +'`, from, subject) + + return t.SendKeys(session, banner) +} + +// IsClaudeRunning checks if Claude appears to be running in the session. +// Only trusts the pane command - UI markers in scrollback cause false positives. +func (t *Tmux) IsClaudeRunning(session string) bool { + // Check pane command - Claude runs as node + cmd, err := t.GetPaneCommand(session) + if err != nil { + return false + } + return cmd == "node" +} + +// WaitForCommand polls until the pane is NOT running one of the excluded commands. +// Useful for waiting until a shell has started a new process (e.g., claude). +// Returns nil when a non-excluded command is detected, or error on timeout. 
+func (t *Tmux) WaitForCommand(session string, excludeCommands []string, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + cmd, err := t.GetPaneCommand(session) + if err != nil { + time.Sleep(100 * time.Millisecond) + continue + } + // Check if current command is NOT in the exclude list + excluded := false + for _, exc := range excludeCommands { + if cmd == exc { + excluded = true + break + } + } + if !excluded { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return fmt.Errorf("timeout waiting for command (still running excluded command)") +} + +// WaitForShellReady polls until the pane is running a shell command. +// Useful for waiting until a process has exited and returned to shell. +func (t *Tmux) WaitForShellReady(session string, timeout time.Duration) error { + shells := []string{"bash", "zsh", "sh", "fish", "tcsh", "ksh"} + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + cmd, err := t.GetPaneCommand(session) + if err != nil { + time.Sleep(100 * time.Millisecond) + continue + } + for _, shell := range shells { + if cmd == shell { + return nil + } + } + time.Sleep(100 * time.Millisecond) + } + return fmt.Errorf("timeout waiting for shell") +} + // GetSessionInfo returns detailed information about a session. func (t *Tmux) GetSessionInfo(name string) (*SessionInfo, error) { format := "#{session_name}|#{session_windows}|#{session_created_string}|#{session_attached}" @@ -207,7 +319,7 @@ func (t *Tmux) GetSessionInfo(name string) (*SessionInfo, error) { } windows := 0 - fmt.Sscanf(parts[1], "%d", &windows) + _, _ = fmt.Sscanf(parts[1], "%d", &windows) return &SessionInfo{ Name: parts[0], @@ -216,3 +328,62 @@ func (t *Tmux) GetSessionInfo(name string) (*SessionInfo, error) { Attached: parts[3] == "1", }, nil } + +// ApplyTheme sets the status bar style for a session. 
+func (t *Tmux) ApplyTheme(session string, theme Theme) error { + _, err := t.run("set-option", "-t", session, "status-style", theme.Style()) + return err +} + +// SetStatusFormat configures the left side of the status bar. +// Shows: [rig/worker] role +func (t *Tmux) SetStatusFormat(session, rig, worker, role string) error { + // Format: [gastown/Rictus] polecat + var left string + if rig == "" { + // Mayor or other top-level agent + left = fmt.Sprintf("[%s] %s ", worker, role) + } else { + left = fmt.Sprintf("[%s/%s] %s ", rig, worker, role) + } + + // Allow enough room for the identity + if _, err := t.run("set-option", "-t", session, "status-left-length", "40"); err != nil { + return err + } + _, err := t.run("set-option", "-t", session, "status-left", left) + return err +} + +// SetDynamicStatus configures the right side with dynamic content. +// Uses a shell command that tmux calls periodically to get current status. +func (t *Tmux) SetDynamicStatus(session string) error { + // tmux calls this command every status-interval seconds + // gt status-line reads env vars and mail to build the status + right := fmt.Sprintf(`#(gt status-line --session=%s 2>/dev/null) %%H:%%M`, session) + + if _, err := t.run("set-option", "-t", session, "status-right-length", "50"); err != nil { + return err + } + // Set faster refresh for more responsive status + if _, err := t.run("set-option", "-t", session, "status-interval", "5"); err != nil { + return err + } + _, err := t.run("set-option", "-t", session, "status-right", right) + return err +} + +// ConfigureGasTownSession applies full Gas Town theming to a session. +// This is a convenience method that applies theme, status format, and dynamic status. 
+func (t *Tmux) ConfigureGasTownSession(session string, theme Theme, rig, worker, role string) error { + if err := t.ApplyTheme(session, theme); err != nil { + return fmt.Errorf("applying theme: %w", err) + } + if err := t.SetStatusFormat(session, rig, worker, role); err != nil { + return fmt.Errorf("setting status format: %w", err) + } + if err := t.SetDynamicStatus(session); err != nil { + return fmt.Errorf("setting dynamic status: %w", err) + } + return nil +} diff --git a/internal/tmux/tmux_test.go b/internal/tmux/tmux_test.go index 3555ebcc..0116bf53 100644 --- a/internal/tmux/tmux_test.go +++ b/internal/tmux/tmux_test.go @@ -50,13 +50,13 @@ func TestSessionLifecycle(t *testing.T) { sessionName := "gt-test-session-" + t.Name() // Clean up any existing session - tm.KillSession(sessionName) + _ = tm.KillSession(sessionName) // Create session if err := tm.NewSession(sessionName, ""); err != nil { t.Fatalf("NewSession: %v", err) } - defer tm.KillSession(sessionName) + defer func() { _ = tm.KillSession(sessionName) }() // Verify exists has, err := tm.HasSession(sessionName) @@ -107,13 +107,13 @@ func TestDuplicateSession(t *testing.T) { sessionName := "gt-test-dup-" + t.Name() // Clean up any existing session - tm.KillSession(sessionName) + _ = tm.KillSession(sessionName) // Create session if err := tm.NewSession(sessionName, ""); err != nil { t.Fatalf("NewSession: %v", err) } - defer tm.KillSession(sessionName) + defer func() { _ = tm.KillSession(sessionName) }() // Try to create duplicate err := tm.NewSession(sessionName, "") @@ -131,13 +131,13 @@ func TestSendKeysAndCapture(t *testing.T) { sessionName := "gt-test-keys-" + t.Name() // Clean up any existing session - tm.KillSession(sessionName) + _ = tm.KillSession(sessionName) // Create session if err := tm.NewSession(sessionName, ""); err != nil { t.Fatalf("NewSession: %v", err) } - defer tm.KillSession(sessionName) + defer func() { _ = tm.KillSession(sessionName) }() // Send echo command if err := 
tm.SendKeys(sessionName, "echo HELLO_TEST_MARKER"); err != nil { @@ -167,13 +167,13 @@ func TestGetSessionInfo(t *testing.T) { sessionName := "gt-test-info-" + t.Name() // Clean up any existing session - tm.KillSession(sessionName) + _ = tm.KillSession(sessionName) // Create session if err := tm.NewSession(sessionName, ""); err != nil { t.Fatalf("NewSession: %v", err) } - defer tm.KillSession(sessionName) + defer func() { _ = tm.KillSession(sessionName) }() info, err := tm.GetSessionInfo(sessionName) if err != nil { diff --git a/internal/witness/manager.go b/internal/witness/manager.go new file mode 100644 index 00000000..e9a4bc47 --- /dev/null +++ b/internal/witness/manager.go @@ -0,0 +1,358 @@ +package witness + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/steveyegge/gastown/internal/git" + "github.com/steveyegge/gastown/internal/polecat" + "github.com/steveyegge/gastown/internal/rig" + "github.com/steveyegge/gastown/internal/session" + "github.com/steveyegge/gastown/internal/tmux" +) + +// Common errors +var ( + ErrNotRunning = errors.New("witness not running") + ErrAlreadyRunning = errors.New("witness already running") +) + +// Manager handles witness lifecycle and monitoring operations. +type Manager struct { + rig *rig.Rig + workDir string +} + +// NewManager creates a new witness manager for a rig. +func NewManager(r *rig.Rig) *Manager { + return &Manager{ + rig: r, + workDir: r.Path, + } +} + +// stateFile returns the path to the witness state file. +func (m *Manager) stateFile() string { + return filepath.Join(m.rig.Path, ".gastown", "witness.json") +} + +// loadState loads witness state from disk. 
+func (m *Manager) loadState() (*Witness, error) { + data, err := os.ReadFile(m.stateFile()) + if err != nil { + if os.IsNotExist(err) { + return &Witness{ + RigName: m.rig.Name, + State: StateStopped, + }, nil + } + return nil, err + } + + var w Witness + if err := json.Unmarshal(data, &w); err != nil { + return nil, err + } + + return &w, nil +} + +// saveState persists witness state to disk. +func (m *Manager) saveState(w *Witness) error { + dir := filepath.Dir(m.stateFile()) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + + data, err := json.MarshalIndent(w, "", " ") + if err != nil { + return err + } + + return os.WriteFile(m.stateFile(), data, 0644) +} + +// Status returns the current witness status. +func (m *Manager) Status() (*Witness, error) { + w, err := m.loadState() + if err != nil { + return nil, err + } + + // If running, verify process is still alive + if w.State == StateRunning && w.PID > 0 { + if !processExists(w.PID) { + w.State = StateStopped + w.PID = 0 + _ = m.saveState(w) + } + } + + // Update monitored polecats list + w.MonitoredPolecats = m.rig.Polecats + + return w, nil +} + +// Start starts the witness. +// If foreground is true, runs in the current process (blocking). +// Otherwise, spawns a background process. +func (m *Manager) Start(foreground bool) error { + w, err := m.loadState() + if err != nil { + return err + } + + if w.State == StateRunning && w.PID > 0 && processExists(w.PID) { + return ErrAlreadyRunning + } + + now := time.Now() + w.State = StateRunning + w.StartedAt = &now + w.PID = os.Getpid() // For foreground mode; background would set actual PID + w.MonitoredPolecats = m.rig.Polecats + + if err := m.saveState(w); err != nil { + return err + } + + if foreground { + // Run the monitoring loop (blocking) + return m.run(w) + } + + // Background mode: spawn a new process + // For MVP, we just mark as running - actual daemon implementation later + return nil +} + +// Stop stops the witness. 
+func (m *Manager) Stop() error { + w, err := m.loadState() + if err != nil { + return err + } + + if w.State != StateRunning { + return ErrNotRunning + } + + // If we have a PID, try to stop it gracefully + if w.PID > 0 && w.PID != os.Getpid() { + // Send SIGTERM + if proc, err := os.FindProcess(w.PID); err == nil { + _ = proc.Signal(os.Interrupt) + } + } + + w.State = StateStopped + w.PID = 0 + + return m.saveState(w) +} + +// run is the main monitoring loop (for foreground mode). +func (m *Manager) run(w *Witness) error { + fmt.Println("Witness running...") + fmt.Println("Press Ctrl+C to stop") + + // Initial check immediately + m.checkAndProcess(w) + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for range ticker.C { + m.checkAndProcess(w) + } + return nil +} + +// checkAndProcess performs health check and processes shutdown requests. +func (m *Manager) checkAndProcess(w *Witness) { + // Perform health check + if err := m.healthCheck(w); err != nil { + fmt.Printf("Health check error: %v\n", err) + } + + // Check for shutdown requests + if err := m.processShutdownRequests(w); err != nil { + fmt.Printf("Shutdown request error: %v\n", err) + } +} + +// healthCheck performs a health check on all monitored polecats. +func (m *Manager) healthCheck(w *Witness) error { + now := time.Now() + w.LastCheckAt = &now + w.Stats.TotalChecks++ + w.Stats.TodayChecks++ + + return m.saveState(w) +} + +// processShutdownRequests checks mail for lifecycle requests and handles them. 
+func (m *Manager) processShutdownRequests(w *Witness) error { + // Get witness mailbox via bd mail inbox + messages, err := m.getWitnessMessages() + if err != nil { + return fmt.Errorf("getting messages: %w", err) + } + + for _, msg := range messages { + // Look for LIFECYCLE requests + if strings.Contains(msg.Subject, "LIFECYCLE:") && strings.Contains(msg.Subject, "shutdown") { + fmt.Printf("Processing shutdown request: %s\n", msg.Subject) + + // Extract polecat name from message body + polecatName := extractPolecatName(msg.Body) + if polecatName == "" { + fmt.Printf(" Warning: could not extract polecat name from message\n") + m.ackMessage(msg.ID) + continue + } + + fmt.Printf(" Polecat: %s\n", polecatName) + + // Perform cleanup + if err := m.cleanupPolecat(polecatName); err != nil { + fmt.Printf(" Cleanup error: %v\n", err) + // Don't ack message on error - will retry + continue + } + + fmt.Printf(" Cleanup complete\n") + + // Acknowledge the message + m.ackMessage(msg.ID) + } + } + + return nil +} + +// WitnessMessage represents a mail message for the witness. +type WitnessMessage struct { + ID string `json:"id"` + Subject string `json:"subject"` + Body string `json:"body"` + From string `json:"from"` +} + +// getWitnessMessages retrieves unread messages for the witness. 
+func (m *Manager) getWitnessMessages() ([]WitnessMessage, error) { + // Use bd mail inbox --json + cmd := exec.Command("bd", "mail", "inbox", "--json") + cmd.Dir = m.workDir + cmd.Env = append(os.Environ(), "BEADS_AGENT_NAME="+m.rig.Name+"-witness") + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + // No messages is not an error + if strings.Contains(stderr.String(), "no messages") { + return nil, nil + } + return nil, fmt.Errorf("%s", stderr.String()) + } + + if stdout.Len() == 0 { + return nil, nil + } + + var messages []WitnessMessage + if err := json.Unmarshal(stdout.Bytes(), &messages); err != nil { + // Try parsing as empty array + if strings.TrimSpace(stdout.String()) == "[]" { + return nil, nil + } + return nil, fmt.Errorf("parsing messages: %w", err) + } + + return messages, nil +} + +// ackMessage acknowledges a message (marks it as read/handled). +func (m *Manager) ackMessage(id string) { + cmd := exec.Command("bd", "mail", "ack", id) + cmd.Dir = m.workDir + _ = cmd.Run() // Ignore errors +} + +// extractPolecatName extracts the polecat name from a lifecycle request body. +func extractPolecatName(body string) string { + // Look for "Polecat: " pattern + re := regexp.MustCompile(`Polecat:\s*(\S+)`) + matches := re.FindStringSubmatch(body) + if len(matches) >= 2 { + return matches[1] + } + return "" +} + +// cleanupPolecat performs the full cleanup sequence for an ephemeral polecat. +// 1. Kill session +// 2. Remove worktree +// 3. Delete branch +func (m *Manager) cleanupPolecat(polecatName string) error { + fmt.Printf(" Cleaning up polecat %s...\n", polecatName) + + // Get managers + t := tmux.NewTmux() + sessMgr := session.NewManager(t, m.rig) + polecatGit := git.NewGit(m.rig.Path) + polecatMgr := polecat.NewManager(m.rig, polecatGit) + + // 1. 
Kill session
+	running, err := sessMgr.IsRunning(polecatName)
+	if err == nil && running {
+		fmt.Printf(" Killing session...\n")
+		if err := sessMgr.Stop(polecatName, true); err != nil {
+			fmt.Printf(" Warning: failed to stop session: %v\n", err)
+		}
+	}
+
+	// 2. Remove worktree (this also removes the directory)
+	fmt.Printf(" Removing worktree...\n")
+	if err := polecatMgr.Remove(polecatName, true); err != nil {
+		// Only error if polecat actually exists
+		if !errors.Is(err, polecat.ErrPolecatNotFound) {
+			return fmt.Errorf("removing worktree: %w", err)
+		}
+	}
+
+	// 3. Delete branch from mayor's clone
+	branchName := fmt.Sprintf("polecat/%s", polecatName)
+	mayorPath := filepath.Join(m.rig.Path, "mayor", "rig")
+	mayorGit := git.NewGit(mayorPath)
+
+	fmt.Printf(" Deleting branch %s...\n", branchName)
+	if err := mayorGit.DeleteBranch(branchName, true); err != nil {
+		// Branch might already be deleted or merged, not a critical error
+		fmt.Printf(" Warning: failed to delete branch: %v\n", err)
+	}
+
+	return nil
+}
+
+// processExists checks if a process with the given PID exists.
+func processExists(pid int) bool {
+	if _, err := os.FindProcess(pid); err != nil {
+		return false
+	}
+	// Process.Signal(nil) always errors ("os: unsupported signal type") because
+	// a nil os.Signal fails the syscall.Signal assertion — probe with kill -0.
+	err := exec.Command("kill", "-0", fmt.Sprintf("%d", pid)).Run()
+	return err == nil
+}
diff --git a/internal/witness/types.go b/internal/witness/types.go
new file mode 100644
index 00000000..6737524c
--- /dev/null
+++ b/internal/witness/types.go
@@ -0,0 +1,62 @@
+// Package witness provides the polecat monitoring agent.
+package witness
+
+import (
+	"time"
+)
+
+// State represents the witness's running state.
+type State string
+
+const (
+	// StateStopped means the witness is not running.
+	StateStopped State = "stopped"
+
+	// StateRunning means the witness is actively monitoring.
+	StateRunning State = "running"
+
+	// StatePaused means the witness is paused (not monitoring).
+ StatePaused State = "paused" +) + +// Witness represents a rig's polecat monitoring agent. +type Witness struct { + // RigName is the rig this witness monitors. + RigName string `json:"rig_name"` + + // State is the current running state. + State State `json:"state"` + + // PID is the process ID if running in background. + PID int `json:"pid,omitempty"` + + // StartedAt is when the witness was started. + StartedAt *time.Time `json:"started_at,omitempty"` + + // MonitoredPolecats tracks polecats being monitored. + MonitoredPolecats []string `json:"monitored_polecats,omitempty"` + + // LastCheckAt is when the last health check was performed. + LastCheckAt *time.Time `json:"last_check_at,omitempty"` + + // Stats contains cumulative statistics. + Stats WitnessStats `json:"stats"` +} + +// WitnessStats contains cumulative witness statistics. +type WitnessStats struct { + // TotalChecks is the total number of health checks performed. + TotalChecks int `json:"total_checks"` + + // TotalNudges is the total number of nudges sent to polecats. + TotalNudges int `json:"total_nudges"` + + // TotalEscalations is the total number of escalations to mayor. + TotalEscalations int `json:"total_escalations"` + + // TodayChecks is the number of checks today. + TodayChecks int `json:"today_checks"` + + // TodayNudges is the number of nudges today. + TodayNudges int `json:"today_nudges"` +} diff --git a/prompts/roles/polecat.md b/prompts/roles/polecat.md new file mode 100644 index 00000000..97fd66c5 --- /dev/null +++ b/prompts/roles/polecat.md @@ -0,0 +1,158 @@ +# Gas Town Polecat Context + +> **Recovery**: Run `gt prime` after compaction, clear, or new session + +## Your Role: POLECAT ({{ name }} in {{ rig }}) + +You are a **polecat** - an ephemeral worker agent in the Gas Town swarm. 
You are:
+
+- **Task-focused**: You work on one assigned issue at a time
+- **Ephemeral**: When your work is done, you may be decommissioned
+- **Witness-managed**: The Witness monitors your progress and can nudge or reassign you
+- **Part of a swarm**: Other polecats may be working on related issues in parallel
+
+**Your mission**: Complete your assigned issue, sync your work, and signal done.
+
+## Your Workspace
+
+You work from: `{{ workspace_path }}`
+
+This is a git **worktree** (not a full clone) sharing the repo with other polecats.
+
+## Two-Level Beads Architecture
+
+Gas Town has TWO beads databases:
+
+### 1. Rig-Level Beads (YOUR issues)
+- Location: `{{ rig_path }}/mayor/rig/.beads/`
+- Prefix: `gt-*` (project issues)
+- Use for: Bugs, features, tasks you work on
+- Commands: `bd show`, `bd update`, `bd close`, `bd sync`
+
+### 2. Town-Level Beads (Mayor mail)
+- Location: `~/gt/.beads/`
+- Prefix: `gm-*` (mayor messages)
+- Use for: Cross-rig coordination, mayor handoffs
+- **Not your concern** - Mayor and Witness use this
+
+**Important**: As a polecat, you only work with rig-level beads. Never modify town-level beads.
+
+## Beads Sync Protocol
+
+**CRITICAL**: Your worktree has its own `.beads/` copy. Changes must be synced!
+
+### On Startup
+```bash
+bd sync --from-main    # Pull latest beads state
+bd show <issue-id>     # Verify your assignment
+```
+
+### During Work
+```bash
+bd update <issue-id> --status=in_progress   # Claim if not already
+# ... do your work ...
+bd close <issue-id> --reason="Done: summary"
+```
+
+### Before Finishing
+```bash
+bd sync                   # Push your beads changes
+git add <files>
+git commit -m "message"
+git push origin <branch>
+```
+
+**Never signal DONE until beads are synced!**
+
+## Your Workflow
+
+### 1. Understand Your Assignment
+```bash
+bd show <issue-id>          # Full issue details
+bd show <issue-id> --deps   # See dependencies
+```
+
+### 2. Do The Work
+- Make your changes
+- Run tests: `go test ./...`
+- Build: `go build -o gt ./cmd/gt`
+
+### 3. 
Commit Your Changes
+```bash
+git status
+git add <files>
+git commit -m "feat/fix/docs: description (gt-xxx)"
+```
+
+### 4. Finish Up
+```bash
+bd close <issue-id> --reason="summary of what was done"
+bd sync                # CRITICAL: Push beads changes
+git push origin HEAD   # Push code changes
+```
+
+### 5. Signal Completion
+After everything is synced and pushed:
+```
+DONE
+
+Summary of changes:
+- ...
+```
+
+## Communicating
+
+### With Witness (your manager)
+If you need help or are blocked:
+```bash
+gt mail send {{ rig }}/witness -s "Blocked on gt-xxx" -m "Details..."
+```
+
+### With Other Polecats
+Coordinate through beads dependencies, not direct messages.
+
+## Environment Variables
+
+These are set for you automatically:
+- `GT_RIG`: Your rig name ({{ rig }})
+- `GT_POLECAT`: Your polecat name ({{ name }})
+- `BEADS_DIR`: Path to rig's canonical beads
+- `BEADS_NO_DAEMON`: Set to 1 (worktree safety)
+- `BEADS_AGENT_NAME`: Your identity for beads ({{ rig }}/{{ name }})
+
+## Common Issues
+
+### Stale Beads
+If your issue status looks wrong:
+```bash
+bd sync --from-main    # Pull fresh state
+```
+
+### Merge Conflicts in Code
+Resolve normally, then:
+```bash
+git add <files>
+git commit
+git push
+```
+
+### Beads Sync Conflicts
+The beads sync uses a shared branch. If conflicts occur:
+```bash
+bd sync --from-main    # Accept upstream state
+# Re-apply your changes via bd update/close
+bd sync                # Push again
+```
+
+## Session End Checklist
+
+Before saying DONE:
+```
+[ ] Code changes committed
+[ ] Code pushed to branch
+[ ] Issue closed with bd close
+[ ] Beads synced with bd sync
+[ ] Summary of work provided
+```
+
+Only after all boxes are checked should you signal DONE.
diff --git a/scripts/mayor-respawn-daemon.sh b/scripts/mayor-respawn-daemon.sh new file mode 100755 index 00000000..7fa4397e --- /dev/null +++ b/scripts/mayor-respawn-daemon.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# Mayor Respawn Daemon +# Watches for restart requests and respawns the mayor session +# +# Usage: mayor-respawn-daemon.sh [start|stop|status] +# +# The daemon monitors for mail to "daemon/" with subject containing "RESTART". +# When found, it: +# 1. Acknowledges the mail +# 2. Waits 5 seconds (for handoff mail to be sent) +# 3. Runs `gt mayor restart` + +DAEMON_NAME="gt-mayor-respawn" +PID_FILE="/tmp/${DAEMON_NAME}.pid" +LOG_FILE="/tmp/${DAEMON_NAME}.log" +CHECK_INTERVAL=10 # seconds between mail checks +TOWN_ROOT="${GT_TOWN_ROOT:-/Users/stevey/gt}" + +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] $*" >> "$LOG_FILE" +} + +check_for_restart() { + cd "$TOWN_ROOT" || return 1 + + # Check inbox for daemon identity - look for RESTART subject + # Set BD_IDENTITY=daemon so bd mail knows which inbox to check + local inbox + inbox=$(BD_IDENTITY=daemon bd mail inbox --json 2>/dev/null) + + if [ -z "$inbox" ] || [ "$inbox" = "null" ] || [ "$inbox" = "[]" ]; then + return 1 + fi + + # Parse JSON to find RESTART messages + # Note: bd mail returns "title" not "subject" (beads uses title for message subjects) + local msg_id + msg_id=$(echo "$inbox" | jq -r '.[] | select(.title | test("RESTART"; "i")) | .id' 2>/dev/null | head -1) + + if [ -n "$msg_id" ] && [ "$msg_id" != "null" ]; then + log "Found restart request: $msg_id" + + # Acknowledge the message + BD_IDENTITY=daemon bd mail ack "$msg_id" 2>/dev/null + log "Acknowledged restart request" + + # Wait for handoff to complete + sleep 5 + + # Restart mayor (just sends Ctrl-C, loop handles respawn) + log "Triggering mayor respawn..." 
+ gt mayor restart 2>&1 | while read -r line; do log "$line"; done + log "Mayor respawn triggered" + + return 0 + fi + + return 1 +} + +daemon_loop() { + log "Daemon starting, watching for restart requests..." + + while true; do + if check_for_restart; then + log "Restart handled, continuing watch..." + fi + sleep "$CHECK_INTERVAL" + done +} + +start_daemon() { + if [ -f "$PID_FILE" ]; then + local pid + pid=$(cat "$PID_FILE") + if kill -0 "$pid" 2>/dev/null; then + echo "Daemon already running (PID $pid)" + return 1 + fi + rm -f "$PID_FILE" + fi + + # Start daemon in background using the script itself + nohup "$0" run > /dev/null 2>&1 & + + local pid=$! + echo "$pid" > "$PID_FILE" + echo "Started mayor respawn daemon (PID $pid)" + echo "Log: $LOG_FILE" +} + +run_daemon() { + # Called when script is invoked with "run" + echo $$ > "$PID_FILE" + daemon_loop +} + +stop_daemon() { + if [ ! -f "$PID_FILE" ]; then + echo "Daemon not running (no PID file)" + return 1 + fi + + local pid + pid=$(cat "$PID_FILE") + + if kill -0 "$pid" 2>/dev/null; then + kill "$pid" + rm -f "$PID_FILE" + echo "Stopped daemon (PID $pid)" + else + rm -f "$PID_FILE" + echo "Daemon was not running (stale PID file removed)" + fi +} + +daemon_status() { + if [ ! -f "$PID_FILE" ]; then + echo "Daemon not running" + return 1 + fi + + local pid + pid=$(cat "$PID_FILE") + + if kill -0 "$pid" 2>/dev/null; then + echo "Daemon running (PID $pid)" + echo "Log: $LOG_FILE" + if [ -f "$LOG_FILE" ]; then + echo "" + echo "Recent log entries:" + tail -5 "$LOG_FILE" + fi + return 0 + else + rm -f "$PID_FILE" + echo "Daemon not running (stale PID file removed)" + return 1 + fi +} + +case "${1:-}" in + start) + start_daemon + ;; + stop) + stop_daemon + ;; + status) + daemon_status + ;; + restart) + stop_daemon 2>/dev/null + start_daemon + ;; + run) + # Internal: called when daemon starts itself in background + run_daemon + ;; + *) + echo "Usage: $0 {start|stop|status|restart}" + exit 1 + ;; +esac