Merge polecat/Coma: gt mq status + Engineer main loop
This commit is contained in:
@@ -30,7 +30,7 @@
|
||||
{"id":"gt-3pp","title":"Support numeric shortcuts in mail read (e.g., 'mail read 1')","description":"When inbox shows numbered messages like:\n* 1. gm-19b29031... 2025-12-16 mayor Subject...\n* 2. gm-19b26d51... 2025-12-16 Subject...\n\nUsers should be able to run 'gt mail read 1' instead of needing the full message ID 'gt mail read gm-19b29031f6a172206'.\n\nImplementation:\n- Track inbox message order in display\n- Map numeric indices to actual message IDs\n- Accept both numeric shortcuts and full IDs in 'mail read' command","status":"open","priority":2,"issue_type":"feature","created_at":"2025-12-16T13:15:07.857939-08:00","updated_at":"2025-12-16T13:15:29.273066-08:00"}
|
||||
{"id":"gt-3tz","title":"CLI: polecat commands (add, list, wake, sleep, decommission)","description":"GGT is missing most polecat management commands that PGT has.\n\nMissing Commands:\n- gt polecat add \u003crig\u003e \u003cname\u003e - Add polecat to rig (creates clone)\n- gt polecat list [\u003crig\u003e] - List polecats with state\n- gt polecat info \u003cpolecat\u003e - Show detailed info\n- gt polecat wake \u003cpolecat\u003e - Mark available\n- gt polecat sleep \u003cpolecat\u003e - Mark unavailable \n- gt polecat decommission \u003cpolecat\u003e - Remove polecat safely\n\nPGT Reference: gastown-py/src/gastown/cli/polecat_cmd.py\n\nNotes:\n- spawn exists but doesn't cover management\n- wake/sleep are in polecat manager but not CLI\n- decommission should check for uncommitted work","status":"closed","priority":1,"issue_type":"task","created_at":"2025-12-16T14:46:31.326692-08:00","updated_at":"2025-12-16T16:03:14.462338-08:00","closed_at":"2025-12-16T16:03:14.462338-08:00","close_reason":"Duplicate of gt-u1j.17 which has more detail"}
|
||||
{"id":"gt-3x1","title":"Update Refinery to use Beads merge queue","description":"Replace branch discovery with Beads queue in the Refinery module:\n\nCurrent (internal/refinery/manager.go):\n- Scans for polecat/* branches\n- Creates MR objects on-the-fly\n\nNew:\n- Pull from Beads: bd ready --type=merge-request\n- Process each MR\n- Close with merge commit: bd close \u003cid\u003e --reason=\"Merged at \u003csha\u003e\"\n- Handle failures: bd update \u003cid\u003e --status=blocked --reason=\"...\"\n\nThe Engineer (agent) becomes Beads-native.\nThe Refinery (module) provides the infrastructure.","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-16T23:02:37.96436-08:00","updated_at":"2025-12-16T23:07:32.911782-08:00","dependencies":[{"issue_id":"gt-3x1","depends_on_id":"gt-h5n","type":"blocks","created_at":"2025-12-16T23:02:55.812433-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1","depends_on_id":"gt-svi","type":"blocks","created_at":"2025-12-16T23:03:12.814463-08:00","created_by":"daemon","metadata":"{}"}]}
|
||||
{"id":"gt-3x1.1","title":"Engineer main loop: poll for ready merge-requests","description":"Implement the Engineer's main processing loop.\n\nLoop structure:\n1. Query: bd ready --type=merge-request\n2. If empty: sleep(poll_interval), continue\n3. Select highest priority, oldest MR\n4. Claim: bd update \u003cid\u003e --status=in_progress\n5. Process (delegate to other subtasks)\n6. Repeat\n\nConfiguration:\n- poll_interval: from rig config (default 30s)\n- max_concurrent: from rig config (default 1)\n\nThe loop should be interruptible and handle graceful shutdown.\n\nReference: docs/merge-queue-design.md#engineer-processing-loop","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-17T13:50:57.022367-08:00","updated_at":"2025-12-17T13:50:57.022367-08:00","dependencies":[{"issue_id":"gt-3x1.1","depends_on_id":"gt-3x1","type":"parent-child","created_at":"2025-12-17T13:50:57.024225-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.1","depends_on_id":"gt-svi.1","type":"blocks","created_at":"2025-12-17T13:53:09.832586-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.1","depends_on_id":"gt-svi.2","type":"blocks","created_at":"2025-12-17T13:53:09.9547-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.1","depends_on_id":"gt-h5n.8","type":"blocks","created_at":"2025-12-17T13:53:16.770078-08:00","created_by":"daemon","metadata":"{}"}]}
|
||||
{"id":"gt-3x1.1","title":"Engineer main loop: poll for ready merge-requests","description":"Implement the Engineer's main processing loop.\n\nLoop structure:\n1. Query: bd ready --type=merge-request\n2. If empty: sleep(poll_interval), continue\n3. Select highest priority, oldest MR\n4. Claim: bd update \u003cid\u003e --status=in_progress\n5. Process (delegate to other subtasks)\n6. Repeat\n\nConfiguration:\n- poll_interval: from rig config (default 30s)\n- max_concurrent: from rig config (default 1)\n\nThe loop should be interruptible and handle graceful shutdown.\n\nReference: docs/merge-queue-design.md#engineer-processing-loop","status":"closed","priority":1,"issue_type":"task","created_at":"2025-12-17T13:50:57.022367-08:00","updated_at":"2025-12-18T20:45:17.731441-08:00","closed_at":"2025-12-18T20:14:35.321731-08:00","dependencies":[{"issue_id":"gt-3x1.1","depends_on_id":"gt-3x1","type":"parent-child","created_at":"2025-12-17T13:50:57.024225-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.1","depends_on_id":"gt-svi.1","type":"blocks","created_at":"2025-12-17T13:53:09.832586-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.1","depends_on_id":"gt-svi.2","type":"blocks","created_at":"2025-12-17T13:53:09.9547-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.1","depends_on_id":"gt-h5n.8","type":"blocks","created_at":"2025-12-17T13:53:16.770078-08:00","created_by":"daemon","metadata":"{}"}]}
|
||||
{"id":"gt-3x1.2","title":"Fetch and conflict check: git operations for MR","description":"Implement git operations for MR processing.\n\nSteps:\n1. git fetch origin \u003cmr.branch\u003e\n2. git checkout \u003cmr.target\u003e (main or integration/xxx)\n3. git merge --no-commit --no-ff \u003cmr.branch\u003e (test merge)\n4. Check for conflicts\n5. If conflicts: abort and return Failure(conflict, files)\n6. If clean: abort (actual merge in next step)\n\nHelper functions:\n- FetchBranch(branch string) error\n- CheckConflicts(source, target string) ([]string, error)\n\nReference: docs/merge-queue-design.md#process-merge-steps","status":"closed","priority":1,"issue_type":"task","created_at":"2025-12-17T13:50:58.99193-08:00","updated_at":"2025-12-18T20:17:47.781432-08:00","closed_at":"2025-12-18T20:17:47.781432-08:00","close_reason":"Implemented in commit 999abe6","dependencies":[{"issue_id":"gt-3x1.2","depends_on_id":"gt-3x1","type":"parent-child","created_at":"2025-12-17T13:50:58.993973-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.2","depends_on_id":"gt-3x1.1","type":"blocks","created_at":"2025-12-17T13:53:10.066159-08:00","created_by":"daemon","metadata":"{}"}]}
|
||||
{"id":"gt-3x1.3","title":"Merge execution: merge, test, push","description":"Implement the actual merge execution.\n\nSteps:\n1. git checkout \u003cmr.target\u003e\n2. git merge \u003cmr.branch\u003e --no-ff -m 'Merge \u003cbranch\u003e: \u003ctitle\u003e'\n3. If config.run_tests:\n - Run test_command (from config)\n - If failed: git reset --hard HEAD~1, return Failure(tests_failed)\n4. git push origin \u003cmr.target\u003e\n5. Return Success(merge_commit=HEAD)\n\nConfiguration:\n- run_tests: bool (default true)\n- test_command: string (default 'go test ./...')\n\nHandle push failures with retry logic.\n\nReference: docs/merge-queue-design.md#process-merge-steps","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-17T13:51:00.742994-08:00","updated_at":"2025-12-17T13:51:00.742994-08:00","dependencies":[{"issue_id":"gt-3x1.3","depends_on_id":"gt-3x1","type":"parent-child","created_at":"2025-12-17T13:51:00.744975-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.3","depends_on_id":"gt-3x1.2","type":"blocks","created_at":"2025-12-17T13:53:10.163097-08:00","created_by":"daemon","metadata":"{}"}]}
|
||||
{"id":"gt-3x1.4","title":"Failure handling: assign back to worker, add labels","description":"Handle merge failures appropriately.\n\nFailure types and actions:\n| Failure | Action |\n|-------------|---------------------------------------------|\n| conflict | Add needs-rebase label, assign to worker |\n| tests_fail | Add needs-fix label, assign to worker |\n| build_fail | Add needs-fix label, assign to worker |\n| flaky_test | Retry once, then treat as tests_fail |\n| push_fail | Retry with backoff, escalate if persistent |\n\nActions:\n1. bd update \u003cid\u003e --status=open --assignee=\u003cworker\u003e\n2. bd update \u003cid\u003e --labels=\u003cfailure-label\u003e\n3. Send mail to worker explaining failure\n4. Log failure details\n\nReference: docs/merge-queue-design.md#handling-failures","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-17T13:51:17.238066-08:00","updated_at":"2025-12-17T13:51:17.238066-08:00","dependencies":[{"issue_id":"gt-3x1.4","depends_on_id":"gt-3x1","type":"parent-child","created_at":"2025-12-17T13:51:17.240001-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-3x1.4","depends_on_id":"gt-3x1.1","type":"blocks","created_at":"2025-12-17T13:53:10.281038-08:00","created_by":"daemon","metadata":"{}"}]}
|
||||
@@ -41,7 +41,7 @@
|
||||
{"id":"gt-4my","title":"Doctor check: Worker health and stuck detection","description":"Detect and report stuck workers via gt doctor.\n\n## Checks\n\n### WorkerHealthCheck\n- List all active workers (polecats with state=working)\n- Check last activity timestamp for each\n- Flag as potentially stuck if no progress for configurable threshold (default: 30 min)\n- Check if Witness is running for the rig\n- Verify Witness last heartbeat time\n\n### Stuck Detection Criteria\n- Polecat state=working but session not running\n- Polecat state=working but output unchanged for threshold\n- Witness not responding to health checks\n- Multiple polecats in same rig all stuck\n\n## Output\n\n```\n[WARN] Workers in rig 'wyvern' may be stuck:\n - Toast: working for 45m, no recent output\n - Capable: working for 52m, session not found\n - Witness: last heartbeat 20m ago\n \n Suggestions:\n - gt witness status wyvern\n - gt capture wyvern/Toast 50\n - gt stop --rig wyvern (kill all)\n```\n\n## Auto-Fix\n\nCannot auto-fix stuck workers (risk of data loss), but can:\n- Restart Witness daemon if crashed\n- Send warning mail to Mayor","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-15T23:17:59.265062-08:00","updated_at":"2025-12-16T17:24:34.882466-08:00","dependencies":[{"issue_id":"gt-4my","depends_on_id":"gt-f9x.4","type":"blocks","created_at":"2025-12-15T23:19:05.565606-08:00","created_by":"daemon","metadata":"{}"},{"issue_id":"gt-4my","depends_on_id":"gt-7ik","type":"blocks","created_at":"2025-12-17T15:44:42.068149-08:00","created_by":"daemon","metadata":"{}"}]}
|
||||
{"id":"gt-4nn","title":"Molecules: Composable Workflow Beads","description":"## Summary\n\nMolecules are crystallized workflow patterns stored as Beads issues.\nWhen instantiated, the molecule creates child beads forming a DAG.\n\n## Key Insight: Molecules ARE Beads\n\nPer HOP Decision 001: Beads IS the ledger. Molecules don't get a separate YAML format - they're issues with `type: molecule` containing prose-based step definitions.\n\nAgents don't need rigid schemas. They parse natural language natively. A molecule is just instructions with enough structure for tooling.\n\n## Example: Engineer in a Box\n\n```markdown\nid: mol-xyz\ntype: molecule\ntitle: Engineer in a Box\n\nThis workflow takes a task from design to merge.\n\n## Step: design\nThink carefully about architecture. Consider existing patterns, \ntrade-offs, testability.\n\n## Step: implement\nWrite clean code. Follow codebase conventions.\nNeeds: design\n\n## Step: review \nReview for bugs, edge cases, style issues.\nNeeds: implement\n\n## Step: test\nWrite and run tests. Cover happy path and edge cases.\nNeeds: implement\n\n## Step: submit\nSubmit for merge via refinery.\nNeeds: review, test\n```\n\n## Instantiation\n\n```bash\n# Attach molecule when spawning\ngt spawn --issue gt-abc --molecule mol-xyz\n\n# Creates child beads atomically:\ngt-abc.design ← ready first\ngt-abc.implement ← blocked by design \ngt-abc.review ← blocked by implement\ngt-abc.test ← blocked by implement\ngt-abc.submit ← blocked by review, test\n```\n\nEach step issue gets an `instantiated-from` edge to the molecule (with step metadata).\n\n## Why This Matters\n\n1. **Unified data plane**: Everything in Beads, no parallel YAML channel\n2. **AI-native**: Prose instructions, not rigid schemas\n3. **Error isolation**: Each step is a checkpoint - failure doesn't lose progress\n4. 
**Scales with AI**: As agents get smarter, they handle more complex molecules\n\n## Implementation Primitives\n\n- `ParseMoleculeSteps()`: Extract steps from prose (convention-based)\n- `InstantiateMolecule()`: Atomic transaction creating all steps + edges \n- `instantiated-from` edge type: Track provenance\n- Parameterization: `{{variable}}` substitution from context map","status":"open","priority":0,"issue_type":"epic","created_at":"2025-12-18T18:06:24.573068-08:00","updated_at":"2025-12-18T20:14:52.411317-08:00"}
|
||||
{"id":"gt-4nn.1","title":"Molecule schema: YAML format for workflow definitions","description":"Define the YAML schema for molecule definitions:\n\n```yaml\nmolecule: \u003cname\u003e\nversion: 1\ndescription: \"Human description\"\nsteps:\n - id: \u003cstep-id\u003e\n title: \"Step title\"\n prompt: \"Instructions for agent\"\n depends: [\u003cother-step-ids\u003e] # optional\n tier: haiku|sonnet|opus # optional, default from config\n timeout: 30m # optional\n```\n\nStore molecules in:\n- `\u003crig\u003e/molecules/\u003cname\u003e.yaml` for rig-specific\n- `\u003ctown\u003e/molecules/\u003cname\u003e.yaml` for town-wide\n\nBuilt-in molecules to ship:\n- engineer-in-box: design→code→review→test→submit\n- quick-fix: implement→test→submit\n- research: investigate→document","status":"closed","priority":0,"issue_type":"task","created_at":"2025-12-18T18:06:49.441267-08:00","updated_at":"2025-12-18T20:14:32.629327-08:00","closed_at":"2025-12-18T20:14:32.629327-08:00","close_reason":"Pivoting away from YAML schema. Molecules should be Beads issues with prose-based step definitions, not a separate YAML format. This keeps the data plane unified in Beads (per HOP Decision 001) and leverages AI flexibility - agents can interpret prose instructions without rigid schemas.","dependencies":[{"issue_id":"gt-4nn.1","depends_on_id":"gt-4nn","type":"parent-child","created_at":"2025-12-18T18:06:49.442723-08:00","created_by":"daemon"}]}
|
||||
{"id":"gt-4nn.2","title":"Molecule instantiation: create child beads from template","description":"When instantiating a molecule on a work bead:\n\n## Transaction Flow\n\n1. Parse molecule's `## Step:` sections from description\n2. Begin SQLite transaction\n3. For each step, create child issue:\n - ID: `{parent-id}.{step-ref}` or generated\n - Title: step title (from header or first line)\n - Description: step prose instructions\n - Type: task\n - Priority: inherit from parent\n4. Add `instantiated-from` edge from each step to molecule:\n ```sql\n INSERT INTO dependencies (issue_id, depends_on_id, type, metadata)\n VALUES (step_id, mol_id, 'instantiated-from', '{\"step\": \"implement\"}');\n ```\n5. Wire inter-step dependencies from `Needs:` lines\n6. Commit transaction (atomic - all or nothing)\n\n## Parsing Conventions\n\n```markdown\n## Step: \u003cref\u003e\n\u003cprose instructions\u003e\nNeeds: \u003cstep\u003e, \u003cstep\u003e # optional\nTier: haiku|sonnet|opus # optional hint\n```\n\n## Parameterization\n\nSteps can have `{{variable}}` placeholders:\n```markdown\n## Step: implement\nImplement {{feature_name}} in {{target_file}}.\n```\n\nContext map provided at instantiation time.\n\n## API\n\n```go\nfunc (s *Store) InstantiateMolecule(mol *Issue, parent *Issue, ctx map[string]string) ([]*Issue, error)\nfunc ParseMoleculeSteps(description string) ([]MoleculeStep, error)\n```\n\nImplementation lives in `internal/beads/molecule.go`.","status":"open","priority":0,"issue_type":"task","created_at":"2025-12-18T18:06:52.071066-08:00","updated_at":"2025-12-18T20:15:15.502672-08:00","dependencies":[{"issue_id":"gt-4nn.2","depends_on_id":"gt-4nn","type":"parent-child","created_at":"2025-12-18T18:06:52.072554-08:00","created_by":"daemon"}]}
|
||||
{"id":"gt-4nn.2","title":"Molecule instantiation: create child beads from template","description":"When instantiating a molecule on a work bead:\n\n## Transaction Flow\n\n1. Parse molecule's `## Step:` sections from description\n2. Begin SQLite transaction\n3. For each step, create child issue:\n - ID: `{parent-id}.{step-ref}` or generated\n - Title: step title (from header or first line)\n - Description: step prose instructions\n - Type: task\n - Priority: inherit from parent\n4. Add `instantiated-from` edge from each step to molecule:\n ```sql\n INSERT INTO dependencies (issue_id, depends_on_id, type, metadata)\n VALUES (step_id, mol_id, 'instantiated-from', '{\"step\": \"implement\"}');\n ```\n5. Wire inter-step dependencies from `Needs:` lines\n6. Commit transaction (atomic - all or nothing)\n\n## Parsing Conventions\n\n```markdown\n## Step: \u003cref\u003e\n\u003cprose instructions\u003e\nNeeds: \u003cstep\u003e, \u003cstep\u003e # optional\nTier: haiku|sonnet|opus # optional hint\n```\n\n## Parameterization\n\nSteps can have `{{variable}}` placeholders:\n```markdown\n## Step: implement\nImplement {{feature_name}} in {{target_file}}.\n```\n\nContext map provided at instantiation time.\n\n## API\n\n```go\nfunc (s *Store) InstantiateMolecule(mol *Issue, parent *Issue, ctx map[string]string) ([]*Issue, error)\nfunc ParseMoleculeSteps(description string) ([]MoleculeStep, error)\n```\n\nImplementation lives in `internal/beads/molecule.go`.","status":"open","priority":0,"issue_type":"task","created_at":"2025-12-18T18:06:52.071066-08:00","updated_at":"2025-12-18T20:15:15.502672-08:00","dependencies":[{"issue_id":"gt-4nn.2","depends_on_id":"gt-4nn","type":"parent-child","created_at":"2025-12-18T18:06:52.072554-08:00","created_by":"daemon"},{"issue_id":"gt-4nn.2","depends_on_id":"gt-4nn.1","type":"blocks","created_at":"2025-12-18T18:07:02.949242-08:00","created_by":"daemon"}]}
|
||||
{"id":"gt-4nn.3","title":"Molecule CLI: bd molecule commands","description":"Add molecule commands to bd:\n\n## Commands\n\n```bash\nbd molecule list # List molecules (type: molecule)\nbd molecule show \u003cid\u003e # Show molecule with parsed steps\nbd molecule parse \u003cid\u003e # Validate and show parsed structure \nbd molecule instantiate \u003cmol-id\u003e --parent=\u003cissue-id\u003e # Create steps\nbd molecule instances \u003cmol-id\u003e # Show all instantiations\n```\n\n## gt spawn integration\n\n```bash\ngt spawn --issue \u003cid\u003e --molecule \u003cmol-id\u003e\n```\n\nThis should:\n1. Call `bd molecule instantiate` (creates child beads atomically)\n2. Spawn polecat on first ready step\n3. Polecat grinds through via `bd ready`\n\n## Output Examples\n\n```\n$ bd molecule show mol-abc\n\nmol-abc: Engineer in a Box\nType: molecule\n\nSteps (5):\n design → (ready first)\n implement → Needs: design\n review → Needs: implement\n test → Needs: implement \n submit → Needs: review, test\n \nInstances: 3\n```\n\n```\n$ bd molecule instances mol-abc\n\nParent Status Created\ngt-xyz done 2025-12-15\ngt-abc active 2025-12-17 (3/5 complete)\ngt-def pending 2025-12-18\n```","status":"open","priority":0,"issue_type":"task","created_at":"2025-12-18T18:06:53.919884-08:00","updated_at":"2025-12-18T20:15:38.500265-08:00","dependencies":[{"issue_id":"gt-4nn.3","depends_on_id":"gt-4nn","type":"parent-child","created_at":"2025-12-18T18:06:53.921621-08:00","created_by":"daemon"},{"issue_id":"gt-4nn.3","depends_on_id":"gt-4nn.2","type":"blocks","created_at":"2025-12-18T18:07:03.048941-08:00","created_by":"daemon"}]}
|
||||
{"id":"gt-4nn.4","title":"Built-in molecules: engineer-in-box, quick-fix, research","description":"Create built-in molecules as Beads issues:\n\n## engineer-in-box\n\n```markdown\nid: mol-engineer-in-box\ntype: molecule\ntitle: Engineer in a Box\n\nFull workflow from design to merge.\n\n## Step: design\nThink carefully about architecture. Consider:\n- Existing patterns in the codebase\n- Trade-offs between approaches \n- Testability and maintainability\n\nWrite a brief design summary before proceeding.\n\n## Step: implement\nWrite the code. Follow codebase conventions.\nNeeds: design\n\n## Step: review\nSelf-review the changes. Look for:\n- Bugs and edge cases\n- Style issues\n- Missing error handling\nNeeds: implement\n\n## Step: test\nWrite and run tests. Cover happy path and edge cases.\nFix any failures before proceeding.\nNeeds: implement\n\n## Step: submit\nSubmit for merge via refinery.\nNeeds: review, test\n```\n\n## quick-fix\n\n```markdown\nid: mol-quick-fix\ntype: molecule \ntitle: Quick Fix\n\nFast path for small changes.\n\n## Step: implement\nMake the fix. Keep it focused.\n\n## Step: test\nRun relevant tests. Fix any regressions.\nNeeds: implement\n\n## Step: submit\nSubmit for merge.\nNeeds: test\n```\n\n## research\n\n```markdown\nid: mol-research\ntype: molecule\ntitle: Research\n\nInvestigation workflow.\n\n## Step: investigate\nExplore the question. Search code, read docs, \nunderstand context. Take notes.\n\n## Step: document\nWrite up findings. 
Include:\n- What you learned\n- Recommendations\n- Open questions\nNeeds: investigate\n```\n\n## Storage\n\nBuilt-in molecules live in `\u003ctown\u003e/.beads/` as regular issues.\nCreated during `gt install` or `bd init`.","status":"open","priority":1,"issue_type":"task","created_at":"2025-12-18T18:07:04.574565-08:00","updated_at":"2025-12-18T20:15:38.8612-08:00","dependencies":[{"issue_id":"gt-4nn.4","depends_on_id":"gt-4nn","type":"parent-child","created_at":"2025-12-18T18:07:04.576587-08:00","created_by":"daemon"}]}
|
||||
{"id":"gt-51x","title":"Fix golangci-lint errcheck warnings (~160 issues)","description":"Running golangci-lint shows ~160 errcheck warnings for unchecked error returns.\n\nCommon patterns:\n- t.SetEnvironment() return values\n- os.WriteFile(), os.RemoveAll() \n- MarkFlagRequired() on cobra commands\n- Various manager methods\n\nRun: golangci-lint run ./...\n\nCould batch fix with:\n1. Add explicit _ = for intentionally ignored errors\n2. Handle errors properly where they matter\n3. Consider adding //nolint:errcheck for cobra flag setup","status":"open","priority":2,"issue_type":"task","created_at":"2025-12-17T15:02:39.807659-08:00","updated_at":"2025-12-17T15:02:39.807659-08:00"}
|
||||
|
||||
@@ -190,6 +190,26 @@ func (b *Beads) Ready() ([]*Issue, error) {
|
||||
return issues, nil
|
||||
}
|
||||
|
||||
// ReadyWithType returns ready issues filtered by type.
|
||||
// This fetches all ready issues and filters client-side by type.
|
||||
// Issues are returned sorted by priority (lowest first) then by creation time (oldest first).
|
||||
func (b *Beads) ReadyWithType(issueType string) ([]*Issue, error) {
|
||||
issues, err := b.Ready()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Filter by type
|
||||
var filtered []*Issue
|
||||
for _, issue := range issues {
|
||||
if issue.Type == issueType {
|
||||
filtered = append(filtered, issue)
|
||||
}
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// Show returns detailed information about an issue.
|
||||
func (b *Beads) Show(id string) (*Issue, error) {
|
||||
out, err := b.run("show", id, "--json")
|
||||
|
||||
@@ -40,6 +40,9 @@ var (
|
||||
mqListWorker string
|
||||
mqListEpic string
|
||||
mqListJSON bool
|
||||
|
||||
// Status command flags
|
||||
mqStatusJSON bool
|
||||
)
|
||||
|
||||
var mqCmd = &cobra.Command{
|
||||
@@ -127,6 +130,20 @@ Examples:
|
||||
RunE: runMQReject,
|
||||
}
|
||||
|
||||
// mqStatusCmd implements `gt mq status <id>`: a read-only detail view of
// a single merge request. The handler (runMqStatus) loads the issue from
// beads, parses MR metadata out of its description, and renders either a
// human-readable report or JSON (--json, see mqStatusJSON).
var mqStatusCmd = &cobra.Command{
	Use:   "status <id>",
	Short: "Show detailed merge request status",
	Long: `Display detailed information about a merge request.

Shows all MR fields, current status with timestamps, dependencies,
blockers, and processing history.

Example:
  gt mq status gt-mr-abc123`,
	Args: cobra.ExactArgs(1),
	RunE: runMqStatus,
}
|
||||
|
||||
func init() {
|
||||
// Submit flags
|
||||
mqSubmitCmd.Flags().StringVar(&mqSubmitBranch, "branch", "", "Source branch (default: current branch)")
|
||||
@@ -149,11 +166,15 @@ func init() {
|
||||
mqRejectCmd.Flags().BoolVar(&mqRejectNotify, "notify", false, "Send mail notification to worker")
|
||||
_ = mqRejectCmd.MarkFlagRequired("reason")
|
||||
|
||||
// Status flags
|
||||
mqStatusCmd.Flags().BoolVar(&mqStatusJSON, "json", false, "Output as JSON")
|
||||
|
||||
// Add subcommands
|
||||
mqCmd.AddCommand(mqSubmitCmd)
|
||||
mqCmd.AddCommand(mqRetryCmd)
|
||||
mqCmd.AddCommand(mqListCmd)
|
||||
mqCmd.AddCommand(mqRejectCmd)
|
||||
mqCmd.AddCommand(mqStatusCmd)
|
||||
|
||||
rootCmd.AddCommand(mqCmd)
|
||||
}
|
||||
@@ -611,3 +632,347 @@ func runMQReject(cmd *cobra.Command, args []string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MRStatusOutput is the JSON output structure for gt mq status.
//
// It flattens three sources into one document: the core beads issue
// fields, the MR-specific fields parsed from the issue description
// (beads.ParseMRFields), and the issue's dependency edges in both
// directions. Timestamps are passed through as strings exactly as
// stored by beads.
type MRStatusOutput struct {
	// Core issue fields, copied verbatim from the beads issue.
	ID        string `json:"id"`
	Title     string `json:"title"`
	Status    string `json:"status"`
	Priority  int    `json:"priority"`
	Type      string `json:"type"`
	Assignee  string `json:"assignee,omitempty"`
	CreatedAt string `json:"created_at"`
	UpdatedAt string `json:"updated_at"`
	ClosedAt  string `json:"closed_at,omitempty"`

	// MR-specific fields; populated only when the description carries
	// parseable MR metadata (nil result from ParseMRFields leaves them empty).
	Branch      string `json:"branch,omitempty"`
	Target      string `json:"target,omitempty"`
	SourceIssue string `json:"source_issue,omitempty"`
	Worker      string `json:"worker,omitempty"`
	Rig         string `json:"rig,omitempty"`
	MergeCommit string `json:"merge_commit,omitempty"`
	CloseReason string `json:"close_reason,omitempty"`

	// Dependencies: DependsOn lists issues this MR waits on
	// (issue.Dependencies); Blocks lists issues waiting on this MR
	// (issue.Dependents).
	DependsOn []DependencyInfo `json:"depends_on,omitempty"`
	Blocks    []DependencyInfo `json:"blocks,omitempty"`
}
|
||||
|
||||
// DependencyInfo represents a dependency or blocker.
//
// It is a trimmed view of a related issue, used for both the
// depends_on and blocks lists in MRStatusOutput.
type DependencyInfo struct {
	ID       string `json:"id"`       // related issue ID
	Title    string `json:"title"`    // related issue title
	Status   string `json:"status"`   // e.g. open, in_progress, closed
	Priority int    `json:"priority"` // numeric priority (rendered as P0, P1, ...)
	Type     string `json:"type"`     // issue type
}
|
||||
|
||||
func runMqStatus(cmd *cobra.Command, args []string) error {
|
||||
mrID := args[0]
|
||||
|
||||
// Use current working directory for beads operations
|
||||
// (beads repos are per-rig, not per-workspace)
|
||||
workDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting current directory: %w", err)
|
||||
}
|
||||
|
||||
// Initialize beads client
|
||||
bd := beads.New(workDir)
|
||||
|
||||
// Fetch the issue
|
||||
issue, err := bd.Show(mrID)
|
||||
if err != nil {
|
||||
if err == beads.ErrNotFound {
|
||||
return fmt.Errorf("merge request '%s' not found", mrID)
|
||||
}
|
||||
return fmt.Errorf("fetching merge request: %w", err)
|
||||
}
|
||||
|
||||
// Parse MR-specific fields from description
|
||||
mrFields := beads.ParseMRFields(issue)
|
||||
|
||||
// Build output structure
|
||||
output := MRStatusOutput{
|
||||
ID: issue.ID,
|
||||
Title: issue.Title,
|
||||
Status: issue.Status,
|
||||
Priority: issue.Priority,
|
||||
Type: issue.Type,
|
||||
Assignee: issue.Assignee,
|
||||
CreatedAt: issue.CreatedAt,
|
||||
UpdatedAt: issue.UpdatedAt,
|
||||
ClosedAt: issue.ClosedAt,
|
||||
}
|
||||
|
||||
// Add MR fields if present
|
||||
if mrFields != nil {
|
||||
output.Branch = mrFields.Branch
|
||||
output.Target = mrFields.Target
|
||||
output.SourceIssue = mrFields.SourceIssue
|
||||
output.Worker = mrFields.Worker
|
||||
output.Rig = mrFields.Rig
|
||||
output.MergeCommit = mrFields.MergeCommit
|
||||
output.CloseReason = mrFields.CloseReason
|
||||
}
|
||||
|
||||
// Add dependency info from the issue's Dependencies field
|
||||
for _, dep := range issue.Dependencies {
|
||||
output.DependsOn = append(output.DependsOn, DependencyInfo{
|
||||
ID: dep.ID,
|
||||
Title: dep.Title,
|
||||
Status: dep.Status,
|
||||
Priority: dep.Priority,
|
||||
Type: dep.Type,
|
||||
})
|
||||
}
|
||||
|
||||
// Add blocker info from the issue's Dependents field
|
||||
for _, dep := range issue.Dependents {
|
||||
output.Blocks = append(output.Blocks, DependencyInfo{
|
||||
ID: dep.ID,
|
||||
Title: dep.Title,
|
||||
Status: dep.Status,
|
||||
Priority: dep.Priority,
|
||||
Type: dep.Type,
|
||||
})
|
||||
}
|
||||
|
||||
// JSON output
|
||||
if mqStatusJSON {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(output)
|
||||
}
|
||||
|
||||
// Human-readable output
|
||||
return printMqStatus(issue, mrFields)
|
||||
}
|
||||
|
||||
// printMqStatus prints detailed MR status in human-readable format.
//
// Sections appear in a fixed order: header, Status, Timeline,
// Merge Details (only when mrFields is non-nil), Waiting On
// (issue.Dependencies), Blocking (issue.Dependents), and Notes (the
// description with the machine-readable MR fields stripped). Empty
// fields and empty sections are skipped. Always returns nil; the error
// return exists to match the command-handler call shape.
func printMqStatus(issue *beads.Issue, mrFields *beads.MRFields) error {
	// Header: ID on the first line, title indented beneath it.
	fmt.Printf("%s %s\n", style.Bold.Render("📋 Merge Request:"), issue.ID)
	fmt.Printf("  %s\n\n", issue.Title)

	// Status section
	fmt.Printf("%s\n", style.Bold.Render("Status"))
	statusDisplay := formatStatus(issue.Status)
	fmt.Printf("  State: %s\n", statusDisplay)
	fmt.Printf("  Priority: P%d\n", issue.Priority)
	if issue.Type != "" {
		fmt.Printf("  Type: %s\n", issue.Type)
	}
	if issue.Assignee != "" {
		fmt.Printf("  Assignee: %s\n", issue.Assignee)
	}

	// Timestamps, each followed by a relative suffix from formatTimeAgo.
	fmt.Printf("\n%s\n", style.Bold.Render("Timeline"))
	if issue.CreatedAt != "" {
		fmt.Printf("  Created: %s %s\n", issue.CreatedAt, formatTimeAgo(issue.CreatedAt))
	}
	// Suppress Updated when it equals Created (issue never modified).
	if issue.UpdatedAt != "" && issue.UpdatedAt != issue.CreatedAt {
		fmt.Printf("  Updated: %s %s\n", issue.UpdatedAt, formatTimeAgo(issue.UpdatedAt))
	}
	if issue.ClosedAt != "" {
		fmt.Printf("  Closed: %s %s\n", issue.ClosedAt, formatTimeAgo(issue.ClosedAt))
	}

	// MR-specific fields; each printed only when non-empty.
	if mrFields != nil {
		fmt.Printf("\n%s\n", style.Bold.Render("Merge Details"))
		if mrFields.Branch != "" {
			fmt.Printf("  Branch: %s\n", mrFields.Branch)
		}
		if mrFields.Target != "" {
			fmt.Printf("  Target: %s\n", mrFields.Target)
		}
		if mrFields.SourceIssue != "" {
			fmt.Printf("  Source Issue: %s\n", mrFields.SourceIssue)
		}
		if mrFields.Worker != "" {
			fmt.Printf("  Worker: %s\n", mrFields.Worker)
		}
		if mrFields.Rig != "" {
			fmt.Printf("  Rig: %s\n", mrFields.Rig)
		}
		if mrFields.MergeCommit != "" {
			fmt.Printf("  Merge Commit: %s\n", mrFields.MergeCommit)
		}
		if mrFields.CloseReason != "" {
			fmt.Printf("  Close Reason: %s\n", mrFields.CloseReason)
		}
	}

	// Dependencies (what this MR is waiting on)
	if len(issue.Dependencies) > 0 {
		fmt.Printf("\n%s\n", style.Bold.Render("Waiting On"))
		for _, dep := range issue.Dependencies {
			statusIcon := getStatusIcon(dep.Status)
			// Icon, ID, truncated title, then dimmed status tag.
			fmt.Printf("  %s %s: %s %s\n",
				statusIcon,
				dep.ID,
				truncateString(dep.Title, 50),
				style.Dim.Render(fmt.Sprintf("[%s]", dep.Status)))
		}
	}

	// Blockers (what's waiting on this MR)
	if len(issue.Dependents) > 0 {
		fmt.Printf("\n%s\n", style.Bold.Render("Blocking"))
		for _, dep := range issue.Dependents {
			statusIcon := getStatusIcon(dep.Status)
			fmt.Printf("  %s %s: %s %s\n",
				statusIcon,
				dep.ID,
				truncateString(dep.Title, 50),
				style.Dim.Render(fmt.Sprintf("[%s]", dep.Status)))
		}
	}

	// Description (if present and not just MR fields)
	desc := getDescriptionWithoutMRFields(issue.Description)
	if desc != "" {
		fmt.Printf("\n%s\n", style.Bold.Render("Notes"))
		// Indent each line
		for _, line := range strings.Split(desc, "\n") {
			fmt.Printf("  %s\n", line)
		}
	}

	return nil
}
|
||||
|
||||
// formatStatus formats the status with appropriate styling.
|
||||
func formatStatus(status string) string {
|
||||
switch status {
|
||||
case "open":
|
||||
return style.Info.Render("● open")
|
||||
case "in_progress":
|
||||
return style.Bold.Render("▶ in_progress")
|
||||
case "closed":
|
||||
return style.Dim.Render("✓ closed")
|
||||
default:
|
||||
return status
|
||||
}
|
||||
}
|
||||
|
||||
// getStatusIcon maps an issue status to the one-character glyph used in
// list output; unknown statuses get a generic bullet.
func getStatusIcon(status string) string {
	icons := map[string]string{
		"open":        "○",
		"in_progress": "▶",
		"closed":      "✓",
	}
	if icon, ok := icons[status]; ok {
		return icon
	}
	return "•"
}
|
||||
|
||||
// formatTimeAgo formats a timestamp as a relative time string.
|
||||
func formatTimeAgo(timestamp string) string {
|
||||
// Try parsing common formats
|
||||
formats := []string{
|
||||
time.RFC3339,
|
||||
"2006-01-02T15:04:05Z",
|
||||
"2006-01-02T15:04:05",
|
||||
"2006-01-02 15:04:05",
|
||||
"2006-01-02",
|
||||
}
|
||||
|
||||
var t time.Time
|
||||
var err error
|
||||
for _, format := range formats {
|
||||
t, err = time.Parse(format, timestamp)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return "" // Can't parse, return empty
|
||||
}
|
||||
|
||||
d := time.Since(t)
|
||||
if d < 0 {
|
||||
return style.Dim.Render("(in the future)")
|
||||
}
|
||||
|
||||
var ago string
|
||||
if d < time.Minute {
|
||||
ago = fmt.Sprintf("%ds ago", int(d.Seconds()))
|
||||
} else if d < time.Hour {
|
||||
ago = fmt.Sprintf("%dm ago", int(d.Minutes()))
|
||||
} else if d < 24*time.Hour {
|
||||
ago = fmt.Sprintf("%dh ago", int(d.Hours()))
|
||||
} else {
|
||||
ago = fmt.Sprintf("%dd ago", int(d.Hours()/24))
|
||||
}
|
||||
|
||||
return style.Dim.Render("(" + ago + ")")
|
||||
}
|
||||
|
||||
// truncateString shortens s to at most maxLen characters, appending "..."
// when anything was cut off. Truncation counts runes, not bytes, so
// multi-byte UTF-8 characters (like the ✓/▶ glyphs used elsewhere in this
// file) are never split mid-sequence into invalid output.
func truncateString(s string, maxLen int) string {
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	if maxLen <= 3 {
		// Not enough room for an ellipsis; hard-cut instead.
		return string(runes[:maxLen])
	}
	return string(runes[:maxLen-3]) + "..."
}
|
||||
|
||||
// getDescriptionWithoutMRFields strips machine-readable "Key: value" MR
// metadata lines from a description, leaving only the free-form notes.
// Leading and trailing whitespace of the result is trimmed.
func getDescriptionWithoutMRFields(description string) string {
	if description == "" {
		return ""
	}

	// MR metadata keys (lowercased) in every accepted spelling:
	// snake_case, kebab-case, and collapsed.
	mrKeys := map[string]bool{
		"branch":       true,
		"target":       true,
		"source_issue": true,
		"source-issue": true,
		"sourceissue":  true,
		"worker":       true,
		"rig":          true,
		"merge_commit": true,
		"merge-commit": true,
		"mergecommit":  true,
		"close_reason": true,
		"close-reason": true,
		"closereason":  true,
		"type":         true,
	}

	var kept []string
	for _, line := range strings.Split(description, "\n") {
		trimmed := strings.TrimSpace(line)
		if trimmed != "" {
			// A line of the form "key: ..." whose key is MR metadata is dropped.
			if key, _, found := strings.Cut(trimmed, ":"); found {
				if mrKeys[strings.ToLower(strings.TrimSpace(key))] {
					continue
				}
			}
		}
		kept = append(kept, line)
	}

	// Collapse leading/trailing blank lines left behind by removed fields.
	return strings.TrimSpace(strings.Join(kept, "\n"))
}
|
||||
|
||||
321
internal/refinery/engineer.go
Normal file
321
internal/refinery/engineer.go
Normal file
@@ -0,0 +1,321 @@
|
||||
// Package refinery provides the merge queue processing agent.
|
||||
package refinery
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
)
|
||||
|
||||
// MergeQueueConfig holds configuration for the merge queue processor.
//
// It is populated from the "merge_queue" section of a rig's config.json by
// (*Engineer).LoadConfig; fields absent from that section keep the values
// from DefaultMergeQueueConfig.
type MergeQueueConfig struct {
	// Enabled controls whether the merge queue is active.
	Enabled bool `json:"enabled"`

	// TargetBranch is the default branch to merge to (e.g., "main").
	TargetBranch string `json:"target_branch"`

	// IntegrationBranches enables per-epic integration branches.
	IntegrationBranches bool `json:"integration_branches"`

	// OnConflict is the strategy for handling conflicts: "assign_back" or "auto_rebase".
	OnConflict string `json:"on_conflict"`

	// RunTests controls whether to run tests before merging.
	RunTests bool `json:"run_tests"`

	// TestCommand is the command to run for testing.
	TestCommand string `json:"test_command"`

	// DeleteMergedBranches controls whether to delete branches after merge.
	DeleteMergedBranches bool `json:"delete_merged_branches"`

	// RetryFlakyTests is the number of times to retry flaky tests.
	RetryFlakyTests int `json:"retry_flaky_tests"`

	// PollInterval is how often to check for new MRs.
	// NOTE: in config.json this is written as a duration string (e.g. "30s");
	// LoadConfig parses it with time.ParseDuration rather than relying on
	// this json tag.
	PollInterval time.Duration `json:"poll_interval"`

	// MaxConcurrent is the maximum number of MRs to process concurrently.
	MaxConcurrent int `json:"max_concurrent"`
}
|
||||
|
||||
// DefaultMergeQueueConfig returns sensible defaults for merge queue configuration.
|
||||
func DefaultMergeQueueConfig() *MergeQueueConfig {
|
||||
return &MergeQueueConfig{
|
||||
Enabled: true,
|
||||
TargetBranch: "main",
|
||||
IntegrationBranches: true,
|
||||
OnConflict: "assign_back",
|
||||
RunTests: true,
|
||||
TestCommand: "",
|
||||
DeleteMergedBranches: true,
|
||||
RetryFlakyTests: 1,
|
||||
PollInterval: 30 * time.Second,
|
||||
MaxConcurrent: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// Engineer is the merge queue processor that polls for ready merge-requests
// and processes them according to the merge queue design.
type Engineer struct {
	rig     *rig.Rig          // rig whose merge queue this Engineer services
	beads   *beads.Beads      // Beads client rooted at the rig path
	config  *MergeQueueConfig // effective config: defaults overlaid by LoadConfig
	workDir string            // set to rig.Path by NewEngineer; presumably the git working dir — confirm against callers

	// stopCh is used for graceful shutdown
	stopCh chan struct{}
}
|
||||
|
||||
// NewEngineer creates a new Engineer for the given rig.
|
||||
func NewEngineer(r *rig.Rig) *Engineer {
|
||||
return &Engineer{
|
||||
rig: r,
|
||||
beads: beads.New(r.Path),
|
||||
config: DefaultMergeQueueConfig(),
|
||||
workDir: r.Path,
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// LoadConfig loads merge queue configuration from the rig's config.json.
|
||||
func (e *Engineer) LoadConfig() error {
|
||||
configPath := filepath.Join(e.rig.Path, "config.json")
|
||||
data, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Use defaults if no config file
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("reading config: %w", err)
|
||||
}
|
||||
|
||||
// Parse config file to extract merge_queue section
|
||||
var rawConfig struct {
|
||||
MergeQueue json.RawMessage `json:"merge_queue"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &rawConfig); err != nil {
|
||||
return fmt.Errorf("parsing config: %w", err)
|
||||
}
|
||||
|
||||
if rawConfig.MergeQueue == nil {
|
||||
// No merge_queue section, use defaults
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse merge_queue section into our config struct
|
||||
// We need special handling for poll_interval (string -> Duration)
|
||||
var mqRaw struct {
|
||||
Enabled *bool `json:"enabled"`
|
||||
TargetBranch *string `json:"target_branch"`
|
||||
IntegrationBranches *bool `json:"integration_branches"`
|
||||
OnConflict *string `json:"on_conflict"`
|
||||
RunTests *bool `json:"run_tests"`
|
||||
TestCommand *string `json:"test_command"`
|
||||
DeleteMergedBranches *bool `json:"delete_merged_branches"`
|
||||
RetryFlakyTests *int `json:"retry_flaky_tests"`
|
||||
PollInterval *string `json:"poll_interval"`
|
||||
MaxConcurrent *int `json:"max_concurrent"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(rawConfig.MergeQueue, &mqRaw); err != nil {
|
||||
return fmt.Errorf("parsing merge_queue config: %w", err)
|
||||
}
|
||||
|
||||
// Apply non-nil values to config (preserving defaults for missing fields)
|
||||
if mqRaw.Enabled != nil {
|
||||
e.config.Enabled = *mqRaw.Enabled
|
||||
}
|
||||
if mqRaw.TargetBranch != nil {
|
||||
e.config.TargetBranch = *mqRaw.TargetBranch
|
||||
}
|
||||
if mqRaw.IntegrationBranches != nil {
|
||||
e.config.IntegrationBranches = *mqRaw.IntegrationBranches
|
||||
}
|
||||
if mqRaw.OnConflict != nil {
|
||||
e.config.OnConflict = *mqRaw.OnConflict
|
||||
}
|
||||
if mqRaw.RunTests != nil {
|
||||
e.config.RunTests = *mqRaw.RunTests
|
||||
}
|
||||
if mqRaw.TestCommand != nil {
|
||||
e.config.TestCommand = *mqRaw.TestCommand
|
||||
}
|
||||
if mqRaw.DeleteMergedBranches != nil {
|
||||
e.config.DeleteMergedBranches = *mqRaw.DeleteMergedBranches
|
||||
}
|
||||
if mqRaw.RetryFlakyTests != nil {
|
||||
e.config.RetryFlakyTests = *mqRaw.RetryFlakyTests
|
||||
}
|
||||
if mqRaw.MaxConcurrent != nil {
|
||||
e.config.MaxConcurrent = *mqRaw.MaxConcurrent
|
||||
}
|
||||
if mqRaw.PollInterval != nil {
|
||||
dur, err := time.ParseDuration(*mqRaw.PollInterval)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid poll_interval %q: %w", *mqRaw.PollInterval, err)
|
||||
}
|
||||
e.config.PollInterval = dur
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Config returns the current merge queue configuration.
// The returned pointer is the Engineer's live config object, not a copy;
// callers should treat it as read-only.
func (e *Engineer) Config() *MergeQueueConfig {
	return e.config
}
|
||||
|
||||
// Run starts the Engineer main loop. It blocks until the context is cancelled
|
||||
// or Stop() is called. Returns nil on graceful shutdown.
|
||||
func (e *Engineer) Run(ctx context.Context) error {
|
||||
if err := e.LoadConfig(); err != nil {
|
||||
return fmt.Errorf("loading config: %w", err)
|
||||
}
|
||||
|
||||
if !e.config.Enabled {
|
||||
return fmt.Errorf("merge queue is disabled in configuration")
|
||||
}
|
||||
|
||||
fmt.Printf("[Engineer] Starting for rig %s (poll_interval=%s)\n",
|
||||
e.rig.Name, e.config.PollInterval)
|
||||
|
||||
ticker := time.NewTicker(e.config.PollInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
// Run one iteration immediately, then on ticker
|
||||
if err := e.processOnce(ctx); err != nil {
|
||||
fmt.Printf("[Engineer] Error: %v\n", err)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
fmt.Println("[Engineer] Shutting down (context cancelled)")
|
||||
return nil
|
||||
case <-e.stopCh:
|
||||
fmt.Println("[Engineer] Shutting down (stop signal)")
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
if err := e.processOnce(ctx); err != nil {
|
||||
fmt.Printf("[Engineer] Error: %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop signals the Engineer to stop processing. This is a non-blocking call.
//
// NOTE(review): closing stopCh means Stop must be called at most once —
// a second call would panic on double-close. Looks like callers treat it
// as one-shot; confirm, or guard with sync.Once if reuse is expected.
func (e *Engineer) Stop() {
	close(e.stopCh)
}
|
||||
|
||||
// processOnce performs one iteration of the Engineer loop:
|
||||
// 1. Query for ready merge-requests
|
||||
// 2. If none, return (will try again on next tick)
|
||||
// 3. Process the highest priority, oldest MR
|
||||
func (e *Engineer) processOnce(ctx context.Context) error {
|
||||
// Check context before starting
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
|
||||
// 1. Query: bd ready --type=merge-request (filtered client-side)
|
||||
readyMRs, err := e.beads.ReadyWithType("merge-request")
|
||||
if err != nil {
|
||||
return fmt.Errorf("querying ready merge-requests: %w", err)
|
||||
}
|
||||
|
||||
// 2. If empty, return
|
||||
if len(readyMRs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 3. Select highest priority, oldest MR
|
||||
// bd ready already returns sorted by priority then age, so first is best
|
||||
mr := readyMRs[0]
|
||||
|
||||
fmt.Printf("[Engineer] Processing: %s (%s)\n", mr.ID, mr.Title)
|
||||
|
||||
// 4. Claim: bd update <id> --status=in_progress
|
||||
inProgress := "in_progress"
|
||||
if err := e.beads.Update(mr.ID, beads.UpdateOptions{Status: &inProgress}); err != nil {
|
||||
return fmt.Errorf("claiming MR %s: %w", mr.ID, err)
|
||||
}
|
||||
|
||||
// 5. Process (delegate to ProcessMR - implementation in separate issue gt-3x1.2)
|
||||
result := e.ProcessMR(ctx, mr)
|
||||
|
||||
// 6. Handle result
|
||||
if result.Success {
|
||||
// Close with merged reason
|
||||
reason := fmt.Sprintf("merged: %s", result.MergeCommit)
|
||||
if err := e.beads.CloseWithReason(reason, mr.ID); err != nil {
|
||||
fmt.Printf("[Engineer] Warning: failed to close MR %s: %v\n", mr.ID, err)
|
||||
}
|
||||
fmt.Printf("[Engineer] ✓ Merged: %s\n", mr.ID)
|
||||
} else {
|
||||
// Failure handling (detailed implementation in gt-3x1.4)
|
||||
e.handleFailure(mr, result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessResult contains the result of processing a merge request.
type ProcessResult struct {
	Success     bool   // true when the MR was merged
	MergeCommit string // merge commit identifier recorded on success
	Error       string // human-readable failure description when !Success
	Conflict    bool   // set when a merge conflict caused the failure (not yet populated here — see gt-3x1.2)
	TestsFailed bool   // set when failing tests caused the failure (not yet populated here — see gt-3x1.2)
}
|
||||
|
||||
// ProcessMR processes a single merge request.
|
||||
// This is a placeholder that will be fully implemented in gt-3x1.2.
|
||||
func (e *Engineer) ProcessMR(ctx context.Context, mr *beads.Issue) ProcessResult {
|
||||
// Parse MR fields from description
|
||||
mrFields := beads.ParseMRFields(mr)
|
||||
if mrFields == nil {
|
||||
return ProcessResult{
|
||||
Success: false,
|
||||
Error: "no MR fields found in description",
|
||||
}
|
||||
}
|
||||
|
||||
// For now, just log what we would do
|
||||
// Full implementation in gt-3x1.2: Fetch and conflict check
|
||||
fmt.Printf("[Engineer] Would process:\n")
|
||||
fmt.Printf(" Branch: %s\n", mrFields.Branch)
|
||||
fmt.Printf(" Target: %s\n", mrFields.Target)
|
||||
fmt.Printf(" Worker: %s\n", mrFields.Worker)
|
||||
|
||||
// Return failure for now - actual implementation in gt-3x1.2
|
||||
return ProcessResult{
|
||||
Success: false,
|
||||
Error: "ProcessMR not fully implemented (see gt-3x1.2)",
|
||||
}
|
||||
}
|
||||
|
||||
// handleFailure handles a failed merge request.
|
||||
// This is a placeholder that will be fully implemented in gt-3x1.4.
|
||||
func (e *Engineer) handleFailure(mr *beads.Issue, result ProcessResult) {
|
||||
// Reopen the MR (back to open status for rework)
|
||||
open := "open"
|
||||
if err := e.beads.Update(mr.ID, beads.UpdateOptions{Status: &open}); err != nil {
|
||||
fmt.Printf("[Engineer] Warning: failed to reopen MR %s: %v\n", mr.ID, err)
|
||||
}
|
||||
|
||||
// Log the failure
|
||||
fmt.Printf("[Engineer] ✗ Failed: %s - %s\n", mr.ID, result.Error)
|
||||
|
||||
// Full failure handling (assign back to worker, labels) in gt-3x1.4
|
||||
}
|
||||
209
internal/refinery/engineer_test.go
Normal file
209
internal/refinery/engineer_test.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package refinery
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/rig"
|
||||
)
|
||||
|
||||
func TestDefaultMergeQueueConfig(t *testing.T) {
|
||||
cfg := DefaultMergeQueueConfig()
|
||||
|
||||
if !cfg.Enabled {
|
||||
t.Error("expected Enabled to be true by default")
|
||||
}
|
||||
if cfg.TargetBranch != "main" {
|
||||
t.Errorf("expected TargetBranch to be 'main', got %q", cfg.TargetBranch)
|
||||
}
|
||||
if cfg.PollInterval != 30*time.Second {
|
||||
t.Errorf("expected PollInterval to be 30s, got %v", cfg.PollInterval)
|
||||
}
|
||||
if cfg.MaxConcurrent != 1 {
|
||||
t.Errorf("expected MaxConcurrent to be 1, got %d", cfg.MaxConcurrent)
|
||||
}
|
||||
if cfg.OnConflict != "assign_back" {
|
||||
t.Errorf("expected OnConflict to be 'assign_back', got %q", cfg.OnConflict)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineer_LoadConfig_NoFile(t *testing.T) {
|
||||
// Create a temp directory without config.json
|
||||
tmpDir, err := os.MkdirTemp("", "engineer-test-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
r := &rig.Rig{
|
||||
Name: "test-rig",
|
||||
Path: tmpDir,
|
||||
}
|
||||
|
||||
e := NewEngineer(r)
|
||||
|
||||
// Should not error with missing config file
|
||||
if err := e.LoadConfig(); err != nil {
|
||||
t.Errorf("unexpected error with missing config: %v", err)
|
||||
}
|
||||
|
||||
// Should use defaults
|
||||
if e.config.PollInterval != 30*time.Second {
|
||||
t.Errorf("expected default PollInterval, got %v", e.config.PollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineer_LoadConfig_WithMergeQueue(t *testing.T) {
|
||||
// Create a temp directory with config.json
|
||||
tmpDir, err := os.MkdirTemp("", "engineer-test-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Write config file
|
||||
config := map[string]interface{}{
|
||||
"type": "rig",
|
||||
"version": 1,
|
||||
"name": "test-rig",
|
||||
"merge_queue": map[string]interface{}{
|
||||
"enabled": true,
|
||||
"target_branch": "develop",
|
||||
"poll_interval": "10s",
|
||||
"max_concurrent": 2,
|
||||
"run_tests": false,
|
||||
"test_command": "make test",
|
||||
},
|
||||
}
|
||||
|
||||
data, _ := json.MarshalIndent(config, "", " ")
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, "config.json"), data, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := &rig.Rig{
|
||||
Name: "test-rig",
|
||||
Path: tmpDir,
|
||||
}
|
||||
|
||||
e := NewEngineer(r)
|
||||
|
||||
if err := e.LoadConfig(); err != nil {
|
||||
t.Errorf("unexpected error loading config: %v", err)
|
||||
}
|
||||
|
||||
// Check that config values were loaded
|
||||
if e.config.TargetBranch != "develop" {
|
||||
t.Errorf("expected TargetBranch 'develop', got %q", e.config.TargetBranch)
|
||||
}
|
||||
if e.config.PollInterval != 10*time.Second {
|
||||
t.Errorf("expected PollInterval 10s, got %v", e.config.PollInterval)
|
||||
}
|
||||
if e.config.MaxConcurrent != 2 {
|
||||
t.Errorf("expected MaxConcurrent 2, got %d", e.config.MaxConcurrent)
|
||||
}
|
||||
if e.config.RunTests != false {
|
||||
t.Errorf("expected RunTests false, got %v", e.config.RunTests)
|
||||
}
|
||||
if e.config.TestCommand != "make test" {
|
||||
t.Errorf("expected TestCommand 'make test', got %q", e.config.TestCommand)
|
||||
}
|
||||
|
||||
// Check that defaults are preserved for unspecified fields
|
||||
if e.config.OnConflict != "assign_back" {
|
||||
t.Errorf("expected OnConflict default 'assign_back', got %q", e.config.OnConflict)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineer_LoadConfig_NoMergeQueueSection(t *testing.T) {
|
||||
// Create a temp directory with config.json without merge_queue
|
||||
tmpDir, err := os.MkdirTemp("", "engineer-test-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Write config file without merge_queue
|
||||
config := map[string]interface{}{
|
||||
"type": "rig",
|
||||
"version": 1,
|
||||
"name": "test-rig",
|
||||
}
|
||||
|
||||
data, _ := json.MarshalIndent(config, "", " ")
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, "config.json"), data, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := &rig.Rig{
|
||||
Name: "test-rig",
|
||||
Path: tmpDir,
|
||||
}
|
||||
|
||||
e := NewEngineer(r)
|
||||
|
||||
if err := e.LoadConfig(); err != nil {
|
||||
t.Errorf("unexpected error loading config: %v", err)
|
||||
}
|
||||
|
||||
// Should use all defaults
|
||||
if e.config.PollInterval != 30*time.Second {
|
||||
t.Errorf("expected default PollInterval, got %v", e.config.PollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineer_LoadConfig_InvalidPollInterval(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "engineer-test-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
config := map[string]interface{}{
|
||||
"merge_queue": map[string]interface{}{
|
||||
"poll_interval": "not-a-duration",
|
||||
},
|
||||
}
|
||||
|
||||
data, _ := json.MarshalIndent(config, "", " ")
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, "config.json"), data, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := &rig.Rig{
|
||||
Name: "test-rig",
|
||||
Path: tmpDir,
|
||||
}
|
||||
|
||||
e := NewEngineer(r)
|
||||
|
||||
err = e.LoadConfig()
|
||||
if err == nil {
|
||||
t.Error("expected error for invalid poll_interval")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewEngineer(t *testing.T) {
|
||||
r := &rig.Rig{
|
||||
Name: "test-rig",
|
||||
Path: "/tmp/test-rig",
|
||||
}
|
||||
|
||||
e := NewEngineer(r)
|
||||
|
||||
if e.rig != r {
|
||||
t.Error("expected rig to be set")
|
||||
}
|
||||
if e.beads == nil {
|
||||
t.Error("expected beads client to be initialized")
|
||||
}
|
||||
if e.config == nil {
|
||||
t.Error("expected config to be initialized with defaults")
|
||||
}
|
||||
if e.stopCh == nil {
|
||||
t.Error("expected stopCh to be initialized")
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user