refactor(mrqueue): remove mrqueue package, use beads for MRs (gt-dqi)

Remove the mrqueue side-channel from gastown. The merge queue now uses
beads merge-request wisps exclusively, not parallel .beads/mq/*.json files.

Changes:
- Delete internal/mrqueue/ package (~830 lines removed)
- Move scoring logic to internal/refinery/score.go
- Update Refinery engineer to query beads via ReadyWithType("merge-request")
- Add MRInfo struct to replace mrqueue.MR
- Add ClaimMR/ReleaseMR methods using beads assignee field
- Update HandleMergeReady to not create duplicate queue entries
- Update gt refinery commands (claim, release, unclaimed) to use beads
- Stub out MQEventSource (no longer needed)

The Refinery now:
- Lists MRs via beads.ReadyWithType("merge-request")
- Claims via beads.Update(..., {Assignee: worker})
- Closes via beads.CloseWithReason("merged", mrID)
- Blocks on conflicts via beads.AddDependency(mrID, taskID)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
george
2026-01-12 23:48:56 -08:00
committed by Steve Yegge
parent f0192c8b3d
commit 58207a00ec
12 changed files with 312 additions and 1556 deletions

View File

@@ -16,7 +16,6 @@ import (
"github.com/steveyegge/gastown/internal/beads"
"github.com/steveyegge/gastown/internal/git"
"github.com/steveyegge/gastown/internal/mail"
"github.com/steveyegge/gastown/internal/mrqueue"
"github.com/steveyegge/gastown/internal/protocol"
"github.com/steveyegge/gastown/internal/rig"
)
@@ -70,18 +69,35 @@ func DefaultMergeQueueConfig() *MergeQueueConfig {
}
}
// MRInfo holds merge request information for display and processing.
// It is assembled from a beads merge-request issue plus its parsed MR
// fields, and replaces mrqueue.MR after the mrqueue package removal.
type MRInfo struct {
	ID              string     // Bead ID (e.g., "gt-abc123")
	Branch          string     // Source branch (e.g., "polecat/nux")
	Target          string     // Target branch (e.g., "main")
	SourceIssue     string     // The work item being merged
	Worker          string     // Who did the work
	Rig             string     // Which rig
	Title           string     // MR title
	Priority        int        // Priority (lower = higher priority)
	AgentBead       string     // Agent bead ID that created this MR
	RetryCount      int        // Conflict retry count
	ConvoyID        string     // Parent convoy ID if part of a convoy
	ConvoyCreatedAt *time.Time // Convoy creation time; nil when not part of a convoy or unparseable
	CreatedAt       time.Time  // MR creation time (zero value if the issue timestamp fails to parse)
	BlockedBy       string     // Open task ID blocking this MR; populated by ListBlockedMRs
}
// Engineer is the merge queue processor that polls for ready merge-requests
// and processes them according to the merge queue design.
type Engineer struct {
rig *rig.Rig
beads *beads.Beads
mrQueue *mrqueue.Queue
git *git.Git
config *MergeQueueConfig
workDir string
output io.Writer // Output destination for user-facing messages
eventLogger *mrqueue.EventLogger
router *mail.Router // Mail router for sending protocol messages
rig *rig.Rig
beads *beads.Beads
git *git.Git
config *MergeQueueConfig
workDir string
output io.Writer // Output destination for user-facing messages
router *mail.Router // Mail router for sending protocol messages
// stopCh is used for graceful shutdown
stopCh chan struct{}
@@ -102,16 +118,14 @@ func NewEngineer(r *rig.Rig) *Engineer {
}
return &Engineer{
rig: r,
beads: beads.New(r.Path),
mrQueue: mrqueue.New(r.Path),
git: git.NewGit(gitDir),
config: cfg,
workDir: gitDir,
output: os.Stdout,
eventLogger: mrqueue.NewEventLoggerFromRig(r.Path),
router: mail.NewRouter(r.Path),
stopCh: make(chan struct{}),
rig: r,
beads: beads.New(r.Path),
git: git.NewGit(gitDir),
config: cfg,
workDir: gitDir,
output: os.Stdout,
router: mail.NewRouter(r.Path),
stopCh: make(chan struct{}),
}
}
@@ -479,31 +493,21 @@ func (e *Engineer) handleFailure(mr *beads.Issue, result ProcessResult) {
_, _ = fmt.Fprintf(e.output, "[Engineer] ✗ Failed: %s - %s\n", mr.ID, result.Error)
}
// ProcessMRFromQueue processes a merge request from wisp queue.
func (e *Engineer) ProcessMRFromQueue(ctx context.Context, mr *mrqueue.MR) ProcessResult {
// MR fields are directly on the struct (no parsing needed)
_, _ = fmt.Fprintln(e.output, "[Engineer] Processing MR from queue:")
// ProcessMRInfo processes a merge request from MRInfo.
func (e *Engineer) ProcessMRInfo(ctx context.Context, mr *MRInfo) ProcessResult {
// MR fields are directly on the struct
_, _ = fmt.Fprintln(e.output, "[Engineer] Processing MR:")
_, _ = fmt.Fprintf(e.output, " Branch: %s\n", mr.Branch)
_, _ = fmt.Fprintf(e.output, " Target: %s\n", mr.Target)
_, _ = fmt.Fprintf(e.output, " Worker: %s\n", mr.Worker)
_, _ = fmt.Fprintf(e.output, " Source: %s\n", mr.SourceIssue)
// Emit merge_started event
if err := e.eventLogger.LogMergeStarted(mr); err != nil {
_, _ = fmt.Fprintf(e.output, "[Engineer] Warning: failed to log merge_started event: %v\n", err)
}
// Use the shared merge logic
return e.doMerge(ctx, mr.Branch, mr.Target, mr.SourceIssue)
}
// handleSuccessFromQueue handles a successful merge from wisp queue.
func (e *Engineer) handleSuccessFromQueue(mr *mrqueue.MR, result ProcessResult) {
// Emit merged event
if err := e.eventLogger.LogMerged(mr, result.MergeCommit); err != nil {
_, _ = fmt.Fprintf(e.output, "[Engineer] Warning: failed to log merged event: %v\n", err)
}
// HandleMRInfoSuccess handles a successful merge from MRInfo.
func (e *Engineer) HandleMRInfoSuccess(mr *MRInfo, result ProcessResult) {
// Release merge slot if this was a conflict resolution
// The slot is held while conflict resolution is in progress
holder := e.rig.Name + "/refinery"
@@ -518,7 +522,7 @@ func (e *Engineer) handleSuccessFromQueue(mr *mrqueue.MR, result ProcessResult)
_, _ = fmt.Fprintf(e.output, "[Engineer] Released merge slot\n")
}
// Update and close the MR bead (matches handleSuccess behavior)
// Update and close the MR bead
if mr.ID != "" {
// Fetch the MR bead to update its fields
mrBead, err := e.beads.Show(mr.ID)
@@ -572,24 +576,14 @@ func (e *Engineer) handleSuccessFromQueue(mr *mrqueue.MR, result ProcessResult)
}
}
// 3. Remove MR from queue (ephemeral - just delete the file)
if err := e.mrQueue.Remove(mr.ID); err != nil {
_, _ = fmt.Fprintf(e.output, "[Engineer] Warning: failed to remove MR from queue: %v\n", err)
}
// 4. Log success
// 3. Log success
_, _ = fmt.Fprintf(e.output, "[Engineer] ✓ Merged: %s (commit: %s)\n", mr.ID, result.MergeCommit)
}
// handleFailureFromQueue handles a failed merge from wisp queue.
// HandleMRInfoFailure handles a failed merge from MRInfo.
// For conflicts, creates a resolution task and blocks the MR until resolved.
// This enables non-blocking delegation: the queue continues to the next MR.
func (e *Engineer) handleFailureFromQueue(mr *mrqueue.MR, result ProcessResult) {
// Emit merge_failed event
if err := e.eventLogger.LogMergeFailed(mr, result.Error); err != nil {
_, _ = fmt.Fprintf(e.output, "[Engineer] Warning: failed to log merge_failed event: %v\n", err)
}
func (e *Engineer) HandleMRInfoFailure(mr *MRInfo, result ProcessResult) {
// Notify Witness of the failure so polecat can be alerted
// Determine failure type from result
failureType := "build"
@@ -608,13 +602,13 @@ func (e *Engineer) handleFailureFromQueue(mr *mrqueue.MR, result ProcessResult)
// If this was a conflict, create a conflict-resolution task for dispatch
// and block the MR until the task is resolved (non-blocking delegation)
if result.Conflict {
taskID, err := e.createConflictResolutionTask(mr, result)
taskID, err := e.createConflictResolutionTaskForMR(mr, result)
if err != nil {
_, _ = fmt.Fprintf(e.output, "[Engineer] Warning: failed to create conflict resolution task: %v\n", err)
} else {
// Block the MR on the conflict resolution task
} else if taskID != "" {
// Block the MR on the conflict resolution task using beads dependency
// When the task closes, the MR unblocks and re-enters the ready queue
if err := e.mrQueue.SetBlockedBy(mr.ID, taskID); err != nil {
if err := e.beads.AddDependency(mr.ID, taskID); err != nil {
_, _ = fmt.Fprintf(e.output, "[Engineer] Warning: failed to block MR on task: %v\n", err)
} else {
_, _ = fmt.Fprintf(e.output, "[Engineer] MR %s blocked on conflict task %s (non-blocking delegation)\n", mr.ID, taskID)
@@ -631,7 +625,7 @@ func (e *Engineer) handleFailureFromQueue(mr *mrqueue.MR, result ProcessResult)
}
}
// createConflictResolutionTask creates a dispatchable task for resolving merge conflicts.
// createConflictResolutionTaskForMR creates a dispatchable task for resolving merge conflicts.
// This task will be picked up by bd ready and can be slung to a fresh polecat (spawned on demand).
// Returns the created task's ID for blocking the MR until resolution.
//
@@ -647,7 +641,7 @@ func (e *Engineer) handleFailureFromQueue(mr *mrqueue.MR, result ProcessResult)
// This serializes conflict resolution - only one polecat can resolve conflicts at a time.
// If the slot is already held, we skip creating the task and let the MR stay in queue.
// When the current resolution completes and merges, the slot is released.
func (e *Engineer) createConflictResolutionTask(mr *mrqueue.MR, _ ProcessResult) (string, error) { // result unused but kept for future merge diagnostics
func (e *Engineer) createConflictResolutionTaskForMR(mr *MRInfo, _ ProcessResult) (string, error) { // result unused but kept for future merge diagnostics
// === MERGE SLOT GATE: Serialize conflict resolution ===
// Ensure merge slot exists (idempotent)
slotID, err := e.beads.MergeSlotEnsureExists()
@@ -743,14 +737,11 @@ The Refinery will automatically retry the merge after you force-push.`,
_, _ = fmt.Fprintf(e.output, "[Engineer] Created conflict resolution task: %s (P%d)\n", task.ID, task.Priority)
// Update the MR's retry count for priority scoring
mr.RetryCount = retryCount
return task.ID, nil
}
// IsBeadOpen checks if a bead is still open (not closed).
// This is used as a status checker for mrqueue.ListReady to filter blocked MRs.
// This is used as a status checker to filter blocked MRs.
func (e *Engineer) IsBeadOpen(beadID string) (bool, error) {
issue, err := e.beads.Show(beadID)
if err != nil {
@@ -762,15 +753,172 @@ func (e *Engineer) IsBeadOpen(beadID string) (bool, error) {
}
// ListReadyMRs returns MRs that are ready for processing:
// - Not claimed by another worker (or claim is stale)
// - Not blocked by an open task
// Sorted by priority score (highest first).
func (e *Engineer) ListReadyMRs() ([]*mrqueue.MR, error) {
return e.mrQueue.ListReady(e.IsBeadOpen)
// - Not claimed by another worker (checked via assignee field)
// - Not blocked by an open task (handled by bd ready)
// Sorted by priority (highest first).
//
// This queries beads for merge-request wisps.
func (e *Engineer) ListReadyMRs() ([]*MRInfo, error) {
	// Ask beads for merge-request wisps that are ready to process.
	issues, err := e.beads.ReadyWithType("merge-request")
	if err != nil {
		return nil, fmt.Errorf("querying beads for merge-requests: %w", err)
	}

	var ready []*MRInfo
	for _, issue := range issues {
		fields := beads.ParseMRFields(issue)
		if fields == nil {
			// Not a well-formed merge-request wisp; ignore it.
			continue
		}
		if issue.Assignee != "" {
			// Already claimed by another worker.
			// TODO: Add stale claim detection based on updated_at
			continue
		}

		// Best-effort timestamp parsing: unparseable values simply leave
		// the corresponding MRInfo field at its zero value.
		var convoyAt *time.Time
		if fields.ConvoyCreatedAt != "" {
			if parsed, perr := time.Parse(time.RFC3339, fields.ConvoyCreatedAt); perr == nil {
				convoyAt = &parsed
			}
		}
		var openedAt time.Time
		if issue.CreatedAt != "" {
			if parsed, perr := time.Parse(time.RFC3339, issue.CreatedAt); perr == nil {
				openedAt = parsed
			}
		}

		ready = append(ready, &MRInfo{
			ID:              issue.ID,
			Branch:          fields.Branch,
			Target:          fields.Target,
			SourceIssue:     fields.SourceIssue,
			Worker:          fields.Worker,
			Rig:             fields.Rig,
			Title:           issue.Title,
			Priority:        issue.Priority,
			AgentBead:       fields.AgentBead,
			RetryCount:      fields.RetryCount,
			ConvoyID:        fields.ConvoyID,
			ConvoyCreatedAt: convoyAt,
			CreatedAt:       openedAt,
		})
	}
	return ready, nil
}
// ListBlockedMRs returns MRs that are blocked by open tasks.
// Useful for monitoring/reporting.
func (e *Engineer) ListBlockedMRs() ([]*mrqueue.MR, error) {
return e.mrQueue.ListBlocked(e.IsBeadOpen)
//
// This queries beads for blocked merge-request issues.
func (e *Engineer) ListBlockedMRs() ([]*MRInfo, error) {
	// Query all open merge-request issues (ready and blocked alike);
	// blocked ones are filtered below.
	issues, err := e.beads.List(beads.ListOptions{
		Status:   "open",
		Label:    "gt:merge-request",
		Priority: -1, // No priority filter
	})
	if err != nil {
		return nil, fmt.Errorf("querying beads for merge-requests: %w", err)
	}

	var mrs []*MRInfo
	for _, issue := range issues {
		// Single pass over blockers: find the first one still open. This
		// replaces the previous two scans (one to detect an open blocker,
		// one to pick it), halving the IsBeadOpen lookups per issue.
		// IsBeadOpen errors are deliberately treated as "not open".
		blockedBy := ""
		blocked := false
		for _, blockerID := range issue.BlockedBy {
			if isOpen, err := e.IsBeadOpen(blockerID); err == nil && isOpen {
				blockedBy = blockerID
				blocked = true
				break
			}
		}
		if !blocked {
			continue // No open blockers; this MR is not blocked.
		}

		fields := beads.ParseMRFields(issue)
		if fields == nil {
			continue // Skip issues without MR fields.
		}

		// Parse convoy created_at if present (best-effort: bad timestamps
		// leave the field nil).
		var convoyCreatedAt *time.Time
		if fields.ConvoyCreatedAt != "" {
			if t, err := time.Parse(time.RFC3339, fields.ConvoyCreatedAt); err == nil {
				convoyCreatedAt = &t
			}
		}
		// Parse issue created_at (best-effort: bad timestamps leave the
		// zero value).
		var createdAt time.Time
		if issue.CreatedAt != "" {
			if t, err := time.Parse(time.RFC3339, issue.CreatedAt); err == nil {
				createdAt = t
			}
		}

		mrs = append(mrs, &MRInfo{
			ID:              issue.ID,
			Branch:          fields.Branch,
			Target:          fields.Target,
			SourceIssue:     fields.SourceIssue,
			Worker:          fields.Worker,
			Rig:             fields.Rig,
			Title:           issue.Title,
			Priority:        issue.Priority,
			AgentBead:       fields.AgentBead,
			RetryCount:      fields.RetryCount,
			ConvoyID:        fields.ConvoyID,
			ConvoyCreatedAt: convoyCreatedAt,
			CreatedAt:       createdAt,
			BlockedBy:       blockedBy,
		})
	}
	return mrs, nil
}
// ClaimMR marks an MR as claimed for processing by writing workerID into
// the bead's assignee field. This is the beads-based replacement for
// mrqueue.Claim(). The workerID is typically the refinery's identifier
// (e.g., "gastown/refinery").
func (e *Engineer) ClaimMR(mrID, workerID string) error {
	opts := beads.UpdateOptions{Assignee: &workerID}
	return e.beads.Update(mrID, opts)
}
// ReleaseMR returns a claimed MR to the queue by blanking its assignee
// field. This is the beads-based replacement for mrqueue.Release().
func (e *Engineer) ReleaseMR(mrID string) error {
	var unassigned string // zero value: clearing the assignee releases the claim
	return e.beads.Update(mrID, beads.UpdateOptions{Assignee: &unassigned})
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/steveyegge/gastown/internal/constants"
"github.com/steveyegge/gastown/internal/events"
"github.com/steveyegge/gastown/internal/mail"
"github.com/steveyegge/gastown/internal/mrqueue"
"github.com/steveyegge/gastown/internal/rig"
"github.com/steveyegge/gastown/internal/runtime"
"github.com/steveyegge/gastown/internal/session"
@@ -358,7 +357,7 @@ func (m *Manager) calculateIssueScore(issue *beads.Issue, now time.Time) float64
}
// Build score input
input := mrqueue.ScoreInput{
input := ScoreInput{
Priority: issue.Priority,
MRCreatedAt: mrCreatedAt,
Now: now,
@@ -376,7 +375,7 @@ func (m *Manager) calculateIssueScore(issue *beads.Issue, now time.Time) float64
}
}
return mrqueue.ScoreMRWithDefaults(input)
return ScoreMRWithDefaults(input)
}
// issueToMR converts a beads issue to a MergeRequest.

152
internal/refinery/score.go Normal file
View File

@@ -0,0 +1,152 @@
// Package refinery provides the merge queue processing agent.
// This file contains priority scoring logic for merge requests.
package refinery
import (
"time"
)
// ScoreConfig contains tunable weights for MR priority scoring.
// All weights are designed so higher scores = higher priority (process first).
type ScoreConfig struct {
// BaseScore is the starting score before applying factors.
// Default: 1000 (keeps all scores positive)
BaseScore float64
// ConvoyAgeWeight is points added per hour of convoy age.
// Older convoys get priority to prevent starvation.
// Default: 10.0 (10 pts/hour = 240 pts/day)
ConvoyAgeWeight float64
// PriorityWeight is multiplied by (4 - priority) so P0 gets most points.
// P0 adds 4*weight, P1 adds 3*weight, ..., P4 adds 0*weight.
// Default: 100.0 (P0 gets +400, P4 gets +0)
PriorityWeight float64
// RetryPenalty is subtracted per retry attempt to prevent thrashing.
// MRs that keep failing get deprioritized, giving repo state time to stabilize.
// Default: 50.0 (each retry loses 50 pts)
RetryPenalty float64
// MRAgeWeight is points added per hour since MR submission.
// Minor factor for FIFO ordering within same priority/convoy.
// Default: 1.0 (1 pt/hour)
MRAgeWeight float64
// MaxRetryPenalty caps the total retry penalty to prevent permanent deprioritization.
// Default: 300.0 (after 6 retries, penalty is capped)
MaxRetryPenalty float64
}
// DefaultScoreConfig returns sensible defaults for MR scoring.
func DefaultScoreConfig() ScoreConfig {
return ScoreConfig{
BaseScore: 1000.0,
ConvoyAgeWeight: 10.0,
PriorityWeight: 100.0,
RetryPenalty: 50.0,
MRAgeWeight: 1.0,
MaxRetryPenalty: 300.0,
}
}
// ScoreInput contains the data needed to score an MR.
// This struct decouples scoring from the MR struct, allowing the
// caller to provide convoy age from external lookups.
type ScoreInput struct {
// Priority is the issue priority (0=P0/critical, 4=P4/backlog).
Priority int
// MRCreatedAt is when the MR was submitted to the queue.
MRCreatedAt time.Time
// ConvoyCreatedAt is when the convoy was created.
// Nil if MR is not part of a convoy (standalone work).
ConvoyCreatedAt *time.Time
// RetryCount is how many times this MR has been retried after conflicts.
// 0 = first attempt.
RetryCount int
// Now is the current time (for deterministic testing).
// If zero, time.Now() is used.
Now time.Time
}
// ScoreMR calculates the priority score for a merge request.
// Higher scores mean higher priority (process first).
//
// The scoring formula:
//
// score = BaseScore
// + ConvoyAgeWeight * hoursOld(convoy) // Prevent convoy starvation
// + PriorityWeight * (4 - priority) // P0=+400, P4=+0
// - min(RetryPenalty * retryCount, MaxRetryPenalty) // Prevent thrashing
// + MRAgeWeight * hoursOld(MR) // FIFO tiebreaker
func ScoreMR(input ScoreInput, config ScoreConfig) float64 {
now := input.Now
if now.IsZero() {
now = time.Now()
}
score := config.BaseScore
// Convoy age factor: prevent starvation of old convoys
if input.ConvoyCreatedAt != nil {
convoyAge := now.Sub(*input.ConvoyCreatedAt)
convoyHours := convoyAge.Hours()
if convoyHours > 0 {
score += config.ConvoyAgeWeight * convoyHours
}
}
// Priority factor: P0 (0) gets +400, P4 (4) gets +0
priorityBonus := 4 - input.Priority
if priorityBonus < 0 {
priorityBonus = 0 // Clamp for invalid priorities > 4
}
if priorityBonus > 4 {
priorityBonus = 4 // Clamp for invalid priorities < 0
}
score += config.PriorityWeight * float64(priorityBonus)
// Retry penalty: prevent thrashing on repeatedly failing MRs
retryPenalty := config.RetryPenalty * float64(input.RetryCount)
if retryPenalty > config.MaxRetryPenalty {
retryPenalty = config.MaxRetryPenalty
}
score -= retryPenalty
// MR age factor: FIFO ordering as tiebreaker
mrAge := now.Sub(input.MRCreatedAt)
mrHours := mrAge.Hours()
if mrHours > 0 {
score += config.MRAgeWeight * mrHours
}
return score
}
// ScoreMRWithDefaults is a convenience wrapper using default config.
func ScoreMRWithDefaults(input ScoreInput) float64 {
return ScoreMR(input, DefaultScoreConfig())
}
// Score returns the priority score for this MR as of the current time,
// using the default scoring config. Higher scores mean higher priority.
func (mr *MRInfo) Score() float64 {
	return mr.ScoreAt(time.Now())
}

// ScoreAt returns the priority score as of a specific instant; supplying
// a fixed time makes the result deterministic for testing.
func (mr *MRInfo) ScoreAt(now time.Time) float64 {
	return ScoreMRWithDefaults(ScoreInput{
		Priority:        mr.Priority,
		MRCreatedAt:     mr.CreatedAt,
		ConvoyCreatedAt: mr.ConvoyCreatedAt,
		RetryCount:      mr.RetryCount,
		Now:             now,
	})
}