feat: implement priming subsystem improvements

Phase 1 of dynamic priming subsystem:

1. PRIME.md provisioning for all workers (hq-5z76w, hq-ukjrr Part A)
   - Added ProvisionPrimeMD to beads package with Gas Town context template
   - Provision at rig level in AddRig() so all workers inherit it
   - Added fallback provisioning in crew and polecat managers
   - Created PRIME.md for existing rigs

2. Post-handoff detection to prevent handoff loop bug (hq-ukjrr Part B)
   - Added FileHandoffMarker constant (.runtime/handoff_to_successor)
   - gt handoff writes marker before respawn
   - gt prime detects marker and outputs "HANDOFF COMPLETE" warning
   - Marker cleared after detection to prevent duplicate warnings

3. Priming health checks for gt doctor (hq-5scnt)
   - New priming_check.go validates priming subsystem configuration
   - Checks: SessionStart hook, gt prime command, PRIME.md presence
   - Warns if CLAUDE.md is too large (should be bootstrap pointer)
   - Fixable: provisions missing PRIME.md files

This ensures crew workers get Gas Town context (GUPP, hooks, propulsion)
even if the gt prime hook fails, via bd prime fallback.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
mayor
2026-01-09 23:56:38 -08:00
committed by Steve Yegge
parent 7533fed55e
commit db353c247b
10 changed files with 567 additions and 7 deletions

View File

@@ -599,3 +599,87 @@ func (b *Beads) IsBeadsRepo() bool {
info, err := os.Stat(beadsDir)
return err == nil && info.IsDir()
}
// primeContent is the Gas Town PRIME.md content that provides essential context
// for crew workers. This is the fallback if the SessionStart hook fails.
//
// Written to <beadsDir>/PRIME.md by ProvisionPrimeMD (only when no PRIME.md
// already exists, so operator customizations survive). The command names are
// spliced in as separate interpreted string literals because a backtick
// cannot appear inside a Go raw string literal.
const primeContent = `# Gas Town Worker Context
> **Context Recovery**: Run ` + "`gt prime`" + ` for full context after compaction or new session.
## The Propulsion Principle (GUPP)
**If you find work on your hook, YOU RUN IT.**
No confirmation. No waiting. No announcements. The hook having work IS the assignment.
This is physics, not politeness. Gas Town is a steam engine - you are a piston.
**Failure mode we're preventing:**
- Agent starts with work on hook
- Agent announces itself and waits for human to say "ok go"
- Human is AFK / trusting the engine to run
- Work sits idle. The whole system stalls.
## Startup Protocol
1. Check your hook: ` + "`gt mol status`" + `
2. If work is hooked → EXECUTE (no announcement, no waiting)
3. If hook empty → Check mail: ` + "`gt mail inbox`" + `
4. Still nothing? Wait for user instructions
## Key Commands
- ` + "`gt prime`" + ` - Get full role context (run after compaction)
- ` + "`gt mol status`" + ` - Check your hooked work
- ` + "`gt mail inbox`" + ` - Check for messages
- ` + "`bd ready`" + ` - Find available work (no blockers)
- ` + "`bd sync`" + ` - Sync beads changes
## Session Close Protocol
Before saying "done":
1. git status (check what changed)
2. git add <files> (stage code changes)
3. bd sync (commit beads changes)
4. git commit -m "..." (commit code)
5. bd sync (commit any new beads changes)
6. git push (push to remote)
**Work is not done until pushed.**
`
// ProvisionPrimeMD writes the Gas Town PRIME.md file to the specified beads directory.
// This provides essential Gas Town context (GUPP, startup protocol) as a fallback
// if the SessionStart hook fails. The PRIME.md is read by bd prime.
//
// The beadsDir should be the actual beads directory (after following any redirect).
// Returns nil if PRIME.md already exists (idempotent); an existing file is
// never overwritten so operator customizations are preserved.
func ProvisionPrimeMD(beadsDir string) error {
	primePath := filepath.Join(beadsDir, "PRIME.md")

	// Already exists - don't overwrite customizations.
	if _, err := os.Stat(primePath); err == nil {
		return nil
	} else if !os.IsNotExist(err) {
		// Stat failed for a reason other than "missing" (e.g. permissions).
		// Surface it instead of blindly attempting a write that would mask
		// the underlying problem.
		return fmt.Errorf("checking for existing PRIME.md: %w", err)
	}

	// Create the beads directory if it doesn't exist yet.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		return fmt.Errorf("creating beads dir: %w", err)
	}

	if err := os.WriteFile(primePath, []byte(primeContent), 0644); err != nil {
		return fmt.Errorf("writing PRIME.md: %w", err)
	}
	return nil
}
// ProvisionPrimeMDForWorktree provisions PRIME.md for a worktree by following its redirect.
// This is the main entry point for crew/polecat provisioning.
func ProvisionPrimeMDForWorktree(worktreePath string) error {
// Resolve the beads directory (follows redirect chain)
beadsDir := ResolveBeadsDir(worktreePath)
// Provision PRIME.md in the target directory
return ProvisionPrimeMD(beadsDir)
}

View File

@@ -161,6 +161,9 @@ func runDoctor(cmd *cobra.Command, args []string) error {
d.Register(doctor.NewLegacyGastownCheck())
d.Register(doctor.NewClaudeSettingsCheck())
// Priming subsystem check
d.Register(doctor.NewPrimingCheck())
// Crew workspace checks
d.Register(doctor.NewCrewStateCheck())
d.Register(doctor.NewCrewWorktreeCheck())

View File

@@ -9,6 +9,7 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/gastown/internal/config"
"github.com/steveyegge/gastown/internal/constants"
"github.com/steveyegge/gastown/internal/events"
"github.com/steveyegge/gastown/internal/session"
"github.com/steveyegge/gastown/internal/style"
@@ -192,6 +193,16 @@ func runHandoff(cmd *cobra.Command, args []string) error {
style.PrintWarning("could not clear history: %v", err)
}
// Write handoff marker for successor detection (prevents handoff loop bug).
// The marker is cleared by gt prime after it outputs the warning.
// This tells the new session "you're post-handoff, don't re-run /handoff"
if cwd, err := os.Getwd(); err == nil {
runtimeDir := filepath.Join(cwd, constants.DirRuntime)
_ = os.MkdirAll(runtimeDir, 0755)
markerPath := filepath.Join(runtimeDir, constants.FileHandoffMarker)
_ = os.WriteFile(markerPath, []byte(currentSession), 0644)
}
// Use exec to respawn the pane - this kills us and restarts
return t.RespawnPane(pane, restartCmd)
}

View File

@@ -83,10 +83,6 @@ func init() {
type RoleContext = RoleInfo
func runPrime(cmd *cobra.Command, args []string) error {
if !state.IsEnabled() {
return nil
}
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("getting current directory: %w", err)
@@ -96,7 +92,17 @@ func runPrime(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("finding workspace: %w", err)
}
// "Discover, Don't Track" principle:
// - If we're in a workspace, proceed - the workspace's existence IS the enable signal
// - If we're NOT in a workspace, check the global enabled state
// This ensures a missing/stale state file doesn't break workspace users
if townRoot == "" {
// Not in a workspace - check global enabled state
// (This matters for hooks that might run from random directories)
if !state.IsEnabled() {
return nil // Silent exit - not in workspace and not enabled
}
return fmt.Errorf("not in a Gas Town workspace")
}
@@ -117,6 +123,9 @@ func runPrime(cmd *cobra.Command, args []string) error {
}
}
// Check for handoff marker (prevents handoff loop bug)
checkHandoffMarker(cwd)
// Get role using env-aware detection
roleInfo, err := GetRoleWithContext(cwd, townRoot)
if err != nil {
@@ -1632,3 +1641,38 @@ func readSessionFile(dir string) string {
}
return ""
}
// checkHandoffMarker looks for a handoff marker file and, when found, prints a
// prominent "handoff complete" banner.
//
// This prevents the handoff loop bug: a freshly respawned session inherits
// context containing /handoff and may incorrectly run it again. The marker
// (written by gt handoff just before respawn) tells the new session that the
// handoff already happened - the /handoff it sees belonged to its predecessor.
func checkHandoffMarker(workDir string) {
	markerPath := filepath.Join(workDir, constants.DirRuntime, constants.FileHandoffMarker)

	raw, readErr := os.ReadFile(markerPath)
	if readErr != nil {
		// No marker: ordinary (non-handoff) startup.
		return
	}

	// The marker body is the predecessor's session identifier.
	predecessor := strings.TrimSpace(string(raw))

	// Delete the marker before printing anything so a second `gt prime`
	// cannot emit this warning twice.
	_ = os.Remove(markerPath)

	fmt.Println()
	fmt.Println(style.Bold.Render("╔══════════════════════════════════════════════════════════════════╗"))
	fmt.Println(style.Bold.Render("║ ✅ HANDOFF COMPLETE - You are the NEW session ║"))
	fmt.Println(style.Bold.Render("╚══════════════════════════════════════════════════════════════════╝"))
	fmt.Println()
	if predecessor != "" {
		fmt.Printf("Your predecessor (%s) handed off to you.\n", predecessor)
	}
	fmt.Println()
	fmt.Println(style.Bold.Render("⚠️ DO NOT run /handoff - that was your predecessor's action."))
	fmt.Println(" The /handoff you see in context is NOT a request for you.")
	fmt.Println()
	fmt.Println("Instead: Check your hook (`gt mol status`) and mail (`gt mail inbox`).")
	fmt.Println()
}

View File

@@ -71,6 +71,11 @@ const (
// FileAccountsJSON is the accounts configuration file in mayor/.
FileAccountsJSON = "accounts.json"
// FileHandoffMarker is the marker file indicating a handoff just occurred.
// Written by gt handoff before respawn, cleared by gt prime after detection.
// This prevents the handoff loop bug where agents re-run /handoff from context.
FileHandoffMarker = "handoff_to_successor"
)
// Beads configuration constants.

View File

@@ -173,6 +173,14 @@ func (m *Manager) Add(name string, createBranch bool) (*CrewWorker, error) {
fmt.Printf("Warning: could not set up shared beads: %v\n", err)
}
// Provision PRIME.md with Gas Town context for this worker.
// This is the fallback if SessionStart hook fails - ensures crew workers
// always have GUPP and essential Gas Town context.
if err := beads.ProvisionPrimeMDForWorktree(crewPath); err != nil {
// Non-fatal - crew can still work via hook, warn but don't fail
fmt.Printf("Warning: could not provision PRIME.md: %v\n", err)
}
// Copy overlay files from .runtime/overlay/ to crew root.
// This allows services to have .env and other config files at their root.
if err := rig.CopyOverlay(m.rig.Path, crewPath); err != nil {

View File

@@ -0,0 +1,344 @@
package doctor
import (
"bufio"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/steveyegge/gastown/internal/beads"
"github.com/steveyegge/gastown/internal/constants"
)
// PrimingCheck verifies the priming subsystem is correctly configured.
// This ensures agents receive proper context on startup via the gt prime chain.
//
// Run records every problem it finds in issues; Fix later re-reads that list
// and auto-provisions missing PRIME.md files (the only fixable issue type).
type PrimingCheck struct {
	FixableCheck
	issues []primingIssue // populated by Run, consumed by Fix
}

// primingIssue describes a single problem found by PrimingCheck.
type primingIssue struct {
	location    string // e.g., "mayor", "gastown/crew/max", "gastown/witness"
	issueType   string // e.g., "no_hook", "no_prime", "large_claude_md", "missing_prime_md"
	description string // human-readable summary shown in doctor output
	fixable     bool   // true when `gt doctor --fix` can repair it automatically
}
// NewPrimingCheck creates a new priming subsystem check.
func NewPrimingCheck() *PrimingCheck {
	// Build the embedded check metadata first, then wrap it.
	base := BaseCheck{
		CheckName:        "priming",
		CheckDescription: "Verify priming subsystem is correctly configured",
	}
	return &PrimingCheck{FixableCheck: FixableCheck{BaseCheck: base}}
}
// Run checks the priming configuration across all agent locations: the gt
// binary itself, town-level agents (mayor, deacon), and every rig's agents
// (witness, refinery, crew, polecats).
func (c *PrimingCheck) Run(ctx *CheckContext) *CheckResult {
	c.issues = nil
	var details []string

	// record tracks issues for Fix and formats them for the report, so the
	// same bookkeeping isn't repeated at every call site.
	record := func(issues []primingIssue) {
		for _, issue := range issues {
			details = append(details, fmt.Sprintf("%s: %s", issue.location, issue.description))
		}
		c.issues = append(c.issues, issues...)
	}

	// Check 1: gt binary resolvable on PATH. LookPath avoids shelling out to
	// `which` (not present on all systems) and works cross-platform.
	if _, err := exec.LookPath("gt"); err != nil {
		c.issues = append(c.issues, primingIssue{
			location:    "system",
			issueType:   "gt_not_in_path",
			description: "gt binary not found in PATH",
			fixable:     false,
		})
		details = append(details, "gt binary not found in PATH")
	}

	// Check 2: Mayor priming (town-level).
	record(c.checkAgentPriming(ctx.TownRoot, "mayor", "mayor"))

	// Check 3: Deacon priming (only if a deacon directory exists).
	if dirExists(filepath.Join(ctx.TownRoot, "deacon")) {
		record(c.checkAgentPriming(ctx.TownRoot, "deacon", "deacon"))
	}

	// Check 4: Rig-level agents (witness, refinery, crew, polecats).
	record(c.checkRigPriming(ctx.TownRoot))

	if len(c.issues) == 0 {
		return &CheckResult{
			Name:    c.Name(),
			Status:  StatusOK,
			Message: "Priming subsystem is correctly configured",
		}
	}

	// Offer --fix only when at least one issue is auto-fixable.
	fixableCount := 0
	for _, issue := range c.issues {
		if issue.fixable {
			fixableCount++
		}
	}
	fixHint := ""
	if fixableCount > 0 {
		fixHint = fmt.Sprintf("Run 'gt doctor --fix' to fix %d issue(s)", fixableCount)
	}

	return &CheckResult{
		Name:    c.Name(),
		Status:  StatusError,
		Message: fmt.Sprintf("Found %d priming issue(s)", len(c.issues)),
		Details: details,
		FixHint: fixHint,
	}
}
// checkAgentPriming checks the priming configuration for a single agent
// directory (e.g. "mayor", "deacon", "<rig>/witness").
//
// It verifies that:
//   - the agent's .claude/settings.json (if present) wires a SessionStart
//     hook that runs `gt prime`
//   - the agent's CLAUDE.md (if present) is a small bootstrap pointer
//     (<30 lines) rather than a full context dump
//
// The third argument (the agent type) is accepted for call-site symmetry but
// is not currently used.
func (c *PrimingCheck) checkAgentPriming(townRoot, agentDir, _ string) []primingIssue {
	var issues []primingIssue
	agentPath := filepath.Join(townRoot, agentDir)

	// SessionStart hook must invoke `gt prime`. Read/parse failures are
	// deliberately not reported here - settings.json validity is the concern
	// of the Claude settings check, not the priming check.
	settingsPath := filepath.Join(agentPath, ".claude", "settings.json")
	if fileExists(settingsPath) {
		if data, err := os.ReadFile(settingsPath); err == nil {
			var settings map[string]any
			if json.Unmarshal(data, &settings) == nil && !c.hasGtPrimeHook(settings) {
				issues = append(issues, primingIssue{
					location:    agentDir,
					issueType:   "no_prime_hook",
					description: "SessionStart hook missing 'gt prime'",
					fixable:     false, // requires template regeneration
				})
			}
		}
	}

	// CLAUDE.md should be a short bootstrap pointer; full context is supposed
	// to arrive via the priming chain, not a static file.
	claudeMdPath := filepath.Join(agentPath, "CLAUDE.md")
	if fileExists(claudeMdPath) {
		if lines := c.countLines(claudeMdPath); lines > 30 {
			issues = append(issues, primingIssue{
				location:    agentDir,
				issueType:   "large_claude_md",
				description: fmt.Sprintf("CLAUDE.md has %d lines (should be <30 for bootstrap pointer)", lines),
				fixable:     false, // requires manual review
			})
		}
	}

	return issues
}
// checkRigPriming checks priming for every rig under townRoot: the rig-level
// PRIME.md fallback, the witness/refinery agents, and each crew/polecat
// worktree's resolved PRIME.md.
func (c *PrimingCheck) checkRigPriming(townRoot string) []primingIssue {
	var issues []primingIssue

	entries, err := os.ReadDir(townRoot)
	if err != nil {
		return issues
	}

	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		rigName := entry.Name()
		rigPath := filepath.Join(townRoot, rigName)

		// Skip town-level (non-rig) and hidden directories.
		if rigName == "mayor" || rigName == "deacon" || rigName == "daemon" ||
			rigName == "docs" || rigName[0] == '.' {
			continue
		}
		// A rig is identified by the presence of a .beads directory.
		if !dirExists(filepath.Join(rigPath, ".beads")) {
			continue
		}

		// Rig-level PRIME.md is the shared fallback inherited by all workers.
		if !fileExists(filepath.Join(rigPath, ".beads", "PRIME.md")) {
			issues = append(issues, primingIssue{
				location:    rigName,
				issueType:   "missing_prime_md",
				description: "Missing .beads/PRIME.md (Gas Town context fallback)",
				fixable:     true,
			})
		}

		// Witness and refinery are rig-level agents with their own settings.
		for _, agent := range []string{"witness", "refinery"} {
			if dirExists(filepath.Join(rigPath, agent)) {
				issues = append(issues, c.checkAgentPriming(townRoot, filepath.Join(rigName, agent), agent)...)
			}
		}

		// Crew and polecat worktrees share the same PRIME.md requirement;
		// both are handled by one helper.
		issues = append(issues, c.checkWorkerPrimeMD(rigPath, rigName, "crew")...)
		issues = append(issues, c.checkWorkerPrimeMD(rigPath, rigName, "polecats")...)
	}

	return issues
}

// checkWorkerPrimeMD checks every worker worktree under rigPath/<subdir>
// ("crew" or "polecats") for a PRIME.md at its resolved beads directory.
func (c *PrimingCheck) checkWorkerPrimeMD(rigPath, rigName, subdir string) []primingIssue {
	var issues []primingIssue

	workerRoot := filepath.Join(rigPath, subdir)
	if !dirExists(workerRoot) {
		return issues
	}

	workerEntries, _ := os.ReadDir(workerRoot)
	for _, we := range workerEntries {
		if !we.IsDir() || we.Name() == ".claude" {
			continue
		}
		workerPath := filepath.Join(workerRoot, we.Name())
		// Follow the beads redirect chain (workers redirect to the rig) to
		// find where PRIME.md should live.
		beadsDir := beads.ResolveBeadsDir(workerPath)
		if !fileExists(filepath.Join(beadsDir, "PRIME.md")) {
			issues = append(issues, primingIssue{
				location:    fmt.Sprintf("%s/%s/%s", rigName, subdir, we.Name()),
				issueType:   "missing_prime_md",
				description: "Missing PRIME.md (Gas Town context fallback)",
				fixable:     true,
			})
		}
	}
	return issues
}
// hasGtPrimeHook reports whether the parsed settings contain a SessionStart
// hook whose command string invokes `gt prime`.
//
// Expected shape:
//
//	{"hooks": {"SessionStart": [{"hooks": [{"command": "... gt prime ..."}]}]}}
//
// Any structural mismatch simply yields false. Indexing a nil map is safe in
// Go, so failed type assertions can fall through without explicit guards.
func (c *PrimingCheck) hasGtPrimeHook(settings map[string]any) bool {
	hooks, _ := settings["hooks"].(map[string]any)
	entries, _ := hooks["SessionStart"].([]any)

	for _, entry := range entries {
		group, ok := entry.(map[string]any)
		if !ok {
			continue
		}
		nested, _ := group["hooks"].([]any)
		for _, item := range nested {
			spec, ok := item.(map[string]any)
			if !ok {
				continue
			}
			if cmd, ok := spec["command"].(string); ok && strings.Contains(cmd, "gt prime") {
				return true
			}
		}
	}
	return false
}
// countLines returns the number of lines in the file at path, or 0 if the
// file cannot be opened. The result is a heuristic (used for the CLAUDE.md
// size check), so a scan error yields the partial count rather than failing.
func (c *PrimingCheck) countLines(path string) int {
	file, err := os.Open(path)
	if err != nil {
		return 0
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	// Raise the token limit above bufio.Scanner's 64KB default: a single
	// over-long line would otherwise make Scan return false early and
	// silently undercount the file.
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)

	count := 0
	for scanner.Scan() {
		count++
	}
	// scanner.Err() is intentionally not fatal here; see doc comment.
	return count
}
// Fix attempts to repair the fixable issues recorded by the last Run.
// Currently only missing PRIME.md files ("missing_prime_md") can be fixed,
// by provisioning the default Gas Town content. All failures are collected
// and reported together rather than aborting on the first one.
func (c *PrimingCheck) Fix(ctx *CheckContext) error {
	var failures []string

	for _, issue := range c.issues {
		if !issue.fixable || issue.issueType != "missing_prime_md" {
			continue
		}

		var err error
		if strings.Contains(issue.location, "/crew/") || strings.Contains(issue.location, "/polecats/") {
			// Worker location: follow the worktree's beads redirect chain
			// before provisioning.
			err = beads.ProvisionPrimeMDForWorktree(filepath.Join(ctx.TownRoot, issue.location))
		} else {
			// Rig location: provision directly into the rig's beads dir.
			err = beads.ProvisionPrimeMD(filepath.Join(ctx.TownRoot, issue.location, constants.DirBeads))
		}
		if err != nil {
			failures = append(failures, fmt.Sprintf("%s: %v", issue.location, err))
		}
	}

	if len(failures) > 0 {
		return fmt.Errorf("%s", strings.Join(failures, "; "))
	}
	return nil
}

View File

@@ -281,6 +281,14 @@ func (m *Manager) AddWithOptions(name string, opts AddOptions) (*Polecat, error)
fmt.Printf("Warning: could not set up shared beads: %v\n", err)
}
// Provision PRIME.md with Gas Town context for this worker.
// This is the fallback if SessionStart hook fails - ensures polecats
// always have GUPP and essential Gas Town context.
if err := beads.ProvisionPrimeMDForWorktree(clonePath); err != nil {
// Non-fatal - polecat can still work via hook, warn but don't fail
fmt.Printf("Warning: could not provision PRIME.md: %v\n", err)
}
// Copy overlay files from .runtime/overlay/ to polecat root.
// This allows services to have .env and other config files at their root.
if err := rig.CopyOverlay(m.rig.Path, clonePath); err != nil {
@@ -572,8 +580,9 @@ func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOpt
}, nil
}
// ReconcilePool syncs pool state with existing polecat directories.
// This should be called to recover from crashes or stale state.
// ReconcilePool derives pool InUse state from existing polecat directories.
// This implements ZFC: InUse is discovered from filesystem, not tracked separately.
// Called before each allocation to ensure InUse reflects reality.
func (m *Manager) ReconcilePool() {
polecats, err := m.List()
if err != nil {
@@ -586,7 +595,7 @@ func (m *Manager) ReconcilePool() {
}
m.namePool.Reconcile(names)
_ = m.namePool.Save() // non-fatal: state file update
// Note: No Save() needed - InUse is transient state, only OverflowNext is persisted
}
// PoolStatus returns information about the name pool.

View File

@@ -386,6 +386,15 @@ func (m *Manager) AddRig(opts AddRigOptions) (*Rig, error) {
}
fmt.Printf(" ✓ Initialized beads (prefix: %s)\n", opts.BeadsPrefix)
// Provision PRIME.md with Gas Town context for all workers in this rig.
// This is the fallback if SessionStart hook fails - ensures ALL workers
// (crew, polecats, refinery, witness) have GUPP and essential Gas Town context.
// PRIME.md is read by bd prime and output to the agent.
rigBeadsPath := filepath.Join(rigPath, ".beads")
if err := beads.ProvisionPrimeMD(rigBeadsPath); err != nil {
fmt.Printf(" Warning: Could not provision PRIME.md: %v\n", err)
}
// Create refinery as worktree from bare repo on default branch.
// Refinery needs to see polecat branches (shared .repo.git) and merges them.
// Being on the default branch allows direct merge workflow.