feat: implement priming subsystem improvements
Phase 1 of dynamic priming subsystem:

1. PRIME.md provisioning for all workers (hq-5z76w, hq-ukjrr Part A)
   - Added ProvisionPrimeMD to beads package with Gas Town context template
   - Provision at rig level in AddRig() so all workers inherit it
   - Added fallback provisioning in crew and polecat managers
   - Created PRIME.md for existing rigs

2. Post-handoff detection to prevent handoff loop bug (hq-ukjrr Part B)
   - Added FileHandoffMarker constant (.runtime/handoff_to_successor)
   - gt handoff writes marker before respawn
   - gt prime detects marker and outputs "HANDOFF COMPLETE" warning
   - Marker cleared after detection to prevent duplicate warnings

3. Priming health checks for gt doctor (hq-5scnt)
   - New priming_check.go validates priming subsystem configuration
   - Checks: SessionStart hook, gt prime command, PRIME.md presence
   - Warns if CLAUDE.md is too large (should be bootstrap pointer)
   - Fixable: provisions missing PRIME.md files

This ensures crew workers get Gas Town context (GUPP, hooks, propulsion)
even if the gt prime hook fails, via bd prime fallback.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -281,6 +281,14 @@ func (m *Manager) AddWithOptions(name string, opts AddOptions) (*Polecat, error)
|
||||
fmt.Printf("Warning: could not set up shared beads: %v\n", err)
|
||||
}
|
||||
|
||||
// Provision PRIME.md with Gas Town context for this worker.
|
||||
// This is the fallback if SessionStart hook fails - ensures polecats
|
||||
// always have GUPP and essential Gas Town context.
|
||||
if err := beads.ProvisionPrimeMDForWorktree(clonePath); err != nil {
|
||||
// Non-fatal - polecat can still work via hook, warn but don't fail
|
||||
fmt.Printf("Warning: could not provision PRIME.md: %v\n", err)
|
||||
}
|
||||
|
||||
// Copy overlay files from .runtime/overlay/ to polecat root.
|
||||
// This allows services to have .env and other config files at their root.
|
||||
if err := rig.CopyOverlay(m.rig.Path, clonePath); err != nil {
|
||||
@@ -572,8 +580,9 @@ func (m *Manager) RepairWorktreeWithOptions(name string, force bool, opts AddOpt
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReconcilePool syncs pool state with existing polecat directories.
|
||||
// This should be called to recover from crashes or stale state.
|
||||
// ReconcilePool derives pool InUse state from existing polecat directories.
|
||||
// This implements ZFC: InUse is discovered from filesystem, not tracked separately.
|
||||
// Called before each allocation to ensure InUse reflects reality.
|
||||
func (m *Manager) ReconcilePool() {
|
||||
polecats, err := m.List()
|
||||
if err != nil {
|
||||
@@ -586,7 +595,7 @@ func (m *Manager) ReconcilePool() {
|
||||
}
|
||||
|
||||
m.namePool.Reconcile(names)
|
||||
_ = m.namePool.Save() // non-fatal: state file update
|
||||
// Note: No Save() needed - InUse is transient state, only OverflowNext is persisted
|
||||
}
|
||||
|
||||
// PoolStatus returns information about the name pool.
|
||||
|
||||
Reference in New Issue
Block a user