feat: implement agent-driven compaction (bd-buol)

- Add --analyze mode to export candidates without API key
- Add --apply mode to accept agent-provided summaries
- Move legacy AI-powered compaction behind new --auto flag
- Update documentation for new workflow
- Address GH #243 complaint about API key requirement
Author: Steve Yegge
Date: 2025-11-07 23:11:01 -08:00
Parent: 07bd7066c1
Commit: 6355ee1448
3 changed files with 375 additions and 58 deletions


@@ -261,6 +261,17 @@ bd show <id> [<id>...] --json
bd rename-prefix kw- --dry-run # Preview changes
bd rename-prefix kw- --json # Apply rename
# Agent-driven compaction (memory decay)
bd compact --analyze --json # Get candidates for review
bd compact --analyze --tier 1 --limit 10 --json # Limited batch
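bd compact --analyze --id bd-42 --json # Analyze a single issue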
bd compact --apply --id bd-42 --summary summary.txt # Apply compaction
bd compact --apply --id bd-42 --summary - < summary.txt # From stdin
bd compact --stats --json # Show statistics
# Legacy AI-powered compaction (requires ANTHROPIC_API_KEY)
bd compact --auto --dry-run --all # Preview
bd compact --auto --all --tier 1 # Auto-compact tier 1
# Restore compacted issue from git history
bd restore <id> # View full history at time of compaction


@@ -611,14 +611,24 @@ bd config unset jira.url
### Compaction (Memory Decay)
Beads uses AI to compress old closed issues, keeping databases lightweight as they age:
Beads provides **agent-driven compaction** - your AI agent decides what to compress, no API keys required:
```bash
bd compact --dry-run --all # Preview candidates
bd compact --days 90 # Compact closed issues older than 90 days
# Agent-driven workflow (recommended)
bd compact --analyze --json # Get candidates with full content
bd compact --apply --id bd-42 --summary summary.txt
# Legacy AI-powered workflow (requires ANTHROPIC_API_KEY)
bd compact --auto --dry-run --all # Preview candidates
bd compact --auto --all # Auto-compact all eligible issues
```
This is agentic memory decay - your database naturally forgets fine-grained details while preserving essential context.
**How it works:**
1. Use `--analyze` to export candidates (closed 30+ days) with full content
2. Summarize the content using any LLM (Claude, GPT, local model, etc.)
3. Use `--apply` to persist the summary and mark as compacted
This is agentic memory decay - your database naturally forgets fine-grained details while preserving essential context. The agent has full control over compression quality.
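For example, a single pass through this loop might look like the sketch below (a minimal illustration: `jq` is assumed to be available, and `your-llm-summarize` stands in for whatever summarization step your agent runs):
```bash
# Fetch one tier-1 candidate with full content
bd compact --analyze --tier 1 --limit 1 --json > candidates.json

# Extract the issue ID and its long-form text
id=$(jq -r '.[0].id' candidates.json)
jq -r '.[0] | .description + "\n" + .design + "\n" + .notes' candidates.json > full.txt

# Summarize with any model, then persist the result via stdin
your-llm-summarize < full.txt | bd compact --apply --id "$id" --summary -
```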
### Export/Import


@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"time"
@@ -21,6 +22,12 @@ var (
compactBatch int
compactWorkers int
compactStats bool
compactAnalyze bool
compactApply bool
compactAuto bool
compactSummary string
compactActor string
compactLimit int
)
var compactCmd = &cobra.Command{
@@ -31,15 +38,27 @@ var compactCmd = &cobra.Command{
Compaction reduces database size by summarizing closed issues that are no longer
actively referenced. This is permanent graceful decay - original content is discarded.
Modes:
- Analyze: Export candidates for agent review (no API key needed)
- Apply: Accept agent-provided summary (no API key needed)
- Auto: AI-powered compaction (requires ANTHROPIC_API_KEY, legacy)
Tiers:
- Tier 1: Semantic compression (30 days closed, 70% reduction)
- Tier 2: Ultra compression (90 days closed, 95% reduction)
Examples:
bd compact --dry-run # Preview candidates
bd compact --all # Compact all eligible issues
bd compact --id bd-42 # Compact specific issue
bd compact --id bd-42 --force # Force compact (bypass checks)
# Agent-driven workflow (recommended)
bd compact --analyze --json # Get candidates with full content
bd compact --apply --id bd-42 --summary summary.txt
bd compact --apply --id bd-42 --summary - < summary.txt
# Legacy AI-powered workflow
bd compact --auto --dry-run # Preview candidates
bd compact --auto --all # Compact all eligible issues
bd compact --auto --id bd-42 # Compact specific issue
# Statistics
bd compact --stats # Show statistics
`,
Run: func(_ *cobra.Command, _ []string) {
@@ -60,19 +79,72 @@ Examples:
return
}
// If using daemon, delegate to RPC
if daemonClient != nil {
runCompactRPC(ctx)
return
// Count active modes
activeModes := 0
if compactAnalyze {
activeModes++
}
if compactApply {
activeModes++
}
if compactAuto {
activeModes++
}
// Direct mode - original logic
// Check for exactly one mode
if activeModes == 0 {
fmt.Fprintf(os.Stderr, "Error: must specify one mode: --analyze, --apply, or --auto\n")
os.Exit(1)
}
if activeModes > 1 {
fmt.Fprintf(os.Stderr, "Error: cannot use multiple modes together (--analyze, --apply, --auto are mutually exclusive)\n")
os.Exit(1)
}
// Get SQLite store
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n")
os.Exit(1)
}
// Handle analyze mode
if compactAnalyze {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: --analyze not yet supported in daemon mode, use --no-daemon\n")
os.Exit(1)
}
runCompactAnalyze(ctx, sqliteStore)
return
}
// Handle apply mode
if compactApply {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: --apply not yet supported in daemon mode, use --no-daemon\n")
os.Exit(1)
}
if compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --apply requires --id\n")
os.Exit(1)
}
if compactSummary == "" {
fmt.Fprintf(os.Stderr, "Error: --apply requires --summary\n")
os.Exit(1)
}
runCompactApply(ctx, sqliteStore)
return
}
// Handle auto mode (legacy)
if compactAuto {
// API key check only for auto mode
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: --auto mode requires ANTHROPIC_API_KEY environment variable\n")
os.Exit(1)
}
if compactID != "" && compactAll {
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
os.Exit(1)
@@ -88,12 +160,6 @@ Examples:
os.Exit(1)
}
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n")
os.Exit(1)
}
config := &compact.Config{
APIKey: apiKey,
Concurrency: compactWorkers,
@@ -112,6 +178,7 @@ Examples:
}
runCompactAll(ctx, compactor, sqliteStore)
}
},
}
@@ -562,6 +629,227 @@ func runCompactStatsRPC() {
fmt.Printf(" Min age: %s\n", result.Stats.Tier2MinAge)
}
func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
type Candidate struct {
ID string `json:"id"`
Title string `json:"title"`
Description string `json:"description"`
Design string `json:"design"`
Notes string `json:"notes"`
AcceptanceCriteria string `json:"acceptance_criteria"`
SizeBytes int `json:"size_bytes"`
AgeDays int `json:"age_days"`
Tier int `json:"tier"`
Compacted bool `json:"compacted"`
}
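// Illustrative shape of one emitted array element (example values only):
//   {"id":"bd-42","title":"...","size_bytes":2048,"age_days":45,"tier":1,"compacted":false, ...}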
var candidates []Candidate
// Single issue mode
if compactID != "" {
issue, err := store.GetIssue(ctx, compactID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
}
sizeBytes := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
ageDays := 0
if issue.ClosedAt != nil {
ageDays = int(time.Since(*issue.ClosedAt).Hours() / 24)
}
candidates = append(candidates, Candidate{
ID: issue.ID,
Title: issue.Title,
Description: issue.Description,
Design: issue.Design,
Notes: issue.Notes,
AcceptanceCriteria: issue.AcceptanceCriteria,
SizeBytes: sizeBytes,
AgeDays: ageDays,
Tier: compactTier,
Compacted: issue.CompactionLevel > 0,
})
} else {
// Get tier candidates
var tierCandidates []*sqlite.CompactionCandidate
var err error
if compactTier == 1 {
tierCandidates, err = store.GetTier1Candidates(ctx)
} else {
tierCandidates, err = store.GetTier2Candidates(ctx)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err)
os.Exit(1)
}
// Apply limit if specified
if compactLimit > 0 && len(tierCandidates) > compactLimit {
tierCandidates = tierCandidates[:compactLimit]
}
// Fetch full details for each candidate
for _, c := range tierCandidates {
issue, err := store.GetIssue(ctx, c.IssueID)
if err != nil {
continue // Skip issues we can't fetch
}
ageDays := int(time.Since(c.ClosedAt).Hours() / 24)
candidates = append(candidates, Candidate{
ID: issue.ID,
Title: issue.Title,
Description: issue.Description,
Design: issue.Design,
Notes: issue.Notes,
AcceptanceCriteria: issue.AcceptanceCriteria,
SizeBytes: c.OriginalSize,
AgeDays: ageDays,
Tier: compactTier,
Compacted: issue.CompactionLevel > 0,
})
}
}
if jsonOutput {
outputJSON(candidates)
return
}
// Human-readable output
fmt.Printf("Compaction Candidates (Tier %d)\n\n", compactTier)
for _, c := range candidates {
compactStatus := ""
if c.Compacted {
compactStatus = " (already compacted)"
}
fmt.Printf("ID: %s%s\n", c.ID, compactStatus)
fmt.Printf(" Title: %s\n", c.Title)
fmt.Printf(" Size: %d bytes\n", c.SizeBytes)
fmt.Printf(" Age: %d days\n\n", c.AgeDays)
}
fmt.Printf("Total: %d candidates\n", len(candidates))
}
func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
start := time.Now()
// Read summary
var summaryBytes []byte
var err error
if compactSummary == "-" {
// Read from stdin
summaryBytes, err = io.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to read summary from stdin: %v\n", err)
os.Exit(1)
}
} else {
summaryBytes, err = os.ReadFile(compactSummary)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to read summary file: %v\n", err)
os.Exit(1)
}
}
summary := string(summaryBytes)
// Get issue
issue, err := store.GetIssue(ctx, compactID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
}
// Calculate sizes
originalSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
compactedSize := len(summary)
// Check eligibility unless --force
if !compactForce {
eligible, reason, err := store.CheckEligibility(ctx, compactID, compactTier)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to check eligibility: %v\n", err)
os.Exit(1)
}
if !eligible {
fmt.Fprintf(os.Stderr, "Error: %s is not eligible for Tier %d compaction: %s\n", compactID, compactTier, reason)
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass eligibility checks\n")
os.Exit(1)
}
// Enforce size reduction unless --force
if compactedSize >= originalSize {
fmt.Fprintf(os.Stderr, "Error: summary (%d bytes) is not shorter than original (%d bytes)\n", compactedSize, originalSize)
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass size validation\n")
os.Exit(1)
}
}
// Apply compaction
actor := compactActor
if actor == "" {
actor = "agent"
}
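// Overwrite the description with the agent's summary and clear the other
// long-form fields; the pre-compaction content remains reachable through
// git history (see bd restore).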
updates := map[string]interface{}{
"description": summary,
"design": "",
"notes": "",
"acceptance_criteria": "",
}
if err := store.UpdateIssue(ctx, compactID, updates, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to update issue: %v\n", err)
os.Exit(1)
}
commitHash := compact.GetCurrentCommitHash()
if err := store.ApplyCompaction(ctx, compactID, compactTier, originalSize, compactedSize, commitHash); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to apply compaction: %v\n", err)
os.Exit(1)
}
savingBytes := originalSize - compactedSize
// Guard against a zero-length original so the percentage stays finite
reductionPct := 0.0
if originalSize > 0 {
reductionPct = float64(savingBytes) / float64(originalSize) * 100
}
eventData := fmt.Sprintf("Tier %d compaction: %d → %d bytes (saved %d, %.1f%%)", compactTier, originalSize, compactedSize, savingBytes, reductionPct)
if err := store.AddComment(ctx, compactID, actor, eventData); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to record event: %v\n", err)
os.Exit(1)
}
if err := store.MarkIssueDirty(ctx, compactID); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to mark dirty: %v\n", err)
os.Exit(1)
}
elapsed := time.Since(start)
// Schedule auto-flush to export changes (runs on the --json path too)
markDirtyAndScheduleFlush()
if jsonOutput {
output := map[string]interface{}{
"success": true,
"issue_id": compactID,
"tier": compactTier,
"original_size": originalSize,
"compacted_size": compactedSize,
"saved_bytes": savingBytes,
"reduction_pct": reductionPct,
"elapsed_ms": elapsed.Milliseconds(),
}
outputJSON(output)
return
}
fmt.Printf("✓ Compacted %s (Tier %d)\n", compactID, compactTier)
fmt.Printf(" %d → %d bytes (saved %d, %.1f%%)\n", originalSize, compactedSize, savingBytes, reductionPct)
fmt.Printf(" Time: %v\n", elapsed)
}
func init() {
compactCmd.Flags().BoolVar(&compactDryRun, "dry-run", false, "Preview without compacting")
compactCmd.Flags().IntVar(&compactTier, "tier", 1, "Compaction tier (1 or 2)")
@@ -573,5 +861,13 @@ func init() {
compactCmd.Flags().BoolVar(&compactStats, "stats", false, "Show compaction statistics")
compactCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output JSON format")
// New mode flags
compactCmd.Flags().BoolVar(&compactAnalyze, "analyze", false, "Analyze mode: export candidates for agent review")
compactCmd.Flags().BoolVar(&compactApply, "apply", false, "Apply mode: accept agent-provided summary")
compactCmd.Flags().BoolVar(&compactAuto, "auto", false, "Auto mode: AI-powered compaction (legacy)")
compactCmd.Flags().StringVar(&compactSummary, "summary", "", "Path to summary file (use '-' for stdin)")
compactCmd.Flags().StringVar(&compactActor, "actor", "agent", "Actor name for audit trail")
compactCmd.Flags().IntVar(&compactLimit, "limit", 0, "Limit number of candidates (0 = no limit)")
rootCmd.AddCommand(compactCmd)
}