refactor: Split large cmd/bd files to meet 800-line limit (bd-xtf5)
Split 6 files exceeding 800 lines by extracting cohesive function groups: - show.go (1592→578): extracted show_thread.go, close.go, edit.go, update.go - doctor.go (1295→690): extracted doctor_fix.go, doctor_health.go, doctor_pollution.go - sync.go (1201→749): extracted sync_git.go - compact.go (1199→775): extracted compact_tombstone.go, compact_rpc.go - linear.go (1190→641): extracted linear_sync.go, linear_conflict.go - main.go (1148→800): extracted main_help.go, main_errors.go, main_daemon.go All files are now under the 800-line limit, satisfying the acceptance criteria. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -2,17 +2,14 @@ package main
|
||||
|
||||
import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"time"

	"github.com/spf13/cobra"

	"github.com/steveyegge/beads/internal/compact"
	"github.com/steveyegge/beads/internal/storage/sqlite"
	"github.com/steveyegge/beads/internal/types"
)
|
||||
|
||||
var (
|
||||
@@ -512,182 +509,6 @@ func runCompactStats(ctx context.Context, store *sqlite.SQLiteStorage) {
|
||||
}
|
||||
}
|
||||
|
||||
// progressBar renders a fixed-width textual progress bar such as
// "[████████                                ]". The bar body is always
// exactly 40 cells: filled cells use '█', the rest are spaces.
//
// Fix: the original total == 0 path returned string(make([]byte, 40)),
// which is 40 NUL bytes rather than 40 spaces. It also did not clamp
// filled, so current > total could overrun the bar width.
func progressBar(current, total int) string {
	const width = 40

	// Proportion of the bar to fill; an unknown/zero total renders empty.
	filled := 0
	if total > 0 {
		filled = (current * width) / total
	}
	// Clamp so out-of-range inputs cannot over- or under-fill the bar.
	if filled > width {
		filled = width
	}
	if filled < 0 {
		filled = 0
	}

	// Build as runes because '█' is a multi-byte UTF-8 character.
	cells := make([]rune, width)
	for i := range cells {
		if i < filled {
			cells[i] = '█'
		} else {
			cells[i] = ' '
		}
	}
	return "[" + string(cells) + "]"
}
|
||||
|
||||
// runCompactRPC executes compaction through the daemon RPC client rather
// than against local storage directly. It validates the flag combination
// (--id/--all/--force/--dry-run), builds the RPC argument map from
// package-level flags, and renders the daemon's response either as raw
// JSON (--json) or as a human-readable summary. Exits the process with
// status 1 on invalid flags, a missing API key, or any RPC error.
//
//nolint:unparam // ctx may be used in future for cancellation
func runCompactRPC(_ context.Context) {
	// --id and --all are mutually exclusive targeting modes.
	if compactID != "" && compactAll {
		fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
		os.Exit(1)
	}

	// --force only makes sense when targeting a single issue.
	if compactForce && compactID == "" {
		fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
		os.Exit(1)
	}

	if compactID == "" && !compactAll && !compactDryRun {
		fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
		os.Exit(1)
	}

	// The API key is only required when actually compacting; a dry run
	// does not call the model.
	apiKey := os.Getenv("ANTHROPIC_API_KEY")
	if apiKey == "" && !compactDryRun {
		fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n")
		os.Exit(1)
	}

	args := map[string]interface{}{
		"tier":       compactTier,
		"dry_run":    compactDryRun,
		"force":      compactForce,
		"all":        compactAll,
		"api_key":    apiKey,
		"workers":    compactWorkers,
		"batch_size": compactBatch,
	}
	if compactID != "" {
		args["issue_id"] = compactID
	}

	resp, err := daemonClient.Execute("compact", args)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}

	if !resp.Success {
		fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
		os.Exit(1)
	}

	// Raw passthrough for machine consumers.
	if jsonOutput {
		fmt.Println(string(resp.Data))
		return
	}

	// Mirror of the daemon's compact response payload. The single-issue
	// fields are set for --id; Results is populated for --all.
	var result struct {
		Success       bool   `json:"success"`
		IssueID       string `json:"issue_id,omitempty"`
		OriginalSize  int    `json:"original_size,omitempty"`
		CompactedSize int    `json:"compacted_size,omitempty"`
		Reduction     string `json:"reduction,omitempty"`
		Duration      string `json:"duration,omitempty"`
		DryRun        bool   `json:"dry_run,omitempty"`
		Results       []struct {
			IssueID       string `json:"issue_id"`
			Success       bool   `json:"success"`
			Error         string `json:"error,omitempty"`
			OriginalSize  int    `json:"original_size,omitempty"`
			CompactedSize int    `json:"compacted_size,omitempty"`
			Reduction     string `json:"reduction,omitempty"`
		} `json:"results,omitempty"`
	}

	if err := json.Unmarshal(resp.Data, &result); err != nil {
		fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
		os.Exit(1)
	}

	if compactID != "" {
		// Single-issue summary.
		if result.DryRun {
			fmt.Printf("DRY RUN - Tier %d compaction\n\n", compactTier)
			fmt.Printf("Issue: %s\n", compactID)
			fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
			fmt.Printf("Estimated reduction: %s\n", result.Reduction)
		} else {
			fmt.Printf("Successfully compacted %s\n", result.IssueID)
			fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
			fmt.Printf("Compacted size: %d bytes\n", result.CompactedSize)
			fmt.Printf("Reduction: %s\n", result.Reduction)
			fmt.Printf("Duration: %s\n", result.Duration)
		}
	} else if compactAll {
		// Batch summary: aggregate count first, then a per-issue line.
		if result.DryRun {
			fmt.Printf("DRY RUN - Found %d candidates for Tier %d compaction\n", len(result.Results), compactTier)
		} else {
			successCount := 0
			for _, r := range result.Results {
				if r.Success {
					successCount++
				}
			}
			fmt.Printf("Compacted %d/%d issues in %s\n", successCount, len(result.Results), result.Duration)
			for _, r := range result.Results {
				if r.Success {
					fmt.Printf(" ✓ %s: %d → %d bytes (%s)\n", r.IssueID, r.OriginalSize, r.CompactedSize, r.Reduction)
				} else {
					fmt.Printf(" ✗ %s: %s\n", r.IssueID, r.Error)
				}
			}
		}
	}
}
|
||||
|
||||
func runCompactStatsRPC() {
|
||||
args := map[string]interface{}{
|
||||
"tier": compactTier,
|
||||
}
|
||||
|
||||
resp, err := daemonClient.Execute("compact_stats", args)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !resp.Success {
|
||||
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if jsonOutput {
|
||||
fmt.Println(string(resp.Data))
|
||||
return
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Success bool `json:"success"`
|
||||
Stats struct {
|
||||
Tier1Candidates int `json:"tier1_candidates"`
|
||||
Tier2Candidates int `json:"tier2_candidates"`
|
||||
TotalClosed int `json:"total_closed"`
|
||||
Tier1MinAge string `json:"tier1_min_age"`
|
||||
Tier2MinAge string `json:"tier2_min_age"`
|
||||
EstimatedSavings string `json:"estimated_savings,omitempty"`
|
||||
} `json:"stats"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(resp.Data, &result); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("\nCompaction Statistics\n")
|
||||
fmt.Printf("=====================\n\n")
|
||||
fmt.Printf("Total closed issues: %d\n\n", result.Stats.TotalClosed)
|
||||
fmt.Printf("Tier 1 (30+ days closed, not compacted):\n")
|
||||
fmt.Printf(" Candidates: %d\n", result.Stats.Tier1Candidates)
|
||||
fmt.Printf(" Min age: %s\n\n", result.Stats.Tier1MinAge)
|
||||
fmt.Printf("Tier 2 (90+ days closed, Tier 1 compacted):\n")
|
||||
fmt.Printf(" Candidates: %d\n", result.Stats.Tier2Candidates)
|
||||
fmt.Printf(" Min age: %s\n", result.Stats.Tier2MinAge)
|
||||
}
|
||||
|
||||
func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
|
||||
type Candidate struct {
|
||||
ID string `json:"id"`
|
||||
@@ -929,251 +750,6 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
|
||||
markDirtyAndScheduleFlush()
|
||||
}
|
||||
|
||||
// TombstonePruneResult contains the results of tombstone pruning
type TombstonePruneResult struct {
	// PrunedCount is the number of expired tombstones removed (or that
	// would be removed, when produced by previewPruneTombstones).
	PrunedCount int
	// PrunedIDs lists the issue IDs of the pruned tombstones.
	PrunedIDs []string
	// TTLDays is the time-to-live used for the prune, in whole days.
	TTLDays int
}
|
||||
|
||||
// pruneExpiredTombstones reads issues.jsonl, removes expired tombstones,
|
||||
// and writes back the pruned file. Returns the prune result.
|
||||
// If customTTL is > 0, it overrides the default TTL (bypasses MinTombstoneTTL safety).
|
||||
// If customTTL is 0, uses DefaultTombstoneTTL.
|
||||
func pruneExpiredTombstones(customTTL time.Duration) (*TombstonePruneResult, error) {
|
||||
beadsDir := filepath.Dir(dbPath)
|
||||
issuesPath := filepath.Join(beadsDir, "issues.jsonl")
|
||||
|
||||
// Check if issues.jsonl exists
|
||||
if _, err := os.Stat(issuesPath); os.IsNotExist(err) {
|
||||
return &TombstonePruneResult{}, nil
|
||||
}
|
||||
|
||||
// Read all issues
|
||||
// nolint:gosec // G304: issuesPath is controlled from beadsDir
|
||||
file, err := os.Open(issuesPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open issues.jsonl: %w", err)
|
||||
}
|
||||
|
||||
var allIssues []*types.Issue
|
||||
decoder := json.NewDecoder(file)
|
||||
for {
|
||||
var issue types.Issue
|
||||
if err := decoder.Decode(&issue); err != nil {
|
||||
if err.Error() == "EOF" {
|
||||
break
|
||||
}
|
||||
// Skip corrupt lines
|
||||
continue
|
||||
}
|
||||
allIssues = append(allIssues, &issue)
|
||||
}
|
||||
if err := file.Close(); err != nil {
|
||||
return nil, fmt.Errorf("failed to close issues file: %w", err)
|
||||
}
|
||||
|
||||
// Determine TTL - customTTL > 0 overrides default (for --hard mode)
|
||||
ttl := types.DefaultTombstoneTTL
|
||||
if customTTL > 0 {
|
||||
ttl = customTTL
|
||||
}
|
||||
ttlDays := int(ttl.Hours() / 24)
|
||||
|
||||
// Filter out expired tombstones
|
||||
var kept []*types.Issue
|
||||
var prunedIDs []string
|
||||
for _, issue := range allIssues {
|
||||
if issue.IsExpired(ttl) {
|
||||
prunedIDs = append(prunedIDs, issue.ID)
|
||||
} else {
|
||||
kept = append(kept, issue)
|
||||
}
|
||||
}
|
||||
|
||||
if len(prunedIDs) == 0 {
|
||||
return &TombstonePruneResult{TTLDays: ttlDays}, nil
|
||||
}
|
||||
|
||||
// Write back the pruned file atomically
|
||||
dir := filepath.Dir(issuesPath)
|
||||
base := filepath.Base(issuesPath)
|
||||
tempFile, err := os.CreateTemp(dir, base+".prune.*")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp file: %w", err)
|
||||
}
|
||||
tempPath := tempFile.Name()
|
||||
|
||||
encoder := json.NewEncoder(tempFile)
|
||||
for _, issue := range kept {
|
||||
if err := encoder.Encode(issue); err != nil {
|
||||
_ = tempFile.Close()
|
||||
_ = os.Remove(tempPath)
|
||||
return nil, fmt.Errorf("failed to write issue %s: %w", issue.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tempFile.Close(); err != nil {
|
||||
_ = os.Remove(tempPath)
|
||||
return nil, fmt.Errorf("failed to close temp file: %w", err)
|
||||
}
|
||||
|
||||
// Atomically replace
|
||||
if err := os.Rename(tempPath, issuesPath); err != nil {
|
||||
_ = os.Remove(tempPath)
|
||||
return nil, fmt.Errorf("failed to replace issues.jsonl: %w", err)
|
||||
}
|
||||
|
||||
return &TombstonePruneResult{
|
||||
PrunedCount: len(prunedIDs),
|
||||
PrunedIDs: prunedIDs,
|
||||
TTLDays: ttlDays,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// previewPruneTombstones checks what tombstones would be pruned without modifying files.
|
||||
// Used for dry-run mode in cleanup command.
|
||||
// If customTTL is > 0, it overrides the default TTL (bypasses MinTombstoneTTL safety).
|
||||
// If customTTL is 0, uses DefaultTombstoneTTL.
|
||||
func previewPruneTombstones(customTTL time.Duration) (*TombstonePruneResult, error) {
|
||||
beadsDir := filepath.Dir(dbPath)
|
||||
issuesPath := filepath.Join(beadsDir, "issues.jsonl")
|
||||
|
||||
// Check if issues.jsonl exists
|
||||
if _, err := os.Stat(issuesPath); os.IsNotExist(err) {
|
||||
return &TombstonePruneResult{}, nil
|
||||
}
|
||||
|
||||
// Read all issues
|
||||
// nolint:gosec // G304: issuesPath is controlled from beadsDir
|
||||
file, err := os.Open(issuesPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open issues.jsonl: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var allIssues []*types.Issue
|
||||
decoder := json.NewDecoder(file)
|
||||
for {
|
||||
var issue types.Issue
|
||||
if err := decoder.Decode(&issue); err != nil {
|
||||
if err.Error() == "EOF" {
|
||||
break
|
||||
}
|
||||
// Skip corrupt lines
|
||||
continue
|
||||
}
|
||||
allIssues = append(allIssues, &issue)
|
||||
}
|
||||
|
||||
// Determine TTL - customTTL > 0 overrides default (for --hard mode)
|
||||
ttl := types.DefaultTombstoneTTL
|
||||
if customTTL > 0 {
|
||||
ttl = customTTL
|
||||
}
|
||||
ttlDays := int(ttl.Hours() / 24)
|
||||
|
||||
// Count expired tombstones
|
||||
var prunedIDs []string
|
||||
for _, issue := range allIssues {
|
||||
if issue.IsExpired(ttl) {
|
||||
prunedIDs = append(prunedIDs, issue.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return &TombstonePruneResult{
|
||||
PrunedCount: len(prunedIDs),
|
||||
PrunedIDs: prunedIDs,
|
||||
TTLDays: ttlDays,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// runCompactPrune handles the --prune mode for standalone tombstone pruning.
// This mode only prunes expired tombstones from issues.jsonl without doing
// any semantic compaction. It's useful for reducing sync overhead.
//
// Behavior is driven by package-level flags: compactOlderThan (TTL override
// in days; 0 keeps the default), compactDryRun (preview only, no file
// changes), and jsonOutput (machine-readable output). Exits with status 1
// if the preview or prune fails.
func runCompactPrune() {
	start := time.Now()

	// Calculate TTL from --older-than flag (0 means use default 30 days)
	var customTTL time.Duration
	if compactOlderThan > 0 {
		customTTL = time.Duration(compactOlderThan) * 24 * time.Hour
	}

	if compactDryRun {
		// Preview mode - show what would be pruned
		result, err := previewPruneTombstones(customTTL)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: failed to preview tombstones: %v\n", err)
			os.Exit(1)
		}

		if jsonOutput {
			output := map[string]interface{}{
				"dry_run":       true,
				"prune_count":   result.PrunedCount,
				"ttl_days":      result.TTLDays,
				"tombstone_ids": result.PrunedIDs,
			}
			outputJSON(output)
			return
		}

		fmt.Printf("DRY RUN - Tombstone Pruning\n\n")
		fmt.Printf("TTL: %d days\n", result.TTLDays)
		fmt.Printf("Tombstones that would be pruned: %d\n", result.PrunedCount)
		// List every ID for small prunes; otherwise only the first 20.
		if len(result.PrunedIDs) > 0 && len(result.PrunedIDs) <= 20 {
			fmt.Println("\nTombstone IDs:")
			for _, id := range result.PrunedIDs {
				fmt.Printf(" - %s\n", id)
			}
		} else if len(result.PrunedIDs) > 20 {
			fmt.Printf("\nFirst 20 tombstone IDs:\n")
			for _, id := range result.PrunedIDs[:20] {
				fmt.Printf(" - %s\n", id)
			}
			fmt.Printf(" ... and %d more\n", len(result.PrunedIDs)-20)
		}
		return
	}

	// Actually prune tombstones
	result, err := pruneExpiredTombstones(customTTL)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: failed to prune tombstones: %v\n", err)
		os.Exit(1)
	}

	elapsed := time.Since(start)

	if jsonOutput {
		output := map[string]interface{}{
			"success":       true,
			"pruned_count":  result.PrunedCount,
			"ttl_days":      result.TTLDays,
			"tombstone_ids": result.PrunedIDs,
			"elapsed_ms":    elapsed.Milliseconds(),
		}
		outputJSON(output)
		return
	}

	if result.PrunedCount == 0 {
		fmt.Printf("No expired tombstones to prune (TTL: %d days)\n", result.TTLDays)
		return
	}

	fmt.Printf("✓ Pruned %d expired tombstone(s)\n", result.PrunedCount)
	fmt.Printf(" TTL: %d days\n", result.TTLDays)
	fmt.Printf(" Time: %v\n", elapsed)
	// Only list individual IDs for small prunes to keep output readable.
	if len(result.PrunedIDs) <= 10 {
		fmt.Println("\nPruned IDs:")
		for _, id := range result.PrunedIDs {
			fmt.Printf(" - %s\n", id)
		}
	}
}
|
||||
|
||||
func init() {
|
||||
compactCmd.Flags().BoolVar(&compactDryRun, "dry-run", false, "Preview without compacting")
|
||||
compactCmd.Flags().IntVar(&compactTier, "tier", 1, "Compaction tier (1 or 2)")
|
||||
|
||||
Reference in New Issue
Block a user