Standardize error handling: use FatalError in compact.go, sync.go, migrate.go

Replace direct fmt.Fprintf(os.Stderr, "Error: ...") + os.Exit(1) patterns with
the FatalError() and FatalErrorWithHint() helpers for consistent error handling.

Files updated:
- compact.go: All 48 os.Exit(1) calls converted
- sync.go: All error patterns converted (one intentional non-error summary os.Exit retained)
- migrate.go: Partial conversion (4 patterns converted)

This is incremental progress on bd-qioh. Remaining work: ~326 error patterns
across other cmd/bd files.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-23 13:45:14 -08:00
parent 05f37d2408
commit 03b12e4b4b
3 changed files with 94 additions and 195 deletions
+48 -102
View File
@@ -166,8 +166,7 @@ Examples:
} else { } else {
sqliteStore, ok := store.(*sqlite.SQLiteStorage) sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok { if !ok {
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n") FatalError("compact requires SQLite storage")
os.Exit(1)
} }
runCompactStats(ctx, sqliteStore) runCompactStats(ctx, sqliteStore)
} }
@@ -188,26 +187,20 @@ Examples:
// Check for exactly one mode // Check for exactly one mode
if activeModes == 0 { if activeModes == 0 {
fmt.Fprintf(os.Stderr, "Error: must specify one mode: --analyze, --apply, or --auto\n") FatalError("must specify one mode: --analyze, --apply, or --auto")
os.Exit(1)
} }
if activeModes > 1 { if activeModes > 1 {
fmt.Fprintf(os.Stderr, "Error: cannot use multiple modes together (--analyze, --apply, --auto are mutually exclusive)\n") FatalError("cannot use multiple modes together (--analyze, --apply, --auto are mutually exclusive)")
os.Exit(1)
} }
// Handle analyze mode (requires direct database access) // Handle analyze mode (requires direct database access)
if compactAnalyze { if compactAnalyze {
if err := ensureDirectMode("compact --analyze requires direct database access"); err != nil { if err := ensureDirectMode("compact --analyze requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalErrorWithHint(fmt.Sprintf("%v", err), "Use --no-daemon flag to bypass daemon and access database directly")
fmt.Fprintf(os.Stderr, "Hint: Use --no-daemon flag to bypass daemon and access database directly\n")
os.Exit(1)
} }
sqliteStore, ok := store.(*sqlite.SQLiteStorage) sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok { if !ok {
fmt.Fprintf(os.Stderr, "Error: failed to open database in direct mode\n") FatalErrorWithHint("failed to open database in direct mode", "Ensure .beads/beads.db exists and is readable")
fmt.Fprintf(os.Stderr, "Hint: Ensure .beads/beads.db exists and is readable\n")
os.Exit(1)
} }
runCompactAnalyze(ctx, sqliteStore) runCompactAnalyze(ctx, sqliteStore)
return return
@@ -216,23 +209,17 @@ Examples:
// Handle apply mode (requires direct database access) // Handle apply mode (requires direct database access)
if compactApply { if compactApply {
if err := ensureDirectMode("compact --apply requires direct database access"); err != nil { if err := ensureDirectMode("compact --apply requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalErrorWithHint(fmt.Sprintf("%v", err), "Use --no-daemon flag to bypass daemon and access database directly")
fmt.Fprintf(os.Stderr, "Hint: Use --no-daemon flag to bypass daemon and access database directly\n")
os.Exit(1)
} }
if compactID == "" { if compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --apply requires --id\n") FatalError("--apply requires --id")
os.Exit(1)
} }
if compactSummary == "" { if compactSummary == "" {
fmt.Fprintf(os.Stderr, "Error: --apply requires --summary\n") FatalError("--apply requires --summary")
os.Exit(1)
} }
sqliteStore, ok := store.(*sqlite.SQLiteStorage) sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok { if !ok {
fmt.Fprintf(os.Stderr, "Error: failed to open database in direct mode\n") FatalErrorWithHint("failed to open database in direct mode", "Ensure .beads/beads.db exists and is readable")
fmt.Fprintf(os.Stderr, "Hint: Ensure .beads/beads.db exists and is readable\n")
os.Exit(1)
} }
runCompactApply(ctx, sqliteStore) runCompactApply(ctx, sqliteStore)
return return
@@ -248,16 +235,13 @@ Examples:
// Validation checks // Validation checks
if compactID != "" && compactAll { if compactID != "" && compactAll {
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n") FatalError("cannot use --id and --all together")
os.Exit(1)
} }
if compactForce && compactID == "" { if compactForce && compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n") FatalError("--force requires --id")
os.Exit(1)
} }
if compactID == "" && !compactAll && !compactDryRun { if compactID == "" && !compactAll && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n") FatalError("must specify --all, --id, or --dry-run")
os.Exit(1)
} }
// Use RPC if daemon available, otherwise direct mode // Use RPC if daemon available, otherwise direct mode
@@ -269,14 +253,12 @@ Examples:
// Fallback to direct mode // Fallback to direct mode
apiKey := os.Getenv("ANTHROPIC_API_KEY") apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun { if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: --auto mode requires ANTHROPIC_API_KEY environment variable\n") FatalError("--auto mode requires ANTHROPIC_API_KEY environment variable")
os.Exit(1)
} }
sqliteStore, ok := store.(*sqlite.SQLiteStorage) sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok { if !ok {
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n") FatalError("compact requires SQLite storage")
os.Exit(1)
} }
config := &compact.Config{ config := &compact.Config{
@@ -289,8 +271,7 @@ Examples:
compactor, err := compact.New(sqliteStore, apiKey, config) compactor, err := compact.New(sqliteStore, apiKey, config)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to create compactor: %v\n", err) FatalError("failed to create compactor: %v", err)
os.Exit(1)
} }
if compactID != "" { if compactID != "" {
@@ -309,19 +290,16 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
if !compactForce { if !compactForce {
eligible, reason, err := store.CheckEligibility(ctx, issueID, compactTier) eligible, reason, err := store.CheckEligibility(ctx, issueID, compactTier)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to check eligibility: %v\n", err) FatalError("failed to check eligibility: %v", err)
os.Exit(1)
} }
if !eligible { if !eligible {
fmt.Fprintf(os.Stderr, "Error: %s is not eligible for Tier %d compaction: %s\n", issueID, compactTier, reason) FatalError("%s is not eligible for Tier %d compaction: %s", issueID, compactTier, reason)
os.Exit(1)
} }
} }
issue, err := store.GetIssue(ctx, issueID) issue, err := store.GetIssue(ctx, issueID)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err) FatalError("failed to get issue: %v", err)
os.Exit(1)
} }
originalSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria) originalSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
@@ -349,19 +327,16 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
if compactTier == 1 { if compactTier == 1 {
compactErr = compactor.CompactTier1(ctx, issueID) compactErr = compactor.CompactTier1(ctx, issueID)
} else { } else {
fmt.Fprintf(os.Stderr, "Error: Tier 2 compaction not yet implemented\n") FatalError("Tier 2 compaction not yet implemented")
os.Exit(1)
} }
if compactErr != nil { if compactErr != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", compactErr) FatalError("%v", compactErr)
os.Exit(1)
} }
issue, err = store.GetIssue(ctx, issueID) issue, err = store.GetIssue(ctx, issueID)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get updated issue: %v\n", err) FatalError("failed to get updated issue: %v", err)
os.Exit(1)
} }
compactedSize := len(issue.Description) compactedSize := len(issue.Description)
@@ -407,8 +382,7 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
if compactTier == 1 { if compactTier == 1 {
tier1, err := store.GetTier1Candidates(ctx) tier1, err := store.GetTier1Candidates(ctx)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err) FatalError("failed to get candidates: %v", err)
os.Exit(1)
} }
for _, c := range tier1 { for _, c := range tier1 {
candidates = append(candidates, c.IssueID) candidates = append(candidates, c.IssueID)
@@ -416,8 +390,7 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
} else { } else {
tier2, err := store.GetTier2Candidates(ctx) tier2, err := store.GetTier2Candidates(ctx)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err) FatalError("failed to get candidates: %v", err)
os.Exit(1)
} }
for _, c := range tier2 { for _, c := range tier2 {
candidates = append(candidates, c.IssueID) candidates = append(candidates, c.IssueID)
@@ -471,8 +444,7 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
results, err := compactor.CompactTier1Batch(ctx, candidates) results, err := compactor.CompactTier1Batch(ctx, candidates)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: batch compaction failed: %v\n", err) FatalError("batch compaction failed: %v", err)
os.Exit(1)
} }
successCount := 0 successCount := 0
@@ -535,14 +507,12 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
func runCompactStats(ctx context.Context, store *sqlite.SQLiteStorage) { func runCompactStats(ctx context.Context, store *sqlite.SQLiteStorage) {
tier1, err := store.GetTier1Candidates(ctx) tier1, err := store.GetTier1Candidates(ctx)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get Tier 1 candidates: %v\n", err) FatalError("failed to get Tier 1 candidates: %v", err)
os.Exit(1)
} }
tier2, err := store.GetTier2Candidates(ctx) tier2, err := store.GetTier2Candidates(ctx)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get Tier 2 candidates: %v\n", err) FatalError("failed to get Tier 2 candidates: %v", err)
os.Exit(1)
} }
tier1Size := 0 tier1Size := 0
@@ -608,24 +578,20 @@ func progressBar(current, total int) string {
//nolint:unparam // ctx may be used in future for cancellation //nolint:unparam // ctx may be used in future for cancellation
func runCompactRPC(_ context.Context) { func runCompactRPC(_ context.Context) {
if compactID != "" && compactAll { if compactID != "" && compactAll {
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n") FatalError("cannot use --id and --all together")
os.Exit(1)
} }
if compactForce && compactID == "" { if compactForce && compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n") FatalError("--force requires --id")
os.Exit(1)
} }
if compactID == "" && !compactAll && !compactDryRun { if compactID == "" && !compactAll && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n") FatalError("must specify --all, --id, or --dry-run")
os.Exit(1)
} }
apiKey := os.Getenv("ANTHROPIC_API_KEY") apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun { if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n") FatalError("ANTHROPIC_API_KEY environment variable not set")
os.Exit(1)
} }
args := map[string]interface{}{ args := map[string]interface{}{
@@ -643,13 +609,11 @@ func runCompactRPC(_ context.Context) {
resp, err := daemonClient.Execute("compact", args) resp, err := daemonClient.Execute("compact", args)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
if !resp.Success { if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error) FatalError("%s", resp.Error)
os.Exit(1)
} }
if jsonOutput { if jsonOutput {
@@ -676,8 +640,7 @@ func runCompactRPC(_ context.Context) {
} }
if err := json.Unmarshal(resp.Data, &result); err != nil { if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err) FatalError("parsing response: %v", err)
os.Exit(1)
} }
if compactID != "" { if compactID != "" {
@@ -722,13 +685,11 @@ func runCompactStatsRPC() {
resp, err := daemonClient.Execute("compact_stats", args) resp, err := daemonClient.Execute("compact_stats", args)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
if !resp.Success { if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error) FatalError("%s", resp.Error)
os.Exit(1)
} }
if jsonOutput { if jsonOutput {
@@ -749,8 +710,7 @@ func runCompactStatsRPC() {
} }
if err := json.Unmarshal(resp.Data, &result); err != nil { if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err) FatalError("parsing response: %v", err)
os.Exit(1)
} }
fmt.Printf("\nCompaction Statistics\n") fmt.Printf("\nCompaction Statistics\n")
@@ -784,8 +744,7 @@ func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
if compactID != "" { if compactID != "" {
issue, err := store.GetIssue(ctx, compactID) issue, err := store.GetIssue(ctx, compactID)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err) FatalError("failed to get issue: %v", err)
os.Exit(1)
} }
sizeBytes := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria) sizeBytes := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
@@ -816,8 +775,7 @@ func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
tierCandidates, err = store.GetTier2Candidates(ctx) tierCandidates, err = store.GetTier2Candidates(ctx)
} }
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err) FatalError("failed to get candidates: %v", err)
os.Exit(1)
} }
// Apply limit if specified // Apply limit if specified
@@ -879,15 +837,13 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
// Read from stdin // Read from stdin
summaryBytes, err = io.ReadAll(os.Stdin) summaryBytes, err = io.ReadAll(os.Stdin)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to read summary from stdin: %v\n", err) FatalError("failed to read summary from stdin: %v", err)
os.Exit(1)
} }
} else { } else {
// #nosec G304 -- summary file path provided explicitly by operator // #nosec G304 -- summary file path provided explicitly by operator
summaryBytes, err = os.ReadFile(compactSummary) summaryBytes, err = os.ReadFile(compactSummary)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to read summary file: %v\n", err) FatalError("failed to read summary file: %v", err)
os.Exit(1)
} }
} }
summary := string(summaryBytes) summary := string(summaryBytes)
@@ -895,8 +851,7 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
// Get issue // Get issue
issue, err := store.GetIssue(ctx, compactID) issue, err := store.GetIssue(ctx, compactID)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err) FatalError("failed to get issue: %v", err)
os.Exit(1)
} }
// Calculate sizes // Calculate sizes
@@ -907,20 +862,15 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
if !compactForce { if !compactForce {
eligible, reason, err := store.CheckEligibility(ctx, compactID, compactTier) eligible, reason, err := store.CheckEligibility(ctx, compactID, compactTier)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to check eligibility: %v\n", err) FatalError("failed to check eligibility: %v", err)
os.Exit(1)
} }
if !eligible { if !eligible {
fmt.Fprintf(os.Stderr, "Error: %s is not eligible for Tier %d compaction: %s\n", compactID, compactTier, reason) FatalErrorWithHint(fmt.Sprintf("%s is not eligible for Tier %d compaction: %s", compactID, compactTier, reason), "use --force to bypass eligibility checks")
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass eligibility checks\n")
os.Exit(1)
} }
// Enforce size reduction unless --force // Enforce size reduction unless --force
if compactedSize >= originalSize { if compactedSize >= originalSize {
fmt.Fprintf(os.Stderr, "Error: summary (%d bytes) is not shorter than original (%d bytes)\n", compactedSize, originalSize) FatalErrorWithHint(fmt.Sprintf("summary (%d bytes) is not shorter than original (%d bytes)", compactedSize, originalSize), "use --force to bypass size validation")
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass size validation\n")
os.Exit(1)
} }
} }
@@ -938,27 +888,23 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
} }
if err := store.UpdateIssue(ctx, compactID, updates, actor); err != nil { if err := store.UpdateIssue(ctx, compactID, updates, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to update issue: %v\n", err) FatalError("failed to update issue: %v", err)
os.Exit(1)
} }
commitHash := compact.GetCurrentCommitHash() commitHash := compact.GetCurrentCommitHash()
if err := store.ApplyCompaction(ctx, compactID, compactTier, originalSize, compactedSize, commitHash); err != nil { if err := store.ApplyCompaction(ctx, compactID, compactTier, originalSize, compactedSize, commitHash); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to apply compaction: %v\n", err) FatalError("failed to apply compaction: %v", err)
os.Exit(1)
} }
savingBytes := originalSize - compactedSize savingBytes := originalSize - compactedSize
reductionPct := float64(savingBytes) / float64(originalSize) * 100 reductionPct := float64(savingBytes) / float64(originalSize) * 100
eventData := fmt.Sprintf("Tier %d compaction: %d → %d bytes (saved %d, %.1f%%)", compactTier, originalSize, compactedSize, savingBytes, reductionPct) eventData := fmt.Sprintf("Tier %d compaction: %d → %d bytes (saved %d, %.1f%%)", compactTier, originalSize, compactedSize, savingBytes, reductionPct)
if err := store.AddComment(ctx, compactID, actor, eventData); err != nil { if err := store.AddComment(ctx, compactID, actor, eventData); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to record event: %v\n", err) FatalError("failed to record event: %v", err)
os.Exit(1)
} }
if err := store.MarkIssueDirty(ctx, compactID); err != nil { if err := store.MarkIssueDirty(ctx, compactID); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to mark dirty: %v\n", err) FatalError("failed to mark dirty: %v", err)
os.Exit(1)
} }
elapsed := time.Since(start) elapsed := time.Since(start)
+7 -7
View File
@@ -74,11 +74,10 @@ This command:
"error": "no_beads_directory", "error": "no_beads_directory",
"message": "No .beads directory found. Run 'bd init' first.", "message": "No .beads directory found. Run 'bd init' first.",
}) })
} else {
fmt.Fprintf(os.Stderr, "Error: no .beads directory found\n")
fmt.Fprintf(os.Stderr, "Hint: run 'bd init' to initialize bd\n")
}
os.Exit(1) os.Exit(1)
} else {
FatalErrorWithHint("no .beads directory found", "run 'bd init' to initialize bd")
}
} }
// Load config to get target database name (respects user's config.json) // Load config to get target database name (respects user's config.json)
@@ -103,10 +102,10 @@ This command:
"error": "detection_failed", "error": "detection_failed",
"message": err.Error(), "message": err.Error(),
}) })
os.Exit(1)
} else { } else {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
} }
os.Exit(1)
} }
if len(databases) == 0 { if len(databases) == 0 {
@@ -174,14 +173,15 @@ This command:
"message": "Multiple old database files found", "message": "Multiple old database files found",
"databases": formatDBList(oldDBs), "databases": formatDBList(oldDBs),
}) })
os.Exit(1)
} else { } else {
fmt.Fprintf(os.Stderr, "Error: multiple old database files found:\n") fmt.Fprintf(os.Stderr, "Error: multiple old database files found:\n")
for _, db := range oldDBs { for _, db := range oldDBs {
fmt.Fprintf(os.Stderr, " - %s (version: %s)\n", filepath.Base(db.path), db.version) fmt.Fprintf(os.Stderr, " - %s (version: %s)\n", filepath.Base(db.path), db.version)
} }
fmt.Fprintf(os.Stderr, "\nPlease manually rename the correct database to %s and remove others.\n", cfg.Database) fmt.Fprintf(os.Stderr, "\nPlease manually rename the correct database to %s and remove others.\n", cfg.Database)
os.Exit(1)
} }
os.Exit(1)
} else if currentDB != nil && currentDB.version != Version { } else if currentDB != nil && currentDB.version != Version {
// Update version metadata // Update version metadata
needsVersionUpdate = true needsVersionUpdate = true
+39 -86
View File
@@ -83,15 +83,13 @@ Use --merge to merge the sync branch back to main branch.`,
// Find JSONL path // Find JSONL path
jsonlPath := findJSONLPath() jsonlPath := findJSONLPath()
if jsonlPath == "" { if jsonlPath == "" {
fmt.Fprintf(os.Stderr, "Error: not in a bd workspace (no .beads directory found)\n") FatalError("not in a bd workspace (no .beads directory found)")
os.Exit(1)
} }
// If status mode, show diff between sync branch and main // If status mode, show diff between sync branch and main
if status { if status {
if err := showSyncStatus(ctx); err != nil { if err := showSyncStatus(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
return return
} }
@@ -105,8 +103,7 @@ Use --merge to merge the sync branch back to main branch.`,
// If merge mode, merge sync branch to main // If merge mode, merge sync branch to main
if merge { if merge {
if err := mergeSyncBranch(ctx, dryRun); err != nil { if err := mergeSyncBranch(ctx, dryRun); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
return return
} }
@@ -114,8 +111,7 @@ Use --merge to merge the sync branch back to main branch.`,
// If from-main mode, one-way sync from main branch (gt-ick9: ephemeral branch support) // If from-main mode, one-way sync from main branch (gt-ick9: ephemeral branch support)
if fromMain { if fromMain {
if err := doSyncFromMain(ctx, jsonlPath, renameOnImport, dryRun, noGitHistory); err != nil { if err := doSyncFromMain(ctx, jsonlPath, renameOnImport, dryRun, noGitHistory); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
return return
} }
@@ -127,8 +123,7 @@ Use --merge to merge the sync branch back to main branch.`,
} else { } else {
fmt.Println("→ Importing from JSONL...") fmt.Println("→ Importing from JSONL...")
if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil { if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil {
fmt.Fprintf(os.Stderr, "Error importing: %v\n", err) FatalError("importing: %v", err)
os.Exit(1)
} }
fmt.Println("✓ Import complete") fmt.Println("✓ Import complete")
} }
@@ -141,8 +136,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Println("→ [DRY RUN] Would export pending changes to JSONL") fmt.Println("→ [DRY RUN] Would export pending changes to JSONL")
} else { } else {
if err := exportToJSONL(ctx, jsonlPath); err != nil { if err := exportToJSONL(ctx, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Error exporting: %v\n", err) FatalError("exporting: %v", err)
os.Exit(1)
} }
} }
return return
@@ -156,8 +150,7 @@ Use --merge to merge the sync branch back to main branch.`,
} else { } else {
fmt.Println("→ Exporting pending changes to JSONL (squash mode)...") fmt.Println("→ Exporting pending changes to JSONL (squash mode)...")
if err := exportToJSONL(ctx, jsonlPath); err != nil { if err := exportToJSONL(ctx, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Error exporting: %v\n", err) FatalError("exporting: %v", err)
os.Exit(1)
} }
fmt.Println("✓ Changes accumulated in JSONL") fmt.Println("✓ Changes accumulated in JSONL")
fmt.Println(" Run 'bd sync' (without --squash) to commit all accumulated changes") fmt.Println(" Run 'bd sync' (without --squash) to commit all accumulated changes")
@@ -167,19 +160,14 @@ Use --merge to merge the sync branch back to main branch.`,
// Check if we're in a git repository // Check if we're in a git repository
if !isGitRepo() { if !isGitRepo() {
fmt.Fprintf(os.Stderr, "Error: not in a git repository\n") FatalErrorWithHint("not in a git repository", "run 'git init' to initialize a repository")
fmt.Fprintf(os.Stderr, "Hint: run 'git init' to initialize a repository\n")
os.Exit(1)
} }
// Preflight: check for merge/rebase in progress // Preflight: check for merge/rebase in progress
if inMerge, err := gitHasUnmergedPaths(); err != nil { if inMerge, err := gitHasUnmergedPaths(); err != nil {
fmt.Fprintf(os.Stderr, "Error checking git state: %v\n", err) FatalError("checking git state: %v", err)
os.Exit(1)
} else if inMerge { } else if inMerge {
fmt.Fprintf(os.Stderr, "Error: unmerged paths or merge in progress\n") FatalErrorWithHint("unmerged paths or merge in progress", "resolve conflicts, run 'bd import' if needed, then 'bd sync' again")
fmt.Fprintf(os.Stderr, "Hint: resolve conflicts, run 'bd import' if needed, then 'bd sync' again\n")
os.Exit(1)
} }
// GH#638: Check sync.branch BEFORE upstream check // GH#638: Check sync.branch BEFORE upstream check
@@ -201,8 +189,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Println("→ No upstream configured, using --from-main mode") fmt.Println("→ No upstream configured, using --from-main mode")
// Force noGitHistory=true for auto-detected from-main mode (fixes #417) // Force noGitHistory=true for auto-detected from-main mode (fixes #417)
if err := doSyncFromMain(ctx, jsonlPath, renameOnImport, dryRun, true); err != nil { if err := doSyncFromMain(ctx, jsonlPath, renameOnImport, dryRun, true); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
return return
} }
@@ -235,8 +222,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Printf("→ DB has %d issues but JSONL has %d (stale JSONL detected)\n", dbCount, jsonlCount) fmt.Printf("→ DB has %d issues but JSONL has %d (stale JSONL detected)\n", dbCount, jsonlCount)
fmt.Println("→ Importing JSONL first (ZFC)...") fmt.Println("→ Importing JSONL first (ZFC)...")
if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil { if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil {
fmt.Fprintf(os.Stderr, "Error importing (ZFC): %v\n", err) FatalError("importing (ZFC): %v", err)
os.Exit(1)
} }
// Skip export after ZFC import - JSONL is source of truth // Skip export after ZFC import - JSONL is source of truth
skipExport = true skipExport = true
@@ -256,8 +242,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Printf("→ JSONL has %d issues but DB has only %d (stale DB detected - bd-53c)\n", jsonlCount, dbCount) fmt.Printf("→ JSONL has %d issues but DB has only %d (stale DB detected - bd-53c)\n", jsonlCount, dbCount)
fmt.Println("→ Importing JSONL first to prevent data loss...") fmt.Println("→ Importing JSONL first to prevent data loss...")
if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil { if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil {
fmt.Fprintf(os.Stderr, "Error importing (reverse ZFC): %v\n", err) FatalError("importing (reverse ZFC): %v", err)
os.Exit(1)
} }
// Skip export after import - JSONL is source of truth // Skip export after import - JSONL is source of truth
skipExport = true skipExport = true
@@ -285,8 +270,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Println("→ JSONL content differs from last sync (bd-f2f)") fmt.Println("→ JSONL content differs from last sync (bd-f2f)")
fmt.Println("→ Importing JSONL first to prevent stale DB from overwriting changes...") fmt.Println("→ Importing JSONL first to prevent stale DB from overwriting changes...")
if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil { if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil {
fmt.Fprintf(os.Stderr, "Error importing (bd-f2f hash mismatch): %v\n", err) FatalError("importing (bd-f2f hash mismatch): %v", err)
os.Exit(1)
} }
// Don't skip export - we still want to export any remaining local dirty issues // Don't skip export - we still want to export any remaining local dirty issues
// The import updated DB with JSONL content, and export will write merged state // The import updated DB with JSONL content, and export will write merged state
@@ -299,12 +283,10 @@ Use --merge to merge the sync branch back to main branch.`,
// Pre-export integrity checks // Pre-export integrity checks
if err := ensureStoreActive(); err == nil && store != nil { if err := ensureStoreActive(); err == nil && store != nil {
if err := validatePreExport(ctx, store, jsonlPath); err != nil { if err := validatePreExport(ctx, store, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Pre-export validation failed: %v\n", err) FatalError("pre-export validation failed: %v", err)
os.Exit(1)
} }
if err := checkDuplicateIDs(ctx, store); err != nil { if err := checkDuplicateIDs(ctx, store); err != nil {
fmt.Fprintf(os.Stderr, "Database corruption detected: %v\n", err) FatalError("database corruption detected: %v", err)
os.Exit(1)
} }
if orphaned, err := checkOrphanedDeps(ctx, store); err != nil { if orphaned, err := checkOrphanedDeps(ctx, store); err != nil {
fmt.Fprintf(os.Stderr, "Warning: orphaned dependency check failed: %v\n", err) fmt.Fprintf(os.Stderr, "Warning: orphaned dependency check failed: %v\n", err)
@@ -315,16 +297,14 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Println("→ Exporting pending changes to JSONL...") fmt.Println("→ Exporting pending changes to JSONL...")
if err := exportToJSONL(ctx, jsonlPath); err != nil { if err := exportToJSONL(ctx, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Error exporting: %v\n", err) FatalError("exporting: %v", err)
os.Exit(1)
} }
} }
// Capture left snapshot (pre-pull state) for 3-way merge // Capture left snapshot (pre-pull state) for 3-way merge
// This is mandatory for deletion tracking integrity // This is mandatory for deletion tracking integrity
if err := captureLeftSnapshot(jsonlPath); err != nil { if err := captureLeftSnapshot(jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to capture snapshot (required for deletion tracking): %v\n", err) FatalError("failed to capture snapshot (required for deletion tracking): %v", err)
os.Exit(1)
} }
} }
@@ -340,8 +320,7 @@ Use --merge to merge the sync branch back to main branch.`,
// Check for changes in the external beads repo // Check for changes in the external beads repo
externalRepoRoot, err := getRepoRootFromPath(ctx, beadsDir) externalRepoRoot, err := getRepoRootFromPath(ctx, beadsDir)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
// Check if there are changes to commit // Check if there are changes to commit
@@ -356,8 +335,7 @@ Use --merge to merge the sync branch back to main branch.`,
} else { } else {
committed, err := commitToExternalBeadsRepo(ctx, beadsDir, message, !noPush) committed, err := commitToExternalBeadsRepo(ctx, beadsDir, message, !noPush)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) FatalError("%v", err)
os.Exit(1)
} }
if committed { if committed {
if !noPush { if !noPush {
@@ -377,16 +355,14 @@ Use --merge to merge the sync branch back to main branch.`,
} else { } else {
fmt.Println("→ Pulling from external beads repo...") fmt.Println("→ Pulling from external beads repo...")
if err := pullFromExternalBeadsRepo(ctx, beadsDir); err != nil { if err := pullFromExternalBeadsRepo(ctx, beadsDir); err != nil {
fmt.Fprintf(os.Stderr, "Error pulling: %v\n", err) FatalError("pulling: %v", err)
os.Exit(1)
} }
fmt.Println("✓ Pulled from external beads repo") fmt.Println("✓ Pulled from external beads repo")
// Re-import after pull to update local database // Re-import after pull to update local database
fmt.Println("→ Importing JSONL...") fmt.Println("→ Importing JSONL...")
if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil { if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil {
fmt.Fprintf(os.Stderr, "Error importing: %v\n", err) FatalError("importing: %v", err)
os.Exit(1)
} }
} }
} }
@@ -426,8 +402,7 @@ Use --merge to merge the sync branch back to main branch.`,
// Step 2: Check if there are changes to commit (check entire .beads/ directory) // Step 2: Check if there are changes to commit (check entire .beads/ directory)
hasChanges, err := gitHasBeadsChanges(ctx) hasChanges, err := gitHasBeadsChanges(ctx)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error checking git status: %v\n", err) FatalError("checking git status: %v", err)
os.Exit(1)
} }
// Track if we already pushed via worktree (to skip Step 5) // Track if we already pushed via worktree (to skip Step 5)
@@ -448,8 +423,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Printf("→ Committing changes to sync branch '%s'...\n", syncBranchName) fmt.Printf("→ Committing changes to sync branch '%s'...\n", syncBranchName)
result, err := syncbranch.CommitToSyncBranch(ctx, repoRoot, syncBranchName, jsonlPath, !noPush) result, err := syncbranch.CommitToSyncBranch(ctx, repoRoot, syncBranchName, jsonlPath, !noPush)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error committing to sync branch: %v\n", err) FatalError("committing to sync branch: %v", err)
os.Exit(1)
} }
if result.Committed { if result.Committed {
fmt.Printf("✓ Committed to %s\n", syncBranchName) fmt.Printf("✓ Committed to %s\n", syncBranchName)
@@ -467,8 +441,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Println("→ Committing changes to git...") fmt.Println("→ Committing changes to git...")
} }
if err := gitCommitBeadsDir(ctx, message); err != nil { if err := gitCommitBeadsDir(ctx, message); err != nil {
fmt.Fprintf(os.Stderr, "Error committing: %v\n", err) FatalError("committing: %v", err)
os.Exit(1)
} }
} }
} else { } else {
@@ -498,8 +471,7 @@ Use --merge to merge the sync branch back to main branch.`,
pullResult, err := syncbranch.PullFromSyncBranch(ctx, repoRoot, syncBranchName, jsonlPath, !noPush, requireMassDeleteConfirmation) pullResult, err := syncbranch.PullFromSyncBranch(ctx, repoRoot, syncBranchName, jsonlPath, !noPush, requireMassDeleteConfirmation)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error pulling from sync branch: %v\n", err) FatalError("pulling from sync branch: %v", err)
os.Exit(1)
} }
if pullResult.Pulled { if pullResult.Pulled {
if pullResult.Merged { if pullResult.Merged {
@@ -525,8 +497,7 @@ Use --merge to merge the sync branch back to main branch.`,
if response == "y" || response == "yes" { if response == "y" || response == "yes" {
fmt.Printf("→ Pushing to %s...\n", syncBranchName) fmt.Printf("→ Pushing to %s...\n", syncBranchName)
if err := syncbranch.PushSyncBranch(ctx, repoRoot, syncBranchName); err != nil { if err := syncbranch.PushSyncBranch(ctx, repoRoot, syncBranchName); err != nil {
fmt.Fprintf(os.Stderr, "Error pushing to sync branch: %v\n", err) FatalError("pushing to sync branch: %v", err)
os.Exit(1)
} }
fmt.Printf("✓ Pushed merged changes to %s\n", syncBranchName) fmt.Printf("✓ Pushed merged changes to %s\n", syncBranchName)
pushedViaSyncBranch = true pushedViaSyncBranch = true
@@ -564,31 +535,23 @@ Use --merge to merge the sync branch back to main branch.`,
// Export clean JSONL from DB (database is source of truth) // Export clean JSONL from DB (database is source of truth)
if exportErr := exportToJSONL(ctx, jsonlPath); exportErr != nil { if exportErr := exportToJSONL(ctx, jsonlPath); exportErr != nil {
fmt.Fprintf(os.Stderr, "Error: failed to export for conflict resolution: %v\n", exportErr) FatalErrorWithHint(fmt.Sprintf("failed to export for conflict resolution: %v", exportErr), "resolve conflicts manually and run 'bd import' then 'bd sync' again")
fmt.Fprintf(os.Stderr, "Hint: resolve conflicts manually and run 'bd import' then 'bd sync' again\n")
os.Exit(1)
} }
// Mark conflict as resolved // Mark conflict as resolved
addCmd := exec.CommandContext(ctx, "git", "add", jsonlPath) addCmd := exec.CommandContext(ctx, "git", "add", jsonlPath)
if addErr := addCmd.Run(); addErr != nil { if addErr := addCmd.Run(); addErr != nil {
fmt.Fprintf(os.Stderr, "Error: failed to mark conflict resolved: %v\n", addErr) FatalErrorWithHint(fmt.Sprintf("failed to mark conflict resolved: %v", addErr), "resolve conflicts manually and run 'bd import' then 'bd sync' again")
fmt.Fprintf(os.Stderr, "Hint: resolve conflicts manually and run 'bd import' then 'bd sync' again\n")
os.Exit(1)
} }
// Continue rebase // Continue rebase
if continueErr := runGitRebaseContinue(ctx); continueErr != nil { if continueErr := runGitRebaseContinue(ctx); continueErr != nil {
fmt.Fprintf(os.Stderr, "Error: failed to continue rebase: %v\n", continueErr) FatalErrorWithHint(fmt.Sprintf("failed to continue rebase: %v", continueErr), "resolve conflicts manually and run 'bd import' then 'bd sync' again")
fmt.Fprintf(os.Stderr, "Hint: resolve conflicts manually and run 'bd import' then 'bd sync' again\n")
os.Exit(1)
} }
fmt.Println("✓ Auto-resolved JSONL conflict") fmt.Println("✓ Auto-resolved JSONL conflict")
} else { } else {
// Not an auto-resolvable conflict, fail with original error // Not an auto-resolvable conflict, fail with original error
fmt.Fprintf(os.Stderr, "Error pulling: %v\n", err)
// Check if this looks like a merge driver failure // Check if this looks like a merge driver failure
errStr := err.Error() errStr := err.Error()
if strings.Contains(errStr, "merge driver") || if strings.Contains(errStr, "merge driver") ||
@@ -598,8 +561,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Fprintf(os.Stderr, "Fix: bd doctor --fix\n\n") fmt.Fprintf(os.Stderr, "Fix: bd doctor --fix\n\n")
} }
fmt.Fprintf(os.Stderr, "Hint: resolve conflicts manually and run 'bd import' then 'bd sync' again\n") FatalErrorWithHint(fmt.Sprintf("pulling: %v", err), "resolve conflicts manually and run 'bd import' then 'bd sync' again")
os.Exit(1)
} }
} }
} }
@@ -617,8 +579,7 @@ Use --merge to merge the sync branch back to main branch.`,
// Step 3.5: Perform 3-way merge and prune deletions // Step 3.5: Perform 3-way merge and prune deletions
if err := ensureStoreActive(); err == nil && store != nil { if err := ensureStoreActive(); err == nil && store != nil {
if err := applyDeletionsFromMerge(ctx, store, jsonlPath); err != nil { if err := applyDeletionsFromMerge(ctx, store, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Error during 3-way merge: %v\n", err) FatalError("during 3-way merge: %v", err)
os.Exit(1)
} }
} }
@@ -627,8 +588,7 @@ Use --merge to merge the sync branch back to main branch.`,
// tombstoning issues that were in our local export but got lost during merge (bd-sync-deletion fix) // tombstoning issues that were in our local export but got lost during merge (bd-sync-deletion fix)
fmt.Println("→ Importing updated JSONL...") fmt.Println("→ Importing updated JSONL...")
if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory, true); err != nil { if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory, true); err != nil {
fmt.Fprintf(os.Stderr, "Error importing: %v\n", err) FatalError("importing: %v", err)
os.Exit(1)
} }
// Validate import didn't cause data loss // Validate import didn't cause data loss
@@ -639,8 +599,7 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Fprintf(os.Stderr, "Warning: failed to count issues after import: %v\n", err) fmt.Fprintf(os.Stderr, "Warning: failed to count issues after import: %v\n", err)
} else { } else {
if err := validatePostImportWithExpectedDeletions(beforeCount, afterCount, 0, jsonlPath); err != nil { if err := validatePostImportWithExpectedDeletions(beforeCount, afterCount, 0, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Post-import validation failed: %v\n", err) FatalError("post-import validation failed: %v", err)
os.Exit(1)
} }
} }
} }
@@ -681,15 +640,13 @@ Use --merge to merge the sync branch back to main branch.`,
if needsExport { if needsExport {
fmt.Println("→ Re-exporting after import to sync DB changes...") fmt.Println("→ Re-exporting after import to sync DB changes...")
if err := exportToJSONL(ctx, jsonlPath); err != nil { if err := exportToJSONL(ctx, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Error re-exporting after import: %v\n", err) FatalError("re-exporting after import: %v", err)
os.Exit(1)
} }
// Step 4.6: Commit the re-export if it created changes // Step 4.6: Commit the re-export if it created changes
hasPostImportChanges, err := gitHasBeadsChanges(ctx) hasPostImportChanges, err := gitHasBeadsChanges(ctx)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error checking git status after re-export: %v\n", err) FatalError("checking git status after re-export: %v", err)
os.Exit(1)
} }
if hasPostImportChanges { if hasPostImportChanges {
fmt.Println("→ Committing DB changes from import...") fmt.Println("→ Committing DB changes from import...")
@@ -697,16 +654,14 @@ Use --merge to merge the sync branch back to main branch.`,
// Commit to sync branch via worktree (bd-e3w) // Commit to sync branch via worktree (bd-e3w)
result, err := syncbranch.CommitToSyncBranch(ctx, repoRoot, syncBranchName, jsonlPath, !noPush) result, err := syncbranch.CommitToSyncBranch(ctx, repoRoot, syncBranchName, jsonlPath, !noPush)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error committing to sync branch: %v\n", err) FatalError("committing to sync branch: %v", err)
os.Exit(1)
} }
if result.Pushed { if result.Pushed {
pushedViaSyncBranch = true pushedViaSyncBranch = true
} }
} else { } else {
if err := gitCommitBeadsDir(ctx, "bd sync: apply DB changes after import"); err != nil { if err := gitCommitBeadsDir(ctx, "bd sync: apply DB changes after import"); err != nil {
fmt.Fprintf(os.Stderr, "Error committing post-import changes: %v\n", err) FatalError("committing post-import changes: %v", err)
os.Exit(1)
} }
} }
hasChanges = true // Mark that we have changes to push hasChanges = true // Mark that we have changes to push
@@ -733,9 +688,7 @@ Use --merge to merge the sync branch back to main branch.`,
} else { } else {
fmt.Println("→ Pushing to remote...") fmt.Println("→ Pushing to remote...")
if err := gitPush(ctx); err != nil { if err := gitPush(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Error pushing: %v\n", err) FatalErrorWithHint(fmt.Sprintf("pushing: %v", err), "pull may have brought new changes, run 'bd sync' again")
fmt.Fprintf(os.Stderr, "Hint: pull may have brought new changes, run 'bd sync' again\n")
os.Exit(1)
} }
} }
} }