refactor: Split large cmd/bd files to meet 800-line limit (bd-xtf5)

Split 6 files exceeding 800 lines by extracting cohesive function groups:

- show.go (1592→578): extracted show_thread.go, close.go, edit.go, update.go
- doctor.go (1295→690): extracted doctor_fix.go, doctor_health.go, doctor_pollution.go
- sync.go (1201→749): extracted sync_git.go
- compact.go (1199→775): extracted compact_tombstone.go, compact_rpc.go
- linear.go (1190→641): extracted linear_sync.go, linear_conflict.go
- main.go (1148→800): extracted main_help.go, main_errors.go, main_daemon.go

All files now under 800-line acceptance criteria.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-28 18:04:52 -08:00
parent 7ea7aee411
commit 6c14fd2225
22 changed files with 4251 additions and 4087 deletions

244
cmd/bd/close.go Normal file
View File

@@ -0,0 +1,244 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/hooks"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
)
var closeCmd = &cobra.Command{
Use: "close [id...]",
GroupID: "issues",
Short: "Close one or more issues",
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
CheckReadonly("close")
reason, _ := cmd.Flags().GetString("reason")
if reason == "" {
// Check --resolution alias (Jira CLI convention)
reason, _ = cmd.Flags().GetString("resolution")
}
if reason == "" {
reason = "Closed"
}
force, _ := cmd.Flags().GetBool("force")
continueFlag, _ := cmd.Flags().GetBool("continue")
noAuto, _ := cmd.Flags().GetBool("no-auto")
suggestNext, _ := cmd.Flags().GetBool("suggest-next")
ctx := rootCtx
// --continue only works with a single issue
if continueFlag && len(args) > 1 {
FatalErrorRespectJSON("--continue only works when closing a single issue")
}
// --suggest-next only works with a single issue
if suggestNext && len(args) > 1 {
FatalErrorRespectJSON("--suggest-next only works when closing a single issue")
}
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {
for _, id := range args {
resolveArgs := &rpc.ResolveIDArgs{ID: id}
resp, err := daemonClient.ResolveID(resolveArgs)
if err != nil {
FatalErrorRespectJSON("resolving ID %s: %v", id, err)
}
var resolvedID string
if err := json.Unmarshal(resp.Data, &resolvedID); err != nil {
FatalErrorRespectJSON("unmarshaling resolved ID: %v", err)
}
resolvedIDs = append(resolvedIDs, resolvedID)
}
} else {
var err error
resolvedIDs, err = utils.ResolvePartialIDs(ctx, store, args)
if err != nil {
FatalErrorRespectJSON("%v", err)
}
}
// If daemon is running, use RPC
if daemonClient != nil {
closedIssues := []*types.Issue{}
for _, id := range resolvedIDs {
// Get issue for template and pinned checks
showArgs := &rpc.ShowArgs{ID: id}
showResp, showErr := daemonClient.Show(showArgs)
if showErr == nil {
var issue types.Issue
if json.Unmarshal(showResp.Data, &issue) == nil {
if err := validateIssueClosable(id, &issue, force); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
continue
}
}
}
closeArgs := &rpc.CloseArgs{
ID: id,
Reason: reason,
SuggestNext: suggestNext,
}
resp, err := daemonClient.CloseIssue(closeArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
continue
}
// Handle response based on whether SuggestNext was requested
if suggestNext {
var result rpc.CloseResult
if err := json.Unmarshal(resp.Data, &result); err == nil {
if result.Closed != nil {
// Run close hook
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, result.Closed)
}
if jsonOutput {
closedIssues = append(closedIssues, result.Closed)
}
}
if !jsonOutput {
fmt.Printf("%s Closed %s: %s\n", ui.RenderPass("✓"), id, reason)
// Display newly unblocked issues
if len(result.Unblocked) > 0 {
fmt.Printf("\nNewly unblocked:\n")
for _, issue := range result.Unblocked {
fmt.Printf(" • %s %q (P%d)\n", issue.ID, issue.Title, issue.Priority)
}
}
}
}
} else {
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err == nil {
// Run close hook
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, &issue)
}
if jsonOutput {
closedIssues = append(closedIssues, &issue)
}
}
if !jsonOutput {
fmt.Printf("%s Closed %s: %s\n", ui.RenderPass("✓"), id, reason)
}
}
}
// Handle --continue flag in daemon mode
// Note: --continue requires direct database access to walk parent-child chain
if continueFlag && len(closedIssues) > 0 {
fmt.Fprintf(os.Stderr, "\nNote: --continue requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon close %s --continue\n", resolvedIDs[0])
}
if jsonOutput && len(closedIssues) > 0 {
outputJSON(closedIssues)
}
return
}
// Direct mode
closedIssues := []*types.Issue{}
closedCount := 0
for _, id := range resolvedIDs {
// Get issue for checks
issue, _ := store.GetIssue(ctx, id)
if err := validateIssueClosable(id, issue, force); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
continue
}
if err := store.CloseIssue(ctx, id, reason, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
continue
}
closedCount++
// Run close hook
closedIssue, _ := store.GetIssue(ctx, id)
if closedIssue != nil && hookRunner != nil {
hookRunner.Run(hooks.EventClose, closedIssue)
}
if jsonOutput {
if closedIssue != nil {
closedIssues = append(closedIssues, closedIssue)
}
} else {
fmt.Printf("%s Closed %s: %s\n", ui.RenderPass("✓"), id, reason)
}
}
// Handle --suggest-next flag in direct mode
if suggestNext && len(resolvedIDs) == 1 && closedCount > 0 {
unblocked, err := store.GetNewlyUnblockedByClose(ctx, resolvedIDs[0])
if err == nil && len(unblocked) > 0 {
if jsonOutput {
outputJSON(map[string]interface{}{
"closed": closedIssues,
"unblocked": unblocked,
})
return
}
fmt.Printf("\nNewly unblocked:\n")
for _, issue := range unblocked {
fmt.Printf(" • %s %q (P%d)\n", issue.ID, issue.Title, issue.Priority)
}
}
}
// Schedule auto-flush if any issues were closed
if len(args) > 0 {
markDirtyAndScheduleFlush()
}
// Handle --continue flag
if continueFlag && len(resolvedIDs) == 1 && closedCount > 0 {
autoClaim := !noAuto
result, err := AdvanceToNextStep(ctx, store, resolvedIDs[0], autoClaim, actor)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not advance to next step: %v\n", err)
} else if result != nil {
if jsonOutput {
// Include continue result in JSON output
outputJSON(map[string]interface{}{
"closed": closedIssues,
"continue": result,
})
return
}
PrintContinueResult(result)
}
}
if jsonOutput && len(closedIssues) > 0 {
outputJSON(closedIssues)
}
},
}
func init() {
closeCmd.Flags().StringP("reason", "r", "", "Reason for closing")
closeCmd.Flags().String("resolution", "", "Alias for --reason (Jira CLI convention)")
_ = closeCmd.Flags().MarkHidden("resolution") // Hidden alias for agent/CLI ergonomics
closeCmd.Flags().BoolP("force", "f", false, "Force close pinned issues")
closeCmd.Flags().Bool("continue", false, "Auto-advance to next step in molecule")
closeCmd.Flags().Bool("no-auto", false, "With --continue, show next step but don't claim it")
closeCmd.Flags().Bool("suggest-next", false, "Show newly unblocked issues after closing")
rootCmd.AddCommand(closeCmd)
}

View File

@@ -2,17 +2,14 @@ package main
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/compact"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
var (
@@ -512,182 +509,6 @@ func runCompactStats(ctx context.Context, store *sqlite.SQLiteStorage) {
}
}
func progressBar(current, total int) string {
const width = 40
if total == 0 {
return "[" + string(make([]byte, width)) + "]"
}
filled := (current * width) / total
bar := ""
for i := 0; i < width; i++ {
if i < filled {
bar += "█"
} else {
bar += " "
}
}
return "[" + bar + "]"
}
//nolint:unparam // ctx may be used in future for cancellation
func runCompactRPC(_ context.Context) {
if compactID != "" && compactAll {
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
os.Exit(1)
}
if compactForce && compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
os.Exit(1)
}
if compactID == "" && !compactAll && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
os.Exit(1)
}
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n")
os.Exit(1)
}
args := map[string]interface{}{
"tier": compactTier,
"dry_run": compactDryRun,
"force": compactForce,
"all": compactAll,
"api_key": apiKey,
"workers": compactWorkers,
"batch_size": compactBatch,
}
if compactID != "" {
args["issue_id"] = compactID
}
resp, err := daemonClient.Execute("compact", args)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
}
if jsonOutput {
fmt.Println(string(resp.Data))
return
}
var result struct {
Success bool `json:"success"`
IssueID string `json:"issue_id,omitempty"`
OriginalSize int `json:"original_size,omitempty"`
CompactedSize int `json:"compacted_size,omitempty"`
Reduction string `json:"reduction,omitempty"`
Duration string `json:"duration,omitempty"`
DryRun bool `json:"dry_run,omitempty"`
Results []struct {
IssueID string `json:"issue_id"`
Success bool `json:"success"`
Error string `json:"error,omitempty"`
OriginalSize int `json:"original_size,omitempty"`
CompactedSize int `json:"compacted_size,omitempty"`
Reduction string `json:"reduction,omitempty"`
} `json:"results,omitempty"`
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
if compactID != "" {
if result.DryRun {
fmt.Printf("DRY RUN - Tier %d compaction\n\n", compactTier)
fmt.Printf("Issue: %s\n", compactID)
fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
fmt.Printf("Estimated reduction: %s\n", result.Reduction)
} else {
fmt.Printf("Successfully compacted %s\n", result.IssueID)
fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
fmt.Printf("Compacted size: %d bytes\n", result.CompactedSize)
fmt.Printf("Reduction: %s\n", result.Reduction)
fmt.Printf("Duration: %s\n", result.Duration)
}
} else if compactAll {
if result.DryRun {
fmt.Printf("DRY RUN - Found %d candidates for Tier %d compaction\n", len(result.Results), compactTier)
} else {
successCount := 0
for _, r := range result.Results {
if r.Success {
successCount++
}
}
fmt.Printf("Compacted %d/%d issues in %s\n", successCount, len(result.Results), result.Duration)
for _, r := range result.Results {
if r.Success {
fmt.Printf(" ✓ %s: %d → %d bytes (%s)\n", r.IssueID, r.OriginalSize, r.CompactedSize, r.Reduction)
} else {
fmt.Printf(" ✗ %s: %s\n", r.IssueID, r.Error)
}
}
}
}
}
func runCompactStatsRPC() {
args := map[string]interface{}{
"tier": compactTier,
}
resp, err := daemonClient.Execute("compact_stats", args)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
}
if jsonOutput {
fmt.Println(string(resp.Data))
return
}
var result struct {
Success bool `json:"success"`
Stats struct {
Tier1Candidates int `json:"tier1_candidates"`
Tier2Candidates int `json:"tier2_candidates"`
TotalClosed int `json:"total_closed"`
Tier1MinAge string `json:"tier1_min_age"`
Tier2MinAge string `json:"tier2_min_age"`
EstimatedSavings string `json:"estimated_savings,omitempty"`
} `json:"stats"`
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
fmt.Printf("\nCompaction Statistics\n")
fmt.Printf("=====================\n\n")
fmt.Printf("Total closed issues: %d\n\n", result.Stats.TotalClosed)
fmt.Printf("Tier 1 (30+ days closed, not compacted):\n")
fmt.Printf(" Candidates: %d\n", result.Stats.Tier1Candidates)
fmt.Printf(" Min age: %s\n\n", result.Stats.Tier1MinAge)
fmt.Printf("Tier 2 (90+ days closed, Tier 1 compacted):\n")
fmt.Printf(" Candidates: %d\n", result.Stats.Tier2Candidates)
fmt.Printf(" Min age: %s\n", result.Stats.Tier2MinAge)
}
func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
type Candidate struct {
ID string `json:"id"`
@@ -929,251 +750,6 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
markDirtyAndScheduleFlush()
}
// TombstonePruneResult contains the results of tombstone pruning
type TombstonePruneResult struct {
PrunedCount int
PrunedIDs []string
TTLDays int
}
// pruneExpiredTombstones reads issues.jsonl, removes expired tombstones,
// and writes back the pruned file. Returns the prune result.
// If customTTL is > 0, it overrides the default TTL (bypasses MinTombstoneTTL safety).
// If customTTL is 0, uses DefaultTombstoneTTL.
func pruneExpiredTombstones(customTTL time.Duration) (*TombstonePruneResult, error) {
beadsDir := filepath.Dir(dbPath)
issuesPath := filepath.Join(beadsDir, "issues.jsonl")
// Check if issues.jsonl exists
if _, err := os.Stat(issuesPath); os.IsNotExist(err) {
return &TombstonePruneResult{}, nil
}
// Read all issues
// nolint:gosec // G304: issuesPath is controlled from beadsDir
file, err := os.Open(issuesPath)
if err != nil {
return nil, fmt.Errorf("failed to open issues.jsonl: %w", err)
}
var allIssues []*types.Issue
decoder := json.NewDecoder(file)
for {
var issue types.Issue
if err := decoder.Decode(&issue); err != nil {
if err.Error() == "EOF" {
break
}
// Skip corrupt lines
continue
}
allIssues = append(allIssues, &issue)
}
if err := file.Close(); err != nil {
return nil, fmt.Errorf("failed to close issues file: %w", err)
}
// Determine TTL - customTTL > 0 overrides default (for --hard mode)
ttl := types.DefaultTombstoneTTL
if customTTL > 0 {
ttl = customTTL
}
ttlDays := int(ttl.Hours() / 24)
// Filter out expired tombstones
var kept []*types.Issue
var prunedIDs []string
for _, issue := range allIssues {
if issue.IsExpired(ttl) {
prunedIDs = append(prunedIDs, issue.ID)
} else {
kept = append(kept, issue)
}
}
if len(prunedIDs) == 0 {
return &TombstonePruneResult{TTLDays: ttlDays}, nil
}
// Write back the pruned file atomically
dir := filepath.Dir(issuesPath)
base := filepath.Base(issuesPath)
tempFile, err := os.CreateTemp(dir, base+".prune.*")
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %w", err)
}
tempPath := tempFile.Name()
encoder := json.NewEncoder(tempFile)
for _, issue := range kept {
if err := encoder.Encode(issue); err != nil {
_ = tempFile.Close()
_ = os.Remove(tempPath)
return nil, fmt.Errorf("failed to write issue %s: %w", issue.ID, err)
}
}
if err := tempFile.Close(); err != nil {
_ = os.Remove(tempPath)
return nil, fmt.Errorf("failed to close temp file: %w", err)
}
// Atomically replace
if err := os.Rename(tempPath, issuesPath); err != nil {
_ = os.Remove(tempPath)
return nil, fmt.Errorf("failed to replace issues.jsonl: %w", err)
}
return &TombstonePruneResult{
PrunedCount: len(prunedIDs),
PrunedIDs: prunedIDs,
TTLDays: ttlDays,
}, nil
}
// previewPruneTombstones checks what tombstones would be pruned without modifying files.
// Used for dry-run mode in cleanup command.
// If customTTL is > 0, it overrides the default TTL (bypasses MinTombstoneTTL safety).
// If customTTL is 0, uses DefaultTombstoneTTL.
func previewPruneTombstones(customTTL time.Duration) (*TombstonePruneResult, error) {
beadsDir := filepath.Dir(dbPath)
issuesPath := filepath.Join(beadsDir, "issues.jsonl")
// Check if issues.jsonl exists
if _, err := os.Stat(issuesPath); os.IsNotExist(err) {
return &TombstonePruneResult{}, nil
}
// Read all issues
// nolint:gosec // G304: issuesPath is controlled from beadsDir
file, err := os.Open(issuesPath)
if err != nil {
return nil, fmt.Errorf("failed to open issues.jsonl: %w", err)
}
defer file.Close()
var allIssues []*types.Issue
decoder := json.NewDecoder(file)
for {
var issue types.Issue
if err := decoder.Decode(&issue); err != nil {
if err.Error() == "EOF" {
break
}
// Skip corrupt lines
continue
}
allIssues = append(allIssues, &issue)
}
// Determine TTL - customTTL > 0 overrides default (for --hard mode)
ttl := types.DefaultTombstoneTTL
if customTTL > 0 {
ttl = customTTL
}
ttlDays := int(ttl.Hours() / 24)
// Count expired tombstones
var prunedIDs []string
for _, issue := range allIssues {
if issue.IsExpired(ttl) {
prunedIDs = append(prunedIDs, issue.ID)
}
}
return &TombstonePruneResult{
PrunedCount: len(prunedIDs),
PrunedIDs: prunedIDs,
TTLDays: ttlDays,
}, nil
}
// runCompactPrune handles the --prune mode for standalone tombstone pruning.
// This mode only prunes expired tombstones from issues.jsonl without doing
// any semantic compaction. It's useful for reducing sync overhead.
func runCompactPrune() {
start := time.Now()
// Calculate TTL from --older-than flag (0 means use default 30 days)
var customTTL time.Duration
if compactOlderThan > 0 {
customTTL = time.Duration(compactOlderThan) * 24 * time.Hour
}
if compactDryRun {
// Preview mode - show what would be pruned
result, err := previewPruneTombstones(customTTL)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to preview tombstones: %v\n", err)
os.Exit(1)
}
if jsonOutput {
output := map[string]interface{}{
"dry_run": true,
"prune_count": result.PrunedCount,
"ttl_days": result.TTLDays,
"tombstone_ids": result.PrunedIDs,
}
outputJSON(output)
return
}
fmt.Printf("DRY RUN - Tombstone Pruning\n\n")
fmt.Printf("TTL: %d days\n", result.TTLDays)
fmt.Printf("Tombstones that would be pruned: %d\n", result.PrunedCount)
if len(result.PrunedIDs) > 0 && len(result.PrunedIDs) <= 20 {
fmt.Println("\nTombstone IDs:")
for _, id := range result.PrunedIDs {
fmt.Printf(" - %s\n", id)
}
} else if len(result.PrunedIDs) > 20 {
fmt.Printf("\nFirst 20 tombstone IDs:\n")
for _, id := range result.PrunedIDs[:20] {
fmt.Printf(" - %s\n", id)
}
fmt.Printf(" ... and %d more\n", len(result.PrunedIDs)-20)
}
return
}
// Actually prune tombstones
result, err := pruneExpiredTombstones(customTTL)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to prune tombstones: %v\n", err)
os.Exit(1)
}
elapsed := time.Since(start)
if jsonOutput {
output := map[string]interface{}{
"success": true,
"pruned_count": result.PrunedCount,
"ttl_days": result.TTLDays,
"tombstone_ids": result.PrunedIDs,
"elapsed_ms": elapsed.Milliseconds(),
}
outputJSON(output)
return
}
if result.PrunedCount == 0 {
fmt.Printf("No expired tombstones to prune (TTL: %d days)\n", result.TTLDays)
return
}
fmt.Printf("✓ Pruned %d expired tombstone(s)\n", result.PrunedCount)
fmt.Printf(" TTL: %d days\n", result.TTLDays)
fmt.Printf(" Time: %v\n", elapsed)
if len(result.PrunedIDs) <= 10 {
fmt.Println("\nPruned IDs:")
for _, id := range result.PrunedIDs {
fmt.Printf(" - %s\n", id)
}
}
}
func init() {
compactCmd.Flags().BoolVar(&compactDryRun, "dry-run", false, "Preview without compacting")
compactCmd.Flags().IntVar(&compactTier, "tier", 1, "Compaction tier (1 or 2)")

184
cmd/bd/compact_rpc.go Normal file
View File

@@ -0,0 +1,184 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
)
func progressBar(current, total int) string {
const width = 40
if total == 0 {
return "[" + string(make([]byte, width)) + "]"
}
filled := (current * width) / total
bar := ""
for i := 0; i < width; i++ {
if i < filled {
bar += "█"
} else {
bar += " "
}
}
return "[" + bar + "]"
}
//nolint:unparam // ctx may be used in future for cancellation
func runCompactRPC(_ context.Context) {
if compactID != "" && compactAll {
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
os.Exit(1)
}
if compactForce && compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
os.Exit(1)
}
if compactID == "" && !compactAll && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
os.Exit(1)
}
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n")
os.Exit(1)
}
args := map[string]interface{}{
"tier": compactTier,
"dry_run": compactDryRun,
"force": compactForce,
"all": compactAll,
"api_key": apiKey,
"workers": compactWorkers,
"batch_size": compactBatch,
}
if compactID != "" {
args["issue_id"] = compactID
}
resp, err := daemonClient.Execute("compact", args)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
}
if jsonOutput {
fmt.Println(string(resp.Data))
return
}
var result struct {
Success bool `json:"success"`
IssueID string `json:"issue_id,omitempty"`
OriginalSize int `json:"original_size,omitempty"`
CompactedSize int `json:"compacted_size,omitempty"`
Reduction string `json:"reduction,omitempty"`
Duration string `json:"duration,omitempty"`
DryRun bool `json:"dry_run,omitempty"`
Results []struct {
IssueID string `json:"issue_id"`
Success bool `json:"success"`
Error string `json:"error,omitempty"`
OriginalSize int `json:"original_size,omitempty"`
CompactedSize int `json:"compacted_size,omitempty"`
Reduction string `json:"reduction,omitempty"`
} `json:"results,omitempty"`
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
if compactID != "" {
if result.DryRun {
fmt.Printf("DRY RUN - Tier %d compaction\n\n", compactTier)
fmt.Printf("Issue: %s\n", compactID)
fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
fmt.Printf("Estimated reduction: %s\n", result.Reduction)
} else {
fmt.Printf("Successfully compacted %s\n", result.IssueID)
fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
fmt.Printf("Compacted size: %d bytes\n", result.CompactedSize)
fmt.Printf("Reduction: %s\n", result.Reduction)
fmt.Printf("Duration: %s\n", result.Duration)
}
} else if compactAll {
if result.DryRun {
fmt.Printf("DRY RUN - Found %d candidates for Tier %d compaction\n", len(result.Results), compactTier)
} else {
successCount := 0
for _, r := range result.Results {
if r.Success {
successCount++
}
}
fmt.Printf("Compacted %d/%d issues in %s\n", successCount, len(result.Results), result.Duration)
for _, r := range result.Results {
if r.Success {
fmt.Printf(" ✓ %s: %d → %d bytes (%s)\n", r.IssueID, r.OriginalSize, r.CompactedSize, r.Reduction)
} else {
fmt.Printf(" ✗ %s: %s\n", r.IssueID, r.Error)
}
}
}
}
}
func runCompactStatsRPC() {
args := map[string]interface{}{
"tier": compactTier,
}
resp, err := daemonClient.Execute("compact_stats", args)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
}
if jsonOutput {
fmt.Println(string(resp.Data))
return
}
var result struct {
Success bool `json:"success"`
Stats struct {
Tier1Candidates int `json:"tier1_candidates"`
Tier2Candidates int `json:"tier2_candidates"`
TotalClosed int `json:"total_closed"`
Tier1MinAge string `json:"tier1_min_age"`
Tier2MinAge string `json:"tier2_min_age"`
EstimatedSavings string `json:"estimated_savings,omitempty"`
} `json:"stats"`
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
fmt.Printf("\nCompaction Statistics\n")
fmt.Printf("=====================\n\n")
fmt.Printf("Total closed issues: %d\n\n", result.Stats.TotalClosed)
fmt.Printf("Tier 1 (30+ days closed, not compacted):\n")
fmt.Printf(" Candidates: %d\n", result.Stats.Tier1Candidates)
fmt.Printf(" Min age: %s\n\n", result.Stats.Tier1MinAge)
fmt.Printf("Tier 2 (90+ days closed, Tier 1 compacted):\n")
fmt.Printf(" Candidates: %d\n", result.Stats.Tier2Candidates)
fmt.Printf(" Min age: %s\n", result.Stats.Tier2MinAge)
}

256
cmd/bd/compact_tombstone.go Normal file
View File

@@ -0,0 +1,256 @@
package main
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/steveyegge/beads/internal/types"
)
// TombstonePruneResult contains the results of tombstone pruning
type TombstonePruneResult struct {
PrunedCount int
PrunedIDs []string
TTLDays int
}
// pruneExpiredTombstones reads issues.jsonl, removes expired tombstones,
// and writes back the pruned file. Returns the prune result.
// If customTTL is > 0, it overrides the default TTL (bypasses MinTombstoneTTL safety).
// If customTTL is 0, uses DefaultTombstoneTTL.
func pruneExpiredTombstones(customTTL time.Duration) (*TombstonePruneResult, error) {
beadsDir := filepath.Dir(dbPath)
issuesPath := filepath.Join(beadsDir, "issues.jsonl")
// Check if issues.jsonl exists
if _, err := os.Stat(issuesPath); os.IsNotExist(err) {
return &TombstonePruneResult{}, nil
}
// Read all issues
// nolint:gosec // G304: issuesPath is controlled from beadsDir
file, err := os.Open(issuesPath)
if err != nil {
return nil, fmt.Errorf("failed to open issues.jsonl: %w", err)
}
var allIssues []*types.Issue
decoder := json.NewDecoder(file)
for {
var issue types.Issue
if err := decoder.Decode(&issue); err != nil {
if err.Error() == "EOF" {
break
}
// Skip corrupt lines
continue
}
allIssues = append(allIssues, &issue)
}
if err := file.Close(); err != nil {
return nil, fmt.Errorf("failed to close issues file: %w", err)
}
// Determine TTL - customTTL > 0 overrides default (for --hard mode)
ttl := types.DefaultTombstoneTTL
if customTTL > 0 {
ttl = customTTL
}
ttlDays := int(ttl.Hours() / 24)
// Filter out expired tombstones
var kept []*types.Issue
var prunedIDs []string
for _, issue := range allIssues {
if issue.IsExpired(ttl) {
prunedIDs = append(prunedIDs, issue.ID)
} else {
kept = append(kept, issue)
}
}
if len(prunedIDs) == 0 {
return &TombstonePruneResult{TTLDays: ttlDays}, nil
}
// Write back the pruned file atomically
dir := filepath.Dir(issuesPath)
base := filepath.Base(issuesPath)
tempFile, err := os.CreateTemp(dir, base+".prune.*")
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %w", err)
}
tempPath := tempFile.Name()
encoder := json.NewEncoder(tempFile)
for _, issue := range kept {
if err := encoder.Encode(issue); err != nil {
_ = tempFile.Close()
_ = os.Remove(tempPath)
return nil, fmt.Errorf("failed to write issue %s: %w", issue.ID, err)
}
}
if err := tempFile.Close(); err != nil {
_ = os.Remove(tempPath)
return nil, fmt.Errorf("failed to close temp file: %w", err)
}
// Atomically replace
if err := os.Rename(tempPath, issuesPath); err != nil {
_ = os.Remove(tempPath)
return nil, fmt.Errorf("failed to replace issues.jsonl: %w", err)
}
return &TombstonePruneResult{
PrunedCount: len(prunedIDs),
PrunedIDs: prunedIDs,
TTLDays: ttlDays,
}, nil
}
// previewPruneTombstones checks what tombstones would be pruned without modifying files.
// Used for dry-run mode in cleanup command.
// If customTTL is > 0, it overrides the default TTL (bypasses MinTombstoneTTL safety).
// If customTTL is 0, uses DefaultTombstoneTTL.
func previewPruneTombstones(customTTL time.Duration) (*TombstonePruneResult, error) {
beadsDir := filepath.Dir(dbPath)
issuesPath := filepath.Join(beadsDir, "issues.jsonl")
// Check if issues.jsonl exists
if _, err := os.Stat(issuesPath); os.IsNotExist(err) {
return &TombstonePruneResult{}, nil
}
// Read all issues
// nolint:gosec // G304: issuesPath is controlled from beadsDir
file, err := os.Open(issuesPath)
if err != nil {
return nil, fmt.Errorf("failed to open issues.jsonl: %w", err)
}
defer file.Close()
var allIssues []*types.Issue
decoder := json.NewDecoder(file)
for {
var issue types.Issue
if err := decoder.Decode(&issue); err != nil {
if err.Error() == "EOF" {
break
}
// Skip corrupt lines
continue
}
allIssues = append(allIssues, &issue)
}
// Determine TTL - customTTL > 0 overrides default (for --hard mode)
ttl := types.DefaultTombstoneTTL
if customTTL > 0 {
ttl = customTTL
}
ttlDays := int(ttl.Hours() / 24)
// Count expired tombstones
var prunedIDs []string
for _, issue := range allIssues {
if issue.IsExpired(ttl) {
prunedIDs = append(prunedIDs, issue.ID)
}
}
return &TombstonePruneResult{
PrunedCount: len(prunedIDs),
PrunedIDs: prunedIDs,
TTLDays: ttlDays,
}, nil
}
// runCompactPrune handles the --prune mode for standalone tombstone pruning.
// This mode only prunes expired tombstones from issues.jsonl without doing
// any semantic compaction. It's useful for reducing sync overhead.
func runCompactPrune() {
start := time.Now()
// Calculate TTL from --older-than flag (0 means use default 30 days)
var customTTL time.Duration
if compactOlderThan > 0 {
customTTL = time.Duration(compactOlderThan) * 24 * time.Hour
}
if compactDryRun {
// Preview mode - show what would be pruned
result, err := previewPruneTombstones(customTTL)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to preview tombstones: %v\n", err)
os.Exit(1)
}
if jsonOutput {
output := map[string]interface{}{
"dry_run": true,
"prune_count": result.PrunedCount,
"ttl_days": result.TTLDays,
"tombstone_ids": result.PrunedIDs,
}
outputJSON(output)
return
}
fmt.Printf("DRY RUN - Tombstone Pruning\n\n")
fmt.Printf("TTL: %d days\n", result.TTLDays)
fmt.Printf("Tombstones that would be pruned: %d\n", result.PrunedCount)
if len(result.PrunedIDs) > 0 && len(result.PrunedIDs) <= 20 {
fmt.Println("\nTombstone IDs:")
for _, id := range result.PrunedIDs {
fmt.Printf(" - %s\n", id)
}
} else if len(result.PrunedIDs) > 20 {
fmt.Printf("\nFirst 20 tombstone IDs:\n")
for _, id := range result.PrunedIDs[:20] {
fmt.Printf(" - %s\n", id)
}
fmt.Printf(" ... and %d more\n", len(result.PrunedIDs)-20)
}
return
}
// Actually prune tombstones
result, err := pruneExpiredTombstones(customTTL)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to prune tombstones: %v\n", err)
os.Exit(1)
}
elapsed := time.Since(start)
if jsonOutput {
output := map[string]interface{}{
"success": true,
"pruned_count": result.PrunedCount,
"ttl_days": result.TTLDays,
"tombstone_ids": result.PrunedIDs,
"elapsed_ms": elapsed.Milliseconds(),
}
outputJSON(output)
return
}
if result.PrunedCount == 0 {
fmt.Printf("No expired tombstones to prune (TTL: %d days)\n", result.TTLDays)
return
}
fmt.Printf("✓ Pruned %d expired tombstone(s)\n", result.PrunedCount)
fmt.Printf(" TTL: %d days\n", result.TTLDays)
fmt.Printf(" Time: %v\n", elapsed)
if len(result.PrunedIDs) <= 10 {
fmt.Println("\nPruned IDs:")
for _, id := range result.PrunedIDs {
fmt.Printf(" - %s\n", id)
}
}
}

View File

@@ -1,25 +1,17 @@
package main
import (
"bufio"
"database/sql"
"encoding/json"
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"time"
_ "github.com/ncruces/go-sqlite3/driver"
_ "github.com/ncruces/go-sqlite3/embed"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/cmd/bd/doctor"
"github.com/steveyegge/beads/cmd/bd/doctor/fix"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/syncbranch"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
)
@@ -227,460 +219,6 @@ func init() {
doctorCmd.Flags().BoolVar(&doctorFixChildParent, "fix-child-parent", false, "Remove child→parent dependencies (opt-in)")
}
// previewFixes shows what would be fixed without applying changes
func previewFixes(result doctorResult) {
// Collect all fixable issues
var fixableIssues []doctorCheck
for _, check := range result.Checks {
if (check.Status == statusWarning || check.Status == statusError) && check.Fix != "" {
fixableIssues = append(fixableIssues, check)
}
}
if len(fixableIssues) == 0 {
fmt.Println("\n✓ No fixable issues found (dry-run)")
return
}
fmt.Println("\n[DRY-RUN] The following issues would be fixed with --fix:")
fmt.Println()
for i, issue := range fixableIssues {
// Show the issue details
fmt.Printf(" %d. %s\n", i+1, issue.Name)
if issue.Status == statusError {
fmt.Printf(" Status: %s\n", ui.RenderFail("ERROR"))
} else {
fmt.Printf(" Status: %s\n", ui.RenderWarn("WARNING"))
}
fmt.Printf(" Issue: %s\n", issue.Message)
if issue.Detail != "" {
fmt.Printf(" Detail: %s\n", issue.Detail)
}
fmt.Printf(" Fix: %s\n", issue.Fix)
fmt.Println()
}
fmt.Printf("[DRY-RUN] Would attempt to fix %d issue(s)\n", len(fixableIssues))
fmt.Println("Run 'bd doctor --fix' to apply these fixes")
}
func applyFixes(result doctorResult) {
// Collect all fixable issues
var fixableIssues []doctorCheck
for _, check := range result.Checks {
if (check.Status == statusWarning || check.Status == statusError) && check.Fix != "" {
fixableIssues = append(fixableIssues, check)
}
}
if len(fixableIssues) == 0 {
fmt.Println("\nNo fixable issues found.")
return
}
// Show what will be fixed
fmt.Println("\nFixable issues:")
for i, issue := range fixableIssues {
fmt.Printf(" %d. %s: %s\n", i+1, issue.Name, issue.Message)
}
// Interactive mode - confirm each fix individually
if doctorInteractive {
applyFixesInteractive(result.Path, fixableIssues)
return
}
// Ask for confirmation (skip if --yes flag is set)
if !doctorYes {
fmt.Printf("\nThis will attempt to fix %d issue(s). Continue? (Y/n): ", len(fixableIssues))
reader := bufio.NewReader(os.Stdin)
response, err := reader.ReadString('\n')
if err != nil {
fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
return
}
response = strings.TrimSpace(strings.ToLower(response))
if response != "" && response != "y" && response != "yes" {
fmt.Println("Fix canceled.")
return
}
}
// Apply fixes
fmt.Println("\nApplying fixes...")
applyFixList(result.Path, fixableIssues)
}
// applyFixesInteractive prompts for each fix individually
func applyFixesInteractive(path string, issues []doctorCheck) {
reader := bufio.NewReader(os.Stdin)
applyAll := false
var approvedFixes []doctorCheck
fmt.Println("\nReview each fix:")
fmt.Println(" [y]es - apply this fix")
fmt.Println(" [n]o - skip this fix")
fmt.Println(" [a]ll - apply all remaining fixes")
fmt.Println(" [q]uit - stop without applying more fixes")
fmt.Println()
for i, issue := range issues {
// Show issue details
fmt.Printf("(%d/%d) %s\n", i+1, len(issues), issue.Name)
if issue.Status == statusError {
fmt.Printf(" Status: %s\n", ui.RenderFail("ERROR"))
} else {
fmt.Printf(" Status: %s\n", ui.RenderWarn("WARNING"))
}
fmt.Printf(" Issue: %s\n", issue.Message)
if issue.Detail != "" {
fmt.Printf(" Detail: %s\n", issue.Detail)
}
fmt.Printf(" Fix: %s\n", issue.Fix)
// Check if we should apply all remaining
if applyAll {
fmt.Println(" → Auto-approved (apply all)")
approvedFixes = append(approvedFixes, issue)
continue
}
// Prompt for this fix
fmt.Print("\n Apply this fix? [y/n/a/q]: ")
response, err := reader.ReadString('\n')
if err != nil {
fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
return
}
response = strings.TrimSpace(strings.ToLower(response))
switch response {
case "y", "yes":
approvedFixes = append(approvedFixes, issue)
fmt.Println(" → Approved")
case "n", "no", "":
fmt.Println(" → Skipped")
case "a", "all":
applyAll = true
approvedFixes = append(approvedFixes, issue)
fmt.Println(" → Approved (applying all remaining)")
case "q", "quit":
fmt.Println(" → Quit")
if len(approvedFixes) > 0 {
fmt.Printf("\nApplying %d approved fix(es)...\n", len(approvedFixes))
applyFixList(path, approvedFixes)
} else {
fmt.Println("\nNo fixes applied.")
}
return
default:
// Treat unknown input as skip
fmt.Println(" → Skipped (unrecognized input)")
}
fmt.Println()
}
// Apply all approved fixes
if len(approvedFixes) > 0 {
fmt.Printf("\nApplying %d approved fix(es)...\n", len(approvedFixes))
applyFixList(path, approvedFixes)
} else {
fmt.Println("\nNo fixes approved.")
}
}
// applyFixList applies a list of fixes and reports results
func applyFixList(path string, fixes []doctorCheck) {
// Apply fixes in a dependency-aware order.
// Rough dependency chain:
// permissions/daemon cleanup → config sanity → DB integrity/migrations → DB↔JSONL sync.
order := []string{
"Permissions",
"Daemon Health",
"Database Config",
"JSONL Config",
"Database Integrity",
"Database",
"Schema Compatibility",
"JSONL Integrity",
"DB-JSONL Sync",
}
priority := make(map[string]int, len(order))
for i, name := range order {
priority[name] = i
}
slices.SortStableFunc(fixes, func(a, b doctorCheck) int {
pa, oka := priority[a.Name]
if !oka {
pa = 1000
}
pb, okb := priority[b.Name]
if !okb {
pb = 1000
}
if pa < pb {
return -1
}
if pa > pb {
return 1
}
return 0
})
fixedCount := 0
errorCount := 0
for _, check := range fixes {
fmt.Printf("\nFixing %s...\n", check.Name)
var err error
switch check.Name {
case "Gitignore":
err = doctor.FixGitignore()
case "Git Hooks":
err = fix.GitHooks(path)
case "Daemon Health":
err = fix.Daemon(path)
case "DB-JSONL Sync":
err = fix.DBJSONLSync(path)
case "Permissions":
err = fix.Permissions(path)
case "Database":
err = fix.DatabaseVersion(path)
case "Database Integrity":
// Corruption detected - try recovery from JSONL
err = fix.DatabaseCorruptionRecovery(path)
case "Schema Compatibility":
err = fix.SchemaCompatibility(path)
case "Repo Fingerprint":
err = fix.RepoFingerprint(path)
case "Git Merge Driver":
err = fix.MergeDriver(path)
case "Sync Branch Config":
// No auto-fix: sync-branch should be added to config.yaml (version controlled)
fmt.Printf(" ⚠ Add 'sync-branch: beads-sync' to .beads/config.yaml\n")
continue
case "Database Config":
err = fix.DatabaseConfig(path)
case "JSONL Config":
err = fix.LegacyJSONLConfig(path)
case "JSONL Integrity":
err = fix.JSONLIntegrity(path)
case "Deletions Manifest":
err = fix.MigrateTombstones(path)
case "Untracked Files":
err = fix.UntrackedJSONL(path)
case "Sync Branch Health":
// Get sync branch from config
syncBranch := syncbranch.GetFromYAML()
if syncBranch == "" {
fmt.Printf(" ⚠ No sync branch configured in config.yaml\n")
continue
}
err = fix.SyncBranchHealth(path, syncBranch)
case "Merge Artifacts":
err = fix.MergeArtifacts(path)
case "Orphaned Dependencies":
err = fix.OrphanedDependencies(path)
case "Child-Parent Dependencies":
// Requires explicit opt-in flag (destructive, may remove intentional deps)
if !doctorFixChildParent {
fmt.Printf(" ⚠ Child→parent deps require explicit opt-in: bd doctor --fix --fix-child-parent\n")
continue
}
err = fix.ChildParentDependencies(path)
case "Duplicate Issues":
// No auto-fix: duplicates require user review
fmt.Printf(" ⚠ Run 'bd duplicates' to review and merge duplicates\n")
continue
case "Test Pollution":
// No auto-fix: test cleanup requires user review
fmt.Printf(" ⚠ Run 'bd doctor --check=pollution' to review and clean test issues\n")
continue
case "Git Conflicts":
// No auto-fix: git conflicts require manual resolution
fmt.Printf(" ⚠ Resolve conflicts manually: git checkout --ours or --theirs .beads/issues.jsonl\n")
continue
case "Stale Closed Issues":
// consolidate cleanup into doctor --fix
err = fix.StaleClosedIssues(path)
case "Expired Tombstones":
// consolidate cleanup into doctor --fix
err = fix.ExpiredTombstones(path)
case "Compaction Candidates":
// No auto-fix: compaction requires agent review
fmt.Printf(" ⚠ Run 'bd compact --analyze' to review candidates\n")
continue
case "Large Database":
// No auto-fix: pruning deletes data, must be user-controlled
fmt.Printf(" ⚠ Run 'bd cleanup --older-than 90' to prune old closed issues\n")
continue
default:
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
fmt.Printf(" Manual fix: %s\n", check.Fix)
continue
}
if err != nil {
errorCount++
fmt.Printf(" %s Error: %v\n", ui.RenderFail("✗"), err)
fmt.Printf(" Manual fix: %s\n", check.Fix)
} else {
fixedCount++
fmt.Printf(" %s Fixed\n", ui.RenderPass("✓"))
}
}
// Summary
fmt.Printf("\nFix summary: %d fixed, %d errors\n", fixedCount, errorCount)
if errorCount > 0 {
fmt.Println("\nSome fixes failed. Please review the errors above and apply manual fixes as needed.")
}
}
// runCheckHealth runs lightweight health checks for git hooks.
// Silent on success, prints a hint if issues detected.
// Respects hints.doctor config setting.
func runCheckHealth(path string) {
beadsDir := filepath.Join(path, ".beads")
// Check if .beads/ exists
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
// No .beads directory - nothing to check
return
}
// Get database path once (centralized path resolution)
dbPath := getCheckHealthDBPath(beadsDir)
// Check if database exists
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
// No database - only check hooks
if issue := doctor.CheckHooksQuick(Version); issue != "" {
printCheckHealthHint([]string{issue})
}
return
}
// Open database once for all checks (single DB connection)
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err != nil {
// Can't open DB - only check hooks
if issue := doctor.CheckHooksQuick(Version); issue != "" {
printCheckHealthHint([]string{issue})
}
return
}
defer db.Close()
// Check if hints.doctor is disabled in config
if hintsDisabledDB(db) {
return
}
// Run lightweight checks
var issues []string
// Check 1: Database version mismatch (CLI vs database bd_version)
if issue := checkVersionMismatchDB(db); issue != "" {
issues = append(issues, issue)
}
// Check 2: Sync branch not configured (now reads from config.yaml, not DB)
if issue := doctor.CheckSyncBranchQuick(); issue != "" {
issues = append(issues, issue)
}
// Check 3: Outdated git hooks
if issue := doctor.CheckHooksQuick(Version); issue != "" {
issues = append(issues, issue)
}
// Check 3: Sync-branch hook compatibility (issue #532)
if issue := doctor.CheckSyncBranchHookQuick(path); issue != "" {
issues = append(issues, issue)
}
// If any issues found, print hint
if len(issues) > 0 {
printCheckHealthHint(issues)
}
// Silent exit on success
}
// runDeepValidation runs full graph integrity validation
func runDeepValidation(path string) {
// Show warning about potential slowness
fmt.Println("Running deep validation (may be slow on large databases)...")
fmt.Println()
result := doctor.RunDeepValidation(path)
if jsonOutput {
jsonBytes, err := doctor.DeepValidationResultJSON(result)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
fmt.Println(string(jsonBytes))
} else {
doctor.PrintDeepValidationResult(result)
}
if !result.OverallOK {
os.Exit(1)
}
}
// printCheckHealthHint prints the health check hint and exits with error.
func printCheckHealthHint(issues []string) {
fmt.Fprintf(os.Stderr, "💡 bd doctor recommends a health check:\n")
for _, issue := range issues {
fmt.Fprintf(os.Stderr, " • %s\n", issue)
}
fmt.Fprintf(os.Stderr, " Run 'bd doctor' for details, or 'bd doctor --fix' to auto-repair\n")
fmt.Fprintf(os.Stderr, " (Suppress with: bd config set %s false)\n", ConfigKeyHintsDoctor)
os.Exit(1)
}
// getCheckHealthDBPath returns the database path for check-health operations.
// This centralizes the path resolution logic.
func getCheckHealthDBPath(beadsDir string) string {
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
return cfg.DatabasePath(beadsDir)
}
return filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// hintsDisabledDB checks if hints.doctor is set to "false" using an existing DB connection.
// Used by runCheckHealth to avoid multiple DB opens.
func hintsDisabledDB(db *sql.DB) bool {
var value string
err := db.QueryRow("SELECT value FROM config WHERE key = ?", ConfigKeyHintsDoctor).Scan(&value)
if err != nil {
return false // Key not set, assume hints enabled
}
return strings.ToLower(value) == "false"
}
// checkVersionMismatchDB checks if CLI version differs from database bd_version.
// Uses an existing DB connection.
func checkVersionMismatchDB(db *sql.DB) string {
var dbVersion string
err := db.QueryRow("SELECT value FROM metadata WHERE key = 'bd_version'").Scan(&dbVersion)
if err != nil {
return "" // Can't read version, skip
}
if dbVersion != "" && dbVersion != Version {
return fmt.Sprintf("Version mismatch (CLI: %s, database: %s)", Version, dbVersion)
}
return ""
}
func runDiagnostics(path string) doctorResult {
result := doctorResult{
Path: path,
@@ -1150,146 +688,3 @@ func printDiagnostics(result doctorResult) {
}
}
// runPollutionCheck runs detailed test pollution detection
// This integrates the detect-pollution command functionality into doctor.
func runPollutionCheck(path string, clean bool, yes bool) {
// Ensure we have a store initialized (uses direct mode, no daemon support yet)
if err := ensureDirectMode("pollution check requires direct mode"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
ctx := rootCtx
// Get all issues
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching issues: %v\n", err)
os.Exit(1)
}
// Detect pollution (reuse detectTestPollution from detect_pollution.go)
polluted := detectTestPollution(allIssues)
if len(polluted) == 0 {
if !jsonOutput {
fmt.Println("No test pollution detected!")
} else {
outputJSON(map[string]interface{}{
"polluted_count": 0,
"issues": []interface{}{},
})
}
return
}
// Categorize by confidence
highConfidence := []pollutionResult{}
mediumConfidence := []pollutionResult{}
for _, p := range polluted {
if p.score >= 0.9 {
highConfidence = append(highConfidence, p)
} else {
mediumConfidence = append(mediumConfidence, p)
}
}
if jsonOutput {
result := map[string]interface{}{
"polluted_count": len(polluted),
"high_confidence": len(highConfidence),
"medium_confidence": len(mediumConfidence),
"issues": []map[string]interface{}{},
}
for _, p := range polluted {
result["issues"] = append(result["issues"].([]map[string]interface{}), map[string]interface{}{
"id": p.issue.ID,
"title": p.issue.Title,
"score": p.score,
"reasons": p.reasons,
"created_at": p.issue.CreatedAt,
})
}
outputJSON(result)
return
}
// Human-readable output
fmt.Printf("Found %d potential test issues:\n\n", len(polluted))
if len(highConfidence) > 0 {
fmt.Printf("High Confidence (score ≥ 0.9):\n")
for _, p := range highConfidence {
fmt.Printf(" %s: %q (score: %.2f)\n", p.issue.ID, p.issue.Title, p.score)
for _, reason := range p.reasons {
fmt.Printf(" - %s\n", reason)
}
}
fmt.Printf(" (Total: %d issues)\n\n", len(highConfidence))
}
if len(mediumConfidence) > 0 {
fmt.Printf("Medium Confidence (score 0.7-0.9):\n")
for _, p := range mediumConfidence {
fmt.Printf(" %s: %q (score: %.2f)\n", p.issue.ID, p.issue.Title, p.score)
for _, reason := range p.reasons {
fmt.Printf(" - %s\n", reason)
}
}
fmt.Printf(" (Total: %d issues)\n\n", len(mediumConfidence))
}
if !clean {
fmt.Printf("Run 'bd doctor --check=pollution --clean' to delete these issues (with confirmation).\n")
return
}
// Confirmation prompt
if !yes {
fmt.Printf("\nDelete %d test issues? [y/N] ", len(polluted))
var response string
_, _ = fmt.Scanln(&response)
if strings.ToLower(response) != "y" {
fmt.Println("Canceled.")
return
}
}
// Backup to JSONL before deleting
backupPath := ".beads/pollution-backup.jsonl"
if err := backupPollutedIssues(polluted, backupPath); err != nil {
fmt.Fprintf(os.Stderr, "Error backing up issues: %v\n", err)
os.Exit(1)
}
fmt.Printf("Backed up %d issues to %s\n", len(polluted), backupPath)
// Delete issues
fmt.Printf("\nDeleting %d issues...\n", len(polluted))
deleted := 0
for _, p := range polluted {
if err := deleteIssue(ctx, p.issue.ID); err != nil {
fmt.Fprintf(os.Stderr, "Error deleting %s: %v\n", p.issue.ID, err)
continue
}
deleted++
}
// Schedule auto-flush
markDirtyAndScheduleFlush()
fmt.Printf("%s Deleted %d test issues\n", ui.RenderPass("✓"), deleted)
fmt.Printf("\nCleanup complete. To restore, run: bd import %s\n", backupPath)
}
func init() {
rootCmd.AddCommand(doctorCmd)
doctorCmd.Flags().BoolVar(&perfMode, "perf", false, "Run performance diagnostics and generate CPU profile")
doctorCmd.Flags().BoolVar(&checkHealthMode, "check-health", false, "Quick health check for git hooks (silent on success)")
doctorCmd.Flags().StringVarP(&doctorOutput, "output", "o", "", "Export diagnostics to JSON file")
doctorCmd.Flags().StringVar(&doctorCheckFlag, "check", "", "Run specific check in detail (e.g., 'pollution')")
doctorCmd.Flags().BoolVar(&doctorClean, "clean", false, "For pollution check: delete detected test issues")
doctorCmd.Flags().BoolVar(&doctorDeep, "deep", false, "Validate full graph integrity")
}

327
cmd/bd/doctor_fix.go Normal file
View File

@@ -0,0 +1,327 @@
package main
import (
"bufio"
"fmt"
"os"
"slices"
"strings"
"github.com/steveyegge/beads/cmd/bd/doctor"
"github.com/steveyegge/beads/cmd/bd/doctor/fix"
"github.com/steveyegge/beads/internal/syncbranch"
"github.com/steveyegge/beads/internal/ui"
)
// previewFixes shows what would be fixed without applying changes
func previewFixes(result doctorResult) {
// Collect all fixable issues
var fixableIssues []doctorCheck
for _, check := range result.Checks {
if (check.Status == statusWarning || check.Status == statusError) && check.Fix != "" {
fixableIssues = append(fixableIssues, check)
}
}
if len(fixableIssues) == 0 {
fmt.Println("\n✓ No fixable issues found (dry-run)")
return
}
fmt.Println("\n[DRY-RUN] The following issues would be fixed with --fix:")
fmt.Println()
for i, issue := range fixableIssues {
// Show the issue details
fmt.Printf(" %d. %s\n", i+1, issue.Name)
if issue.Status == statusError {
fmt.Printf(" Status: %s\n", ui.RenderFail("ERROR"))
} else {
fmt.Printf(" Status: %s\n", ui.RenderWarn("WARNING"))
}
fmt.Printf(" Issue: %s\n", issue.Message)
if issue.Detail != "" {
fmt.Printf(" Detail: %s\n", issue.Detail)
}
fmt.Printf(" Fix: %s\n", issue.Fix)
fmt.Println()
}
fmt.Printf("[DRY-RUN] Would attempt to fix %d issue(s)\n", len(fixableIssues))
fmt.Println("Run 'bd doctor --fix' to apply these fixes")
}
func applyFixes(result doctorResult) {
// Collect all fixable issues
var fixableIssues []doctorCheck
for _, check := range result.Checks {
if (check.Status == statusWarning || check.Status == statusError) && check.Fix != "" {
fixableIssues = append(fixableIssues, check)
}
}
if len(fixableIssues) == 0 {
fmt.Println("\nNo fixable issues found.")
return
}
// Show what will be fixed
fmt.Println("\nFixable issues:")
for i, issue := range fixableIssues {
fmt.Printf(" %d. %s: %s\n", i+1, issue.Name, issue.Message)
}
// Interactive mode - confirm each fix individually
if doctorInteractive {
applyFixesInteractive(result.Path, fixableIssues)
return
}
// Ask for confirmation (skip if --yes flag is set)
if !doctorYes {
fmt.Printf("\nThis will attempt to fix %d issue(s). Continue? (Y/n): ", len(fixableIssues))
reader := bufio.NewReader(os.Stdin)
response, err := reader.ReadString('\n')
if err != nil {
fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
return
}
response = strings.TrimSpace(strings.ToLower(response))
if response != "" && response != "y" && response != "yes" {
fmt.Println("Fix canceled.")
return
}
}
// Apply fixes
fmt.Println("\nApplying fixes...")
applyFixList(result.Path, fixableIssues)
}
// applyFixesInteractive prompts for each fix individually
func applyFixesInteractive(path string, issues []doctorCheck) {
reader := bufio.NewReader(os.Stdin)
applyAll := false
var approvedFixes []doctorCheck
fmt.Println("\nReview each fix:")
fmt.Println(" [y]es - apply this fix")
fmt.Println(" [n]o - skip this fix")
fmt.Println(" [a]ll - apply all remaining fixes")
fmt.Println(" [q]uit - stop without applying more fixes")
fmt.Println()
for i, issue := range issues {
// Show issue details
fmt.Printf("(%d/%d) %s\n", i+1, len(issues), issue.Name)
if issue.Status == statusError {
fmt.Printf(" Status: %s\n", ui.RenderFail("ERROR"))
} else {
fmt.Printf(" Status: %s\n", ui.RenderWarn("WARNING"))
}
fmt.Printf(" Issue: %s\n", issue.Message)
if issue.Detail != "" {
fmt.Printf(" Detail: %s\n", issue.Detail)
}
fmt.Printf(" Fix: %s\n", issue.Fix)
// Check if we should apply all remaining
if applyAll {
fmt.Println(" → Auto-approved (apply all)")
approvedFixes = append(approvedFixes, issue)
continue
}
// Prompt for this fix
fmt.Print("\n Apply this fix? [y/n/a/q]: ")
response, err := reader.ReadString('\n')
if err != nil {
fmt.Fprintf(os.Stderr, "Error reading input: %v\n", err)
return
}
response = strings.TrimSpace(strings.ToLower(response))
switch response {
case "y", "yes":
approvedFixes = append(approvedFixes, issue)
fmt.Println(" → Approved")
case "n", "no", "":
fmt.Println(" → Skipped")
case "a", "all":
applyAll = true
approvedFixes = append(approvedFixes, issue)
fmt.Println(" → Approved (applying all remaining)")
case "q", "quit":
fmt.Println(" → Quit")
if len(approvedFixes) > 0 {
fmt.Printf("\nApplying %d approved fix(es)...\n", len(approvedFixes))
applyFixList(path, approvedFixes)
} else {
fmt.Println("\nNo fixes applied.")
}
return
default:
// Treat unknown input as skip
fmt.Println(" → Skipped (unrecognized input)")
}
fmt.Println()
}
// Apply all approved fixes
if len(approvedFixes) > 0 {
fmt.Printf("\nApplying %d approved fix(es)...\n", len(approvedFixes))
applyFixList(path, approvedFixes)
} else {
fmt.Println("\nNo fixes approved.")
}
}
// applyFixList applies a list of fixes and reports results
func applyFixList(path string, fixes []doctorCheck) {
// Apply fixes in a dependency-aware order.
// Rough dependency chain:
// permissions/daemon cleanup → config sanity → DB integrity/migrations → DB↔JSONL sync.
order := []string{
"Permissions",
"Daemon Health",
"Database Config",
"JSONL Config",
"Database Integrity",
"Database",
"Schema Compatibility",
"JSONL Integrity",
"DB-JSONL Sync",
}
priority := make(map[string]int, len(order))
for i, name := range order {
priority[name] = i
}
slices.SortStableFunc(fixes, func(a, b doctorCheck) int {
pa, oka := priority[a.Name]
if !oka {
pa = 1000
}
pb, okb := priority[b.Name]
if !okb {
pb = 1000
}
if pa < pb {
return -1
}
if pa > pb {
return 1
}
return 0
})
fixedCount := 0
errorCount := 0
for _, check := range fixes {
fmt.Printf("\nFixing %s...\n", check.Name)
var err error
switch check.Name {
case "Gitignore":
err = doctor.FixGitignore()
case "Git Hooks":
err = fix.GitHooks(path)
case "Daemon Health":
err = fix.Daemon(path)
case "DB-JSONL Sync":
err = fix.DBJSONLSync(path)
case "Permissions":
err = fix.Permissions(path)
case "Database":
err = fix.DatabaseVersion(path)
case "Database Integrity":
// Corruption detected - try recovery from JSONL
err = fix.DatabaseCorruptionRecovery(path)
case "Schema Compatibility":
err = fix.SchemaCompatibility(path)
case "Repo Fingerprint":
err = fix.RepoFingerprint(path)
case "Git Merge Driver":
err = fix.MergeDriver(path)
case "Sync Branch Config":
// No auto-fix: sync-branch should be added to config.yaml (version controlled)
fmt.Printf(" ⚠ Add 'sync-branch: beads-sync' to .beads/config.yaml\n")
continue
case "Database Config":
err = fix.DatabaseConfig(path)
case "JSONL Config":
err = fix.LegacyJSONLConfig(path)
case "JSONL Integrity":
err = fix.JSONLIntegrity(path)
case "Deletions Manifest":
err = fix.MigrateTombstones(path)
case "Untracked Files":
err = fix.UntrackedJSONL(path)
case "Sync Branch Health":
// Get sync branch from config
syncBranch := syncbranch.GetFromYAML()
if syncBranch == "" {
fmt.Printf(" ⚠ No sync branch configured in config.yaml\n")
continue
}
err = fix.SyncBranchHealth(path, syncBranch)
case "Merge Artifacts":
err = fix.MergeArtifacts(path)
case "Orphaned Dependencies":
err = fix.OrphanedDependencies(path)
case "Child-Parent Dependencies":
// Requires explicit opt-in flag (destructive, may remove intentional deps)
if !doctorFixChildParent {
fmt.Printf(" ⚠ Child→parent deps require explicit opt-in: bd doctor --fix --fix-child-parent\n")
continue
}
err = fix.ChildParentDependencies(path)
case "Duplicate Issues":
// No auto-fix: duplicates require user review
fmt.Printf(" ⚠ Run 'bd duplicates' to review and merge duplicates\n")
continue
case "Test Pollution":
// No auto-fix: test cleanup requires user review
fmt.Printf(" ⚠ Run 'bd doctor --check=pollution' to review and clean test issues\n")
continue
case "Git Conflicts":
// No auto-fix: git conflicts require manual resolution
fmt.Printf(" ⚠ Resolve conflicts manually: git checkout --ours or --theirs .beads/issues.jsonl\n")
continue
case "Stale Closed Issues":
// consolidate cleanup into doctor --fix
err = fix.StaleClosedIssues(path)
case "Expired Tombstones":
// consolidate cleanup into doctor --fix
err = fix.ExpiredTombstones(path)
case "Compaction Candidates":
// No auto-fix: compaction requires agent review
fmt.Printf(" ⚠ Run 'bd compact --analyze' to review candidates\n")
continue
case "Large Database":
// No auto-fix: pruning deletes data, must be user-controlled
fmt.Printf(" ⚠ Run 'bd cleanup --older-than 90' to prune old closed issues\n")
continue
default:
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
fmt.Printf(" Manual fix: %s\n", check.Fix)
continue
}
if err != nil {
errorCount++
fmt.Printf(" %s Error: %v\n", ui.RenderFail("✗"), err)
fmt.Printf(" Manual fix: %s\n", check.Fix)
} else {
fixedCount++
fmt.Printf(" %s Fixed\n", ui.RenderPass("✓"))
}
}
// Summary
fmt.Printf("\nFix summary: %d fixed, %d errors\n", fixedCount, errorCount)
if errorCount > 0 {
fmt.Println("\nSome fixes failed. Please review the errors above and apply manual fixes as needed.")
}
}

154
cmd/bd/doctor_health.go Normal file
View File

@@ -0,0 +1,154 @@
package main
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/steveyegge/beads/cmd/bd/doctor"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
)
// runCheckHealth runs lightweight health checks for git hooks.
// Silent on success, prints a hint if issues detected.
// Respects hints.doctor config setting.
func runCheckHealth(path string) {
beadsDir := filepath.Join(path, ".beads")
// Check if .beads/ exists
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
// No .beads directory - nothing to check
return
}
// Get database path once (centralized path resolution)
dbPath := getCheckHealthDBPath(beadsDir)
// Check if database exists
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
// No database - only check hooks
if issue := doctor.CheckHooksQuick(Version); issue != "" {
printCheckHealthHint([]string{issue})
}
return
}
// Open database once for all checks (single DB connection)
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err != nil {
// Can't open DB - only check hooks
if issue := doctor.CheckHooksQuick(Version); issue != "" {
printCheckHealthHint([]string{issue})
}
return
}
defer db.Close()
// Check if hints.doctor is disabled in config
if hintsDisabledDB(db) {
return
}
// Run lightweight checks
var issues []string
// Check 1: Database version mismatch (CLI vs database bd_version)
if issue := checkVersionMismatchDB(db); issue != "" {
issues = append(issues, issue)
}
// Check 2: Sync branch not configured (now reads from config.yaml, not DB)
if issue := doctor.CheckSyncBranchQuick(); issue != "" {
issues = append(issues, issue)
}
// Check 3: Outdated git hooks
if issue := doctor.CheckHooksQuick(Version); issue != "" {
issues = append(issues, issue)
}
// Check 3: Sync-branch hook compatibility (issue #532)
if issue := doctor.CheckSyncBranchHookQuick(path); issue != "" {
issues = append(issues, issue)
}
// If any issues found, print hint
if len(issues) > 0 {
printCheckHealthHint(issues)
}
// Silent exit on success
}
// runDeepValidation runs full graph integrity validation
func runDeepValidation(path string) {
// Show warning about potential slowness
fmt.Println("Running deep validation (may be slow on large databases)...")
fmt.Println()
result := doctor.RunDeepValidation(path)
if jsonOutput {
jsonBytes, err := doctor.DeepValidationResultJSON(result)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
fmt.Println(string(jsonBytes))
} else {
doctor.PrintDeepValidationResult(result)
}
if !result.OverallOK {
os.Exit(1)
}
}
// printCheckHealthHint prints the health check hint and exits with error.
func printCheckHealthHint(issues []string) {
fmt.Fprintf(os.Stderr, "💡 bd doctor recommends a health check:\n")
for _, issue := range issues {
fmt.Fprintf(os.Stderr, " • %s\n", issue)
}
fmt.Fprintf(os.Stderr, " Run 'bd doctor' for details, or 'bd doctor --fix' to auto-repair\n")
fmt.Fprintf(os.Stderr, " (Suppress with: bd config set %s false)\n", ConfigKeyHintsDoctor)
os.Exit(1)
}
// getCheckHealthDBPath returns the database path for check-health operations.
// This centralizes the path resolution logic.
func getCheckHealthDBPath(beadsDir string) string {
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
return cfg.DatabasePath(beadsDir)
}
return filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// hintsDisabledDB checks if hints.doctor is set to "false" using an existing DB connection.
// Used by runCheckHealth to avoid multiple DB opens.
func hintsDisabledDB(db *sql.DB) bool {
var value string
err := db.QueryRow("SELECT value FROM config WHERE key = ?", ConfigKeyHintsDoctor).Scan(&value)
if err != nil {
return false // Key not set, assume hints enabled
}
return strings.ToLower(value) == "false"
}
// checkVersionMismatchDB checks if CLI version differs from database bd_version.
// Uses an existing DB connection.
func checkVersionMismatchDB(db *sql.DB) string {
var dbVersion string
err := db.QueryRow("SELECT value FROM metadata WHERE key = 'bd_version'").Scan(&dbVersion)
if err != nil {
return "" // Can't read version, skip
}
if dbVersion != "" && dbVersion != Version {
return fmt.Sprintf("Version mismatch (CLI: %s, database: %s)", Version, dbVersion)
}
return ""
}

154
cmd/bd/doctor_pollution.go Normal file
View File

@@ -0,0 +1,154 @@
package main
import (
"fmt"
"os"
"strings"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
)
// runPollutionCheck runs detailed test pollution detection
// This integrates the detect-pollution command functionality into doctor.
func runPollutionCheck(path string, clean bool, yes bool) {
// Ensure we have a store initialized (uses direct mode, no daemon support yet)
if err := ensureDirectMode("pollution check requires direct mode"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
ctx := rootCtx
// Get all issues
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching issues: %v\n", err)
os.Exit(1)
}
// Detect pollution (reuse detectTestPollution from detect_pollution.go)
polluted := detectTestPollution(allIssues)
if len(polluted) == 0 {
if !jsonOutput {
fmt.Println("No test pollution detected!")
} else {
outputJSON(map[string]interface{}{
"polluted_count": 0,
"issues": []interface{}{},
})
}
return
}
// Categorize by confidence
highConfidence := []pollutionResult{}
mediumConfidence := []pollutionResult{}
for _, p := range polluted {
if p.score >= 0.9 {
highConfidence = append(highConfidence, p)
} else {
mediumConfidence = append(mediumConfidence, p)
}
}
if jsonOutput {
result := map[string]interface{}{
"polluted_count": len(polluted),
"high_confidence": len(highConfidence),
"medium_confidence": len(mediumConfidence),
"issues": []map[string]interface{}{},
}
for _, p := range polluted {
result["issues"] = append(result["issues"].([]map[string]interface{}), map[string]interface{}{
"id": p.issue.ID,
"title": p.issue.Title,
"score": p.score,
"reasons": p.reasons,
"created_at": p.issue.CreatedAt,
})
}
outputJSON(result)
return
}
// Human-readable output
fmt.Printf("Found %d potential test issues:\n\n", len(polluted))
if len(highConfidence) > 0 {
fmt.Printf("High Confidence (score ≥ 0.9):\n")
for _, p := range highConfidence {
fmt.Printf(" %s: %q (score: %.2f)\n", p.issue.ID, p.issue.Title, p.score)
for _, reason := range p.reasons {
fmt.Printf(" - %s\n", reason)
}
}
fmt.Printf(" (Total: %d issues)\n\n", len(highConfidence))
}
if len(mediumConfidence) > 0 {
fmt.Printf("Medium Confidence (score 0.7-0.9):\n")
for _, p := range mediumConfidence {
fmt.Printf(" %s: %q (score: %.2f)\n", p.issue.ID, p.issue.Title, p.score)
for _, reason := range p.reasons {
fmt.Printf(" - %s\n", reason)
}
}
fmt.Printf(" (Total: %d issues)\n\n", len(mediumConfidence))
}
if !clean {
fmt.Printf("Run 'bd doctor --check=pollution --clean' to delete these issues (with confirmation).\n")
return
}
// Confirmation prompt
if !yes {
fmt.Printf("\nDelete %d test issues? [y/N] ", len(polluted))
var response string
_, _ = fmt.Scanln(&response)
if strings.ToLower(response) != "y" {
fmt.Println("Canceled.")
return
}
}
// Backup to JSONL before deleting
backupPath := ".beads/pollution-backup.jsonl"
if err := backupPollutedIssues(polluted, backupPath); err != nil {
fmt.Fprintf(os.Stderr, "Error backing up issues: %v\n", err)
os.Exit(1)
}
fmt.Printf("Backed up %d issues to %s\n", len(polluted), backupPath)
// Delete issues
fmt.Printf("\nDeleting %d issues...\n", len(polluted))
deleted := 0
for _, p := range polluted {
if err := deleteIssue(ctx, p.issue.ID); err != nil {
fmt.Fprintf(os.Stderr, "Error deleting %s: %v\n", p.issue.ID, err)
continue
}
deleted++
}
// Schedule auto-flush
markDirtyAndScheduleFlush()
fmt.Printf("%s Deleted %d test issues\n", ui.RenderPass("✓"), deleted)
fmt.Printf("\nCleanup complete. To restore, run: bd import %s\n", backupPath)
}
func init() {
rootCmd.AddCommand(doctorCmd)
doctorCmd.Flags().BoolVar(&perfMode, "perf", false, "Run performance diagnostics and generate CPU profile")
doctorCmd.Flags().BoolVar(&checkHealthMode, "check-health", false, "Quick health check for git hooks (silent on success)")
doctorCmd.Flags().StringVarP(&doctorOutput, "output", "o", "", "Export diagnostics to JSON file")
doctorCmd.Flags().StringVar(&doctorCheckFlag, "check", "", "Run specific check in detail (e.g., 'pollution')")
doctorCmd.Flags().BoolVar(&doctorClean, "clean", false, "For pollution check: delete detected test issues")
doctorCmd.Flags().BoolVar(&doctorDeep, "deep", false, "Validate full graph integrity")
}

209
cmd/bd/edit.go Normal file
View File

@@ -0,0 +1,209 @@
package main
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
)
var editCmd = &cobra.Command{
Use: "edit [id]",
GroupID: "issues",
Short: "Edit an issue field in $EDITOR",
Long: `Edit an issue field using your configured $EDITOR.
By default, edits the description. Use flags to edit other fields.
Examples:
bd edit bd-42 # Edit description
bd edit bd-42 --title # Edit title
bd edit bd-42 --design # Edit design notes
bd edit bd-42 --notes # Edit notes
bd edit bd-42 --acceptance # Edit acceptance criteria`,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
CheckReadonly("edit")
id := args[0]
ctx := rootCtx
// Resolve partial ID if in direct mode
if daemonClient == nil {
fullID, err := utils.ResolvePartialID(ctx, store, id)
if err != nil {
FatalErrorRespectJSON("resolving %s: %v", id, err)
}
id = fullID
}
// Determine which field to edit
fieldToEdit := "description"
if cmd.Flags().Changed("title") {
fieldToEdit = "title"
} else if cmd.Flags().Changed("design") {
fieldToEdit = "design"
} else if cmd.Flags().Changed("notes") {
fieldToEdit = "notes"
} else if cmd.Flags().Changed("acceptance") {
fieldToEdit = "acceptance_criteria"
}
// Get the editor from environment
editor := os.Getenv("EDITOR")
if editor == "" {
editor = os.Getenv("VISUAL")
}
if editor == "" {
// Try common defaults
for _, defaultEditor := range []string{"vim", "vi", "nano", "emacs"} {
if _, err := exec.LookPath(defaultEditor); err == nil {
editor = defaultEditor
break
}
}
}
if editor == "" {
FatalErrorRespectJSON("no editor found. Set $EDITOR or $VISUAL environment variable")
}
// Get the current issue
var issue *types.Issue
var err error
if daemonClient != nil {
// Daemon mode
showArgs := &rpc.ShowArgs{ID: id}
resp, err := daemonClient.Show(showArgs)
if err != nil {
FatalErrorRespectJSON("fetching issue %s: %v", id, err)
}
issue = &types.Issue{}
if err := json.Unmarshal(resp.Data, issue); err != nil {
FatalErrorRespectJSON("parsing issue data: %v", err)
}
} else {
// Direct mode
issue, err = store.GetIssue(ctx, id)
if err != nil {
FatalErrorRespectJSON("fetching issue %s: %v", id, err)
}
if issue == nil {
FatalErrorRespectJSON("issue %s not found", id)
}
}
// Get the current field value
var currentValue string
switch fieldToEdit {
case "title":
currentValue = issue.Title
case "description":
currentValue = issue.Description
case "design":
currentValue = issue.Design
case "notes":
currentValue = issue.Notes
case "acceptance_criteria":
currentValue = issue.AcceptanceCriteria
}
// Create a temporary file with the current value
tmpFile, err := os.CreateTemp("", fmt.Sprintf("bd-edit-%s-*.txt", fieldToEdit))
if err != nil {
FatalErrorRespectJSON("creating temp file: %v", err)
}
tmpPath := tmpFile.Name()
defer func() { _ = os.Remove(tmpPath) }()
// Write current value to temp file
if _, err := tmpFile.WriteString(currentValue); err != nil {
_ = tmpFile.Close()
FatalErrorRespectJSON("writing to temp file: %v", err)
}
_ = tmpFile.Close()
// Open the editor
editorCmd := exec.Command(editor, tmpPath)
editorCmd.Stdin = os.Stdin
editorCmd.Stdout = os.Stdout
editorCmd.Stderr = os.Stderr
if err := editorCmd.Run(); err != nil {
FatalErrorRespectJSON("running editor: %v", err)
}
// Read the edited content
// #nosec G304 -- tmpPath was created earlier in this function
editedContent, err := os.ReadFile(tmpPath)
if err != nil {
FatalErrorRespectJSON("reading edited file: %v", err)
}
newValue := string(editedContent)
// Check if the value changed
if newValue == currentValue {
fmt.Println("No changes made")
return
}
// Validate title if editing title
if fieldToEdit == "title" && strings.TrimSpace(newValue) == "" {
FatalErrorRespectJSON("title cannot be empty")
}
// Update the issue
updates := map[string]interface{}{
fieldToEdit: newValue,
}
if daemonClient != nil {
// Daemon mode
updateArgs := &rpc.UpdateArgs{ID: id}
switch fieldToEdit {
case "title":
updateArgs.Title = &newValue
case "description":
updateArgs.Description = &newValue
case "design":
updateArgs.Design = &newValue
case "notes":
updateArgs.Notes = &newValue
case "acceptance_criteria":
updateArgs.AcceptanceCriteria = &newValue
}
_, err := daemonClient.Update(updateArgs)
if err != nil {
FatalErrorRespectJSON("updating issue: %v", err)
}
} else {
// Direct mode
if err := store.UpdateIssue(ctx, id, updates, actor); err != nil {
FatalErrorRespectJSON("updating issue: %v", err)
}
markDirtyAndScheduleFlush()
}
fieldName := strings.ReplaceAll(fieldToEdit, "_", " ")
fmt.Printf("%s Updated %s for issue: %s\n", ui.RenderPass("✓"), fieldName, id)
},
}
func init() {
editCmd.Flags().Bool("title", false, "Edit the title")
editCmd.Flags().Bool("description", false, "Edit the description (default)")
editCmd.Flags().Bool("design", false, "Edit the design notes")
editCmd.Flags().Bool("notes", false, "Edit the notes")
editCmd.Flags().Bool("acceptance", false, "Edit the acceptance criteria")
rootCmd.AddCommand(editCmd)
}

View File

@@ -639,552 +639,3 @@ func getLinearHashLength(ctx context.Context) int {
return value
}
// detectLinearConflicts finds issues that have been modified both locally and in Linear
// since the last sync. This is a more expensive operation as it fetches individual
// issue timestamps from Linear.
func detectLinearConflicts(ctx context.Context) ([]linear.Conflict, error) {
lastSyncStr, _ := store.GetConfig(ctx, "linear.last_sync")
if lastSyncStr == "" {
return nil, nil
}
lastSync, err := time.Parse(time.RFC3339, lastSyncStr)
if err != nil {
return nil, fmt.Errorf("invalid last_sync timestamp: %w", err)
}
config := loadLinearMappingConfig(ctx)
client, err := getLinearClient(ctx)
if err != nil {
return nil, fmt.Errorf("failed to create Linear client: %w", err)
}
// Get all local issues with Linear external refs
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
return nil, err
}
var conflicts []linear.Conflict
for _, issue := range allIssues {
if issue.ExternalRef == nil || !linear.IsLinearExternalRef(*issue.ExternalRef) {
continue
}
if !issue.UpdatedAt.After(lastSync) {
continue
}
linearIdentifier := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearIdentifier == "" {
continue
}
linearIssue, err := client.FetchIssueByIdentifier(ctx, linearIdentifier)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to fetch Linear issue %s for conflict check: %v\n",
linearIdentifier, err)
continue
}
if linearIssue == nil {
continue
}
linearUpdatedAt, err := time.Parse(time.RFC3339, linearIssue.UpdatedAt)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to parse Linear UpdatedAt for %s: %v\n",
linearIdentifier, err)
continue
}
if !linearUpdatedAt.After(lastSync) {
continue
}
localComparable := linear.NormalizeIssueForLinearHash(issue)
linearComparable := linear.IssueToBeads(linearIssue, config).Issue.(*types.Issue)
if localComparable.ComputeContentHash() == linearComparable.ComputeContentHash() {
continue
}
conflicts = append(conflicts, linear.Conflict{
IssueID: issue.ID,
LocalUpdated: issue.UpdatedAt,
LinearUpdated: linearUpdatedAt,
LinearExternalRef: *issue.ExternalRef,
LinearIdentifier: linearIdentifier,
LinearInternalID: linearIssue.ID,
})
}
return conflicts, nil
}
// reimportLinearConflicts re-imports conflicting issues from Linear (Linear wins).
// For each conflict, fetches the current state from Linear and updates the local copy.
func reimportLinearConflicts(ctx context.Context, conflicts []linear.Conflict) error {
if len(conflicts) == 0 {
return nil
}
client, err := getLinearClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Linear client: %w", err)
}
config := loadLinearMappingConfig(ctx)
resolved := 0
failed := 0
for _, conflict := range conflicts {
linearIssue, err := client.FetchIssueByIdentifier(ctx, conflict.LinearIdentifier)
if err != nil {
fmt.Fprintf(os.Stderr, " Warning: failed to fetch %s for resolution: %v\n",
conflict.LinearIdentifier, err)
failed++
continue
}
if linearIssue == nil {
fmt.Fprintf(os.Stderr, " Warning: Linear issue %s not found, skipping\n",
conflict.LinearIdentifier)
failed++
continue
}
updates := linear.BuildLinearToLocalUpdates(linearIssue, config)
err = store.UpdateIssue(ctx, conflict.IssueID, updates, actor)
if err != nil {
fmt.Fprintf(os.Stderr, " Warning: failed to update local issue %s: %v\n",
conflict.IssueID, err)
failed++
continue
}
fmt.Printf(" Resolved: %s <- %s (Linear wins)\n", conflict.IssueID, conflict.LinearIdentifier)
resolved++
}
if failed > 0 {
return fmt.Errorf("%d conflict(s) failed to resolve", failed)
}
fmt.Printf(" Resolved %d conflict(s) by keeping Linear version\n", resolved)
return nil
}
// resolveLinearConflictsByTimestamp resolves conflicts by keeping the newer version.
// For each conflict, compares local and Linear UpdatedAt timestamps.
// If Linear is newer, re-imports from Linear. If local is newer, push will overwrite.
func resolveLinearConflictsByTimestamp(ctx context.Context, conflicts []linear.Conflict) error {
if len(conflicts) == 0 {
return nil
}
var linearWins []linear.Conflict
var localWins []linear.Conflict
for _, conflict := range conflicts {
if conflict.LinearUpdated.After(conflict.LocalUpdated) {
linearWins = append(linearWins, conflict)
} else {
localWins = append(localWins, conflict)
}
}
if len(linearWins) > 0 {
fmt.Printf(" %d conflict(s): Linear is newer, will re-import\n", len(linearWins))
}
if len(localWins) > 0 {
fmt.Printf(" %d conflict(s): Local is newer, will push to Linear\n", len(localWins))
}
if len(linearWins) > 0 {
err := reimportLinearConflicts(ctx, linearWins)
if err != nil {
return fmt.Errorf("failed to re-import Linear-wins conflicts: %w", err)
}
}
if len(localWins) > 0 {
for _, conflict := range localWins {
fmt.Printf(" Resolved: %s -> %s (local wins, will push)\n",
conflict.IssueID, conflict.LinearIdentifier)
}
}
return nil
}
// doPullFromLinear imports issues from Linear using the GraphQL API.
// Supports incremental sync by checking linear.last_sync config and only fetching
// issues updated since that timestamp.
func doPullFromLinear(ctx context.Context, dryRun bool, state string, skipLinearIDs map[string]bool) (*linear.PullStats, error) {
stats := &linear.PullStats{}
client, err := getLinearClient(ctx)
if err != nil {
return stats, fmt.Errorf("failed to create Linear client: %w", err)
}
var linearIssues []linear.Issue
lastSyncStr, _ := store.GetConfig(ctx, "linear.last_sync")
if lastSyncStr != "" {
lastSync, err := time.Parse(time.RFC3339, lastSyncStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: invalid linear.last_sync timestamp, doing full sync\n")
linearIssues, err = client.FetchIssues(ctx, state)
if err != nil {
return stats, fmt.Errorf("failed to fetch issues from Linear: %w", err)
}
} else {
stats.Incremental = true
stats.SyncedSince = lastSyncStr
linearIssues, err = client.FetchIssuesSince(ctx, state, lastSync)
if err != nil {
return stats, fmt.Errorf("failed to fetch issues from Linear (incremental): %w", err)
}
if !dryRun {
fmt.Printf(" Incremental sync since %s\n", lastSync.Format("2006-01-02 15:04:05"))
}
}
} else {
linearIssues, err = client.FetchIssues(ctx, state)
if err != nil {
return stats, fmt.Errorf("failed to fetch issues from Linear: %w", err)
}
if !dryRun {
fmt.Println(" Full sync (no previous sync timestamp)")
}
}
mappingConfig := loadLinearMappingConfig(ctx)
idMode := getLinearIDMode(ctx)
hashLength := getLinearHashLength(ctx)
var beadsIssues []*types.Issue
var allDeps []linear.DependencyInfo
linearIDToBeadsID := make(map[string]string)
for i := range linearIssues {
conversion := linear.IssueToBeads(&linearIssues[i], mappingConfig)
beadsIssues = append(beadsIssues, conversion.Issue.(*types.Issue))
allDeps = append(allDeps, conversion.Dependencies...)
}
if len(beadsIssues) == 0 {
fmt.Println(" No issues to import")
return stats, nil
}
if len(skipLinearIDs) > 0 {
var filteredIssues []*types.Issue
skipped := 0
for _, issue := range beadsIssues {
if issue.ExternalRef == nil {
filteredIssues = append(filteredIssues, issue)
continue
}
linearID := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearID != "" && skipLinearIDs[linearID] {
skipped++
continue
}
filteredIssues = append(filteredIssues, issue)
}
if skipped > 0 {
stats.Skipped += skipped
}
beadsIssues = filteredIssues
if len(allDeps) > 0 {
var filteredDeps []linear.DependencyInfo
for _, dep := range allDeps {
if skipLinearIDs[dep.FromLinearID] || skipLinearIDs[dep.ToLinearID] {
continue
}
filteredDeps = append(filteredDeps, dep)
}
allDeps = filteredDeps
}
}
prefix, err := store.GetConfig(ctx, "issue_prefix")
if err != nil || prefix == "" {
prefix = "bd"
}
if idMode == "hash" {
existingIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true})
if err != nil {
return stats, fmt.Errorf("failed to fetch existing issues for ID collision avoidance: %w", err)
}
usedIDs := make(map[string]bool, len(existingIssues))
for _, issue := range existingIssues {
if issue.ID != "" {
usedIDs[issue.ID] = true
}
}
idOpts := linear.IDGenerationOptions{
BaseLength: hashLength,
MaxLength: 8,
UsedIDs: usedIDs,
}
if err := linear.GenerateIssueIDs(beadsIssues, prefix, "linear-import", idOpts); err != nil {
return stats, fmt.Errorf("failed to generate issue IDs: %w", err)
}
} else if idMode != "db" {
return stats, fmt.Errorf("unsupported linear.id_mode %q (expected \"hash\" or \"db\")", idMode)
}
opts := ImportOptions{
DryRun: dryRun,
SkipUpdate: false,
}
result, err := importIssuesCore(ctx, dbPath, store, beadsIssues, opts)
if err != nil {
return stats, fmt.Errorf("import failed: %w", err)
}
stats.Created = result.Created
stats.Updated = result.Updated
stats.Skipped = result.Skipped
if dryRun {
if stats.Incremental {
fmt.Printf(" Would import %d issues from Linear (incremental since %s)\n",
len(linearIssues), stats.SyncedSince)
} else {
fmt.Printf(" Would import %d issues from Linear (full sync)\n", len(linearIssues))
}
return stats, nil
}
allBeadsIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to fetch issues for dependency mapping: %v\n", err)
return stats, nil
}
for _, issue := range allBeadsIssues {
if issue.ExternalRef != nil && linear.IsLinearExternalRef(*issue.ExternalRef) {
linearID := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearID != "" {
linearIDToBeadsID[linearID] = issue.ID
}
}
}
depsCreated := 0
for _, dep := range allDeps {
fromID, fromOK := linearIDToBeadsID[dep.FromLinearID]
toID, toOK := linearIDToBeadsID[dep.ToLinearID]
if !fromOK || !toOK {
continue
}
dependency := &types.Dependency{
IssueID: fromID,
DependsOnID: toID,
Type: types.DependencyType(dep.Type),
CreatedAt: time.Now(),
}
err := store.AddDependency(ctx, dependency, actor)
if err != nil {
if !strings.Contains(err.Error(), "already exists") &&
!strings.Contains(err.Error(), "duplicate") {
fmt.Fprintf(os.Stderr, "Warning: failed to create dependency %s -> %s (%s): %v\n",
fromID, toID, dep.Type, err)
}
} else {
depsCreated++
}
}
if depsCreated > 0 {
fmt.Printf(" Created %d dependencies from Linear relations\n", depsCreated)
}
return stats, nil
}
// doPushToLinear exports issues to Linear using the GraphQL API.
func doPushToLinear(ctx context.Context, dryRun bool, createOnly bool, updateRefs bool, forceUpdateIDs map[string]bool, skipUpdateIDs map[string]bool) (*linear.PushStats, error) {
stats := &linear.PushStats{}
client, err := getLinearClient(ctx)
if err != nil {
return stats, fmt.Errorf("failed to create Linear client: %w", err)
}
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
return stats, fmt.Errorf("failed to get local issues: %w", err)
}
var toCreate []*types.Issue
var toUpdate []*types.Issue
for _, issue := range allIssues {
if issue.IsTombstone() {
continue
}
if issue.ExternalRef != nil && linear.IsLinearExternalRef(*issue.ExternalRef) {
if !createOnly {
toUpdate = append(toUpdate, issue)
}
} else if issue.ExternalRef == nil {
toCreate = append(toCreate, issue)
}
}
var stateCache *linear.StateCache
if !dryRun && (len(toCreate) > 0 || (!createOnly && len(toUpdate) > 0)) {
stateCache, err = linear.BuildStateCache(ctx, client)
if err != nil {
return stats, fmt.Errorf("failed to fetch team states: %w", err)
}
}
mappingConfig := loadLinearMappingConfig(ctx)
for _, issue := range toCreate {
if dryRun {
stats.Created++
continue
}
linearPriority := linear.PriorityToLinear(issue.Priority, mappingConfig)
stateID := stateCache.FindStateForBeadsStatus(issue.Status)
description := linear.BuildLinearDescription(issue)
linearIssue, err := client.CreateIssue(ctx, issue.Title, description, linearPriority, stateID, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to create issue '%s' in Linear: %v\n", issue.Title, err)
stats.Errors++
continue
}
stats.Created++
fmt.Printf(" Created: %s -> %s\n", issue.ID, linearIssue.Identifier)
if updateRefs && linearIssue.URL != "" {
externalRef := linearIssue.URL
if canonical, ok := linear.CanonicalizeLinearExternalRef(externalRef); ok {
externalRef = canonical
}
updates := map[string]interface{}{
"external_ref": externalRef,
}
if err := store.UpdateIssue(ctx, issue.ID, updates, actor); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update external_ref for %s: %v\n", issue.ID, err)
stats.Errors++
}
}
}
if len(toUpdate) > 0 && !createOnly {
for _, issue := range toUpdate {
if skipUpdateIDs != nil && skipUpdateIDs[issue.ID] {
stats.Skipped++
continue
}
linearIdentifier := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearIdentifier == "" {
fmt.Fprintf(os.Stderr, "Warning: could not extract Linear identifier from %s: %s\n",
issue.ID, *issue.ExternalRef)
stats.Errors++
continue
}
linearIssue, err := client.FetchIssueByIdentifier(ctx, linearIdentifier)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to fetch Linear issue %s: %v\n",
linearIdentifier, err)
stats.Errors++
continue
}
if linearIssue == nil {
fmt.Fprintf(os.Stderr, "Warning: Linear issue %s not found (may have been deleted)\n",
linearIdentifier)
stats.Skipped++
continue
}
linearUpdatedAt, err := time.Parse(time.RFC3339, linearIssue.UpdatedAt)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to parse Linear UpdatedAt for %s: %v\n",
linearIdentifier, err)
stats.Errors++
continue
}
forcedUpdate := forceUpdateIDs != nil && forceUpdateIDs[issue.ID]
if !forcedUpdate && !issue.UpdatedAt.After(linearUpdatedAt) {
stats.Skipped++
continue
}
if !forcedUpdate {
localComparable := linear.NormalizeIssueForLinearHash(issue)
linearComparable := linear.IssueToBeads(linearIssue, mappingConfig).Issue.(*types.Issue)
if localComparable.ComputeContentHash() == linearComparable.ComputeContentHash() {
stats.Skipped++
continue
}
}
if dryRun {
stats.Updated++
continue
}
description := linear.BuildLinearDescription(issue)
updatePayload := map[string]interface{}{
"title": issue.Title,
"description": description,
}
linearPriority := linear.PriorityToLinear(issue.Priority, mappingConfig)
if linearPriority > 0 {
updatePayload["priority"] = linearPriority
}
stateID := stateCache.FindStateForBeadsStatus(issue.Status)
if stateID != "" {
updatePayload["stateId"] = stateID
}
updatedLinearIssue, err := client.UpdateIssue(ctx, linearIssue.ID, updatePayload)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update Linear issue %s: %v\n",
linearIdentifier, err)
stats.Errors++
continue
}
stats.Updated++
fmt.Printf(" Updated: %s -> %s\n", issue.ID, updatedLinearIssue.Identifier)
}
}
if dryRun {
fmt.Printf(" Would create %d issues in Linear\n", stats.Created)
if !createOnly {
fmt.Printf(" Would update %d issues in Linear\n", stats.Updated)
}
}
return stats, nil
}

190
cmd/bd/linear_conflict.go Normal file
View File

@@ -0,0 +1,190 @@
package main
import (
"context"
"fmt"
"os"
"time"
"github.com/steveyegge/beads/internal/linear"
"github.com/steveyegge/beads/internal/types"
)
// detectLinearConflicts finds issues that have been modified both locally and in Linear
// since the last sync. This is a more expensive operation as it fetches individual
// issue timestamps from Linear.
func detectLinearConflicts(ctx context.Context) ([]linear.Conflict, error) {
lastSyncStr, _ := store.GetConfig(ctx, "linear.last_sync")
if lastSyncStr == "" {
return nil, nil
}
lastSync, err := time.Parse(time.RFC3339, lastSyncStr)
if err != nil {
return nil, fmt.Errorf("invalid last_sync timestamp: %w", err)
}
config := loadLinearMappingConfig(ctx)
client, err := getLinearClient(ctx)
if err != nil {
return nil, fmt.Errorf("failed to create Linear client: %w", err)
}
// Get all local issues with Linear external refs
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
return nil, err
}
var conflicts []linear.Conflict
for _, issue := range allIssues {
if issue.ExternalRef == nil || !linear.IsLinearExternalRef(*issue.ExternalRef) {
continue
}
if !issue.UpdatedAt.After(lastSync) {
continue
}
linearIdentifier := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearIdentifier == "" {
continue
}
linearIssue, err := client.FetchIssueByIdentifier(ctx, linearIdentifier)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to fetch Linear issue %s for conflict check: %v\n",
linearIdentifier, err)
continue
}
if linearIssue == nil {
continue
}
linearUpdatedAt, err := time.Parse(time.RFC3339, linearIssue.UpdatedAt)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to parse Linear UpdatedAt for %s: %v\n",
linearIdentifier, err)
continue
}
if !linearUpdatedAt.After(lastSync) {
continue
}
localComparable := linear.NormalizeIssueForLinearHash(issue)
linearComparable := linear.IssueToBeads(linearIssue, config).Issue.(*types.Issue)
if localComparable.ComputeContentHash() == linearComparable.ComputeContentHash() {
continue
}
conflicts = append(conflicts, linear.Conflict{
IssueID: issue.ID,
LocalUpdated: issue.UpdatedAt,
LinearUpdated: linearUpdatedAt,
LinearExternalRef: *issue.ExternalRef,
LinearIdentifier: linearIdentifier,
LinearInternalID: linearIssue.ID,
})
}
return conflicts, nil
}
// reimportLinearConflicts re-imports conflicting issues from Linear (Linear wins).
// For each conflict, fetches the current state from Linear and updates the local copy.
func reimportLinearConflicts(ctx context.Context, conflicts []linear.Conflict) error {
if len(conflicts) == 0 {
return nil
}
client, err := getLinearClient(ctx)
if err != nil {
return fmt.Errorf("failed to create Linear client: %w", err)
}
config := loadLinearMappingConfig(ctx)
resolved := 0
failed := 0
for _, conflict := range conflicts {
linearIssue, err := client.FetchIssueByIdentifier(ctx, conflict.LinearIdentifier)
if err != nil {
fmt.Fprintf(os.Stderr, " Warning: failed to fetch %s for resolution: %v\n",
conflict.LinearIdentifier, err)
failed++
continue
}
if linearIssue == nil {
fmt.Fprintf(os.Stderr, " Warning: Linear issue %s not found, skipping\n",
conflict.LinearIdentifier)
failed++
continue
}
updates := linear.BuildLinearToLocalUpdates(linearIssue, config)
err = store.UpdateIssue(ctx, conflict.IssueID, updates, actor)
if err != nil {
fmt.Fprintf(os.Stderr, " Warning: failed to update local issue %s: %v\n",
conflict.IssueID, err)
failed++
continue
}
fmt.Printf(" Resolved: %s <- %s (Linear wins)\n", conflict.IssueID, conflict.LinearIdentifier)
resolved++
}
if failed > 0 {
return fmt.Errorf("%d conflict(s) failed to resolve", failed)
}
fmt.Printf(" Resolved %d conflict(s) by keeping Linear version\n", resolved)
return nil
}
// resolveLinearConflictsByTimestamp resolves conflicts by keeping the newer version.
// For each conflict, compares local and Linear UpdatedAt timestamps.
// If Linear is newer, re-imports from Linear. If local is newer, push will overwrite.
func resolveLinearConflictsByTimestamp(ctx context.Context, conflicts []linear.Conflict) error {
if len(conflicts) == 0 {
return nil
}
var linearWins []linear.Conflict
var localWins []linear.Conflict
for _, conflict := range conflicts {
if conflict.LinearUpdated.After(conflict.LocalUpdated) {
linearWins = append(linearWins, conflict)
} else {
localWins = append(localWins, conflict)
}
}
if len(linearWins) > 0 {
fmt.Printf(" %d conflict(s): Linear is newer, will re-import\n", len(linearWins))
}
if len(localWins) > 0 {
fmt.Printf(" %d conflict(s): Local is newer, will push to Linear\n", len(localWins))
}
if len(linearWins) > 0 {
err := reimportLinearConflicts(ctx, linearWins)
if err != nil {
return fmt.Errorf("failed to re-import Linear-wins conflicts: %w", err)
}
}
if len(localWins) > 0 {
for _, conflict := range localWins {
fmt.Printf(" Resolved: %s -> %s (local wins, will push)\n",
conflict.IssueID, conflict.LinearIdentifier)
}
}
return nil
}

383
cmd/bd/linear_sync.go Normal file
View File

@@ -0,0 +1,383 @@
package main
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/steveyegge/beads/internal/linear"
"github.com/steveyegge/beads/internal/types"
)
// doPullFromLinear imports issues from Linear using the GraphQL API.
// Supports incremental sync by checking linear.last_sync config and only fetching
// issues updated since that timestamp.
func doPullFromLinear(ctx context.Context, dryRun bool, state string, skipLinearIDs map[string]bool) (*linear.PullStats, error) {
stats := &linear.PullStats{}
client, err := getLinearClient(ctx)
if err != nil {
return stats, fmt.Errorf("failed to create Linear client: %w", err)
}
var linearIssues []linear.Issue
lastSyncStr, _ := store.GetConfig(ctx, "linear.last_sync")
if lastSyncStr != "" {
lastSync, err := time.Parse(time.RFC3339, lastSyncStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: invalid linear.last_sync timestamp, doing full sync\n")
linearIssues, err = client.FetchIssues(ctx, state)
if err != nil {
return stats, fmt.Errorf("failed to fetch issues from Linear: %w", err)
}
} else {
stats.Incremental = true
stats.SyncedSince = lastSyncStr
linearIssues, err = client.FetchIssuesSince(ctx, state, lastSync)
if err != nil {
return stats, fmt.Errorf("failed to fetch issues from Linear (incremental): %w", err)
}
if !dryRun {
fmt.Printf(" Incremental sync since %s\n", lastSync.Format("2006-01-02 15:04:05"))
}
}
} else {
linearIssues, err = client.FetchIssues(ctx, state)
if err != nil {
return stats, fmt.Errorf("failed to fetch issues from Linear: %w", err)
}
if !dryRun {
fmt.Println(" Full sync (no previous sync timestamp)")
}
}
mappingConfig := loadLinearMappingConfig(ctx)
idMode := getLinearIDMode(ctx)
hashLength := getLinearHashLength(ctx)
var beadsIssues []*types.Issue
var allDeps []linear.DependencyInfo
linearIDToBeadsID := make(map[string]string)
for i := range linearIssues {
conversion := linear.IssueToBeads(&linearIssues[i], mappingConfig)
beadsIssues = append(beadsIssues, conversion.Issue.(*types.Issue))
allDeps = append(allDeps, conversion.Dependencies...)
}
if len(beadsIssues) == 0 {
fmt.Println(" No issues to import")
return stats, nil
}
if len(skipLinearIDs) > 0 {
var filteredIssues []*types.Issue
skipped := 0
for _, issue := range beadsIssues {
if issue.ExternalRef == nil {
filteredIssues = append(filteredIssues, issue)
continue
}
linearID := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearID != "" && skipLinearIDs[linearID] {
skipped++
continue
}
filteredIssues = append(filteredIssues, issue)
}
if skipped > 0 {
stats.Skipped += skipped
}
beadsIssues = filteredIssues
if len(allDeps) > 0 {
var filteredDeps []linear.DependencyInfo
for _, dep := range allDeps {
if skipLinearIDs[dep.FromLinearID] || skipLinearIDs[dep.ToLinearID] {
continue
}
filteredDeps = append(filteredDeps, dep)
}
allDeps = filteredDeps
}
}
prefix, err := store.GetConfig(ctx, "issue_prefix")
if err != nil || prefix == "" {
prefix = "bd"
}
if idMode == "hash" {
existingIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true})
if err != nil {
return stats, fmt.Errorf("failed to fetch existing issues for ID collision avoidance: %w", err)
}
usedIDs := make(map[string]bool, len(existingIssues))
for _, issue := range existingIssues {
if issue.ID != "" {
usedIDs[issue.ID] = true
}
}
idOpts := linear.IDGenerationOptions{
BaseLength: hashLength,
MaxLength: 8,
UsedIDs: usedIDs,
}
if err := linear.GenerateIssueIDs(beadsIssues, prefix, "linear-import", idOpts); err != nil {
return stats, fmt.Errorf("failed to generate issue IDs: %w", err)
}
} else if idMode != "db" {
return stats, fmt.Errorf("unsupported linear.id_mode %q (expected \"hash\" or \"db\")", idMode)
}
opts := ImportOptions{
DryRun: dryRun,
SkipUpdate: false,
}
result, err := importIssuesCore(ctx, dbPath, store, beadsIssues, opts)
if err != nil {
return stats, fmt.Errorf("import failed: %w", err)
}
stats.Created = result.Created
stats.Updated = result.Updated
stats.Skipped = result.Skipped
if dryRun {
if stats.Incremental {
fmt.Printf(" Would import %d issues from Linear (incremental since %s)\n",
len(linearIssues), stats.SyncedSince)
} else {
fmt.Printf(" Would import %d issues from Linear (full sync)\n", len(linearIssues))
}
return stats, nil
}
allBeadsIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to fetch issues for dependency mapping: %v\n", err)
return stats, nil
}
for _, issue := range allBeadsIssues {
if issue.ExternalRef != nil && linear.IsLinearExternalRef(*issue.ExternalRef) {
linearID := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearID != "" {
linearIDToBeadsID[linearID] = issue.ID
}
}
}
depsCreated := 0
for _, dep := range allDeps {
fromID, fromOK := linearIDToBeadsID[dep.FromLinearID]
toID, toOK := linearIDToBeadsID[dep.ToLinearID]
if !fromOK || !toOK {
continue
}
dependency := &types.Dependency{
IssueID: fromID,
DependsOnID: toID,
Type: types.DependencyType(dep.Type),
CreatedAt: time.Now(),
}
err := store.AddDependency(ctx, dependency, actor)
if err != nil {
if !strings.Contains(err.Error(), "already exists") &&
!strings.Contains(err.Error(), "duplicate") {
fmt.Fprintf(os.Stderr, "Warning: failed to create dependency %s -> %s (%s): %v\n",
fromID, toID, dep.Type, err)
}
} else {
depsCreated++
}
}
if depsCreated > 0 {
fmt.Printf(" Created %d dependencies from Linear relations\n", depsCreated)
}
return stats, nil
}
// doPushToLinear exports issues to Linear using the GraphQL API.
func doPushToLinear(ctx context.Context, dryRun bool, createOnly bool, updateRefs bool, forceUpdateIDs map[string]bool, skipUpdateIDs map[string]bool) (*linear.PushStats, error) {
stats := &linear.PushStats{}
client, err := getLinearClient(ctx)
if err != nil {
return stats, fmt.Errorf("failed to create Linear client: %w", err)
}
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
return stats, fmt.Errorf("failed to get local issues: %w", err)
}
var toCreate []*types.Issue
var toUpdate []*types.Issue
for _, issue := range allIssues {
if issue.IsTombstone() {
continue
}
if issue.ExternalRef != nil && linear.IsLinearExternalRef(*issue.ExternalRef) {
if !createOnly {
toUpdate = append(toUpdate, issue)
}
} else if issue.ExternalRef == nil {
toCreate = append(toCreate, issue)
}
}
var stateCache *linear.StateCache
if !dryRun && (len(toCreate) > 0 || (!createOnly && len(toUpdate) > 0)) {
stateCache, err = linear.BuildStateCache(ctx, client)
if err != nil {
return stats, fmt.Errorf("failed to fetch team states: %w", err)
}
}
mappingConfig := loadLinearMappingConfig(ctx)
for _, issue := range toCreate {
if dryRun {
stats.Created++
continue
}
linearPriority := linear.PriorityToLinear(issue.Priority, mappingConfig)
stateID := stateCache.FindStateForBeadsStatus(issue.Status)
description := linear.BuildLinearDescription(issue)
linearIssue, err := client.CreateIssue(ctx, issue.Title, description, linearPriority, stateID, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to create issue '%s' in Linear: %v\n", issue.Title, err)
stats.Errors++
continue
}
stats.Created++
fmt.Printf(" Created: %s -> %s\n", issue.ID, linearIssue.Identifier)
if updateRefs && linearIssue.URL != "" {
externalRef := linearIssue.URL
if canonical, ok := linear.CanonicalizeLinearExternalRef(externalRef); ok {
externalRef = canonical
}
updates := map[string]interface{}{
"external_ref": externalRef,
}
if err := store.UpdateIssue(ctx, issue.ID, updates, actor); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update external_ref for %s: %v\n", issue.ID, err)
stats.Errors++
}
}
}
if len(toUpdate) > 0 && !createOnly {
for _, issue := range toUpdate {
if skipUpdateIDs != nil && skipUpdateIDs[issue.ID] {
stats.Skipped++
continue
}
linearIdentifier := linear.ExtractLinearIdentifier(*issue.ExternalRef)
if linearIdentifier == "" {
fmt.Fprintf(os.Stderr, "Warning: could not extract Linear identifier from %s: %s\n",
issue.ID, *issue.ExternalRef)
stats.Errors++
continue
}
linearIssue, err := client.FetchIssueByIdentifier(ctx, linearIdentifier)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to fetch Linear issue %s: %v\n",
linearIdentifier, err)
stats.Errors++
continue
}
if linearIssue == nil {
fmt.Fprintf(os.Stderr, "Warning: Linear issue %s not found (may have been deleted)\n",
linearIdentifier)
stats.Skipped++
continue
}
linearUpdatedAt, err := time.Parse(time.RFC3339, linearIssue.UpdatedAt)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to parse Linear UpdatedAt for %s: %v\n",
linearIdentifier, err)
stats.Errors++
continue
}
forcedUpdate := forceUpdateIDs != nil && forceUpdateIDs[issue.ID]
if !forcedUpdate && !issue.UpdatedAt.After(linearUpdatedAt) {
stats.Skipped++
continue
}
if !forcedUpdate {
localComparable := linear.NormalizeIssueForLinearHash(issue)
linearComparable := linear.IssueToBeads(linearIssue, mappingConfig).Issue.(*types.Issue)
if localComparable.ComputeContentHash() == linearComparable.ComputeContentHash() {
stats.Skipped++
continue
}
}
if dryRun {
stats.Updated++
continue
}
description := linear.BuildLinearDescription(issue)
updatePayload := map[string]interface{}{
"title": issue.Title,
"description": description,
}
linearPriority := linear.PriorityToLinear(issue.Priority, mappingConfig)
if linearPriority > 0 {
updatePayload["priority"] = linearPriority
}
stateID := stateCache.FindStateForBeadsStatus(issue.Status)
if stateID != "" {
updatePayload["stateId"] = stateID
}
updatedLinearIssue, err := client.UpdateIssue(ctx, linearIssue.ID, updatePayload)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update Linear issue %s: %v\n",
linearIdentifier, err)
stats.Errors++
continue
}
stats.Updated++
fmt.Printf(" Updated: %s -> %s\n", issue.ID, updatedLinearIssue.Identifier)
}
}
if dryRun {
fmt.Printf(" Would create %d issues in Linear\n", stats.Created)
if !createOnly {
fmt.Printf(" Would update %d issues in Linear\n", stats.Updated)
}
}
return stats, nil
}

View File

@@ -2,16 +2,13 @@ package main
import (
"context"
"encoding/json"
"fmt"
"os"
"os/signal"
"path/filepath"
"regexp"
"runtime/pprof"
"runtime/trace"
"slices"
"strings"
"sync"
"syscall"
"time"
@@ -26,40 +23,9 @@ import (
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/memory"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
)
// DaemonStatus captures daemon connection state for the current command
type DaemonStatus struct {
Mode string `json:"mode"` // "daemon" or "direct"
Connected bool `json:"connected"`
Degraded bool `json:"degraded"`
SocketPath string `json:"socket_path,omitempty"`
AutoStartEnabled bool `json:"auto_start_enabled"`
AutoStartAttempted bool `json:"auto_start_attempted"`
AutoStartSucceeded bool `json:"auto_start_succeeded"`
FallbackReason string `json:"fallback_reason,omitempty"` // "none","flag_no_daemon","connect_failed","health_failed","auto_start_disabled","auto_start_failed"
Detail string `json:"detail,omitempty"` // short diagnostic
Health string `json:"health,omitempty"` // "healthy","degraded","unhealthy"
}
// Fallback reason constants
const (
FallbackNone = "none"
FallbackFlagNoDaemon = "flag_no_daemon"
FallbackConnectFailed = "connect_failed"
FallbackHealthFailed = "health_failed"
FallbackWorktreeSafety = "worktree_safety"
cmdDaemon = "daemon"
cmdImport = "import"
statusHealthy = "healthy"
FallbackAutoStartDisabled = "auto_start_disabled"
FallbackAutoStartFailed = "auto_start_failed"
FallbackDaemonUnsupported = "daemon_unsupported"
FallbackWispOperation = "wisp_operation"
)
var (
dbPath string
actor string
@@ -101,7 +67,6 @@ var (
previousVersion = "" // The last bd version user had (empty = first run or unknown)
upgradeAcknowledged = false // Set to true after showing upgrade notification once per session
)
var (
noAutoFlush bool
noAutoImport bool
@@ -117,12 +82,6 @@ var (
quietFlag bool // Suppress non-essential output
)
// Command group IDs for help organization
const (
GroupMaintenance = "maintenance"
GroupIntegrations = "integrations"
)
func init() {
// Initialize viper configuration
if err := config.Initialize(); err != nil {
@@ -171,132 +130,6 @@ func init() {
rootCmd.SetHelpFunc(colorizedHelpFunc)
}
// colorizedHelpFunc wraps Cobra's default help with semantic coloring
// Applies subtle accent color to group headers for visual hierarchy
func colorizedHelpFunc(cmd *cobra.Command, args []string) {
// Build full help output: Long description + Usage
var output strings.Builder
// Include Long description first (like Cobra's default help)
if cmd.Long != "" {
output.WriteString(cmd.Long)
output.WriteString("\n\n")
} else if cmd.Short != "" {
output.WriteString(cmd.Short)
output.WriteString("\n\n")
}
// Add the usage string which contains commands, flags, etc.
output.WriteString(cmd.UsageString())
// Apply semantic coloring
result := colorizeHelpOutput(output.String())
fmt.Print(result)
}
// colorizeHelpOutput applies semantic colors to help text
// - Group headers get accent color for visual hierarchy
// - Section headers (Examples:, Flags:) get accent color
// - Command names get subtle styling for scanability
// - Flag names get bold styling, types get muted
// - Default values get muted styling
func colorizeHelpOutput(help string) string {
// Match group header lines (e.g., "Working With Issues:")
// These are standalone lines ending with ":" and followed by commands
groupHeaderRE := regexp.MustCompile(`(?m)^([A-Z][A-Za-z &]+:)\s*$`)
result := groupHeaderRE.ReplaceAllStringFunc(help, func(match string) string {
// Trim whitespace, colorize, then restore
trimmed := strings.TrimSpace(match)
return ui.RenderAccent(trimmed)
})
// Match section headers in subcommand help (Examples:, Flags:, etc.)
sectionHeaderRE := regexp.MustCompile(`(?m)^(Examples|Flags|Usage|Global Flags|Aliases|Available Commands):`)
result = sectionHeaderRE.ReplaceAllStringFunc(result, func(match string) string {
return ui.RenderAccent(match)
})
// Match command lines: " command Description text"
// Commands are indented with 2 spaces, followed by spaces, then description
// Pattern matches: indent + command-name (with hyphens) + spacing + description
cmdLineRE := regexp.MustCompile(`(?m)^( )([a-z][a-z0-9]*(?:-[a-z0-9]+)*)(\s{2,})(.*)$`)
result = cmdLineRE.ReplaceAllStringFunc(result, func(match string) string {
parts := cmdLineRE.FindStringSubmatch(match)
if len(parts) != 5 {
return match
}
indent := parts[1]
cmdName := parts[2]
spacing := parts[3]
description := parts[4]
// Colorize command references in description (e.g., 'comments add')
description = colorizeCommandRefs(description)
// Highlight entry point hints (e.g., "(start here)")
description = highlightEntryPoints(description)
// Subtle styling on command name for scanability
return indent + ui.RenderCommand(cmdName) + spacing + description
})
// Match flag lines: " -f, --file string Description"
// Pattern: indent + flags + spacing + optional type + description
flagLineRE := regexp.MustCompile(`(?m)^(\s+)(-\w,\s+--[\w-]+|--[\w-]+)(\s+)(string|int|duration|bool)?(\s*.*)$`)
result = flagLineRE.ReplaceAllStringFunc(result, func(match string) string {
parts := flagLineRE.FindStringSubmatch(match)
if len(parts) < 6 {
return match
}
indent := parts[1]
flags := parts[2]
spacing := parts[3]
typeStr := parts[4]
desc := parts[5]
// Mute default values in description
desc = muteDefaults(desc)
if typeStr != "" {
return indent + ui.RenderCommand(flags) + spacing + ui.RenderMuted(typeStr) + desc
}
return indent + ui.RenderCommand(flags) + spacing + desc
})
return result
}
// muteDefaults applies muted styling to default value annotations
func muteDefaults(text string) string {
defaultRE := regexp.MustCompile(`(\(default[^)]*\))`)
return defaultRE.ReplaceAllStringFunc(text, func(match string) string {
return ui.RenderMuted(match)
})
}
// highlightEntryPoints applies accent styling to entry point hints like "(start here)"
func highlightEntryPoints(text string) string {
entryRE := regexp.MustCompile(`(\(start here\))`)
return entryRE.ReplaceAllStringFunc(text, func(match string) string {
return ui.RenderAccent(match)
})
}
// colorizeCommandRefs applies command styling to references in text
// Matches patterns like 'command name' or 'bd command'
func colorizeCommandRefs(text string) string {
// Match 'command words' in single quotes (e.g., 'comments add')
cmdRefRE := regexp.MustCompile(`'([a-z][a-z0-9 -]+)'`)
return cmdRefRE.ReplaceAllStringFunc(text, func(match string) string {
// Extract the command name without quotes
inner := match[1 : len(match)-1]
return "'" + ui.RenderCommand(inner) + "'"
})
}
var rootCmd = &cobra.Command{
Use: "bd",
Short: "bd - Dependency-aware issue tracker",
@@ -960,189 +793,8 @@ var rootCmd = &cobra.Command{
},
}
// getDebounceDuration returns the auto-flush debounce duration
// Configurable via config file or BEADS_FLUSH_DEBOUNCE env var (e.g., "500ms", "10s")
// Defaults to 5 seconds if not set or invalid
// signalGasTownActivity writes an activity signal for Gas Town daemon.
// This enables exponential backoff based on bd usage detection.
// Best-effort: silent on any failure, never affects bd operation.
func signalGasTownActivity() {
// Determine town root
// Priority: GT_ROOT env > detect from cwd path > skip
townRoot := os.Getenv("GT_ROOT")
if townRoot == "" {
// Try to detect from cwd - if under ~/gt/, use that as town root
home, err := os.UserHomeDir()
if err != nil {
return
}
gtRoot := filepath.Join(home, "gt")
cwd, err := os.Getwd()
if err != nil {
return
}
if strings.HasPrefix(cwd, gtRoot+string(os.PathSeparator)) {
townRoot = gtRoot
}
}
if townRoot == "" {
return // Not in Gas Town, skip
}
// Ensure daemon directory exists
daemonDir := filepath.Join(townRoot, "daemon")
if err := os.MkdirAll(daemonDir, 0755); err != nil {
return
}
// Build command line from os.Args
cmdLine := strings.Join(os.Args, " ")
// Determine actor (use package-level var if set, else fall back to env)
actorName := actor
if actorName == "" {
if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
actorName = bdActor
} else if user := os.Getenv("USER"); user != "" {
actorName = user
} else {
actorName = "unknown"
}
}
// Build activity signal
activity := struct {
LastCommand string `json:"last_command"`
Actor string `json:"actor"`
Timestamp string `json:"timestamp"`
}{
LastCommand: cmdLine,
Actor: actorName,
Timestamp: time.Now().UTC().Format(time.RFC3339),
}
data, err := json.Marshal(activity)
if err != nil {
return
}
// Write atomically (write to temp, rename)
activityPath := filepath.Join(daemonDir, "activity.json")
tmpPath := activityPath + ".tmp"
// nolint:gosec // G306: 0644 is appropriate for a status file
if err := os.WriteFile(tmpPath, data, 0644); err != nil {
return
}
_ = os.Rename(tmpPath, activityPath)
}
func main() {
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
}
// isFreshCloneError checks if the error is due to a fresh clone scenario
// where the database exists but is missing required config (like issue_prefix).
// This happens when someone clones a repo with beads but needs to initialize.
func isFreshCloneError(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
// Check for the specific migration invariant error pattern
return strings.Contains(errStr, "post-migration validation failed") &&
strings.Contains(errStr, "required config key missing: issue_prefix")
}
// handleFreshCloneError displays a helpful message when a fresh clone is detected
// and returns true if the error was handled (so caller should exit).
// If not a fresh clone error, returns false and does nothing.
func handleFreshCloneError(err error, beadsDir string) bool {
if !isFreshCloneError(err) {
return false
}
// Look for JSONL file in the .beads directory
jsonlPath := ""
issueCount := 0
if beadsDir != "" {
// Check for issues.jsonl (canonical) first, then beads.jsonl (legacy)
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
candidate := filepath.Join(beadsDir, name)
if info, statErr := os.Stat(candidate); statErr == nil && !info.IsDir() {
jsonlPath = candidate
// Count lines (approximately = issue count)
// #nosec G304 -- candidate is constructed from beadsDir which is .beads/
if data, readErr := os.ReadFile(candidate); readErr == nil {
for _, line := range strings.Split(string(data), "\n") {
if strings.TrimSpace(line) != "" {
issueCount++
}
}
}
break
}
}
}
fmt.Fprintf(os.Stderr, "Error: Database not initialized\n\n")
fmt.Fprintf(os.Stderr, "This appears to be a fresh clone or the database needs initialization.\n")
if jsonlPath != "" && issueCount > 0 {
fmt.Fprintf(os.Stderr, "Found: %s (%d issues)\n\n", jsonlPath, issueCount)
fmt.Fprintf(os.Stderr, "To initialize from the JSONL file, run:\n")
fmt.Fprintf(os.Stderr, " bd import -i %s\n\n", jsonlPath)
} else {
fmt.Fprintf(os.Stderr, "\nTo initialize a new database, run:\n")
fmt.Fprintf(os.Stderr, " bd init --prefix <your-prefix>\n\n")
}
fmt.Fprintf(os.Stderr, "For more information: bd init --help\n")
return true
}
// isWispOperation returns true if the command operates on ephemeral wisps.
// Wisp operations auto-bypass the daemon because wisps are local-only
// (Ephemeral=true issues are never exported to JSONL).
// Detects:
// - mol wisp subcommands (create, list, gc, or direct proto invocation)
// - mol burn (only operates on wisps)
// - mol squash (condenses wisps to digests)
// - Commands with ephemeral issue IDs in args (bd-*-eph-*, eph-*)
func isWispOperation(cmd *cobra.Command, args []string) bool {
cmdName := cmd.Name()
// Check command hierarchy for wisp subcommands
// bd mol wisp → parent is "mol", cmd is "wisp"
// bd mol wisp create → parent is "wisp", cmd is "create"
if cmd.Parent() != nil {
parentName := cmd.Parent().Name()
// Direct wisp command or subcommands under wisp
if parentName == "wisp" || cmdName == "wisp" {
return true
}
// mol burn and mol squash are wisp-only operations
if parentName == "mol" && (cmdName == "burn" || cmdName == "squash") {
return true
}
}
// Check for ephemeral issue IDs in arguments
// Ephemeral IDs have "eph" segment: bd-eph-xxx, gt-eph-xxx, eph-xxx
for _, arg := range args {
// Skip flags
if strings.HasPrefix(arg, "-") {
continue
}
// Check for ephemeral prefix patterns
if strings.Contains(arg, "-eph-") || strings.HasPrefix(arg, "eph-") {
return true
}
}
return false
}

119
cmd/bd/main_daemon.go Normal file
View File

@@ -0,0 +1,119 @@
package main
import (
"encoding/json"
"os"
"path/filepath"
"strings"
"time"
)
// DaemonStatus captures daemon connection state for the current command
type DaemonStatus struct {
Mode string `json:"mode"` // "daemon" or "direct"
Connected bool `json:"connected"`
Degraded bool `json:"degraded"`
SocketPath string `json:"socket_path,omitempty"`
AutoStartEnabled bool `json:"auto_start_enabled"`
AutoStartAttempted bool `json:"auto_start_attempted"`
AutoStartSucceeded bool `json:"auto_start_succeeded"`
FallbackReason string `json:"fallback_reason,omitempty"` // "none","flag_no_daemon","connect_failed","health_failed","auto_start_disabled","auto_start_failed"
Detail string `json:"detail,omitempty"` // short diagnostic
Health string `json:"health,omitempty"` // "healthy","degraded","unhealthy"
}
// Fallback reason constants
const (
FallbackNone = "none"
FallbackFlagNoDaemon = "flag_no_daemon"
FallbackConnectFailed = "connect_failed"
FallbackHealthFailed = "health_failed"
FallbackWorktreeSafety = "worktree_safety"
cmdDaemon = "daemon"
cmdImport = "import"
statusHealthy = "healthy"
FallbackAutoStartDisabled = "auto_start_disabled"
FallbackAutoStartFailed = "auto_start_failed"
FallbackDaemonUnsupported = "daemon_unsupported"
FallbackWispOperation = "wisp_operation"
)
// Command group IDs for help organization
const (
GroupMaintenance = "maintenance"
GroupIntegrations = "integrations"
)
// signalGasTownActivity writes an activity signal for Gas Town daemon.
// This enables exponential backoff based on bd usage detection.
// Best-effort: silent on any failure, never affects bd operation.
func signalGasTownActivity() {
// Determine town root
// Priority: GT_ROOT env > detect from cwd path > skip
townRoot := os.Getenv("GT_ROOT")
if townRoot == "" {
// Try to detect from cwd - if under ~/gt/, use that as town root
home, err := os.UserHomeDir()
if err != nil {
return
}
gtRoot := filepath.Join(home, "gt")
cwd, err := os.Getwd()
if err != nil {
return
}
if strings.HasPrefix(cwd, gtRoot+string(os.PathSeparator)) {
townRoot = gtRoot
}
}
if townRoot == "" {
return // Not in Gas Town, skip
}
// Ensure daemon directory exists
daemonDir := filepath.Join(townRoot, "daemon")
if err := os.MkdirAll(daemonDir, 0755); err != nil {
return
}
// Build command line from os.Args
cmdLine := strings.Join(os.Args, " ")
// Determine actor (use package-level var if set, else fall back to env)
actorName := actor
if actorName == "" {
if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
actorName = bdActor
} else if user := os.Getenv("USER"); user != "" {
actorName = user
} else {
actorName = "unknown"
}
}
// Build activity signal
activity := struct {
LastCommand string `json:"last_command"`
Actor string `json:"actor"`
Timestamp string `json:"timestamp"`
}{
LastCommand: cmdLine,
Actor: actorName,
Timestamp: time.Now().UTC().Format(time.RFC3339),
}
data, err := json.Marshal(activity)
if err != nil {
return
}
// Write atomically (write to temp, rename)
activityPath := filepath.Join(daemonDir, "activity.json")
tmpPath := activityPath + ".tmp"
// nolint:gosec // G306: 0644 is appropriate for a status file
if err := os.WriteFile(tmpPath, data, 0644); err != nil {
return
}
_ = os.Rename(tmpPath, activityPath)
}

113
cmd/bd/main_errors.go Normal file
View File

@@ -0,0 +1,113 @@
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
)
// isFreshCloneError checks if the error is due to a fresh clone scenario
// where the database exists but is missing required config (like issue_prefix).
// This happens when someone clones a repo with beads but needs to initialize.
func isFreshCloneError(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
// Check for the specific migration invariant error pattern
return strings.Contains(errStr, "post-migration validation failed") &&
strings.Contains(errStr, "required config key missing: issue_prefix")
}
// handleFreshCloneError displays a helpful message when a fresh clone is detected
// and returns true if the error was handled (so caller should exit).
// If not a fresh clone error, returns false and does nothing.
func handleFreshCloneError(err error, beadsDir string) bool {
if !isFreshCloneError(err) {
return false
}
// Look for JSONL file in the .beads directory
jsonlPath := ""
issueCount := 0
if beadsDir != "" {
// Check for issues.jsonl (canonical) first, then beads.jsonl (legacy)
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
candidate := filepath.Join(beadsDir, name)
if info, statErr := os.Stat(candidate); statErr == nil && !info.IsDir() {
jsonlPath = candidate
// Count lines (approximately = issue count)
// #nosec G304 -- candidate is constructed from beadsDir which is .beads/
if data, readErr := os.ReadFile(candidate); readErr == nil {
for _, line := range strings.Split(string(data), "\n") {
if strings.TrimSpace(line) != "" {
issueCount++
}
}
}
break
}
}
}
fmt.Fprintf(os.Stderr, "Error: Database not initialized\n\n")
fmt.Fprintf(os.Stderr, "This appears to be a fresh clone or the database needs initialization.\n")
if jsonlPath != "" && issueCount > 0 {
fmt.Fprintf(os.Stderr, "Found: %s (%d issues)\n\n", jsonlPath, issueCount)
fmt.Fprintf(os.Stderr, "To initialize from the JSONL file, run:\n")
fmt.Fprintf(os.Stderr, " bd import -i %s\n\n", jsonlPath)
} else {
fmt.Fprintf(os.Stderr, "\nTo initialize a new database, run:\n")
fmt.Fprintf(os.Stderr, " bd init --prefix <your-prefix>\n\n")
}
fmt.Fprintf(os.Stderr, "For more information: bd init --help\n")
return true
}
// isWispOperation returns true if the command operates on ephemeral wisps.
// Wisp operations auto-bypass the daemon because wisps are local-only
// (Ephemeral=true issues are never exported to JSONL).
// Detects:
// - mol wisp subcommands (create, list, gc, or direct proto invocation)
// - mol burn (only operates on wisps)
// - mol squash (condenses wisps to digests)
// - Commands with ephemeral issue IDs in args (bd-*-eph-*, eph-*)
func isWispOperation(cmd *cobra.Command, args []string) bool {
cmdName := cmd.Name()
// Check command hierarchy for wisp subcommands
// bd mol wisp → parent is "mol", cmd is "wisp"
// bd mol wisp create → parent is "wisp", cmd is "create"
if cmd.Parent() != nil {
parentName := cmd.Parent().Name()
// Direct wisp command or subcommands under wisp
if parentName == "wisp" || cmdName == "wisp" {
return true
}
// mol burn and mol squash are wisp-only operations
if parentName == "mol" && (cmdName == "burn" || cmdName == "squash") {
return true
}
}
// Check for ephemeral issue IDs in arguments
// Ephemeral IDs have "eph" segment: bd-eph-xxx, gt-eph-xxx, eph-xxx
for _, arg := range args {
// Skip flags
if strings.HasPrefix(arg, "-") {
continue
}
// Check for ephemeral prefix patterns
if strings.Contains(arg, "-eph-") || strings.HasPrefix(arg, "eph-") {
return true
}
}
return false
}

136
cmd/bd/main_help.go Normal file
View File

@@ -0,0 +1,136 @@
package main
import (
"fmt"
"regexp"
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/ui"
)
// colorizedHelpFunc wraps Cobra's default help with semantic coloring
// Applies subtle accent color to group headers for visual hierarchy
func colorizedHelpFunc(cmd *cobra.Command, args []string) {
// Build full help output: Long description + Usage
var output strings.Builder
// Include Long description first (like Cobra's default help)
if cmd.Long != "" {
output.WriteString(cmd.Long)
output.WriteString("\n\n")
} else if cmd.Short != "" {
output.WriteString(cmd.Short)
output.WriteString("\n\n")
}
// Add the usage string which contains commands, flags, etc.
output.WriteString(cmd.UsageString())
// Apply semantic coloring
result := colorizeHelpOutput(output.String())
fmt.Print(result)
}
// colorizeHelpOutput applies semantic colors to help text
// - Group headers get accent color for visual hierarchy
// - Section headers (Examples:, Flags:) get accent color
// - Command names get subtle styling for scanability
// - Flag names get bold styling, types get muted
// - Default values get muted styling
func colorizeHelpOutput(help string) string {
// Match group header lines (e.g., "Working With Issues:")
// These are standalone lines ending with ":" and followed by commands
groupHeaderRE := regexp.MustCompile(`(?m)^([A-Z][A-Za-z &]+:)\s*$`)
result := groupHeaderRE.ReplaceAllStringFunc(help, func(match string) string {
// Trim whitespace, colorize, then restore
trimmed := strings.TrimSpace(match)
return ui.RenderAccent(trimmed)
})
// Match section headers in subcommand help (Examples:, Flags:, etc.)
sectionHeaderRE := regexp.MustCompile(`(?m)^(Examples|Flags|Usage|Global Flags|Aliases|Available Commands):`)
result = sectionHeaderRE.ReplaceAllStringFunc(result, func(match string) string {
return ui.RenderAccent(match)
})
// Match command lines: " command Description text"
// Commands are indented with 2 spaces, followed by spaces, then description
// Pattern matches: indent + command-name (with hyphens) + spacing + description
cmdLineRE := regexp.MustCompile(`(?m)^( )([a-z][a-z0-9]*(?:-[a-z0-9]+)*)(\s{2,})(.*)$`)
result = cmdLineRE.ReplaceAllStringFunc(result, func(match string) string {
parts := cmdLineRE.FindStringSubmatch(match)
if len(parts) != 5 {
return match
}
indent := parts[1]
cmdName := parts[2]
spacing := parts[3]
description := parts[4]
// Colorize command references in description (e.g., 'comments add')
description = colorizeCommandRefs(description)
// Highlight entry point hints (e.g., "(start here)")
description = highlightEntryPoints(description)
// Subtle styling on command name for scanability
return indent + ui.RenderCommand(cmdName) + spacing + description
})
// Match flag lines: " -f, --file string Description"
// Pattern: indent + flags + spacing + optional type + description
flagLineRE := regexp.MustCompile(`(?m)^(\s+)(-\w,\s+--[\w-]+|--[\w-]+)(\s+)(string|int|duration|bool)?(\s*.*)$`)
result = flagLineRE.ReplaceAllStringFunc(result, func(match string) string {
parts := flagLineRE.FindStringSubmatch(match)
if len(parts) < 6 {
return match
}
indent := parts[1]
flags := parts[2]
spacing := parts[3]
typeStr := parts[4]
desc := parts[5]
// Mute default values in description
desc = muteDefaults(desc)
if typeStr != "" {
return indent + ui.RenderCommand(flags) + spacing + ui.RenderMuted(typeStr) + desc
}
return indent + ui.RenderCommand(flags) + spacing + desc
})
return result
}
// muteDefaults applies muted styling to default value annotations
func muteDefaults(text string) string {
defaultRE := regexp.MustCompile(`(\(default[^)]*\))`)
return defaultRE.ReplaceAllStringFunc(text, func(match string) string {
return ui.RenderMuted(match)
})
}
// highlightEntryPoints applies accent styling to entry point hints like "(start here)"
func highlightEntryPoints(text string) string {
entryRE := regexp.MustCompile(`(\(start here\))`)
return entryRE.ReplaceAllStringFunc(text, func(match string) string {
return ui.RenderAccent(match)
})
}
// colorizeCommandRefs applies command styling to references in text
// Matches patterns like 'command name' or 'bd command'
func colorizeCommandRefs(text string) string {
// Match 'command words' in single quotes (e.g., 'comments add')
cmdRefRE := regexp.MustCompile(`'([a-z][a-z0-9 -]+)'`)
return cmdRefRE.ReplaceAllStringFunc(text, func(match string) string {
// Extract the command name without quotes
inner := match[1 : len(match)-1]
return "'" + ui.RenderCommand(inner) + "'"
})
}

File diff suppressed because it is too large Load Diff

266
cmd/bd/show_thread.go Normal file
View File

@@ -0,0 +1,266 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"slices"
"strings"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
)
// showMessageThread displays a full conversation thread for a message
func showMessageThread(ctx context.Context, messageID string, jsonOutput bool) {
// Get the starting message
var startMsg *types.Issue
var err error
if daemonClient != nil {
resp, err := daemonClient.Show(&rpc.ShowArgs{ID: messageID})
if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching message %s: %v\n", messageID, err)
os.Exit(1)
}
if err := json.Unmarshal(resp.Data, &startMsg); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
} else {
startMsg, err = store.GetIssue(ctx, messageID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching message %s: %v\n", messageID, err)
os.Exit(1)
}
}
if startMsg == nil {
fmt.Fprintf(os.Stderr, "Message %s not found\n", messageID)
os.Exit(1)
}
// Find the root of the thread by following replies-to dependencies upward
// Per Decision 004, RepliesTo is now stored as a dependency, not an Issue field
rootMsg := startMsg
seen := make(map[string]bool)
seen[rootMsg.ID] = true
for {
// Find parent via replies-to dependency
parentID := findRepliesTo(ctx, rootMsg.ID, daemonClient, store)
if parentID == "" {
break // No parent, this is the root
}
if seen[parentID] {
break // Avoid infinite loops
}
seen[parentID] = true
var parentMsg *types.Issue
if daemonClient != nil {
resp, err := daemonClient.Show(&rpc.ShowArgs{ID: parentID})
if err != nil {
break // Parent not found, use current as root
}
if err := json.Unmarshal(resp.Data, &parentMsg); err != nil {
break
}
} else {
parentMsg, _ = store.GetIssue(ctx, parentID)
}
if parentMsg == nil {
break
}
rootMsg = parentMsg
}
// Now collect all messages in the thread
// Start from root and find all replies
// Build a map of child ID -> parent ID for display purposes
threadMessages := []*types.Issue{rootMsg}
threadIDs := map[string]bool{rootMsg.ID: true}
repliesTo := map[string]string{} // child ID -> parent ID
queue := []string{rootMsg.ID}
// BFS to find all replies
for len(queue) > 0 {
currentID := queue[0]
queue = queue[1:]
// Find all messages that reply to currentID via replies-to dependency
// Per Decision 004, replies are found via dependents with type replies-to
replies := findReplies(ctx, currentID, daemonClient, store)
for _, reply := range replies {
if threadIDs[reply.ID] {
continue // Already seen
}
threadMessages = append(threadMessages, reply)
threadIDs[reply.ID] = true
repliesTo[reply.ID] = currentID // Track parent for display
queue = append(queue, reply.ID)
}
}
// Sort by creation time
slices.SortFunc(threadMessages, func(a, b *types.Issue) int {
return a.CreatedAt.Compare(b.CreatedAt)
})
if jsonOutput {
encoder := json.NewEncoder(os.Stdout)
encoder.SetIndent("", " ")
_ = encoder.Encode(threadMessages)
return
}
// Display the thread
fmt.Printf("\n%s Thread: %s\n", ui.RenderAccent("📬"), rootMsg.Title)
fmt.Println(strings.Repeat("─", 66))
for _, msg := range threadMessages {
// Show indent based on depth (count replies_to chain using our map)
depth := 0
parent := repliesTo[msg.ID]
for parent != "" && depth < 5 {
depth++
parent = repliesTo[parent]
}
indent := strings.Repeat(" ", depth)
// Format timestamp
timeStr := msg.CreatedAt.Format("2006-01-02 15:04")
// Status indicator
statusIcon := "📧"
if msg.Status == types.StatusClosed {
statusIcon = "✓"
}
fmt.Printf("%s%s %s %s\n", indent, statusIcon, ui.RenderAccent(msg.ID), ui.RenderMuted(timeStr))
fmt.Printf("%s From: %s To: %s\n", indent, msg.Sender, msg.Assignee)
if parentID := repliesTo[msg.ID]; parentID != "" {
fmt.Printf("%s Re: %s\n", indent, parentID)
}
fmt.Printf("%s %s: %s\n", indent, ui.RenderMuted("Subject"), msg.Title)
if msg.Description != "" {
// Indent the body
bodyLines := strings.Split(msg.Description, "\n")
for _, line := range bodyLines {
fmt.Printf("%s %s\n", indent, line)
}
}
fmt.Println()
}
fmt.Printf("Total: %d messages in thread\n\n", len(threadMessages))
}
// findRepliesTo finds the parent ID that this issue replies to via replies-to dependency.
// Returns empty string if no parent found.
func findRepliesTo(ctx context.Context, issueID string, daemonClient *rpc.Client, store storage.Storage) string {
if daemonClient != nil {
// In daemon mode, use Show to get dependencies with metadata
resp, err := daemonClient.Show(&rpc.ShowArgs{ID: issueID})
if err != nil {
return ""
}
// Parse the full show response to get dependencies
type showResponse struct {
Dependencies []struct {
ID string `json:"id"`
DependencyType string `json:"dependency_type"`
} `json:"dependencies"`
}
var details showResponse
if err := json.Unmarshal(resp.Data, &details); err != nil {
return ""
}
for _, dep := range details.Dependencies {
if dep.DependencyType == string(types.DepRepliesTo) {
return dep.ID
}
}
return ""
}
// Direct mode - query storage
deps, err := store.GetDependencyRecords(ctx, issueID)
if err != nil {
return ""
}
for _, dep := range deps {
if dep.Type == types.DepRepliesTo {
return dep.DependsOnID
}
}
return ""
}
// findReplies finds all issues that reply to this issue via replies-to dependency.
func findReplies(ctx context.Context, issueID string, daemonClient *rpc.Client, store storage.Storage) []*types.Issue {
if daemonClient != nil {
// In daemon mode, use Show to get dependents with metadata
resp, err := daemonClient.Show(&rpc.ShowArgs{ID: issueID})
if err != nil {
return nil
}
// Parse the full show response to get dependents
type showResponse struct {
Dependents []struct {
types.Issue
DependencyType string `json:"dependency_type"`
} `json:"dependents"`
}
var details showResponse
if err := json.Unmarshal(resp.Data, &details); err != nil {
return nil
}
var replies []*types.Issue
for _, dep := range details.Dependents {
if dep.DependencyType == string(types.DepRepliesTo) {
issue := dep.Issue // Copy to avoid aliasing
replies = append(replies, &issue)
}
}
return replies
}
// Direct mode - query storage
if sqliteStore, ok := store.(*sqlite.SQLiteStorage); ok {
deps, err := sqliteStore.GetDependentsWithMetadata(ctx, issueID)
if err != nil {
return nil
}
var replies []*types.Issue
for _, dep := range deps {
if dep.DependencyType == types.DepRepliesTo {
issue := dep.Issue // Copy to avoid aliasing
replies = append(replies, &issue)
}
}
return replies
}
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
return nil
}
var replies []*types.Issue
for childID, deps := range allDeps {
for _, dep := range deps {
if dep.Type == types.DepRepliesTo && dep.DependsOnID == issueID {
issue, _ := store.GetIssue(ctx, childID)
if issue != nil {
replies = append(replies, issue)
}
}
}
}
return replies
}

View File

@@ -2,19 +2,16 @@ package main
import (
"bufio"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/git"
"github.com/steveyegge/beads/internal/syncbranch"
)
@@ -742,456 +739,7 @@ func init() {
rootCmd.AddCommand(syncCmd)
}
// isGitRepo checks if the current directory is in a git repository
func isGitRepo() bool {
cmd := exec.Command("git", "rev-parse", "--git-dir")
return cmd.Run() == nil
}
// gitHasUnmergedPaths checks for unmerged paths or merge in progress
func gitHasUnmergedPaths() (bool, error) {
cmd := exec.Command("git", "status", "--porcelain")
out, err := cmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
// Check for unmerged status codes (DD, AU, UD, UA, DU, AA, UU)
for _, line := range strings.Split(string(out), "\n") {
if len(line) >= 2 {
s := line[:2]
if s == "DD" || s == "AU" || s == "UD" || s == "UA" || s == "DU" || s == "AA" || s == "UU" {
return true, nil
}
}
}
// Check if MERGE_HEAD exists (merge in progress)
if exec.Command("git", "rev-parse", "-q", "--verify", "MERGE_HEAD").Run() == nil {
return true, nil
}
return false, nil
}
// gitHasUpstream checks if the current branch has an upstream configured
// Uses git config directly for compatibility with Git for Windows
func gitHasUpstream() bool {
// Get current branch name
branchCmd := exec.Command("git", "symbolic-ref", "--short", "HEAD")
branchOutput, err := branchCmd.Output()
if err != nil {
return false
}
branch := strings.TrimSpace(string(branchOutput))
// Check if remote and merge refs are configured
remoteCmd := exec.Command("git", "config", "--get", fmt.Sprintf("branch.%s.remote", branch))
mergeCmd := exec.Command("git", "config", "--get", fmt.Sprintf("branch.%s.merge", branch))
remoteErr := remoteCmd.Run()
mergeErr := mergeCmd.Run()
return remoteErr == nil && mergeErr == nil
}
// gitHasChanges checks if the specified file has uncommitted changes
func gitHasChanges(ctx context.Context, filePath string) (bool, error) {
cmd := exec.CommandContext(ctx, "git", "status", "--porcelain", filePath)
output, err := cmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
return len(strings.TrimSpace(string(output))) > 0, nil
}
// getRepoRootForWorktree returns the main repository root for running git commands
// This is always the main repository root, never the worktree root
func getRepoRootForWorktree(_ context.Context) string {
repoRoot, err := git.GetMainRepoRoot()
if err != nil {
// Fallback to current directory if GetMainRepoRoot fails
return "."
}
return repoRoot
}
// gitHasBeadsChanges checks if any tracked files in .beads/ have uncommitted changes
func gitHasBeadsChanges(ctx context.Context) (bool, error) {
// Get the absolute path to .beads directory
beadsDir := beads.FindBeadsDir()
if beadsDir == "" {
return false, fmt.Errorf("no .beads directory found")
}
// Get the repository root (handles worktrees properly)
repoRoot := getRepoRootForWorktree(ctx)
if repoRoot == "" {
return false, fmt.Errorf("cannot determine repository root")
}
// Compute relative path from repo root to .beads
relPath, err := filepath.Rel(repoRoot, beadsDir)
if err != nil {
// Fall back to absolute path if relative path fails
statusCmd := exec.CommandContext(ctx, "git", "status", "--porcelain", beadsDir)
statusOutput, err := statusCmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
return len(strings.TrimSpace(string(statusOutput))) > 0, nil
}
// Run git status with relative path from repo root
statusCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "status", "--porcelain", relPath)
statusOutput, err := statusCmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
return len(strings.TrimSpace(string(statusOutput))) > 0, nil
}
// buildGitCommitArgs returns git commit args with config-based author and signing options (GH#600)
// This allows users to configure a separate author and disable GPG signing for beads commits.
func buildGitCommitArgs(repoRoot, message string, extraArgs ...string) []string {
args := []string{"-C", repoRoot, "commit"}
// Add --author if configured
if author := config.GetString("git.author"); author != "" {
args = append(args, "--author", author)
}
// Add --no-gpg-sign if configured
if config.GetBool("git.no-gpg-sign") {
args = append(args, "--no-gpg-sign")
}
// Add message
args = append(args, "-m", message)
// Add any extra args (like -- pathspec)
args = append(args, extraArgs...)
return args
}
// gitCommit commits the specified file (worktree-aware)
func gitCommit(ctx context.Context, filePath string, message string) error {
// Get the repository root (handles worktrees properly)
repoRoot := getRepoRootForWorktree(ctx)
if repoRoot == "" {
return fmt.Errorf("cannot determine repository root")
}
// Make file path relative to repo root for git operations
relPath, err := filepath.Rel(repoRoot, filePath)
if err != nil {
relPath = filePath // Fall back to absolute path
}
// Stage the file from repo root context
addCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "add", relPath)
if err := addCmd.Run(); err != nil {
return fmt.Errorf("git add failed: %w", err)
}
// Generate message if not provided
if message == "" {
message = fmt.Sprintf("bd sync: %s", time.Now().Format("2006-01-02 15:04:05"))
}
// Commit from repo root context with config-based author and signing options
// Use pathspec to commit ONLY this file
// This prevents accidentally committing other staged files
commitArgs := buildGitCommitArgs(repoRoot, message, "--", relPath)
commitCmd := exec.CommandContext(ctx, "git", commitArgs...)
output, err := commitCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git commit failed: %w\n%s", err, output)
}
return nil
}
// gitCommitBeadsDir stages and commits only sync-related files in .beads/
// This ensures bd sync doesn't accidentally commit other staged files.
// Only stages specific sync files (issues.jsonl, deletions.jsonl, metadata.json)
// to avoid staging gitignored snapshot files that may be tracked.
// Worktree-aware: handles cases where .beads is in the main repo but we're running from a worktree.
func gitCommitBeadsDir(ctx context.Context, message string) error {
beadsDir := beads.FindBeadsDir()
if beadsDir == "" {
return fmt.Errorf("no .beads directory found")
}
// Get the repository root (handles worktrees properly)
repoRoot := getRepoRootForWorktree(ctx)
if repoRoot == "" {
return fmt.Errorf("cannot determine repository root")
}
// Stage only the specific sync-related files
// This avoids staging gitignored snapshot files (beads.*.jsonl, *.meta.json)
// that may still be tracked from before they were added to .gitignore
syncFiles := []string{
filepath.Join(beadsDir, "issues.jsonl"),
filepath.Join(beadsDir, "deletions.jsonl"),
filepath.Join(beadsDir, "interactions.jsonl"),
filepath.Join(beadsDir, "metadata.json"),
}
// Only add files that exist
var filesToAdd []string
for _, f := range syncFiles {
if _, err := os.Stat(f); err == nil {
// Convert to relative path from repo root for git operations
relPath, err := filepath.Rel(repoRoot, f)
if err != nil {
relPath = f // Fall back to absolute path if relative fails
}
filesToAdd = append(filesToAdd, relPath)
}
}
if len(filesToAdd) == 0 {
return fmt.Errorf("no sync files found to commit")
}
// Stage only the sync files from repo root context (worktree-aware)
args := append([]string{"-C", repoRoot, "add"}, filesToAdd...)
addCmd := exec.CommandContext(ctx, "git", args...)
if err := addCmd.Run(); err != nil {
return fmt.Errorf("git add failed: %w", err)
}
// Generate message if not provided
if message == "" {
message = fmt.Sprintf("bd sync: %s", time.Now().Format("2006-01-02 15:04:05"))
}
// Commit only .beads/ files using -- pathspec
// This prevents accidentally committing other staged files that the user
// may have staged but wasn't ready to commit yet.
// Convert beadsDir to relative path for git commit (worktree-aware)
relBeadsDir, err := filepath.Rel(repoRoot, beadsDir)
if err != nil {
relBeadsDir = beadsDir // Fall back to absolute path if relative fails
}
// Use config-based author and signing options with pathspec
commitArgs := buildGitCommitArgs(repoRoot, message, "--", relBeadsDir)
commitCmd := exec.CommandContext(ctx, "git", commitArgs...)
output, err := commitCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git commit failed: %w\n%s", err, output)
}
return nil
}
// hasGitRemote checks if a git remote exists in the repository
func hasGitRemote(ctx context.Context) bool {
cmd := exec.CommandContext(ctx, "git", "remote")
output, err := cmd.Output()
if err != nil {
return false
}
return len(strings.TrimSpace(string(output))) > 0
}
// isInRebase checks if we're currently in a git rebase state
func isInRebase() bool {
// Get actual git directory (handles worktrees)
gitDir, err := git.GetGitDir()
if err != nil {
return false
}
// Check for rebase-merge directory (interactive rebase)
rebaseMergePath := filepath.Join(gitDir, "rebase-merge")
if _, err := os.Stat(rebaseMergePath); err == nil {
return true
}
// Check for rebase-apply directory (non-interactive rebase)
rebaseApplyPath := filepath.Join(gitDir, "rebase-apply")
if _, err := os.Stat(rebaseApplyPath); err == nil {
return true
}
return false
}
// hasJSONLConflict checks if the beads JSONL file has a merge conflict
// Returns true only if the JSONL file (issues.jsonl or beads.jsonl) is the only file in conflict
func hasJSONLConflict() bool {
cmd := exec.Command("git", "status", "--porcelain")
out, err := cmd.Output()
if err != nil {
return false
}
var hasJSONLConflict bool
var hasOtherConflict bool
for _, line := range strings.Split(string(out), "\n") {
if len(line) < 3 {
continue
}
// Check for unmerged status codes (UU = both modified, AA = both added, etc.)
status := line[:2]
if status == "UU" || status == "AA" || status == "DD" ||
status == "AU" || status == "UA" || status == "DU" || status == "UD" {
filepath := strings.TrimSpace(line[3:])
// Check for beads JSONL files (issues.jsonl or beads.jsonl in .beads/)
if strings.HasSuffix(filepath, "issues.jsonl") || strings.HasSuffix(filepath, "beads.jsonl") {
hasJSONLConflict = true
} else {
hasOtherConflict = true
}
}
}
// Only return true if ONLY the JSONL file has a conflict
return hasJSONLConflict && !hasOtherConflict
}
// runGitRebaseContinue continues a rebase after resolving conflicts
func runGitRebaseContinue(ctx context.Context) error {
cmd := exec.CommandContext(ctx, "git", "rebase", "--continue")
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git rebase --continue failed: %w\n%s", err, output)
}
return nil
}
// gitPull pulls from the current branch's upstream
// Returns nil if no remote configured (local-only mode)
func checkMergeDriverConfig() {
// Get current merge driver configuration
cmd := exec.Command("git", "config", "merge.beads.driver")
output, err := cmd.Output()
if err != nil {
// No merge driver configured - this is OK, user may not need it
return
}
currentConfig := strings.TrimSpace(string(output))
// Check if using old incorrect placeholders
if strings.Contains(currentConfig, "%L") || strings.Contains(currentConfig, "%R") {
fmt.Fprintf(os.Stderr, "\n⚠ WARNING: Git merge driver is misconfigured!\n")
fmt.Fprintf(os.Stderr, " Current: %s\n", currentConfig)
fmt.Fprintf(os.Stderr, " Problem: Git only supports %%O (base), %%A (current), %%B (other)\n")
fmt.Fprintf(os.Stderr, " Using %%L/%%R will cause merge failures!\n")
fmt.Fprintf(os.Stderr, "\n Fix now: bd doctor --fix\n")
fmt.Fprintf(os.Stderr, " Or manually: git config merge.beads.driver \"bd merge %%A %%O %%A %%B\"\n\n")
}
}
func gitPull(ctx context.Context) error {
// Check if any remote exists (support local-only repos)
if !hasGitRemote(ctx) {
return nil // Gracefully skip - local-only mode
}
// Get current branch name
// Use symbolic-ref to work in fresh repos without commits
branchCmd := exec.CommandContext(ctx, "git", "symbolic-ref", "--short", "HEAD")
branchOutput, err := branchCmd.Output()
if err != nil {
return fmt.Errorf("failed to get current branch: %w", err)
}
branch := strings.TrimSpace(string(branchOutput))
// Get remote name for current branch (usually "origin")
remoteCmd := exec.CommandContext(ctx, "git", "config", "--get", fmt.Sprintf("branch.%s.remote", branch))
remoteOutput, err := remoteCmd.Output()
if err != nil {
// If no remote configured, default to "origin"
remoteOutput = []byte("origin\n")
}
remote := strings.TrimSpace(string(remoteOutput))
// Pull with explicit remote and branch
cmd := exec.CommandContext(ctx, "git", "pull", remote, branch)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git pull failed: %w\n%s", err, output)
}
return nil
}
// gitPush pushes to the current branch's upstream
// Returns nil if no remote configured (local-only mode)
func gitPush(ctx context.Context) error {
// Check if any remote exists (support local-only repos)
if !hasGitRemote(ctx) {
return nil // Gracefully skip - local-only mode
}
cmd := exec.CommandContext(ctx, "git", "push")
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git push failed: %w\n%s", err, output)
}
return nil
}
// restoreBeadsDirFromBranch restores .beads/ directory from the current branch's committed state.
// This is used after sync when sync.branch is configured to keep the working directory clean.
// The actual beads data lives on the sync branch; the main branch's .beads/ is just a snapshot.
func restoreBeadsDirFromBranch(ctx context.Context) error {
beadsDir := beads.FindBeadsDir()
if beadsDir == "" {
return fmt.Errorf("no .beads directory found")
}
// Restore .beads/ from HEAD (current branch's committed state)
// Using -- to ensure .beads/ is treated as a path, not a branch name
cmd := exec.CommandContext(ctx, "git", "checkout", "HEAD", "--", beadsDir)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git checkout failed: %w\n%s", err, output)
}
return nil
}
// getDefaultBranch returns the default branch name (main or master) for origin remote
// Checks remote HEAD first, then falls back to checking if main/master exist
func getDefaultBranch(ctx context.Context) string {
return getDefaultBranchForRemote(ctx, "origin")
}
// getDefaultBranchForRemote returns the default branch name for a specific remote
// Checks remote HEAD first, then falls back to checking if main/master exist
func getDefaultBranchForRemote(ctx context.Context, remote string) string {
// Try to get default branch from remote
cmd := exec.CommandContext(ctx, "git", "symbolic-ref", fmt.Sprintf("refs/remotes/%s/HEAD", remote))
output, err := cmd.Output()
if err == nil {
ref := strings.TrimSpace(string(output))
// Extract branch name from refs/remotes/<remote>/main
prefix := fmt.Sprintf("refs/remotes/%s/", remote)
if strings.HasPrefix(ref, prefix) {
return strings.TrimPrefix(ref, prefix)
}
}
// Fallback: check if <remote>/main exists
if exec.CommandContext(ctx, "git", "rev-parse", "--verify", fmt.Sprintf("%s/main", remote)).Run() == nil {
return "main"
}
// Fallback: check if <remote>/master exists
if exec.CommandContext(ctx, "git", "rev-parse", "--verify", fmt.Sprintf("%s/master", remote)).Run() == nil {
return "master"
}
// Default to main
return "main"
}
// Git helper functions moved to sync_git.go
// doSyncFromMain function moved to sync_import.go
// Export function moved to sync_export.go

466
cmd/bd/sync_git.go Normal file
View File

@@ -0,0 +1,466 @@
package main
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/git"
)
// isGitRepo checks if the current directory is in a git repository
func isGitRepo() bool {
cmd := exec.Command("git", "rev-parse", "--git-dir")
return cmd.Run() == nil
}
// gitHasUnmergedPaths checks for unmerged paths or merge in progress
func gitHasUnmergedPaths() (bool, error) {
cmd := exec.Command("git", "status", "--porcelain")
out, err := cmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
// Check for unmerged status codes (DD, AU, UD, UA, DU, AA, UU)
for _, line := range strings.Split(string(out), "\n") {
if len(line) >= 2 {
s := line[:2]
if s == "DD" || s == "AU" || s == "UD" || s == "UA" || s == "DU" || s == "AA" || s == "UU" {
return true, nil
}
}
}
// Check if MERGE_HEAD exists (merge in progress)
if exec.Command("git", "rev-parse", "-q", "--verify", "MERGE_HEAD").Run() == nil {
return true, nil
}
return false, nil
}
// gitHasUpstream checks if the current branch has an upstream configured
// Uses git config directly for compatibility with Git for Windows
func gitHasUpstream() bool {
// Get current branch name
branchCmd := exec.Command("git", "symbolic-ref", "--short", "HEAD")
branchOutput, err := branchCmd.Output()
if err != nil {
return false
}
branch := strings.TrimSpace(string(branchOutput))
// Check if remote and merge refs are configured
remoteCmd := exec.Command("git", "config", "--get", fmt.Sprintf("branch.%s.remote", branch))
mergeCmd := exec.Command("git", "config", "--get", fmt.Sprintf("branch.%s.merge", branch))
remoteErr := remoteCmd.Run()
mergeErr := mergeCmd.Run()
return remoteErr == nil && mergeErr == nil
}
// gitHasChanges checks if the specified file has uncommitted changes
func gitHasChanges(ctx context.Context, filePath string) (bool, error) {
cmd := exec.CommandContext(ctx, "git", "status", "--porcelain", filePath)
output, err := cmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
return len(strings.TrimSpace(string(output))) > 0, nil
}
// getRepoRootForWorktree returns the main repository root for running git commands
// This is always the main repository root, never the worktree root
func getRepoRootForWorktree(_ context.Context) string {
repoRoot, err := git.GetMainRepoRoot()
if err != nil {
// Fallback to current directory if GetMainRepoRoot fails
return "."
}
return repoRoot
}
// gitHasBeadsChanges checks if any tracked files in .beads/ have uncommitted changes
func gitHasBeadsChanges(ctx context.Context) (bool, error) {
// Get the absolute path to .beads directory
beadsDir := beads.FindBeadsDir()
if beadsDir == "" {
return false, fmt.Errorf("no .beads directory found")
}
// Get the repository root (handles worktrees properly)
repoRoot := getRepoRootForWorktree(ctx)
if repoRoot == "" {
return false, fmt.Errorf("cannot determine repository root")
}
// Compute relative path from repo root to .beads
relPath, err := filepath.Rel(repoRoot, beadsDir)
if err != nil {
// Fall back to absolute path if relative path fails
statusCmd := exec.CommandContext(ctx, "git", "status", "--porcelain", beadsDir)
statusOutput, err := statusCmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
return len(strings.TrimSpace(string(statusOutput))) > 0, nil
}
// Run git status with relative path from repo root
statusCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "status", "--porcelain", relPath)
statusOutput, err := statusCmd.Output()
if err != nil {
return false, fmt.Errorf("git status failed: %w", err)
}
return len(strings.TrimSpace(string(statusOutput))) > 0, nil
}
// buildGitCommitArgs returns git commit args with config-based author and signing options (GH#600)
// This allows users to configure a separate author and disable GPG signing for beads commits.
func buildGitCommitArgs(repoRoot, message string, extraArgs ...string) []string {
args := []string{"-C", repoRoot, "commit"}
// Add --author if configured
if author := config.GetString("git.author"); author != "" {
args = append(args, "--author", author)
}
// Add --no-gpg-sign if configured
if config.GetBool("git.no-gpg-sign") {
args = append(args, "--no-gpg-sign")
}
// Add message
args = append(args, "-m", message)
// Add any extra args (like -- pathspec)
args = append(args, extraArgs...)
return args
}
// gitCommit commits the specified file (worktree-aware)
func gitCommit(ctx context.Context, filePath string, message string) error {
// Get the repository root (handles worktrees properly)
repoRoot := getRepoRootForWorktree(ctx)
if repoRoot == "" {
return fmt.Errorf("cannot determine repository root")
}
// Make file path relative to repo root for git operations
relPath, err := filepath.Rel(repoRoot, filePath)
if err != nil {
relPath = filePath // Fall back to absolute path
}
// Stage the file from repo root context
addCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "add", relPath)
if err := addCmd.Run(); err != nil {
return fmt.Errorf("git add failed: %w", err)
}
// Generate message if not provided
if message == "" {
message = fmt.Sprintf("bd sync: %s", time.Now().Format("2006-01-02 15:04:05"))
}
// Commit from repo root context with config-based author and signing options
// Use pathspec to commit ONLY this file
// This prevents accidentally committing other staged files
commitArgs := buildGitCommitArgs(repoRoot, message, "--", relPath)
commitCmd := exec.CommandContext(ctx, "git", commitArgs...)
output, err := commitCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git commit failed: %w\n%s", err, output)
}
return nil
}
// gitCommitBeadsDir stages and commits only sync-related files in .beads/
// This ensures bd sync doesn't accidentally commit other staged files.
// Only stages specific sync files (issues.jsonl, deletions.jsonl, metadata.json)
// to avoid staging gitignored snapshot files that may be tracked.
// Worktree-aware: handles cases where .beads is in the main repo but we're running from a worktree.
func gitCommitBeadsDir(ctx context.Context, message string) error {
beadsDir := beads.FindBeadsDir()
if beadsDir == "" {
return fmt.Errorf("no .beads directory found")
}
// Get the repository root (handles worktrees properly)
repoRoot := getRepoRootForWorktree(ctx)
if repoRoot == "" {
return fmt.Errorf("cannot determine repository root")
}
// Stage only the specific sync-related files
// This avoids staging gitignored snapshot files (beads.*.jsonl, *.meta.json)
// that may still be tracked from before they were added to .gitignore
syncFiles := []string{
filepath.Join(beadsDir, "issues.jsonl"),
filepath.Join(beadsDir, "deletions.jsonl"),
filepath.Join(beadsDir, "interactions.jsonl"),
filepath.Join(beadsDir, "metadata.json"),
}
// Only add files that exist
var filesToAdd []string
for _, f := range syncFiles {
if _, err := os.Stat(f); err == nil {
// Convert to relative path from repo root for git operations
relPath, err := filepath.Rel(repoRoot, f)
if err != nil {
relPath = f // Fall back to absolute path if relative fails
}
filesToAdd = append(filesToAdd, relPath)
}
}
if len(filesToAdd) == 0 {
return fmt.Errorf("no sync files found to commit")
}
// Stage only the sync files from repo root context (worktree-aware)
args := append([]string{"-C", repoRoot, "add"}, filesToAdd...)
addCmd := exec.CommandContext(ctx, "git", args...)
if err := addCmd.Run(); err != nil {
return fmt.Errorf("git add failed: %w", err)
}
// Generate message if not provided
if message == "" {
message = fmt.Sprintf("bd sync: %s", time.Now().Format("2006-01-02 15:04:05"))
}
// Commit only .beads/ files using -- pathspec
// This prevents accidentally committing other staged files that the user
// may have staged but wasn't ready to commit yet.
// Convert beadsDir to relative path for git commit (worktree-aware)
relBeadsDir, err := filepath.Rel(repoRoot, beadsDir)
if err != nil {
relBeadsDir = beadsDir // Fall back to absolute path if relative fails
}
// Use config-based author and signing options with pathspec
commitArgs := buildGitCommitArgs(repoRoot, message, "--", relBeadsDir)
commitCmd := exec.CommandContext(ctx, "git", commitArgs...)
output, err := commitCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git commit failed: %w\n%s", err, output)
}
return nil
}
// hasGitRemote checks if a git remote exists in the repository
func hasGitRemote(ctx context.Context) bool {
cmd := exec.CommandContext(ctx, "git", "remote")
output, err := cmd.Output()
if err != nil {
return false
}
return len(strings.TrimSpace(string(output))) > 0
}
// isInRebase checks if we're currently in a git rebase state
func isInRebase() bool {
// Get actual git directory (handles worktrees)
gitDir, err := git.GetGitDir()
if err != nil {
return false
}
// Check for rebase-merge directory (interactive rebase)
rebaseMergePath := filepath.Join(gitDir, "rebase-merge")
if _, err := os.Stat(rebaseMergePath); err == nil {
return true
}
// Check for rebase-apply directory (non-interactive rebase)
rebaseApplyPath := filepath.Join(gitDir, "rebase-apply")
if _, err := os.Stat(rebaseApplyPath); err == nil {
return true
}
return false
}
// hasJSONLConflict checks if the beads JSONL file has a merge conflict
// Returns true only if the JSONL file (issues.jsonl or beads.jsonl) is the only file in conflict
func hasJSONLConflict() bool {
cmd := exec.Command("git", "status", "--porcelain")
out, err := cmd.Output()
if err != nil {
return false
}
var hasJSONLConflict bool
var hasOtherConflict bool
for _, line := range strings.Split(string(out), "\n") {
if len(line) < 3 {
continue
}
// Check for unmerged status codes (UU = both modified, AA = both added, etc.)
status := line[:2]
if status == "UU" || status == "AA" || status == "DD" ||
status == "AU" || status == "UA" || status == "DU" || status == "UD" {
filepath := strings.TrimSpace(line[3:])
// Check for beads JSONL files (issues.jsonl or beads.jsonl in .beads/)
if strings.HasSuffix(filepath, "issues.jsonl") || strings.HasSuffix(filepath, "beads.jsonl") {
hasJSONLConflict = true
} else {
hasOtherConflict = true
}
}
}
// Only return true if ONLY the JSONL file has a conflict
return hasJSONLConflict && !hasOtherConflict
}
// runGitRebaseContinue continues a rebase after resolving conflicts
func runGitRebaseContinue(ctx context.Context) error {
cmd := exec.CommandContext(ctx, "git", "rebase", "--continue")
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git rebase --continue failed: %w\n%s", err, output)
}
return nil
}
// gitPull pulls from the current branch's upstream
// Returns nil if no remote configured (local-only mode)
func checkMergeDriverConfig() {
// Get current merge driver configuration
cmd := exec.Command("git", "config", "merge.beads.driver")
output, err := cmd.Output()
if err != nil {
// No merge driver configured - this is OK, user may not need it
return
}
currentConfig := strings.TrimSpace(string(output))
// Check if using old incorrect placeholders
if strings.Contains(currentConfig, "%L") || strings.Contains(currentConfig, "%R") {
fmt.Fprintf(os.Stderr, "\n⚠ WARNING: Git merge driver is misconfigured!\n")
fmt.Fprintf(os.Stderr, " Current: %s\n", currentConfig)
fmt.Fprintf(os.Stderr, " Problem: Git only supports %%O (base), %%A (current), %%B (other)\n")
fmt.Fprintf(os.Stderr, " Using %%L/%%R will cause merge failures!\n")
fmt.Fprintf(os.Stderr, "\n Fix now: bd doctor --fix\n")
fmt.Fprintf(os.Stderr, " Or manually: git config merge.beads.driver \"bd merge %%A %%O %%A %%B\"\n\n")
}
}
func gitPull(ctx context.Context) error {
// Check if any remote exists (support local-only repos)
if !hasGitRemote(ctx) {
return nil // Gracefully skip - local-only mode
}
// Get current branch name
// Use symbolic-ref to work in fresh repos without commits
branchCmd := exec.CommandContext(ctx, "git", "symbolic-ref", "--short", "HEAD")
branchOutput, err := branchCmd.Output()
if err != nil {
return fmt.Errorf("failed to get current branch: %w", err)
}
branch := strings.TrimSpace(string(branchOutput))
// Get remote name for current branch (usually "origin")
remoteCmd := exec.CommandContext(ctx, "git", "config", "--get", fmt.Sprintf("branch.%s.remote", branch))
remoteOutput, err := remoteCmd.Output()
if err != nil {
// If no remote configured, default to "origin"
remoteOutput = []byte("origin\n")
}
remote := strings.TrimSpace(string(remoteOutput))
// Pull with explicit remote and branch
cmd := exec.CommandContext(ctx, "git", "pull", remote, branch)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git pull failed: %w\n%s", err, output)
}
return nil
}
// gitPush pushes to the current branch's upstream
// Returns nil if no remote configured (local-only mode)
func gitPush(ctx context.Context) error {
// Check if any remote exists (support local-only repos)
if !hasGitRemote(ctx) {
return nil // Gracefully skip - local-only mode
}
cmd := exec.CommandContext(ctx, "git", "push")
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git push failed: %w\n%s", err, output)
}
return nil
}
// restoreBeadsDirFromBranch restores .beads/ directory from the current branch's committed state.
// This is used after sync when sync.branch is configured to keep the working directory clean.
// The actual beads data lives on the sync branch; the main branch's .beads/ is just a snapshot.
func restoreBeadsDirFromBranch(ctx context.Context) error {
beadsDir := beads.FindBeadsDir()
if beadsDir == "" {
return fmt.Errorf("no .beads directory found")
}
// Restore .beads/ from HEAD (current branch's committed state)
// Using -- to ensure .beads/ is treated as a path, not a branch name
cmd := exec.CommandContext(ctx, "git", "checkout", "HEAD", "--", beadsDir)
output, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("git checkout failed: %w\n%s", err, output)
}
return nil
}
// getDefaultBranch returns the default branch name (main or master) for origin remote
// Checks remote HEAD first, then falls back to checking if main/master exist
func getDefaultBranch(ctx context.Context) string {
return getDefaultBranchForRemote(ctx, "origin")
}
// getDefaultBranchForRemote returns the default branch name for a specific remote
// Checks remote HEAD first, then falls back to checking if main/master exist
func getDefaultBranchForRemote(ctx context.Context, remote string) string {
// Try to get default branch from remote
cmd := exec.CommandContext(ctx, "git", "symbolic-ref", fmt.Sprintf("refs/remotes/%s/HEAD", remote))
output, err := cmd.Output()
if err == nil {
ref := strings.TrimSpace(string(output))
// Extract branch name from refs/remotes/<remote>/main
prefix := fmt.Sprintf("refs/remotes/%s/", remote)
if strings.HasPrefix(ref, prefix) {
return strings.TrimPrefix(ref, prefix)
}
}
// Fallback: check if <remote>/main exists
if exec.CommandContext(ctx, "git", "rev-parse", "--verify", fmt.Sprintf("%s/main", remote)).Run() == nil {
return "main"
}
// Fallback: check if <remote>/master exists
if exec.CommandContext(ctx, "git", "rev-parse", "--verify", fmt.Sprintf("%s/master", remote)).Run() == nil {
return "master"
}
// Default to main
return "main"
}

355
cmd/bd/update.go Normal file
View File

@@ -0,0 +1,355 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/hooks"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
"github.com/steveyegge/beads/internal/validation"
)
var updateCmd = &cobra.Command{
Use: "update [id...]",
GroupID: "issues",
Short: "Update one or more issues",
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
CheckReadonly("update")
updates := make(map[string]interface{})
if cmd.Flags().Changed("status") {
status, _ := cmd.Flags().GetString("status")
updates["status"] = status
}
if cmd.Flags().Changed("priority") {
priorityStr, _ := cmd.Flags().GetString("priority")
priority, err := validation.ValidatePriority(priorityStr)
if err != nil {
FatalErrorRespectJSON("%v", err)
}
updates["priority"] = priority
}
if cmd.Flags().Changed("title") {
title, _ := cmd.Flags().GetString("title")
updates["title"] = title
}
if cmd.Flags().Changed("assignee") {
assignee, _ := cmd.Flags().GetString("assignee")
updates["assignee"] = assignee
}
description, descChanged := getDescriptionFlag(cmd)
if descChanged {
updates["description"] = description
}
if cmd.Flags().Changed("design") {
design, _ := cmd.Flags().GetString("design")
updates["design"] = design
}
if cmd.Flags().Changed("notes") {
notes, _ := cmd.Flags().GetString("notes")
updates["notes"] = notes
}
if cmd.Flags().Changed("acceptance") || cmd.Flags().Changed("acceptance-criteria") {
var acceptanceCriteria string
if cmd.Flags().Changed("acceptance") {
acceptanceCriteria, _ = cmd.Flags().GetString("acceptance")
} else {
acceptanceCriteria, _ = cmd.Flags().GetString("acceptance-criteria")
}
updates["acceptance_criteria"] = acceptanceCriteria
}
if cmd.Flags().Changed("external-ref") {
externalRef, _ := cmd.Flags().GetString("external-ref")
updates["external_ref"] = externalRef
}
if cmd.Flags().Changed("estimate") {
estimate, _ := cmd.Flags().GetInt("estimate")
if estimate < 0 {
FatalErrorRespectJSON("estimate must be a non-negative number of minutes")
}
updates["estimated_minutes"] = estimate
}
if cmd.Flags().Changed("type") {
issueType, _ := cmd.Flags().GetString("type")
// Validate issue type
if !types.IssueType(issueType).IsValid() {
FatalErrorRespectJSON("invalid issue type %q. Valid types: bug, feature, task, epic, chore, merge-request, molecule, gate", issueType)
}
updates["issue_type"] = issueType
}
if cmd.Flags().Changed("add-label") {
addLabels, _ := cmd.Flags().GetStringSlice("add-label")
updates["add_labels"] = addLabels
}
if cmd.Flags().Changed("remove-label") {
removeLabels, _ := cmd.Flags().GetStringSlice("remove-label")
updates["remove_labels"] = removeLabels
}
if cmd.Flags().Changed("set-labels") {
setLabels, _ := cmd.Flags().GetStringSlice("set-labels")
updates["set_labels"] = setLabels
}
if cmd.Flags().Changed("parent") {
parent, _ := cmd.Flags().GetString("parent")
updates["parent"] = parent
}
if cmd.Flags().Changed("type") {
issueType, _ := cmd.Flags().GetString("type")
// Validate issue type
if _, err := validation.ParseIssueType(issueType); err != nil {
FatalErrorRespectJSON("%v", err)
}
updates["issue_type"] = issueType
}
if len(updates) == 0 {
fmt.Println("No updates specified")
return
}
ctx := rootCtx
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {
for _, id := range args {
resolveArgs := &rpc.ResolveIDArgs{ID: id}
resp, err := daemonClient.ResolveID(resolveArgs)
if err != nil {
FatalErrorRespectJSON("resolving ID %s: %v", id, err)
}
var resolvedID string
if err := json.Unmarshal(resp.Data, &resolvedID); err != nil {
FatalErrorRespectJSON("unmarshaling resolved ID: %v", err)
}
resolvedIDs = append(resolvedIDs, resolvedID)
}
} else {
var err error
resolvedIDs, err = utils.ResolvePartialIDs(ctx, store, args)
if err != nil {
FatalErrorRespectJSON("%v", err)
}
}
// If daemon is running, use RPC
if daemonClient != nil {
updatedIssues := []*types.Issue{}
for _, id := range resolvedIDs {
updateArgs := &rpc.UpdateArgs{ID: id}
// Map updates to RPC args
if status, ok := updates["status"].(string); ok {
updateArgs.Status = &status
}
if priority, ok := updates["priority"].(int); ok {
updateArgs.Priority = &priority
}
if title, ok := updates["title"].(string); ok {
updateArgs.Title = &title
}
if assignee, ok := updates["assignee"].(string); ok {
updateArgs.Assignee = &assignee
}
if description, ok := updates["description"].(string); ok {
updateArgs.Description = &description
}
if design, ok := updates["design"].(string); ok {
updateArgs.Design = &design
}
if notes, ok := updates["notes"].(string); ok {
updateArgs.Notes = &notes
}
if acceptanceCriteria, ok := updates["acceptance_criteria"].(string); ok {
updateArgs.AcceptanceCriteria = &acceptanceCriteria
}
if externalRef, ok := updates["external_ref"].(string); ok {
updateArgs.ExternalRef = &externalRef
}
if estimate, ok := updates["estimated_minutes"].(int); ok {
updateArgs.EstimatedMinutes = &estimate
}
if issueType, ok := updates["issue_type"].(string); ok {
updateArgs.IssueType = &issueType
}
if addLabels, ok := updates["add_labels"].([]string); ok {
updateArgs.AddLabels = addLabels
}
if removeLabels, ok := updates["remove_labels"].([]string); ok {
updateArgs.RemoveLabels = removeLabels
}
if setLabels, ok := updates["set_labels"].([]string); ok {
updateArgs.SetLabels = setLabels
}
if issueType, ok := updates["issue_type"].(string); ok {
updateArgs.IssueType = &issueType
}
if parent, ok := updates["parent"].(string); ok {
updateArgs.Parent = &parent
}
resp, err := daemonClient.Update(updateArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error updating %s: %v\n", id, err)
continue
}
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err == nil {
// Run update hook
if hookRunner != nil {
hookRunner.Run(hooks.EventUpdate, &issue)
}
if jsonOutput {
updatedIssues = append(updatedIssues, &issue)
}
}
if !jsonOutput {
fmt.Printf("%s Updated issue: %s\n", ui.RenderPass("✓"), id)
}
}
if jsonOutput && len(updatedIssues) > 0 {
outputJSON(updatedIssues)
}
return
}
// Direct mode
updatedIssues := []*types.Issue{}
for _, id := range resolvedIDs {
// Check if issue is a template: templates are read-only
issue, err := store.GetIssue(ctx, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting %s: %v\n", id, err)
continue
}
if err := validateIssueUpdatable(id, issue); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
continue
}
// Apply regular field updates if any
regularUpdates := make(map[string]interface{})
for k, v := range updates {
if k != "add_labels" && k != "remove_labels" && k != "set_labels" && k != "parent" {
regularUpdates[k] = v
}
}
if len(regularUpdates) > 0 {
if err := store.UpdateIssue(ctx, id, regularUpdates, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error updating %s: %v\n", id, err)
continue
}
}
// Handle label operations
var setLabels, addLabels, removeLabels []string
if v, ok := updates["set_labels"].([]string); ok {
setLabels = v
}
if v, ok := updates["add_labels"].([]string); ok {
addLabels = v
}
if v, ok := updates["remove_labels"].([]string); ok {
removeLabels = v
}
if len(setLabels) > 0 || len(addLabels) > 0 || len(removeLabels) > 0 {
if err := applyLabelUpdates(ctx, store, id, actor, setLabels, addLabels, removeLabels); err != nil {
fmt.Fprintf(os.Stderr, "Error updating labels for %s: %v\n", id, err)
continue
}
}
// Handle parent reparenting
if newParent, ok := updates["parent"].(string); ok {
// Validate new parent exists (unless empty string to remove parent)
if newParent != "" {
parentIssue, err := store.GetIssue(ctx, newParent)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting parent %s: %v\n", newParent, err)
continue
}
if parentIssue == nil {
fmt.Fprintf(os.Stderr, "Error: parent issue %s not found\n", newParent)
continue
}
}
// Find and remove existing parent-child dependency
deps, err := store.GetDependencyRecords(ctx, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting dependencies for %s: %v\n", id, err)
continue
}
for _, dep := range deps {
if dep.Type == types.DepParentChild {
if err := store.RemoveDependency(ctx, id, dep.DependsOnID, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error removing old parent dependency: %v\n", err)
}
break
}
}
// Add new parent-child dependency (if not removing parent)
if newParent != "" {
newDep := &types.Dependency{
IssueID: id,
DependsOnID: newParent,
Type: types.DepParentChild,
}
if err := store.AddDependency(ctx, newDep, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error adding parent dependency: %v\n", err)
continue
}
}
}
// Run update hook
updatedIssue, _ := store.GetIssue(ctx, id)
if updatedIssue != nil && hookRunner != nil {
hookRunner.Run(hooks.EventUpdate, updatedIssue)
}
if jsonOutput {
if updatedIssue != nil {
updatedIssues = append(updatedIssues, updatedIssue)
}
} else {
fmt.Printf("%s Updated issue: %s\n", ui.RenderPass("✓"), id)
}
}
// Schedule auto-flush if any issues were updated
if len(args) > 0 {
markDirtyAndScheduleFlush()
}
if jsonOutput && len(updatedIssues) > 0 {
outputJSON(updatedIssues)
}
},
}
func init() {
updateCmd.Flags().StringP("status", "s", "", "New status")
registerPriorityFlag(updateCmd, "")
updateCmd.Flags().String("title", "", "New title")
updateCmd.Flags().StringP("type", "t", "", "New type (bug|feature|task|epic|chore|merge-request|molecule|gate)")
registerCommonIssueFlags(updateCmd)
updateCmd.Flags().String("notes", "", "Additional notes")
updateCmd.Flags().String("acceptance-criteria", "", "DEPRECATED: use --acceptance")
_ = updateCmd.Flags().MarkHidden("acceptance-criteria") // Only fails if flag missing (caught in tests)
updateCmd.Flags().IntP("estimate", "e", 0, "Time estimate in minutes (e.g., 60 for 1 hour)")
updateCmd.Flags().StringSlice("add-label", nil, "Add labels (repeatable)")
updateCmd.Flags().StringSlice("remove-label", nil, "Remove labels (repeatable)")
updateCmd.Flags().StringSlice("set-labels", nil, "Set labels, replacing all existing (repeatable)")
updateCmd.Flags().String("parent", "", "New parent issue ID (reparents the issue, use empty string to remove parent)")
rootCmd.AddCommand(updateCmd)
}