bd sync: 2025-12-23 23:38:57

This commit is contained in:
Steve Yegge
2025-12-23 23:38:57 -08:00
parent 05e10b6759
commit e67f27c092
80 changed files with 7165 additions and 8490 deletions

View File

@@ -245,58 +245,3 @@ func TestGetSocketPath(t *testing.T) {
}
})
}
// TestDetermineSocketPath verifies that determineSocketPath acts as an
// identity function: whatever path it is handed — including the empty
// string — comes back unchanged.
func TestDetermineSocketPath(t *testing.T) {
	t.Run("returns same path passed in", func(t *testing.T) {
		const want = "/tmp/.beads/bd.sock"
		if got := determineSocketPath(want); got != want {
			t.Errorf("determineSocketPath(%q) = %q, want %q", want, got, want)
		}
	})
	t.Run("preserves empty path", func(t *testing.T) {
		if got := determineSocketPath(""); got != "" {
			t.Errorf("determineSocketPath(\"\") = %q, want empty string", got)
		}
	})
}
// TestIsDaemonRunningQuiet exercises the negative paths of
// isDaemonRunningQuiet: a missing PID file, a malformed PID file, and a
// PID value that should not correspond to a live process.
func TestIsDaemonRunningQuiet(t *testing.T) {
	pidFile := filepath.Join(t.TempDir(), "daemon.pid")

	// writePID populates the PID file with the given contents, failing the
	// subtest immediately if the file cannot be created.
	writePID := func(t *testing.T, contents string) {
		t.Helper()
		if err := os.WriteFile(pidFile, []byte(contents), 0644); err != nil {
			t.Fatalf("failed to create PID file: %v", err)
		}
	}

	t.Run("returns false when PID file does not exist", func(t *testing.T) {
		os.Remove(pidFile)
		if isDaemonRunningQuiet(pidFile) {
			t.Error("expected false when PID file does not exist")
		}
	})

	t.Run("returns false when PID file is invalid", func(t *testing.T) {
		writePID(t, "not-a-number")
		defer os.Remove(pidFile)
		if isDaemonRunningQuiet(pidFile) {
			t.Error("expected false when PID file contains invalid content")
		}
	})

	t.Run("returns false for non-existent PID", func(t *testing.T) {
		// Use a PID that's very unlikely to exist
		writePID(t, "999999999")
		defer os.Remove(pidFile)
		if isDaemonRunningQuiet(pidFile) {
			t.Error("expected false for non-existent PID")
		}
	})
}
// Note: TestGetPIDFileForSocket, TestReadPIDFromFile, and TestIsPIDAlive
// are already defined in daemon_basics_test.go

View File

@@ -166,8 +166,7 @@ Examples:
} else {
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n")
os.Exit(1)
FatalError("compact requires SQLite storage")
}
runCompactStats(ctx, sqliteStore)
}
@@ -188,26 +187,20 @@ Examples:
// Check for exactly one mode
if activeModes == 0 {
fmt.Fprintf(os.Stderr, "Error: must specify one mode: --analyze, --apply, or --auto\n")
os.Exit(1)
FatalError("must specify one mode: --analyze, --apply, or --auto")
}
if activeModes > 1 {
fmt.Fprintf(os.Stderr, "Error: cannot use multiple modes together (--analyze, --apply, --auto are mutually exclusive)\n")
os.Exit(1)
FatalError("cannot use multiple modes together (--analyze, --apply, --auto are mutually exclusive)")
}
// Handle analyze mode (requires direct database access)
if compactAnalyze {
if err := ensureDirectMode("compact --analyze requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
fmt.Fprintf(os.Stderr, "Hint: Use --no-daemon flag to bypass daemon and access database directly\n")
os.Exit(1)
FatalErrorWithHint(fmt.Sprintf("%v", err), "Use --no-daemon flag to bypass daemon and access database directly")
}
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: failed to open database in direct mode\n")
fmt.Fprintf(os.Stderr, "Hint: Ensure .beads/beads.db exists and is readable\n")
os.Exit(1)
FatalErrorWithHint("failed to open database in direct mode", "Ensure .beads/beads.db exists and is readable")
}
runCompactAnalyze(ctx, sqliteStore)
return
@@ -216,23 +209,17 @@ Examples:
// Handle apply mode (requires direct database access)
if compactApply {
if err := ensureDirectMode("compact --apply requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
fmt.Fprintf(os.Stderr, "Hint: Use --no-daemon flag to bypass daemon and access database directly\n")
os.Exit(1)
FatalErrorWithHint(fmt.Sprintf("%v", err), "Use --no-daemon flag to bypass daemon and access database directly")
}
if compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --apply requires --id\n")
os.Exit(1)
FatalError("--apply requires --id")
}
if compactSummary == "" {
fmt.Fprintf(os.Stderr, "Error: --apply requires --summary\n")
os.Exit(1)
FatalError("--apply requires --summary")
}
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: failed to open database in direct mode\n")
fmt.Fprintf(os.Stderr, "Hint: Ensure .beads/beads.db exists and is readable\n")
os.Exit(1)
FatalErrorWithHint("failed to open database in direct mode", "Ensure .beads/beads.db exists and is readable")
}
runCompactApply(ctx, sqliteStore)
return
@@ -248,16 +235,13 @@ Examples:
// Validation checks
if compactID != "" && compactAll {
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
os.Exit(1)
FatalError("cannot use --id and --all together")
}
if compactForce && compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
os.Exit(1)
FatalError("--force requires --id")
}
if compactID == "" && !compactAll && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
os.Exit(1)
FatalError("must specify --all, --id, or --dry-run")
}
// Use RPC if daemon available, otherwise direct mode
@@ -269,14 +253,12 @@ Examples:
// Fallback to direct mode
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: --auto mode requires ANTHROPIC_API_KEY environment variable\n")
os.Exit(1)
FatalError("--auto mode requires ANTHROPIC_API_KEY environment variable")
}
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n")
os.Exit(1)
FatalError("compact requires SQLite storage")
}
config := &compact.Config{
@@ -289,8 +271,7 @@ Examples:
compactor, err := compact.New(sqliteStore, apiKey, config)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to create compactor: %v\n", err)
os.Exit(1)
FatalError("failed to create compactor: %v", err)
}
if compactID != "" {
@@ -309,19 +290,16 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
if !compactForce {
eligible, reason, err := store.CheckEligibility(ctx, issueID, compactTier)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to check eligibility: %v\n", err)
os.Exit(1)
FatalError("failed to check eligibility: %v", err)
}
if !eligible {
fmt.Fprintf(os.Stderr, "Error: %s is not eligible for Tier %d compaction: %s\n", issueID, compactTier, reason)
os.Exit(1)
FatalError("%s is not eligible for Tier %d compaction: %s", issueID, compactTier, reason)
}
}
issue, err := store.GetIssue(ctx, issueID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
FatalError("failed to get issue: %v", err)
}
originalSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
@@ -349,19 +327,16 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
if compactTier == 1 {
compactErr = compactor.CompactTier1(ctx, issueID)
} else {
fmt.Fprintf(os.Stderr, "Error: Tier 2 compaction not yet implemented\n")
os.Exit(1)
FatalError("Tier 2 compaction not yet implemented")
}
if compactErr != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", compactErr)
os.Exit(1)
FatalError("%v", compactErr)
}
issue, err = store.GetIssue(ctx, issueID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get updated issue: %v\n", err)
os.Exit(1)
FatalError("failed to get updated issue: %v", err)
}
compactedSize := len(issue.Description)
@@ -407,8 +382,7 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
if compactTier == 1 {
tier1, err := store.GetTier1Candidates(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err)
os.Exit(1)
FatalError("failed to get candidates: %v", err)
}
for _, c := range tier1 {
candidates = append(candidates, c.IssueID)
@@ -416,8 +390,7 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
} else {
tier2, err := store.GetTier2Candidates(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err)
os.Exit(1)
FatalError("failed to get candidates: %v", err)
}
for _, c := range tier2 {
candidates = append(candidates, c.IssueID)
@@ -471,8 +444,7 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
results, err := compactor.CompactTier1Batch(ctx, candidates)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: batch compaction failed: %v\n", err)
os.Exit(1)
FatalError("batch compaction failed: %v", err)
}
successCount := 0
@@ -535,14 +507,12 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
func runCompactStats(ctx context.Context, store *sqlite.SQLiteStorage) {
tier1, err := store.GetTier1Candidates(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get Tier 1 candidates: %v\n", err)
os.Exit(1)
FatalError("failed to get Tier 1 candidates: %v", err)
}
tier2, err := store.GetTier2Candidates(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get Tier 2 candidates: %v\n", err)
os.Exit(1)
FatalError("failed to get Tier 2 candidates: %v", err)
}
tier1Size := 0
@@ -608,24 +578,20 @@ func progressBar(current, total int) string {
//nolint:unparam // ctx may be used in future for cancellation
func runCompactRPC(_ context.Context) {
if compactID != "" && compactAll {
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
os.Exit(1)
FatalError("cannot use --id and --all together")
}
if compactForce && compactID == "" {
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
os.Exit(1)
FatalError("--force requires --id")
}
if compactID == "" && !compactAll && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
os.Exit(1)
FatalError("must specify --all, --id, or --dry-run")
}
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n")
os.Exit(1)
FatalError("ANTHROPIC_API_KEY environment variable not set")
}
args := map[string]interface{}{
@@ -643,13 +609,11 @@ func runCompactRPC(_ context.Context) {
resp, err := daemonClient.Execute("compact", args)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
FatalError("%v", err)
}
if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
FatalError("%s", resp.Error)
}
if jsonOutput {
@@ -676,8 +640,7 @@ func runCompactRPC(_ context.Context) {
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
FatalError("parsing response: %v", err)
}
if compactID != "" {
@@ -722,13 +685,11 @@ func runCompactStatsRPC() {
resp, err := daemonClient.Execute("compact_stats", args)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
FatalError("%v", err)
}
if !resp.Success {
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
FatalError("%s", resp.Error)
}
if jsonOutput {
@@ -749,8 +710,7 @@ func runCompactStatsRPC() {
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
FatalError("parsing response: %v", err)
}
fmt.Printf("\nCompaction Statistics\n")
@@ -784,8 +744,7 @@ func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
if compactID != "" {
issue, err := store.GetIssue(ctx, compactID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
FatalError("failed to get issue: %v", err)
}
sizeBytes := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
@@ -816,8 +775,7 @@ func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
tierCandidates, err = store.GetTier2Candidates(ctx)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err)
os.Exit(1)
FatalError("failed to get candidates: %v", err)
}
// Apply limit if specified
@@ -879,15 +837,13 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
// Read from stdin
summaryBytes, err = io.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to read summary from stdin: %v\n", err)
os.Exit(1)
FatalError("failed to read summary from stdin: %v", err)
}
} else {
// #nosec G304 -- summary file path provided explicitly by operator
summaryBytes, err = os.ReadFile(compactSummary)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to read summary file: %v\n", err)
os.Exit(1)
FatalError("failed to read summary file: %v", err)
}
}
summary := string(summaryBytes)
@@ -895,8 +851,7 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
// Get issue
issue, err := store.GetIssue(ctx, compactID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
FatalError("failed to get issue: %v", err)
}
// Calculate sizes
@@ -907,20 +862,15 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
if !compactForce {
eligible, reason, err := store.CheckEligibility(ctx, compactID, compactTier)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to check eligibility: %v\n", err)
os.Exit(1)
FatalError("failed to check eligibility: %v", err)
}
if !eligible {
fmt.Fprintf(os.Stderr, "Error: %s is not eligible for Tier %d compaction: %s\n", compactID, compactTier, reason)
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass eligibility checks\n")
os.Exit(1)
FatalErrorWithHint(fmt.Sprintf("%s is not eligible for Tier %d compaction: %s", compactID, compactTier, reason), "use --force to bypass eligibility checks")
}
// Enforce size reduction unless --force
if compactedSize >= originalSize {
fmt.Fprintf(os.Stderr, "Error: summary (%d bytes) is not shorter than original (%d bytes)\n", compactedSize, originalSize)
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass size validation\n")
os.Exit(1)
FatalErrorWithHint(fmt.Sprintf("summary (%d bytes) is not shorter than original (%d bytes)", compactedSize, originalSize), "use --force to bypass size validation")
}
}
@@ -938,27 +888,23 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
}
if err := store.UpdateIssue(ctx, compactID, updates, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to update issue: %v\n", err)
os.Exit(1)
FatalError("failed to update issue: %v", err)
}
commitHash := compact.GetCurrentCommitHash()
if err := store.ApplyCompaction(ctx, compactID, compactTier, originalSize, compactedSize, commitHash); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to apply compaction: %v\n", err)
os.Exit(1)
FatalError("failed to apply compaction: %v", err)
}
savingBytes := originalSize - compactedSize
reductionPct := float64(savingBytes) / float64(originalSize) * 100
eventData := fmt.Sprintf("Tier %d compaction: %d → %d bytes (saved %d, %.1f%%)", compactTier, originalSize, compactedSize, savingBytes, reductionPct)
if err := store.AddComment(ctx, compactID, actor, eventData); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to record event: %v\n", err)
os.Exit(1)
FatalError("failed to record event: %v", err)
}
if err := store.MarkIssueDirty(ctx, compactID); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to mark dirty: %v\n", err)
os.Exit(1)
FatalError("failed to mark dirty: %v", err)
}
elapsed := time.Since(start)

View File

@@ -7,6 +7,7 @@ import (
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/syncbranch"
)
@@ -49,17 +50,38 @@ var configSetCmd = &cobra.Command{
Short: "Set a configuration value",
Args: cobra.ExactArgs(2),
Run: func(_ *cobra.Command, args []string) {
// Config operations work in direct mode only
key := args[0]
value := args[1]
// Check if this is a yaml-only key (startup settings like no-db, no-daemon, etc.)
// These must be written to config.yaml, not SQLite, because they're read
// before the database is opened. (GH#536)
if config.IsYamlOnlyKey(key) {
if err := config.SetYamlConfig(key, value); err != nil {
fmt.Fprintf(os.Stderr, "Error setting config: %v\n", err)
os.Exit(1)
}
if jsonOutput {
outputJSON(map[string]interface{}{
"key": key,
"value": value,
"location": "config.yaml",
})
} else {
fmt.Printf("Set %s = %s (in config.yaml)\n", key, value)
}
return
}
// Database-stored config requires direct mode
if err := ensureDirectMode("config set requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
key := args[0]
value := args[1]
ctx := rootCtx
// Special handling for sync.branch to apply validation
if strings.TrimSpace(key) == syncbranch.ConfigKey {
if err := syncbranch.Set(ctx, store, value); err != nil {
@@ -89,25 +111,46 @@ var configGetCmd = &cobra.Command{
Short: "Get a configuration value",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
// Config operations work in direct mode only
key := args[0]
// Check if this is a yaml-only key (startup settings)
// These are read from config.yaml via viper, not SQLite. (GH#536)
if config.IsYamlOnlyKey(key) {
value := config.GetYamlConfig(key)
if jsonOutput {
outputJSON(map[string]interface{}{
"key": key,
"value": value,
"location": "config.yaml",
})
} else {
if value == "" {
fmt.Printf("%s (not set in config.yaml)\n", key)
} else {
fmt.Printf("%s\n", value)
}
}
return
}
// Database-stored config requires direct mode
if err := ensureDirectMode("config get requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
key := args[0]
ctx := rootCtx
var value string
var err error
// Special handling for sync.branch to support env var override
if strings.TrimSpace(key) == syncbranch.ConfigKey {
value, err = syncbranch.Get(ctx, store)
} else {
value, err = store.GetConfig(ctx, key)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting config: %v\n", err)
os.Exit(1)

View File

@@ -56,6 +56,8 @@ Run 'bd daemon' with no flags to see available options.`,
localMode, _ := cmd.Flags().GetBool("local")
logFile, _ := cmd.Flags().GetString("log")
foreground, _ := cmd.Flags().GetBool("foreground")
logLevel, _ := cmd.Flags().GetString("log-level")
logJSON, _ := cmd.Flags().GetBool("log-json")
// If no operation flags provided, show help
if !start && !stop && !stopAll && !status && !health && !metrics {
@@ -245,7 +247,7 @@ Run 'bd daemon' with no flags to see available options.`,
fmt.Printf("Logging to: %s\n", logFile)
}
startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile)
startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile, logLevel, logJSON)
},
}
@@ -263,6 +265,8 @@ func init() {
daemonCmd.Flags().Bool("metrics", false, "Show detailed daemon metrics")
daemonCmd.Flags().String("log", "", "Log file path (default: .beads/daemon.log)")
daemonCmd.Flags().Bool("foreground", false, "Run in foreground (don't daemonize)")
daemonCmd.Flags().String("log-level", "info", "Log level (debug, info, warn, error)")
daemonCmd.Flags().Bool("log-json", false, "Output logs in JSON format (structured logging)")
daemonCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output JSON format")
rootCmd.AddCommand(daemonCmd)
}
@@ -279,8 +283,9 @@ func computeDaemonParentPID() int {
}
return os.Getppid()
}
func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile string) {
logF, log := setupDaemonLogger(logPath)
func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile, logLevel string, logJSON bool) {
level := parseLogLevel(logLevel)
logF, log := setupDaemonLogger(logPath, logJSON, level)
defer func() { _ = logF.Close() }()
// Set up signal-aware context for graceful shutdown
@@ -290,13 +295,13 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Top-level panic recovery to ensure clean shutdown and diagnostics
defer func() {
if r := recover(); r != nil {
log.log("PANIC: daemon crashed: %v", r)
log.Error("daemon crashed", "panic", r)
// Capture stack trace
stackBuf := make([]byte, 4096)
stackSize := runtime.Stack(stackBuf, false)
stackTrace := string(stackBuf[:stackSize])
log.log("Stack trace:\n%s", stackTrace)
log.Error("stack trace", "trace", stackTrace)
// Write crash report to daemon-error file for user visibility
var beadsDir string
@@ -305,21 +310,21 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
} else if foundDB := beads.FindDatabasePath(); foundDB != "" {
beadsDir = filepath.Dir(foundDB)
}
if beadsDir != "" {
errFile := filepath.Join(beadsDir, "daemon-error")
crashReport := fmt.Sprintf("Daemon crashed at %s\n\nPanic: %v\n\nStack trace:\n%s\n",
time.Now().Format(time.RFC3339), r, stackTrace)
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(crashReport), 0644); err != nil {
log.log("Warning: could not write crash report: %v", err)
log.Warn("could not write crash report", "error", err)
}
}
// Clean up PID file
_ = os.Remove(pidFile)
log.log("Daemon terminated after panic")
log.Info("daemon terminated after panic")
}
}()
@@ -329,8 +334,8 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
if foundDB := beads.FindDatabasePath(); foundDB != "" {
daemonDBPath = foundDB
} else {
log.log("Error: no beads database found")
log.log("Hint: run 'bd init' to create a database or set BEADS_DB environment variable")
log.Error("no beads database found")
log.Info("hint: run 'bd init' to create a database or set BEADS_DB environment variable")
return // Use return instead of os.Exit to allow defers to run
}
}
@@ -376,7 +381,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
errFile := filepath.Join(beadsDir, "daemon-error")
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(errMsg), 0644); err != nil {
log.log("Warning: could not write daemon-error file: %v", err)
log.Warn("could not write daemon-error file", "error", err)
}
return // Use return instead of os.Exit to allow defers to run
@@ -386,24 +391,22 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Validate using canonical name
dbBaseName := filepath.Base(daemonDBPath)
if dbBaseName != beads.CanonicalDatabaseName {
log.log("Error: Non-canonical database name: %s", dbBaseName)
log.log("Expected: %s", beads.CanonicalDatabaseName)
log.log("")
log.log("Run 'bd init' to migrate to canonical name")
log.Error("non-canonical database name", "name", dbBaseName, "expected", beads.CanonicalDatabaseName)
log.Info("run 'bd init' to migrate to canonical name")
return // Use return instead of os.Exit to allow defers to run
}
log.log("Using database: %s", daemonDBPath)
log.Info("using database", "path", daemonDBPath)
// Clear any previous daemon-error file on successful startup
errFile := filepath.Join(beadsDir, "daemon-error")
if err := os.Remove(errFile); err != nil && !os.IsNotExist(err) {
log.log("Warning: could not remove daemon-error file: %v", err)
log.Warn("could not remove daemon-error file", "error", err)
}
store, err := sqlite.New(ctx, daemonDBPath)
if err != nil {
log.log("Error: cannot open database: %v", err)
log.Error("cannot open database", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
defer func() { _ = store.Close() }()
@@ -411,73 +414,71 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Enable freshness checking to detect external database file modifications
// (e.g., when git merge replaces the database file)
store.EnableFreshnessChecking()
log.log("Database opened: %s (freshness checking enabled)", daemonDBPath)
log.Info("database opened", "path", daemonDBPath, "freshness_checking", true)
// Auto-upgrade .beads/.gitignore if outdated
gitignoreCheck := doctor.CheckGitignore()
if gitignoreCheck.Status == "warning" || gitignoreCheck.Status == "error" {
log.log("Upgrading .beads/.gitignore...")
log.Info("upgrading .beads/.gitignore")
if err := doctor.FixGitignore(); err != nil {
log.log("Warning: failed to upgrade .gitignore: %v", err)
log.Warn("failed to upgrade .gitignore", "error", err)
} else {
log.log("Successfully upgraded .beads/.gitignore")
log.Info("successfully upgraded .beads/.gitignore")
}
}
// Hydrate from multi-repo if configured
if results, err := store.HydrateFromMultiRepo(ctx); err != nil {
log.log("Error: multi-repo hydration failed: %v", err)
log.Error("multi-repo hydration failed", "error", err)
return // Use return instead of os.Exit to allow defers to run
} else if results != nil {
log.log("Multi-repo hydration complete:")
log.Info("multi-repo hydration complete")
for repo, count := range results {
log.log(" %s: %d issues", repo, count)
log.Info("hydrated issues", "repo", repo, "count", count)
}
}
// Validate database fingerprint (skip in local mode - no git available)
if localMode {
log.log("Skipping fingerprint validation (local mode)")
log.Info("skipping fingerprint validation (local mode)")
} else if err := validateDatabaseFingerprint(ctx, store, &log); err != nil {
if os.Getenv("BEADS_IGNORE_REPO_MISMATCH") != "1" {
log.log("Error: %v", err)
log.Error("repository fingerprint validation failed", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
log.log("Warning: repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
log.Warn("repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
}
// Validate schema version matches daemon version
versionCtx := context.Background()
dbVersion, err := store.GetMetadata(versionCtx, "bd_version")
if err != nil && err.Error() != "metadata key not found: bd_version" {
log.log("Error: failed to read database version: %v", err)
log.Error("failed to read database version", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
if dbVersion != "" && dbVersion != Version {
log.log("Warning: Database schema version mismatch")
log.log(" Database version: %s", dbVersion)
log.log(" Daemon version: %s", Version)
log.log(" Auto-upgrading database to daemon version...")
log.Warn("database schema version mismatch", "db_version", dbVersion, "daemon_version", Version)
log.Info("auto-upgrading database to daemon version")
// Auto-upgrade database to daemon version
// The daemon operates on its own database, so it should always use its own version
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.log("Error: failed to update database version: %v", err)
log.Error("failed to update database version", "error", err)
// Allow override via environment variable for emergencies
if os.Getenv("BEADS_IGNORE_VERSION_MISMATCH") != "1" {
return // Use return instead of os.Exit to allow defers to run
}
log.log("Warning: Proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)")
log.Warn("proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)")
} else {
log.log(" Database version updated to %s", Version)
log.Info("database version updated", "version", Version)
}
} else if dbVersion == "" {
// Old database without version metadata - set it now
log.log("Warning: Database missing version metadata, setting to %s", Version)
log.Warn("database missing version metadata", "setting_to", Version)
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.log("Error: failed to set database version: %v", err)
log.Error("failed to set database version", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
}
@@ -506,7 +507,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Register daemon in global registry
registry, err := daemon.NewRegistry()
if err != nil {
log.log("Warning: failed to create registry: %v", err)
log.Warn("failed to create registry", "error", err)
} else {
entry := daemon.RegistryEntry{
WorkspacePath: workspacePath,
@@ -517,14 +518,14 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
StartedAt: time.Now(),
}
if err := registry.Register(entry); err != nil {
log.log("Warning: failed to register daemon: %v", err)
log.Warn("failed to register daemon", "error", err)
} else {
log.log("Registered in global registry")
log.Info("registered in global registry")
}
// Ensure we unregister on exit
defer func() {
if err := registry.Unregister(workspacePath, os.Getpid()); err != nil {
log.log("Warning: failed to unregister daemon: %v", err)
log.Warn("failed to unregister daemon", "error", err)
}
}()
}
@@ -543,16 +544,16 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Get parent PID for monitoring (exit if parent dies)
parentPID := computeDaemonParentPID()
log.log("Monitoring parent process (PID %d)", parentPID)
log.Info("monitoring parent process", "pid", parentPID)
// daemonMode already determined above for SetConfig
switch daemonMode {
case "events":
log.log("Using event-driven mode")
log.Info("using event-driven mode")
jsonlPath := findJSONLPath()
if jsonlPath == "" {
log.log("Error: JSONL path not found, cannot use event-driven mode")
log.log("Falling back to polling mode")
log.Error("JSONL path not found, cannot use event-driven mode")
log.Info("falling back to polling mode")
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
} else {
// Event-driven mode uses separate export-only and import-only functions
@@ -567,10 +568,10 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
runEventDrivenLoop(ctx, cancel, server, serverErrChan, store, jsonlPath, doExport, doAutoImport, autoPull, parentPID, log)
}
case "poll":
log.log("Using polling mode (interval: %v)", interval)
log.Info("using polling mode", "interval", interval)
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
default:
log.log("Unknown BEADS_DAEMON_MODE: %s (valid: poll, events), defaulting to poll", daemonMode)
log.Warn("unknown BEADS_DAEMON_MODE, defaulting to poll", "mode", daemonMode, "valid", "poll, events")
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
}
}

View File

@@ -1,144 +0,0 @@
package main
import (
"os"
"path/filepath"
"testing"
)
// TestGetSocketPathForPID tests socket path derivation from PID file path:
// the daemon.pid filename is swapped for bd.sock while the directory
// portion of the path is kept intact.
func TestGetSocketPathForPID(t *testing.T) {
	cases := []struct {
		name     string
		pidFile  string
		expected string
	}{
		{
			name:     "absolute path",
			pidFile:  "/home/user/.beads/daemon.pid",
			expected: "/home/user/.beads/bd.sock",
		},
		{
			name:     "relative path",
			pidFile:  ".beads/daemon.pid",
			expected: ".beads/bd.sock",
		},
		{
			name:     "nested path",
			pidFile:  "/var/run/beads/project/.beads/daemon.pid",
			expected: "/var/run/beads/project/.beads/bd.sock",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := getSocketPathForPID(tc.pidFile); got != tc.expected {
				t.Errorf("getSocketPathForPID(%q) = %q, want %q", tc.pidFile, got, tc.expected)
			}
		})
	}
}
// Note: TestGetEnvInt, TestGetEnvBool, TestBoolToFlag are already defined in
// daemon_rotation_test.go and autoimport_test.go respectively
// TestEnsureBeadsDir verifies that ensureBeadsDir derives the .beads
// directory from the package-level dbPath, creating the directory when it
// is absent and returning it unchanged when it already exists.
func TestEnsureBeadsDir(t *testing.T) {
	// ensureBeadsDir reads the dbPath global, so snapshot and restore it.
	savedDBPath := dbPath
	defer func() { dbPath = savedDBPath }()

	t.Run("creates directory when dbPath is set", func(t *testing.T) {
		beadsDir := filepath.Join(t.TempDir(), ".beads")
		dbPath = filepath.Join(beadsDir, "beads.db")

		got, err := ensureBeadsDir()
		if err != nil {
			t.Fatalf("ensureBeadsDir() error = %v", err)
		}
		if got != beadsDir {
			t.Errorf("ensureBeadsDir() = %q, want %q", got, beadsDir)
		}
		// Verify directory was created
		if _, statErr := os.Stat(beadsDir); os.IsNotExist(statErr) {
			t.Error("directory was not created")
		}
	})

	t.Run("returns existing directory", func(t *testing.T) {
		beadsDir := filepath.Join(t.TempDir(), ".beads")
		// Pre-create the directory
		if err := os.MkdirAll(beadsDir, 0755); err != nil {
			t.Fatalf("failed to create test directory: %v", err)
		}
		dbPath = filepath.Join(beadsDir, "beads.db")

		got, err := ensureBeadsDir()
		if err != nil {
			t.Fatalf("ensureBeadsDir() error = %v", err)
		}
		if got != beadsDir {
			t.Errorf("ensureBeadsDir() = %q, want %q", got, beadsDir)
		}
	})
}
// TestGetPIDFilePath verifies the daemon PID file lives next to the database.
func TestGetPIDFilePath(t *testing.T) {
	// getPIDFilePath reads the global dbPath; save and restore it.
	savedDBPath := dbPath
	defer func() { dbPath = savedDBPath }()

	t.Run("returns correct PID file path", func(t *testing.T) {
		beadsDir := filepath.Join(t.TempDir(), ".beads")
		dbPath = filepath.Join(beadsDir, "beads.db")

		got, err := getPIDFilePath()
		if err != nil {
			t.Fatalf("getPIDFilePath() error = %v", err)
		}
		if want := filepath.Join(beadsDir, "daemon.pid"); got != want {
			t.Errorf("getPIDFilePath() = %q, want %q", got, want)
		}
	})
}
// TestGetLogFilePath verifies a user-supplied log path wins over the default
// .beads/daemon.log location derived from dbPath.
func TestGetLogFilePath(t *testing.T) {
	// getLogFilePath falls back to a path derived from the global dbPath.
	savedDBPath := dbPath
	defer func() { dbPath = savedDBPath }()

	t.Run("returns user-specified path when provided", func(t *testing.T) {
		const custom = "/custom/path/daemon.log"
		got, err := getLogFilePath(custom)
		if err != nil {
			t.Fatalf("getLogFilePath() error = %v", err)
		}
		if got != custom {
			t.Errorf("getLogFilePath(%q) = %q, want %q", custom, got, custom)
		}
	})

	t.Run("returns default path when empty", func(t *testing.T) {
		beadsDir := filepath.Join(t.TempDir(), ".beads")
		dbPath = filepath.Join(beadsDir, "beads.db")

		got, err := getLogFilePath("")
		if err != nil {
			t.Fatalf("getLogFilePath() error = %v", err)
		}
		if want := filepath.Join(beadsDir, "daemon.log"); got != want {
			t.Errorf("getLogFilePath(\"\") = %q, want %q", got, want)
		}
	})
}

View File

@@ -457,11 +457,7 @@ func TestEventLoopSignalHandling(t *testing.T) {
// createTestLogger creates a daemonLogger for testing.
// The merged diff left an unreachable second return after the legacy
// logFunc-based literal; the intended slog-era body simply delegates to
// newTestLogger (logs are discarded). The unused *testing.T parameter is
// kept so existing call sites keep compiling.
func createTestLogger(t *testing.T) daemonLogger {
	return newTestLogger()
}
// TestDaemonIntegration_SocketCleanup verifies socket cleanup after daemon stops

View File

@@ -369,7 +369,7 @@ func stopAllDaemons() {
}
// startDaemon starts the daemon (in foreground if requested, otherwise background)
func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile string) {
func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile, logLevel string, logJSON bool) {
logPath, err := getLogFilePath(logFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -378,7 +378,7 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
// Run in foreground if --foreground flag set or if we're the forked child process
if foreground || os.Getenv("BD_DAEMON_FOREGROUND") == "1" {
runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile)
runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile, logLevel, logJSON)
return
}
@@ -406,6 +406,12 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
if logFile != "" {
args = append(args, "--log", logFile)
}
if logLevel != "" && logLevel != "info" {
args = append(args, "--log-level", logLevel)
}
if logJSON {
args = append(args, "--log-json")
}
cmd := exec.Command(exe, args...) // #nosec G204 - bd daemon command from trusted binary
cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1")
@@ -455,18 +461,18 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
// Detect nested .beads directories (e.g., .beads/.beads/.beads/)
cleanPath := filepath.Clean(beadsDir)
if strings.Contains(cleanPath, string(filepath.Separator)+".beads"+string(filepath.Separator)+".beads") {
log.log("Error: Nested .beads directory detected: %s", cleanPath)
log.log("Hint: Do not run 'bd daemon' from inside .beads/ directory")
log.log("Hint: Use absolute paths for BEADS_DB or run from workspace root")
log.Error("nested .beads directory detected", "path", cleanPath)
log.Info("hint: do not run 'bd daemon' from inside .beads/ directory")
log.Info("hint: use absolute paths for BEADS_DB or run from workspace root")
return nil, fmt.Errorf("nested .beads directory detected")
}
lock, err := acquireDaemonLock(beadsDir, dbPath)
if err != nil {
if err == ErrDaemonLocked {
log.log("Daemon already running (lock held), exiting")
log.Info("daemon already running (lock held), exiting")
} else {
log.log("Error acquiring daemon lock: %v", err)
log.Error("acquiring daemon lock", "error", err)
}
return nil, err
}
@@ -477,11 +483,11 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
if pid, err := strconv.Atoi(strings.TrimSpace(string(data))); err == nil && pid == myPID {
// PID file is correct, continue
} else {
log.log("PID file has wrong PID (expected %d, got %d), overwriting", myPID, pid)
log.Warn("PID file has wrong PID, overwriting", "expected", myPID, "got", pid)
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
}
} else {
log.log("PID file missing after lock acquisition, creating")
log.Info("PID file missing after lock acquisition, creating")
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
}

View File

@@ -122,12 +122,8 @@ func TestCreateLocalSyncFunc(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
// Create logger
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Create logger (test output via newTestLogger)
log := newTestLogger()
// Create and run local sync function
doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -193,11 +189,7 @@ func TestCreateLocalExportFunc(t *testing.T) {
}
}
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
doExport := createLocalExportFunc(ctx, testStore, log)
doExport()
@@ -258,11 +250,7 @@ func TestCreateLocalAutoImportFunc(t *testing.T) {
t.Fatalf("Failed to write JSONL: %v", err)
}
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
doImport := createLocalAutoImportFunc(ctx, testStore, log)
doImport()
@@ -379,11 +367,7 @@ func TestLocalModeInNonGitDirectory(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
// Run local sync (should work without git)
doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -437,11 +421,7 @@ func TestLocalModeExportImportRoundTrip(t *testing.T) {
defer func() { dbPath = oldDBPath }()
dbPath = testDBPath
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
// Create issues
for i := 0; i < 5; i++ {

View File

@@ -1,23 +1,97 @@
package main
import (
"fmt"
"time"
"io"
"log/slog"
"os"
"strings"
"gopkg.in/natefinch/lumberjack.v2"
)
// daemonLogger wraps a logging function for the daemon
// daemonLogger wraps slog for daemon logging.
// Provides level-specific methods and backward-compatible log() for migration.
type daemonLogger struct {
logFunc func(string, ...interface{})
logger *slog.Logger
}
// log is the backward-compatible logging method (maps to Info level).
// Use Info(), Warn(), Error(), Debug() for explicit levels.
func (d *daemonLogger) log(format string, args ...interface{}) {
d.logFunc(format, args...)
d.logger.Info(format, toSlogArgs(args)...)
}
// setupDaemonLogger creates a rotating log file logger for the daemon
func setupDaemonLogger(logPath string) (*lumberjack.Logger, daemonLogger) {
// Info logs at INFO level.
func (d *daemonLogger) Info(msg string, args ...interface{}) {
d.logger.Info(msg, toSlogArgs(args)...)
}
// Warn logs at WARN level.
func (d *daemonLogger) Warn(msg string, args ...interface{}) {
d.logger.Warn(msg, toSlogArgs(args)...)
}
// Error logs at ERROR level.
func (d *daemonLogger) Error(msg string, args ...interface{}) {
d.logger.Error(msg, toSlogArgs(args)...)
}
// Debug logs at DEBUG level.
func (d *daemonLogger) Debug(msg string, args ...interface{}) {
d.logger.Debug(msg, toSlogArgs(args)...)
}
// toSlogArgs converts variadic args to slog-compatible key-value pairs.
//
// slog accepts a []any of alternating keys and values; both the
// "key, value, ..." form and sprintf-style args are passed through
// unchanged, so a single copy suffices. (The previous key-detection
// branch produced exactly the same result as the fallback branch and
// was dead logic — both just copied args element by element.)
func toSlogArgs(args []interface{}) []any {
	if len(args) == 0 {
		return nil
	}
	result := make([]any, len(args))
	copy(result, args)
	return result
}
// parseLogLevel converts a log level string to slog.Level.
func parseLogLevel(level string) slog.Level {
switch strings.ToLower(level) {
case "debug":
return slog.LevelDebug
case "info":
return slog.LevelInfo
case "warn", "warning":
return slog.LevelWarn
case "error":
return slog.LevelError
default:
return slog.LevelInfo
}
}
// setupDaemonLogger creates a structured logger for the daemon.
// Returns the lumberjack logger (for cleanup) and the daemon logger.
//
// Parameters:
// - logPath: path to log file (uses lumberjack for rotation)
// - jsonFormat: if true, output JSON; otherwise text format
// - level: log level (debug, info, warn, error)
func setupDaemonLogger(logPath string, jsonFormat bool, level slog.Level) (*lumberjack.Logger, daemonLogger) {
maxSizeMB := getEnvInt("BEADS_DAEMON_LOG_MAX_SIZE", 50)
maxBackups := getEnvInt("BEADS_DAEMON_LOG_MAX_BACKUPS", 7)
maxAgeDays := getEnvInt("BEADS_DAEMON_LOG_MAX_AGE", 30)
@@ -31,13 +105,65 @@ func setupDaemonLogger(logPath string) (*lumberjack.Logger, daemonLogger) {
Compress: compress,
}
// Create multi-writer to log to both file and stderr (for foreground mode visibility)
var w io.Writer = logF
// Configure slog handler
opts := &slog.HandlerOptions{
Level: level,
}
var handler slog.Handler
if jsonFormat {
handler = slog.NewJSONHandler(w, opts)
} else {
handler = slog.NewTextHandler(w, opts)
}
logger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
timestamp := time.Now().Format("2006-01-02 15:04:05")
_, _ = fmt.Fprintf(logF, "[%s] %s\n", timestamp, msg)
},
logger: slog.New(handler),
}
return logF, logger
}
// setupDaemonLoggerLegacy is the old signature for backward compatibility
// during migration: text output at Info level.
// TODO: Remove this once all callers are updated to use the new signature.
func setupDaemonLoggerLegacy(logPath string) (*lumberjack.Logger, daemonLogger) {
	rotator, logger := setupDaemonLogger(logPath, false, slog.LevelInfo)
	return rotator, logger
}
// SetupStderrLogger creates a logger that writes to stderr only (no file).
// Useful for foreground mode or testing.
func SetupStderrLogger(jsonFormat bool, level slog.Level) daemonLogger {
	opts := &slog.HandlerOptions{Level: level}
	// Default to text; switch to JSON when requested.
	var handler slog.Handler = slog.NewTextHandler(os.Stderr, opts)
	if jsonFormat {
		handler = slog.NewJSONHandler(os.Stderr, opts)
	}
	return daemonLogger{logger: slog.New(handler)}
}
// newTestLogger creates a no-op logger for testing.
// Logs are discarded - use this when you don't need to verify log output.
func newTestLogger() daemonLogger {
	discard := slog.NewTextHandler(io.Discard, nil)
	return daemonLogger{logger: slog.New(discard)}
}
// newTestLoggerWithWriter creates a logger that writes to the given writer.
// Use this when you need to capture and verify log output in tests.
func newTestLoggerWithWriter(w io.Writer) daemonLogger {
	handler := slog.NewTextHandler(w, nil)
	return daemonLogger{logger: slog.New(handler)}
}

View File

@@ -19,21 +19,21 @@ func startRPCServer(ctx context.Context, socketPath string, store storage.Storag
serverErrChan := make(chan error, 1)
go func() {
log.log("Starting RPC server: %s", socketPath)
log.Info("starting RPC server", "socket", socketPath)
if err := server.Start(ctx); err != nil {
log.log("RPC server error: %v", err)
log.Error("RPC server error", "error", err)
serverErrChan <- err
}
}()
select {
case err := <-serverErrChan:
log.log("RPC server failed to start: %v", err)
log.Error("RPC server failed to start", "error", err)
return nil, nil, err
case <-server.WaitReady():
log.log("RPC server ready (socket listening)")
log.Info("RPC server ready (socket listening)")
case <-time.After(5 * time.Second):
log.log("WARNING: Server didn't signal ready after 5 seconds (may still be starting)")
log.Warn("server didn't signal ready after 5 seconds (may still be starting)")
}
return server, serverErrChan, nil
@@ -78,35 +78,35 @@ func runEventLoop(ctx context.Context, cancel context.CancelFunc, ticker *time.T
case <-parentCheckTicker.C:
// Check if parent process is still alive
if !checkParentProcessAlive(parentPID) {
log.log("Parent process (PID %d) died, shutting down daemon", parentPID)
log.Info("parent process died, shutting down daemon", "parent_pid", parentPID)
cancel()
if err := server.Stop(); err != nil {
log.log("Error stopping server: %v", err)
log.Error("stopping server", "error", err)
}
return
}
case sig := <-sigChan:
if isReloadSignal(sig) {
log.log("Received reload signal, ignoring (daemon continues running)")
log.Info("received reload signal, ignoring (daemon continues running)")
continue
}
log.log("Received signal %v, shutting down gracefully...", sig)
log.Info("received signal, shutting down gracefully", "signal", sig)
cancel()
if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err)
log.Error("stopping RPC server", "error", err)
}
return
case <-ctx.Done():
log.log("Context canceled, shutting down")
log.Info("context canceled, shutting down")
if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err)
log.Error("stopping RPC server", "error", err)
}
return
case err := <-serverErrChan:
log.log("RPC server failed: %v", err)
log.Error("RPC server failed", "error", err)
cancel()
if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err)
log.Error("stopping RPC server", "error", err)
}
return
}

View File

@@ -772,13 +772,11 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
// Helper types for testing
// newTestSyncBranchLogger returns a discard logger plus a message buffer.
// Note: With slog, we can't easily capture formatted messages like before.
// For tests that need to verify log output, use strings.Builder and
// newTestLoggerWithWriter. The *string return is kept so existing callers
// keep compiling, but it always stays empty.
// (The merged diff had left the dead logFunc-based construction plus an
// unreachable second return here.)
func newTestSyncBranchLogger() (daemonLogger, *string) {
	messages := ""
	return newTestLogger(), &messages
}
// TestSyncBranchConfigChange tests changing sync.branch after worktree exists

View File

@@ -335,11 +335,7 @@ func TestExportUpdatesMetadata(t *testing.T) {
// Update metadata using the actual daemon helper function (bd-ar2.3 fix)
// This verifies that updateExportMetadata (used by createExportFunc and createSyncFunc) works correctly
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Verify metadata was set (renamed from last_import_hash to jsonl_content_hash - bd-39o)
@@ -438,11 +434,7 @@ func TestUpdateExportMetadataMultiRepo(t *testing.T) {
}
// Create mock logger
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
// Update metadata for each repo with different keys (bd-ar2.2 multi-repo support)
updateExportMetadata(ctx, store, jsonlPath1, mockLogger, jsonlPath1)
@@ -554,11 +546,7 @@ func TestExportWithMultiRepoConfigUpdatesAllMetadata(t *testing.T) {
// Simulate multi-repo export flow (as in createExportFunc)
// This tests the full integration: getMultiRepoJSONLPaths -> getRepoKeyForPath -> updateExportMetadata
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
// Simulate multi-repo mode with stable keys
multiRepoPaths := []string{primaryJSONL, additionalJSONL}
@@ -676,11 +664,7 @@ func TestUpdateExportMetadataInvalidKeySuffix(t *testing.T) {
}
// Create mock logger
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
// Update metadata with keySuffix containing ':' (bd-web8: should be auto-sanitized)
// This simulates Windows absolute paths like "C:\Users\..."

View File

@@ -15,9 +15,7 @@ import (
// newMockLogger creates a daemonLogger that does nothing.
// Kept as an alias for newTestLogger so existing call sites keep working.
// (The merged diff had left an unreachable second return after the legacy
// logFunc-based literal; this is the intended slog-era body.)
func newMockLogger() daemonLogger {
	return newTestLogger()
}
func TestFileWatcher_JSONLChangeDetection(t *testing.T) {

View File

@@ -272,3 +272,330 @@ func countJSONLIssuesTest(t *testing.T, jsonlPath string) int {
}
return count
}
// TestCreateTombstoneWrapper tests the createTombstone wrapper function.
// A tombstone is a soft delete: the issue row survives with
// types.StatusTombstone, recording who deleted it, why, and (per the last
// subtest) its original type, plus a "deleted" audit event.
// NOTE(review): createTombstone operates on the package-level store — the
// test swaps it for a temp SQLite store and restores it on exit.
func TestCreateTombstoneWrapper(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	testDB := filepath.Join(beadsDir, "beads.db")
	s := newTestStore(t, testDB)
	ctx := context.Background()
	// Save and restore global store
	oldStore := store
	defer func() { store = oldStore }()
	store = s
	t.Run("successful tombstone creation", func(t *testing.T) {
		issue := &types.Issue{
			Title:       "Test Issue",
			Description: "Issue to be tombstoned",
			Status:      types.StatusOpen,
			Priority:    2,
			IssueType:   "task",
		}
		if err := s.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("Failed to create issue: %v", err)
		}
		err := createTombstone(ctx, issue.ID, "test-actor", "Test deletion reason")
		if err != nil {
			t.Fatalf("createTombstone failed: %v", err)
		}
		// Verify tombstone status: the row must still exist, flipped to tombstone.
		updated, err := s.GetIssue(ctx, issue.ID)
		if err != nil {
			t.Fatalf("GetIssue failed: %v", err)
		}
		if updated == nil {
			t.Fatal("Issue should still exist as tombstone")
		}
		if updated.Status != types.StatusTombstone {
			t.Errorf("Expected status %s, got %s", types.StatusTombstone, updated.Status)
		}
	})
	t.Run("tombstone with actor and reason tracking", func(t *testing.T) {
		issue := &types.Issue{
			Title:       "Issue with tracking",
			Description: "Check actor/reason",
			Status:      types.StatusOpen,
			Priority:    1,
			IssueType:   "bug",
		}
		if err := s.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("Failed to create issue: %v", err)
		}
		actor := "admin-user"
		reason := "Duplicate issue"
		err := createTombstone(ctx, issue.ID, actor, reason)
		if err != nil {
			t.Fatalf("createTombstone failed: %v", err)
		}
		// Verify actor and reason were recorded on the tombstoned row.
		updated, err := s.GetIssue(ctx, issue.ID)
		if err != nil {
			t.Fatalf("GetIssue failed: %v", err)
		}
		if updated.DeletedBy != actor {
			t.Errorf("Expected DeletedBy %q, got %q", actor, updated.DeletedBy)
		}
		if updated.DeleteReason != reason {
			t.Errorf("Expected DeleteReason %q, got %q", reason, updated.DeleteReason)
		}
	})
	t.Run("error when issue does not exist", func(t *testing.T) {
		err := createTombstone(ctx, "nonexistent-issue-id", "actor", "reason")
		if err == nil {
			t.Error("Expected error for non-existent issue")
		}
	})
	t.Run("verify tombstone preserves original type", func(t *testing.T) {
		issue := &types.Issue{
			Title:       "Feature issue",
			Description: "Should preserve type",
			Status:      types.StatusOpen,
			Priority:    2,
			IssueType:   types.TypeFeature,
		}
		if err := s.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("Failed to create issue: %v", err)
		}
		err := createTombstone(ctx, issue.ID, "actor", "reason")
		if err != nil {
			t.Fatalf("createTombstone failed: %v", err)
		}
		updated, err := s.GetIssue(ctx, issue.ID)
		if err != nil {
			t.Fatalf("GetIssue failed: %v", err)
		}
		// OriginalType captures the pre-tombstone issue type.
		if updated.OriginalType != string(types.TypeFeature) {
			t.Errorf("Expected OriginalType %q, got %q", types.TypeFeature, updated.OriginalType)
		}
	})
	t.Run("verify audit trail recorded", func(t *testing.T) {
		issue := &types.Issue{
			Title:       "Issue for audit",
			Description: "Check event recording",
			Status:      types.StatusOpen,
			Priority:    2,
			IssueType:   "task",
		}
		if err := s.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("Failed to create issue: %v", err)
		}
		err := createTombstone(ctx, issue.ID, "audit-actor", "audit-reason")
		if err != nil {
			t.Fatalf("createTombstone failed: %v", err)
		}
		// Verify an event was recorded (limit 100 is plenty for one issue).
		events, err := s.GetEvents(ctx, issue.ID, 100)
		if err != nil {
			t.Fatalf("GetEvents failed: %v", err)
		}
		found := false
		for _, e := range events {
			if e.EventType == "deleted" && e.Actor == "audit-actor" {
				found = true
				break
			}
		}
		if !found {
			t.Error("Expected 'deleted' event in audit trail")
		}
	})
}
// TestDeleteIssueWrapper tests the deleteIssue wrapper function.
// Unlike createTombstone, this is a hard delete: the issue row is removed
// entirely, its dependencies are dropped, and the total-issue statistics
// decrease. NOTE(review): deleteIssue operates on the package-level store —
// the test swaps it for a temp SQLite store and restores it on exit.
func TestDeleteIssueWrapper(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	testDB := filepath.Join(beadsDir, "beads.db")
	s := newTestStore(t, testDB)
	ctx := context.Background()
	// Save and restore global store
	oldStore := store
	defer func() { store = oldStore }()
	store = s
	t.Run("successful issue deletion", func(t *testing.T) {
		issue := &types.Issue{
			Title:       "Issue to delete",
			Description: "Will be permanently deleted",
			Status:      types.StatusOpen,
			Priority:    2,
			IssueType:   "task",
		}
		if err := s.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("Failed to create issue: %v", err)
		}
		err := deleteIssue(ctx, issue.ID)
		if err != nil {
			t.Fatalf("deleteIssue failed: %v", err)
		}
		// Verify issue is gone (GetIssue returns nil, not an error, when absent).
		deleted, err := s.GetIssue(ctx, issue.ID)
		if err != nil {
			t.Fatalf("GetIssue failed: %v", err)
		}
		if deleted != nil {
			t.Error("Issue should be completely deleted")
		}
	})
	t.Run("error on non-existent issue", func(t *testing.T) {
		err := deleteIssue(ctx, "nonexistent-issue-id")
		if err == nil {
			t.Error("Expected error for non-existent issue")
		}
	})
	t.Run("verify dependencies are removed", func(t *testing.T) {
		// Create two issues with a dependency
		issue1 := &types.Issue{
			Title:     "Blocker issue",
			Status:    types.StatusOpen,
			Priority:  1,
			IssueType: "task",
		}
		issue2 := &types.Issue{
			Title:     "Dependent issue",
			Status:    types.StatusOpen,
			Priority:  2,
			IssueType: "task",
		}
		if err := s.CreateIssue(ctx, issue1, "test"); err != nil {
			t.Fatalf("Failed to create issue1: %v", err)
		}
		if err := s.CreateIssue(ctx, issue2, "test"); err != nil {
			t.Fatalf("Failed to create issue2: %v", err)
		}
		// Add dependency: issue2 depends on issue1
		dep := &types.Dependency{
			IssueID:     issue2.ID,
			DependsOnID: issue1.ID,
			Type:        types.DepBlocks,
		}
		if err := s.AddDependency(ctx, dep, "test"); err != nil {
			t.Fatalf("Failed to add dependency: %v", err)
		}
		// Delete issue1 (the blocker)
		err := deleteIssue(ctx, issue1.ID)
		if err != nil {
			t.Fatalf("deleteIssue failed: %v", err)
		}
		// Verify issue2 no longer has dependencies
		deps, err := s.GetDependencies(ctx, issue2.ID)
		if err != nil {
			t.Fatalf("GetDependencies failed: %v", err)
		}
		if len(deps) > 0 {
			t.Errorf("Expected no dependencies after deleting blocker, got %d", len(deps))
		}
	})
	t.Run("verify issue removed from database", func(t *testing.T) {
		issue := &types.Issue{
			Title:     "Verify removal",
			Status:    types.StatusOpen,
			Priority:  2,
			IssueType: "task",
		}
		if err := s.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("Failed to create issue: %v", err)
		}
		// Get statistics before delete
		statsBefore, err := s.GetStatistics(ctx)
		if err != nil {
			t.Fatalf("GetStatistics failed: %v", err)
		}
		err = deleteIssue(ctx, issue.ID)
		if err != nil {
			t.Fatalf("deleteIssue failed: %v", err)
		}
		// Get statistics after delete
		statsAfter, err := s.GetStatistics(ctx)
		if err != nil {
			t.Fatalf("GetStatistics failed: %v", err)
		}
		// Hard delete must shrink the issue count by exactly one.
		if statsAfter.TotalIssues != statsBefore.TotalIssues-1 {
			t.Errorf("Expected total issues to decrease by 1, was %d now %d",
				statsBefore.TotalIssues, statsAfter.TotalIssues)
		}
	})
}
// TestCreateTombstoneUnsupportedStorage verifies createTombstone reports a
// clear error when the global store does not support tombstones (a nil
// store fails the storage type assertion).
// Fix: the original used t.Error on a nil error and then dereferenced
// err.Error() unconditionally — a nil-pointer panic if the error check ever
// failed. t.Fatal stops the subtest before the dereference.
func TestCreateTombstoneUnsupportedStorage(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	oldStore := store
	defer func() { store = oldStore }()
	// Set store to nil - the type assertion will fail
	store = nil
	ctx := context.Background()
	err := createTombstone(ctx, "any-id", "actor", "reason")
	if err == nil {
		t.Fatal("Expected error when storage is nil")
	}
	expectedMsg := "tombstone operation not supported by this storage backend"
	if err.Error() != expectedMsg {
		t.Errorf("Expected error %q, got %q", expectedMsg, err.Error())
	}
}
// TestDeleteIssueUnsupportedStorage verifies deleteIssue reports a clear
// error when the global store does not support deletion (a nil store fails
// the storage type assertion).
// Fix: the original used t.Error on a nil error and then dereferenced
// err.Error() unconditionally — a nil-pointer panic if the error check ever
// failed. t.Fatal stops the subtest before the dereference.
func TestDeleteIssueUnsupportedStorage(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	oldStore := store
	defer func() { store = oldStore }()
	// Set store to nil - the type assertion will fail
	store = nil
	ctx := context.Background()
	err := deleteIssue(ctx, "any-id")
	if err == nil {
		t.Fatal("Expected error when storage is nil")
	}
	expectedMsg := "delete operation not supported by this storage backend"
	if err.Error() != expectedMsg {
		t.Errorf("Expected error %q, got %q", expectedMsg, err.Error())
	}
}

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"time"
@@ -52,7 +53,6 @@ var (
doctorInteractive bool // bd-3xl: per-fix confirmation mode
doctorDryRun bool // bd-a5z: preview fixes without applying
doctorOutput string // bd-9cc: export diagnostics to file
doctorVerbose bool // bd-4qfb: show all checks including passed
perfMode bool
checkHealthMode bool
)
@@ -422,10 +422,6 @@ func applyFixList(path string, fixes []doctorCheck) {
// No auto-fix: compaction requires agent review
fmt.Printf(" ⚠ Run 'bd compact --analyze' to review candidates\n")
continue
case "Large Database":
// No auto-fix: pruning deletes data, must be user-controlled
fmt.Printf(" ⚠ Run 'bd cleanup --older-than 90' to prune old closed issues\n")
continue
default:
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
fmt.Printf(" Manual fix: %s\n", check.Fix)
@@ -821,12 +817,6 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, compactionCheck)
// Info only, not a warning - compaction requires human review
// Check 29: Database size (pruning suggestion)
// Note: This check has no auto-fix - pruning is destructive and user-controlled
sizeCheck := convertDoctorCheck(doctor.CheckDatabaseSize(path))
result.Checks = append(result.Checks, sizeCheck)
// Don't fail overall check for size warning, just inform
return result
}
@@ -868,118 +858,136 @@ func exportDiagnostics(result doctorResult, outputPath string) error {
}
func printDiagnostics(result doctorResult) {
// Count checks by status and collect into categories
var passCount, warnCount, failCount int
var errors, warnings []doctorCheck
passedByCategory := make(map[string][]doctorCheck)
for _, check := range result.Checks {
switch check.Status {
case statusOK:
passCount++
cat := check.Category
if cat == "" {
cat = "Other"
}
passedByCategory[cat] = append(passedByCategory[cat], check)
case statusWarning:
warnCount++
warnings = append(warnings, check)
case statusError:
failCount++
errors = append(errors, check)
}
}
// Print header with version and summary at TOP
// Print header with version
fmt.Printf("\nbd doctor v%s\n\n", result.CLIVersion)
fmt.Printf("Summary: %d checks passed, %d warnings, %d errors\n", passCount, warnCount, failCount)
// Print errors section (always shown if any)
if failCount > 0 {
fmt.Println()
fmt.Println(ui.RenderSeparator())
fmt.Printf("%s Errors (%d)\n", ui.RenderFailIcon(), failCount)
fmt.Println(ui.RenderSeparator())
fmt.Println()
// Group checks by category
checksByCategory := make(map[string][]doctorCheck)
for _, check := range result.Checks {
cat := check.Category
if cat == "" {
cat = "Other"
}
checksByCategory[cat] = append(checksByCategory[cat], check)
}
for _, check := range errors {
fmt.Printf("[%s] %s\n", check.Name, check.Message)
// Track counts
var passCount, warnCount, failCount int
var warnings []doctorCheck
// Print checks by category in defined order
for _, category := range doctor.CategoryOrder {
checks, exists := checksByCategory[category]
if !exists || len(checks) == 0 {
continue
}
// Print category header
fmt.Println(ui.RenderCategory(category))
// Print each check in this category
for _, check := range checks {
// Determine status icon
var statusIcon string
switch check.Status {
case statusOK:
statusIcon = ui.RenderPassIcon()
passCount++
case statusWarning:
statusIcon = ui.RenderWarnIcon()
warnCount++
warnings = append(warnings, check)
case statusError:
statusIcon = ui.RenderFailIcon()
failCount++
warnings = append(warnings, check)
}
// Print check line: icon + name + message
fmt.Printf(" %s %s", statusIcon, check.Name)
if check.Message != "" {
fmt.Printf("%s", ui.RenderMuted(" "+check.Message))
}
fmt.Println()
// Print detail if present (indented)
if check.Detail != "" {
fmt.Printf(" %s\n", check.Detail)
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), ui.RenderMuted(check.Detail))
}
}
fmt.Println()
}
// Print any checks without a category
if otherChecks, exists := checksByCategory["Other"]; exists && len(otherChecks) > 0 {
fmt.Println(ui.RenderCategory("Other"))
for _, check := range otherChecks {
var statusIcon string
switch check.Status {
case statusOK:
statusIcon = ui.RenderPassIcon()
passCount++
case statusWarning:
statusIcon = ui.RenderWarnIcon()
warnCount++
warnings = append(warnings, check)
case statusError:
statusIcon = ui.RenderFailIcon()
failCount++
warnings = append(warnings, check)
}
fmt.Printf(" %s %s", statusIcon, check.Name)
if check.Message != "" {
fmt.Printf("%s", ui.RenderMuted(" "+check.Message))
}
fmt.Println()
if check.Detail != "" {
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), ui.RenderMuted(check.Detail))
}
}
fmt.Println()
}
// Print summary line
fmt.Println(ui.RenderSeparator())
summary := fmt.Sprintf("%s %d passed %s %d warnings %s %d failed",
ui.RenderPassIcon(), passCount,
ui.RenderWarnIcon(), warnCount,
ui.RenderFailIcon(), failCount,
)
fmt.Println(summary)
// Print warnings/errors section with fixes
if len(warnings) > 0 {
fmt.Println()
fmt.Println(ui.RenderWarn(ui.IconWarn + " WARNINGS"))
// Sort by severity: errors first, then warnings
slices.SortStableFunc(warnings, func(a, b doctorCheck) int {
// Errors (statusError) come before warnings (statusWarning)
if a.Status == statusError && b.Status != statusError {
return -1
}
if a.Status != statusError && b.Status == statusError {
return 1
}
return 0 // maintain original order within same severity
})
for i, check := range warnings {
// Show numbered items with icon and color based on status
// Errors get entire line in red, warnings just the number in yellow
line := fmt.Sprintf("%s: %s", check.Name, check.Message)
if check.Status == statusError {
fmt.Printf(" %s %s %s\n", ui.RenderFailIcon(), ui.RenderFail(fmt.Sprintf("%d.", i+1)), ui.RenderFail(line))
} else {
fmt.Printf(" %s %s %s\n", ui.RenderWarnIcon(), ui.RenderWarn(fmt.Sprintf("%d.", i+1)), line)
}
if check.Fix != "" {
fmt.Printf(" Fix: %s\n", check.Fix)
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), check.Fix)
}
fmt.Println()
}
}
// Print warnings section (always shown if any)
if warnCount > 0 {
fmt.Println(ui.RenderSeparator())
fmt.Printf("%s Warnings (%d)\n", ui.RenderWarnIcon(), warnCount)
fmt.Println(ui.RenderSeparator())
fmt.Println()
for _, check := range warnings {
fmt.Printf("[%s] %s\n", check.Name, check.Message)
if check.Detail != "" {
fmt.Printf(" %s\n", check.Detail)
}
if check.Fix != "" {
fmt.Printf(" Fix: %s\n", check.Fix)
}
fmt.Println()
}
}
// Print passed section
if passCount > 0 {
fmt.Println(ui.RenderSeparator())
if doctorVerbose {
// Verbose mode: show all passed checks grouped by category
fmt.Printf("%s Passed (%d)\n", ui.RenderPassIcon(), passCount)
fmt.Println(ui.RenderSeparator())
fmt.Println()
for _, category := range doctor.CategoryOrder {
checks, exists := passedByCategory[category]
if !exists || len(checks) == 0 {
continue
}
fmt.Printf(" %s\n", category)
for _, check := range checks {
fmt.Printf(" %s %s", ui.RenderPassIcon(), check.Name)
if check.Message != "" {
fmt.Printf(" %s", ui.RenderMuted(check.Message))
}
fmt.Println()
}
fmt.Println()
}
// Print "Other" category if exists
if otherChecks, exists := passedByCategory["Other"]; exists && len(otherChecks) > 0 {
fmt.Printf(" %s\n", "Other")
for _, check := range otherChecks {
fmt.Printf(" %s %s", ui.RenderPassIcon(), check.Name)
if check.Message != "" {
fmt.Printf(" %s", ui.RenderMuted(check.Message))
}
fmt.Println()
}
fmt.Println()
}
} else {
// Default mode: collapsed summary
fmt.Printf("%s Passed (%d) %s\n", ui.RenderPassIcon(), passCount, ui.RenderMuted("[use --verbose to show details]"))
fmt.Println(ui.RenderSeparator())
}
}
// Final status message
if failCount == 0 && warnCount == 0 {
} else {
fmt.Println()
fmt.Printf("%s\n", ui.RenderPass("✓ All checks passed"))
}
@@ -990,5 +998,4 @@ func init() {
doctorCmd.Flags().BoolVar(&perfMode, "perf", false, "Run performance diagnostics and generate CPU profile")
doctorCmd.Flags().BoolVar(&checkHealthMode, "check-health", false, "Quick health check for git hooks (silent on success)")
doctorCmd.Flags().StringVarP(&doctorOutput, "output", "o", "", "Export diagnostics to JSON file (bd-9cc)")
doctorCmd.Flags().BoolVarP(&doctorVerbose, "verbose", "v", false, "Show all checks including passed (bd-4qfb)")
}

View File

@@ -620,92 +620,3 @@ func isNoDbModeConfigured(beadsDir string) bool {
return cfg.NoDb
}
// CheckDatabaseSize warns when the database has accumulated many closed issues.
// This is purely informational - pruning is NEVER auto-fixed because it
// permanently deletes data. Users must explicitly run 'bd cleanup' to prune.
//
// Config: doctor.suggest_pruning_issue_count (default: 5000, 0 = disabled)
//
// DESIGN NOTE: This check intentionally has NO auto-fix. Unlike other doctor
// checks that fix configuration or sync issues, pruning is destructive and
// irreversible. The user must make an explicit decision to delete their
// closed issue history. We only provide guidance, never action.
func CheckDatabaseSize(path string) DoctorCheck {
	beadsDir := filepath.Join(path, ".beads")

	// Resolve the database path: prefer the configured name, fall back to
	// the canonical default when config is missing or has no database set.
	var dbPath string
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
		dbPath = cfg.DatabasePath(beadsDir)
	} else {
		dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	}

	// If no database, skip this check. All failure modes below also report
	// StatusOK ("N/A ...") because this check is informational only.
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Large Database",
			Status:  StatusOK,
			Message: "N/A (no database)",
		}
	}

	// Threshold for warning (default 5000, 0 = disabled).
	const defaultThreshold = 5000
	threshold := defaultThreshold

	// Open read-only with a busy timeout so we never block or mutate the
	// database while another bd process holds it.
	db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
	if err != nil {
		return DoctorCheck{
			Name:    "Large Database",
			Status:  StatusOK,
			Message: "N/A (unable to open database)",
		}
	}
	defer db.Close()

	// Check for custom threshold in config table. Require exactly one matched
	// integer and a non-negative value; a negative threshold would make every
	// database "large" (closedCount > negative is always true) and produce a
	// nonsense warning, so treat it like a parse error and keep the default.
	var thresholdStr string
	err = db.QueryRow("SELECT value FROM config WHERE key = ?", "doctor.suggest_pruning_issue_count").Scan(&thresholdStr)
	if err == nil {
		if n, err := fmt.Sscanf(thresholdStr, "%d", &threshold); err != nil || n != 1 || threshold < 0 {
			threshold = defaultThreshold // Reset to default on parse error or invalid value
		}
	}

	// If disabled, return OK
	if threshold == 0 {
		return DoctorCheck{
			Name:    "Large Database",
			Status:  StatusOK,
			Message: "Check disabled (threshold = 0)",
		}
	}

	// Count closed issues
	var closedCount int
	err = db.QueryRow("SELECT COUNT(*) FROM issues WHERE status = 'closed'").Scan(&closedCount)
	if err != nil {
		return DoctorCheck{
			Name:    "Large Database",
			Status:  StatusOK,
			Message: "N/A (unable to count issues)",
		}
	}

	// Check against threshold; only warn, never auto-fix (see DESIGN NOTE).
	if closedCount > threshold {
		return DoctorCheck{
			Name:    "Large Database",
			Status:  StatusWarning,
			Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
			Detail:  "Large number of closed issues may impact performance",
			Fix:     "Consider running 'bd cleanup --older-than 90' to prune old closed issues",
		}
	}
	return DoctorCheck{
		Name:    "Large Database",
		Status:  StatusOK,
		Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
	}
}

View File

@@ -145,8 +145,6 @@ func CheckSyncBranchHookCompatibility(path string) DoctorCheck {
Status: StatusWarning,
Message: "Pre-push hook is not a bd hook",
Detail: "Cannot verify sync-branch compatibility with custom hooks",
Fix: "Either run 'bd hooks install --force' to use bd hooks,\n" +
" or ensure your custom hook skips validation when pushing to sync-branch",
}
}

View File

@@ -188,7 +188,7 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
" Only one JSONL file should be used per repository.",
Fix: "Determine which file is current and remove the others:\n" +
" 1. Check .beads/metadata.json for 'jsonl_export' setting\n" +
" 1. Check 'bd stats' to see which file is being used\n" +
" 2. Verify with 'git log .beads/*.jsonl' to see commit history\n" +
" 3. Remove the unused file(s): git rm .beads/<unused>.jsonl\n" +
" 4. Commit the change",

View File

@@ -65,11 +65,7 @@ func TestExportUpdatesDatabaseMtime(t *testing.T) {
}
// Update metadata after export (bd-ymj fix)
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Get JSONL mtime
@@ -170,11 +166,7 @@ func TestDaemonExportScenario(t *testing.T) {
}
// Daemon updates metadata after export (bd-ymj fix)
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// THIS IS THE FIX: daemon now calls TouchDatabaseFile after export
@@ -249,11 +241,7 @@ func TestMultipleExportCycles(t *testing.T) {
}
// Update metadata after export (bd-ymj fix)
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Apply fix

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
@@ -105,42 +106,65 @@ Examples:
title = fmt.Sprintf("Gate: %s:%s", awaitType, awaitID)
}
// Gate creation requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate create requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate create ...\n")
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
var gate *types.Issue
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateCreate(&rpc.GateCreateArgs{
Title: title,
AwaitType: awaitType,
AwaitID: awaitID,
Timeout: timeout,
Waiters: notifyAddrs,
})
if err != nil {
FatalError("gate create: %v", err)
}
// Parse the gate ID from response and fetch full gate
var result rpc.GateCreateResult
if err := json.Unmarshal(resp.Data, &result); err != nil {
FatalError("failed to parse gate create result: %v", err)
}
// Get the full gate for output
showResp, err := daemonClient.GateShow(&rpc.GateShowArgs{ID: result.ID})
if err != nil {
FatalError("failed to fetch created gate: %v", err)
}
if err := json.Unmarshal(showResp.Data, &gate); err != nil {
FatalError("failed to parse gate: %v", err)
}
} else if store != nil {
now := time.Now()
gate = &types.Issue{
// ID will be generated by CreateIssue
Title: title,
IssueType: types.TypeGate,
Status: types.StatusOpen,
Priority: 1, // Gates are typically high priority
Assignee: "deacon/",
Wisp: true, // Gates are wisps (ephemeral)
AwaitType: awaitType,
AwaitID: awaitID,
Timeout: timeout,
Waiters: notifyAddrs,
CreatedAt: now,
UpdatedAt: now,
}
gate.ContentHash = gate.ComputeContentHash()
if err := store.CreateIssue(ctx, gate, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error creating gate: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
now := time.Now()
gate := &types.Issue{
// ID will be generated by CreateIssue
Title: title,
IssueType: types.TypeGate,
Status: types.StatusOpen,
Priority: 1, // Gates are typically high priority
Assignee: "deacon/",
Wisp: true, // Gates are wisps (ephemeral)
AwaitType: awaitType,
AwaitID: awaitID,
Timeout: timeout,
Waiters: notifyAddrs,
CreatedAt: now,
UpdatedAt: now,
}
gate.ContentHash = gate.ComputeContentHash()
if err := store.CreateIssue(ctx, gate, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error creating gate: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
outputJSON(gate)
return
@@ -197,34 +221,39 @@ var gateShowCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
ctx := rootCtx
// Gate show requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate show requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate show %s\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
var gate *types.Issue
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateShow(&rpc.GateShowArgs{ID: args[0]})
if err != nil {
FatalError("gate show: %v", err)
}
if err := json.Unmarshal(resp.Data, &gate); err != nil {
FatalError("failed to parse gate: %v", err)
}
} else if store != nil {
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
os.Exit(1)
}
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
gate, err = store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
@@ -263,30 +292,36 @@ var gateListCmd = &cobra.Command{
ctx := rootCtx
showAll, _ := cmd.Flags().GetBool("all")
// Gate list requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate list requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate list\n")
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
var issues []*types.Issue
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateList(&rpc.GateListArgs{All: showAll})
if err != nil {
FatalError("gate list: %v", err)
}
if err := json.Unmarshal(resp.Data, &issues); err != nil {
FatalError("failed to parse gates: %v", err)
}
} else if store != nil {
// Build filter for gates
gateType := types.TypeGate
filter := types.IssueFilter{
IssueType: &gateType,
}
if !showAll {
openStatus := types.StatusOpen
filter.Status = &openStatus
}
os.Exit(1)
}
// Build filter for gates
gateType := types.TypeGate
filter := types.IssueFilter{
IssueType: &gateType,
}
if !showAll {
openStatus := types.StatusOpen
filter.Status = &openStatus
}
issues, err := store.SearchIssues(ctx, "", filter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing gates: %v\n", err)
var err error
issues, err = store.SearchIssues(ctx, "", filter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing gates: %v\n", err)
os.Exit(1)
}
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
@@ -338,47 +373,58 @@ var gateCloseCmd = &cobra.Command{
reason = "Gate closed"
}
// Gate close requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate close requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate close %s\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
var closedGate *types.Issue
var gateID string
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateClose(&rpc.GateCloseArgs{
ID: args[0],
Reason: reason,
})
if err != nil {
FatalError("gate close: %v", err)
}
if err := json.Unmarshal(resp.Data, &closedGate); err != nil {
FatalError("failed to parse gate: %v", err)
}
gateID = closedGate.ID
} else if store != nil {
var err error
gateID, err = utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
os.Exit(1)
}
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Verify it's a gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
// Verify it's a gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
if err := store.CloseIssue(ctx, gateID, reason, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error closing gate: %v\n", err)
os.Exit(1)
}
if err := store.CloseIssue(ctx, gateID, reason, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error closing gate: %v\n", err)
markDirtyAndScheduleFlush()
closedGate, _ = store.GetIssue(ctx, gateID)
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
closedGate, _ := store.GetIssue(ctx, gateID)
outputJSON(closedGate)
return
}
@@ -402,87 +448,116 @@ var gateWaitCmd = &cobra.Command{
os.Exit(1)
}
// Gate wait requires direct store access for now
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate wait requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate wait %s --notify ...\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
var addedCount int
var gateID string
var newWaiters []string
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateWait(&rpc.GateWaitArgs{
ID: args[0],
Waiters: notifyAddrs,
})
if err != nil {
FatalError("gate wait: %v", err)
}
os.Exit(1)
}
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Get existing gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
if gate.Status == types.StatusClosed {
fmt.Fprintf(os.Stderr, "Error: gate %s is already closed\n", gateID)
os.Exit(1)
}
// Add new waiters (avoiding duplicates)
waiterSet := make(map[string]bool)
for _, w := range gate.Waiters {
waiterSet[w] = true
}
newWaiters := []string{}
for _, addr := range notifyAddrs {
if !waiterSet[addr] {
newWaiters = append(newWaiters, addr)
waiterSet[addr] = true
var result rpc.GateWaitResult
if err := json.Unmarshal(resp.Data, &result); err != nil {
FatalError("failed to parse gate wait result: %v", err)
}
addedCount = result.AddedCount
gateID = args[0] // Use the input ID for display
// For daemon mode, we don't know exactly which waiters were added
// Just report the count
newWaiters = nil
} else if store != nil {
var err error
gateID, err = utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Get existing gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
if gate.Status == types.StatusClosed {
fmt.Fprintf(os.Stderr, "Error: gate %s is already closed\n", gateID)
os.Exit(1)
}
// Add new waiters (avoiding duplicates)
waiterSet := make(map[string]bool)
for _, w := range gate.Waiters {
waiterSet[w] = true
}
for _, addr := range notifyAddrs {
if !waiterSet[addr] {
newWaiters = append(newWaiters, addr)
waiterSet[addr] = true
}
}
addedCount = len(newWaiters)
if addedCount == 0 {
fmt.Println("All specified waiters are already registered on this gate")
return
}
// Update waiters - need to use SQLite directly for Waiters field
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: gate wait requires SQLite storage\n")
os.Exit(1)
}
allWaiters := append(gate.Waiters, newWaiters...)
waitersJSON, _ := json.Marshal(allWaiters)
// Use raw SQL to update the waiters field
_, err = sqliteStore.UnderlyingDB().ExecContext(ctx, `UPDATE issues SET waiters = ?, updated_at = ? WHERE id = ?`,
string(waitersJSON), time.Now(), gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding waiters: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
updatedGate, _ := store.GetIssue(ctx, gateID)
outputJSON(updatedGate)
return
}
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
if len(newWaiters) == 0 {
if addedCount == 0 {
fmt.Println("All specified waiters are already registered on this gate")
return
}
// Update waiters - need to use SQLite directly for Waiters field
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: gate wait requires SQLite storage\n")
os.Exit(1)
}
allWaiters := append(gate.Waiters, newWaiters...)
waitersJSON, _ := json.Marshal(allWaiters)
// Use raw SQL to update the waiters field
_, err = sqliteStore.UnderlyingDB().ExecContext(ctx, `UPDATE issues SET waiters = ?, updated_at = ? WHERE id = ?`,
string(waitersJSON), time.Now(), gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding waiters: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
updatedGate, _ := store.GetIssue(ctx, gateID)
outputJSON(updatedGate)
// For daemon mode, output the result
outputJSON(map[string]interface{}{"added_count": addedCount, "gate_id": gateID})
return
}
fmt.Printf("%s Added waiter(s) to gate %s:\n", ui.RenderPass("✓"), gateID)
fmt.Printf("%s Added %d waiter(s) to gate %s\n", ui.RenderPass("✓"), addedCount, gateID)
for _, addr := range newWaiters {
fmt.Printf(" + %s\n", addr)
}

View File

@@ -84,6 +84,92 @@ func TestImportMultiPartIDs(t *testing.T) {
}
}
// TestImportMultiHyphenPrefix tests GH#422: importing with multi-hyphen prefixes
// like "asianops-audit-" should not cause false positive prefix mismatch errors.
func TestImportMultiHyphenPrefix(t *testing.T) {
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, ".beads", "beads.db")

	// Create database with multi-hyphen prefix "asianops-audit"
	s := newTestStoreWithPrefix(t, dbPath, "asianops-audit")
	ctx := context.Background()

	// Suffixes such as "test", "task", and "demo" look like English words
	// (4+ chars, no digits), which previously caused ExtractIssuePrefix to
	// fall back to the first hyphen and misdetect the prefix. "sa0" covers
	// the short hash-like suffix case.
	seed := []*types.Issue{
		{
			ID:          "asianops-audit-sa0",
			Title:       "Issue with short hash suffix",
			Description: "Short hash suffix should work",
			Status:      "open",
			Priority:    1,
			IssueType:   "task",
		},
		{
			ID:          "asianops-audit-test",
			Title:       "Issue with word-like suffix",
			Description: "Word-like suffix 'test' was causing false positive",
			Status:      "open",
			Priority:    1,
			IssueType:   "task",
		},
		{
			ID:          "asianops-audit-task",
			Title:       "Another word-like suffix",
			Description: "Word-like suffix 'task' was also problematic",
			Status:      "open",
			Priority:    1,
			IssueType:   "task",
		},
		{
			ID:          "asianops-audit-demo",
			Title:       "Demo issue",
			Description: "Word-like suffix 'demo'",
			Status:      "open",
			Priority:    1,
			IssueType:   "task",
		},
	}

	// Import should succeed without prefix mismatch errors
	opts := ImportOptions{DryRun: false, SkipUpdate: false, Strict: false}
	result, err := importIssuesCore(ctx, dbPath, s, seed, opts)
	if err != nil {
		t.Fatalf("Import failed: %v", err)
	}

	// GH#422: Should NOT detect prefix mismatch
	if result.PrefixMismatch {
		t.Errorf("Import incorrectly detected prefix mismatch for multi-hyphen prefix")
		t.Logf("Expected prefix: asianops-audit")
		t.Logf("Mismatched prefixes detected: %v", result.MismatchPrefixes)
	}

	// All issues should be created
	if result.Created != 4 {
		t.Errorf("Expected 4 issues created, got %d", result.Created)
	}

	// Round-trip: each imported issue must be retrievable with its title intact.
	for _, want := range seed {
		got, err := s.GetIssue(ctx, want.ID)
		if err != nil {
			t.Errorf("Failed to get issue %s: %v", want.ID, err)
			continue
		}
		if got.Title != want.Title {
			t.Errorf("Issue %s title mismatch: got %q, want %q", want.ID, got.Title, want.Title)
		}
	}
}
// TestDetectPrefixFromIssues tests the detectPrefixFromIssues function
// with multi-part IDs
func TestDetectPrefixFromIssues(t *testing.T) {

View File

@@ -33,8 +33,8 @@ and database file. Optionally specify a custom issue prefix.
With --no-db: creates .beads/ directory and issues.jsonl file instead of SQLite database.
With --stealth: configures global git settings for invisible beads usage:
Global gitignore to prevent beads files from being committed
With --stealth: configures per-repository git settings for invisible beads usage:
.git/info/exclude to prevent beads files from being committed
• Claude Code settings with bd onboard instruction
Perfect for personal use without affecting repo collaborators.`,
Run: func(cmd *cobra.Command, _ []string) {
@@ -1361,22 +1361,15 @@ func readFirstIssueFromGit(jsonlPath, gitRef string) (*types.Issue, error) {
return nil, nil
}
// setupStealthMode configures global git settings for stealth operation
// setupStealthMode configures git settings for stealth operation
// Uses .git/info/exclude (per-repository) instead of global gitignore because:
// - Global gitignore doesn't support absolute paths (GitHub #704)
// - .git/info/exclude is designed for user-specific, repo-local ignores
// - Patterns are relative to repo root, so ".beads/" works correctly
func setupStealthMode(verbose bool) error {
homeDir, err := os.UserHomeDir()
if err != nil {
return fmt.Errorf("failed to get user home directory: %w", err)
}
// Get the absolute path of the current project
projectPath, err := os.Getwd()
if err != nil {
return fmt.Errorf("failed to get current working directory: %w", err)
}
// Setup global gitignore with project-specific paths
if err := setupGlobalGitIgnore(homeDir, projectPath, verbose); err != nil {
return fmt.Errorf("failed to setup global gitignore: %w", err)
// Setup per-repository git exclude file
if err := setupGitExclude(verbose); err != nil {
return fmt.Errorf("failed to setup git exclude: %w", err)
}
// Setup claude settings
@@ -1386,7 +1379,7 @@ func setupStealthMode(verbose bool) error {
if verbose {
fmt.Printf("\n%s Stealth mode configured successfully!\n\n", ui.RenderPass("✓"))
fmt.Printf(" Global gitignore: %s\n", ui.RenderAccent(projectPath+"/.beads/ ignored"))
fmt.Printf(" Git exclude: %s\n", ui.RenderAccent(".git/info/exclude configured"))
fmt.Printf(" Claude settings: %s\n\n", ui.RenderAccent("configured with bd onboard instruction"))
fmt.Printf("Your beads setup is now %s - other repo collaborators won't see any beads-related files.\n\n", ui.RenderAccent("invisible"))
}
@@ -1394,7 +1387,80 @@ func setupStealthMode(verbose bool) error {
return nil
}
// setupGitExclude adds beads-related ignore patterns to .git/info/exclude.
// Per-repository excludes are the correct mechanism for user-specific ignores
// (GitHub #704): unlike a global gitignore, patterns here are interpreted
// relative to the repository root, so ".beads/" works as expected.
func setupGitExclude(verbose bool) error {
	// Resolve the .git directory through git itself so that worktrees
	// (where .git is a file pointing elsewhere) are handled correctly.
	out, err := exec.Command("git", "rev-parse", "--git-dir").Output()
	if err != nil {
		return fmt.Errorf("not a git repository")
	}
	gitDir := strings.TrimSpace(string(out))

	infoDir := filepath.Join(gitDir, "info")
	excludePath := filepath.Join(infoDir, "exclude")
	if err := os.MkdirAll(infoDir, 0755); err != nil {
		return fmt.Errorf("failed to create git info directory: %w", err)
	}

	// Load whatever the user already has; a missing file is fine.
	var existing string
	// #nosec G304 - git config path
	if data, readErr := os.ReadFile(excludePath); readErr == nil {
		existing = string(data)
	}

	// Relative patterns work correctly inside .git/info/exclude.
	const (
		beadsPattern  = ".beads/"
		claudePattern = ".claude/settings.local.json"
	)
	hasBeads := strings.Contains(existing, beadsPattern)
	hasClaude := strings.Contains(existing, claudePattern)
	if hasBeads && hasClaude {
		if verbose {
			fmt.Printf("Git exclude already configured for stealth mode\n")
		}
		return nil
	}

	// At least one pattern is missing: append a section header plus the
	// missing entries, preserving the user's existing content verbatim.
	var b strings.Builder
	b.WriteString(existing)
	if len(existing) > 0 && !strings.HasSuffix(existing, "\n") {
		b.WriteString("\n")
	}
	b.WriteString("\n# Beads stealth mode (added by bd init --stealth)\n")
	if !hasBeads {
		b.WriteString(beadsPattern + "\n")
	}
	if !hasClaude {
		b.WriteString(claudePattern + "\n")
	}

	// #nosec G306 - config file needs 0644
	if err := os.WriteFile(excludePath, []byte(b.String()), 0644); err != nil {
		return fmt.Errorf("failed to write git exclude file: %w", err)
	}
	if verbose {
		fmt.Printf("Configured git exclude for stealth mode: %s\n", excludePath)
	}
	return nil
}
// setupGlobalGitIgnore configures global gitignore to ignore beads and claude files for a specific project
// DEPRECATED: This function uses absolute paths which don't work in gitignore (GitHub #704).
// Use setupGitExclude instead for new code.
func setupGlobalGitIgnore(homeDir string, projectPath string, verbose bool) error {
// Check if user already has a global gitignore file configured
cmd := exec.Command("git", "config", "--global", "core.excludesfile")

View File

@@ -74,11 +74,10 @@ This command:
"error": "no_beads_directory",
"message": "No .beads directory found. Run 'bd init' first.",
})
} else {
fmt.Fprintf(os.Stderr, "Error: no .beads directory found\n")
fmt.Fprintf(os.Stderr, "Hint: run 'bd init' to initialize bd\n")
}
os.Exit(1)
} else {
FatalErrorWithHint("no .beads directory found", "run 'bd init' to initialize bd")
}
}
// Load config to get target database name (respects user's config.json)
@@ -103,10 +102,10 @@ This command:
"error": "detection_failed",
"message": err.Error(),
})
os.Exit(1)
} else {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
FatalError("%v", err)
}
os.Exit(1)
}
if len(databases) == 0 {
@@ -174,14 +173,15 @@ This command:
"message": "Multiple old database files found",
"databases": formatDBList(oldDBs),
})
os.Exit(1)
} else {
fmt.Fprintf(os.Stderr, "Error: multiple old database files found:\n")
for _, db := range oldDBs {
fmt.Fprintf(os.Stderr, " - %s (version: %s)\n", filepath.Base(db.path), db.version)
}
fmt.Fprintf(os.Stderr, "\nPlease manually rename the correct database to %s and remove others.\n", cfg.Database)
os.Exit(1)
}
os.Exit(1)
} else if currentDB != nil && currentDB.version != Version {
// Update version metadata
needsVersionUpdate = true

View File

@@ -227,9 +227,9 @@ func runMolBond(cmd *cobra.Command, args []string) {
// Compound protos are templates - always use permanent storage
result, err = bondProtoProto(ctx, store, issueA, issueB, bondType, customTitle, actor)
case aIsProto && !bIsProto:
result, err = bondProtoMol(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor)
result, err = bondProtoMol(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor, pour)
case !aIsProto && bIsProto:
result, err = bondMolProto(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor)
result, err = bondMolProto(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor, pour)
default:
result, err = bondMolMol(ctx, targetStore, issueA, issueB, bondType, actor)
}
@@ -366,7 +366,7 @@ func bondProtoProto(ctx context.Context, s storage.Storage, protoA, protoB *type
// bondProtoMol bonds a proto to an existing molecule by spawning the proto.
// If childRef is provided, generates custom IDs like "parent.childref" (dynamic bonding).
func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string) (*BondResult, error) {
func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, pour bool) (*BondResult, error) {
// Load proto subgraph
subgraph, err := loadTemplateSubgraph(ctx, s, proto.ID)
if err != nil {
@@ -389,7 +389,7 @@ func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issu
opts := CloneOptions{
Vars: vars,
Actor: actorName,
Wisp: true, // wisp by default for molecule execution - bd-2vh3
Wisp: !pour, // wisp by default, but --pour makes persistent (bd-l7y3)
}
// Dynamic bonding: use custom IDs if childRef is provided
@@ -444,9 +444,9 @@ func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issu
}
// bondMolProto bonds a molecule to a proto (symmetric with bondProtoMol)
func bondMolProto(ctx context.Context, s storage.Storage, mol, proto *types.Issue, bondType string, vars map[string]string, childRef string, actorName string) (*BondResult, error) {
func bondMolProto(ctx context.Context, s storage.Storage, mol, proto *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, pour bool) (*BondResult, error) {
// Same as bondProtoMol but with arguments swapped
return bondProtoMol(ctx, s, proto, mol, bondType, vars, childRef, actorName)
return bondProtoMol(ctx, s, proto, mol, bondType, vars, childRef, actorName, pour)
}
// bondMolMol bonds two molecules together

View File

@@ -6,6 +6,8 @@ import (
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
@@ -25,9 +27,15 @@ This command:
After a crash or session reset, the pinned root issue ensures the agent
can resume from where it left off by checking 'bd ready'.
The --template-db flag enables cross-database spawning: read templates from
one database (e.g., main) while writing spawned instances to another (e.g., wisp).
This is essential for wisp molecule spawning where templates exist in the main
database but instances should be ephemeral.
Example:
bd mol run mol-version-bump --var version=1.2.0
bd mol run bd-qqc --var version=0.32.0 --var date=2025-01-01`,
bd mol run bd-qqc --var version=0.32.0 --var date=2025-01-01
bd --db .beads-wisp/beads.db mol run mol-patrol --template-db .beads/beads.db`,
Args: cobra.ExactArgs(1),
Run: runMolRun,
}
@@ -49,6 +57,7 @@ func runMolRun(cmd *cobra.Command, args []string) {
}
varFlags, _ := cmd.Flags().GetStringSlice("var")
templateDB, _ := cmd.Flags().GetString("template-db")
// Parse variables
vars := make(map[string]string)
@@ -61,15 +70,42 @@ func runMolRun(cmd *cobra.Command, args []string) {
vars[parts[0]] = parts[1]
}
// Resolve molecule ID
moleculeID, err := utils.ResolvePartialID(ctx, store, args[0])
// Determine which store to use for reading the template
// If --template-db is set, open a separate connection for reading the template
// This enables cross-database spawning (read from main, write to wisp)
//
// Auto-discovery: if --db contains ".beads-wisp" (wisp storage) but --template-db
// is not set, automatically use the main database for templates. This handles the
// common case of spawning patrol molecules from main DB into wisp storage.
templateStore := store
if templateDB == "" && strings.Contains(dbPath, ".beads-wisp") {
// Auto-discover main database for templates
templateDB = beads.FindDatabasePath()
if templateDB == "" {
fmt.Fprintf(os.Stderr, "Error: cannot find main database for templates\n")
fmt.Fprintf(os.Stderr, "Hint: specify --template-db explicitly\n")
os.Exit(1)
}
}
if templateDB != "" {
var err error
templateStore, err = sqlite.NewWithTimeout(ctx, templateDB, lockTimeout)
if err != nil {
fmt.Fprintf(os.Stderr, "Error opening template database %s: %v\n", templateDB, err)
os.Exit(1)
}
defer templateStore.Close()
}
// Resolve molecule ID from template store
moleculeID, err := utils.ResolvePartialID(ctx, templateStore, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error resolving molecule ID %s: %v\n", args[0], err)
os.Exit(1)
}
// Load the molecule subgraph
subgraph, err := loadTemplateSubgraph(ctx, store, moleculeID)
// Load the molecule subgraph from template store
subgraph, err := loadTemplateSubgraph(ctx, templateStore, moleculeID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error loading molecule: %v\n", err)
os.Exit(1)
@@ -132,6 +168,7 @@ func runMolRun(cmd *cobra.Command, args []string) {
func init() {
molRunCmd.Flags().StringSlice("var", []string{}, "Variable substitution (key=value)")
molRunCmd.Flags().String("template-db", "", "Database to read templates from (enables cross-database spawning)")
molCmd.AddCommand(molRunCmd)
}

View File

@@ -219,7 +219,7 @@ func runMolSpawn(cmd *cobra.Command, args []string) {
}
for _, attach := range attachments {
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor)
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor, pour)
if err != nil {
fmt.Fprintf(os.Stderr, "Error attaching %s: %v\n", attach.id, err)
os.Exit(1)

View File

@@ -343,7 +343,7 @@ func TestBondProtoMol(t *testing.T) {
// Bond proto to molecule
vars := map[string]string{"name": "auth-feature"}
result, err := bondProtoMol(ctx, store, proto, mol, types.BondTypeSequential, vars, "", "test")
result, err := bondProtoMol(ctx, store, proto, mol, types.BondTypeSequential, vars, "", "test", false)
if err != nil {
t.Fatalf("bondProtoMol failed: %v", err)
}
@@ -840,7 +840,7 @@ func TestSpawnWithBasicAttach(t *testing.T) {
}
// Attach the second proto (simulating --attach flag behavior)
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test")
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test", false)
if err != nil {
t.Fatalf("Failed to bond attachment: %v", err)
}
@@ -945,12 +945,12 @@ func TestSpawnWithMultipleAttachments(t *testing.T) {
}
// Attach both protos (simulating --attach A --attach B)
bondResultA, err := bondProtoMol(ctx, s, attachA, spawnedMol, types.BondTypeSequential, nil, "", "test")
bondResultA, err := bondProtoMol(ctx, s, attachA, spawnedMol, types.BondTypeSequential, nil, "", "test", false)
if err != nil {
t.Fatalf("Failed to bond attachA: %v", err)
}
bondResultB, err := bondProtoMol(ctx, s, attachB, spawnedMol, types.BondTypeSequential, nil, "", "test")
bondResultB, err := bondProtoMol(ctx, s, attachB, spawnedMol, types.BondTypeSequential, nil, "", "test", false)
if err != nil {
t.Fatalf("Failed to bond attachB: %v", err)
}
@@ -1063,7 +1063,7 @@ func TestSpawnAttachTypes(t *testing.T) {
}
// Bond with specified type
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, tt.bondType, nil, "", "test")
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, tt.bondType, nil, "", "test", false)
if err != nil {
t.Fatalf("Failed to bond: %v", err)
}
@@ -1228,7 +1228,7 @@ func TestSpawnVariableAggregation(t *testing.T) {
// Bond attachment with same variables
spawnedMol, _ := s.GetIssue(ctx, spawnResult.NewEpicID)
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test")
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test", false)
if err != nil {
t.Fatalf("Failed to bond: %v", err)
}
@@ -2238,7 +2238,7 @@ func TestBondProtoMolWithRef(t *testing.T) {
// Bond proto to patrol with custom child ref
vars := map[string]string{"polecat_name": "ace"}
childRef := "arm-{{polecat_name}}"
result, err := bondProtoMol(ctx, s, protoRoot, patrol, types.BondTypeSequential, vars, childRef, "test")
result, err := bondProtoMol(ctx, s, protoRoot, patrol, types.BondTypeSequential, vars, childRef, "test", false)
if err != nil {
t.Fatalf("bondProtoMol failed: %v", err)
}
@@ -2309,14 +2309,14 @@ func TestBondProtoMolMultipleArms(t *testing.T) {
// Bond arm-ace
varsAce := map[string]string{"name": "ace"}
resultAce, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsAce, "arm-{{name}}", "test")
resultAce, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsAce, "arm-{{name}}", "test", false)
if err != nil {
t.Fatalf("bondProtoMol (ace) failed: %v", err)
}
// Bond arm-nux
varsNux := map[string]string{"name": "nux"}
resultNux, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsNux, "arm-{{name}}", "test")
resultNux, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsNux, "arm-{{name}}", "test", false)
if err != nil {
t.Fatalf("bondProtoMol (nux) failed: %v", err)
}

View File

@@ -200,7 +200,7 @@ func runPour(cmd *cobra.Command, args []string) {
}
for _, attach := range attachments {
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor)
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor, true)
if err != nil {
fmt.Fprintf(os.Stderr, "Error attaching %s: %v\n", attach.id, err)
os.Exit(1)

View File

@@ -26,14 +26,9 @@ Examples:
bd search "database" --label backend --limit 10
bd search --query "performance" --assignee alice
bd search "bd-5q" # Search by partial ID
bd search "security" --priority 1 # Exact priority match
bd search "security" --priority-min 0 --priority-max 2 # Priority range
bd search "security" --priority-min 0 --priority-max 2
bd search "bug" --created-after 2025-01-01
bd search "refactor" --updated-after 2025-01-01 --priority-min 1
bd search "bug" --desc-contains "authentication" # Search in description
bd search "" --empty-description # Issues without description
bd search "" --no-assignee # Unassigned issues
bd search "" --no-labels # Issues without labels
bd search "bug" --sort priority
bd search "task" --sort created --reverse`,
Run: func(cmd *cobra.Command, args []string) {
@@ -46,31 +41,9 @@ Examples:
query = queryFlag
}
// Check if any filter flags are set (allows empty query with filters)
hasFilters := cmd.Flags().Changed("status") ||
cmd.Flags().Changed("priority") ||
cmd.Flags().Changed("assignee") ||
cmd.Flags().Changed("type") ||
cmd.Flags().Changed("label") ||
cmd.Flags().Changed("label-any") ||
cmd.Flags().Changed("created-after") ||
cmd.Flags().Changed("created-before") ||
cmd.Flags().Changed("updated-after") ||
cmd.Flags().Changed("updated-before") ||
cmd.Flags().Changed("closed-after") ||
cmd.Flags().Changed("closed-before") ||
cmd.Flags().Changed("priority-min") ||
cmd.Flags().Changed("priority-max") ||
cmd.Flags().Changed("title-contains") ||
cmd.Flags().Changed("desc-contains") ||
cmd.Flags().Changed("notes-contains") ||
cmd.Flags().Changed("empty-description") ||
cmd.Flags().Changed("no-assignee") ||
cmd.Flags().Changed("no-labels")
// If no query and no filters provided, show help
if query == "" && !hasFilters {
fmt.Fprintf(os.Stderr, "Error: search query or filter is required\n")
// If no query provided, show help
if query == "" {
fmt.Fprintf(os.Stderr, "Error: search query is required\n")
if err := cmd.Help(); err != nil {
fmt.Fprintf(os.Stderr, "Error displaying help: %v\n", err)
}
@@ -88,11 +61,6 @@ Examples:
sortBy, _ := cmd.Flags().GetString("sort")
reverse, _ := cmd.Flags().GetBool("reverse")
// Pattern matching flags
titleContains, _ := cmd.Flags().GetString("title-contains")
descContains, _ := cmd.Flags().GetString("desc-contains")
notesContains, _ := cmd.Flags().GetString("notes-contains")
// Date range flags
createdAfter, _ := cmd.Flags().GetString("created-after")
createdBefore, _ := cmd.Flags().GetString("created-before")
@@ -101,11 +69,6 @@ Examples:
closedAfter, _ := cmd.Flags().GetString("closed-after")
closedBefore, _ := cmd.Flags().GetString("closed-before")
// Empty/null check flags
emptyDesc, _ := cmd.Flags().GetBool("empty-description")
noAssignee, _ := cmd.Flags().GetBool("no-assignee")
noLabels, _ := cmd.Flags().GetBool("no-labels")
// Priority range flags
priorityMinStr, _ := cmd.Flags().GetString("priority-min")
priorityMaxStr, _ := cmd.Flags().GetString("priority-max")
@@ -141,39 +104,6 @@ Examples:
filter.LabelsAny = labelsAny
}
// Exact priority match (use Changed() to properly handle P0)
if cmd.Flags().Changed("priority") {
priorityStr, _ := cmd.Flags().GetString("priority")
priority, err := validation.ValidatePriority(priorityStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
filter.Priority = &priority
}
// Pattern matching
if titleContains != "" {
filter.TitleContains = titleContains
}
if descContains != "" {
filter.DescriptionContains = descContains
}
if notesContains != "" {
filter.NotesContains = notesContains
}
// Empty/null checks
if emptyDesc {
filter.EmptyDescription = true
}
if noAssignee {
filter.NoAssignee = true
}
if noLabels {
filter.NoLabels = true
}
// Date ranges
if createdAfter != "" {
t, err := parseTimeFlag(createdAfter)
@@ -270,21 +200,6 @@ Examples:
listArgs.LabelsAny = labelsAny
}
// Exact priority match
if filter.Priority != nil {
listArgs.Priority = filter.Priority
}
// Pattern matching
listArgs.TitleContains = titleContains
listArgs.DescriptionContains = descContains
listArgs.NotesContains = notesContains
// Empty/null checks
listArgs.EmptyDescription = filter.EmptyDescription
listArgs.NoAssignee = filter.NoAssignee
listArgs.NoLabels = filter.NoLabels
// Date ranges
if filter.CreatedAfter != nil {
listArgs.CreatedAfter = filter.CreatedAfter.Format(time.RFC3339)
@@ -457,7 +372,6 @@ func outputSearchResults(issues []*types.Issue, query string, longFormat bool) {
func init() {
searchCmd.Flags().String("query", "", "Search query (alternative to positional argument)")
searchCmd.Flags().StringP("status", "s", "", "Filter by status (open, in_progress, blocked, deferred, closed)")
registerPriorityFlag(searchCmd, "")
searchCmd.Flags().StringP("assignee", "a", "", "Filter by assignee")
searchCmd.Flags().StringP("type", "t", "", "Filter by type (bug, feature, task, epic, chore, merge-request, molecule, gate)")
searchCmd.Flags().StringSliceP("label", "l", []string{}, "Filter by labels (AND: must have ALL)")
@@ -467,11 +381,6 @@ func init() {
searchCmd.Flags().String("sort", "", "Sort by field: priority, created, updated, closed, status, id, title, type, assignee")
searchCmd.Flags().BoolP("reverse", "r", false, "Reverse sort order")
// Pattern matching flags
searchCmd.Flags().String("title-contains", "", "Filter by title substring (case-insensitive)")
searchCmd.Flags().String("desc-contains", "", "Filter by description substring (case-insensitive)")
searchCmd.Flags().String("notes-contains", "", "Filter by notes substring (case-insensitive)")
// Date range flags
searchCmd.Flags().String("created-after", "", "Filter issues created after date (YYYY-MM-DD or RFC3339)")
searchCmd.Flags().String("created-before", "", "Filter issues created before date (YYYY-MM-DD or RFC3339)")
@@ -480,11 +389,6 @@ func init() {
searchCmd.Flags().String("closed-after", "", "Filter issues closed after date (YYYY-MM-DD or RFC3339)")
searchCmd.Flags().String("closed-before", "", "Filter issues closed before date (YYYY-MM-DD or RFC3339)")
// Empty/null check flags
searchCmd.Flags().Bool("empty-description", false, "Filter issues with empty or missing description")
searchCmd.Flags().Bool("no-assignee", false, "Filter issues with no assignee")
searchCmd.Flags().Bool("no-labels", false, "Filter issues with no labels")
// Priority range flags
searchCmd.Flags().String("priority-min", "", "Filter by minimum priority (inclusive, 0-4 or P0-P4)")
searchCmd.Flags().String("priority-max", "", "Filter by maximum priority (inclusive, 0-4 or P0-P4)")

View File

@@ -972,10 +972,6 @@ var closeCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
CheckReadonly("close")
reason, _ := cmd.Flags().GetString("reason")
// Check --resolution alias if --reason not provided
if reason == "" {
reason, _ = cmd.Flags().GetString("resolution")
}
if reason == "" {
reason = "Closed"
}
@@ -1057,8 +1053,6 @@ var closeCmd = &cobra.Command{
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, &issue)
}
// Run config-based close hooks (bd-g4b4)
hooks.RunConfigCloseHooks(ctx, &issue)
if jsonOutput {
closedIssues = append(closedIssues, &issue)
}
@@ -1111,12 +1105,8 @@ var closeCmd = &cobra.Command{
// Run close hook (bd-kwro.8)
closedIssue, _ := store.GetIssue(ctx, id)
if closedIssue != nil {
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, closedIssue)
}
// Run config-based close hooks (bd-g4b4)
hooks.RunConfigCloseHooks(ctx, closedIssue)
if closedIssue != nil && hookRunner != nil {
hookRunner.Run(hooks.EventClose, closedIssue)
}
if jsonOutput {
@@ -1421,8 +1411,6 @@ func init() {
rootCmd.AddCommand(editCmd)
closeCmd.Flags().StringP("reason", "r", "", "Reason for closing")
closeCmd.Flags().String("resolution", "", "Alias for --reason (Jira CLI convention)")
_ = closeCmd.Flags().MarkHidden("resolution") // Hidden alias for agent/CLI ergonomics
closeCmd.Flags().Bool("json", false, "Output JSON format")
closeCmd.Flags().BoolP("force", "f", false, "Force close pinned issues")
closeCmd.Flags().Bool("continue", false, "Auto-advance to next step in molecule")

File diff suppressed because it is too large Load Diff

285
cmd/bd/sync_branch.go Normal file
View File

@@ -0,0 +1,285 @@
package main
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/steveyegge/beads/internal/syncbranch"
)
// getCurrentBranch reports the short name of the branch HEAD points at.
// It uses symbolic-ref rather than rev-parse so it also works in a freshly
// initialized repository that has no commits yet (bd-flil).
func getCurrentBranch(ctx context.Context) (string, error) {
	out, err := exec.CommandContext(ctx, "git", "symbolic-ref", "--short", "HEAD").Output()
	if err != nil {
		return "", fmt.Errorf("failed to get current branch: %w", err)
	}
	return strings.TrimSpace(string(out)), nil
}
// getSyncBranch returns the configured sync branch name.
// It returns an error when the store cannot be initialized, when reading
// the config fails, or when sync.branch has not been configured at all.
func getSyncBranch(ctx context.Context) (string, error) {
	// The storage layer must be up before config can be read from it.
	if err := ensureStoreActive(); err != nil {
		return "", fmt.Errorf("failed to initialize store: %w", err)
	}
	name, err := syncbranch.Get(ctx, store)
	switch {
	case err != nil:
		return "", fmt.Errorf("failed to get sync branch config: %w", err)
	case name == "":
		return "", fmt.Errorf("sync.branch not configured (run 'bd config set sync.branch <branch-name>')")
	}
	return name, nil
}
// showSyncStatus shows the diff between sync branch and main branch.
//
// It prints, in order: the current and sync branch names, the commits unique
// to each branch, and the textual diff of .beads/issues.jsonl between the two
// branches. Returns an error when not in a git repository, when the sync
// branch does not exist, or when an underlying git command fails.
func showSyncStatus(ctx context.Context) error {
	if !isGitRepo() {
		return fmt.Errorf("not in a git repository")
	}
	currentBranch, err := getCurrentBranch(ctx)
	if err != nil {
		return err
	}
	syncBranch, err := getSyncBranch(ctx)
	if err != nil {
		return err
	}
	// Check if sync branch exists
	checkCmd := exec.CommandContext(ctx, "git", "show-ref", "--verify", "--quiet", "refs/heads/"+syncBranch)
	if err := checkCmd.Run(); err != nil {
		return fmt.Errorf("sync branch '%s' does not exist", syncBranch)
	}
	fmt.Printf("Current branch: %s\n", currentBranch)
	fmt.Printf("Sync branch: %s\n\n", syncBranch)
	// Show the commit delta in both directions. The duplicated
	// log-then-print logic is factored into printCommitRange.
	fmt.Println("Commits in sync branch not in main:")
	if err := printCommitRange(ctx, currentBranch, syncBranch); err != nil {
		return err
	}
	fmt.Println("\nCommits in main not in sync branch:")
	if err := printCommitRange(ctx, syncBranch, currentBranch); err != nil {
		return err
	}
	// Show file diff for .beads/issues.jsonl
	fmt.Println("\nFile differences in .beads/issues.jsonl:")
	diffCmd := exec.CommandContext(ctx, "git", "diff", currentBranch+"..."+syncBranch, "--", ".beads/issues.jsonl")
	diffOutput, err := diffCmd.CombinedOutput()
	if err != nil {
		// diff returns non-zero when there are differences, which is fine;
		// only treat it as a failure when git produced no output at all.
		if len(diffOutput) == 0 {
			return fmt.Errorf("failed to get diff: %w", err)
		}
	}
	if len(strings.TrimSpace(string(diffOutput))) == 0 {
		fmt.Println(" (no differences)")
	} else {
		fmt.Print(string(diffOutput))
	}
	return nil
}

// printCommitRange prints `git log --oneline base..head` (commits reachable
// from head but not base), or "(none)" when the range is empty.
func printCommitRange(ctx context.Context, base, head string) error {
	logCmd := exec.CommandContext(ctx, "git", "log", "--oneline", base+".."+head)
	logOutput, err := logCmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to get commit log: %w\n%s", err, logOutput)
	}
	if len(strings.TrimSpace(string(logOutput))) == 0 {
		fmt.Println(" (none)")
	} else {
		fmt.Print(string(logOutput))
	}
	return nil
}
// mergeSyncBranch merges the sync branch back to the main branch.
//
// Preconditions enforced before merging: we must be in a git repository,
// the configured sync branch must exist, and the working tree must be clean
// (merging onto a dirty tree risks clobbering uncommitted work). When dryRun
// is true, nothing is merged; the commits that would be merged are listed
// instead and nil is returned.
func mergeSyncBranch(ctx context.Context, dryRun bool) error {
	if !isGitRepo() {
		return fmt.Errorf("not in a git repository")
	}
	currentBranch, err := getCurrentBranch(ctx)
	if err != nil {
		return err
	}
	syncBranch, err := getSyncBranch(ctx)
	if err != nil {
		return err
	}
	// Check if sync branch exists (show-ref exits non-zero when the ref is absent)
	checkCmd := exec.CommandContext(ctx, "git", "show-ref", "--verify", "--quiet", "refs/heads/"+syncBranch)
	if err := checkCmd.Run(); err != nil {
		return fmt.Errorf("sync branch '%s' does not exist", syncBranch)
	}
	// Check if there are uncommitted changes; --porcelain emits one line per
	// dirty path, so any non-blank output means the tree is not clean.
	statusCmd := exec.CommandContext(ctx, "git", "status", "--porcelain")
	statusOutput, err := statusCmd.Output()
	if err != nil {
		return fmt.Errorf("failed to check git status: %w", err)
	}
	if len(strings.TrimSpace(string(statusOutput))) > 0 {
		return fmt.Errorf("uncommitted changes detected - commit or stash them first")
	}
	fmt.Printf("Merging sync branch '%s' into '%s'...\n", syncBranch, currentBranch)
	if dryRun {
		fmt.Println("→ [DRY RUN] Would merge sync branch")
		// Show what would be merged; log errors are ignored here because the
		// dry run is purely informational.
		logCmd := exec.CommandContext(ctx, "git", "log", "--oneline", currentBranch+".."+syncBranch)
		logOutput, _ := logCmd.CombinedOutput()
		if len(strings.TrimSpace(string(logOutput))) > 0 {
			fmt.Println("\nCommits that would be merged:")
			fmt.Print(string(logOutput))
		} else {
			fmt.Println("No commits to merge")
		}
		return nil
	}
	// Perform the merge (merge commit message is fixed; conflicts surface as
	// a non-zero exit and are returned to the caller with git's output)
	mergeCmd := exec.CommandContext(ctx, "git", "merge", syncBranch, "-m", fmt.Sprintf("Merge sync branch '%s'", syncBranch))
	mergeOutput, err := mergeCmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("merge failed: %w\n%s", err, mergeOutput)
	}
	fmt.Print(string(mergeOutput))
	fmt.Println("\n✓ Merge complete")
	// Suggest next steps
	fmt.Println("\nNext steps:")
	fmt.Println("1. Review the merged changes")
	fmt.Println("2. Run 'bd sync --import-only' to sync the database with merged JSONL")
	fmt.Println("3. Run 'bd sync' to push changes to remote")
	return nil
}
// isExternalBeadsDir checks if the beads directory is in a different git repo than cwd.
// This is used to detect when BEADS_DIR points to a separate repository.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
func isExternalBeadsDir(ctx context.Context, beadsDir string) bool {
	// Resolve the repo root for the current working directory.
	cwdRoot, cwdErr := syncbranch.GetRepoRoot(ctx)
	if cwdErr != nil {
		return false // Can't determine, assume local
	}
	// Resolve the repo root for the beads directory itself.
	beadsRoot, beadsErr := getRepoRootFromPath(ctx, beadsDir)
	if beadsErr != nil {
		return false // Can't determine, assume local
	}
	// Different roots means BEADS_DIR lives in a separate repository.
	return cwdRoot != beadsRoot
}
// getRepoRootFromPath returns the git repository root for a given path.
// Unlike syncbranch.GetRepoRoot which uses cwd, this allows getting the repo
// root for any path, by running git with -C.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
func getRepoRootFromPath(ctx context.Context, path string) (string, error) {
	out, err := exec.CommandContext(ctx, "git", "-C", path, "rev-parse", "--show-toplevel").Output()
	if err != nil {
		return "", fmt.Errorf("failed to get git root for %s: %w", path, err)
	}
	return strings.TrimSpace(string(out)), nil
}
// commitToExternalBeadsRepo commits changes directly to an external beads repo.
// Used when BEADS_DIR points to a different git repository than cwd.
// This bypasses the worktree-based sync which fails when beads dir is external.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
//
// Returns (committed, err). committed is true once the commit succeeded, even
// when a later push fails, so callers can distinguish "nothing to commit"
// from "committed locally but push failed".
func commitToExternalBeadsRepo(ctx context.Context, beadsDir, message string, push bool) (bool, error) {
	repoRoot, err := getRepoRootFromPath(ctx, beadsDir)
	if err != nil {
		return false, fmt.Errorf("failed to get repo root: %w", err)
	}
	// Stage beads files (use relative path from repo root)
	relBeadsDir, err := filepath.Rel(repoRoot, beadsDir)
	if err != nil {
		relBeadsDir = beadsDir // Fallback to absolute path
	}
	addCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "add", relBeadsDir)
	if output, err := addCmd.CombinedOutput(); err != nil {
		return false, fmt.Errorf("git add failed: %w\n%s", err, output)
	}
	// Check if there are staged changes: `diff --cached --quiet` exits 0 when
	// the index matches HEAD, i.e. the add above staged nothing new.
	diffCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "diff", "--cached", "--quiet")
	if diffCmd.Run() == nil {
		return false, nil // No changes to commit
	}
	// Commit with config-based author and signing options
	if message == "" {
		message = fmt.Sprintf("bd sync: %s", time.Now().Format("2006-01-02 15:04:05"))
	}
	// NOTE(review): assumes buildGitCommitArgs targets repoRoot (e.g. via
	// "-C"), since the commit command below runs without an explicit -C —
	// confirm against its definition.
	commitArgs := buildGitCommitArgs(repoRoot, message)
	commitCmd := exec.CommandContext(ctx, "git", commitArgs...)
	if output, err := commitCmd.CombinedOutput(); err != nil {
		return false, fmt.Errorf("git commit failed: %w\n%s", err, output)
	}
	// Push if requested; the push is bounded by a 5s timeout message helper,
	// and a push failure still reports committed=true.
	if push {
		pushCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "push")
		if pushOutput, err := runGitCmdWithTimeoutMsg(ctx, pushCmd, "git push", 5*time.Second); err != nil {
			return true, fmt.Errorf("git push failed: %w\n%s", err, pushOutput)
		}
	}
	return true, nil
}
// pullFromExternalBeadsRepo pulls changes in an external beads repo.
// Used when BEADS_DIR points to a different git repository than cwd.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
func pullFromExternalBeadsRepo(ctx context.Context, beadsDir string) error {
	repoRoot, err := getRepoRootFromPath(ctx, beadsDir)
	if err != nil {
		return fmt.Errorf("failed to get repo root: %w", err)
	}
	// A repository without any configured remote has nothing to pull from;
	// treat that (or a failure to list remotes) as a silent no-op.
	remotes, err := exec.CommandContext(ctx, "git", "-C", repoRoot, "remote").Output()
	if err != nil || strings.TrimSpace(string(remotes)) == "" {
		return nil
	}
	out, err := exec.CommandContext(ctx, "git", "-C", repoRoot, "pull").CombinedOutput()
	if err != nil {
		return fmt.Errorf("git pull failed: %w\n%s", err, out)
	}
	return nil
}

395
cmd/bd/sync_check.go Normal file
View File

@@ -0,0 +1,395 @@
package main
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/steveyegge/beads/internal/syncbranch"
"github.com/steveyegge/beads/internal/types"
)
// SyncIntegrityResult contains the results of a pre-sync integrity check.
// bd-hlsw.1: Pre-sync integrity check
type SyncIntegrityResult struct {
	ForcedPush       *ForcedPushCheck  `json:"forced_push,omitempty"`       // force-push detection; nil if the check could not run
	PrefixMismatch   *PrefixMismatch   `json:"prefix_mismatch,omitempty"`   // prefix check; nil if the check could not run
	OrphanedChildren *OrphanedChildren `json:"orphaned_children,omitempty"` // orphan check; nil if the check could not run
	HasProblems      bool              `json:"has_problems"`                // true when any check above found a problem
}

// ForcedPushCheck detects if sync branch has diverged from remote.
type ForcedPushCheck struct {
	Detected  bool   `json:"detected"`             // true only on true divergence (neither ref is an ancestor of the other)
	LocalRef  string `json:"local_ref,omitempty"`  // commit SHA of the local sync branch
	RemoteRef string `json:"remote_ref,omitempty"` // commit SHA of the remote tracking branch
	Message   string `json:"message"`              // human-readable description of the branch state
}

// PrefixMismatch detects issues with wrong prefix in JSONL.
type PrefixMismatch struct {
	ConfiguredPrefix string   `json:"configured_prefix"`        // prefix from config ("bd" by default)
	MismatchedIDs    []string `json:"mismatched_ids,omitempty"` // IDs that do not start with "<prefix>-"
	Count            int      `json:"count"`                    // len(MismatchedIDs)
}

// OrphanedChildren detects issues with parent that doesn't exist.
type OrphanedChildren struct {
	OrphanedIDs []string `json:"orphaned_ids,omitempty"` // IDs whose parent is absent from the JSONL
	Count       int      `json:"count"`                  // len(OrphanedIDs)
}
// showSyncIntegrityCheck performs pre-sync integrity checks without modifying state.
// bd-hlsw.1: Detects forced pushes, prefix mismatches, and orphaned children.
// Exits with code 1 if problems are detected.
//
// When --json is active, the machine-readable result is emitted before any
// exit, so consumers receive it in the failure case too.
func showSyncIntegrityCheck(ctx context.Context, jsonlPath string) {
	fmt.Println("Sync Integrity Check")
	fmt.Println("====================")
	result := &SyncIntegrityResult{}
	// Check 1: Detect forced pushes on sync branch
	forcedPush := checkForcedPush(ctx)
	result.ForcedPush = forcedPush
	if forcedPush.Detected {
		result.HasProblems = true
	}
	printForcedPushResult(forcedPush)
	// Check 2: Detect prefix mismatches in JSONL. A failed check is reported
	// as a warning, not a problem, since it yields no verdict either way.
	prefixMismatch, err := checkPrefixMismatch(ctx, jsonlPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Warning: prefix check failed: %v\n", err)
	} else {
		result.PrefixMismatch = prefixMismatch
		if prefixMismatch != nil && prefixMismatch.Count > 0 {
			result.HasProblems = true
		}
		printPrefixMismatchResult(prefixMismatch)
	}
	// Check 3: Detect orphaned children (parent issues that don't exist)
	orphaned, err := checkOrphanedChildrenInJSONL(jsonlPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Warning: orphaned check failed: %v\n", err)
	} else {
		result.OrphanedChildren = orphaned
		if orphaned != nil && orphaned.Count > 0 {
			result.HasProblems = true
		}
		printOrphanedChildrenResult(orphaned)
	}
	// Summary
	fmt.Println("\nSummary")
	fmt.Println("-------")
	if result.HasProblems {
		fmt.Println("Problems detected! Review above and consider:")
		if result.ForcedPush != nil && result.ForcedPush.Detected {
			fmt.Println(" - Force push: Reset local sync branch or use 'bd sync --from-main'")
		}
		if result.PrefixMismatch != nil && result.PrefixMismatch.Count > 0 {
			fmt.Println(" - Prefix mismatch: Use 'bd import --rename-on-import' to fix")
		}
		if result.OrphanedChildren != nil && result.OrphanedChildren.Count > 0 {
			fmt.Println(" - Orphaned children: Remove parent references or create missing parents")
		}
	} else {
		fmt.Println("No problems detected. Safe to sync.")
	}
	// BUG FIX: emit JSON before exiting. Previously os.Exit(1) in the
	// problems branch ran first, so --json callers got no JSON output in
	// exactly the case they need it most.
	if jsonOutput {
		data, _ := json.MarshalIndent(result, "", " ")
		fmt.Println(string(data))
	}
	if result.HasProblems {
		os.Exit(1)
	}
}
// checkForcedPush detects if the sync branch has diverged from remote.
// This can happen when someone force-pushes to the sync branch.
//
// Never returns nil. Detected is set only for true divergence — neither ref
// is an ancestor of the other. Ahead/behind and all not-applicable states
// are reported via Message with Detected left false.
func checkForcedPush(ctx context.Context) *ForcedPushCheck {
	result := &ForcedPushCheck{
		Detected: false,
		Message:  "No sync branch configured or no remote",
	}
	// Get sync branch name; errors here are treated as "not applicable"
	// rather than failures, leaving the default Message in place.
	if err := ensureStoreActive(); err != nil {
		return result
	}
	syncBranch, _ := syncbranch.Get(ctx, store)
	if syncBranch == "" {
		return result
	}
	// Check if sync branch exists locally (show-ref exits non-zero when absent)
	checkLocalCmd := exec.CommandContext(ctx, "git", "show-ref", "--verify", "--quiet", "refs/heads/"+syncBranch)
	if checkLocalCmd.Run() != nil {
		result.Message = fmt.Sprintf("Sync branch '%s' does not exist locally", syncBranch)
		return result
	}
	// Get local ref
	localRefCmd := exec.CommandContext(ctx, "git", "rev-parse", syncBranch)
	localRefOutput, err := localRefCmd.Output()
	if err != nil {
		result.Message = "Failed to get local sync branch ref"
		return result
	}
	localRef := strings.TrimSpace(string(localRefOutput))
	result.LocalRef = localRef
	// Check if remote tracking branch exists; remote name comes from the
	// sync.remote config key, defaulting to "origin".
	remote := "origin"
	if configuredRemote, err := store.GetConfig(ctx, "sync.remote"); err == nil && configuredRemote != "" {
		remote = configuredRemote
	}
	// Get remote ref
	remoteRefCmd := exec.CommandContext(ctx, "git", "rev-parse", remote+"/"+syncBranch)
	remoteRefOutput, err := remoteRefCmd.Output()
	if err != nil {
		result.Message = fmt.Sprintf("Remote tracking branch '%s/%s' does not exist", remote, syncBranch)
		return result
	}
	remoteRef := strings.TrimSpace(string(remoteRefOutput))
	result.RemoteRef = remoteRef
	// If refs match, no divergence
	if localRef == remoteRef {
		result.Message = "Sync branch is in sync with remote"
		return result
	}
	// Check if local is ahead of remote (normal case); merge-base
	// --is-ancestor exits 0 when the first ref is an ancestor of the second.
	aheadCmd := exec.CommandContext(ctx, "git", "merge-base", "--is-ancestor", remoteRef, localRef)
	if aheadCmd.Run() == nil {
		result.Message = "Local sync branch is ahead of remote (normal)"
		return result
	}
	// Check if remote is ahead of local (behind, needs pull)
	behindCmd := exec.CommandContext(ctx, "git", "merge-base", "--is-ancestor", localRef, remoteRef)
	if behindCmd.Run() == nil {
		result.Message = "Local sync branch is behind remote (needs pull)"
		return result
	}
	// If neither is ancestor, branches have diverged - likely a force push.
	// localRef/remoteRef come from rev-parse, so [:8] is a safe abbreviation.
	result.Detected = true
	result.Message = fmt.Sprintf("Sync branch has DIVERGED from remote! Local: %s, Remote: %s. This may indicate a force push on the remote.", localRef[:8], remoteRef[:8])
	return result
}
}
// printForcedPushResult renders the force-push check result for humans.
// A nil result is reported as skipped — consistent with
// printPrefixMismatchResult, which already guards against nil; previously a
// nil argument would have panicked here.
func printForcedPushResult(fp *ForcedPushCheck) {
	fmt.Println("1. Force Push Detection")
	if fp == nil {
		fmt.Println(" [SKIP] Could not check for forced push")
		fmt.Println()
		return
	}
	if fp.Detected {
		fmt.Printf(" [PROBLEM] %s\n", fp.Message)
	} else {
		fmt.Printf(" [OK] %s\n", fp.Message)
	}
	fmt.Println()
}
// checkPrefixMismatch detects issues in JSONL that don't match the configured prefix.
// It reads the configured issue_prefix (defaulting to "bd"), scans every line
// of the JSONL export, and collects IDs that do not start with "<prefix>-".
// A missing JSONL file yields an empty result rather than an error.
func checkPrefixMismatch(ctx context.Context, jsonlPath string) (*PrefixMismatch, error) {
	// Get configured prefix
	if err := ensureStoreActive(); err != nil {
		return nil, err
	}
	prefix, err := store.GetConfig(ctx, "issue_prefix")
	if err != nil || prefix == "" {
		prefix = "bd" // Default
	}
	result := &PrefixMismatch{
		ConfiguredPrefix: prefix,
		MismatchedIDs:    []string{},
	}
	// Read JSONL and check each issue's prefix
	f, err := os.Open(jsonlPath) // #nosec G304 - controlled path
	if err != nil {
		if os.IsNotExist(err) {
			return result, nil // No JSONL, no mismatches
		}
		return nil, fmt.Errorf("failed to open JSONL: %w", err)
	}
	defer f.Close()
	want := prefix + "-"
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
	for sc.Scan() {
		raw := bytes.TrimSpace(sc.Bytes())
		if len(raw) == 0 {
			continue
		}
		var rec struct {
			ID string `json:"id"`
		}
		if json.Unmarshal(raw, &rec) != nil {
			continue // Skip malformed lines
		}
		if !strings.HasPrefix(rec.ID, want) {
			result.MismatchedIDs = append(result.MismatchedIDs, rec.ID)
		}
	}
	if err := sc.Err(); err != nil {
		return nil, fmt.Errorf("failed to read JSONL: %w", err)
	}
	result.Count = len(result.MismatchedIDs)
	return result, nil
}
// printPrefixMismatchResult renders the prefix-mismatch section of the doctor
// report, listing at most the first 10 offending IDs.
func printPrefixMismatchResult(pm *PrefixMismatch) {
	fmt.Println("2. Prefix Mismatch Check")
	if pm == nil {
		fmt.Println(" [SKIP] Could not check prefix")
		fmt.Println()
		return
	}
	fmt.Printf(" Configured prefix: %s\n", pm.ConfiguredPrefix)
	if pm.Count == 0 {
		fmt.Println(" [OK] All issues have correct prefix")
		fmt.Println()
		return
	}
	fmt.Printf(" [PROBLEM] Found %d issue(s) with wrong prefix:\n", pm.Count)
	// Cap the listing at the first 10 IDs.
	shown := pm.Count
	if shown > 10 {
		shown = 10
	}
	for _, id := range pm.MismatchedIDs[:shown] {
		fmt.Printf(" - %s\n", id)
	}
	if pm.Count > 10 {
		fmt.Printf(" ... and %d more\n", pm.Count-10)
	}
	fmt.Println()
}
// checkOrphanedChildrenInJSONL detects issues with parent references to non-existent issues.
//
// It scans the JSONL at jsonlPath once, collecting the set of live
// (non-tombstone) issue IDs and every child->parent reference, then reports
// each child whose parent ID never appeared. A missing file yields an empty
// result rather than an error.
func checkOrphanedChildrenInJSONL(jsonlPath string) (*OrphanedChildren, error) {
	result := &OrphanedChildren{OrphanedIDs: []string{}}

	f, err := os.Open(jsonlPath) // #nosec G304 - controlled path
	if err != nil {
		if os.IsNotExist(err) {
			return result, nil
		}
		return nil, fmt.Errorf("failed to open JSONL: %w", err)
	}
	defer f.Close()

	// Single pass: remember which IDs exist and which issues name a parent.
	seen := make(map[string]bool)
	childToParent := make(map[string]string) // child ID -> parent ID
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
	for sc.Scan() {
		raw := sc.Bytes()
		if len(bytes.TrimSpace(raw)) == 0 {
			continue
		}
		var rec struct {
			ID     string `json:"id"`
			Parent string `json:"parent,omitempty"`
			Status string `json:"status"`
		}
		if err := json.Unmarshal(raw, &rec); err != nil {
			continue
		}
		// Tombstoned issues neither exist nor count as orphans.
		if rec.Status == string(types.StatusTombstone) {
			continue
		}
		seen[rec.ID] = true
		if rec.Parent != "" {
			childToParent[rec.ID] = rec.Parent
		}
	}
	if err := sc.Err(); err != nil {
		return nil, fmt.Errorf("failed to read JSONL: %w", err)
	}

	// Any referenced parent that was never seen marks its child as orphaned.
	for child, parent := range childToParent {
		if !seen[parent] {
			result.OrphanedIDs = append(result.OrphanedIDs, fmt.Sprintf("%s (parent: %s)", child, parent))
		}
	}
	result.Count = len(result.OrphanedIDs)
	return result, nil
}
// runGitCmdWithTimeoutMsg runs a git command and prints a helpful message if it takes too long.
// This helps when git operations hang waiting for credential/browser auth.
func runGitCmdWithTimeoutMsg(ctx context.Context, cmd *exec.Cmd, cmdName string, timeoutDelay time.Duration) ([]byte, error) {
// Use done channel to cleanly exit goroutine when command completes
done := make(chan struct{})
go func() {
select {
case <-time.After(timeoutDelay):
fmt.Fprintf(os.Stderr, "⏳ %s is taking longer than expected (possibly waiting for authentication). If this hangs, check for a browser auth prompt or run 'git status' in another terminal.\n", cmdName)
case <-done:
// Command completed, exit cleanly
case <-ctx.Done():
// Context canceled, don't print message
}
}()
output, err := cmd.CombinedOutput()
close(done)
return output, err
}
// printOrphanedChildrenResult renders the orphaned-children section of the
// doctor report, listing at most the first 10 affected issues.
func printOrphanedChildrenResult(oc *OrphanedChildren) {
	fmt.Println("3. Orphaned Children Check")
	if oc == nil {
		fmt.Println(" [SKIP] Could not check orphaned children")
		fmt.Println()
		return
	}
	if oc.Count == 0 {
		fmt.Println(" [OK] No orphaned children found")
		fmt.Println()
		return
	}
	fmt.Printf(" [PROBLEM] Found %d issue(s) with missing parent:\n", oc.Count)
	shown := oc.Count
	if shown > 10 {
		shown = 10
	}
	for _, entry := range oc.OrphanedIDs[:shown] {
		fmt.Printf(" - %s\n", entry)
	}
	if oc.Count > 10 {
		fmt.Printf(" ... and %d more\n", oc.Count-10)
	}
	fmt.Println()
}

170
cmd/bd/sync_export.go Normal file
View File

@@ -0,0 +1,170 @@
package main
import (
"cmp"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"slices"
"time"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
)
// exportToJSONL exports the database to JSONL format.
//
// When a daemon is connected the export is delegated over RPC; otherwise the
// store is accessed directly. In direct mode the function:
//  1. loads all issues (including tombstones, so deletions propagate),
//  2. refuses to clobber a non-empty JSONL with an empty database,
//  3. sorts by ID, enriches issues (dependencies/labels/comments), and writes
//     atomically via a temp file + rename,
//  4. clears dirty flags and refreshes staleness-detection metadata.
//
// Metadata/permission/mtime failures after a successful write are reported as
// warnings rather than errors so the export itself still succeeds.
func exportToJSONL(ctx context.Context, jsonlPath string) error {
	// If daemon is running, use RPC
	if daemonClient != nil {
		exportArgs := &rpc.ExportArgs{
			JSONLPath: jsonlPath,
		}
		resp, err := daemonClient.Export(exportArgs)
		if err != nil {
			return fmt.Errorf("daemon export failed: %w", err)
		}
		if !resp.Success {
			return fmt.Errorf("daemon export error: %s", resp.Error)
		}
		return nil
	}
	// Direct mode: access store directly. Ensure store is initialized.
	if err := ensureStoreActive(); err != nil {
		return fmt.Errorf("failed to initialize store: %w", err)
	}
	// Get all issues including tombstones for sync propagation (bd-rp4o fix).
	// Tombstones must be exported so they propagate to other clones and prevent resurrection.
	issues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true})
	if err != nil {
		return fmt.Errorf("failed to get issues: %w", err)
	}
	// Safety check: prevent exporting empty database over non-empty JSONL.
	// Note: The main bd-53c protection is the reverse ZFC check earlier in sync.go
	// which runs BEFORE export. Here we only block the most catastrophic case (empty DB)
	// to allow legitimate deletions.
	if len(issues) == 0 {
		existingCount, countErr := countIssuesInJSONL(jsonlPath)
		if countErr != nil {
			// If we can't read the file, it might not exist yet, which is fine
			if !os.IsNotExist(countErr) {
				fmt.Fprintf(os.Stderr, "Warning: failed to read existing JSONL: %v\n", countErr)
			}
		} else if existingCount > 0 {
			return fmt.Errorf("refusing to export empty database over non-empty JSONL file (database: 0 issues, JSONL: %d issues)", existingCount)
		}
	}
	// Sort by ID for consistent output
	slices.SortFunc(issues, func(a, b *types.Issue) int {
		return cmp.Compare(a.ID, b.ID)
	})
	// Populate dependencies for all issues (single bulk query, avoids N+1)
	allDeps, err := store.GetAllDependencyRecords(ctx)
	if err != nil {
		return fmt.Errorf("failed to get dependencies: %w", err)
	}
	for _, issue := range issues {
		issue.Dependencies = allDeps[issue.ID]
	}
	// Populate labels for all issues
	for _, issue := range issues {
		labels, err := store.GetLabels(ctx, issue.ID)
		if err != nil {
			return fmt.Errorf("failed to get labels for %s: %w", issue.ID, err)
		}
		issue.Labels = labels
	}
	// Populate comments for all issues
	for _, issue := range issues {
		comments, err := store.GetIssueComments(ctx, issue.ID)
		if err != nil {
			return fmt.Errorf("failed to get comments for %s: %w", issue.ID, err)
		}
		issue.Comments = comments
	}
	// Create temp file in the destination directory for atomic write
	dir := filepath.Dir(jsonlPath)
	base := filepath.Base(jsonlPath)
	tempFile, err := os.CreateTemp(dir, base+".tmp.*")
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	tempPath := tempFile.Name()
	defer func() {
		// Cleanup on any early return; harmless no-ops after a successful rename.
		_ = tempFile.Close()
		_ = os.Remove(tempPath)
	}()
	// Write JSONL, one issue per line
	encoder := json.NewEncoder(tempFile)
	exportedIDs := make([]string, 0, len(issues))
	for _, issue := range issues {
		if err := encoder.Encode(issue); err != nil {
			return fmt.Errorf("failed to encode issue %s: %w", issue.ID, err)
		}
		exportedIDs = append(exportedIDs, issue.ID)
	}
	// BUGFIX: check the Close error before renaming. A successful rename does
	// NOT imply the data was flushed - Close can surface a deferred write
	// error (e.g. disk full), which would otherwise silently install a
	// truncated JSONL over the good one.
	if err := tempFile.Close(); err != nil {
		return fmt.Errorf("failed to close temp file: %w", err)
	}
	// Atomic replace
	if err := os.Rename(tempPath, jsonlPath); err != nil {
		return fmt.Errorf("failed to replace JSONL file: %w", err)
	}
	// Set appropriate file permissions (0600: rw-------)
	if err := os.Chmod(jsonlPath, 0600); err != nil {
		// Non-fatal warning
		fmt.Fprintf(os.Stderr, "Warning: failed to set file permissions: %v\n", err)
	}
	// Clear dirty flags for exported issues
	if err := store.ClearDirtyIssuesByID(ctx, exportedIDs); err != nil {
		// Non-fatal warning
		fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty flags: %v\n", err)
	}
	// Clear auto-flush state
	clearAutoFlushState()
	// Update jsonl_content_hash metadata to enable content-based staleness detection (bd-khnb fix).
	// After export, database and JSONL are in sync, so update hash to prevent unnecessary auto-import.
	// Renamed from last_import_hash (bd-39o) - more accurate since updated on both import AND export.
	if currentHash, err := computeJSONLHash(jsonlPath); err == nil {
		if err := store.SetMetadata(ctx, "jsonl_content_hash", currentHash); err != nil {
			// Non-fatal warning: Metadata update failures are intentionally non-fatal to prevent blocking
			// successful exports. System degrades gracefully to mtime-based staleness detection if metadata
			// is unavailable. This ensures export operations always succeed even if metadata storage fails.
			fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_content_hash: %v\n", err)
		}
		// Use RFC3339Nano for nanosecond precision to avoid race with file mtime (fixes #399)
		exportTime := time.Now().Format(time.RFC3339Nano)
		if err := store.SetMetadata(ctx, "last_import_time", exportTime); err != nil {
			// Non-fatal warning (see above comment about graceful degradation)
			fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_time: %v\n", err)
		}
		// Note: mtime tracking removed in bd-v0y fix (git doesn't preserve mtime)
	}
	// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321).
	// This prevents validatePreExport from incorrectly blocking on next export.
	beadsDir := filepath.Dir(jsonlPath)
	dbPath := filepath.Join(beadsDir, "beads.db")
	if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
		// Non-fatal warning
		fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
	}
	return nil
}

132
cmd/bd/sync_import.go Normal file
View File

@@ -0,0 +1,132 @@
package main
import (
"context"
"fmt"
"os"
"os/exec"
)
// importFromJSONL imports the JSONL file by running the import command in a
// subprocess (forced into direct mode with --no-daemon).
// Optional parameters: noGitHistory, protectLeftSnapshot (bd-sync-deletion fix)
func importFromJSONL(ctx context.Context, jsonlPath string, renameOnImport bool, opts ...bool) error {
	// Resolve the running binary so we never depend on a "./bd" relative path.
	exe, err := os.Executable()
	if err != nil {
		return fmt.Errorf("cannot resolve current executable: %w", err)
	}
	// Decode the optional positional flags.
	var noGitHistory, protectLeftSnapshot bool
	switch {
	case len(opts) > 1:
		protectLeftSnapshot = opts[1]
		fallthrough
	case len(opts) > 0:
		noGitHistory = opts[0]
	}
	// Assemble the subprocess argument list.
	// Use --no-daemon to ensure subprocess uses direct mode, avoiding daemon connection issues.
	args := []string{"--no-daemon", "import", "-i", jsonlPath}
	if renameOnImport {
		args = append(args, "--rename-on-import")
	}
	if noGitHistory {
		args = append(args, "--no-git-history")
	}
	if protectLeftSnapshot {
		// --protect-left-snapshot is used for post-pull imports (bd-sync-deletion fix).
		args = append(args, "--protect-left-snapshot")
	}
	// Run the import and relay its combined output.
	cmd := exec.CommandContext(ctx, exe, args...) // #nosec G204 - bd import command from trusted binary
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("import failed: %w\n%s", err, output)
	}
	if len(output) > 0 {
		// The import subcommand prints its own summary.
		fmt.Print(string(output))
	}
	return nil
}
// resolveNoGitHistoryForFromMain returns the resolved noGitHistory value for sync operations.
// When syncing from main (--from-main), noGitHistory is forced to true to prevent creating
// incorrect deletion records for locally-created beads that don't exist on main.
// See: https://github.com/steveyegge/beads/issues/417
func resolveNoGitHistoryForFromMain(fromMain, noGitHistory bool) bool {
	return fromMain || noGitHistory
}
// doSyncFromMain performs a one-way sync from the default branch (main/master).
// Used for ephemeral branches without upstream tracking (gt-ick9).
// This fetches beads from main and imports them, discarding local beads changes.
// If sync.remote is configured (e.g., "upstream" for fork workflows), uses that remote
// instead of "origin" (bd-bx9).
//
// Steps: fetch <remote>/<default-branch>, checkout .beads/ from it (overwriting
// the local copy), then import the JSONL. dryRun prints the plan and exits
// without touching anything.
func doSyncFromMain(ctx context.Context, jsonlPath string, renameOnImport bool, dryRun bool, noGitHistory bool) error {
	// Determine which remote to use (default: origin, but can be configured via sync.remote)
	remote := "origin"
	if err := ensureStoreActive(); err == nil && store != nil {
		if configuredRemote, err := store.GetConfig(ctx, "sync.remote"); err == nil && configuredRemote != "" {
			remote = configuredRemote
		}
	}
	if dryRun {
		fmt.Println("→ [DRY RUN] Would sync beads from main branch")
		fmt.Printf(" 1. Fetch %s main\n", remote)
		fmt.Printf(" 2. Checkout .beads/ from %s/main\n", remote)
		fmt.Println(" 3. Import JSONL into database")
		fmt.Println("\n✓ Dry run complete (no changes made)")
		return nil
	}
	// Check if we're in a git repository
	if !isGitRepo() {
		return fmt.Errorf("not in a git repository")
	}
	// Check if remote exists
	if !hasGitRemote(ctx) {
		return fmt.Errorf("no git remote configured")
	}
	// Verify the configured remote exists before running fetch/checkout against it
	checkRemoteCmd := exec.CommandContext(ctx, "git", "remote", "get-url", remote)
	if err := checkRemoteCmd.Run(); err != nil {
		return fmt.Errorf("configured sync.remote '%s' does not exist (run 'git remote add %s <url>')", remote, remote)
	}
	defaultBranch := getDefaultBranchForRemote(ctx, remote)
	// Step 1: Fetch from main
	fmt.Printf("→ Fetching from %s/%s...\n", remote, defaultBranch)
	fetchCmd := exec.CommandContext(ctx, "git", "fetch", remote, defaultBranch)
	if output, err := fetchCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("git fetch %s %s failed: %w\n%s", remote, defaultBranch, err, output)
	}
	// Step 2: Checkout .beads/ directory from main (overwrites local .beads/)
	fmt.Printf("→ Checking out beads from %s/%s...\n", remote, defaultBranch)
	checkoutCmd := exec.CommandContext(ctx, "git", "checkout", fmt.Sprintf("%s/%s", remote, defaultBranch), "--", ".beads/")
	if output, err := checkoutCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("git checkout .beads/ from %s/%s failed: %w\n%s", remote, defaultBranch, err, output)
	}
	// Step 3: Import JSONL into the local database
	fmt.Println("→ Importing JSONL...")
	if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil {
		return fmt.Errorf("import failed: %w", err)
	}
	fmt.Println("\n✓ Sync from main complete")
	return nil
}

View File

@@ -1,16 +0,0 @@
# Test bd close --resolution alias (GH#721)
# Jira CLI convention: --resolution instead of --reason
bd init --prefix test
# Create issue
bd create 'Issue to close with resolution'
cp stdout issue.txt
exec sh -c 'grep -oE "test-[a-z0-9]+" issue.txt > issue_id.txt'
# Close using --resolution alias
exec sh -c 'bd close $(cat issue_id.txt) --resolution "Fixed via resolution alias"'
stdout 'Closed test-'
# Verify close_reason is set correctly
exec sh -c 'bd show $(cat issue_id.txt) --json'
stdout 'Fixed via resolution alias'

View File

@@ -1,484 +0,0 @@
package main
import (
	"bytes"
	"fmt"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/steveyegge/beads/internal/rpc"
	"github.com/steveyegge/beads/internal/types"
)
// TestPluralize verifies that pluralize returns "s" for every count except
// exactly 1, including the zero and negative edge cases.
func TestPluralize(t *testing.T) {
	cases := map[int]string{
		0:   "s",
		1:   "",
		2:   "s",
		10:  "s",
		100: "s",
		-1:  "s", // Edge case
	}
	for count, want := range cases {
		t.Run(fmt.Sprintf("count=%d", count), func(t *testing.T) {
			if got := pluralize(count); got != want {
				t.Errorf("pluralize(%d) = %q, want %q", count, got, want)
			}
		})
	}
}
// TestFormatTimeAgo exercises each human-readable bucket of formatTimeAgo:
// "just now" (<1 min), singular/plural minutes and hours, singular/plural
// days, and the absolute-date fallback for times more than a week old.
func TestFormatTimeAgo(t *testing.T) {
	now := time.Now()
	tests := []struct {
		name     string
		time     time.Time // input instant, expressed relative to now
		expected string
	}{
		{
			name:     "just now",
			time:     now.Add(-30 * time.Second),
			expected: "just now",
		},
		{
			name:     "1 min ago",
			time:     now.Add(-1 * time.Minute),
			expected: "1 min ago",
		},
		{
			name:     "multiple minutes ago",
			time:     now.Add(-5 * time.Minute),
			expected: "5 mins ago",
		},
		{
			name:     "1 hour ago",
			time:     now.Add(-1 * time.Hour),
			expected: "1 hour ago",
		},
		{
			name:     "multiple hours ago",
			time:     now.Add(-3 * time.Hour),
			expected: "3 hours ago",
		},
		{
			name:     "1 day ago",
			time:     now.Add(-24 * time.Hour),
			expected: "1 day ago",
		},
		{
			name:     "multiple days ago",
			time:     now.Add(-3 * 24 * time.Hour),
			expected: "3 days ago",
		},
		{
			// Beyond 7 days the output switches to an absolute YYYY-MM-DD date.
			name:     "more than a week ago",
			time:     now.Add(-10 * 24 * time.Hour),
			expected: now.Add(-10 * 24 * time.Hour).Format("2006-01-02"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := formatTimeAgo(tt.time)
			if result != tt.expected {
				t.Errorf("formatTimeAgo() = %q, want %q", result, tt.expected)
			}
		})
	}
}
func TestPrintEvent(t *testing.T) {
// Capture stdout
old := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
event := rpc.MutationEvent{
Type: rpc.MutationCreate,
IssueID: "bd-test123",
Timestamp: time.Date(2025, 1, 15, 14, 30, 0, 0, time.UTC),
}
printEvent(event)
w.Close()
os.Stdout = old
var buf bytes.Buffer
buf.ReadFrom(r)
output := buf.String()
// Check that output contains expected elements
if len(output) == 0 {
t.Error("printEvent produced no output")
}
if !containsSubstring(output, "bd-test123") {
t.Errorf("printEvent output missing issue ID, got: %s", output)
}
if !containsSubstring(output, "created") {
t.Errorf("printEvent output missing 'created' message, got: %s", output)
}
}
// TestShowCleanupDeprecationHint captures stderr and verifies the deprecation
// hint points users at 'bd doctor --fix'.
func TestShowCleanupDeprecationHint(t *testing.T) {
	// Redirect stderr into a pipe for the duration of the call.
	saved := os.Stderr
	r, w, _ := os.Pipe()
	os.Stderr = w

	showCleanupDeprecationHint()

	w.Close()
	os.Stderr = saved

	var captured bytes.Buffer
	captured.ReadFrom(r)
	text := captured.String()

	if len(text) == 0 {
		t.Error("showCleanupDeprecationHint produced no output")
	}
	if !containsSubstring(text, "doctor --fix") {
		t.Errorf("showCleanupDeprecationHint output missing 'doctor --fix', got: %s", text)
	}
}
// containsSubstring reports whether haystack contains needle.
// Thin wrapper over strings.Contains (replaces a hand-rolled O(n*m) scan);
// kept as a named helper for readability at test call sites.
func containsSubstring(haystack, needle string) bool {
	return strings.Contains(haystack, needle)
}
// Note: TestExtractPrefix is already defined in helpers_test.go
func TestPinIndicator(t *testing.T) {
tests := []struct {
name string
pinned bool
expected string
}{
{
name: "pinned issue",
pinned: true,
expected: "📌 ",
},
{
name: "unpinned issue",
pinned: false,
expected: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
issue := &types.Issue{Pinned: tt.pinned}
result := pinIndicator(issue)
if result != tt.expected {
t.Errorf("pinIndicator() = %q, want %q", result, tt.expected)
}
})
}
}
// TestSortIssues covers sortIssues across every supported sort key
// (priority, title, id, status), both directions for priority, and the
// no-op behavior for empty or unrecognized keys.
func TestSortIssues(t *testing.T) {
	now := time.Now()
	yesterday := now.Add(-24 * time.Hour)
	twoDaysAgo := now.Add(-48 * time.Hour)
	closedAt := now
	closedYesterday := yesterday
	// baseIssues returns a fresh, deliberately unsorted fixture so each
	// subtest sorts its own copy.
	baseIssues := func() []*types.Issue {
		return []*types.Issue{
			{ID: "bd-2", Title: "Beta", Priority: 2, CreatedAt: yesterday, UpdatedAt: yesterday, Status: "open"},
			{ID: "bd-1", Title: "Alpha", Priority: 1, CreatedAt: now, UpdatedAt: now, Status: "closed", ClosedAt: &closedAt},
			{ID: "bd-3", Title: "Gamma", Priority: 3, CreatedAt: twoDaysAgo, UpdatedAt: twoDaysAgo, Status: "in_progress", ClosedAt: &closedYesterday},
		}
	}
	t.Run("sort by priority ascending", func(t *testing.T) {
		issues := baseIssues()
		sortIssues(issues, "priority", false)
		if issues[0].Priority != 1 {
			t.Errorf("expected priority 1 first, got %d", issues[0].Priority)
		}
		if issues[1].Priority != 2 {
			t.Errorf("expected priority 2 second, got %d", issues[1].Priority)
		}
		if issues[2].Priority != 3 {
			t.Errorf("expected priority 3 third, got %d", issues[2].Priority)
		}
	})
	t.Run("sort by priority descending", func(t *testing.T) {
		issues := baseIssues()
		sortIssues(issues, "priority", true)
		if issues[0].Priority != 3 {
			t.Errorf("expected priority 3 first, got %d", issues[0].Priority)
		}
	})
	t.Run("sort by title", func(t *testing.T) {
		issues := baseIssues()
		sortIssues(issues, "title", false)
		if issues[0].Title != "Alpha" {
			t.Errorf("expected 'Alpha' first, got %q", issues[0].Title)
		}
	})
	t.Run("sort by ID", func(t *testing.T) {
		issues := baseIssues()
		sortIssues(issues, "id", false)
		if issues[0].ID != "bd-1" {
			t.Errorf("expected 'bd-1' first, got %q", issues[0].ID)
		}
	})
	t.Run("sort by status", func(t *testing.T) {
		issues := baseIssues()
		sortIssues(issues, "status", false)
		// closed < in_progress < open alphabetically
		if issues[0].Status != "closed" {
			t.Errorf("expected 'closed' first, got %q", issues[0].Status)
		}
	})
	t.Run("empty sortBy does nothing", func(t *testing.T) {
		issues := baseIssues()
		origFirst := issues[0].ID
		sortIssues(issues, "", false)
		if issues[0].ID != origFirst {
			t.Error("expected no sorting when sortBy is empty")
		}
	})
	t.Run("unknown sortBy does nothing", func(t *testing.T) {
		issues := baseIssues()
		origFirst := issues[0].ID
		sortIssues(issues, "unknown_field", false)
		if issues[0].ID != origFirst {
			t.Error("expected no sorting when sortBy is unknown")
		}
	})
}
// TestFormatHookWarnings verifies FormatHookWarnings' summary strings for
// every combination of missing and outdated git hooks: all-healthy and
// empty inputs must yield an empty string; otherwise the warning must name
// the counts and suggest 'bd hooks install'.
func TestFormatHookWarnings(t *testing.T) {
	t.Run("no warnings when all installed and current", func(t *testing.T) {
		statuses := []HookStatus{
			{Name: "pre-commit", Installed: true, Outdated: false},
			{Name: "post-merge", Installed: true, Outdated: false},
		}
		result := FormatHookWarnings(statuses)
		if result != "" {
			t.Errorf("expected empty string, got %q", result)
		}
	})
	t.Run("warning when hooks missing", func(t *testing.T) {
		statuses := []HookStatus{
			{Name: "pre-commit", Installed: false, Outdated: false},
			{Name: "post-merge", Installed: false, Outdated: false},
		}
		result := FormatHookWarnings(statuses)
		if !containsSubstring(result, "2 missing") {
			t.Errorf("expected '2 missing' in output, got %q", result)
		}
		if !containsSubstring(result, "bd hooks install") {
			t.Errorf("expected 'bd hooks install' in output, got %q", result)
		}
	})
	t.Run("warning when hooks outdated", func(t *testing.T) {
		statuses := []HookStatus{
			{Name: "pre-commit", Installed: true, Outdated: true},
			{Name: "post-merge", Installed: true, Outdated: true},
		}
		result := FormatHookWarnings(statuses)
		if !containsSubstring(result, "2 hooks") {
			t.Errorf("expected '2 hooks' in output, got %q", result)
		}
		if !containsSubstring(result, "outdated") {
			t.Errorf("expected 'outdated' in output, got %q", result)
		}
	})
	t.Run("both missing and outdated", func(t *testing.T) {
		statuses := []HookStatus{
			{Name: "pre-commit", Installed: false, Outdated: false},
			{Name: "post-merge", Installed: true, Outdated: true},
		}
		result := FormatHookWarnings(statuses)
		if !containsSubstring(result, "1 missing") {
			t.Errorf("expected '1 missing' in output, got %q", result)
		}
		if !containsSubstring(result, "1 hooks") {
			t.Errorf("expected '1 hooks' in output, got %q", result)
		}
	})
	t.Run("empty statuses", func(t *testing.T) {
		statuses := []HookStatus{}
		result := FormatHookWarnings(statuses)
		if result != "" {
			t.Errorf("expected empty string for empty statuses, got %q", result)
		}
	})
}
// TestGetContributorsSorted sanity-checks the embedded contributors list:
// it must be non-empty with the top committer (Steve Yegge) listed first.
func TestGetContributorsSorted(t *testing.T) {
	contributors := getContributorsSorted()
	// BUGFIX: use Fatal (not Error) here — t.Error continues execution, so an
	// empty slice would make the contributors[0] index below panic.
	if len(contributors) == 0 {
		t.Fatal("expected non-empty contributors list")
	}
	// First contributor should be the one with most commits (Steve Yegge)
	if contributors[0] != "Steve Yegge" {
		t.Errorf("expected 'Steve Yegge' first, got %q", contributors[0])
	}
}
// TestExtractIDSuffix verifies suffix extraction across hierarchical,
// prefix-hash, simple, and separator-free ID shapes.
func TestExtractIDSuffix(t *testing.T) {
	cases := []struct {
		name string
		id   string
		want string
	}{
		{"hierarchical ID with dot", "bd-xyz.1", "1"},
		{"nested hierarchical ID", "bd-abc.step1.sub", "sub"},
		{"prefix-hash ID", "patrol-abc123", "abc123"},
		{"simple ID", "bd-123", "123"},
		{"no separators", "standalone", "standalone"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := extractIDSuffix(tc.id); got != tc.want {
				t.Errorf("extractIDSuffix(%q) = %q, want %q", tc.id, got, tc.want)
			}
		})
	}
}
// Note: TestGetRelativeID is already defined in mol_test.go
// TestIsRebaseInProgress simulates a git repo in a temp directory and checks
// that isRebaseInProgress keys off the presence of the .git/rebase-merge and
// .git/rebase-apply marker directories.
func TestIsRebaseInProgress(t *testing.T) {
	// Create a temp directory to simulate a git repo
	tmpDir := t.TempDir()
	origDir, _ := os.Getwd()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to change to temp dir: %v", err)
	}
	// Restore the working directory so later tests in the package are unaffected.
	defer func() { _ = os.Chdir(origDir) }()
	// Create .git directory (bare marker; no real git state is needed)
	if err := os.MkdirAll(".git", 0755); err != nil {
		t.Fatalf("failed to create .git dir: %v", err)
	}
	t.Run("no rebase in progress", func(t *testing.T) {
		if isRebaseInProgress() {
			t.Error("expected false when no rebase markers exist")
		}
	})
	t.Run("rebase-merge in progress", func(t *testing.T) {
		if err := os.MkdirAll(".git/rebase-merge", 0755); err != nil {
			t.Fatalf("failed to create rebase-merge dir: %v", err)
		}
		defer os.RemoveAll(".git/rebase-merge")
		if !isRebaseInProgress() {
			t.Error("expected true when .git/rebase-merge exists")
		}
	})
	t.Run("rebase-apply in progress", func(t *testing.T) {
		if err := os.MkdirAll(".git/rebase-apply", 0755); err != nil {
			t.Fatalf("failed to create rebase-apply dir: %v", err)
		}
		defer os.RemoveAll(".git/rebase-apply")
		if !isRebaseInProgress() {
			t.Error("expected true when .git/rebase-apply exists")
		}
	})
}
// TestHasBeadsJSONL checks that hasBeadsJSONL detects either of the two
// recognized JSONL filenames (.beads/issues.jsonl or .beads/beads.jsonl)
// relative to the current working directory, and reports false when neither
// exists.
func TestHasBeadsJSONL(t *testing.T) {
	// Create a temp directory and run the checks from inside it.
	tmpDir := t.TempDir()
	origDir, _ := os.Getwd()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to change to temp dir: %v", err)
	}
	// Restore the working directory so later tests in the package are unaffected.
	defer func() { _ = os.Chdir(origDir) }()
	t.Run("no JSONL files", func(t *testing.T) {
		if hasBeadsJSONL() {
			t.Error("expected false when no .beads directory exists")
		}
	})
	t.Run("with issues.jsonl", func(t *testing.T) {
		if err := os.MkdirAll(".beads", 0755); err != nil {
			t.Fatalf("failed to create .beads dir: %v", err)
		}
		if err := os.WriteFile(".beads/issues.jsonl", []byte("{}"), 0644); err != nil {
			t.Fatalf("failed to create issues.jsonl: %v", err)
		}
		defer os.RemoveAll(".beads")
		if !hasBeadsJSONL() {
			t.Error("expected true when .beads/issues.jsonl exists")
		}
	})
	t.Run("with beads.jsonl", func(t *testing.T) {
		if err := os.MkdirAll(".beads", 0755); err != nil {
			t.Fatalf("failed to create .beads dir: %v", err)
		}
		if err := os.WriteFile(".beads/beads.jsonl", []byte("{}"), 0644); err != nil {
			t.Fatalf("failed to create beads.jsonl: %v", err)
		}
		defer os.RemoveAll(".beads")
		if !hasBeadsJSONL() {
			t.Error("expected true when .beads/beads.jsonl exists")
		}
	})
}