bd sync: 2025-12-23 22:33:32

This commit is contained in:
Steve Yegge
2025-12-23 22:33:33 -08:00
parent a10f580bbe
commit 2de1695615
77 changed files with 8319 additions and 7677 deletions

File diff suppressed because one or more lines are too long

View File

@@ -740,17 +740,6 @@ bd close bd-42 --reason "Completed" --json
- `3` - Low (polish, optimization)
- `4` - Backlog (future ideas)
### Dependencies: Avoid the Temporal Trap
When adding dependencies, think "X **needs** Y" not "X **comes before** Y":
```bash
# ❌ WRONG: "Phase 1 blocks Phase 2" → bd dep add phase1 phase2
# ✅ RIGHT: "Phase 2 needs Phase 1" → bd dep add phase2 phase1
```
Verify with `bd blocked` - tasks should be blocked by prerequisites, not dependents.
### Workflow for AI Agents
1. **Check your inbox**: `gt mail inbox` (from your cwd, not ~/gt)

View File

@@ -166,7 +166,8 @@ Examples:
} else {
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
FatalError("compact requires SQLite storage")
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n")
os.Exit(1)
}
runCompactStats(ctx, sqliteStore)
}
@@ -187,20 +188,26 @@ Examples:
// Check for exactly one mode
if activeModes == 0 {
FatalError("must specify one mode: --analyze, --apply, or --auto")
fmt.Fprintf(os.Stderr, "Error: must specify one mode: --analyze, --apply, or --auto\n")
os.Exit(1)
}
if activeModes > 1 {
FatalError("cannot use multiple modes together (--analyze, --apply, --auto are mutually exclusive)")
fmt.Fprintf(os.Stderr, "Error: cannot use multiple modes together (--analyze, --apply, --auto are mutually exclusive)\n")
os.Exit(1)
}
// Handle analyze mode (requires direct database access)
if compactAnalyze {
if err := ensureDirectMode("compact --analyze requires direct database access"); err != nil {
FatalErrorWithHint(fmt.Sprintf("%v", err), "Use --no-daemon flag to bypass daemon and access database directly")
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
fmt.Fprintf(os.Stderr, "Hint: Use --no-daemon flag to bypass daemon and access database directly\n")
os.Exit(1)
}
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
FatalErrorWithHint("failed to open database in direct mode", "Ensure .beads/beads.db exists and is readable")
fmt.Fprintf(os.Stderr, "Error: failed to open database in direct mode\n")
fmt.Fprintf(os.Stderr, "Hint: Ensure .beads/beads.db exists and is readable\n")
os.Exit(1)
}
runCompactAnalyze(ctx, sqliteStore)
return
@@ -209,17 +216,23 @@ Examples:
// Handle apply mode (requires direct database access)
if compactApply {
if err := ensureDirectMode("compact --apply requires direct database access"); err != nil {
FatalErrorWithHint(fmt.Sprintf("%v", err), "Use --no-daemon flag to bypass daemon and access database directly")
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
fmt.Fprintf(os.Stderr, "Hint: Use --no-daemon flag to bypass daemon and access database directly\n")
os.Exit(1)
}
if compactID == "" {
FatalError("--apply requires --id")
fmt.Fprintf(os.Stderr, "Error: --apply requires --id\n")
os.Exit(1)
}
if compactSummary == "" {
FatalError("--apply requires --summary")
fmt.Fprintf(os.Stderr, "Error: --apply requires --summary\n")
os.Exit(1)
}
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
FatalErrorWithHint("failed to open database in direct mode", "Ensure .beads/beads.db exists and is readable")
fmt.Fprintf(os.Stderr, "Error: failed to open database in direct mode\n")
fmt.Fprintf(os.Stderr, "Hint: Ensure .beads/beads.db exists and is readable\n")
os.Exit(1)
}
runCompactApply(ctx, sqliteStore)
return
@@ -235,13 +248,16 @@ Examples:
// Validation checks
if compactID != "" && compactAll {
FatalError("cannot use --id and --all together")
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
os.Exit(1)
}
if compactForce && compactID == "" {
FatalError("--force requires --id")
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
os.Exit(1)
}
if compactID == "" && !compactAll && !compactDryRun {
FatalError("must specify --all, --id, or --dry-run")
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
os.Exit(1)
}
// Use RPC if daemon available, otherwise direct mode
@@ -253,12 +269,14 @@ Examples:
// Fallback to direct mode
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
FatalError("--auto mode requires ANTHROPIC_API_KEY environment variable")
fmt.Fprintf(os.Stderr, "Error: --auto mode requires ANTHROPIC_API_KEY environment variable\n")
os.Exit(1)
}
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
FatalError("compact requires SQLite storage")
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n")
os.Exit(1)
}
config := &compact.Config{
@@ -271,7 +289,8 @@ Examples:
compactor, err := compact.New(sqliteStore, apiKey, config)
if err != nil {
FatalError("failed to create compactor: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to create compactor: %v\n", err)
os.Exit(1)
}
if compactID != "" {
@@ -290,16 +309,19 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
if !compactForce {
eligible, reason, err := store.CheckEligibility(ctx, issueID, compactTier)
if err != nil {
FatalError("failed to check eligibility: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to check eligibility: %v\n", err)
os.Exit(1)
}
if !eligible {
FatalError("%s is not eligible for Tier %d compaction: %s", issueID, compactTier, reason)
fmt.Fprintf(os.Stderr, "Error: %s is not eligible for Tier %d compaction: %s\n", issueID, compactTier, reason)
os.Exit(1)
}
}
issue, err := store.GetIssue(ctx, issueID)
if err != nil {
FatalError("failed to get issue: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
}
originalSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
@@ -327,16 +349,19 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
if compactTier == 1 {
compactErr = compactor.CompactTier1(ctx, issueID)
} else {
FatalError("Tier 2 compaction not yet implemented")
fmt.Fprintf(os.Stderr, "Error: Tier 2 compaction not yet implemented\n")
os.Exit(1)
}
if compactErr != nil {
FatalError("%v", compactErr)
fmt.Fprintf(os.Stderr, "Error: %v\n", compactErr)
os.Exit(1)
}
issue, err = store.GetIssue(ctx, issueID)
if err != nil {
FatalError("failed to get updated issue: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get updated issue: %v\n", err)
os.Exit(1)
}
compactedSize := len(issue.Description)
@@ -382,7 +407,8 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
if compactTier == 1 {
tier1, err := store.GetTier1Candidates(ctx)
if err != nil {
FatalError("failed to get candidates: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err)
os.Exit(1)
}
for _, c := range tier1 {
candidates = append(candidates, c.IssueID)
@@ -390,7 +416,8 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
} else {
tier2, err := store.GetTier2Candidates(ctx)
if err != nil {
FatalError("failed to get candidates: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err)
os.Exit(1)
}
for _, c := range tier2 {
candidates = append(candidates, c.IssueID)
@@ -444,7 +471,8 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
results, err := compactor.CompactTier1Batch(ctx, candidates)
if err != nil {
FatalError("batch compaction failed: %v", err)
fmt.Fprintf(os.Stderr, "Error: batch compaction failed: %v\n", err)
os.Exit(1)
}
successCount := 0
@@ -507,12 +535,14 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
func runCompactStats(ctx context.Context, store *sqlite.SQLiteStorage) {
tier1, err := store.GetTier1Candidates(ctx)
if err != nil {
FatalError("failed to get Tier 1 candidates: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get Tier 1 candidates: %v\n", err)
os.Exit(1)
}
tier2, err := store.GetTier2Candidates(ctx)
if err != nil {
FatalError("failed to get Tier 2 candidates: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get Tier 2 candidates: %v\n", err)
os.Exit(1)
}
tier1Size := 0
@@ -578,20 +608,24 @@ func progressBar(current, total int) string {
//nolint:unparam // ctx may be used in future for cancellation
func runCompactRPC(_ context.Context) {
if compactID != "" && compactAll {
FatalError("cannot use --id and --all together")
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
os.Exit(1)
}
if compactForce && compactID == "" {
FatalError("--force requires --id")
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
os.Exit(1)
}
if compactID == "" && !compactAll && !compactDryRun {
FatalError("must specify --all, --id, or --dry-run")
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
os.Exit(1)
}
apiKey := os.Getenv("ANTHROPIC_API_KEY")
if apiKey == "" && !compactDryRun {
FatalError("ANTHROPIC_API_KEY environment variable not set")
fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n")
os.Exit(1)
}
args := map[string]interface{}{
@@ -609,11 +643,13 @@ func runCompactRPC(_ context.Context) {
resp, err := daemonClient.Execute("compact", args)
if err != nil {
FatalError("%v", err)
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if !resp.Success {
FatalError("%s", resp.Error)
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
}
if jsonOutput {
@@ -640,7 +676,8 @@ func runCompactRPC(_ context.Context) {
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
FatalError("parsing response: %v", err)
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
if compactID != "" {
@@ -685,11 +722,13 @@ func runCompactStatsRPC() {
resp, err := daemonClient.Execute("compact_stats", args)
if err != nil {
FatalError("%v", err)
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if !resp.Success {
FatalError("%s", resp.Error)
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
os.Exit(1)
}
if jsonOutput {
@@ -710,7 +749,8 @@ func runCompactStatsRPC() {
}
if err := json.Unmarshal(resp.Data, &result); err != nil {
FatalError("parsing response: %v", err)
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
fmt.Printf("\nCompaction Statistics\n")
@@ -744,7 +784,8 @@ func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
if compactID != "" {
issue, err := store.GetIssue(ctx, compactID)
if err != nil {
FatalError("failed to get issue: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
}
sizeBytes := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
@@ -775,7 +816,8 @@ func runCompactAnalyze(ctx context.Context, store *sqlite.SQLiteStorage) {
tierCandidates, err = store.GetTier2Candidates(ctx)
}
if err != nil {
FatalError("failed to get candidates: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get candidates: %v\n", err)
os.Exit(1)
}
// Apply limit if specified
@@ -837,13 +879,15 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
// Read from stdin
summaryBytes, err = io.ReadAll(os.Stdin)
if err != nil {
FatalError("failed to read summary from stdin: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to read summary from stdin: %v\n", err)
os.Exit(1)
}
} else {
// #nosec G304 -- summary file path provided explicitly by operator
summaryBytes, err = os.ReadFile(compactSummary)
if err != nil {
FatalError("failed to read summary file: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to read summary file: %v\n", err)
os.Exit(1)
}
}
summary := string(summaryBytes)
@@ -851,7 +895,8 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
// Get issue
issue, err := store.GetIssue(ctx, compactID)
if err != nil {
FatalError("failed to get issue: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to get issue: %v\n", err)
os.Exit(1)
}
// Calculate sizes
@@ -862,15 +907,20 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
if !compactForce {
eligible, reason, err := store.CheckEligibility(ctx, compactID, compactTier)
if err != nil {
FatalError("failed to check eligibility: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to check eligibility: %v\n", err)
os.Exit(1)
}
if !eligible {
FatalErrorWithHint(fmt.Sprintf("%s is not eligible for Tier %d compaction: %s", compactID, compactTier, reason), "use --force to bypass eligibility checks")
fmt.Fprintf(os.Stderr, "Error: %s is not eligible for Tier %d compaction: %s\n", compactID, compactTier, reason)
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass eligibility checks\n")
os.Exit(1)
}
// Enforce size reduction unless --force
if compactedSize >= originalSize {
FatalErrorWithHint(fmt.Sprintf("summary (%d bytes) is not shorter than original (%d bytes)", compactedSize, originalSize), "use --force to bypass size validation")
fmt.Fprintf(os.Stderr, "Error: summary (%d bytes) is not shorter than original (%d bytes)\n", compactedSize, originalSize)
fmt.Fprintf(os.Stderr, "Hint: use --force to bypass size validation\n")
os.Exit(1)
}
}
@@ -888,23 +938,27 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
}
if err := store.UpdateIssue(ctx, compactID, updates, actor); err != nil {
FatalError("failed to update issue: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to update issue: %v\n", err)
os.Exit(1)
}
commitHash := compact.GetCurrentCommitHash()
if err := store.ApplyCompaction(ctx, compactID, compactTier, originalSize, compactedSize, commitHash); err != nil {
FatalError("failed to apply compaction: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to apply compaction: %v\n", err)
os.Exit(1)
}
savingBytes := originalSize - compactedSize
reductionPct := float64(savingBytes) / float64(originalSize) * 100
eventData := fmt.Sprintf("Tier %d compaction: %d → %d bytes (saved %d, %.1f%%)", compactTier, originalSize, compactedSize, savingBytes, reductionPct)
if err := store.AddComment(ctx, compactID, actor, eventData); err != nil {
FatalError("failed to record event: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to record event: %v\n", err)
os.Exit(1)
}
if err := store.MarkIssueDirty(ctx, compactID); err != nil {
FatalError("failed to mark dirty: %v", err)
fmt.Fprintf(os.Stderr, "Error: failed to mark dirty: %v\n", err)
os.Exit(1)
}
elapsed := time.Since(start)

View File

@@ -7,7 +7,6 @@ import (
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/syncbranch"
)
@@ -50,38 +49,17 @@ var configSetCmd = &cobra.Command{
Short: "Set a configuration value",
Args: cobra.ExactArgs(2),
Run: func(_ *cobra.Command, args []string) {
key := args[0]
value := args[1]
// Check if this is a yaml-only key (startup settings like no-db, no-daemon, etc.)
// These must be written to config.yaml, not SQLite, because they're read
// before the database is opened. (GH#536)
if config.IsYamlOnlyKey(key) {
if err := config.SetYamlConfig(key, value); err != nil {
fmt.Fprintf(os.Stderr, "Error setting config: %v\n", err)
os.Exit(1)
}
if jsonOutput {
outputJSON(map[string]interface{}{
"key": key,
"value": value,
"location": "config.yaml",
})
} else {
fmt.Printf("Set %s = %s (in config.yaml)\n", key, value)
}
return
}
// Database-stored config requires direct mode
// Config operations work in direct mode only
if err := ensureDirectMode("config set requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
ctx := rootCtx
key := args[0]
value := args[1]
ctx := rootCtx
// Special handling for sync.branch to apply validation
if strings.TrimSpace(key) == syncbranch.ConfigKey {
if err := syncbranch.Set(ctx, store, value); err != nil {
@@ -111,46 +89,25 @@ var configGetCmd = &cobra.Command{
Short: "Get a configuration value",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
key := args[0]
// Check if this is a yaml-only key (startup settings)
// These are read from config.yaml via viper, not SQLite. (GH#536)
if config.IsYamlOnlyKey(key) {
value := config.GetYamlConfig(key)
if jsonOutput {
outputJSON(map[string]interface{}{
"key": key,
"value": value,
"location": "config.yaml",
})
} else {
if value == "" {
fmt.Printf("%s (not set in config.yaml)\n", key)
} else {
fmt.Printf("%s\n", value)
}
}
return
}
// Database-stored config requires direct mode
// Config operations work in direct mode only
if err := ensureDirectMode("config get requires direct database access"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
key := args[0]
ctx := rootCtx
var value string
var err error
// Special handling for sync.branch to support env var override
if strings.TrimSpace(key) == syncbranch.ConfigKey {
value, err = syncbranch.Get(ctx, store)
} else {
value, err = store.GetConfig(ctx, key)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting config: %v\n", err)
os.Exit(1)

View File

@@ -56,8 +56,6 @@ Run 'bd daemon' with no flags to see available options.`,
localMode, _ := cmd.Flags().GetBool("local")
logFile, _ := cmd.Flags().GetString("log")
foreground, _ := cmd.Flags().GetBool("foreground")
logLevel, _ := cmd.Flags().GetString("log-level")
logJSON, _ := cmd.Flags().GetBool("log-json")
// If no operation flags provided, show help
if !start && !stop && !stopAll && !status && !health && !metrics {
@@ -247,7 +245,7 @@ Run 'bd daemon' with no flags to see available options.`,
fmt.Printf("Logging to: %s\n", logFile)
}
startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile, logLevel, logJSON)
startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile)
},
}
@@ -265,8 +263,6 @@ func init() {
daemonCmd.Flags().Bool("metrics", false, "Show detailed daemon metrics")
daemonCmd.Flags().String("log", "", "Log file path (default: .beads/daemon.log)")
daemonCmd.Flags().Bool("foreground", false, "Run in foreground (don't daemonize)")
daemonCmd.Flags().String("log-level", "info", "Log level (debug, info, warn, error)")
daemonCmd.Flags().Bool("log-json", false, "Output logs in JSON format (structured logging)")
daemonCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output JSON format")
rootCmd.AddCommand(daemonCmd)
}
@@ -283,9 +279,8 @@ func computeDaemonParentPID() int {
}
return os.Getppid()
}
func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile, logLevel string, logJSON bool) {
level := parseLogLevel(logLevel)
logF, log := setupDaemonLogger(logPath, logJSON, level)
func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile string) {
logF, log := setupDaemonLogger(logPath)
defer func() { _ = logF.Close() }()
// Set up signal-aware context for graceful shutdown
@@ -295,13 +290,13 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Top-level panic recovery to ensure clean shutdown and diagnostics
defer func() {
if r := recover(); r != nil {
log.Error("daemon crashed", "panic", r)
log.log("PANIC: daemon crashed: %v", r)
// Capture stack trace
stackBuf := make([]byte, 4096)
stackSize := runtime.Stack(stackBuf, false)
stackTrace := string(stackBuf[:stackSize])
log.Error("stack trace", "trace", stackTrace)
log.log("Stack trace:\n%s", stackTrace)
// Write crash report to daemon-error file for user visibility
var beadsDir string
@@ -310,21 +305,21 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
} else if foundDB := beads.FindDatabasePath(); foundDB != "" {
beadsDir = filepath.Dir(foundDB)
}
if beadsDir != "" {
errFile := filepath.Join(beadsDir, "daemon-error")
crashReport := fmt.Sprintf("Daemon crashed at %s\n\nPanic: %v\n\nStack trace:\n%s\n",
time.Now().Format(time.RFC3339), r, stackTrace)
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(crashReport), 0644); err != nil {
log.Warn("could not write crash report", "error", err)
log.log("Warning: could not write crash report: %v", err)
}
}
// Clean up PID file
_ = os.Remove(pidFile)
log.Info("daemon terminated after panic")
log.log("Daemon terminated after panic")
}
}()
@@ -334,8 +329,8 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
if foundDB := beads.FindDatabasePath(); foundDB != "" {
daemonDBPath = foundDB
} else {
log.Error("no beads database found")
log.Info("hint: run 'bd init' to create a database or set BEADS_DB environment variable")
log.log("Error: no beads database found")
log.log("Hint: run 'bd init' to create a database or set BEADS_DB environment variable")
return // Use return instead of os.Exit to allow defers to run
}
}
@@ -381,7 +376,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
errFile := filepath.Join(beadsDir, "daemon-error")
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(errMsg), 0644); err != nil {
log.Warn("could not write daemon-error file", "error", err)
log.log("Warning: could not write daemon-error file: %v", err)
}
return // Use return instead of os.Exit to allow defers to run
@@ -391,22 +386,24 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Validate using canonical name
dbBaseName := filepath.Base(daemonDBPath)
if dbBaseName != beads.CanonicalDatabaseName {
log.Error("non-canonical database name", "name", dbBaseName, "expected", beads.CanonicalDatabaseName)
log.Info("run 'bd init' to migrate to canonical name")
log.log("Error: Non-canonical database name: %s", dbBaseName)
log.log("Expected: %s", beads.CanonicalDatabaseName)
log.log("")
log.log("Run 'bd init' to migrate to canonical name")
return // Use return instead of os.Exit to allow defers to run
}
log.Info("using database", "path", daemonDBPath)
log.log("Using database: %s", daemonDBPath)
// Clear any previous daemon-error file on successful startup
errFile := filepath.Join(beadsDir, "daemon-error")
if err := os.Remove(errFile); err != nil && !os.IsNotExist(err) {
log.Warn("could not remove daemon-error file", "error", err)
log.log("Warning: could not remove daemon-error file: %v", err)
}
store, err := sqlite.New(ctx, daemonDBPath)
if err != nil {
log.Error("cannot open database", "error", err)
log.log("Error: cannot open database: %v", err)
return // Use return instead of os.Exit to allow defers to run
}
defer func() { _ = store.Close() }()
@@ -414,71 +411,73 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Enable freshness checking to detect external database file modifications
// (e.g., when git merge replaces the database file)
store.EnableFreshnessChecking()
log.Info("database opened", "path", daemonDBPath, "freshness_checking", true)
log.log("Database opened: %s (freshness checking enabled)", daemonDBPath)
// Auto-upgrade .beads/.gitignore if outdated
gitignoreCheck := doctor.CheckGitignore()
if gitignoreCheck.Status == "warning" || gitignoreCheck.Status == "error" {
log.Info("upgrading .beads/.gitignore")
log.log("Upgrading .beads/.gitignore...")
if err := doctor.FixGitignore(); err != nil {
log.Warn("failed to upgrade .gitignore", "error", err)
log.log("Warning: failed to upgrade .gitignore: %v", err)
} else {
log.Info("successfully upgraded .beads/.gitignore")
log.log("Successfully upgraded .beads/.gitignore")
}
}
// Hydrate from multi-repo if configured
if results, err := store.HydrateFromMultiRepo(ctx); err != nil {
log.Error("multi-repo hydration failed", "error", err)
log.log("Error: multi-repo hydration failed: %v", err)
return // Use return instead of os.Exit to allow defers to run
} else if results != nil {
log.Info("multi-repo hydration complete")
log.log("Multi-repo hydration complete:")
for repo, count := range results {
log.Info("hydrated issues", "repo", repo, "count", count)
log.log(" %s: %d issues", repo, count)
}
}
// Validate database fingerprint (skip in local mode - no git available)
if localMode {
log.Info("skipping fingerprint validation (local mode)")
log.log("Skipping fingerprint validation (local mode)")
} else if err := validateDatabaseFingerprint(ctx, store, &log); err != nil {
if os.Getenv("BEADS_IGNORE_REPO_MISMATCH") != "1" {
log.Error("repository fingerprint validation failed", "error", err)
log.log("Error: %v", err)
return // Use return instead of os.Exit to allow defers to run
}
log.Warn("repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
log.log("Warning: repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
}
// Validate schema version matches daemon version
versionCtx := context.Background()
dbVersion, err := store.GetMetadata(versionCtx, "bd_version")
if err != nil && err.Error() != "metadata key not found: bd_version" {
log.Error("failed to read database version", "error", err)
log.log("Error: failed to read database version: %v", err)
return // Use return instead of os.Exit to allow defers to run
}
if dbVersion != "" && dbVersion != Version {
log.Warn("database schema version mismatch", "db_version", dbVersion, "daemon_version", Version)
log.Info("auto-upgrading database to daemon version")
log.log("Warning: Database schema version mismatch")
log.log(" Database version: %s", dbVersion)
log.log(" Daemon version: %s", Version)
log.log(" Auto-upgrading database to daemon version...")
// Auto-upgrade database to daemon version
// The daemon operates on its own database, so it should always use its own version
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.Error("failed to update database version", "error", err)
log.log("Error: failed to update database version: %v", err)
// Allow override via environment variable for emergencies
if os.Getenv("BEADS_IGNORE_VERSION_MISMATCH") != "1" {
return // Use return instead of os.Exit to allow defers to run
}
log.Warn("proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)")
log.log("Warning: Proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)")
} else {
log.Info("database version updated", "version", Version)
log.log(" Database version updated to %s", Version)
}
} else if dbVersion == "" {
// Old database without version metadata - set it now
log.Warn("database missing version metadata", "setting_to", Version)
log.log("Warning: Database missing version metadata, setting to %s", Version)
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.Error("failed to set database version", "error", err)
log.log("Error: failed to set database version: %v", err)
return // Use return instead of os.Exit to allow defers to run
}
}
@@ -507,7 +506,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Register daemon in global registry
registry, err := daemon.NewRegistry()
if err != nil {
log.Warn("failed to create registry", "error", err)
log.log("Warning: failed to create registry: %v", err)
} else {
entry := daemon.RegistryEntry{
WorkspacePath: workspacePath,
@@ -518,14 +517,14 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
StartedAt: time.Now(),
}
if err := registry.Register(entry); err != nil {
log.Warn("failed to register daemon", "error", err)
log.log("Warning: failed to register daemon: %v", err)
} else {
log.Info("registered in global registry")
log.log("Registered in global registry")
}
// Ensure we unregister on exit
defer func() {
if err := registry.Unregister(workspacePath, os.Getpid()); err != nil {
log.Warn("failed to unregister daemon", "error", err)
log.log("Warning: failed to unregister daemon: %v", err)
}
}()
}
@@ -544,16 +543,16 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Get parent PID for monitoring (exit if parent dies)
parentPID := computeDaemonParentPID()
log.Info("monitoring parent process", "pid", parentPID)
log.log("Monitoring parent process (PID %d)", parentPID)
// daemonMode already determined above for SetConfig
switch daemonMode {
case "events":
log.Info("using event-driven mode")
log.log("Using event-driven mode")
jsonlPath := findJSONLPath()
if jsonlPath == "" {
log.Error("JSONL path not found, cannot use event-driven mode")
log.Info("falling back to polling mode")
log.log("Error: JSONL path not found, cannot use event-driven mode")
log.log("Falling back to polling mode")
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
} else {
// Event-driven mode uses separate export-only and import-only functions
@@ -568,10 +567,10 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
runEventDrivenLoop(ctx, cancel, server, serverErrChan, store, jsonlPath, doExport, doAutoImport, autoPull, parentPID, log)
}
case "poll":
log.Info("using polling mode", "interval", interval)
log.log("Using polling mode (interval: %v)", interval)
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
default:
log.Warn("unknown BEADS_DAEMON_MODE, defaulting to poll", "mode", daemonMode, "valid", "poll, events")
log.log("Unknown BEADS_DAEMON_MODE: %s (valid: poll, events), defaulting to poll", daemonMode)
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
}
}

View File

@@ -457,7 +457,11 @@ func TestEventLoopSignalHandling(t *testing.T) {
// createTestLogger creates a daemonLogger for testing
func createTestLogger(t *testing.T) daemonLogger {
return newTestLogger()
return daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf("[daemon] "+format, args...)
},
}
}
// TestDaemonIntegration_SocketCleanup verifies socket cleanup after daemon stops

View File

@@ -369,7 +369,7 @@ func stopAllDaemons() {
}
// startDaemon starts the daemon (in foreground if requested, otherwise background)
func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile, logLevel string, logJSON bool) {
func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile string) {
logPath, err := getLogFilePath(logFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -378,7 +378,7 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
// Run in foreground if --foreground flag set or if we're the forked child process
if foreground || os.Getenv("BD_DAEMON_FOREGROUND") == "1" {
runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile, logLevel, logJSON)
runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile)
return
}
@@ -406,12 +406,6 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
if logFile != "" {
args = append(args, "--log", logFile)
}
if logLevel != "" && logLevel != "info" {
args = append(args, "--log-level", logLevel)
}
if logJSON {
args = append(args, "--log-json")
}
cmd := exec.Command(exe, args...) // #nosec G204 - bd daemon command from trusted binary
cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1")
@@ -461,18 +455,18 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
// Detect nested .beads directories (e.g., .beads/.beads/.beads/)
cleanPath := filepath.Clean(beadsDir)
if strings.Contains(cleanPath, string(filepath.Separator)+".beads"+string(filepath.Separator)+".beads") {
log.Error("nested .beads directory detected", "path", cleanPath)
log.Info("hint: do not run 'bd daemon' from inside .beads/ directory")
log.Info("hint: use absolute paths for BEADS_DB or run from workspace root")
log.log("Error: Nested .beads directory detected: %s", cleanPath)
log.log("Hint: Do not run 'bd daemon' from inside .beads/ directory")
log.log("Hint: Use absolute paths for BEADS_DB or run from workspace root")
return nil, fmt.Errorf("nested .beads directory detected")
}
lock, err := acquireDaemonLock(beadsDir, dbPath)
if err != nil {
if err == ErrDaemonLocked {
log.Info("daemon already running (lock held), exiting")
log.log("Daemon already running (lock held), exiting")
} else {
log.Error("acquiring daemon lock", "error", err)
log.log("Error acquiring daemon lock: %v", err)
}
return nil, err
}
@@ -483,11 +477,11 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
if pid, err := strconv.Atoi(strings.TrimSpace(string(data))); err == nil && pid == myPID {
// PID file is correct, continue
} else {
log.Warn("PID file has wrong PID, overwriting", "expected", myPID, "got", pid)
log.log("PID file has wrong PID (expected %d, got %d), overwriting", myPID, pid)
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
}
} else {
log.Info("PID file missing after lock acquisition, creating")
log.log("PID file missing after lock acquisition, creating")
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
}

View File

@@ -122,8 +122,12 @@ func TestCreateLocalSyncFunc(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
// Create logger (test output via newTestLogger)
log := newTestLogger()
// Create logger
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Create and run local sync function
doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -189,7 +193,11 @@ func TestCreateLocalExportFunc(t *testing.T) {
}
}
log := newTestLogger()
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
doExport := createLocalExportFunc(ctx, testStore, log)
doExport()
@@ -250,7 +258,11 @@ func TestCreateLocalAutoImportFunc(t *testing.T) {
t.Fatalf("Failed to write JSONL: %v", err)
}
log := newTestLogger()
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
doImport := createLocalAutoImportFunc(ctx, testStore, log)
doImport()
@@ -367,7 +379,11 @@ func TestLocalModeInNonGitDirectory(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
log := newTestLogger()
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Run local sync (should work without git)
doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -421,7 +437,11 @@ func TestLocalModeExportImportRoundTrip(t *testing.T) {
defer func() { dbPath = oldDBPath }()
dbPath = testDBPath
log := newTestLogger()
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Create issues
for i := 0; i < 5; i++ {

View File

@@ -1,97 +1,23 @@
package main
import (
"io"
"log/slog"
"os"
"strings"
"fmt"
"time"
"gopkg.in/natefinch/lumberjack.v2"
)
// daemonLogger wraps slog for daemon logging.
// Provides level-specific methods and backward-compatible log() for migration.
// daemonLogger wraps a logging function for the daemon
type daemonLogger struct {
logger *slog.Logger
logFunc func(string, ...interface{})
}
// log is the backward-compatible logging method (maps to Info level).
// Use Info(), Warn(), Error(), Debug() for explicit levels.
func (d *daemonLogger) log(format string, args ...interface{}) {
d.logger.Info(format, toSlogArgs(args)...)
d.logFunc(format, args...)
}
// Info logs at INFO level.
func (d *daemonLogger) Info(msg string, args ...interface{}) {
d.logger.Info(msg, toSlogArgs(args)...)
}
// Warn logs at WARN level.
func (d *daemonLogger) Warn(msg string, args ...interface{}) {
d.logger.Warn(msg, toSlogArgs(args)...)
}
// Error logs at ERROR level.
func (d *daemonLogger) Error(msg string, args ...interface{}) {
d.logger.Error(msg, toSlogArgs(args)...)
}
// Debug logs at DEBUG level.
func (d *daemonLogger) Debug(msg string, args ...interface{}) {
d.logger.Debug(msg, toSlogArgs(args)...)
}
// toSlogArgs converts variadic args to slog-compatible key-value pairs.
// If args are already in key-value format (string, value, string, value...),
// they're passed through. Otherwise, they're wrapped as "args" for sprintf-style logs.
func toSlogArgs(args []interface{}) []any {
if len(args) == 0 {
return nil
}
// Check if args look like slog key-value pairs (string key followed by value)
// If first arg is a string and we have pairs, treat as slog format
if len(args) >= 2 {
if _, ok := args[0].(string); ok {
// Likely slog-style: "key", value, "key2", value2
result := make([]any, len(args))
for i, a := range args {
result[i] = a
}
return result
}
}
// For sprintf-style args, wrap them (caller should use fmt.Sprintf)
result := make([]any, len(args))
for i, a := range args {
result[i] = a
}
return result
}
// parseLogLevel converts a log level string to slog.Level.
func parseLogLevel(level string) slog.Level {
switch strings.ToLower(level) {
case "debug":
return slog.LevelDebug
case "info":
return slog.LevelInfo
case "warn", "warning":
return slog.LevelWarn
case "error":
return slog.LevelError
default:
return slog.LevelInfo
}
}
// setupDaemonLogger creates a structured logger for the daemon.
// Returns the lumberjack logger (for cleanup) and the daemon logger.
//
// Parameters:
// - logPath: path to log file (uses lumberjack for rotation)
// - jsonFormat: if true, output JSON; otherwise text format
// - level: log level (debug, info, warn, error)
func setupDaemonLogger(logPath string, jsonFormat bool, level slog.Level) (*lumberjack.Logger, daemonLogger) {
// setupDaemonLogger creates a rotating log file logger for the daemon
func setupDaemonLogger(logPath string) (*lumberjack.Logger, daemonLogger) {
maxSizeMB := getEnvInt("BEADS_DAEMON_LOG_MAX_SIZE", 50)
maxBackups := getEnvInt("BEADS_DAEMON_LOG_MAX_BACKUPS", 7)
maxAgeDays := getEnvInt("BEADS_DAEMON_LOG_MAX_AGE", 30)
@@ -105,65 +31,13 @@ func setupDaemonLogger(logPath string, jsonFormat bool, level slog.Level) (*lumb
Compress: compress,
}
// Create multi-writer to log to both file and stderr (for foreground mode visibility)
var w io.Writer = logF
// Configure slog handler
opts := &slog.HandlerOptions{
Level: level,
}
var handler slog.Handler
if jsonFormat {
handler = slog.NewJSONHandler(w, opts)
} else {
handler = slog.NewTextHandler(w, opts)
}
logger := daemonLogger{
logger: slog.New(handler),
logFunc: func(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
timestamp := time.Now().Format("2006-01-02 15:04:05")
_, _ = fmt.Fprintf(logF, "[%s] %s\n", timestamp, msg)
},
}
return logF, logger
}
// setupDaemonLoggerLegacy is the old signature for backward compatibility during migration.
// TODO: Remove this once all callers are updated to use the new signature.
func setupDaemonLoggerLegacy(logPath string) (*lumberjack.Logger, daemonLogger) {
return setupDaemonLogger(logPath, false, slog.LevelInfo)
}
// SetupStderrLogger creates a logger that writes to stderr only (no file).
// Useful for foreground mode or testing.
func SetupStderrLogger(jsonFormat bool, level slog.Level) daemonLogger {
opts := &slog.HandlerOptions{
Level: level,
}
var handler slog.Handler
if jsonFormat {
handler = slog.NewJSONHandler(os.Stderr, opts)
} else {
handler = slog.NewTextHandler(os.Stderr, opts)
}
return daemonLogger{
logger: slog.New(handler),
}
}
// newTestLogger creates a no-op logger for testing.
// Logs are discarded - use this when you don't need to verify log output.
func newTestLogger() daemonLogger {
return daemonLogger{
logger: slog.New(slog.NewTextHandler(io.Discard, nil)),
}
}
// newTestLoggerWithWriter creates a logger that writes to the given writer.
// Use this when you need to capture and verify log output in tests.
func newTestLoggerWithWriter(w io.Writer) daemonLogger {
return daemonLogger{
logger: slog.New(slog.NewTextHandler(w, nil)),
}
}

View File

@@ -19,21 +19,21 @@ func startRPCServer(ctx context.Context, socketPath string, store storage.Storag
serverErrChan := make(chan error, 1)
go func() {
log.Info("starting RPC server", "socket", socketPath)
log.log("Starting RPC server: %s", socketPath)
if err := server.Start(ctx); err != nil {
log.Error("RPC server error", "error", err)
log.log("RPC server error: %v", err)
serverErrChan <- err
}
}()
select {
case err := <-serverErrChan:
log.Error("RPC server failed to start", "error", err)
log.log("RPC server failed to start: %v", err)
return nil, nil, err
case <-server.WaitReady():
log.Info("RPC server ready (socket listening)")
log.log("RPC server ready (socket listening)")
case <-time.After(5 * time.Second):
log.Warn("server didn't signal ready after 5 seconds (may still be starting)")
log.log("WARNING: Server didn't signal ready after 5 seconds (may still be starting)")
}
return server, serverErrChan, nil
@@ -78,35 +78,35 @@ func runEventLoop(ctx context.Context, cancel context.CancelFunc, ticker *time.T
case <-parentCheckTicker.C:
// Check if parent process is still alive
if !checkParentProcessAlive(parentPID) {
log.Info("parent process died, shutting down daemon", "parent_pid", parentPID)
log.log("Parent process (PID %d) died, shutting down daemon", parentPID)
cancel()
if err := server.Stop(); err != nil {
log.Error("stopping server", "error", err)
log.log("Error stopping server: %v", err)
}
return
}
case sig := <-sigChan:
if isReloadSignal(sig) {
log.Info("received reload signal, ignoring (daemon continues running)")
log.log("Received reload signal, ignoring (daemon continues running)")
continue
}
log.Info("received signal, shutting down gracefully", "signal", sig)
log.log("Received signal %v, shutting down gracefully...", sig)
cancel()
if err := server.Stop(); err != nil {
log.Error("stopping RPC server", "error", err)
log.log("Error stopping RPC server: %v", err)
}
return
case <-ctx.Done():
log.Info("context canceled, shutting down")
log.log("Context canceled, shutting down")
if err := server.Stop(); err != nil {
log.Error("stopping RPC server", "error", err)
log.log("Error stopping RPC server: %v", err)
}
return
case err := <-serverErrChan:
log.Error("RPC server failed", "error", err)
log.log("RPC server failed: %v", err)
cancel()
if err := server.Stop(); err != nil {
log.Error("stopping RPC server", "error", err)
log.log("Error stopping RPC server: %v", err)
}
return
}

View File

@@ -772,11 +772,13 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
// Helper types for testing
func newTestSyncBranchLogger() (daemonLogger, *string) {
// Note: With slog, we can't easily capture formatted messages like before.
// For tests that need to verify log output, use strings.Builder and newTestLoggerWithWriter.
// This helper is kept for backward compatibility but messages won't be captured.
messages := ""
return newTestLogger(), &messages
logger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
messages += "\n" + format
},
}
return logger, &messages
}
// TestSyncBranchConfigChange tests changing sync.branch after worktree exists

View File

@@ -335,7 +335,11 @@ func TestExportUpdatesMetadata(t *testing.T) {
// Update metadata using the actual daemon helper function (bd-ar2.3 fix)
// This verifies that updateExportMetadata (used by createExportFunc and createSyncFunc) works correctly
mockLogger := newTestLogger()
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Verify metadata was set (renamed from last_import_hash to jsonl_content_hash - bd-39o)
@@ -434,7 +438,11 @@ func TestUpdateExportMetadataMultiRepo(t *testing.T) {
}
// Create mock logger
mockLogger := newTestLogger()
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Update metadata for each repo with different keys (bd-ar2.2 multi-repo support)
updateExportMetadata(ctx, store, jsonlPath1, mockLogger, jsonlPath1)
@@ -546,7 +554,11 @@ func TestExportWithMultiRepoConfigUpdatesAllMetadata(t *testing.T) {
// Simulate multi-repo export flow (as in createExportFunc)
// This tests the full integration: getMultiRepoJSONLPaths -> getRepoKeyForPath -> updateExportMetadata
mockLogger := newTestLogger()
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Simulate multi-repo mode with stable keys
multiRepoPaths := []string{primaryJSONL, additionalJSONL}
@@ -664,7 +676,11 @@ func TestUpdateExportMetadataInvalidKeySuffix(t *testing.T) {
}
// Create mock logger
mockLogger := newTestLogger()
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Update metadata with keySuffix containing ':' (bd-web8: should be auto-sanitized)
// This simulates Windows absolute paths like "C:\Users\..."

View File

@@ -15,7 +15,9 @@ import (
// newMockLogger creates a daemonLogger that does nothing
func newMockLogger() daemonLogger {
return newTestLogger()
return daemonLogger{
logFunc: func(format string, args ...interface{}) {},
}
}
func TestFileWatcher_JSONLChangeDetection(t *testing.T) {

View File

@@ -272,330 +272,3 @@ func countJSONLIssuesTest(t *testing.T, jsonlPath string) int {
}
return count
}
// TestCreateTombstoneWrapper tests the createTombstone wrapper function
func TestCreateTombstoneWrapper(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
tmpDir := t.TempDir()
beadsDir := filepath.Join(tmpDir, ".beads")
testDB := filepath.Join(beadsDir, "beads.db")
s := newTestStore(t, testDB)
ctx := context.Background()
// Save and restore global store
oldStore := store
defer func() { store = oldStore }()
store = s
t.Run("successful tombstone creation", func(t *testing.T) {
issue := &types.Issue{
Title: "Test Issue",
Description: "Issue to be tombstoned",
Status: types.StatusOpen,
Priority: 2,
IssueType: "task",
}
if err := s.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
}
err := createTombstone(ctx, issue.ID, "test-actor", "Test deletion reason")
if err != nil {
t.Fatalf("createTombstone failed: %v", err)
}
// Verify tombstone status
updated, err := s.GetIssue(ctx, issue.ID)
if err != nil {
t.Fatalf("GetIssue failed: %v", err)
}
if updated == nil {
t.Fatal("Issue should still exist as tombstone")
}
if updated.Status != types.StatusTombstone {
t.Errorf("Expected status %s, got %s", types.StatusTombstone, updated.Status)
}
})
t.Run("tombstone with actor and reason tracking", func(t *testing.T) {
issue := &types.Issue{
Title: "Issue with tracking",
Description: "Check actor/reason",
Status: types.StatusOpen,
Priority: 1,
IssueType: "bug",
}
if err := s.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
}
actor := "admin-user"
reason := "Duplicate issue"
err := createTombstone(ctx, issue.ID, actor, reason)
if err != nil {
t.Fatalf("createTombstone failed: %v", err)
}
// Verify actor and reason were recorded
updated, err := s.GetIssue(ctx, issue.ID)
if err != nil {
t.Fatalf("GetIssue failed: %v", err)
}
if updated.DeletedBy != actor {
t.Errorf("Expected DeletedBy %q, got %q", actor, updated.DeletedBy)
}
if updated.DeleteReason != reason {
t.Errorf("Expected DeleteReason %q, got %q", reason, updated.DeleteReason)
}
})
t.Run("error when issue does not exist", func(t *testing.T) {
err := createTombstone(ctx, "nonexistent-issue-id", "actor", "reason")
if err == nil {
t.Error("Expected error for non-existent issue")
}
})
t.Run("verify tombstone preserves original type", func(t *testing.T) {
issue := &types.Issue{
Title: "Feature issue",
Description: "Should preserve type",
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeFeature,
}
if err := s.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
}
err := createTombstone(ctx, issue.ID, "actor", "reason")
if err != nil {
t.Fatalf("createTombstone failed: %v", err)
}
updated, err := s.GetIssue(ctx, issue.ID)
if err != nil {
t.Fatalf("GetIssue failed: %v", err)
}
if updated.OriginalType != string(types.TypeFeature) {
t.Errorf("Expected OriginalType %q, got %q", types.TypeFeature, updated.OriginalType)
}
})
t.Run("verify audit trail recorded", func(t *testing.T) {
issue := &types.Issue{
Title: "Issue for audit",
Description: "Check event recording",
Status: types.StatusOpen,
Priority: 2,
IssueType: "task",
}
if err := s.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
}
err := createTombstone(ctx, issue.ID, "audit-actor", "audit-reason")
if err != nil {
t.Fatalf("createTombstone failed: %v", err)
}
// Verify an event was recorded
events, err := s.GetEvents(ctx, issue.ID, 100)
if err != nil {
t.Fatalf("GetEvents failed: %v", err)
}
found := false
for _, e := range events {
if e.EventType == "deleted" && e.Actor == "audit-actor" {
found = true
break
}
}
if !found {
t.Error("Expected 'deleted' event in audit trail")
}
})
}
// TestDeleteIssueWrapper tests the deleteIssue wrapper function
func TestDeleteIssueWrapper(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
tmpDir := t.TempDir()
beadsDir := filepath.Join(tmpDir, ".beads")
testDB := filepath.Join(beadsDir, "beads.db")
s := newTestStore(t, testDB)
ctx := context.Background()
// Save and restore global store
oldStore := store
defer func() { store = oldStore }()
store = s
t.Run("successful issue deletion", func(t *testing.T) {
issue := &types.Issue{
Title: "Issue to delete",
Description: "Will be permanently deleted",
Status: types.StatusOpen,
Priority: 2,
IssueType: "task",
}
if err := s.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
}
err := deleteIssue(ctx, issue.ID)
if err != nil {
t.Fatalf("deleteIssue failed: %v", err)
}
// Verify issue is gone
deleted, err := s.GetIssue(ctx, issue.ID)
if err != nil {
t.Fatalf("GetIssue failed: %v", err)
}
if deleted != nil {
t.Error("Issue should be completely deleted")
}
})
t.Run("error on non-existent issue", func(t *testing.T) {
err := deleteIssue(ctx, "nonexistent-issue-id")
if err == nil {
t.Error("Expected error for non-existent issue")
}
})
t.Run("verify dependencies are removed", func(t *testing.T) {
// Create two issues with a dependency
issue1 := &types.Issue{
Title: "Blocker issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: "task",
}
issue2 := &types.Issue{
Title: "Dependent issue",
Status: types.StatusOpen,
Priority: 2,
IssueType: "task",
}
if err := s.CreateIssue(ctx, issue1, "test"); err != nil {
t.Fatalf("Failed to create issue1: %v", err)
}
if err := s.CreateIssue(ctx, issue2, "test"); err != nil {
t.Fatalf("Failed to create issue2: %v", err)
}
// Add dependency: issue2 depends on issue1
dep := &types.Dependency{
IssueID: issue2.ID,
DependsOnID: issue1.ID,
Type: types.DepBlocks,
}
if err := s.AddDependency(ctx, dep, "test"); err != nil {
t.Fatalf("Failed to add dependency: %v", err)
}
// Delete issue1 (the blocker)
err := deleteIssue(ctx, issue1.ID)
if err != nil {
t.Fatalf("deleteIssue failed: %v", err)
}
// Verify issue2 no longer has dependencies
deps, err := s.GetDependencies(ctx, issue2.ID)
if err != nil {
t.Fatalf("GetDependencies failed: %v", err)
}
if len(deps) > 0 {
t.Errorf("Expected no dependencies after deleting blocker, got %d", len(deps))
}
})
t.Run("verify issue removed from database", func(t *testing.T) {
issue := &types.Issue{
Title: "Verify removal",
Status: types.StatusOpen,
Priority: 2,
IssueType: "task",
}
if err := s.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
}
// Get statistics before delete
statsBefore, err := s.GetStatistics(ctx)
if err != nil {
t.Fatalf("GetStatistics failed: %v", err)
}
err = deleteIssue(ctx, issue.ID)
if err != nil {
t.Fatalf("deleteIssue failed: %v", err)
}
// Get statistics after delete
statsAfter, err := s.GetStatistics(ctx)
if err != nil {
t.Fatalf("GetStatistics failed: %v", err)
}
if statsAfter.TotalIssues != statsBefore.TotalIssues-1 {
t.Errorf("Expected total issues to decrease by 1, was %d now %d",
statsBefore.TotalIssues, statsAfter.TotalIssues)
}
})
}
func TestCreateTombstoneUnsupportedStorage(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
oldStore := store
defer func() { store = oldStore }()
// Set store to nil - the type assertion will fail
store = nil
ctx := context.Background()
err := createTombstone(ctx, "any-id", "actor", "reason")
if err == nil {
t.Error("Expected error when storage is nil")
}
expectedMsg := "tombstone operation not supported by this storage backend"
if err.Error() != expectedMsg {
t.Errorf("Expected error %q, got %q", expectedMsg, err.Error())
}
}
func TestDeleteIssueUnsupportedStorage(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
oldStore := store
defer func() { store = oldStore }()
// Set store to nil - the type assertion will fail
store = nil
ctx := context.Background()
err := deleteIssue(ctx, "any-id")
if err == nil {
t.Error("Expected error when storage is nil")
}
expectedMsg := "delete operation not supported by this storage backend"
if err.Error() != expectedMsg {
t.Errorf("Expected error %q, got %q", expectedMsg, err.Error())
}
}

View File

@@ -7,7 +7,6 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"time"
@@ -53,6 +52,7 @@ var (
doctorInteractive bool // bd-3xl: per-fix confirmation mode
doctorDryRun bool // bd-a5z: preview fixes without applying
doctorOutput string // bd-9cc: export diagnostics to file
doctorVerbose bool // bd-4qfb: show all checks including passed
perfMode bool
checkHealthMode bool
)
@@ -422,6 +422,10 @@ func applyFixList(path string, fixes []doctorCheck) {
// No auto-fix: compaction requires agent review
fmt.Printf(" ⚠ Run 'bd compact --analyze' to review candidates\n")
continue
case "Large Database":
// No auto-fix: pruning deletes data, must be user-controlled
fmt.Printf(" ⚠ Run 'bd cleanup --older-than 90' to prune old closed issues\n")
continue
default:
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
fmt.Printf(" Manual fix: %s\n", check.Fix)
@@ -817,6 +821,12 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, compactionCheck)
// Info only, not a warning - compaction requires human review
// Check 29: Database size (pruning suggestion)
// Note: This check has no auto-fix - pruning is destructive and user-controlled
sizeCheck := convertDoctorCheck(doctor.CheckDatabaseSize(path))
result.Checks = append(result.Checks, sizeCheck)
// Don't fail overall check for size warning, just inform
return result
}
@@ -858,136 +868,118 @@ func exportDiagnostics(result doctorResult, outputPath string) error {
}
func printDiagnostics(result doctorResult) {
// Print header with version
fmt.Printf("\nbd doctor v%s\n\n", result.CLIVersion)
// Group checks by category
checksByCategory := make(map[string][]doctorCheck)
for _, check := range result.Checks {
cat := check.Category
if cat == "" {
cat = "Other"
}
checksByCategory[cat] = append(checksByCategory[cat], check)
}
// Track counts
// Count checks by status and collect into categories
var passCount, warnCount, failCount int
var warnings []doctorCheck
var errors, warnings []doctorCheck
passedByCategory := make(map[string][]doctorCheck)
// Print checks by category in defined order
for _, category := range doctor.CategoryOrder {
checks, exists := checksByCategory[category]
if !exists || len(checks) == 0 {
continue
for _, check := range result.Checks {
switch check.Status {
case statusOK:
passCount++
cat := check.Category
if cat == "" {
cat = "Other"
}
passedByCategory[cat] = append(passedByCategory[cat], check)
case statusWarning:
warnCount++
warnings = append(warnings, check)
case statusError:
failCount++
errors = append(errors, check)
}
// Print category header
fmt.Println(ui.RenderCategory(category))
// Print each check in this category
for _, check := range checks {
// Determine status icon
var statusIcon string
switch check.Status {
case statusOK:
statusIcon = ui.RenderPassIcon()
passCount++
case statusWarning:
statusIcon = ui.RenderWarnIcon()
warnCount++
warnings = append(warnings, check)
case statusError:
statusIcon = ui.RenderFailIcon()
failCount++
warnings = append(warnings, check)
}
// Print check line: icon + name + message
fmt.Printf(" %s %s", statusIcon, check.Name)
if check.Message != "" {
fmt.Printf("%s", ui.RenderMuted(" "+check.Message))
}
fmt.Println()
// Print detail if present (indented)
if check.Detail != "" {
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), ui.RenderMuted(check.Detail))
}
}
fmt.Println()
}
// Print any checks without a category
if otherChecks, exists := checksByCategory["Other"]; exists && len(otherChecks) > 0 {
fmt.Println(ui.RenderCategory("Other"))
for _, check := range otherChecks {
var statusIcon string
switch check.Status {
case statusOK:
statusIcon = ui.RenderPassIcon()
passCount++
case statusWarning:
statusIcon = ui.RenderWarnIcon()
warnCount++
warnings = append(warnings, check)
case statusError:
statusIcon = ui.RenderFailIcon()
failCount++
warnings = append(warnings, check)
}
fmt.Printf(" %s %s", statusIcon, check.Name)
if check.Message != "" {
fmt.Printf("%s", ui.RenderMuted(" "+check.Message))
}
fmt.Println()
// Print header with version and summary at TOP
fmt.Printf("\nbd doctor v%s\n\n", result.CLIVersion)
fmt.Printf("Summary: %d checks passed, %d warnings, %d errors\n", passCount, warnCount, failCount)
// Print errors section (always shown if any)
if failCount > 0 {
fmt.Println()
fmt.Println(ui.RenderSeparator())
fmt.Printf("%s Errors (%d)\n", ui.RenderFailIcon(), failCount)
fmt.Println(ui.RenderSeparator())
fmt.Println()
for _, check := range errors {
fmt.Printf("[%s] %s\n", check.Name, check.Message)
if check.Detail != "" {
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), ui.RenderMuted(check.Detail))
}
}
fmt.Println()
}
// Print summary line
fmt.Println(ui.RenderSeparator())
summary := fmt.Sprintf("%s %d passed %s %d warnings %s %d failed",
ui.RenderPassIcon(), passCount,
ui.RenderWarnIcon(), warnCount,
ui.RenderFailIcon(), failCount,
)
fmt.Println(summary)
// Print warnings/errors section with fixes
if len(warnings) > 0 {
fmt.Println()
fmt.Println(ui.RenderWarn(ui.IconWarn + " WARNINGS"))
// Sort by severity: errors first, then warnings
slices.SortStableFunc(warnings, func(a, b doctorCheck) int {
// Errors (statusError) come before warnings (statusWarning)
if a.Status == statusError && b.Status != statusError {
return -1
}
if a.Status != statusError && b.Status == statusError {
return 1
}
return 0 // maintain original order within same severity
})
for i, check := range warnings {
// Show numbered items with icon and color based on status
// Errors get entire line in red, warnings just the number in yellow
line := fmt.Sprintf("%s: %s", check.Name, check.Message)
if check.Status == statusError {
fmt.Printf(" %s %s %s\n", ui.RenderFailIcon(), ui.RenderFail(fmt.Sprintf("%d.", i+1)), ui.RenderFail(line))
} else {
fmt.Printf(" %s %s %s\n", ui.RenderWarnIcon(), ui.RenderWarn(fmt.Sprintf("%d.", i+1)), line)
fmt.Printf(" %s\n", check.Detail)
}
if check.Fix != "" {
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), check.Fix)
fmt.Printf(" Fix: %s\n", check.Fix)
}
fmt.Println()
}
} else {
}
// Print warnings section (always shown if any)
if warnCount > 0 {
fmt.Println(ui.RenderSeparator())
fmt.Printf("%s Warnings (%d)\n", ui.RenderWarnIcon(), warnCount)
fmt.Println(ui.RenderSeparator())
fmt.Println()
for _, check := range warnings {
fmt.Printf("[%s] %s\n", check.Name, check.Message)
if check.Detail != "" {
fmt.Printf(" %s\n", check.Detail)
}
if check.Fix != "" {
fmt.Printf(" Fix: %s\n", check.Fix)
}
fmt.Println()
}
}
// Print passed section
if passCount > 0 {
fmt.Println(ui.RenderSeparator())
if doctorVerbose {
// Verbose mode: show all passed checks grouped by category
fmt.Printf("%s Passed (%d)\n", ui.RenderPassIcon(), passCount)
fmt.Println(ui.RenderSeparator())
fmt.Println()
for _, category := range doctor.CategoryOrder {
checks, exists := passedByCategory[category]
if !exists || len(checks) == 0 {
continue
}
fmt.Printf(" %s\n", category)
for _, check := range checks {
fmt.Printf(" %s %s", ui.RenderPassIcon(), check.Name)
if check.Message != "" {
fmt.Printf(" %s", ui.RenderMuted(check.Message))
}
fmt.Println()
}
fmt.Println()
}
// Print "Other" category if exists
if otherChecks, exists := passedByCategory["Other"]; exists && len(otherChecks) > 0 {
fmt.Printf(" %s\n", "Other")
for _, check := range otherChecks {
fmt.Printf(" %s %s", ui.RenderPassIcon(), check.Name)
if check.Message != "" {
fmt.Printf(" %s", ui.RenderMuted(check.Message))
}
fmt.Println()
}
fmt.Println()
}
} else {
// Default mode: collapsed summary
fmt.Printf("%s Passed (%d) %s\n", ui.RenderPassIcon(), passCount, ui.RenderMuted("[use --verbose to show details]"))
fmt.Println(ui.RenderSeparator())
}
}
// Final status message
if failCount == 0 && warnCount == 0 {
fmt.Println()
fmt.Printf("%s\n", ui.RenderPass("✓ All checks passed"))
}
@@ -998,4 +990,5 @@ func init() {
doctorCmd.Flags().BoolVar(&perfMode, "perf", false, "Run performance diagnostics and generate CPU profile")
doctorCmd.Flags().BoolVar(&checkHealthMode, "check-health", false, "Quick health check for git hooks (silent on success)")
doctorCmd.Flags().StringVarP(&doctorOutput, "output", "o", "", "Export diagnostics to JSON file (bd-9cc)")
doctorCmd.Flags().BoolVarP(&doctorVerbose, "verbose", "v", false, "Show all checks including passed (bd-4qfb)")
}

View File

@@ -620,3 +620,92 @@ func isNoDbModeConfigured(beadsDir string) bool {
return cfg.NoDb
}
// CheckDatabaseSize warns when the database has accumulated many closed issues.
// This is purely informational - pruning is NEVER auto-fixed because it
// permanently deletes data. Users must explicitly run 'bd cleanup' to prune.
//
// Config: doctor.suggest_pruning_issue_count (default: 5000, 0 = disabled)
//
// DESIGN NOTE: This check intentionally has NO auto-fix. Unlike other doctor
// checks that fix configuration or sync issues, pruning is destructive and
// irreversible. The user must make an explicit decision to delete their
// closed issue history. We only provide guidance, never action.
func CheckDatabaseSize(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
// Get database path
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// If no database, skip this check
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "N/A (no database)",
}
}
// Read threshold from config (default 5000, 0 = disabled)
threshold := 5000
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
if err != nil {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "N/A (unable to open database)",
}
}
defer db.Close()
// Check for custom threshold in config table
var thresholdStr string
err = db.QueryRow("SELECT value FROM config WHERE key = ?", "doctor.suggest_pruning_issue_count").Scan(&thresholdStr)
if err == nil {
if _, err := fmt.Sscanf(thresholdStr, "%d", &threshold); err != nil {
threshold = 5000 // Reset to default on parse error
}
}
// If disabled, return OK
if threshold == 0 {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "Check disabled (threshold = 0)",
}
}
// Count closed issues
var closedCount int
err = db.QueryRow("SELECT COUNT(*) FROM issues WHERE status = 'closed'").Scan(&closedCount)
if err != nil {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "N/A (unable to count issues)",
}
}
// Check against threshold
if closedCount > threshold {
return DoctorCheck{
Name: "Large Database",
Status: StatusWarning,
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
Detail: "Large number of closed issues may impact performance",
Fix: "Consider running 'bd cleanup --older-than 90' to prune old closed issues",
}
}
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
}
}

View File

@@ -145,6 +145,8 @@ func CheckSyncBranchHookCompatibility(path string) DoctorCheck {
Status: StatusWarning,
Message: "Pre-push hook is not a bd hook",
Detail: "Cannot verify sync-branch compatibility with custom hooks",
Fix: "Either run 'bd hooks install --force' to use bd hooks,\n" +
" or ensure your custom hook skips validation when pushing to sync-branch",
}
}

View File

@@ -188,7 +188,7 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
" Only one JSONL file should be used per repository.",
Fix: "Determine which file is current and remove the others:\n" +
" 1. Check 'bd stats' to see which file is being used\n" +
" 1. Check .beads/metadata.json for 'jsonl_export' setting\n" +
" 2. Verify with 'git log .beads/*.jsonl' to see commit history\n" +
" 3. Remove the unused file(s): git rm .beads/<unused>.jsonl\n" +
" 4. Commit the change",

View File

@@ -65,7 +65,11 @@ func TestExportUpdatesDatabaseMtime(t *testing.T) {
}
// Update metadata after export (bd-ymj fix)
mockLogger := newTestLogger()
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Get JSONL mtime
@@ -166,7 +170,11 @@ func TestDaemonExportScenario(t *testing.T) {
}
// Daemon updates metadata after export (bd-ymj fix)
mockLogger := newTestLogger()
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// THIS IS THE FIX: daemon now calls TouchDatabaseFile after export
@@ -241,7 +249,11 @@ func TestMultipleExportCycles(t *testing.T) {
}
// Update metadata after export (bd-ymj fix)
mockLogger := newTestLogger()
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Apply fix

View File

@@ -8,7 +8,6 @@ import (
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
@@ -106,65 +105,42 @@ Examples:
title = fmt.Sprintf("Gate: %s:%s", awaitType, awaitID)
}
var gate *types.Issue
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateCreate(&rpc.GateCreateArgs{
Title: title,
AwaitType: awaitType,
AwaitID: awaitID,
Timeout: timeout,
Waiters: notifyAddrs,
})
if err != nil {
FatalError("gate create: %v", err)
// Gate creation requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate create requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate create ...\n")
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
// Parse the gate ID from response and fetch full gate
var result rpc.GateCreateResult
if err := json.Unmarshal(resp.Data, &result); err != nil {
FatalError("failed to parse gate create result: %v", err)
}
// Get the full gate for output
showResp, err := daemonClient.GateShow(&rpc.GateShowArgs{ID: result.ID})
if err != nil {
FatalError("failed to fetch created gate: %v", err)
}
if err := json.Unmarshal(showResp.Data, &gate); err != nil {
FatalError("failed to parse gate: %v", err)
}
} else if store != nil {
now := time.Now()
gate = &types.Issue{
// ID will be generated by CreateIssue
Title: title,
IssueType: types.TypeGate,
Status: types.StatusOpen,
Priority: 1, // Gates are typically high priority
Assignee: "deacon/",
Wisp: true, // Gates are wisps (ephemeral)
AwaitType: awaitType,
AwaitID: awaitID,
Timeout: timeout,
Waiters: notifyAddrs,
CreatedAt: now,
UpdatedAt: now,
}
gate.ContentHash = gate.ComputeContentHash()
if err := store.CreateIssue(ctx, gate, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error creating gate: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
now := time.Now()
gate := &types.Issue{
// ID will be generated by CreateIssue
Title: title,
IssueType: types.TypeGate,
Status: types.StatusOpen,
Priority: 1, // Gates are typically high priority
Assignee: "deacon/",
Wisp: true, // Gates are wisps (ephemeral)
AwaitType: awaitType,
AwaitID: awaitID,
Timeout: timeout,
Waiters: notifyAddrs,
CreatedAt: now,
UpdatedAt: now,
}
gate.ContentHash = gate.ComputeContentHash()
if err := store.CreateIssue(ctx, gate, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error creating gate: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
outputJSON(gate)
return
@@ -221,39 +197,34 @@ var gateShowCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
ctx := rootCtx
var gate *types.Issue
// Gate show requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate show requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate show %s\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
os.Exit(1)
}
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateShow(&rpc.GateShowArgs{ID: args[0]})
if err != nil {
FatalError("gate show: %v", err)
}
if err := json.Unmarshal(resp.Data, &gate); err != nil {
FatalError("failed to parse gate: %v", err)
}
} else if store != nil {
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
gate, err = store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
@@ -292,36 +263,30 @@ var gateListCmd = &cobra.Command{
ctx := rootCtx
showAll, _ := cmd.Flags().GetBool("all")
var issues []*types.Issue
// Gate list requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate list requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate list\n")
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
os.Exit(1)
}
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateList(&rpc.GateListArgs{All: showAll})
if err != nil {
FatalError("gate list: %v", err)
}
if err := json.Unmarshal(resp.Data, &issues); err != nil {
FatalError("failed to parse gates: %v", err)
}
} else if store != nil {
// Build filter for gates
gateType := types.TypeGate
filter := types.IssueFilter{
IssueType: &gateType,
}
if !showAll {
openStatus := types.StatusOpen
filter.Status = &openStatus
}
// Build filter for gates
gateType := types.TypeGate
filter := types.IssueFilter{
IssueType: &gateType,
}
if !showAll {
openStatus := types.StatusOpen
filter.Status = &openStatus
}
var err error
issues, err = store.SearchIssues(ctx, "", filter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing gates: %v\n", err)
os.Exit(1)
}
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
issues, err := store.SearchIssues(ctx, "", filter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing gates: %v\n", err)
os.Exit(1)
}
@@ -373,58 +338,47 @@ var gateCloseCmd = &cobra.Command{
reason = "Gate closed"
}
var closedGate *types.Issue
var gateID string
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateClose(&rpc.GateCloseArgs{
ID: args[0],
Reason: reason,
})
if err != nil {
FatalError("gate close: %v", err)
// Gate close requires direct store access
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate close requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate close %s\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
if err := json.Unmarshal(resp.Data, &closedGate); err != nil {
FatalError("failed to parse gate: %v", err)
}
gateID = closedGate.ID
} else if store != nil {
var err error
gateID, err = utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Verify it's a gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
if err := store.CloseIssue(ctx, gateID, reason, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error closing gate: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
closedGate, _ = store.GetIssue(ctx, gateID)
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Verify it's a gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
if err := store.CloseIssue(ctx, gateID, reason, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error closing gate: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
closedGate, _ := store.GetIssue(ctx, gateID)
outputJSON(closedGate)
return
}
@@ -448,116 +402,87 @@ var gateWaitCmd = &cobra.Command{
os.Exit(1)
}
var addedCount int
var gateID string
var newWaiters []string
// Try daemon first, fall back to direct store access
if daemonClient != nil {
resp, err := daemonClient.GateWait(&rpc.GateWaitArgs{
ID: args[0],
Waiters: notifyAddrs,
})
if err != nil {
FatalError("gate wait: %v", err)
// Gate wait requires direct store access for now
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: gate wait requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon gate wait %s --notify ...\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
var result rpc.GateWaitResult
if err := json.Unmarshal(resp.Data, &result); err != nil {
FatalError("failed to parse gate wait result: %v", err)
}
addedCount = result.AddedCount
gateID = args[0] // Use the input ID for display
// For daemon mode, we don't know exactly which waiters were added
// Just report the count
newWaiters = nil
} else if store != nil {
var err error
gateID, err = utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Get existing gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
if gate.Status == types.StatusClosed {
fmt.Fprintf(os.Stderr, "Error: gate %s is already closed\n", gateID)
os.Exit(1)
}
// Add new waiters (avoiding duplicates)
waiterSet := make(map[string]bool)
for _, w := range gate.Waiters {
waiterSet[w] = true
}
for _, addr := range notifyAddrs {
if !waiterSet[addr] {
newWaiters = append(newWaiters, addr)
waiterSet[addr] = true
}
}
addedCount = len(newWaiters)
if addedCount == 0 {
fmt.Println("All specified waiters are already registered on this gate")
return
}
// Update waiters - need to use SQLite directly for Waiters field
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: gate wait requires SQLite storage\n")
os.Exit(1)
}
allWaiters := append(gate.Waiters, newWaiters...)
waitersJSON, _ := json.Marshal(allWaiters)
// Use raw SQL to update the waiters field
_, err = sqliteStore.UnderlyingDB().ExecContext(ctx, `UPDATE issues SET waiters = ?, updated_at = ? WHERE id = ?`,
string(waitersJSON), time.Now(), gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding waiters: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
updatedGate, _ := store.GetIssue(ctx, gateID)
outputJSON(updatedGate)
return
}
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
if addedCount == 0 {
gateID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Get existing gate
gate, err := store.GetIssue(ctx, gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if gate == nil {
fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
os.Exit(1)
}
if gate.IssueType != types.TypeGate {
fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
os.Exit(1)
}
if gate.Status == types.StatusClosed {
fmt.Fprintf(os.Stderr, "Error: gate %s is already closed\n", gateID)
os.Exit(1)
}
// Add new waiters (avoiding duplicates)
waiterSet := make(map[string]bool)
for _, w := range gate.Waiters {
waiterSet[w] = true
}
newWaiters := []string{}
for _, addr := range notifyAddrs {
if !waiterSet[addr] {
newWaiters = append(newWaiters, addr)
waiterSet[addr] = true
}
}
if len(newWaiters) == 0 {
fmt.Println("All specified waiters are already registered on this gate")
return
}
// Update waiters - need to use SQLite directly for Waiters field
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: gate wait requires SQLite storage\n")
os.Exit(1)
}
allWaiters := append(gate.Waiters, newWaiters...)
waitersJSON, _ := json.Marshal(allWaiters)
// Use raw SQL to update the waiters field
_, err = sqliteStore.UnderlyingDB().ExecContext(ctx, `UPDATE issues SET waiters = ?, updated_at = ? WHERE id = ?`,
string(waitersJSON), time.Now(), gateID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding waiters: %v\n", err)
os.Exit(1)
}
markDirtyAndScheduleFlush()
if jsonOutput {
// For daemon mode, output the result
outputJSON(map[string]interface{}{"added_count": addedCount, "gate_id": gateID})
updatedGate, _ := store.GetIssue(ctx, gateID)
outputJSON(updatedGate)
return
}
fmt.Printf("%s Added %d waiter(s) to gate %s\n", ui.RenderPass("✓"), addedCount, gateID)
fmt.Printf("%s Added waiter(s) to gate %s:\n", ui.RenderPass("✓"), gateID)
for _, addr := range newWaiters {
fmt.Printf(" + %s\n", addr)
}

View File

@@ -84,92 +84,6 @@ func TestImportMultiPartIDs(t *testing.T) {
}
}
// TestImportMultiHyphenPrefix tests GH#422: importing with multi-hyphen prefixes
// like "asianops-audit-" should not cause false positive prefix mismatch errors.
func TestImportMultiHyphenPrefix(t *testing.T) {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, ".beads", "beads.db")
// Create database with multi-hyphen prefix "asianops-audit"
st := newTestStoreWithPrefix(t, dbPath, "asianops-audit")
ctx := context.Background()
// Create issues with hash-like suffixes that could be mistaken for words
// The key is that "test", "task", "demo" look like English words (4+ chars, no digits)
// which previously caused ExtractIssuePrefix to fall back to first hyphen
issues := []*types.Issue{
{
ID: "asianops-audit-sa0",
Title: "Issue with short hash suffix",
Description: "Short hash suffix should work",
Status: "open",
Priority: 1,
IssueType: "task",
},
{
ID: "asianops-audit-test",
Title: "Issue with word-like suffix",
Description: "Word-like suffix 'test' was causing false positive",
Status: "open",
Priority: 1,
IssueType: "task",
},
{
ID: "asianops-audit-task",
Title: "Another word-like suffix",
Description: "Word-like suffix 'task' was also problematic",
Status: "open",
Priority: 1,
IssueType: "task",
},
{
ID: "asianops-audit-demo",
Title: "Demo issue",
Description: "Word-like suffix 'demo'",
Status: "open",
Priority: 1,
IssueType: "task",
},
}
// Import should succeed without prefix mismatch errors
opts := ImportOptions{
DryRun: false,
SkipUpdate: false,
Strict: false,
}
result, err := importIssuesCore(ctx, dbPath, st, issues, opts)
if err != nil {
t.Fatalf("Import failed: %v", err)
}
// GH#422: Should NOT detect prefix mismatch
if result.PrefixMismatch {
t.Errorf("Import incorrectly detected prefix mismatch for multi-hyphen prefix")
t.Logf("Expected prefix: asianops-audit")
t.Logf("Mismatched prefixes detected: %v", result.MismatchPrefixes)
}
// All issues should be created
if result.Created != 4 {
t.Errorf("Expected 4 issues created, got %d", result.Created)
}
// Verify issues exist in database
for _, issue := range issues {
dbIssue, err := st.GetIssue(ctx, issue.ID)
if err != nil {
t.Errorf("Failed to get issue %s: %v", issue.ID, err)
continue
}
if dbIssue.Title != issue.Title {
t.Errorf("Issue %s title mismatch: got %q, want %q", issue.ID, dbIssue.Title, issue.Title)
}
}
}
// TestDetectPrefixFromIssues tests the detectPrefixFromIssues function
// with multi-part IDs
func TestDetectPrefixFromIssues(t *testing.T) {

View File

@@ -33,8 +33,8 @@ and database file. Optionally specify a custom issue prefix.
With --no-db: creates .beads/ directory and issues.jsonl file instead of SQLite database.
With --stealth: configures per-repository git settings for invisible beads usage:
.git/info/exclude to prevent beads files from being committed
With --stealth: configures global git settings for invisible beads usage:
Global gitignore to prevent beads files from being committed
• Claude Code settings with bd onboard instruction
Perfect for personal use without affecting repo collaborators.`,
Run: func(cmd *cobra.Command, _ []string) {
@@ -1364,15 +1364,22 @@ func readFirstIssueFromGit(jsonlPath, gitRef string) (*types.Issue, error) {
return nil, nil
}
// setupStealthMode configures git settings for stealth operation
// Uses .git/info/exclude (per-repository) instead of global gitignore because:
// - Global gitignore doesn't support absolute paths (GitHub #704)
// - .git/info/exclude is designed for user-specific, repo-local ignores
// - Patterns are relative to repo root, so ".beads/" works correctly
// setupStealthMode configures global git settings for stealth operation
func setupStealthMode(verbose bool) error {
// Setup per-repository git exclude file
if err := setupGitExclude(verbose); err != nil {
return fmt.Errorf("failed to setup git exclude: %w", err)
homeDir, err := os.UserHomeDir()
if err != nil {
return fmt.Errorf("failed to get user home directory: %w", err)
}
// Get the absolute path of the current project
projectPath, err := os.Getwd()
if err != nil {
return fmt.Errorf("failed to get current working directory: %w", err)
}
// Setup global gitignore with project-specific paths
if err := setupGlobalGitIgnore(homeDir, projectPath, verbose); err != nil {
return fmt.Errorf("failed to setup global gitignore: %w", err)
}
// Setup claude settings
@@ -1382,7 +1389,7 @@ func setupStealthMode(verbose bool) error {
if verbose {
fmt.Printf("\n%s Stealth mode configured successfully!\n\n", ui.RenderPass("✓"))
fmt.Printf(" Git exclude: %s\n", ui.RenderAccent(".git/info/exclude configured"))
fmt.Printf(" Global gitignore: %s\n", ui.RenderAccent(projectPath+"/.beads/ ignored"))
fmt.Printf(" Claude settings: %s\n\n", ui.RenderAccent("configured with bd onboard instruction"))
fmt.Printf("Your beads setup is now %s - other repo collaborators won't see any beads-related files.\n\n", ui.RenderAccent("invisible"))
}
@@ -1390,80 +1397,7 @@ func setupStealthMode(verbose bool) error {
return nil
}
// setupGitExclude configures .git/info/exclude to ignore beads and claude files
// This is the correct approach for per-repository user-specific ignores (GitHub #704).
// Unlike global gitignore, patterns here are relative to the repo root.
func setupGitExclude(verbose bool) error {
// Find the .git directory (handles both regular repos and worktrees)
gitDir, err := exec.Command("git", "rev-parse", "--git-dir").Output()
if err != nil {
return fmt.Errorf("not a git repository")
}
gitDirPath := strings.TrimSpace(string(gitDir))
// Path to the exclude file
excludePath := filepath.Join(gitDirPath, "info", "exclude")
// Ensure the info directory exists
infoDir := filepath.Join(gitDirPath, "info")
if err := os.MkdirAll(infoDir, 0755); err != nil {
return fmt.Errorf("failed to create git info directory: %w", err)
}
// Read existing exclude file if it exists
var existingContent string
// #nosec G304 - git config path
if content, err := os.ReadFile(excludePath); err == nil {
existingContent = string(content)
}
// Use relative patterns (these work correctly in .git/info/exclude)
beadsPattern := ".beads/"
claudePattern := ".claude/settings.local.json"
hasBeads := strings.Contains(existingContent, beadsPattern)
hasClaude := strings.Contains(existingContent, claudePattern)
if hasBeads && hasClaude {
if verbose {
fmt.Printf("Git exclude already configured for stealth mode\n")
}
return nil
}
// Append missing patterns
newContent := existingContent
if !strings.HasSuffix(newContent, "\n") && len(newContent) > 0 {
newContent += "\n"
}
if !hasBeads || !hasClaude {
newContent += "\n# Beads stealth mode (added by bd init --stealth)\n"
}
if !hasBeads {
newContent += beadsPattern + "\n"
}
if !hasClaude {
newContent += claudePattern + "\n"
}
// Write the updated exclude file
// #nosec G306 - config file needs 0644
if err := os.WriteFile(excludePath, []byte(newContent), 0644); err != nil {
return fmt.Errorf("failed to write git exclude file: %w", err)
}
if verbose {
fmt.Printf("Configured git exclude for stealth mode: %s\n", excludePath)
}
return nil
}
// setupGlobalGitIgnore configures global gitignore to ignore beads and claude files for a specific project
// DEPRECATED: This function uses absolute paths which don't work in gitignore (GitHub #704).
// Use setupGitExclude instead for new code.
func setupGlobalGitIgnore(homeDir string, projectPath string, verbose bool) error {
// Check if user already has a global gitignore file configured
cmd := exec.Command("git", "config", "--global", "core.excludesfile")

View File

@@ -74,10 +74,11 @@ This command:
"error": "no_beads_directory",
"message": "No .beads directory found. Run 'bd init' first.",
})
os.Exit(1)
} else {
FatalErrorWithHint("no .beads directory found", "run 'bd init' to initialize bd")
fmt.Fprintf(os.Stderr, "Error: no .beads directory found\n")
fmt.Fprintf(os.Stderr, "Hint: run 'bd init' to initialize bd\n")
}
os.Exit(1)
}
// Load config to get target database name (respects user's config.json)
@@ -102,10 +103,10 @@ This command:
"error": "detection_failed",
"message": err.Error(),
})
os.Exit(1)
} else {
FatalError("%v", err)
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
}
os.Exit(1)
}
if len(databases) == 0 {
@@ -173,15 +174,14 @@ This command:
"message": "Multiple old database files found",
"databases": formatDBList(oldDBs),
})
os.Exit(1)
} else {
fmt.Fprintf(os.Stderr, "Error: multiple old database files found:\n")
for _, db := range oldDBs {
fmt.Fprintf(os.Stderr, " - %s (version: %s)\n", filepath.Base(db.path), db.version)
}
fmt.Fprintf(os.Stderr, "\nPlease manually rename the correct database to %s and remove others.\n", cfg.Database)
os.Exit(1)
}
os.Exit(1)
} else if currentDB != nil && currentDB.version != Version {
// Update version metadata
needsVersionUpdate = true

View File

@@ -227,9 +227,9 @@ func runMolBond(cmd *cobra.Command, args []string) {
// Compound protos are templates - always use permanent storage
result, err = bondProtoProto(ctx, store, issueA, issueB, bondType, customTitle, actor)
case aIsProto && !bIsProto:
result, err = bondProtoMol(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor, pour)
result, err = bondProtoMol(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor)
case !aIsProto && bIsProto:
result, err = bondMolProto(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor, pour)
result, err = bondMolProto(ctx, targetStore, issueA, issueB, bondType, vars, childRef, actor)
default:
result, err = bondMolMol(ctx, targetStore, issueA, issueB, bondType, actor)
}
@@ -366,7 +366,7 @@ func bondProtoProto(ctx context.Context, s storage.Storage, protoA, protoB *type
// bondProtoMol bonds a proto to an existing molecule by spawning the proto.
// If childRef is provided, generates custom IDs like "parent.childref" (dynamic bonding).
func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, pour bool) (*BondResult, error) {
func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string) (*BondResult, error) {
// Load proto subgraph
subgraph, err := loadTemplateSubgraph(ctx, s, proto.ID)
if err != nil {
@@ -389,7 +389,7 @@ func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issu
opts := CloneOptions{
Vars: vars,
Actor: actorName,
Wisp: !pour, // wisp by default, but --pour makes persistent (bd-l7y3)
Wisp: true, // wisp by default for molecule execution - bd-2vh3
}
// Dynamic bonding: use custom IDs if childRef is provided
@@ -444,9 +444,9 @@ func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issu
}
// bondMolProto bonds a molecule to a proto (symmetric with bondProtoMol)
func bondMolProto(ctx context.Context, s storage.Storage, mol, proto *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, pour bool) (*BondResult, error) {
func bondMolProto(ctx context.Context, s storage.Storage, mol, proto *types.Issue, bondType string, vars map[string]string, childRef string, actorName string) (*BondResult, error) {
// Same as bondProtoMol but with arguments swapped
return bondProtoMol(ctx, s, proto, mol, bondType, vars, childRef, actorName, pour)
return bondProtoMol(ctx, s, proto, mol, bondType, vars, childRef, actorName)
}
// bondMolMol bonds two molecules together

View File

@@ -6,8 +6,6 @@ import (
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
@@ -27,15 +25,9 @@ This command:
After a crash or session reset, the pinned root issue ensures the agent
can resume from where it left off by checking 'bd ready'.
The --template-db flag enables cross-database spawning: read templates from
one database (e.g., main) while writing spawned instances to another (e.g., wisp).
This is essential for wisp molecule spawning where templates exist in the main
database but instances should be ephemeral.
Example:
bd mol run mol-version-bump --var version=1.2.0
bd mol run bd-qqc --var version=0.32.0 --var date=2025-01-01
bd --db .beads-wisp/beads.db mol run mol-patrol --template-db .beads/beads.db`,
bd mol run bd-qqc --var version=0.32.0 --var date=2025-01-01`,
Args: cobra.ExactArgs(1),
Run: runMolRun,
}
@@ -57,7 +49,6 @@ func runMolRun(cmd *cobra.Command, args []string) {
}
varFlags, _ := cmd.Flags().GetStringSlice("var")
templateDB, _ := cmd.Flags().GetString("template-db")
// Parse variables
vars := make(map[string]string)
@@ -70,42 +61,15 @@ func runMolRun(cmd *cobra.Command, args []string) {
vars[parts[0]] = parts[1]
}
// Determine which store to use for reading the template
// If --template-db is set, open a separate connection for reading the template
// This enables cross-database spawning (read from main, write to wisp)
//
// Auto-discovery: if --db contains ".beads-wisp" (wisp storage) but --template-db
// is not set, automatically use the main database for templates. This handles the
// common case of spawning patrol molecules from main DB into wisp storage.
templateStore := store
if templateDB == "" && strings.Contains(dbPath, ".beads-wisp") {
// Auto-discover main database for templates
templateDB = beads.FindDatabasePath()
if templateDB == "" {
fmt.Fprintf(os.Stderr, "Error: cannot find main database for templates\n")
fmt.Fprintf(os.Stderr, "Hint: specify --template-db explicitly\n")
os.Exit(1)
}
}
if templateDB != "" {
var err error
templateStore, err = sqlite.NewWithTimeout(ctx, templateDB, lockTimeout)
if err != nil {
fmt.Fprintf(os.Stderr, "Error opening template database %s: %v\n", templateDB, err)
os.Exit(1)
}
defer templateStore.Close()
}
// Resolve molecule ID from template store
moleculeID, err := utils.ResolvePartialID(ctx, templateStore, args[0])
// Resolve molecule ID
moleculeID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error resolving molecule ID %s: %v\n", args[0], err)
os.Exit(1)
}
// Load the molecule subgraph from template store
subgraph, err := loadTemplateSubgraph(ctx, templateStore, moleculeID)
// Load the molecule subgraph
subgraph, err := loadTemplateSubgraph(ctx, store, moleculeID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error loading molecule: %v\n", err)
os.Exit(1)
@@ -168,7 +132,6 @@ func runMolRun(cmd *cobra.Command, args []string) {
func init() {
molRunCmd.Flags().StringSlice("var", []string{}, "Variable substitution (key=value)")
molRunCmd.Flags().String("template-db", "", "Database to read templates from (enables cross-database spawning)")
molCmd.AddCommand(molRunCmd)
}

View File

@@ -219,7 +219,7 @@ func runMolSpawn(cmd *cobra.Command, args []string) {
}
for _, attach := range attachments {
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor, pour)
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor)
if err != nil {
fmt.Fprintf(os.Stderr, "Error attaching %s: %v\n", attach.id, err)
os.Exit(1)

View File

@@ -343,7 +343,7 @@ func TestBondProtoMol(t *testing.T) {
// Bond proto to molecule
vars := map[string]string{"name": "auth-feature"}
result, err := bondProtoMol(ctx, store, proto, mol, types.BondTypeSequential, vars, "", "test", false)
result, err := bondProtoMol(ctx, store, proto, mol, types.BondTypeSequential, vars, "", "test")
if err != nil {
t.Fatalf("bondProtoMol failed: %v", err)
}
@@ -840,7 +840,7 @@ func TestSpawnWithBasicAttach(t *testing.T) {
}
// Attach the second proto (simulating --attach flag behavior)
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test", false)
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test")
if err != nil {
t.Fatalf("Failed to bond attachment: %v", err)
}
@@ -945,12 +945,12 @@ func TestSpawnWithMultipleAttachments(t *testing.T) {
}
// Attach both protos (simulating --attach A --attach B)
bondResultA, err := bondProtoMol(ctx, s, attachA, spawnedMol, types.BondTypeSequential, nil, "", "test", false)
bondResultA, err := bondProtoMol(ctx, s, attachA, spawnedMol, types.BondTypeSequential, nil, "", "test")
if err != nil {
t.Fatalf("Failed to bond attachA: %v", err)
}
bondResultB, err := bondProtoMol(ctx, s, attachB, spawnedMol, types.BondTypeSequential, nil, "", "test", false)
bondResultB, err := bondProtoMol(ctx, s, attachB, spawnedMol, types.BondTypeSequential, nil, "", "test")
if err != nil {
t.Fatalf("Failed to bond attachB: %v", err)
}
@@ -1063,7 +1063,7 @@ func TestSpawnAttachTypes(t *testing.T) {
}
// Bond with specified type
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, tt.bondType, nil, "", "test", false)
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, tt.bondType, nil, "", "test")
if err != nil {
t.Fatalf("Failed to bond: %v", err)
}
@@ -1228,7 +1228,7 @@ func TestSpawnVariableAggregation(t *testing.T) {
// Bond attachment with same variables
spawnedMol, _ := s.GetIssue(ctx, spawnResult.NewEpicID)
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test", false)
bondResult, err := bondProtoMol(ctx, s, attachProto, spawnedMol, types.BondTypeSequential, vars, "", "test")
if err != nil {
t.Fatalf("Failed to bond: %v", err)
}
@@ -2238,7 +2238,7 @@ func TestBondProtoMolWithRef(t *testing.T) {
// Bond proto to patrol with custom child ref
vars := map[string]string{"polecat_name": "ace"}
childRef := "arm-{{polecat_name}}"
result, err := bondProtoMol(ctx, s, protoRoot, patrol, types.BondTypeSequential, vars, childRef, "test", false)
result, err := bondProtoMol(ctx, s, protoRoot, patrol, types.BondTypeSequential, vars, childRef, "test")
if err != nil {
t.Fatalf("bondProtoMol failed: %v", err)
}
@@ -2309,14 +2309,14 @@ func TestBondProtoMolMultipleArms(t *testing.T) {
// Bond arm-ace
varsAce := map[string]string{"name": "ace"}
resultAce, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsAce, "arm-{{name}}", "test", false)
resultAce, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsAce, "arm-{{name}}", "test")
if err != nil {
t.Fatalf("bondProtoMol (ace) failed: %v", err)
}
// Bond arm-nux
varsNux := map[string]string{"name": "nux"}
resultNux, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsNux, "arm-{{name}}", "test", false)
resultNux, err := bondProtoMol(ctx, s, proto, patrol, types.BondTypeParallel, varsNux, "arm-{{name}}", "test")
if err != nil {
t.Fatalf("bondProtoMol (nux) failed: %v", err)
}

View File

@@ -200,7 +200,7 @@ func runPour(cmd *cobra.Command, args []string) {
}
for _, attach := range attachments {
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor, true)
bondResult, err := bondProtoMol(ctx, store, attach.issue, spawnedMol, attachType, vars, "", actor)
if err != nil {
fmt.Fprintf(os.Stderr, "Error attaching %s: %v\n", attach.id, err)
os.Exit(1)

View File

@@ -26,9 +26,14 @@ Examples:
bd search "database" --label backend --limit 10
bd search --query "performance" --assignee alice
bd search "bd-5q" # Search by partial ID
bd search "security" --priority-min 0 --priority-max 2
bd search "security" --priority 1 # Exact priority match
bd search "security" --priority-min 0 --priority-max 2 # Priority range
bd search "bug" --created-after 2025-01-01
bd search "refactor" --updated-after 2025-01-01 --priority-min 1
bd search "bug" --desc-contains "authentication" # Search in description
bd search "" --empty-description # Issues without description
bd search "" --no-assignee # Unassigned issues
bd search "" --no-labels # Issues without labels
bd search "bug" --sort priority
bd search "task" --sort created --reverse`,
Run: func(cmd *cobra.Command, args []string) {
@@ -41,9 +46,31 @@ Examples:
query = queryFlag
}
// If no query provided, show help
if query == "" {
fmt.Fprintf(os.Stderr, "Error: search query is required\n")
// Check if any filter flags are set (allows empty query with filters)
hasFilters := cmd.Flags().Changed("status") ||
cmd.Flags().Changed("priority") ||
cmd.Flags().Changed("assignee") ||
cmd.Flags().Changed("type") ||
cmd.Flags().Changed("label") ||
cmd.Flags().Changed("label-any") ||
cmd.Flags().Changed("created-after") ||
cmd.Flags().Changed("created-before") ||
cmd.Flags().Changed("updated-after") ||
cmd.Flags().Changed("updated-before") ||
cmd.Flags().Changed("closed-after") ||
cmd.Flags().Changed("closed-before") ||
cmd.Flags().Changed("priority-min") ||
cmd.Flags().Changed("priority-max") ||
cmd.Flags().Changed("title-contains") ||
cmd.Flags().Changed("desc-contains") ||
cmd.Flags().Changed("notes-contains") ||
cmd.Flags().Changed("empty-description") ||
cmd.Flags().Changed("no-assignee") ||
cmd.Flags().Changed("no-labels")
// If no query and no filters provided, show help
if query == "" && !hasFilters {
fmt.Fprintf(os.Stderr, "Error: search query or filter is required\n")
if err := cmd.Help(); err != nil {
fmt.Fprintf(os.Stderr, "Error displaying help: %v\n", err)
}
@@ -61,6 +88,11 @@ Examples:
sortBy, _ := cmd.Flags().GetString("sort")
reverse, _ := cmd.Flags().GetBool("reverse")
// Pattern matching flags
titleContains, _ := cmd.Flags().GetString("title-contains")
descContains, _ := cmd.Flags().GetString("desc-contains")
notesContains, _ := cmd.Flags().GetString("notes-contains")
// Date range flags
createdAfter, _ := cmd.Flags().GetString("created-after")
createdBefore, _ := cmd.Flags().GetString("created-before")
@@ -69,6 +101,11 @@ Examples:
closedAfter, _ := cmd.Flags().GetString("closed-after")
closedBefore, _ := cmd.Flags().GetString("closed-before")
// Empty/null check flags
emptyDesc, _ := cmd.Flags().GetBool("empty-description")
noAssignee, _ := cmd.Flags().GetBool("no-assignee")
noLabels, _ := cmd.Flags().GetBool("no-labels")
// Priority range flags
priorityMinStr, _ := cmd.Flags().GetString("priority-min")
priorityMaxStr, _ := cmd.Flags().GetString("priority-max")
@@ -104,6 +141,39 @@ Examples:
filter.LabelsAny = labelsAny
}
// Exact priority match (use Changed() to properly handle P0)
if cmd.Flags().Changed("priority") {
priorityStr, _ := cmd.Flags().GetString("priority")
priority, err := validation.ValidatePriority(priorityStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
filter.Priority = &priority
}
// Pattern matching
if titleContains != "" {
filter.TitleContains = titleContains
}
if descContains != "" {
filter.DescriptionContains = descContains
}
if notesContains != "" {
filter.NotesContains = notesContains
}
// Empty/null checks
if emptyDesc {
filter.EmptyDescription = true
}
if noAssignee {
filter.NoAssignee = true
}
if noLabels {
filter.NoLabels = true
}
// Date ranges
if createdAfter != "" {
t, err := parseTimeFlag(createdAfter)
@@ -200,6 +270,21 @@ Examples:
listArgs.LabelsAny = labelsAny
}
// Exact priority match
if filter.Priority != nil {
listArgs.Priority = filter.Priority
}
// Pattern matching
listArgs.TitleContains = titleContains
listArgs.DescriptionContains = descContains
listArgs.NotesContains = notesContains
// Empty/null checks
listArgs.EmptyDescription = filter.EmptyDescription
listArgs.NoAssignee = filter.NoAssignee
listArgs.NoLabels = filter.NoLabels
// Date ranges
if filter.CreatedAfter != nil {
listArgs.CreatedAfter = filter.CreatedAfter.Format(time.RFC3339)
@@ -372,6 +457,7 @@ func outputSearchResults(issues []*types.Issue, query string, longFormat bool) {
func init() {
searchCmd.Flags().String("query", "", "Search query (alternative to positional argument)")
searchCmd.Flags().StringP("status", "s", "", "Filter by status (open, in_progress, blocked, deferred, closed)")
registerPriorityFlag(searchCmd, "")
searchCmd.Flags().StringP("assignee", "a", "", "Filter by assignee")
searchCmd.Flags().StringP("type", "t", "", "Filter by type (bug, feature, task, epic, chore, merge-request, molecule, gate)")
searchCmd.Flags().StringSliceP("label", "l", []string{}, "Filter by labels (AND: must have ALL)")
@@ -381,6 +467,11 @@ func init() {
searchCmd.Flags().String("sort", "", "Sort by field: priority, created, updated, closed, status, id, title, type, assignee")
searchCmd.Flags().BoolP("reverse", "r", false, "Reverse sort order")
// Pattern matching flags
searchCmd.Flags().String("title-contains", "", "Filter by title substring (case-insensitive)")
searchCmd.Flags().String("desc-contains", "", "Filter by description substring (case-insensitive)")
searchCmd.Flags().String("notes-contains", "", "Filter by notes substring (case-insensitive)")
// Date range flags
searchCmd.Flags().String("created-after", "", "Filter issues created after date (YYYY-MM-DD or RFC3339)")
searchCmd.Flags().String("created-before", "", "Filter issues created before date (YYYY-MM-DD or RFC3339)")
@@ -389,6 +480,11 @@ func init() {
searchCmd.Flags().String("closed-after", "", "Filter issues closed after date (YYYY-MM-DD or RFC3339)")
searchCmd.Flags().String("closed-before", "", "Filter issues closed before date (YYYY-MM-DD or RFC3339)")
// Empty/null check flags
searchCmd.Flags().Bool("empty-description", false, "Filter issues with empty or missing description")
searchCmd.Flags().Bool("no-assignee", false, "Filter issues with no assignee")
searchCmd.Flags().Bool("no-labels", false, "Filter issues with no labels")
// Priority range flags
searchCmd.Flags().String("priority-min", "", "Filter by minimum priority (inclusive, 0-4 or P0-P4)")
searchCmd.Flags().String("priority-max", "", "Filter by maximum priority (inclusive, 0-4 or P0-P4)")

View File

@@ -972,6 +972,10 @@ var closeCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
CheckReadonly("close")
reason, _ := cmd.Flags().GetString("reason")
// Check --resolution alias if --reason not provided
if reason == "" {
reason, _ = cmd.Flags().GetString("resolution")
}
if reason == "" {
reason = "Closed"
}
@@ -1053,6 +1057,8 @@ var closeCmd = &cobra.Command{
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, &issue)
}
// Run config-based close hooks (bd-g4b4)
hooks.RunConfigCloseHooks(ctx, &issue)
if jsonOutput {
closedIssues = append(closedIssues, &issue)
}
@@ -1105,8 +1111,12 @@ var closeCmd = &cobra.Command{
// Run close hook (bd-kwro.8)
closedIssue, _ := store.GetIssue(ctx, id)
if closedIssue != nil && hookRunner != nil {
hookRunner.Run(hooks.EventClose, closedIssue)
if closedIssue != nil {
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, closedIssue)
}
// Run config-based close hooks (bd-g4b4)
hooks.RunConfigCloseHooks(ctx, closedIssue)
}
if jsonOutput {
@@ -1411,6 +1421,8 @@ func init() {
rootCmd.AddCommand(editCmd)
closeCmd.Flags().StringP("reason", "r", "", "Reason for closing")
closeCmd.Flags().String("resolution", "", "Alias for --reason (Jira CLI convention)")
_ = closeCmd.Flags().MarkHidden("resolution") // Hidden alias for agent/CLI ergonomics
closeCmd.Flags().Bool("json", false, "Output JSON format")
closeCmd.Flags().BoolP("force", "f", false, "Force close pinned issues")
closeCmd.Flags().Bool("continue", false, "Auto-advance to next step in molecule")

File diff suppressed because it is too large Load Diff

View File

@@ -1,285 +0,0 @@
package main
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/steveyegge/beads/internal/syncbranch"
)
// getCurrentBranch returns the name of the currently checked-out git branch.
// Uses symbolic-ref instead of rev-parse so it also works in fresh repos
// that have no commits yet (bd-flil).
func getCurrentBranch(ctx context.Context) (string, error) {
	out, err := exec.CommandContext(ctx, "git", "symbolic-ref", "--short", "HEAD").Output()
	if err != nil {
		return "", fmt.Errorf("failed to get current branch: %w", err)
	}
	branch := strings.TrimSpace(string(out))
	return branch, nil
}
// getSyncBranch returns the configured sync branch name, initializing the
// store first if necessary. Returns an error when sync.branch is not set.
func getSyncBranch(ctx context.Context) (string, error) {
	// The store must be active before reading config.
	if err := ensureStoreActive(); err != nil {
		return "", fmt.Errorf("failed to initialize store: %w", err)
	}
	name, err := syncbranch.Get(ctx, store)
	switch {
	case err != nil:
		return "", fmt.Errorf("failed to get sync branch config: %w", err)
	case name == "":
		return "", fmt.Errorf("sync.branch not configured (run 'bd config set sync.branch <branch-name>')")
	}
	return name, nil
}
// showSyncStatus shows the diff between the sync branch and the current
// (main) branch: commits unique to each side, then file-level differences
// in .beads/issues.jsonl. Returns an error if not in a git repo, the sync
// branch is unconfigured, or the sync branch does not exist locally.
func showSyncStatus(ctx context.Context) error {
	if !isGitRepo() {
		return fmt.Errorf("not in a git repository")
	}
	currentBranch, err := getCurrentBranch(ctx)
	if err != nil {
		return err
	}
	syncBranch, err := getSyncBranch(ctx)
	if err != nil {
		return err
	}
	// Check if sync branch exists
	checkCmd := exec.CommandContext(ctx, "git", "show-ref", "--verify", "--quiet", "refs/heads/"+syncBranch)
	if err := checkCmd.Run(); err != nil {
		return fmt.Errorf("sync branch '%s' does not exist", syncBranch)
	}
	fmt.Printf("Current branch: %s\n", currentBranch)
	fmt.Printf("Sync branch: %s\n\n", syncBranch)
	// Show commits unique to each side (extracted helper removes the
	// previously duplicated log-printing code).
	fmt.Println("Commits in sync branch not in main:")
	if err := printCommitRange(ctx, currentBranch, syncBranch); err != nil {
		return err
	}
	fmt.Println("\nCommits in main not in sync branch:")
	if err := printCommitRange(ctx, syncBranch, currentBranch); err != nil {
		return err
	}
	// Show file diff for .beads/issues.jsonl
	fmt.Println("\nFile differences in .beads/issues.jsonl:")
	diffCmd := exec.CommandContext(ctx, "git", "diff", currentBranch+"..."+syncBranch, "--", ".beads/issues.jsonl")
	diffOutput, err := diffCmd.CombinedOutput()
	if err != nil {
		// git diff exits non-zero when differences exist, which is fine;
		// only an error with no output is a real failure.
		if len(diffOutput) == 0 {
			return fmt.Errorf("failed to get diff: %w", err)
		}
	}
	if len(strings.TrimSpace(string(diffOutput))) == 0 {
		fmt.Println(" (no differences)")
	} else {
		fmt.Print(string(diffOutput))
	}
	return nil
}

// printCommitRange prints `git log --oneline base..head` (commits reachable
// from head but not base), or "(none)" when the range is empty.
func printCommitRange(ctx context.Context, base, head string) error {
	logCmd := exec.CommandContext(ctx, "git", "log", "--oneline", base+".."+head)
	logOutput, err := logCmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to get commit log: %w\n%s", err, logOutput)
	}
	if len(strings.TrimSpace(string(logOutput))) == 0 {
		fmt.Println(" (none)")
	} else {
		fmt.Print(string(logOutput))
	}
	return nil
}
// mergeSyncBranch merges the sync branch back to the main branch.
// When dryRun is true, it only reports which commits would be merged.
// Preconditions checked in order: inside a git repo, sync branch name
// resolvable, sync branch exists locally, and the working tree is clean.
func mergeSyncBranch(ctx context.Context, dryRun bool) error {
	if !isGitRepo() {
		return fmt.Errorf("not in a git repository")
	}
	currentBranch, err := getCurrentBranch(ctx)
	if err != nil {
		return err
	}
	syncBranch, err := getSyncBranch(ctx)
	if err != nil {
		return err
	}
	// Check if sync branch exists
	checkCmd := exec.CommandContext(ctx, "git", "show-ref", "--verify", "--quiet", "refs/heads/"+syncBranch)
	if err := checkCmd.Run(); err != nil {
		return fmt.Errorf("sync branch '%s' does not exist", syncBranch)
	}
	// Check if there are uncommitted changes; merging over a dirty tree
	// could clobber local work, so bail out early.
	statusCmd := exec.CommandContext(ctx, "git", "status", "--porcelain")
	statusOutput, err := statusCmd.Output()
	if err != nil {
		return fmt.Errorf("failed to check git status: %w", err)
	}
	if len(strings.TrimSpace(string(statusOutput))) > 0 {
		return fmt.Errorf("uncommitted changes detected - commit or stash them first")
	}
	fmt.Printf("Merging sync branch '%s' into '%s'...\n", syncBranch, currentBranch)
	if dryRun {
		fmt.Println("→ [DRY RUN] Would merge sync branch")
		// Show what would be merged. Errors intentionally ignored:
		// this listing is informational only.
		logCmd := exec.CommandContext(ctx, "git", "log", "--oneline", currentBranch+".."+syncBranch)
		logOutput, _ := logCmd.CombinedOutput()
		if len(strings.TrimSpace(string(logOutput))) > 0 {
			fmt.Println("\nCommits that would be merged:")
			fmt.Print(string(logOutput))
		} else {
			fmt.Println("No commits to merge")
		}
		return nil
	}
	// Perform the merge
	mergeCmd := exec.CommandContext(ctx, "git", "merge", syncBranch, "-m", fmt.Sprintf("Merge sync branch '%s'", syncBranch))
	mergeOutput, err := mergeCmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("merge failed: %w\n%s", err, mergeOutput)
	}
	fmt.Print(string(mergeOutput))
	fmt.Println("\n✓ Merge complete")
	// Suggest next steps
	fmt.Println("\nNext steps:")
	fmt.Println("1. Review the merged changes")
	fmt.Println("2. Run 'bd sync --import-only' to sync the database with merged JSONL")
	fmt.Println("3. Run 'bd sync' to push changes to remote")
	return nil
}
// isExternalBeadsDir reports whether beadsDir lives in a different git
// repository than the current working directory. Used to detect when
// BEADS_DIR points to a separate repository.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
func isExternalBeadsDir(ctx context.Context, beadsDir string) bool {
	cwdRoot, cwdErr := syncbranch.GetRepoRoot(ctx)
	if cwdErr != nil {
		// Can't determine the cwd's repo root; assume local.
		return false
	}
	beadsRoot, beadsErr := getRepoRootFromPath(ctx, beadsDir)
	if beadsErr != nil {
		// Can't determine the beads dir's repo root; assume local.
		return false
	}
	return beadsRoot != cwdRoot
}
// getRepoRootFromPath returns the git repository root for a given path.
// Unlike syncbranch.GetRepoRoot (which operates on the cwd), this works
// for any path by invoking git with -C.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
func getRepoRootFromPath(ctx context.Context, path string) (string, error) {
	out, err := exec.CommandContext(ctx, "git", "-C", path, "rev-parse", "--show-toplevel").Output()
	if err != nil {
		return "", fmt.Errorf("failed to get git root for %s: %w", path, err)
	}
	root := strings.TrimSpace(string(out))
	return root, nil
}
// commitToExternalBeadsRepo commits changes directly to an external beads repo.
// Used when BEADS_DIR points to a different git repository than cwd.
// This bypasses the worktree-based sync which fails when beads dir is external.
// Returns (true, nil) when a commit was made, (false, nil) when there was
// nothing to commit, and (true, err) when the commit succeeded but a
// requested push failed.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
func commitToExternalBeadsRepo(ctx context.Context, beadsDir, message string, push bool) (bool, error) {
	repoRoot, err := getRepoRootFromPath(ctx, beadsDir)
	if err != nil {
		return false, fmt.Errorf("failed to get repo root: %w", err)
	}
	// Stage beads files (use relative path from repo root)
	relBeadsDir, err := filepath.Rel(repoRoot, beadsDir)
	if err != nil {
		relBeadsDir = beadsDir // Fallback to absolute path
	}
	addCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "add", relBeadsDir)
	if output, err := addCmd.CombinedOutput(); err != nil {
		return false, fmt.Errorf("git add failed: %w\n%s", err, output)
	}
	// Check if there are staged changes: `diff --cached --quiet` exits 0
	// when the index is clean, i.e. there is nothing to commit.
	diffCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "diff", "--cached", "--quiet")
	if diffCmd.Run() == nil {
		return false, nil // No changes to commit
	}
	// Commit with config-based author and signing options
	if message == "" {
		message = fmt.Sprintf("bd sync: %s", time.Now().Format("2006-01-02 15:04:05"))
	}
	commitArgs := buildGitCommitArgs(repoRoot, message)
	commitCmd := exec.CommandContext(ctx, "git", commitArgs...)
	if output, err := commitCmd.CombinedOutput(); err != nil {
		return false, fmt.Errorf("git commit failed: %w\n%s", err, output)
	}
	// Push if requested; a hint is printed after 5s in case git is
	// blocked waiting for credentials or browser auth.
	if push {
		pushCmd := exec.CommandContext(ctx, "git", "-C", repoRoot, "push")
		if pushOutput, err := runGitCmdWithTimeoutMsg(ctx, pushCmd, "git push", 5*time.Second); err != nil {
			return true, fmt.Errorf("git push failed: %w\n%s", err, pushOutput)
		}
	}
	return true, nil
}
// pullFromExternalBeadsRepo runs `git pull` in an external beads repo.
// Used when BEADS_DIR points to a different git repository than cwd.
// A repo with no configured remote is silently skipped.
// Contributed by dand-oss (https://github.com/steveyegge/beads/pull/533)
func pullFromExternalBeadsRepo(ctx context.Context, beadsDir string) error {
	repoRoot, err := getRepoRootFromPath(ctx, beadsDir)
	if err != nil {
		return fmt.Errorf("failed to get repo root: %w", err)
	}
	// If listing remotes fails or yields nothing, there is nowhere
	// to pull from.
	remotes, err := exec.CommandContext(ctx, "git", "-C", repoRoot, "remote").Output()
	if err != nil || strings.TrimSpace(string(remotes)) == "" {
		return nil
	}
	if output, err := exec.CommandContext(ctx, "git", "-C", repoRoot, "pull").CombinedOutput(); err != nil {
		return fmt.Errorf("git pull failed: %w\n%s", err, output)
	}
	return nil
}

View File

@@ -1,395 +0,0 @@
package main
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/steveyegge/beads/internal/syncbranch"
"github.com/steveyegge/beads/internal/types"
)
// SyncIntegrityResult contains the results of a pre-sync integrity check.
// bd-hlsw.1: Pre-sync integrity check
type SyncIntegrityResult struct {
	ForcedPush       *ForcedPushCheck  `json:"forced_push,omitempty"`       // force-push divergence check
	PrefixMismatch   *PrefixMismatch   `json:"prefix_mismatch,omitempty"`   // issues whose ID prefix is wrong
	OrphanedChildren *OrphanedChildren `json:"orphaned_children,omitempty"` // children whose parent is missing
	HasProblems      bool              `json:"has_problems"`                // true when any check found a problem
}

// ForcedPushCheck detects if sync branch has diverged from remote.
type ForcedPushCheck struct {
	Detected  bool   `json:"detected"`             // true when local and remote histories diverged
	LocalRef  string `json:"local_ref,omitempty"`  // local sync-branch commit ref
	RemoteRef string `json:"remote_ref,omitempty"` // remote sync-branch commit ref
	Message   string `json:"message"`              // human-readable outcome of the check
}

// PrefixMismatch detects issues with wrong prefix in JSONL.
type PrefixMismatch struct {
	ConfiguredPrefix string   `json:"configured_prefix"`        // prefix from config (defaults to "bd")
	MismatchedIDs    []string `json:"mismatched_ids,omitempty"` // issue IDs not starting with the prefix
	Count            int      `json:"count"`                    // number of mismatched IDs
}

// OrphanedChildren detects issues with parent that doesn't exist.
type OrphanedChildren struct {
	OrphanedIDs []string `json:"orphaned_ids,omitempty"` // "child (parent: X)" descriptions
	Count       int      `json:"count"`                  // number of orphaned children
}
// showSyncIntegrityCheck performs pre-sync integrity checks without modifying state.
// bd-hlsw.1: Detects forced pushes, prefix mismatches, and orphaned children.
// Prints a human-readable report (plus JSON when --json is set), then exits
// with code 1 if any problem was detected.
func showSyncIntegrityCheck(ctx context.Context, jsonlPath string) {
	fmt.Println("Sync Integrity Check")
	fmt.Println("====================")
	result := &SyncIntegrityResult{}
	// Check 1: Detect forced pushes on sync branch
	forcedPush := checkForcedPush(ctx)
	result.ForcedPush = forcedPush
	if forcedPush.Detected {
		result.HasProblems = true
	}
	printForcedPushResult(forcedPush)
	// Check 2: Detect prefix mismatches in JSONL
	prefixMismatch, err := checkPrefixMismatch(ctx, jsonlPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Warning: prefix check failed: %v\n", err)
	} else {
		result.PrefixMismatch = prefixMismatch
		if prefixMismatch != nil && prefixMismatch.Count > 0 {
			result.HasProblems = true
		}
		printPrefixMismatchResult(prefixMismatch)
	}
	// Check 3: Detect orphaned children (parent issues that don't exist)
	orphaned, err := checkOrphanedChildrenInJSONL(jsonlPath)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Warning: orphaned check failed: %v\n", err)
	} else {
		result.OrphanedChildren = orphaned
		if orphaned != nil && orphaned.Count > 0 {
			result.HasProblems = true
		}
		printOrphanedChildrenResult(orphaned)
	}
	// Summary
	fmt.Println("\nSummary")
	fmt.Println("-------")
	if result.HasProblems {
		fmt.Println("Problems detected! Review above and consider:")
		if result.ForcedPush != nil && result.ForcedPush.Detected {
			fmt.Println(" - Force push: Reset local sync branch or use 'bd sync --from-main'")
		}
		if result.PrefixMismatch != nil && result.PrefixMismatch.Count > 0 {
			fmt.Println(" - Prefix mismatch: Use 'bd import --rename-on-import' to fix")
		}
		if result.OrphanedChildren != nil && result.OrphanedChildren.Count > 0 {
			fmt.Println(" - Orphaned children: Remove parent references or create missing parents")
		}
	} else {
		fmt.Println("No problems detected. Safe to sync.")
	}
	// BUG FIX: emit JSON before any exit. Previously os.Exit(1) ran before
	// this block, so --json callers got no JSON output exactly when
	// problems were detected.
	if jsonOutput {
		data, _ := json.MarshalIndent(result, "", " ")
		fmt.Println(string(data))
	}
	if result.HasProblems {
		os.Exit(1)
	}
}
// checkForcedPush detects if the sync branch has diverged from remote.
// This can happen when someone force-pushes to the sync branch.
// Always returns a non-nil result; Detected is set only when local and
// remote are on incomparable histories (neither is an ancestor of the
// other). All other outcomes are described in the Message field.
func checkForcedPush(ctx context.Context) *ForcedPushCheck {
	result := &ForcedPushCheck{
		Detected: false,
		Message:  "No sync branch configured or no remote",
	}
	// Get sync branch name (requires an active store to read config).
	if err := ensureStoreActive(); err != nil {
		return result
	}
	syncBranch, _ := syncbranch.Get(ctx, store)
	if syncBranch == "" {
		return result
	}
	// Check if sync branch exists locally
	checkLocalCmd := exec.CommandContext(ctx, "git", "show-ref", "--verify", "--quiet", "refs/heads/"+syncBranch)
	if checkLocalCmd.Run() != nil {
		result.Message = fmt.Sprintf("Sync branch '%s' does not exist locally", syncBranch)
		return result
	}
	// Get local ref
	localRefCmd := exec.CommandContext(ctx, "git", "rev-parse", syncBranch)
	localRefOutput, err := localRefCmd.Output()
	if err != nil {
		result.Message = "Failed to get local sync branch ref"
		return result
	}
	localRef := strings.TrimSpace(string(localRefOutput))
	result.LocalRef = localRef
	// Check if remote tracking branch exists. The remote name defaults to
	// "origin" but can be overridden via the sync.remote config key.
	remote := "origin"
	if configuredRemote, err := store.GetConfig(ctx, "sync.remote"); err == nil && configuredRemote != "" {
		remote = configuredRemote
	}
	// Get remote ref
	remoteRefCmd := exec.CommandContext(ctx, "git", "rev-parse", remote+"/"+syncBranch)
	remoteRefOutput, err := remoteRefCmd.Output()
	if err != nil {
		result.Message = fmt.Sprintf("Remote tracking branch '%s/%s' does not exist", remote, syncBranch)
		return result
	}
	remoteRef := strings.TrimSpace(string(remoteRefOutput))
	result.RemoteRef = remoteRef
	// If refs match, no divergence
	if localRef == remoteRef {
		result.Message = "Sync branch is in sync with remote"
		return result
	}
	// Check if local is ahead of remote (normal case):
	// merge-base --is-ancestor exits 0 when the first ref is an ancestor.
	aheadCmd := exec.CommandContext(ctx, "git", "merge-base", "--is-ancestor", remoteRef, localRef)
	if aheadCmd.Run() == nil {
		result.Message = "Local sync branch is ahead of remote (normal)"
		return result
	}
	// Check if remote is ahead of local (behind, needs pull)
	behindCmd := exec.CommandContext(ctx, "git", "merge-base", "--is-ancestor", localRef, remoteRef)
	if behindCmd.Run() == nil {
		result.Message = "Local sync branch is behind remote (needs pull)"
		return result
	}
	// If neither is ancestor, branches have diverged - likely a force push
	result.Detected = true
	result.Message = fmt.Sprintf("Sync branch has DIVERGED from remote! Local: %s, Remote: %s. This may indicate a force push on the remote.", localRef[:8], remoteRef[:8])
	return result
}
// printForcedPushResult renders the force-push section of the integrity report.
func printForcedPushResult(fp *ForcedPushCheck) {
	fmt.Println("1. Force Push Detection")
	status := "[OK]"
	if fp.Detected {
		status = "[PROBLEM]"
	}
	fmt.Printf(" %s %s\n", status, fp.Message)
	fmt.Println()
}
// checkPrefixMismatch detects issues in JSONL that don't match the configured prefix.
// A missing JSONL file is not an error: it simply yields zero mismatches.
func checkPrefixMismatch(ctx context.Context, jsonlPath string) (*PrefixMismatch, error) {
	out := &PrefixMismatch{MismatchedIDs: []string{}}
	// Resolve the configured issue prefix (falls back to "bd").
	if err := ensureStoreActive(); err != nil {
		return nil, err
	}
	prefix, err := store.GetConfig(ctx, "issue_prefix")
	if err != nil || prefix == "" {
		prefix = "bd" // Default
	}
	out.ConfiguredPrefix = prefix
	f, err := os.Open(jsonlPath) // #nosec G304 - controlled path
	if err != nil {
		if os.IsNotExist(err) {
			return out, nil // No JSONL, no mismatches
		}
		return nil, fmt.Errorf("failed to open JSONL: %w", err)
	}
	defer f.Close()
	want := prefix + "-"
	scanner := bufio.NewScanner(f)
	scanner.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
	for scanner.Scan() {
		raw := scanner.Bytes()
		if len(bytes.TrimSpace(raw)) == 0 {
			continue
		}
		var rec struct {
			ID string `json:"id"`
		}
		if json.Unmarshal(raw, &rec) != nil {
			continue // Skip malformed lines
		}
		// Record any ID that does not start with the configured prefix.
		if !strings.HasPrefix(rec.ID, want) {
			out.MismatchedIDs = append(out.MismatchedIDs, rec.ID)
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("failed to read JSONL: %w", err)
	}
	out.Count = len(out.MismatchedIDs)
	return out, nil
}
// printPrefixMismatchResult renders the prefix-mismatch section of the
// integrity report, listing at most the first 10 offending IDs.
func printPrefixMismatchResult(pm *PrefixMismatch) {
	fmt.Println("2. Prefix Mismatch Check")
	if pm == nil {
		fmt.Println(" [SKIP] Could not check prefix")
		fmt.Println()
		return
	}
	fmt.Printf(" Configured prefix: %s\n", pm.ConfiguredPrefix)
	if pm.Count == 0 {
		fmt.Println(" [OK] All issues have correct prefix")
		fmt.Println()
		return
	}
	fmt.Printf(" [PROBLEM] Found %d issue(s) with wrong prefix:\n", pm.Count)
	shown := min(pm.Count, 10)
	for _, id := range pm.MismatchedIDs[:shown] {
		fmt.Printf(" - %s\n", id)
	}
	if pm.Count > 10 {
		fmt.Printf(" ... and %d more\n", pm.Count-10)
	}
	fmt.Println()
}
// checkOrphanedChildrenInJSONL detects issues with parent references to non-existent issues.
// Tombstoned issues are ignored entirely; a missing JSONL file yields zero orphans.
func checkOrphanedChildrenInJSONL(jsonlPath string) (*OrphanedChildren, error) {
	out := &OrphanedChildren{OrphanedIDs: []string{}}
	f, err := os.Open(jsonlPath) // #nosec G304 - controlled path
	if err != nil {
		if os.IsNotExist(err) {
			return out, nil
		}
		return nil, fmt.Errorf("failed to open JSONL: %w", err)
	}
	defer f.Close()
	// Single pass over the file: collect live issue IDs and parent links.
	known := make(map[string]bool)
	parentOf := make(map[string]string) // child ID -> parent ID
	scanner := bufio.NewScanner(f)
	scanner.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
	for scanner.Scan() {
		raw := scanner.Bytes()
		if len(bytes.TrimSpace(raw)) == 0 {
			continue
		}
		var rec struct {
			ID     string `json:"id"`
			Parent string `json:"parent,omitempty"`
			Status string `json:"status"`
		}
		if json.Unmarshal(raw, &rec) != nil {
			continue
		}
		// Skip tombstones
		if rec.Status == string(types.StatusTombstone) {
			continue
		}
		known[rec.ID] = true
		if rec.Parent != "" {
			parentOf[rec.ID] = rec.Parent
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("failed to read JSONL: %w", err)
	}
	// Report every child whose parent was never seen.
	for child, parent := range parentOf {
		if !known[parent] {
			out.OrphanedIDs = append(out.OrphanedIDs, fmt.Sprintf("%s (parent: %s)", child, parent))
		}
	}
	out.Count = len(out.OrphanedIDs)
	return out, nil
}
// runGitCmdWithTimeoutMsg runs a git command and prints a helpful message if it takes too long.
// This helps when git operations hang waiting for credential/browser auth.
// The command is never killed; after timeoutDelay a hint is printed to
// stderr while the command keeps running. Returns the command's combined
// output and error.
func runGitCmdWithTimeoutMsg(ctx context.Context, cmd *exec.Cmd, cmdName string, timeoutDelay time.Duration) ([]byte, error) {
	// Use done channel to cleanly exit goroutine when command completes,
	// so the watcher never outlives this call.
	done := make(chan struct{})
	go func() {
		select {
		case <-time.After(timeoutDelay):
			fmt.Fprintf(os.Stderr, "⏳ %s is taking longer than expected (possibly waiting for authentication). If this hangs, check for a browser auth prompt or run 'git status' in another terminal.\n", cmdName)
		case <-done:
			// Command completed, exit cleanly
		case <-ctx.Done():
			// Context canceled, don't print message
		}
	}()
	output, err := cmd.CombinedOutput()
	close(done)
	return output, err
}
// printOrphanedChildrenResult renders the orphaned-children section of the
// integrity report, listing at most the first 10 offending issues.
func printOrphanedChildrenResult(oc *OrphanedChildren) {
	fmt.Println("3. Orphaned Children Check")
	if oc == nil {
		fmt.Println(" [SKIP] Could not check orphaned children")
		fmt.Println()
		return
	}
	if oc.Count == 0 {
		fmt.Println(" [OK] No orphaned children found")
		fmt.Println()
		return
	}
	fmt.Printf(" [PROBLEM] Found %d issue(s) with missing parent:\n", oc.Count)
	shown := min(oc.Count, 10)
	for _, entry := range oc.OrphanedIDs[:shown] {
		fmt.Printf(" - %s\n", entry)
	}
	if oc.Count > 10 {
		fmt.Printf(" ... and %d more\n", oc.Count-10)
	}
	fmt.Println()
}

View File

@@ -1,170 +0,0 @@
package main
import (
"cmp"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"slices"
"time"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
)
// exportToJSONL exports the database to JSONL format.
//
// With a running daemon the export is delegated over RPC. In direct mode the
// full issue set — including tombstones, so deletions propagate to other
// clones — is written atomically (temp file + rename), then dirty flags and
// sync metadata (content hash, last import time, db mtime) are refreshed.
// Failures after the rename are deliberately non-fatal warnings so a
// successful export is never undone by bookkeeping errors.
func exportToJSONL(ctx context.Context, jsonlPath string) error {
	// If daemon is running, use RPC
	if daemonClient != nil {
		exportArgs := &rpc.ExportArgs{
			JSONLPath: jsonlPath,
		}
		resp, err := daemonClient.Export(exportArgs)
		if err != nil {
			return fmt.Errorf("daemon export failed: %w", err)
		}
		if !resp.Success {
			return fmt.Errorf("daemon export error: %s", resp.Error)
		}
		return nil
	}
	// Direct mode: access store directly
	// Ensure store is initialized
	if err := ensureStoreActive(); err != nil {
		return fmt.Errorf("failed to initialize store: %w", err)
	}
	// Get all issues including tombstones for sync propagation (bd-rp4o fix)
	// Tombstones must be exported so they propagate to other clones and prevent resurrection
	issues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true})
	if err != nil {
		return fmt.Errorf("failed to get issues: %w", err)
	}
	// Safety check: prevent exporting empty database over non-empty JSONL
	// Note: The main bd-53c protection is the reverse ZFC check earlier in sync.go
	// which runs BEFORE export. Here we only block the most catastrophic case (empty DB)
	// to allow legitimate deletions.
	if len(issues) == 0 {
		existingCount, countErr := countIssuesInJSONL(jsonlPath)
		if countErr != nil {
			// If we can't read the file, it might not exist yet, which is fine
			if !os.IsNotExist(countErr) {
				fmt.Fprintf(os.Stderr, "Warning: failed to read existing JSONL: %v\n", countErr)
			}
		} else if existingCount > 0 {
			return fmt.Errorf("refusing to export empty database over non-empty JSONL file (database: 0 issues, JSONL: %d issues)", existingCount)
		}
	}
	// Sort by ID for consistent output
	slices.SortFunc(issues, func(a, b *types.Issue) int {
		return cmp.Compare(a.ID, b.ID)
	})
	// Populate dependencies for all issues (avoid N+1)
	allDeps, err := store.GetAllDependencyRecords(ctx)
	if err != nil {
		return fmt.Errorf("failed to get dependencies: %w", err)
	}
	for _, issue := range issues {
		issue.Dependencies = allDeps[issue.ID]
	}
	// Populate labels for all issues
	for _, issue := range issues {
		labels, err := store.GetLabels(ctx, issue.ID)
		if err != nil {
			return fmt.Errorf("failed to get labels for %s: %w", issue.ID, err)
		}
		issue.Labels = labels
	}
	// Populate comments for all issues
	for _, issue := range issues {
		comments, err := store.GetIssueComments(ctx, issue.ID)
		if err != nil {
			return fmt.Errorf("failed to get comments for %s: %w", issue.ID, err)
		}
		issue.Comments = comments
	}
	// Create temp file for atomic write
	dir := filepath.Dir(jsonlPath)
	base := filepath.Base(jsonlPath)
	tempFile, err := os.CreateTemp(dir, base+".tmp.*")
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	tempPath := tempFile.Name()
	defer func() {
		// Best-effort cleanup. After a successful rename, Remove is a no-op
		// (tempPath no longer exists) and the second Close is harmless.
		_ = tempFile.Close()
		_ = os.Remove(tempPath)
	}()
	// Write JSONL
	encoder := json.NewEncoder(tempFile)
	exportedIDs := make([]string, 0, len(issues))
	for _, issue := range issues {
		if err := encoder.Encode(issue); err != nil {
			return fmt.Errorf("failed to encode issue %s: %w", issue.ID, err)
		}
		exportedIDs = append(exportedIDs, issue.ID)
	}
	// Close the temp file before rename and CHECK the error: buffered writes
	// can fail at Close (e.g. disk full), and renaming a truncated temp file
	// would silently corrupt the JSONL. (Rename succeeding does NOT imply the
	// data was flushed.)
	if err := tempFile.Close(); err != nil {
		return fmt.Errorf("failed to close temp file: %w", err)
	}
	// Atomic replace
	if err := os.Rename(tempPath, jsonlPath); err != nil {
		return fmt.Errorf("failed to replace JSONL file: %w", err)
	}
	// Set appropriate file permissions (0600: rw-------)
	if err := os.Chmod(jsonlPath, 0600); err != nil {
		// Non-fatal warning
		fmt.Fprintf(os.Stderr, "Warning: failed to set file permissions: %v\n", err)
	}
	// Clear dirty flags for exported issues
	if err := store.ClearDirtyIssuesByID(ctx, exportedIDs); err != nil {
		// Non-fatal warning
		fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty flags: %v\n", err)
	}
	// Clear auto-flush state
	clearAutoFlushState()
	// Update jsonl_content_hash metadata to enable content-based staleness detection (bd-khnb fix)
	// After export, database and JSONL are in sync, so update hash to prevent unnecessary auto-import
	// Renamed from last_import_hash (bd-39o) - more accurate since updated on both import AND export
	if currentHash, err := computeJSONLHash(jsonlPath); err == nil {
		if err := store.SetMetadata(ctx, "jsonl_content_hash", currentHash); err != nil {
			// Non-fatal warning: Metadata update failures are intentionally non-fatal to prevent blocking
			// successful exports. System degrades gracefully to mtime-based staleness detection if metadata
			// is unavailable. This ensures export operations always succeed even if metadata storage fails.
			fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_content_hash: %v\n", err)
		}
		// Use RFC3339Nano for nanosecond precision to avoid race with file mtime (fixes #399)
		exportTime := time.Now().Format(time.RFC3339Nano)
		if err := store.SetMetadata(ctx, "last_import_time", exportTime); err != nil {
			// Non-fatal warning (see above comment about graceful degradation)
			fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_time: %v\n", err)
		}
		// Note: mtime tracking removed in bd-v0y fix (git doesn't preserve mtime)
	}
	// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
	// This prevents validatePreExport from incorrectly blocking on next export
	beadsDir := filepath.Dir(jsonlPath)
	dbPath := filepath.Join(beadsDir, "beads.db")
	if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
		// Non-fatal warning
		fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
	}
	return nil
}

View File

@@ -1,132 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"os/exec"
)
// importFromJSONL imports the JSONL file by shelling out to the import
// subcommand of the currently running bd binary.
//
// Optional trailing booleans (in order): noGitHistory, protectLeftSnapshot
// (bd-sync-deletion fix). The subprocess's output is echoed verbatim on
// success; on failure it is folded into the returned error.
func importFromJSONL(ctx context.Context, jsonlPath string, renameOnImport bool, opts ...bool) error {
	// Resolve the running binary so the subprocess never depends on a
	// relative "./bd" path being valid from the current directory.
	exe, err := os.Executable()
	if err != nil {
		return fmt.Errorf("cannot resolve current executable: %w", err)
	}

	// Decode the optional trailing booleans.
	var noGitHistory, protectLeftSnapshot bool
	if len(opts) > 0 {
		noGitHistory = opts[0]
	}
	if len(opts) > 1 {
		protectLeftSnapshot = opts[1]
	}

	// Assemble the invocation. --no-daemon forces the subprocess into direct
	// mode so it cannot trip over daemon connection issues.
	args := []string{"--no-daemon", "import", "-i", jsonlPath}
	if renameOnImport {
		args = append(args, "--rename-on-import")
	}
	if noGitHistory {
		args = append(args, "--no-git-history")
	}
	if protectLeftSnapshot {
		// Post-pull imports protect the left snapshot (bd-sync-deletion fix).
		args = append(args, "--protect-left-snapshot")
	}

	importCmd := exec.CommandContext(ctx, exe, args...) // #nosec G204 - bd import command from trusted binary
	out, runErr := importCmd.CombinedOutput()
	if runErr != nil {
		return fmt.Errorf("import failed: %w\n%s", runErr, out)
	}

	// The import subcommand prints its own summary; surface it as-is.
	if len(out) > 0 {
		fmt.Print(string(out))
	}
	return nil
}
// resolveNoGitHistoryForFromMain returns the effective noGitHistory setting
// for a sync operation. Syncing from main (--from-main) always forces it to
// true, because scanning git history there would create incorrect deletion
// records for locally-created beads that don't exist on main; otherwise the
// caller's explicit flag passes through unchanged.
// See: https://github.com/steveyegge/beads/issues/417
func resolveNoGitHistoryForFromMain(fromMain, noGitHistory bool) bool {
	return fromMain || noGitHistory
}
// doSyncFromMain performs a one-way sync from the default branch (main/master).
// Used for ephemeral branches without upstream tracking (gt-ick9).
//
// It fetches the remote default branch, checks .beads/ out over the working
// tree, and imports the JSONL — discarding local beads changes. If
// sync.remote is configured (e.g., "upstream" for fork workflows), that
// remote is used instead of "origin" (bd-bx9). With dryRun set, only the
// planned steps are printed and nothing is touched.
func doSyncFromMain(ctx context.Context, jsonlPath string, renameOnImport bool, dryRun bool, noGitHistory bool) error {
	// Determine which remote to use (default: origin, but can be configured via sync.remote).
	// The config lookup is best-effort: if the store can't be opened we fall
	// back to "origin" rather than failing the sync.
	remote := "origin"
	if err := ensureStoreActive(); err == nil && store != nil {
		if configuredRemote, err := store.GetConfig(ctx, "sync.remote"); err == nil && configuredRemote != "" {
			remote = configuredRemote
		}
	}
	if dryRun {
		fmt.Println("→ [DRY RUN] Would sync beads from main branch")
		fmt.Printf(" 1. Fetch %s main\n", remote)
		fmt.Printf(" 2. Checkout .beads/ from %s/main\n", remote)
		fmt.Println(" 3. Import JSONL into database")
		fmt.Println("\n✓ Dry run complete (no changes made)")
		return nil
	}
	// Check if we're in a git repository
	if !isGitRepo() {
		return fmt.Errorf("not in a git repository")
	}
	// Check if remote exists
	if !hasGitRemote(ctx) {
		return fmt.Errorf("no git remote configured")
	}
	// Verify the configured remote exists (a stale sync.remote setting would
	// otherwise surface as a confusing fetch failure later).
	checkRemoteCmd := exec.CommandContext(ctx, "git", "remote", "get-url", remote)
	if err := checkRemoteCmd.Run(); err != nil {
		return fmt.Errorf("configured sync.remote '%s' does not exist (run 'git remote add %s <url>')", remote, remote)
	}
	defaultBranch := getDefaultBranchForRemote(ctx, remote)
	// Step 1: Fetch from main
	fmt.Printf("→ Fetching from %s/%s...\n", remote, defaultBranch)
	fetchCmd := exec.CommandContext(ctx, "git", "fetch", remote, defaultBranch)
	if output, err := fetchCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("git fetch %s %s failed: %w\n%s", remote, defaultBranch, err, output)
	}
	// Step 2: Checkout .beads/ directory from main (overwrites local .beads/)
	fmt.Printf("→ Checking out beads from %s/%s...\n", remote, defaultBranch)
	checkoutCmd := exec.CommandContext(ctx, "git", "checkout", fmt.Sprintf("%s/%s", remote, defaultBranch), "--", ".beads/")
	if output, err := checkoutCmd.CombinedOutput(); err != nil {
		return fmt.Errorf("git checkout .beads/ from %s/%s failed: %w\n%s", remote, defaultBranch, err, output)
	}
	// Step 3: Import JSONL into the local database
	fmt.Println("→ Importing JSONL...")
	if err := importFromJSONL(ctx, jsonlPath, renameOnImport, noGitHistory); err != nil {
		return fmt.Errorf("import failed: %w", err)
	}
	fmt.Println("\n✓ Sync from main complete")
	return nil
}

View File

@@ -0,0 +1,16 @@
# Test bd close --resolution alias (GH#721)
# Jira CLI convention: --resolution instead of --reason
bd init --prefix test
# Create issue
bd create 'Issue to close with resolution'
cp stdout issue.txt
exec sh -c 'grep -oE "test-[a-z0-9]+" issue.txt > issue_id.txt'
# Close using --resolution alias
exec sh -c 'bd close $(cat issue_id.txt) --resolution "Fixed via resolution alias"'
stdout 'Closed test-'
# Verify close_reason is set correctly
exec sh -c 'bd show $(cat issue_id.txt) --json'
stdout 'Fixed via resolution alias'

View File

@@ -104,6 +104,73 @@ external_projects:
gastown: /path/to/gastown
```
### Hooks Configuration
bd supports config-based hooks for automation and notifications. Currently, only close hooks (`on_close`) are implemented.
#### Close Hooks
Close hooks run after an issue is successfully closed via `bd close`. They execute synchronously but failures are logged as warnings and don't block the close operation.
**Configuration:**
```yaml
# .beads/config.yaml
hooks:
on_close:
- name: show-next
command: bd ready --limit 1
- name: context-check
command: echo "Issue $BEAD_ID closed. Check context if nearing limit."
- command: notify-team.sh # name is optional
```
**Environment Variables:**
Hook commands receive issue data via environment variables:
| Variable | Description |
|----------|-------------|
| `BEAD_ID` | Issue ID (e.g., `bd-abc1`) |
| `BEAD_TITLE` | Issue title |
| `BEAD_TYPE` | Issue type (`task`, `bug`, `feature`, etc.) |
| `BEAD_PRIORITY` | Priority (0-4) |
| `BEAD_CLOSE_REASON` | Close reason if provided |
**Example Use Cases:**
1. **Show next work item:**
```yaml
hooks:
on_close:
- name: next-task
command: bd ready --limit 1
```
2. **Context check reminder:**
```yaml
hooks:
on_close:
- name: context-check
command: |
echo "Issue $BEAD_ID ($BEAD_TITLE) closed."
echo "Priority was P$BEAD_PRIORITY. Reason: $BEAD_CLOSE_REASON"
```
3. **Integration with external tools:**
```yaml
hooks:
on_close:
- name: slack-notify
command: curl -X POST "$SLACK_WEBHOOK" -d "{\"text\":\"Closed: $BEAD_ID - $BEAD_TITLE\"}"
```
**Notes:**
- Hooks have a 10-second timeout
- Hook failures log warnings but don't fail the close operation
- Commands run via `sh -c`, so shell features like pipes and redirects work
- Both script-based hooks (`.beads/hooks/on_close`) and config-based hooks run
### Why Two Systems?
**Tool settings (Viper)** are user preferences:

View File

@@ -1427,6 +1427,237 @@ func TestIsWispDatabase(t *testing.T) {
}
}
// TestFindDatabaseInBeadsDir tests the database discovery within a .beads directory.
// Table cases cover the canonical beads.db, the legacy bd.db, preference
// ordering, and files that must be skipped (backups, vc.db, non-db files).
func TestFindDatabaseInBeadsDir(t *testing.T) {
	tests := []struct {
		name         string
		files        []string
		configJSON   string
		expectDB     string
		warnOnIssues bool
	}{
		{
			name:     "canonical beads.db only",
			files:    []string{"beads.db"},
			expectDB: "beads.db",
		},
		{
			name:     "legacy bd.db only",
			files:    []string{"bd.db"},
			expectDB: "bd.db",
		},
		{
			name:     "prefers beads.db over other db files",
			files:    []string{"custom.db", "beads.db", "other.db"},
			expectDB: "beads.db",
		},
		{
			name:     "skips backup files",
			files:    []string{"beads.backup.db", "real.db"},
			expectDB: "real.db",
		},
		{
			name:     "skips vc.db",
			files:    []string{"vc.db", "beads.db"},
			expectDB: "beads.db",
		},
		{
			name:     "no db files returns empty",
			files:    []string{"readme.txt", "config.yaml"},
			expectDB: "",
		},
		{
			name:     "only backup files returns empty",
			files:    []string{"beads.backup.db", "vc.db"},
			expectDB: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// t.TempDir fails the test on error and cleans up automatically,
			// replacing the MkdirTemp + defer RemoveAll boilerplate.
			tmpDir := t.TempDir()
			// Create test files
			for _, file := range tt.files {
				path := filepath.Join(tmpDir, file)
				if err := os.WriteFile(path, []byte{}, 0644); err != nil {
					t.Fatal(err)
				}
			}
			// Write config.json if specified
			if tt.configJSON != "" {
				configPath := filepath.Join(tmpDir, "config.json")
				if err := os.WriteFile(configPath, []byte(tt.configJSON), 0644); err != nil {
					t.Fatal(err)
				}
			}
			result := findDatabaseInBeadsDir(tmpDir, tt.warnOnIssues)
			if tt.expectDB == "" {
				if result != "" {
					t.Errorf("findDatabaseInBeadsDir() = %q, want empty string", result)
				}
			} else {
				expected := filepath.Join(tmpDir, tt.expectDB)
				if result != expected {
					t.Errorf("findDatabaseInBeadsDir() = %q, want %q", result, expected)
				}
			}
		})
	}
}
// TestFindAllDatabases tests the multi-database discovery: a database in a
// parent directory's .beads/ must be found from a nested subdirectory.
func TestFindAllDatabases(t *testing.T) {
	// Save original state; BEADS_DIR would override directory-walk discovery.
	originalEnv := os.Getenv("BEADS_DIR")
	defer func() {
		if originalEnv != "" {
			os.Setenv("BEADS_DIR", originalEnv)
		} else {
			os.Unsetenv("BEADS_DIR")
		}
	}()
	os.Unsetenv("BEADS_DIR")
	// t.TempDir fails the test on error and cleans up automatically,
	// replacing the MkdirTemp + defer RemoveAll boilerplate.
	tmpDir := t.TempDir()
	// Create .beads directory with database
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	dbPath := filepath.Join(beadsDir, "beads.db")
	if err := os.WriteFile(dbPath, []byte{}, 0644); err != nil {
		t.Fatal(err)
	}
	// Create subdirectory and change to it
	subDir := filepath.Join(tmpDir, "sub", "nested")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatal(err)
	}
	t.Chdir(subDir)
	// FindAllDatabases should find the parent .beads
	result := FindAllDatabases()
	if len(result) == 0 {
		t.Error("FindAllDatabases() returned empty slice, expected at least one database")
	} else {
		// Compare resolved paths to tolerate symlinked temp dirs
		// (e.g. macOS /var -> /private/var).
		resultResolved, _ := filepath.EvalSymlinks(result[0].Path)
		dbPathResolved, _ := filepath.EvalSymlinks(dbPath)
		if resultResolved != dbPathResolved {
			t.Errorf("FindAllDatabases()[0].Path = %q, want %q", result[0].Path, dbPath)
		}
	}
}
// TestFindAllDatabases_NoDatabase tests FindAllDatabases when no database exists:
// it must return an empty (non-nil) slice.
func TestFindAllDatabases_NoDatabase(t *testing.T) {
	// Save original state; BEADS_DIR would override directory-walk discovery.
	originalEnv := os.Getenv("BEADS_DIR")
	defer func() {
		if originalEnv != "" {
			os.Setenv("BEADS_DIR", originalEnv)
		} else {
			os.Unsetenv("BEADS_DIR")
		}
	}()
	os.Unsetenv("BEADS_DIR")
	// t.TempDir replaces the MkdirTemp + defer RemoveAll boilerplate; the
	// directory contains no .beads, so discovery must come up empty.
	tmpDir := t.TempDir()
	t.Chdir(tmpDir)
	// FindAllDatabases should return empty slice (not nil)
	result := FindAllDatabases()
	if result == nil {
		t.Error("FindAllDatabases() returned nil, expected empty slice")
	}
	if len(result) != 0 {
		t.Errorf("FindAllDatabases() returned %d databases, expected 0", len(result))
	}
}
// TestFindAllDatabases_StopsAtFirst tests that FindAllDatabases stops at the
// first .beads found walking upward: from inside a child project with its own
// .beads, the parent's database must NOT be reported.
func TestFindAllDatabases_StopsAtFirst(t *testing.T) {
	// Save original state; BEADS_DIR would override directory-walk discovery.
	originalEnv := os.Getenv("BEADS_DIR")
	defer func() {
		if originalEnv != "" {
			os.Setenv("BEADS_DIR", originalEnv)
		} else {
			os.Unsetenv("BEADS_DIR")
		}
	}()
	os.Unsetenv("BEADS_DIR")
	// t.TempDir replaces the MkdirTemp + defer RemoveAll boilerplate.
	tmpDir := t.TempDir()
	// Create parent .beads
	parentBeadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(parentBeadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(parentBeadsDir, "beads.db"), []byte{}, 0644); err != nil {
		t.Fatal(err)
	}
	// Create child project with its own .beads
	childDir := filepath.Join(tmpDir, "child")
	childBeadsDir := filepath.Join(childDir, ".beads")
	if err := os.MkdirAll(childBeadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	childDBPath := filepath.Join(childBeadsDir, "beads.db")
	if err := os.WriteFile(childDBPath, []byte{}, 0644); err != nil {
		t.Fatal(err)
	}
	// Change to child directory
	t.Chdir(childDir)
	// FindAllDatabases should return only the child's database (stops at first)
	result := FindAllDatabases()
	if len(result) != 1 {
		t.Errorf("FindAllDatabases() returned %d databases, expected 1 (should stop at first)", len(result))
	}
	if len(result) > 0 {
		// Compare resolved paths to tolerate symlinked temp dirs.
		resultResolved, _ := filepath.EvalSymlinks(result[0].Path)
		childDBResolved, _ := filepath.EvalSymlinks(childDBPath)
		if resultResolved != childDBResolved {
			t.Errorf("FindAllDatabases() found %q, expected child database %q", result[0].Path, childDBPath)
		}
	}
}
// TestEnsureWispGitignore tests that EnsureWispGitignore correctly
// adds the wisp directory to .gitignore
func TestEnsureWispGitignore(t *testing.T) {

View File

@@ -0,0 +1,507 @@
package beads
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
// TestCanonicalizeGitURL tests URL normalization for various git URL formats
func TestCanonicalizeGitURL(t *testing.T) {
tests := []struct {
name string
input string
expected string
}{
// HTTPS URLs
{
name: "https basic",
input: "https://github.com/user/repo",
expected: "github.com/user/repo",
},
{
name: "https with .git suffix",
input: "https://github.com/user/repo.git",
expected: "github.com/user/repo",
},
{
name: "https with trailing slash",
input: "https://github.com/user/repo/",
expected: "github.com/user/repo",
},
{
name: "https uppercase host",
input: "https://GitHub.COM/User/Repo.git",
expected: "github.com/User/Repo",
},
{
name: "https with port 443",
input: "https://github.com:443/user/repo.git",
expected: "github.com/user/repo",
},
{
name: "https with custom port",
input: "https://gitlab.company.com:8443/user/repo.git",
expected: "gitlab.company.com:8443/user/repo",
},
// SSH URLs (protocol style)
{
name: "ssh protocol basic",
input: "ssh://git@github.com/user/repo.git",
expected: "github.com/user/repo",
},
{
name: "ssh with port 22",
input: "ssh://git@github.com:22/user/repo.git",
expected: "github.com/user/repo",
},
{
name: "ssh with custom port",
input: "ssh://git@gitlab.company.com:2222/user/repo.git",
expected: "gitlab.company.com:2222/user/repo",
},
// SCP-style URLs (git@host:path)
{
name: "scp style basic",
input: "git@github.com:user/repo.git",
expected: "github.com/user/repo",
},
{
name: "scp style without .git",
input: "git@github.com:user/repo",
expected: "github.com/user/repo",
},
{
name: "scp style uppercase host",
input: "git@GITHUB.COM:User/Repo.git",
expected: "github.com/User/Repo",
},
{
name: "scp style with trailing slash",
input: "git@github.com:user/repo/",
expected: "github.com/user/repo",
},
{
name: "scp style deep path",
input: "git@gitlab.com:org/team/project/repo.git",
expected: "gitlab.com/org/team/project/repo",
},
// HTTP URLs (less common but valid)
{
name: "http basic",
input: "http://github.com/user/repo.git",
expected: "github.com/user/repo",
},
{
name: "http with port 80",
input: "http://github.com:80/user/repo.git",
expected: "github.com/user/repo",
},
// Git protocol
{
name: "git protocol",
input: "git://github.com/user/repo.git",
expected: "github.com/user/repo",
},
// Whitespace handling
{
name: "with leading whitespace",
input: " https://github.com/user/repo.git",
expected: "github.com/user/repo",
},
{
name: "with trailing whitespace",
input: "https://github.com/user/repo.git ",
expected: "github.com/user/repo",
},
{
name: "with newline",
input: "https://github.com/user/repo.git\n",
expected: "github.com/user/repo",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := canonicalizeGitURL(tt.input)
if err != nil {
t.Fatalf("canonicalizeGitURL(%q) error = %v", tt.input, err)
}
if result != tt.expected {
t.Errorf("canonicalizeGitURL(%q) = %q, want %q", tt.input, result, tt.expected)
}
})
}
}
// TestCanonicalizeGitURL_LocalPath tests that local paths are handled
func TestCanonicalizeGitURL_LocalPath(t *testing.T) {
// Create a temp directory to use as a "local path"
tmpDir := t.TempDir()
// Local absolute path
result, err := canonicalizeGitURL(tmpDir)
if err != nil {
t.Fatalf("canonicalizeGitURL(%q) error = %v", tmpDir, err)
}
// Should return a forward-slash path
if strings.Contains(result, "\\") {
t.Errorf("canonicalizeGitURL(%q) = %q, should use forward slashes", tmpDir, result)
}
}
// TestCanonicalizeGitURL_WindowsPath tests Windows path detection
func TestCanonicalizeGitURL_WindowsPath(t *testing.T) {
// This tests the Windows path detection logic (C:/)
// The function should NOT treat "C:/foo/bar" as an scp-style URL
tests := []struct {
input string
expected string
}{
// These are NOT scp-style URLs - they're Windows paths
{"C:/Users/test/repo", "C:/Users/test/repo"},
{"D:/projects/myrepo", "D:/projects/myrepo"},
}
for _, tt := range tests {
result, err := canonicalizeGitURL(tt.input)
if err != nil {
t.Fatalf("canonicalizeGitURL(%q) error = %v", tt.input, err)
}
// Should preserve the Windows path structure (forward slashes)
if !strings.Contains(result, "/") {
t.Errorf("canonicalizeGitURL(%q) = %q, expected path with slashes", tt.input, result)
}
}
}
// TestComputeRepoID_WithRemote tests ComputeRepoID when remote.origin.url exists
func TestComputeRepoID_WithRemote(t *testing.T) {
// Create temporary directory for test repo
tmpDir := t.TempDir()
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Skipf("git not available: %v", err)
}
// Configure git user
cmd = exec.Command("git", "config", "user.email", "test@example.com")
cmd.Dir = tmpDir
_ = cmd.Run()
cmd = exec.Command("git", "config", "user.name", "Test User")
cmd.Dir = tmpDir
_ = cmd.Run()
// Set remote.origin.url
cmd = exec.Command("git", "remote", "add", "origin", "https://github.com/user/test-repo.git")
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Fatalf("git remote add failed: %v", err)
}
// Change to repo dir
t.Chdir(tmpDir)
// ComputeRepoID should return a consistent hash
result1, err := ComputeRepoID()
if err != nil {
t.Fatalf("ComputeRepoID() error = %v", err)
}
// Should be a 32-character hex string (16 bytes)
if len(result1) != 32 {
t.Errorf("ComputeRepoID() = %q, expected 32 character hex string", result1)
}
// Should be consistent across calls
result2, err := ComputeRepoID()
if err != nil {
t.Fatalf("ComputeRepoID() second call error = %v", err)
}
if result1 != result2 {
t.Errorf("ComputeRepoID() not consistent: %q vs %q", result1, result2)
}
}
// TestComputeRepoID_NoRemote tests ComputeRepoID when no remote exists
func TestComputeRepoID_NoRemote(t *testing.T) {
// Create temporary directory for test repo
tmpDir := t.TempDir()
// Initialize git repo (no remote)
cmd := exec.Command("git", "init")
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Skipf("git not available: %v", err)
}
// Change to repo dir
t.Chdir(tmpDir)
// ComputeRepoID should fall back to using the local path
result, err := ComputeRepoID()
if err != nil {
t.Fatalf("ComputeRepoID() error = %v", err)
}
// Should still return a 32-character hex string
if len(result) != 32 {
t.Errorf("ComputeRepoID() = %q, expected 32 character hex string", result)
}
}
// TestComputeRepoID_NotGitRepo tests ComputeRepoID when not in a git repo.
func TestComputeRepoID_NotGitRepo(t *testing.T) {
	// Create temporary directory that is NOT a git repo
	tmpDir := t.TempDir()
	t.Chdir(tmpDir)
	// ComputeRepoID should return an error
	_, err := ComputeRepoID()
	if err == nil {
		// Must be Fatal, not Error: t.Error continues execution, and the
		// err.Error() call below would then panic on a nil error.
		t.Fatal("ComputeRepoID() expected error for non-git directory, got nil")
	}
	if !strings.Contains(err.Error(), "not a git repository") {
		t.Errorf("ComputeRepoID() error = %q, expected 'not a git repository'", err.Error())
	}
}
// TestComputeRepoID_DifferentRemotesSameCanonical tests that different URL formats
// for the same repo produce the same ID
func TestComputeRepoID_DifferentRemotesSameCanonical(t *testing.T) {
remotes := []string{
"https://github.com/user/repo.git",
"git@github.com:user/repo.git",
"ssh://git@github.com/user/repo.git",
}
var ids []string
for _, remote := range remotes {
tmpDir := t.TempDir()
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Skipf("git not available: %v", err)
}
// Set remote
cmd = exec.Command("git", "remote", "add", "origin", remote)
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Fatalf("git remote add failed for %q: %v", remote, err)
}
t.Chdir(tmpDir)
id, err := ComputeRepoID()
if err != nil {
t.Fatalf("ComputeRepoID() for remote %q error = %v", remote, err)
}
ids = append(ids, id)
}
// All IDs should be the same since they point to the same canonical repo
for i := 1; i < len(ids); i++ {
if ids[i] != ids[0] {
t.Errorf("ComputeRepoID() produced different IDs for same repo:\n remote[0]=%q id=%s\n remote[%d]=%q id=%s",
remotes[0], ids[0], i, remotes[i], ids[i])
}
}
}
// TestGetCloneID_Basic tests GetCloneID returns a consistent ID
func TestGetCloneID_Basic(t *testing.T) {
// Create temporary directory for test repo
tmpDir := t.TempDir()
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Skipf("git not available: %v", err)
}
t.Chdir(tmpDir)
// GetCloneID should return a consistent hash
result1, err := GetCloneID()
if err != nil {
t.Fatalf("GetCloneID() error = %v", err)
}
// Should be a 16-character hex string (8 bytes)
if len(result1) != 16 {
t.Errorf("GetCloneID() = %q, expected 16 character hex string", result1)
}
// Should be consistent across calls
result2, err := GetCloneID()
if err != nil {
t.Fatalf("GetCloneID() second call error = %v", err)
}
if result1 != result2 {
t.Errorf("GetCloneID() not consistent: %q vs %q", result1, result2)
}
}
// TestGetCloneID_DifferentDirs tests GetCloneID produces different IDs for different clones
func TestGetCloneID_DifferentDirs(t *testing.T) {
ids := make(map[string]string)
for i := 0; i < 3; i++ {
tmpDir := t.TempDir()
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Skipf("git not available: %v", err)
}
t.Chdir(tmpDir)
id, err := GetCloneID()
if err != nil {
t.Fatalf("GetCloneID() error = %v", err)
}
// Each clone should have a unique ID
if prev, exists := ids[id]; exists {
t.Errorf("GetCloneID() produced duplicate ID %q for dirs %q and %q", id, prev, tmpDir)
}
ids[id] = tmpDir
}
}
// TestGetCloneID_NotGitRepo tests GetCloneID when not in a git repo.
func TestGetCloneID_NotGitRepo(t *testing.T) {
	// Create temporary directory that is NOT a git repo
	tmpDir := t.TempDir()
	t.Chdir(tmpDir)
	// GetCloneID should return an error
	_, err := GetCloneID()
	if err == nil {
		// Must be Fatal, not Error: t.Error continues execution, and the
		// err.Error() call below would then panic on a nil error.
		t.Fatal("GetCloneID() expected error for non-git directory, got nil")
	}
	if !strings.Contains(err.Error(), "not a git repository") {
		t.Errorf("GetCloneID() error = %q, expected 'not a git repository'", err.Error())
	}
}
// TestGetCloneID_IncludesHostname tests that GetCloneID includes hostname
// to differentiate the same path on different machines
func TestGetCloneID_IncludesHostname(t *testing.T) {
// This test verifies the concept - we can't actually test different hostnames
// but we can verify that the same path produces the same ID on this machine
tmpDir := t.TempDir()
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = tmpDir
if err := cmd.Run(); err != nil {
t.Skipf("git not available: %v", err)
}
t.Chdir(tmpDir)
hostname, _ := os.Hostname()
id, err := GetCloneID()
if err != nil {
t.Fatalf("GetCloneID() error = %v", err)
}
// Just verify we got a valid ID - we can't test different hostnames
// but the implementation includes hostname in the hash
if len(id) != 16 {
t.Errorf("GetCloneID() = %q, expected 16 character hex string (hostname=%s)", id, hostname)
}
}
// TestGetCloneID_Worktree tests GetCloneID in a worktree
func TestGetCloneID_Worktree(t *testing.T) {
// Create temporary directory for test
tmpDir := t.TempDir()
// Initialize main git repo
mainRepoDir := filepath.Join(tmpDir, "main-repo")
if err := os.MkdirAll(mainRepoDir, 0755); err != nil {
t.Fatal(err)
}
cmd := exec.Command("git", "init")
cmd.Dir = mainRepoDir
if err := cmd.Run(); err != nil {
t.Skipf("git not available: %v", err)
}
// Configure git user
cmd = exec.Command("git", "config", "user.email", "test@example.com")
cmd.Dir = mainRepoDir
_ = cmd.Run()
cmd = exec.Command("git", "config", "user.name", "Test User")
cmd.Dir = mainRepoDir
_ = cmd.Run()
// Create initial commit (required for worktree)
dummyFile := filepath.Join(mainRepoDir, "README.md")
if err := os.WriteFile(dummyFile, []byte("# Test\n"), 0644); err != nil {
t.Fatal(err)
}
cmd = exec.Command("git", "add", "README.md")
cmd.Dir = mainRepoDir
_ = cmd.Run()
cmd = exec.Command("git", "commit", "-m", "Initial commit")
cmd.Dir = mainRepoDir
if err := cmd.Run(); err != nil {
t.Fatalf("git commit failed: %v", err)
}
// Create a worktree
worktreeDir := filepath.Join(tmpDir, "worktree")
cmd = exec.Command("git", "worktree", "add", worktreeDir, "HEAD")
cmd.Dir = mainRepoDir
if err := cmd.Run(); err != nil {
t.Fatalf("git worktree add failed: %v", err)
}
defer func() {
cmd := exec.Command("git", "worktree", "remove", worktreeDir)
cmd.Dir = mainRepoDir
_ = cmd.Run()
}()
// Get IDs from both locations
t.Chdir(mainRepoDir)
mainID, err := GetCloneID()
if err != nil {
t.Fatalf("GetCloneID() in main repo error = %v", err)
}
t.Chdir(worktreeDir)
worktreeID, err := GetCloneID()
if err != nil {
t.Fatalf("GetCloneID() in worktree error = %v", err)
}
// Worktree should have a DIFFERENT ID than main repo
// because they're different paths (different clones conceptually)
if mainID == worktreeID {
t.Errorf("GetCloneID() returned same ID for main repo and worktree - should be different")
}
}

View File

@@ -0,0 +1,732 @@
package compact
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/anthropics/anthropic-sdk-go/option"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// setupTestStore creates a test SQLite store for unit tests.
//
// The store is backed by a throwaway on-disk database under t.TempDir and is
// seeded with the minimum config the compactor needs: an issue prefix plus
// the Tier 1 compaction age/dependency-level settings. Callers are expected
// to Close the returned store themselves.
func setupTestStore(t *testing.T) *sqlite.SQLiteStorage {
	t.Helper()
	tmpDB := t.TempDir() + "/test.db"
	store, err := sqlite.New(context.Background(), tmpDB)
	if err != nil {
		t.Fatalf("failed to create storage: %v", err)
	}
	ctx := context.Background()
	// Set issue_prefix to prevent "database not initialized" errors
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue_prefix: %v", err)
	}
	// Use 7 days minimum for Tier 1 compaction
	if err := store.SetConfig(ctx, "compact_tier1_days", "7"); err != nil {
		t.Fatalf("failed to set config: %v", err)
	}
	if err := store.SetConfig(ctx, "compact_tier1_dep_levels", "2"); err != nil {
		t.Fatalf("failed to set config: %v", err)
	}
	return store
}
// createTestIssue creates a closed issue eligible for compaction.
//
// The issue is closed 8 days ago — past the 7-day compact_tier1_days
// threshold configured by setupTestStore — and carries multi-section prose
// (description, design, notes, acceptance criteria) so compaction has real
// content to summarize. The store's configured issue_prefix is used, falling
// back to "bd" if unset.
func createTestIssue(t *testing.T, store *sqlite.SQLiteStorage, id string) *types.Issue {
	t.Helper()
	ctx := context.Background()
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	// Issue closed 8 days ago (beyond 7-day threshold for Tier 1)
	closedAt := now.Add(-8 * 24 * time.Hour)
	issue := &types.Issue{
		ID:    id,
		Title: "Test Issue",
		Description: `Implemented a comprehensive authentication system for the application.
The system includes JWT token generation, refresh token handling, password hashing with bcrypt,
rate limiting on login attempts, and session management.`,
		Design: `Authentication Flow:
1. User submits credentials
2. Server validates against database
3. On success, generate JWT with user claims`,
		Notes:              "Performance considerations and testing strategy notes.",
		AcceptanceCriteria: "- Users can register\n- Users can login\n- Protected endpoints work",
		Status:             types.StatusClosed,
		Priority:           2,
		IssueType:          types.TypeTask,
		CreatedAt:          now.Add(-48 * time.Hour),
		UpdatedAt:          now.Add(-24 * time.Hour),
		ClosedAt:           &closedAt,
	}
	if err := store.CreateIssue(ctx, issue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	return issue
}
// TestNew_WithConfig verifies that explicit Config values (Concurrency,
// DryRun) are carried through to the constructed compactor.
func TestNew_WithConfig(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	config := &Config{
		Concurrency: 10,
		DryRun:      true,
	}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if c.config.Concurrency != 10 {
		t.Errorf("expected concurrency 10, got %d", c.config.Concurrency)
	}
	if !c.config.DryRun {
		t.Error("expected DryRun to be true")
	}
}
// TestNew_DefaultConcurrency verifies that passing a nil config yields
// the package default concurrency.
func TestNew_DefaultConcurrency(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	c, err := New(store, "", nil)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if c.config.Concurrency != defaultConcurrency {
		t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, c.config.Concurrency)
	}
}
// TestNew_ZeroConcurrency verifies that a zero Concurrency in the config
// is normalized to defaultConcurrency.
func TestNew_ZeroConcurrency(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	config := &Config{
		Concurrency: 0,
		DryRun:      true,
	}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Zero concurrency should be replaced with default
	if c.config.Concurrency != defaultConcurrency {
		t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, c.config.Concurrency)
	}
}
// TestNew_NegativeConcurrency verifies that a negative Concurrency in the
// config is normalized to defaultConcurrency.
func TestNew_NegativeConcurrency(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	config := &Config{
		Concurrency: -5,
		DryRun:      true,
	}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Negative concurrency should be replaced with default
	if c.config.Concurrency != defaultConcurrency {
		t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, c.config.Concurrency)
	}
}
// TestNew_WithAPIKey verifies an explicitly provided API key is stored in
// the config when ANTHROPIC_API_KEY is unset (the env var otherwise takes
// precedence over the explicit argument).
func TestNew_WithAPIKey(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	// Clear env var to test explicit key
	t.Setenv("ANTHROPIC_API_KEY", "")
	config := &Config{
		DryRun: true, // DryRun so we don't actually need a valid key
	}
	c, err := New(store, "test-api-key", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if c.config.APIKey != "test-api-key" {
		t.Errorf("expected api key 'test-api-key', got '%s'", c.config.APIKey)
	}
}
// TestNew_NoAPIKeyFallsToDryRun verifies that New degrades gracefully to
// dry-run mode instead of failing when no API key is available anywhere.
func TestNew_NoAPIKeyFallsToDryRun(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	// Clear env var
	t.Setenv("ANTHROPIC_API_KEY", "")
	config := &Config{
		DryRun: false, // Try to create real client
	}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Should fall back to DryRun when no API key
	if !c.config.DryRun {
		t.Error("expected DryRun to be true when no API key provided")
	}
}
// TestNew_AuditSettings verifies that AuditEnabled and Actor from the
// config are propagated onto the constructed haiku client.
func TestNew_AuditSettings(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	config := &Config{
		AuditEnabled: true,
		Actor:        "test-actor",
	}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if c.haiku == nil {
		t.Fatal("expected haiku client to be created")
	}
	if !c.haiku.auditEnabled {
		t.Error("expected auditEnabled to be true")
	}
	if c.haiku.auditActor != "test-actor" {
		t.Errorf("expected auditActor 'test-actor', got '%s'", c.haiku.auditActor)
	}
}
// TestCompactTier1_DryRun verifies that dry-run compaction returns an
// error prefixed "dry-run:" and does not modify the stored issue.
func TestCompactTier1_DryRun(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue := createTestIssue(t, store, "bd-1")
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx := context.Background()
	err = c.CompactTier1(ctx, issue.ID)
	if err == nil {
		t.Fatal("expected dry-run error, got nil")
	}
	if !strings.HasPrefix(err.Error(), "dry-run:") {
		t.Errorf("expected dry-run error prefix, got: %v", err)
	}
	// Verify issue was not modified
	afterIssue, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if afterIssue.Description != issue.Description {
		t.Error("dry-run should not modify issue")
	}
}
// TestCompactTier1_IneligibleOpenIssue verifies that an issue that is
// still open is rejected with a "not eligible" error.
func TestCompactTier1_IneligibleOpenIssue(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	ctx := context.Background()
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	// Open issue: no ClosedAt, StatusOpen — ineligible for compaction.
	issue := &types.Issue{
		ID:          "bd-open",
		Title:       "Open Issue",
		Description: "Should not be compacted",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	if err := store.CreateIssue(ctx, issue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	err = c.CompactTier1(ctx, issue.ID)
	if err == nil {
		t.Fatal("expected error for ineligible issue, got nil")
	}
	if !strings.Contains(err.Error(), "not eligible") {
		t.Errorf("expected 'not eligible' error, got: %v", err)
	}
}
// TestCompactTier1_NonexistentIssue verifies that compacting an unknown
// issue ID returns an error.
func TestCompactTier1_NonexistentIssue(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx := context.Background()
	err = c.CompactTier1(ctx, "bd-nonexistent")
	if err == nil {
		t.Fatal("expected error for nonexistent issue")
	}
}
// TestCompactTier1_ContextCanceled verifies that a pre-canceled context
// aborts compaction with context.Canceled.
func TestCompactTier1_ContextCanceled(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue := createTestIssue(t, store, "bd-cancel")
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // Cancel immediately
	err = c.CompactTier1(ctx, issue.ID)
	if err == nil {
		t.Fatal("expected error for canceled context")
	}
	// NOTE(review): the direct == comparison assumes CompactTier1 returns
	// context.Canceled unwrapped; errors.Is would tolerate wrapping —
	// confirm the implementation before tightening or relaxing this.
	if err != context.Canceled {
		t.Errorf("expected context.Canceled, got: %v", err)
	}
}
// TestCompactTier1Batch_EmptyList verifies that an empty ID list is a
// no-op returning nil results and no error.
func TestCompactTier1Batch_EmptyList(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx := context.Background()
	results, err := c.CompactTier1Batch(ctx, []string{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if results != nil {
		t.Errorf("expected nil results for empty list, got: %v", results)
	}
}
// TestCompactTier1Batch_DryRun verifies that a dry-run batch returns one
// result per issue with no per-issue errors and non-zero original sizes.
func TestCompactTier1Batch_DryRun(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue1 := createTestIssue(t, store, "bd-batch-1")
	issue2 := createTestIssue(t, store, "bd-batch-2")
	config := &Config{DryRun: true, Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx := context.Background()
	results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	for _, result := range results {
		if result.Err != nil {
			t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
		}
		if result.OriginalSize == 0 {
			t.Errorf("expected non-zero original size for %s", result.IssueID)
		}
	}
}
// TestCompactTier1Batch_MixedEligibility verifies per-issue error
// reporting: the batch as a whole succeeds, the eligible (closed) issue
// gets a nil result error, and the ineligible (open) issue gets an error.
func TestCompactTier1Batch_MixedEligibility(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	closedIssue := createTestIssue(t, store, "bd-closed")
	ctx := context.Background()
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	// Open issue created directly so it stays ineligible.
	openIssue := &types.Issue{
		ID:          "bd-open",
		Title:       "Open Issue",
		Description: "Should not be compacted",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	if err := store.CreateIssue(ctx, openIssue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	config := &Config{DryRun: true, Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	results, err := c.CompactTier1Batch(ctx, []string{closedIssue.ID, openIssue.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	// Results may arrive in any order; match by issue ID.
	var foundClosed, foundOpen bool
	for _, result := range results {
		switch result.IssueID {
		case openIssue.ID:
			foundOpen = true
			if result.Err == nil {
				t.Error("expected error for ineligible issue")
			}
		case closedIssue.ID:
			foundClosed = true
			if result.Err != nil {
				t.Errorf("unexpected error for eligible issue: %v", result.Err)
			}
		}
	}
	if !foundClosed || !foundOpen {
		t.Error("missing expected results")
	}
}
// TestCompactTier1Batch_NonexistentIssue verifies that an unknown ID in
// the batch produces exactly one per-issue error while the valid issue
// still succeeds.
func TestCompactTier1Batch_NonexistentIssue(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	closedIssue := createTestIssue(t, store, "bd-closed")
	config := &Config{DryRun: true, Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx := context.Background()
	results, err := c.CompactTier1Batch(ctx, []string{closedIssue.ID, "bd-nonexistent"})
	if err != nil {
		t.Fatalf("batch operation failed: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	var successCount, errorCount int
	for _, r := range results {
		if r.Err == nil {
			successCount++
		} else {
			errorCount++
		}
	}
	if successCount != 1 {
		t.Errorf("expected 1 success, got %d", successCount)
	}
	if errorCount != 1 {
		t.Errorf("expected 1 error, got %d", errorCount)
	}
}
// TestCompactTier1_WithMockAPI runs a real (non-dry-run) compaction
// against a mocked Anthropic API and verifies that the summary replaces
// the description and that Design/Notes/AcceptanceCriteria are cleared.
func TestCompactTier1_WithMockAPI(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue := createTestIssue(t, store, "bd-mock-api")
	// Create mock server that returns a short summary
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Encode error intentionally ignored in test handler.
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":    "msg_test123",
			"type":  "message",
			"role":  "assistant",
			"model": "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{
				{
					"type": "text",
					"text": "**Summary:** Short summary.\n\n**Key Decisions:** None.\n\n**Resolution:** Done.",
				},
			},
		})
	}))
	defer server.Close()
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	// Create compactor with mock API
	config := &Config{Concurrency: 1}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Replace the haiku client with one pointing to mock server
	c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create mock haiku client: %v", err)
	}
	ctx := context.Background()
	err = c.CompactTier1(ctx, issue.ID)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Verify issue was updated
	afterIssue, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if afterIssue.Description == issue.Description {
		t.Error("description should have been updated")
	}
	if afterIssue.Design != "" {
		t.Error("design should be cleared")
	}
	if afterIssue.Notes != "" {
		t.Error("notes should be cleared")
	}
	if afterIssue.AcceptanceCriteria != "" {
		t.Error("acceptance criteria should be cleared")
	}
}
// TestCompactTier1_SummaryNotShorter verifies the size-guard: when the
// generated summary would be longer than the original content, the
// compaction is rejected ("would increase size") and the issue is kept
// unchanged.
func TestCompactTier1_SummaryNotShorter(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	// Create issue with very short content
	ctx := context.Background()
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	closedAt := now.Add(-8 * 24 * time.Hour)
	issue := &types.Issue{
		ID:          "bd-short",
		Title:       "Short",
		Description: "X", // Very short description
		Status:      types.StatusClosed,
		Priority:    2,
		IssueType:   types.TypeTask,
		CreatedAt:   now.Add(-48 * time.Hour),
		UpdatedAt:   now.Add(-24 * time.Hour),
		ClosedAt:    &closedAt,
	}
	if err := store.CreateIssue(ctx, issue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Create mock server that returns a longer summary
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":    "msg_test123",
			"type":  "message",
			"role":  "assistant",
			"model": "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{
				{
					"type": "text",
					"text": "**Summary:** This is a much longer summary that exceeds the original content length.\n\n**Key Decisions:** Multiple decisions.\n\n**Resolution:** Complete.",
				},
			},
		})
	}))
	defer server.Close()
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	config := &Config{Concurrency: 1}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create mock haiku client: %v", err)
	}
	err = c.CompactTier1(ctx, issue.ID)
	if err == nil {
		t.Fatal("expected error when summary is longer")
	}
	if !strings.Contains(err.Error(), "would increase size") {
		t.Errorf("expected 'would increase size' error, got: %v", err)
	}
	// Verify issue was NOT modified (kept original)
	afterIssue, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if afterIssue.Description != issue.Description {
		t.Error("description should not have been modified when summary is longer")
	}
}
// TestCompactTier1Batch_WithMockAPI runs a non-dry-run batch against a
// mocked Anthropic API and verifies every result succeeded and shrank
// (CompactedSize non-zero and strictly less than OriginalSize).
func TestCompactTier1Batch_WithMockAPI(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue1 := createTestIssue(t, store, "bd-batch-mock-1")
	issue2 := createTestIssue(t, store, "bd-batch-mock-2")
	// Create mock server
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":    "msg_test123",
			"type":  "message",
			"role":  "assistant",
			"model": "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{
				{
					"type": "text",
					"text": "**Summary:** Compacted.\n\n**Key Decisions:** None.\n\n**Resolution:** Done.",
				},
			},
		})
	}))
	defer server.Close()
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	config := &Config{Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create mock haiku client: %v", err)
	}
	ctx := context.Background()
	results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	for _, result := range results {
		if result.Err != nil {
			t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
		}
		if result.CompactedSize == 0 {
			t.Errorf("expected non-zero compacted size for %s", result.IssueID)
		}
		if result.CompactedSize >= result.OriginalSize {
			t.Errorf("expected size reduction for %s: %d → %d", result.IssueID, result.OriginalSize, result.CompactedSize)
		}
	}
}
// TestResult_Fields verifies that Result fields round-trip the values
// they were constructed with.
func TestResult_Fields(t *testing.T) {
	res := Result{
		IssueID:       "bd-1",
		OriginalSize:  100,
		CompactedSize: 50,
		Err:           nil,
	}
	if got := res.IssueID; got != "bd-1" {
		t.Errorf("expected IssueID 'bd-1', got '%s'", got)
	}
	if got := res.OriginalSize; got != 100 {
		t.Errorf("expected OriginalSize 100, got %d", got)
	}
	if got := res.CompactedSize; got != 50 {
		t.Errorf("expected CompactedSize 50, got %d", got)
	}
	if got := res.Err; got != nil {
		t.Errorf("expected nil Err, got %v", got)
	}
}
// TestConfig_Fields verifies that Config fields round-trip the values
// they were constructed with.
func TestConfig_Fields(t *testing.T) {
	c := &Config{
		APIKey:       "test-key",
		Concurrency:  10,
		DryRun:       true,
		AuditEnabled: true,
		Actor:        "test-actor",
	}
	if c.APIKey != "test-key" {
		t.Errorf("expected APIKey 'test-key', got '%s'", c.APIKey)
	}
	if c.Concurrency != 10 {
		t.Errorf("expected Concurrency 10, got %d", c.Concurrency)
	}
	if !c.DryRun {
		t.Error("expected DryRun true")
	}
	if !c.AuditEnabled {
		t.Error("expected AuditEnabled true")
	}
	if c.Actor != "test-actor" {
		t.Errorf("expected Actor 'test-actor', got '%s'", c.Actor)
	}
}

View File

@@ -0,0 +1,171 @@
package compact
import (
"os"
"os/exec"
"path/filepath"
"regexp"
"testing"
)
// TestGetCurrentCommitHash_InGitRepo verifies that the helper returns a
// full 40-character lowercase hex SHA-1 when run inside a git checkout.
// Assumes the test binary's working directory is within the repo.
func TestGetCurrentCommitHash_InGitRepo(t *testing.T) {
	// This test runs in the actual beads repo, so it should return a valid hash
	hash := GetCurrentCommitHash()
	// Should be a 40-character hex string
	if len(hash) != 40 {
		t.Errorf("expected 40-char hash, got %d chars: %s", len(hash), hash)
	}
	// Should be valid hex
	matched, err := regexp.MatchString("^[0-9a-f]{40}$", hash)
	if err != nil {
		t.Fatalf("regex error: %v", err)
	}
	if !matched {
		t.Errorf("expected hex hash, got: %s", hash)
	}
}
// TestGetCurrentCommitHash_NotInGitRepo verifies that the helper returns
// "" when the working directory is not inside any git repository.
// Assumes t.TempDir() is not nested under a git repo (true for standard
// TMPDIR locations — TODO confirm in exotic CI setups). Uses os.Chdir,
// so this test must not run in parallel with others.
func TestGetCurrentCommitHash_NotInGitRepo(t *testing.T) {
	// Save current directory
	originalDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get cwd: %v", err)
	}
	// Create a temporary directory that is NOT a git repo
	tmpDir := t.TempDir()
	// Change to the temp directory
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to chdir to temp dir: %v", err)
	}
	defer func() {
		// Restore original directory
		if err := os.Chdir(originalDir); err != nil {
			t.Fatalf("failed to restore cwd: %v", err)
		}
	}()
	// Should return empty string when not in a git repo
	hash := GetCurrentCommitHash()
	if hash != "" {
		t.Errorf("expected empty string outside git repo, got: %s", hash)
	}
}
// TestGetCurrentCommitHash_NewGitRepo creates a fresh repo with one
// commit and verifies the helper's output matches `git rev-parse HEAD`.
// Uses os.Chdir, so it must not run in parallel with other tests.
func TestGetCurrentCommitHash_NewGitRepo(t *testing.T) {
	// Save current directory
	originalDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get cwd: %v", err)
	}
	// Create a temporary directory
	tmpDir := t.TempDir()
	// Initialize a new git repo
	cmd := exec.Command("git", "init")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to init git repo: %v", err)
	}
	// Configure git user for the commit
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to set git email: %v", err)
	}
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to set git name: %v", err)
	}
	// Create a file and commit it
	testFile := filepath.Join(tmpDir, "test.txt")
	if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
		t.Fatalf("failed to write test file: %v", err)
	}
	cmd = exec.Command("git", "add", ".")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to git add: %v", err)
	}
	cmd = exec.Command("git", "commit", "-m", "test commit")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to git commit: %v", err)
	}
	// Change to the new git repo
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to chdir to git repo: %v", err)
	}
	defer func() {
		// Restore original directory
		if err := os.Chdir(originalDir); err != nil {
			t.Fatalf("failed to restore cwd: %v", err)
		}
	}()
	// Should return a valid hash
	hash := GetCurrentCommitHash()
	if len(hash) != 40 {
		t.Errorf("expected 40-char hash, got %d chars: %s", len(hash), hash)
	}
	// Verify it matches git rev-parse output
	cmd = exec.Command("git", "rev-parse", "HEAD")
	cmd.Dir = tmpDir
	out, err := cmd.Output()
	if err != nil {
		t.Fatalf("failed to run git rev-parse: %v", err)
	}
	expected := string(out)
	// NOTE(review): manual trim assumes exactly one trailing newline from
	// git rev-parse; strings.TrimSpace would be more robust.
	expected = expected[:len(expected)-1] // trim newline
	if hash != expected {
		t.Errorf("hash mismatch: got %s, expected %s", hash, expected)
	}
}
// TestGetCurrentCommitHash_EmptyGitRepo verifies that a repo with no
// commits (no HEAD) yields "". Uses os.Chdir, so it must not run in
// parallel with other tests.
func TestGetCurrentCommitHash_EmptyGitRepo(t *testing.T) {
	// Save current directory
	originalDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get cwd: %v", err)
	}
	// Create a temporary directory
	tmpDir := t.TempDir()
	// Initialize a new git repo but don't commit anything
	cmd := exec.Command("git", "init")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to init git repo: %v", err)
	}
	// Change to the empty git repo
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to chdir to git repo: %v", err)
	}
	defer func() {
		// Restore original directory
		if err := os.Chdir(originalDir); err != nil {
			t.Fatalf("failed to restore cwd: %v", err)
		}
	}()
	// Should return empty string for repo with no commits
	hash := GetCurrentCommitHash()
	if hash != "" {
		t.Errorf("expected empty string for empty git repo, got: %s", hash)
	}
}

View File

@@ -38,7 +38,7 @@ type HaikuClient struct {
}
// NewHaikuClient creates a new Haiku API client. Env var ANTHROPIC_API_KEY takes precedence over explicit apiKey.
func NewHaikuClient(apiKey string) (*HaikuClient, error) {
func NewHaikuClient(apiKey string, opts ...option.RequestOption) (*HaikuClient, error) {
envKey := os.Getenv("ANTHROPIC_API_KEY")
if envKey != "" {
apiKey = envKey
@@ -47,7 +47,10 @@ func NewHaikuClient(apiKey string) (*HaikuClient, error) {
return nil, fmt.Errorf("%w: set ANTHROPIC_API_KEY environment variable or provide via config", ErrAPIKeyRequired)
}
client := anthropic.NewClient(option.WithAPIKey(apiKey))
// Build options: API key first, then any additional options (for testing)
allOpts := []option.RequestOption{option.WithAPIKey(apiKey)}
allOpts = append(allOpts, opts...)
client := anthropic.NewClient(allOpts...)
tier1Tmpl, err := template.New("tier1").Parse(tier1PromptTemplate)
if err != nil {

View File

@@ -2,11 +2,18 @@ package compact
import (
"context"
"encoding/json"
"errors"
"net"
"net/http"
"net/http/httptest"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
"github.com/steveyegge/beads/internal/types"
)
@@ -189,3 +196,399 @@ func TestIsRetryable(t *testing.T) {
})
}
}
// mockTimeoutError is a test double satisfying net.Error so retry
// classification can be exercised with and without the timeout condition.
type mockTimeoutError struct {
	timeout bool
}

// Error returns a fixed message identifying the mock.
func (m *mockTimeoutError) Error() string {
	return "mock timeout error"
}

// Timeout reports the configured timeout flag.
func (m *mockTimeoutError) Timeout() bool {
	return m.timeout
}

// Temporary always reports false; it exists only to satisfy net.Error.
func (m *mockTimeoutError) Temporary() bool {
	return false
}
// TestIsRetryable_NetworkTimeout verifies that isRetryable treats
// net.Error timeouts as retryable and non-timeout net.Errors as not.
func TestIsRetryable_NetworkTimeout(t *testing.T) {
	// Network timeout should be retryable
	timeoutErr := &mockTimeoutError{timeout: true}
	if !isRetryable(timeoutErr) {
		t.Error("network timeout error should be retryable")
	}
	// Non-timeout network error should not be retryable
	nonTimeoutErr := &mockTimeoutError{timeout: false}
	if isRetryable(nonTimeoutErr) {
		t.Error("non-timeout network error should not be retryable")
	}
}
// TestIsRetryable_APIErrors table-tests isRetryable over Anthropic API
// status codes: 429 and 5xx retryable, 4xx client errors not.
func TestIsRetryable_APIErrors(t *testing.T) {
	tests := []struct {
		name       string
		statusCode int
		expected   bool
	}{
		{"rate limit 429", 429, true},
		{"server error 500", 500, true},
		{"server error 502", 502, true},
		{"server error 503", 503, true},
		{"bad request 400", 400, false},
		{"unauthorized 401", 401, false},
		{"forbidden 403", 403, false},
		{"not found 404", 404, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			apiErr := &anthropic.Error{StatusCode: tt.statusCode}
			got := isRetryable(apiErr)
			if got != tt.expected {
				t.Errorf("isRetryable(API error %d) = %v, want %v", tt.statusCode, got, tt.expected)
			}
		})
	}
}
// createMockAnthropicServer creates a mock server that returns Anthropic API responses
func createMockAnthropicServer(handler http.HandlerFunc) *httptest.Server {
return httptest.NewServer(handler)
}
// mockAnthropicResponse builds a minimal, valid Anthropic Messages API
// response body containing a single text content block with the given
// text, fixed token usage, and an end_turn stop reason.
func mockAnthropicResponse(text string) map[string]interface{} {
	resp := map[string]interface{}{
		"id":            "msg_test123",
		"type":          "message",
		"role":          "assistant",
		"model":         "claude-3-5-haiku-20241022",
		"stop_reason":   "end_turn",
		"stop_sequence": nil,
	}
	resp["usage"] = map[string]int{
		"input_tokens":  100,
		"output_tokens": 50,
	}
	block := map[string]interface{}{
		"type": "text",
		"text": text,
	}
	resp["content"] = []map[string]interface{}{block}
	return resp
}
// TestSummarizeTier1_MockAPI verifies that SummarizeTier1 POSTs to the
// /messages endpoint and returns the text of the mocked response.
func TestSummarizeTier1_MockAPI(t *testing.T) {
	// Create mock server that returns a valid summary
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		// Verify request method and path
		if r.Method != "POST" {
			t.Errorf("expected POST, got %s", r.Method)
		}
		if !strings.HasSuffix(r.URL.Path, "/messages") {
			t.Errorf("expected /messages path, got %s", r.URL.Path)
		}
		w.Header().Set("Content-Type", "application/json")
		resp := mockAnthropicResponse("**Summary:** Fixed auth bug.\n\n**Key Decisions:** Used OAuth.\n\n**Resolution:** Complete.")
		json.NewEncoder(w).Encode(resp)
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-1",
		Title:       "Fix authentication bug",
		Description: "OAuth login was broken",
		Status:      types.StatusClosed,
	}
	ctx := context.Background()
	result, err := client.SummarizeTier1(ctx, issue)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !strings.Contains(result, "**Summary:**") {
		t.Error("result should contain Summary section")
	}
	if !strings.Contains(result, "Fixed auth bug") {
		t.Error("result should contain summary text")
	}
}
// TestSummarizeTier1_APIError verifies that a 400 response surfaces as a
// non-retryable error from SummarizeTier1.
func TestSummarizeTier1_APIError(t *testing.T) {
	// Create mock server that returns an error
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadRequest)
		json.NewEncoder(w).Encode(map[string]interface{}{
			"type": "error",
			"error": map[string]interface{}{
				"type":    "invalid_request_error",
				"message": "Invalid API key",
			},
		})
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-1",
		Title:       "Test",
		Description: "Test",
		Status:      types.StatusClosed,
	}
	ctx := context.Background()
	_, err = client.SummarizeTier1(ctx, issue)
	if err == nil {
		t.Fatal("expected error from API")
	}
	if !strings.Contains(err.Error(), "non-retryable") {
		t.Errorf("expected non-retryable error, got: %v", err)
	}
}
// TestCallWithRetry_RetriesOn429 verifies that callWithRetry retries on
// HTTP 429 responses and succeeds once the server stops rate-limiting.
func TestCallWithRetry_RetriesOn429(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		attempt := atomic.AddInt32(&attempts, 1)
		if attempt <= 2 {
			// First two attempts return 429
			w.WriteHeader(http.StatusTooManyRequests)
			json.NewEncoder(w).Encode(map[string]interface{}{
				"type": "error",
				"error": map[string]interface{}{
					"type":    "rate_limit_error",
					"message": "Rate limited",
				},
			})
			return
		}
		// Third attempt succeeds
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(mockAnthropicResponse("Success after retries"))
	})
	defer server.Close()
	// Disable SDK's internal retries to test our retry logic only
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	// Use short backoff for testing
	client.initialBackoff = 10 * time.Millisecond
	ctx := context.Background()
	result, err := client.callWithRetry(ctx, "test prompt")
	if err != nil {
		t.Fatalf("expected success after retries, got: %v", err)
	}
	if result != "Success after retries" {
		t.Errorf("expected 'Success after retries', got: %s", result)
	}
	// Counter is written from server handler goroutines; load it
	// atomically so the final check is race-free under -race.
	if got := atomic.LoadInt32(&attempts); got != 3 {
		t.Errorf("expected 3 attempts, got: %d", got)
	}
}
// TestCallWithRetry_RetriesOn500 verifies that a single 500 response is
// retried and the second, successful response is returned.
func TestCallWithRetry_RetriesOn500(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		attempt := atomic.AddInt32(&attempts, 1)
		if attempt == 1 {
			// First attempt returns 500
			w.WriteHeader(http.StatusInternalServerError)
			json.NewEncoder(w).Encode(map[string]interface{}{
				"type": "error",
				"error": map[string]interface{}{
					"type":    "api_error",
					"message": "Internal server error",
				},
			})
			return
		}
		// Second attempt succeeds
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(mockAnthropicResponse("Recovered from 500"))
	})
	defer server.Close()
	// Disable SDK's internal retries to test our retry logic only
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	client.initialBackoff = 10 * time.Millisecond
	ctx := context.Background()
	result, err := client.callWithRetry(ctx, "test prompt")
	if err != nil {
		t.Fatalf("expected success after retry, got: %v", err)
	}
	if result != "Recovered from 500" {
		t.Errorf("expected 'Recovered from 500', got: %s", result)
	}
}
// TestCallWithRetry_ExhaustsRetries verifies that persistent 429s exhaust
// the configured retries (initial attempt + maxRetries) and return a
// "failed after" error.
func TestCallWithRetry_ExhaustsRetries(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&attempts, 1)
		// Always return 429
		w.WriteHeader(http.StatusTooManyRequests)
		json.NewEncoder(w).Encode(map[string]interface{}{
			"type": "error",
			"error": map[string]interface{}{
				"type":    "rate_limit_error",
				"message": "Rate limited",
			},
		})
	})
	defer server.Close()
	// Disable SDK's internal retries to test our retry logic only
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	client.initialBackoff = 1 * time.Millisecond
	client.maxRetries = 2
	ctx := context.Background()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected error after exhausting retries")
	}
	if !strings.Contains(err.Error(), "failed after") {
		t.Errorf("expected 'failed after' error, got: %v", err)
	}
	// Initial attempt + 2 retries = 3 total. Counter is written from
	// server handler goroutines; load atomically to stay race-free.
	if got := atomic.LoadInt32(&attempts); got != 3 {
		t.Errorf("expected 3 attempts, got: %d", got)
	}
}
// TestCallWithRetry_NoRetryOn400 verifies that a 400 response is treated
// as non-retryable: exactly one request is made and the error says so.
func TestCallWithRetry_NoRetryOn400(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&attempts, 1)
		w.WriteHeader(http.StatusBadRequest)
		json.NewEncoder(w).Encode(map[string]interface{}{
			"type": "error",
			"error": map[string]interface{}{
				"type":    "invalid_request_error",
				"message": "Bad request",
			},
		})
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	client.initialBackoff = 10 * time.Millisecond
	ctx := context.Background()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected error for bad request")
	}
	if !strings.Contains(err.Error(), "non-retryable") {
		t.Errorf("expected non-retryable error, got: %v", err)
	}
	// Counter is written from server handler goroutines; load atomically
	// so the final check is race-free under -race.
	if got := atomic.LoadInt32(&attempts); got != 1 {
		t.Errorf("expected only 1 attempt for non-retryable error, got: %d", got)
	}
}
// TestCallWithRetry_ContextTimeout verifies that callWithRetry honors
// context deadlines: a 50ms deadline against a 200ms-slow server must
// fail with context.DeadlineExceeded.
func TestCallWithRetry_ContextTimeout(t *testing.T) {
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		// Delay longer than context timeout
		time.Sleep(200 * time.Millisecond)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(mockAnthropicResponse("too late"))
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected timeout error")
	}
	if !errors.Is(err, context.DeadlineExceeded) {
		t.Errorf("expected context.DeadlineExceeded, got: %v", err)
	}
}
// TestCallWithRetry_EmptyContent verifies that a well-formed response
// with an empty content array produces a "no content blocks" error.
func TestCallWithRetry_EmptyContent(t *testing.T) {
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Return response with empty content array
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":      "msg_test123",
			"type":    "message",
			"role":    "assistant",
			"model":   "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{},
		})
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	ctx := context.Background()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected error for empty content")
	}
	if !strings.Contains(err.Error(), "no content blocks") {
		t.Errorf("expected 'no content blocks' error, got: %v", err)
	}
}
// TestBytesWriter verifies that bytesWriter accumulates written bytes and
// reports the full length of each write.
func TestBytesWriter(t *testing.T) {
	w := &bytesWriter{}
	n, err := w.Write([]byte("hello"))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if n != 5 {
		t.Errorf("expected n=5, got %d", n)
	}
	n, err = w.Write([]byte(" world"))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if n != 6 {
		t.Errorf("expected n=6, got %d", n)
	}
	if string(w.buf) != "hello world" {
		t.Errorf("expected 'hello world', got '%s'", string(w.buf))
	}
}
// Compile-time check: mockTimeoutError must satisfy net.Error so the
// retry-classification tests exercise the real interface path.
var _ net.Error = (*mockTimeoutError)(nil)

View File

@@ -306,6 +306,43 @@ func ResolveExternalProjectPath(projectName string) string {
return path
}
// HookEntry represents a single config-based hook loaded from
// .beads/config.yaml (e.g. an item under hooks.on_close).
// Command is required: entries without a command are dropped by GetCloseHooks.
type HookEntry struct {
	Command string `yaml:"command" mapstructure:"command"` // Shell command to run
	Name    string `yaml:"name" mapstructure:"name"`       // Optional display name (used in warnings)
}
// GetCloseHooks returns the on_close hooks from config.
// It reads hooks.on_close as a list of {command, name} mappings, skipping
// any entry without a command. Returns nil when config is uninitialized
// (v == nil) or no hooks are defined.
func GetCloseHooks() []HookEntry {
	if v == nil {
		return nil
	}
	raw := v.Get("hooks.on_close")
	if raw == nil {
		return nil
	}
	rawSlice, ok := raw.([]interface{})
	if !ok {
		return nil
	}
	var hooks []HookEntry
	for _, item := range rawSlice {
		entry := HookEntry{}
		switch m := item.(type) {
		case map[string]interface{}:
			if cmd, ok := m["command"].(string); ok {
				entry.Command = cmd
			}
			if name, ok := m["name"].(string); ok {
				entry.Name = name
			}
		case map[interface{}]interface{}:
			// Some YAML decoders produce interface{}-keyed maps; without
			// this case such hooks would be silently dropped.
			if cmd, ok := m["command"].(string); ok {
				entry.Command = cmd
			}
			if name, ok := m["name"].(string); ok {
				entry.Name = name
			}
		}
		if entry.Command != "" {
			hooks = append(hooks, entry)
		}
	}
	return hooks
}
// GetIdentity resolves the user's identity for messaging.
// Priority chain:
// 1. flagValue (if non-empty, from --identity flag)

View File

@@ -1,245 +0,0 @@
package config
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
)
// YamlOnlyKeys are configuration keys that must be stored in config.yaml
// rather than the SQLite database. These are "startup" settings that are
// read before the database is opened.
//
// This fixes GH#536: users were confused when `bd config set no-db true`
// appeared to succeed but had no effect (because no-db is read from yaml
// at startup, not from SQLite).
//
// NOTE: nested namespaces (routing., sync., git., etc.) are additionally
// matched by prefix in IsYamlOnlyKey; keep the two in sync.
var YamlOnlyKeys = map[string]bool{
	// Bootstrap flags (affect how bd starts)
	"no-db":             true,
	"no-daemon":         true,
	"no-auto-flush":     true,
	"no-auto-import":    true,
	"json":              true,
	"auto-start-daemon": true,
	// Database and identity
	"db":       true,
	"actor":    true,
	"identity": true,
	// Timing settings
	"flush-debounce":       true,
	"lock-timeout":         true,
	"remote-sync-interval": true,
	// Git settings
	"git.author":      true,
	"git.no-gpg-sign": true,
	"no-push":         true,
	// Sync settings
	"sync-branch": true,
	"sync.branch": true,
	"sync.require_confirmation_on_mass_delete": true,
	// Routing settings
	"routing.mode":        true,
	"routing.default":     true,
	"routing.maintainer":  true,
	"routing.contributor": true,
	// Create command settings
	"create.require-description": true,
}
// IsYamlOnlyKey returns true if the given key should be stored in config.yaml
// rather than the SQLite database. A key qualifies either by exact membership
// in YamlOnlyKeys or by living under one of the yaml-only namespaces.
func IsYamlOnlyKey(key string) bool {
	if YamlOnlyKeys[key] {
		return true
	}
	// Nested keys under these namespaces always live in config.yaml.
	for _, ns := range []string{"routing.", "sync.", "git.", "directory.", "repos.", "external_projects."} {
		if strings.HasPrefix(key, ns) {
			return true
		}
	}
	return false
}
// SetYamlConfig sets a configuration value in the project's config.yaml file.
// It locates the nearest .beads/config.yaml, rewrites (or appends) the key —
// including uncommenting a commented-out occurrence — and writes the result
// back in place.
func SetYamlConfig(key, value string) error {
	path, err := findProjectConfigYaml()
	if err != nil {
		return err
	}
	data, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("failed to read config.yaml: %w", err)
	}
	updated, err := updateYamlKey(string(data), key, value)
	if err != nil {
		return err
	}
	if err := os.WriteFile(path, []byte(updated), 0644); err != nil {
		return fmt.Errorf("failed to write config.yaml: %w", err)
	}
	return nil
}
// GetYamlConfig gets a configuration value from config.yaml.
// Returns empty string if key is not found or is commented out.
// Reads through the package-level config instance v (presumably viper —
// confirm), so it returns "" when config has not been initialized.
func GetYamlConfig(key string) string {
	if v == nil {
		return ""
	}
	return v.GetString(key)
}
// findProjectConfigYaml finds the project's .beads/config.yaml file.
func findProjectConfigYaml() (string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get working directory: %w", err)
}
// Walk up parent directories to find .beads/config.yaml
for dir := cwd; dir != filepath.Dir(dir); dir = filepath.Dir(dir) {
configPath := filepath.Join(dir, ".beads", "config.yaml")
if _, err := os.Stat(configPath); err == nil {
return configPath, nil
}
}
return "", fmt.Errorf("no .beads/config.yaml found (run 'bd init' first)")
}
// updateYamlKey updates a key in yaml content, handling commented-out keys.
// If the key exists (commented or not), it is replaced in place, uncommented,
// with its original leading indentation preserved. If the key doesn't exist,
// it is appended at the end, preceded by a blank line when needed.
func updateYamlKey(content, key, value string) (string, error) {
	// Format the value appropriately (booleans, numbers, durations, quoting).
	formattedValue := formatYamlValue(value)
	newLine := fmt.Sprintf("%s: %s", key, formattedValue)
	// Matches "key: value" or "# key: value" with optional leading whitespace.
	keyPattern := regexp.MustCompile(`^(\s*)(#\s*)?` + regexp.QuoteMeta(key) + `\s*:`)
	found := false
	var result []string
	scanner := bufio.NewScanner(strings.NewReader(content))
	for scanner.Scan() {
		line := scanner.Text()
		if keyPattern.MatchString(line) {
			// Found the key - replace with the new value (uncommented),
			// preserving the original leading whitespace.
			matches := keyPattern.FindStringSubmatch(line)
			indent := ""
			if len(matches) > 1 {
				indent = matches[1]
			}
			result = append(result, indent+newLine)
			found = true
		} else {
			result = append(result, line)
		}
	}
	// A scanner failure (e.g. a line exceeding bufio.Scanner's buffer limit)
	// would otherwise silently truncate the config file; fail loudly instead.
	if err := scanner.Err(); err != nil {
		return "", fmt.Errorf("failed to scan config.yaml: %w", err)
	}
	if !found {
		// Key not found - append at end, separated by a blank line unless
		// the content already ends with one.
		if len(result) > 0 && result[len(result)-1] != "" {
			result = append(result, "")
		}
		result = append(result, newLine)
	}
	return strings.Join(result, "\n"), nil
}
// formatYamlValue renders a raw string value in a YAML-appropriate form:
// booleans are lowercased, numbers and durations pass through unchanged,
// and strings containing YAML metacharacters (or edge whitespace) are quoted.
func formatYamlValue(value string) string {
	switch lower := strings.ToLower(value); {
	case lower == "true" || lower == "false":
		// Boolean values, normalized to lowercase.
		return lower
	case isNumeric(value) || isDuration(value):
		// Numbers and durations (like "30s", "5m") pass through as-is.
		return value
	case needsQuoting(value):
		return fmt.Sprintf("%q", value)
	default:
		return value
	}
}
// isNumeric reports whether s is a plain decimal number: an optional leading
// '-', at most one '.', and at least one digit. The previous version accepted
// digit-free strings ("-", ".", "-.") and multi-dot strings ("1.2.3").
func isNumeric(s string) bool {
	if s == "" {
		return false
	}
	digits := 0
	dots := 0
	for i, c := range s {
		switch {
		case c == '-':
			// Sign is only valid in the first position.
			if i != 0 {
				return false
			}
		case c == '.':
			dots++
			if dots > 1 {
				return false
			}
		case c >= '0' && c <= '9':
			digits++
		default:
			return false
		}
	}
	// Require at least one digit so "-", ".", "-." are rejected.
	return digits > 0
}
// isDuration reports whether s looks like a simple duration literal:
// a numeric prefix followed by a single unit suffix of 's', 'm', or 'h'.
func isDuration(s string) bool {
	if len(s) < 2 {
		return false
	}
	switch s[len(s)-1] {
	case 's', 'm', 'h':
		return isNumeric(s[:len(s)-1])
	default:
		return false
	}
}
// needsQuoting reports whether a string must be quoted to survive YAML
// parsing: it contains a YAML metacharacter, or carries leading/trailing
// whitespace that bare YAML scalars would strip.
func needsQuoting(s string) bool {
	const special = ":#[]{},&*!|>'\"%@`"
	if strings.ContainsAny(s, special) {
		return true
	}
	return strings.TrimSpace(s) != s
}

View File

@@ -1,206 +0,0 @@
package config
import (
"os"
"path/filepath"
"strings"
"testing"
)
// TestIsYamlOnlyKey covers the three classification paths: exact matches in
// YamlOnlyKeys, prefix-matched namespaces (routing., sync., etc.), and keys
// that belong in SQLite (should return false).
func TestIsYamlOnlyKey(t *testing.T) {
	tests := []struct {
		key      string
		expected bool
	}{
		// Exact matches
		{"no-db", true},
		{"no-daemon", true},
		{"no-auto-flush", true},
		{"json", true},
		{"auto-start-daemon", true},
		{"flush-debounce", true},
		{"git.author", true},
		{"git.no-gpg-sign", true},
		// Prefix matches
		{"routing.mode", true},
		{"routing.custom-key", true},
		{"sync.branch", true},
		{"sync.require_confirmation_on_mass_delete", true},
		{"directory.labels", true},
		{"repos.primary", true},
		{"external_projects.beads", true},
		// SQLite keys (should return false)
		{"jira.url", false},
		{"jira.project", false},
		{"linear.api_key", false},
		{"github.org", false},
		{"custom.setting", false},
		{"status.custom", false},
		{"issue_prefix", false},
	}
	for _, tt := range tests {
		t.Run(tt.key, func(t *testing.T) {
			got := IsYamlOnlyKey(tt.key)
			if got != tt.expected {
				t.Errorf("IsYamlOnlyKey(%q) = %v, want %v", tt.key, got, tt.expected)
			}
		})
	}
}
// TestUpdateYamlKey covers the in-place rewrite logic: uncommenting a
// commented key, replacing a live key, appending a missing key (with a
// separating blank line), preserving indentation, and value formatting
// (plain strings, durations, quoting of YAML metacharacters).
func TestUpdateYamlKey(t *testing.T) {
	tests := []struct {
		name     string
		content  string
		key      string
		value    string
		expected string
	}{
		{
			name:     "update commented key",
			content:  "# no-db: false\nother: value",
			key:      "no-db",
			value:    "true",
			expected: "no-db: true\nother: value",
		},
		{
			name:     "update existing key",
			content:  "no-db: false\nother: value",
			key:      "no-db",
			value:    "true",
			expected: "no-db: true\nother: value",
		},
		{
			name:     "add new key",
			content:  "other: value",
			key:      "no-db",
			value:    "true",
			expected: "other: value\n\nno-db: true",
		},
		{
			name:     "preserve indentation",
			content:  "  # no-db: false\nother: value",
			key:      "no-db",
			value:    "true",
			expected: "  no-db: true\nother: value",
		},
		{
			name:     "handle string value",
			content:  "# actor: \"\"\nother: value",
			key:      "actor",
			value:    "steve",
			expected: "actor: steve\nother: value",
		},
		{
			name:     "handle duration value",
			content:  "# flush-debounce: \"5s\"",
			key:      "flush-debounce",
			value:    "30s",
			expected: "flush-debounce: 30s",
		},
		{
			name:     "quote special characters",
			content:  "other: value",
			key:      "actor",
			value:    "user: name",
			expected: "other: value\n\nactor: \"user: name\"",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := updateYamlKey(tt.content, tt.key, tt.value)
			if err != nil {
				t.Fatalf("updateYamlKey() error = %v", err)
			}
			if got != tt.expected {
				t.Errorf("updateYamlKey() =\n%q\nwant:\n%q", got, tt.expected)
			}
		})
	}
}
// TestFormatYamlValue pins the YAML rendering rules: booleans lowercase,
// numbers and durations pass through, strings with metacharacters or
// leading whitespace get quoted, plain strings are untouched.
func TestFormatYamlValue(t *testing.T) {
	tests := []struct {
		value    string
		expected string
	}{
		{"true", "true"},
		{"false", "false"},
		{"TRUE", "true"},
		{"FALSE", "false"},
		{"123", "123"},
		{"3.14", "3.14"},
		{"30s", "30s"},
		{"5m", "5m"},
		{"simple", "simple"},
		{"has space", "has space"},
		{"has:colon", "\"has:colon\""},
		{"has#hash", "\"has#hash\""},
		{" leading", "\" leading\""},
	}
	for _, tt := range tests {
		t.Run(tt.value, func(t *testing.T) {
			got := formatYamlValue(tt.value)
			if got != tt.expected {
				t.Errorf("formatYamlValue(%q) = %q, want %q", tt.value, got, tt.expected)
			}
		})
	}
}
// TestSetYamlConfig is an end-to-end check of SetYamlConfig against a real
// temp .beads/config.yaml: a commented-out key must be uncommented and
// updated while unrelated settings are preserved.
// NOTE(review): the test chdirs into the temp project (SetYamlConfig locates
// the config from the cwd), so it must not run in parallel with other
// cwd-sensitive tests.
func TestSetYamlConfig(t *testing.T) {
	// Create a temp directory with .beads/config.yaml
	tmpDir, err := os.MkdirTemp("", "beads-yaml-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	configPath := filepath.Join(beadsDir, "config.yaml")
	initialConfig := `# Beads Config
# no-db: false
other-setting: value
`
	if err := os.WriteFile(configPath, []byte(initialConfig), 0644); err != nil {
		t.Fatalf("Failed to write config.yaml: %v", err)
	}
	// Change to temp directory for the test
	oldWd, _ := os.Getwd()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	defer os.Chdir(oldWd)
	// Test SetYamlConfig
	if err := SetYamlConfig("no-db", "true"); err != nil {
		t.Fatalf("SetYamlConfig() error = %v", err)
	}
	// Read back and verify
	content, err := os.ReadFile(configPath)
	if err != nil {
		t.Fatalf("Failed to read config.yaml: %v", err)
	}
	contentStr := string(content)
	if !strings.Contains(contentStr, "no-db: true") {
		t.Errorf("config.yaml should contain 'no-db: true', got:\n%s", contentStr)
	}
	if strings.Contains(contentStr, "# no-db") {
		t.Errorf("config.yaml should not have commented no-db, got:\n%s", contentStr)
	}
	if !strings.Contains(contentStr, "other-setting: value") {
		t.Errorf("config.yaml should preserve other settings, got:\n%s", contentStr)
	}
}

View File

@@ -0,0 +1,66 @@
// Package hooks provides a hook system for extensibility.
// This file implements config-based hooks defined in .beads/config.yaml.
package hooks
import (
"context"
"fmt"
"os"
"os/exec"
"strconv"
"time"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/types"
)
// RunConfigCloseHooks executes all on_close hooks from config.yaml.
// Hook commands receive issue data via environment variables:
//   - BEAD_ID: Issue ID (e.g., bd-abc1)
//   - BEAD_TITLE: Issue title
//   - BEAD_TYPE: Issue type (task, bug, feature, etc.)
//   - BEAD_PRIORITY: Priority (0-4)
//   - BEAD_CLOSE_REASON: Close reason if provided
//
// Hooks run synchronously (each command finishes before the next starts)
// but failures are logged as warnings and don't block the close operation.
// NOTE(review): commands are executed via "sh -c", so a POSIX shell must be
// on PATH — Windows behavior is unverified.
func RunConfigCloseHooks(ctx context.Context, issue *types.Issue) {
	hooks := config.GetCloseHooks()
	if len(hooks) == 0 {
		return
	}
	// Build the environment once; every hook sees the same snapshot of the
	// issue fields plus the parent process environment.
	env := append(os.Environ(),
		"BEAD_ID="+issue.ID,
		"BEAD_TITLE="+issue.Title,
		"BEAD_TYPE="+string(issue.IssueType),
		"BEAD_PRIORITY="+strconv.Itoa(issue.Priority),
		"BEAD_CLOSE_REASON="+issue.CloseReason,
	)
	// Per-hook budget: a hung hook cannot stall the close for longer than this.
	timeout := 10 * time.Second
	for _, hook := range hooks {
		hookCtx, cancel := context.WithTimeout(ctx, timeout)
		// #nosec G204 -- command comes from user's config file
		cmd := exec.CommandContext(hookCtx, "sh", "-c", hook.Command)
		cmd.Env = env
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		err := cmd.Run()
		// cancel() is called inline (not deferred) so each iteration releases
		// its timeout context promptly instead of accumulating until return.
		cancel()
		if err != nil {
			// Log warning but don't fail the close; fall back to the raw
			// command text when the hook has no display name.
			name := hook.Name
			if name == "" {
				name = hook.Command
			}
			fmt.Fprintf(os.Stderr, "Warning: close hook %q failed: %v\n", name, err)
		}
	}
}

View File

@@ -0,0 +1,271 @@
package hooks
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/types"
)
// TestRunConfigCloseHooks_NoHooks verifies that RunConfigCloseHooks is a
// safe no-op when the project config defines no hooks at all.
func TestRunConfigCloseHooks_NoHooks(t *testing.T) {
	// Create a temp dir without any config
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config so it picks up the empty temp project
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{ID: "bd-test", Title: "Test Issue"}
	ctx := context.Background()
	// Should not panic with no hooks
	RunConfigCloseHooks(ctx, issue)
}
// TestRunConfigCloseHooks_ExecutesCommand verifies that a configured
// on_close hook actually runs and sees BEAD_ID/BEAD_TITLE in its environment.
func TestRunConfigCloseHooks_ExecutesCommand(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	outputFile := filepath.Join(tmpDir, "hook_output.txt")
	// Create config.yaml with a close hook
	// NOTE(review): outputFile is interpolated unquoted into the shell
	// command; fine for t.TempDir() paths, but would break on paths with
	// spaces — confirm if this ever runs outside the test harness.
	configContent := `hooks:
  on_close:
    - name: test-hook
      command: echo "$BEAD_ID $BEAD_TITLE" > ` + outputFile + `
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-abc1",
		Title:       "Test Issue",
		IssueType:   types.TypeBug,
		Priority:    1,
		CloseReason: "Fixed",
	}
	ctx := context.Background()
	RunConfigCloseHooks(ctx, issue)
	// Hooks run synchronously (cmd.Run), so this sleep is defensive slack
	// for filesystem visibility rather than a real wait — likely removable.
	time.Sleep(100 * time.Millisecond)
	// Verify output
	output, err := os.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Failed to read output file: %v", err)
	}
	expected := "bd-abc1 Test Issue"
	if !strings.Contains(string(output), expected) {
		t.Errorf("Hook output = %q, want to contain %q", string(output), expected)
	}
}
// TestRunConfigCloseHooks_EnvVars verifies that every documented BEAD_*
// environment variable (ID, TYPE, PRIORITY, CLOSE_REASON) reaches the hook.
func TestRunConfigCloseHooks_EnvVars(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	outputFile := filepath.Join(tmpDir, "env_output.txt")
	// Create config.yaml with a close hook that outputs all env vars
	configContent := `hooks:
  on_close:
    - name: env-check
      command: echo "ID=$BEAD_ID TYPE=$BEAD_TYPE PRIORITY=$BEAD_PRIORITY REASON=$BEAD_CLOSE_REASON" > ` + outputFile + `
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-xyz9",
		Title:       "Bug Fix",
		IssueType:   types.TypeFeature,
		Priority:    2,
		CloseReason: "Completed",
	}
	ctx := context.Background()
	RunConfigCloseHooks(ctx, issue)
	// Hooks run synchronously (cmd.Run); sleep is defensive slack only.
	time.Sleep(100 * time.Millisecond)
	// Verify output contains all env vars
	output, err := os.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Failed to read output file: %v", err)
	}
	outputStr := string(output)
	checks := []string{
		"ID=bd-xyz9",
		"TYPE=feature",
		"PRIORITY=2",
		"REASON=Completed",
	}
	for _, check := range checks {
		if !strings.Contains(outputStr, check) {
			t.Errorf("Hook output = %q, want to contain %q", outputStr, check)
		}
	}
}
// TestRunConfigCloseHooks_HookFailure verifies the failure-isolation
// contract: a hook that exits non-zero must neither panic nor prevent
// subsequent hooks from running.
func TestRunConfigCloseHooks_HookFailure(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	successFile := filepath.Join(tmpDir, "success.txt")
	// Create config.yaml with a failing hook followed by a succeeding one
	configContent := `hooks:
  on_close:
    - name: failing-hook
      command: exit 1
    - name: success-hook
      command: echo "success" > ` + successFile + `
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{ID: "bd-test", Title: "Test"}
	ctx := context.Background()
	// Should not panic even with failing hook
	RunConfigCloseHooks(ctx, issue)
	// Hooks run synchronously (cmd.Run); sleep is defensive slack only.
	time.Sleep(100 * time.Millisecond)
	// Verify second hook still ran
	output, err := os.ReadFile(successFile)
	if err != nil {
		t.Fatalf("Second hook should have run despite first failing: %v", err)
	}
	if !strings.Contains(string(output), "success") {
		t.Error("Second hook did not produce expected output")
	}
}
// TestGetCloseHooks verifies config parsing of hooks.on_close: named hooks,
// an unnamed hook (name defaults to empty), ordering, and field values.
func TestGetCloseHooks(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Create config.yaml with multiple hooks
	configContent := `hooks:
  on_close:
    - name: first-hook
      command: echo first
    - name: second-hook
      command: echo second
    - command: echo unnamed
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	hooks := config.GetCloseHooks()
	if len(hooks) != 3 {
		t.Fatalf("Expected 3 hooks, got %d", len(hooks))
	}
	if hooks[0].Name != "first-hook" || hooks[0].Command != "echo first" {
		t.Errorf("First hook = %+v, want name=first-hook, command=echo first", hooks[0])
	}
	if hooks[1].Name != "second-hook" || hooks[1].Command != "echo second" {
		t.Errorf("Second hook = %+v, want name=second-hook, command=echo second", hooks[1])
	}
	if hooks[2].Name != "" || hooks[2].Command != "echo unnamed" {
		t.Errorf("Third hook = %+v, want name='', command=echo unnamed", hooks[2])
	}
}

View File

@@ -231,13 +231,8 @@ func handlePrefixMismatch(ctx context.Context, sqliteStore *sqlite.SQLiteStorage
var tombstonesToRemove []string
for _, issue := range issues {
// GH#422: Check if issue ID starts with configured prefix directly
// rather than extracting/guessing. This handles multi-hyphen prefixes
// like "asianops-audit-" correctly.
prefixMatches := strings.HasPrefix(issue.ID, configuredPrefix+"-")
if !prefixMatches {
// Extract prefix for error reporting (best effort)
prefix := utils.ExtractIssuePrefix(issue.ID)
prefix := utils.ExtractIssuePrefix(issue.ID)
if !allowedPrefixes[prefix] {
if issue.IsTombstone() {
tombstoneMismatchPrefixes[prefix]++
tombstonesToRemove = append(tombstonesToRemove, issue.ID)
@@ -572,11 +567,8 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues
updates["acceptance_criteria"] = incoming.AcceptanceCriteria
updates["notes"] = incoming.Notes
updates["closed_at"] = incoming.ClosedAt
// Pinned field (bd-phtv): Only update if explicitly true in JSONL
// (omitempty means false values are absent, so false = don't change existing)
if incoming.Pinned {
updates["pinned"] = incoming.Pinned
}
// Pinned field (bd-7h5)
updates["pinned"] = incoming.Pinned
if incoming.Assignee != "" {
updates["assignee"] = incoming.Assignee
@@ -670,11 +662,8 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues
updates["acceptance_criteria"] = incoming.AcceptanceCriteria
updates["notes"] = incoming.Notes
updates["closed_at"] = incoming.ClosedAt
// Pinned field (bd-phtv): Only update if explicitly true in JSONL
// (omitempty means false values are absent, so false = don't change existing)
if incoming.Pinned {
updates["pinned"] = incoming.Pinned
}
// Pinned field (bd-7h5)
updates["pinned"] = incoming.Pinned
if incoming.Assignee != "" {
updates["assignee"] = incoming.Assignee

View File

@@ -1479,151 +1479,7 @@ func TestImportMixedPrefixMismatch(t *testing.T) {
}
}
// TestImportPreservesPinnedField tests that importing from JSONL (which has omitempty
// for the pinned field) does NOT reset an existing pinned=true issue to pinned=false.
//
// Bug scenario (bd-phtv):
//  1. User runs `bd pin <issue-id>` which sets pinned=true in SQLite
//  2. Any subsequent bd command (e.g., `bd show`) triggers auto-import from JSONL
//  3. JSONL has pinned=false due to omitempty (field absent means false in Go)
//  4. Import overwrites pinned=true with pinned=false, losing the pinned state
//
// Expected: Import should preserve existing pinned=true when incoming pinned=false
// (since false just means "field was absent in JSONL due to omitempty").
func TestImportPreservesPinnedField(t *testing.T) {
	ctx := context.Background()
	tmpDB := t.TempDir() + "/test.db"
	store, err := sqlite.New(context.Background(), tmpDB)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer store.Close()
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Create an issue with pinned=true (simulates `bd pin` command)
	pinnedIssue := &types.Issue{
		ID:        "test-abc123",
		Title:     "Pinned Issue",
		Status:    types.StatusOpen,
		Priority:  2,
		IssueType: types.TypeTask,
		Pinned:    true, // This is set by `bd pin`
		CreatedAt: time.Now().Add(-time.Hour),
		UpdatedAt: time.Now().Add(-time.Hour),
	}
	pinnedIssue.ContentHash = pinnedIssue.ComputeContentHash()
	if err := store.CreateIssue(ctx, pinnedIssue, "test-setup"); err != nil {
		t.Fatalf("Failed to create pinned issue: %v", err)
	}
	// Verify issue is pinned before import (guards against a broken setup)
	before, err := store.GetIssue(ctx, "test-abc123")
	if err != nil {
		t.Fatalf("Failed to get issue before import: %v", err)
	}
	if !before.Pinned {
		t.Fatal("Issue should be pinned before import")
	}
	// Import same issue from JSONL (simulates auto-import after git pull)
	// JSONL has pinned=false because omitempty means absent fields are false
	importedIssue := &types.Issue{
		ID:        "test-abc123",
		Title:     "Pinned Issue", // Same content
		Status:    types.StatusOpen,
		Priority:  2,
		IssueType: types.TypeTask,
		Pinned:    false, // This is what JSONL deserialization produces due to omitempty
		CreatedAt: time.Now().Add(-time.Hour),
		UpdatedAt: time.Now(), // Newer timestamp to trigger update
	}
	importedIssue.ContentHash = importedIssue.ComputeContentHash()
	result, err := ImportIssues(ctx, tmpDB, store, []*types.Issue{importedIssue}, Options{})
	if err != nil {
		t.Fatalf("Import failed: %v", err)
	}
	// Import should recognize this as an update (same ID, different timestamp)
	// The unchanged count may vary based on whether other fields changed
	t.Logf("Import result: Created=%d Updated=%d Unchanged=%d", result.Created, result.Updated, result.Unchanged)
	// CRITICAL: Verify pinned field was preserved
	after, err := store.GetIssue(ctx, "test-abc123")
	if err != nil {
		t.Fatalf("Failed to get issue after import: %v", err)
	}
	if !after.Pinned {
		t.Error("FAIL (bd-phtv): pinned=true was reset to false by import. " +
			"Import should preserve existing pinned field when incoming is false (omitempty).")
	}
}
// TestImportSetsPinnedTrue tests that importing an issue with pinned=true
// correctly sets the pinned field in the database — the mirror image of
// TestImportPreservesPinnedField: explicit true must always win.
func TestImportSetsPinnedTrue(t *testing.T) {
	ctx := context.Background()
	tmpDB := t.TempDir() + "/test.db"
	store, err := sqlite.New(context.Background(), tmpDB)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer store.Close()
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Create an unpinned issue
	unpinnedIssue := &types.Issue{
		ID:        "test-abc123",
		Title:     "Unpinned Issue",
		Status:    types.StatusOpen,
		Priority:  2,
		IssueType: types.TypeTask,
		Pinned:    false,
		CreatedAt: time.Now().Add(-time.Hour),
		UpdatedAt: time.Now().Add(-time.Hour),
	}
	unpinnedIssue.ContentHash = unpinnedIssue.ComputeContentHash()
	if err := store.CreateIssue(ctx, unpinnedIssue, "test-setup"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}
	// Import with pinned=true (from JSONL that explicitly has "pinned": true)
	importedIssue := &types.Issue{
		ID:        "test-abc123",
		Title:     "Unpinned Issue",
		Status:    types.StatusOpen,
		Priority:  2,
		IssueType: types.TypeTask,
		Pinned:    true, // Explicitly set to true in JSONL
		CreatedAt: time.Now().Add(-time.Hour),
		UpdatedAt: time.Now(), // Newer timestamp
	}
	importedIssue.ContentHash = importedIssue.ComputeContentHash()
	result, err := ImportIssues(ctx, tmpDB, store, []*types.Issue{importedIssue}, Options{})
	if err != nil {
		t.Fatalf("Import failed: %v", err)
	}
	t.Logf("Import result: Created=%d Updated=%d Unchanged=%d", result.Created, result.Updated, result.Unchanged)
	// Verify pinned field was set to true
	after, err := store.GetIssue(ctx, "test-abc123")
	if err != nil {
		t.Fatalf("Failed to get issue after import: %v", err)
	}
	if !after.Pinned {
		t.Error("FAIL: pinned=true from JSONL should set the field to true in database")
	}
}
// TestMultiRepoPrefixValidation tests GH#686: multi-repo allows foreign prefixes.
func TestMultiRepoPrefixValidation(t *testing.T) {
if err := config.Initialize(); err != nil {
t.Fatalf("Failed to initialize config: %v", err)

View File

@@ -395,48 +395,6 @@ func (c *Client) EpicStatus(args *EpicStatusArgs) (*Response, error) {
return c.Execute(OpEpicStatus, args)
}
// Gate operations (bd-likt)
// GateCreate creates a gate via the daemon.
// Thin wrapper: forwards args to Execute with OpGateCreate.
func (c *Client) GateCreate(args *GateCreateArgs) (*Response, error) {
	return c.Execute(OpGateCreate, args)
}
// GateList lists gates via the daemon.
// Thin wrapper: forwards args to Execute with OpGateList.
func (c *Client) GateList(args *GateListArgs) (*Response, error) {
	return c.Execute(OpGateList, args)
}
// GateShow shows a gate via the daemon.
// Thin wrapper: forwards args to Execute with OpGateShow.
func (c *Client) GateShow(args *GateShowArgs) (*Response, error) {
	return c.Execute(OpGateShow, args)
}
// GateClose closes a gate via the daemon.
// Thin wrapper: forwards args to Execute with OpGateClose.
func (c *Client) GateClose(args *GateCloseArgs) (*Response, error) {
	return c.Execute(OpGateClose, args)
}
// GateWait adds waiters to a gate via the daemon.
// Thin wrapper: forwards args to Execute with OpGateWait.
func (c *Client) GateWait(args *GateWaitArgs) (*Response, error) {
	return c.Execute(OpGateWait, args)
}
// GetWorkerStatus retrieves worker status via the daemon.
// Unlike the Gate* wrappers, this decodes the raw response payload into a
// typed GetWorkerStatusResponse; it returns an error if either the RPC call
// or the JSON unmarshal fails.
func (c *Client) GetWorkerStatus(args *GetWorkerStatusArgs) (*GetWorkerStatusResponse, error) {
	resp, err := c.Execute(OpGetWorkerStatus, args)
	if err != nil {
		return nil, err
	}
	var result GetWorkerStatusResponse
	if err := json.Unmarshal(resp.Data, &result); err != nil {
		return nil, fmt.Errorf("failed to unmarshal worker status response: %w", err)
	}
	return &result, nil
}
// cleanupStaleDaemonArtifacts removes stale daemon.pid file when socket is missing and lock is free.
// This prevents stale artifacts from accumulating after daemon crashes.
// Only removes pid file - lock file is managed by OS (released on process exit).

View File

@@ -2,7 +2,6 @@ package rpc
import (
"encoding/json"
"time"
)
// Operation constants for all bd commands
@@ -35,18 +34,9 @@ const (
OpExport = "export"
OpImport = "import"
OpEpicStatus = "epic_status"
OpGetMutations = "get_mutations"
OpGetMoleculeProgress = "get_molecule_progress"
OpShutdown = "shutdown"
OpDelete = "delete"
OpGetWorkerStatus = "get_worker_status"
// Gate operations (bd-likt)
OpGateCreate = "gate_create"
OpGateList = "gate_list"
OpGateShow = "gate_show"
OpGateClose = "gate_close"
OpGateWait = "gate_wait"
OpGetMutations = "get_mutations"
OpShutdown = "shutdown"
OpDelete = "delete"
)
// Request represents an RPC request from client to daemon
@@ -423,92 +413,3 @@ type ImportArgs struct {
type GetMutationsArgs struct {
Since int64 `json:"since"` // Unix timestamp in milliseconds (0 for all recent)
}
// Gate operations (bd-likt)
// GateCreateArgs represents arguments for creating a gate
type GateCreateArgs struct {
Title string `json:"title"`
AwaitType string `json:"await_type"` // gh:run, gh:pr, timer, human, mail
AwaitID string `json:"await_id"` // ID/value for the await type
Timeout time.Duration `json:"timeout"` // Timeout duration
Waiters []string `json:"waiters"` // Mail addresses to notify when gate clears
}
// GateCreateResult represents the result of creating a gate
type GateCreateResult struct {
ID string `json:"id"` // Created gate ID
}
// GateListArgs represents arguments for listing gates
type GateListArgs struct {
All bool `json:"all"` // Include closed gates
}
// GateShowArgs represents arguments for showing a gate
type GateShowArgs struct {
ID string `json:"id"` // Gate ID (partial or full)
}
// GateCloseArgs represents arguments for closing a gate
type GateCloseArgs struct {
ID string `json:"id"` // Gate ID (partial or full)
Reason string `json:"reason,omitempty"` // Close reason
}
// GateWaitArgs represents arguments for adding waiters to a gate
type GateWaitArgs struct {
ID string `json:"id"` // Gate ID (partial or full)
Waiters []string `json:"waiters"` // Additional waiters to add
}
// GateWaitResult represents the result of adding waiters
type GateWaitResult struct {
AddedCount int `json:"added_count"` // Number of new waiters added
}
// GetWorkerStatusArgs represents arguments for retrieving worker status
type GetWorkerStatusArgs struct {
// Assignee filters to a specific worker (optional, empty = all workers)
Assignee string `json:"assignee,omitempty"`
}
// WorkerStatus represents the status of a single worker and their current work
type WorkerStatus struct {
Assignee string `json:"assignee"` // Worker identifier
MoleculeID string `json:"molecule_id,omitempty"` // Parent molecule/epic ID (if working on a step)
MoleculeTitle string `json:"molecule_title,omitempty"` // Parent molecule/epic title
CurrentStep int `json:"current_step,omitempty"` // Current step number (1-indexed)
TotalSteps int `json:"total_steps,omitempty"` // Total number of steps in molecule
StepID string `json:"step_id,omitempty"` // Current step issue ID
StepTitle string `json:"step_title,omitempty"` // Current step issue title
LastActivity string `json:"last_activity"` // ISO 8601 timestamp of last update
Status string `json:"status"` // Current work status (in_progress, blocked, etc.)
}
// GetWorkerStatusResponse is the response for get_worker_status operation
type GetWorkerStatusResponse struct {
Workers []WorkerStatus `json:"workers"`
}
// GetMoleculeProgressArgs represents arguments for the get_molecule_progress operation.
type GetMoleculeProgressArgs struct {
	MoleculeID string `json:"molecule_id"` // The ID of the molecule (parent issue)
}

// MoleculeStep represents a single step within a molecule.
type MoleculeStep struct {
	ID        string  `json:"id"`
	Title     string  `json:"title"`
	Status    string  `json:"status"`     // One of "done", "current", "ready", "blocked"
	StartTime *string `json:"start_time"` // ISO 8601 timestamp when step was created
	CloseTime *string `json:"close_time"` // ISO 8601 timestamp when step was closed (nil if not done)
}

// MoleculeProgress represents the progress of a molecule (parent issue with steps).
type MoleculeProgress struct {
	MoleculeID string         `json:"molecule_id"`
	Title      string         `json:"title"`
	Assignee   string         `json:"assignee"`
	Steps      []MoleculeStep `json:"steps"`
}

View File

@@ -1,7 +1,6 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"net"
@@ -11,7 +10,6 @@ import (
"time"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
// ServerVersion is the version of this RPC server
@@ -82,8 +80,6 @@ const (
type MutationEvent struct {
Type string // One of the Mutation* constants
IssueID string // e.g., "bd-42"
Title string // Issue title for display context (may be empty for some operations)
Assignee string // Issue assignee for display context (may be empty)
Timestamp time.Time
// Optional metadata for richer events (used by status, bonded, etc.)
OldStatus string `json:"old_status,omitempty"` // Previous status (for status events)
@@ -142,13 +138,10 @@ func NewServer(socketPath string, store storage.Storage, workspacePath string, d
// emitMutation sends a mutation event to the daemon's event-driven loop.
// Non-blocking: drops event if channel is full (sync will happen eventually).
// Also stores in recent mutations buffer for polling.
// Title and assignee provide context for activity feeds; pass empty strings if unknown.
func (s *Server) emitMutation(eventType, issueID, title, assignee string) {
func (s *Server) emitMutation(eventType, issueID string) {
s.emitRichMutation(MutationEvent{
Type: eventType,
IssueID: issueID,
Title: title,
Assignee: assignee,
Type: eventType,
IssueID: issueID,
})
}
@@ -234,120 +227,3 @@ func (s *Server) handleGetMutations(req *Request) Response {
Data: data,
}
}
// handleGetMoleculeProgress handles the get_molecule_progress RPC operation.
// Returns detailed progress for a molecule (parent issue with child steps):
// each child linked by a parent-child dependency becomes a MoleculeStep whose
// status is derived from the issue status and the blocked-issues cache.
func (s *Server) handleGetMoleculeProgress(req *Request) Response {
	var args GetMoleculeProgressArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("invalid arguments: %v", err),
		}
	}
	store := s.storage
	if store == nil {
		return Response{
			Success: false,
			Error:   "storage not available",
		}
	}
	ctx := s.reqCtx(req)
	// Get the molecule (parent issue)
	molecule, err := store.GetIssue(ctx, args.MoleculeID)
	if err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to get molecule: %v", err),
		}
	}
	if molecule == nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("molecule not found: %s", args.MoleculeID),
		}
	}
	// Get children (issues that have parent-child dependency on this molecule).
	// The metadata-aware lookup is probed via an anonymous interface so backends
	// without it simply yield zero steps rather than an error.
	var children []*types.IssueWithDependencyMetadata
	if sqliteStore, ok := store.(interface {
		GetDependentsWithMetadata(ctx context.Context, issueID string) ([]*types.IssueWithDependencyMetadata, error)
	}); ok {
		allDependents, err := sqliteStore.GetDependentsWithMetadata(ctx, args.MoleculeID)
		if err != nil {
			return Response{
				Success: false,
				Error:   fmt.Sprintf("failed to get molecule children: %v", err),
			}
		}
		// Filter for parent-child relationships only
		for _, dep := range allDependents {
			if dep.DependencyType == types.DepParentChild {
				children = append(children, dep)
			}
		}
	}
	// Get blocked issue IDs for status computation. Failures here are
	// deliberately non-fatal: blocked steps would just report as "ready".
	blockedIDs := make(map[string]bool)
	if sqliteStore, ok := store.(interface {
		GetBlockedIssueIDs(ctx context.Context) ([]string, error)
	}); ok {
		ids, err := sqliteStore.GetBlockedIssueIDs(ctx)
		if err == nil {
			for _, id := range ids {
				blockedIDs[id] = true
			}
		}
	}
	// Build steps from children
	steps := make([]MoleculeStep, 0, len(children))
	for _, child := range children {
		step := MoleculeStep{
			ID:    child.ID,
			Title: child.Title,
		}
		// Compute step status: closed -> done, in_progress -> current,
		// everything else is "blocked" (per cache) or "ready".
		switch child.Status {
		case types.StatusClosed:
			step.Status = "done"
		case types.StatusInProgress:
			step.Status = "current"
		default: // open, blocked, etc.
			if blockedIDs[child.ID] {
				step.Status = "blocked"
			} else {
				step.Status = "ready"
			}
		}
		// Set timestamps (RFC 3339); CloseTime stays nil for open steps.
		startTime := child.CreatedAt.Format(time.RFC3339)
		step.StartTime = &startTime
		if child.ClosedAt != nil {
			closeTime := child.ClosedAt.Format(time.RFC3339)
			step.CloseTime = &closeTime
		}
		steps = append(steps, step)
	}
	progress := MoleculeProgress{
		MoleculeID: molecule.ID,
		Title:      molecule.Title,
		Assignee:   molecule.Assignee,
		Steps:      steps,
	}
	data, _ := json.Marshal(progress)
	return Response{
		Success: true,
		Data:    data,
	}
}

View File

@@ -350,7 +350,7 @@ func (s *Server) handleCreate(req *Request) Response {
}
// Emit mutation event for event-driven daemon
s.emitMutation(MutationCreate, issue.ID, issue.Title, issue.Assignee)
s.emitMutation(MutationCreate, issue.ID)
data, _ := json.Marshal(issue)
return Response{
@@ -470,13 +470,11 @@ func (s *Server) handleUpdate(req *Request) Response {
s.emitRichMutation(MutationEvent{
Type: MutationStatus,
IssueID: updateArgs.ID,
Title: issue.Title,
Assignee: issue.Assignee,
OldStatus: string(issue.Status),
NewStatus: *updateArgs.Status,
})
} else {
s.emitMutation(MutationUpdate, updateArgs.ID, issue.Title, issue.Assignee)
s.emitMutation(MutationUpdate, updateArgs.ID)
}
}
@@ -546,8 +544,6 @@ func (s *Server) handleClose(req *Request) Response {
s.emitRichMutation(MutationEvent{
Type: MutationStatus,
IssueID: closeArgs.ID,
Title: issue.Title,
Assignee: issue.Assignee,
OldStatus: oldStatus,
NewStatus: "closed",
})
@@ -644,7 +640,7 @@ func (s *Server) handleDelete(req *Request) Response {
}
// Emit mutation event for event-driven daemon
s.emitMutation(MutationDelete, issueID, issue.Title, issue.Assignee)
s.emitMutation(MutationDelete, issueID)
deletedCount++
}
@@ -1377,341 +1373,3 @@ func (s *Server) handleEpicStatus(req *Request) Response {
Data: data,
}
}
// Gate handlers (bd-likt)

// handleGateCreate creates a new gate issue: an ephemeral wisp assigned to
// "deacon/" that other work items can wait on. Returns the new gate's ID.
func (s *Server) handleGateCreate(req *Request) Response {
	var args GateCreateArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("invalid gate create args: %v", err),
		}
	}
	store := s.storage
	if store == nil {
		return Response{
			Success: false,
			Error:   "storage not available",
		}
	}
	ctx := s.reqCtx(req)
	now := time.Now()
	// Create gate issue
	gate := &types.Issue{
		Title:     args.Title,
		IssueType: types.TypeGate,
		Status:    types.StatusOpen,
		Priority:  1, // Gates are typically high priority
		Assignee:  "deacon/",
		Wisp:      true, // Gates are wisps (ephemeral)
		AwaitType: args.AwaitType,
		AwaitID:   args.AwaitID,
		Timeout:   args.Timeout,
		Waiters:   args.Waiters,
		CreatedAt: now,
		UpdatedAt: now,
	}
	gate.ContentHash = gate.ComputeContentHash()
	if err := store.CreateIssue(ctx, gate, s.reqActor(req)); err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to create gate: %v", err),
		}
	}
	// Emit mutation event for the event-driven daemon.
	// Fix: emitMutation now takes only (eventType, issueID); the previous
	// 4-argument call (title/assignee) no longer matches the signature.
	s.emitMutation(MutationCreate, gate.ID)
	data, _ := json.Marshal(GateCreateResult{ID: gate.ID})
	return Response{
		Success: true,
		Data:    data,
	}
}
// handleGateList lists gate issues. The default view contains only open
// gates; setting All in the arguments includes gates in every status.
func (s *Server) handleGateList(req *Request) Response {
	var args GateListArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return Response{Success: false, Error: fmt.Sprintf("invalid gate list args: %v", err)}
	}
	st := s.storage
	if st == nil {
		return Response{Success: false, Error: "storage not available"}
	}
	// Restrict the search to gate-type issues.
	gateType := types.TypeGate
	filter := types.IssueFilter{IssueType: &gateType}
	if !args.All {
		// Default: only show gates that are still open.
		open := types.StatusOpen
		filter.Status = &open
	}
	gates, err := st.SearchIssues(s.reqCtx(req), "", filter)
	if err != nil {
		return Response{Success: false, Error: fmt.Sprintf("failed to list gates: %v", err)}
	}
	payload, _ := json.Marshal(gates)
	return Response{Success: true, Data: payload}
}
// handleGateShow resolves a (possibly partial) gate ID and returns the full
// gate issue, rejecting IDs that resolve to a non-gate issue type.
func (s *Server) handleGateShow(req *Request) Response {
	var args GateShowArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return Response{Success: false, Error: fmt.Sprintf("invalid gate show args: %v", err)}
	}
	st := s.storage
	if st == nil {
		return Response{Success: false, Error: "storage not available"}
	}
	ctx := s.reqCtx(req)
	// Expand a partial ID into the canonical gate ID.
	gateID, err := utils.ResolvePartialID(ctx, st, args.ID)
	if err != nil {
		return Response{Success: false, Error: fmt.Sprintf("failed to resolve gate ID: %v", err)}
	}
	gate, err := st.GetIssue(ctx, gateID)
	switch {
	case err != nil:
		return Response{Success: false, Error: fmt.Sprintf("failed to get gate: %v", err)}
	case gate == nil:
		return Response{Success: false, Error: fmt.Sprintf("gate %s not found", gateID)}
	case gate.IssueType != types.TypeGate:
		return Response{Success: false, Error: fmt.Sprintf("%s is not a gate (type: %s)", gateID, gate.IssueType)}
	}
	payload, _ := json.Marshal(gate)
	return Response{Success: true, Data: payload}
}
// handleGateClose resolves a gate ID, verifies the target is a gate, closes
// it with the supplied (or default) reason, and emits a rich status-change
// mutation event. The response carries the re-fetched, now-closed gate.
func (s *Server) handleGateClose(req *Request) Response {
	var args GateCloseArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("invalid gate close args: %v", err),
		}
	}
	store := s.storage
	if store == nil {
		return Response{
			Success: false,
			Error:   "storage not available",
		}
	}
	ctx := s.reqCtx(req)
	// Resolve partial ID
	gateID, err := utils.ResolvePartialID(ctx, store, args.ID)
	if err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to resolve gate ID: %v", err),
		}
	}
	// Verify it's a gate
	gate, err := store.GetIssue(ctx, gateID)
	if err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to get gate: %v", err),
		}
	}
	if gate == nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("gate %s not found", gateID),
		}
	}
	if gate.IssueType != types.TypeGate {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("%s is not a gate (type: %s)", gateID, gate.IssueType),
		}
	}
	// Default the close reason when the caller did not supply one.
	reason := args.Reason
	if reason == "" {
		reason = "Gate closed"
	}
	// Capture the pre-close status for the mutation event below.
	oldStatus := string(gate.Status)
	if err := store.CloseIssue(ctx, gateID, reason, s.reqActor(req)); err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to close gate: %v", err),
		}
	}
	// Emit rich status change event
	s.emitRichMutation(MutationEvent{
		Type:      MutationStatus,
		IssueID:   gateID,
		OldStatus: oldStatus,
		NewStatus: "closed",
	})
	// NOTE(review): the re-fetch error is ignored; on failure closedGate is
	// nil and Data marshals to JSON null — confirm callers tolerate that.
	closedGate, _ := store.GetIssue(ctx, gateID)
	data, _ := json.Marshal(closedGate)
	return Response{
		Success: true,
		Data:    data,
	}
}
// handleGateWait adds waiters to an existing open gate. Waiter addresses
// already present on the gate are ignored; only genuinely new waiters are
// persisted, and the result reports how many were added.
func (s *Server) handleGateWait(req *Request) Response {
	var args GateWaitArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("invalid gate wait args: %v", err),
		}
	}
	store := s.storage
	if store == nil {
		return Response{
			Success: false,
			Error:   "storage not available",
		}
	}
	ctx := s.reqCtx(req)
	// Resolve partial ID
	gateID, err := utils.ResolvePartialID(ctx, store, args.ID)
	if err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to resolve gate ID: %v", err),
		}
	}
	// Get existing gate
	gate, err := store.GetIssue(ctx, gateID)
	if err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to get gate: %v", err),
		}
	}
	if gate == nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("gate %s not found", gateID),
		}
	}
	if gate.IssueType != types.TypeGate {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("%s is not a gate (type: %s)", gateID, gate.IssueType),
		}
	}
	if gate.Status == types.StatusClosed {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("gate %s is already closed", gateID),
		}
	}
	// Add new waiters (avoiding duplicates).
	waiterSet := make(map[string]bool, len(gate.Waiters)+len(args.Waiters))
	for _, w := range gate.Waiters {
		waiterSet[w] = true
	}
	newWaiters := []string{}
	for _, addr := range args.Waiters {
		if !waiterSet[addr] {
			newWaiters = append(newWaiters, addr)
			waiterSet[addr] = true
		}
	}
	addedCount := len(newWaiters)
	if addedCount > 0 {
		// Updating the waiters column requires raw SQL, which only the
		// SQLite backend exposes.
		sqliteStore, ok := store.(*sqlite.SQLiteStorage)
		if !ok {
			return Response{
				Success: false,
				Error:   "gate wait requires SQLite storage",
			}
		}
		// Build the merged list in a fresh slice so we never append into the
		// backing array of gate.Waiters (which may be shared with storage).
		allWaiters := make([]string, 0, len(gate.Waiters)+len(newWaiters))
		allWaiters = append(allWaiters, gate.Waiters...)
		allWaiters = append(allWaiters, newWaiters...)
		// Marshal of a []string cannot fail; error intentionally ignored.
		waitersJSON, _ := json.Marshal(allWaiters)
		// Use raw SQL to update the waiters field
		_, err = sqliteStore.UnderlyingDB().ExecContext(ctx, `UPDATE issues SET waiters = ?, updated_at = ? WHERE id = ?`,
			string(waitersJSON), time.Now(), gateID)
		if err != nil {
			return Response{
				Success: false,
				Error:   fmt.Sprintf("failed to add waiters: %v", err),
			}
		}
		// Emit mutation event.
		// Fix: emitMutation now takes only (eventType, issueID); the previous
		// 4-argument call (title/assignee) no longer matches the signature.
		s.emitMutation(MutationUpdate, gateID)
	}
	data, _ := json.Marshal(GateWaitResult{AddedCount: addedCount})
	return Response{
		Success: true,
		Data:    data,
	}
}

View File

@@ -41,8 +41,7 @@ func (s *Server) handleDepAdd(req *Request) Response {
}
// Emit mutation event for event-driven daemon
// Title/assignee empty for dependency operations (would require extra lookup)
s.emitMutation(MutationUpdate, depArgs.FromID, "", "")
s.emitMutation(MutationUpdate, depArgs.FromID)
return Response{Success: true}
}
@@ -74,8 +73,7 @@ func (s *Server) handleSimpleStoreOp(req *Request, argsPtr interface{}, argDesc
}
// Emit mutation event for event-driven daemon
// Title/assignee empty for simple store operations (would require extra lookup)
s.emitMutation(MutationUpdate, issueID, "", "")
s.emitMutation(MutationUpdate, issueID)
return Response{Success: true}
}
@@ -149,8 +147,7 @@ func (s *Server) handleCommentAdd(req *Request) Response {
}
// Emit mutation event for event-driven daemon
// Title/assignee empty for comment operations (would require extra lookup)
s.emitMutation(MutationComment, commentArgs.ID, "", "")
s.emitMutation(MutationComment, commentArgs.ID)
data, _ := json.Marshal(comment)
return Response{

View File

@@ -13,7 +13,7 @@ func TestEmitMutation(t *testing.T) {
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
// Emit a mutation
server.emitMutation(MutationCreate, "bd-123", "Test Issue", "")
server.emitMutation(MutationCreate, "bd-123")
// Check that mutation was stored in buffer
mutations := server.GetRecentMutations(0)
@@ -45,14 +45,14 @@ func TestGetRecentMutations_TimestampFiltering(t *testing.T) {
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
// Emit mutations with delays
server.emitMutation(MutationCreate, "bd-1", "Issue 1", "")
server.emitMutation(MutationCreate, "bd-1")
time.Sleep(10 * time.Millisecond)
checkpoint := time.Now().UnixMilli()
time.Sleep(10 * time.Millisecond)
server.emitMutation(MutationUpdate, "bd-2", "Issue 2", "")
server.emitMutation(MutationUpdate, "bd-3", "Issue 3", "")
server.emitMutation(MutationUpdate, "bd-2")
server.emitMutation(MutationUpdate, "bd-3")
// Get mutations after checkpoint
mutations := server.GetRecentMutations(checkpoint)
@@ -82,7 +82,7 @@ func TestGetRecentMutations_CircularBuffer(t *testing.T) {
// Emit more than maxMutationBuffer (100) mutations
for i := 0; i < 150; i++ {
server.emitMutation(MutationCreate, "bd-"+string(rune(i)), "", "")
server.emitMutation(MutationCreate, "bd-"+string(rune(i)))
time.Sleep(time.Millisecond) // Ensure different timestamps
}
@@ -110,7 +110,7 @@ func TestGetRecentMutations_ConcurrentAccess(t *testing.T) {
// Writer goroutine
go func() {
for i := 0; i < 50; i++ {
server.emitMutation(MutationUpdate, "bd-write", "", "")
server.emitMutation(MutationUpdate, "bd-write")
time.Sleep(time.Millisecond)
}
done <- true
@@ -141,11 +141,11 @@ func TestHandleGetMutations(t *testing.T) {
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
// Emit some mutations
server.emitMutation(MutationCreate, "bd-1", "Issue 1", "")
server.emitMutation(MutationCreate, "bd-1")
time.Sleep(10 * time.Millisecond)
checkpoint := time.Now().UnixMilli()
time.Sleep(10 * time.Millisecond)
server.emitMutation(MutationUpdate, "bd-2", "Issue 2", "")
server.emitMutation(MutationUpdate, "bd-2")
// Create RPC request
args := GetMutationsArgs{Since: checkpoint}
@@ -213,7 +213,7 @@ func TestMutationEventTypes(t *testing.T) {
}
for _, mutationType := range types {
server.emitMutation(mutationType, "bd-test", "", "")
server.emitMutation(mutationType, "bd-test")
}
mutations := server.GetRecentMutations(0)
@@ -305,7 +305,7 @@ func TestMutationTimestamps(t *testing.T) {
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
before := time.Now()
server.emitMutation(MutationCreate, "bd-123", "Test Issue", "")
server.emitMutation(MutationCreate, "bd-123")
after := time.Now()
mutations := server.GetRecentMutations(0)
@@ -327,7 +327,7 @@ func TestEmitMutation_NonBlocking(t *testing.T) {
// Fill the buffer (default size is 512 from BEADS_MUTATION_BUFFER or default)
for i := 0; i < 600; i++ {
// This should not block even when channel is full
server.emitMutation(MutationCreate, "bd-test", "", "")
server.emitMutation(MutationCreate, "bd-test")
}
// Verify mutations were still stored in recent buffer

View File

@@ -219,23 +219,8 @@ func (s *Server) handleRequest(req *Request) Response {
resp = s.handleEpicStatus(req)
case OpGetMutations:
resp = s.handleGetMutations(req)
case OpGetMoleculeProgress:
resp = s.handleGetMoleculeProgress(req)
case OpGetWorkerStatus:
resp = s.handleGetWorkerStatus(req)
case OpShutdown:
resp = s.handleShutdown(req)
// Gate operations (bd-likt)
case OpGateCreate:
resp = s.handleGateCreate(req)
case OpGateList:
resp = s.handleGateList(req)
case OpGateShow:
resp = s.handleGateShow(req)
case OpGateClose:
resp = s.handleGateClose(req)
case OpGateWait:
resp = s.handleGateWait(req)
default:
s.metrics.RecordError(req.Operation)
return Response{
@@ -394,107 +379,3 @@ func (s *Server) handleMetrics(_ *Request) Response {
Data: data,
}
}
// handleGetWorkerStatus reports what each worker (assignee) is currently
// doing. A "worker" is any assignee holding an in_progress issue; when that
// issue is a child of a molecule/epic (parent-child dependency), the entry
// also carries molecule context: parent ID/title, step position, step count.
// Dependency lookup failures are non-fatal — the worker is still listed,
// just without molecule context.
func (s *Server) handleGetWorkerStatus(req *Request) Response {
	ctx := s.reqCtx(req)
	// Parse optional args (an empty payload means "all workers").
	var args GetWorkerStatusArgs
	if len(req.Args) > 0 {
		if err := json.Unmarshal(req.Args, &args); err != nil {
			return Response{
				Success: false,
				Error:   fmt.Sprintf("invalid args: %v", err),
			}
		}
	}
	// Build filter: find all in_progress issues with assignees
	filter := types.IssueFilter{
		Status: func() *types.Status { s := types.StatusInProgress; return &s }(),
	}
	if args.Assignee != "" {
		filter.Assignee = &args.Assignee
	}
	// Get all in_progress issues (potential workers)
	issues, err := s.storage.SearchIssues(ctx, "", filter)
	if err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("failed to search issues: %v", err),
		}
	}
	var workers []WorkerStatus
	for _, issue := range issues {
		// Skip issues without assignees
		if issue.Assignee == "" {
			continue
		}
		worker := WorkerStatus{
			Assignee:     issue.Assignee,
			LastActivity: issue.UpdatedAt.Format(time.RFC3339),
			Status:       string(issue.Status),
		}
		// Check if this issue is a child of a molecule/epic (has parent-child dependency)
		deps, err := s.storage.GetDependencyRecords(ctx, issue.ID)
		if err == nil {
			for _, dep := range deps {
				if dep.Type == types.DepParentChild {
					// This issue is a child - get the parent molecule
					parentIssue, err := s.storage.GetIssue(ctx, dep.DependsOnID)
					if err == nil && parentIssue != nil {
						worker.MoleculeID = parentIssue.ID
						worker.MoleculeTitle = parentIssue.Title
						worker.StepID = issue.ID
						worker.StepTitle = issue.Title
						// Count total steps and determine current step number
						// by getting all children of the molecule.
						// NOTE(review): this re-checks dependency records per
						// child (O(children^2) storage calls) — fine for small
						// molecules, worth revisiting if they grow large.
						children, err := s.storage.GetDependents(ctx, parentIssue.ID)
						if err == nil {
							// Filter to only parent-child dependencies
							var steps []*types.Issue
							for _, child := range children {
								childDeps, err := s.storage.GetDependencyRecords(ctx, child.ID)
								if err == nil {
									for _, childDep := range childDeps {
										if childDep.Type == types.DepParentChild && childDep.DependsOnID == parentIssue.ID {
											steps = append(steps, child)
											break
										}
									}
								}
							}
							worker.TotalSteps = len(steps)
							// Find current step number (1-indexed)
							for i, step := range steps {
								if step.ID == issue.ID {
									worker.CurrentStep = i + 1
									break
								}
							}
						}
					}
					break // Found the parent, no need to check other deps
				}
			}
		}
		workers = append(workers, worker)
	}
	resp := GetWorkerStatusResponse{
		Workers: workers,
	}
	data, _ := json.Marshal(resp)
	return Response{
		Success: true,
		Data:    data,
	}
}

View File

@@ -1,314 +0,0 @@
package rpc
import (
"context"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// TestGetWorkerStatus_NoWorkers verifies that the worker-status query reports
// an empty worker list when no in_progress issues exist.
func TestGetWorkerStatus_NoWorkers(t *testing.T) {
	_, client, cleanup := setupTestServer(t)
	defer cleanup()
	// Nothing has been created, so the response should contain no workers.
	res, err := client.GetWorkerStatus(&GetWorkerStatusArgs{})
	if err != nil {
		t.Fatalf("GetWorkerStatus failed: %v", err)
	}
	if got := len(res.Workers); got != 0 {
		t.Errorf("expected 0 workers, got %d", got)
	}
}
// TestGetWorkerStatus_SingleWorker verifies that a lone assigned in_progress
// issue produces one worker entry with basic fields set and no molecule
// context (the issue has no parent-child dependency).
func TestGetWorkerStatus_SingleWorker(t *testing.T) {
	server, client, cleanup := setupTestServer(t)
	defer cleanup()
	ctx := context.Background()
	// Create an in_progress issue with an assignee
	issue := &types.Issue{
		ID:        "bd-test1",
		Title:     "Test task",
		Status:    types.StatusInProgress,
		IssueType: types.TypeTask,
		Priority:  2,
		Assignee:  "worker1",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := server.storage.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Query worker status
	result, err := client.GetWorkerStatus(&GetWorkerStatusArgs{})
	if err != nil {
		t.Fatalf("GetWorkerStatus failed: %v", err)
	}
	if len(result.Workers) != 1 {
		t.Fatalf("expected 1 worker, got %d", len(result.Workers))
	}
	worker := result.Workers[0]
	if worker.Assignee != "worker1" {
		t.Errorf("expected assignee 'worker1', got '%s'", worker.Assignee)
	}
	if worker.Status != "in_progress" {
		t.Errorf("expected status 'in_progress', got '%s'", worker.Status)
	}
	if worker.LastActivity == "" {
		t.Error("expected last activity to be set")
	}
	// Not part of a molecule, so these should be empty
	if worker.MoleculeID != "" {
		t.Errorf("expected empty molecule ID, got '%s'", worker.MoleculeID)
	}
}
// TestGetWorkerStatus_WithMolecule verifies that a worker whose in_progress
// issue is a step of a molecule (epic with parent-child children) gets full
// molecule context: parent ID/title, step ID/title, and total step count.
// Fixture: a 3-step molecule where step 1 is closed, step 2 is in progress
// (the worker's current step), and step 3 is still open.
func TestGetWorkerStatus_WithMolecule(t *testing.T) {
	server, client, cleanup := setupTestServer(t)
	defer cleanup()
	ctx := context.Background()
	// Create a molecule (epic)
	molecule := &types.Issue{
		ID:        "bd-mol1",
		Title:     "Test Molecule",
		Status:    types.StatusOpen,
		IssueType: types.TypeEpic,
		Priority:  2,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := server.storage.CreateIssue(ctx, molecule, "test"); err != nil {
		t.Fatalf("failed to create molecule: %v", err)
	}
	// Create step 1 (completed)
	step1 := &types.Issue{
		ID:        "bd-step1",
		Title:     "Step 1: Setup",
		Status:    types.StatusClosed,
		IssueType: types.TypeTask,
		Priority:  2,
		Assignee:  "worker1",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
		ClosedAt:  func() *time.Time { t := time.Now(); return &t }(),
	}
	if err := server.storage.CreateIssue(ctx, step1, "test"); err != nil {
		t.Fatalf("failed to create step1: %v", err)
	}
	// Create step 2 (current step - in progress)
	step2 := &types.Issue{
		ID:        "bd-step2",
		Title:     "Step 2: Implementation",
		Status:    types.StatusInProgress,
		IssueType: types.TypeTask,
		Priority:  2,
		Assignee:  "worker1",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := server.storage.CreateIssue(ctx, step2, "test"); err != nil {
		t.Fatalf("failed to create step2: %v", err)
	}
	// Create step 3 (pending)
	step3 := &types.Issue{
		ID:        "bd-step3",
		Title:     "Step 3: Testing",
		Status:    types.StatusOpen,
		IssueType: types.TypeTask,
		Priority:  2,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := server.storage.CreateIssue(ctx, step3, "test"); err != nil {
		t.Fatalf("failed to create step3: %v", err)
	}
	// Add parent-child dependencies (steps depend on molecule)
	for _, stepID := range []string{"bd-step1", "bd-step2", "bd-step3"} {
		dep := &types.Dependency{
			IssueID:     stepID,
			DependsOnID: "bd-mol1",
			Type:        types.DepParentChild,
			CreatedAt:   time.Now(),
			CreatedBy:   "test",
		}
		if err := server.storage.AddDependency(ctx, dep, "test"); err != nil {
			t.Fatalf("failed to add dependency for %s: %v", stepID, err)
		}
	}
	// Query worker status
	result, err := client.GetWorkerStatus(&GetWorkerStatusArgs{})
	if err != nil {
		t.Fatalf("GetWorkerStatus failed: %v", err)
	}
	// Only the in_progress step (step 2) should surface a worker entry.
	if len(result.Workers) != 1 {
		t.Fatalf("expected 1 worker (only in_progress issues), got %d", len(result.Workers))
	}
	worker := result.Workers[0]
	if worker.Assignee != "worker1" {
		t.Errorf("expected assignee 'worker1', got '%s'", worker.Assignee)
	}
	if worker.MoleculeID != "bd-mol1" {
		t.Errorf("expected molecule ID 'bd-mol1', got '%s'", worker.MoleculeID)
	}
	if worker.MoleculeTitle != "Test Molecule" {
		t.Errorf("expected molecule title 'Test Molecule', got '%s'", worker.MoleculeTitle)
	}
	if worker.StepID != "bd-step2" {
		t.Errorf("expected step ID 'bd-step2', got '%s'", worker.StepID)
	}
	if worker.StepTitle != "Step 2: Implementation" {
		t.Errorf("expected step title 'Step 2: Implementation', got '%s'", worker.StepTitle)
	}
	if worker.TotalSteps != 3 {
		t.Errorf("expected 3 total steps, got %d", worker.TotalSteps)
	}
	// Note: CurrentStep ordering depends on how GetDependents orders results
	// Just verify it's set
	if worker.CurrentStep < 1 || worker.CurrentStep > 3 {
		t.Errorf("expected current step between 1 and 3, got %d", worker.CurrentStep)
	}
}
// TestGetWorkerStatus_FilterByAssignee verifies that the Assignee filter
// narrows the response to one worker, while an empty filter returns everyone.
func TestGetWorkerStatus_FilterByAssignee(t *testing.T) {
	server, client, cleanup := setupTestServer(t)
	defer cleanup()
	ctx := context.Background()
	// Seed one in-progress issue for each of two workers.
	seed := []struct {
		id, title, assignee string
	}{
		{"bd-test1", "Task for worker1", "worker1"},
		{"bd-test2", "Task for worker2", "worker2"},
	}
	for _, spec := range seed {
		iss := &types.Issue{
			ID:        spec.id,
			Title:     spec.title,
			Status:    types.StatusInProgress,
			IssueType: types.TypeTask,
			Priority:  2,
			Assignee:  spec.assignee,
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		}
		if err := server.storage.CreateIssue(ctx, iss, "test"); err != nil {
			t.Fatalf("failed to create %s: %v", spec.id, err)
		}
	}
	// Unfiltered query should see both workers.
	all, err := client.GetWorkerStatus(&GetWorkerStatusArgs{})
	if err != nil {
		t.Fatalf("GetWorkerStatus (all) failed: %v", err)
	}
	if len(all.Workers) != 2 {
		t.Errorf("expected 2 workers, got %d", len(all.Workers))
	}
	// Filtered query should see exactly worker1.
	filtered, err := client.GetWorkerStatus(&GetWorkerStatusArgs{Assignee: "worker1"})
	if err != nil {
		t.Fatalf("GetWorkerStatus (filtered) failed: %v", err)
	}
	if len(filtered.Workers) != 1 {
		t.Fatalf("expected 1 worker, got %d", len(filtered.Workers))
	}
	if got := filtered.Workers[0].Assignee; got != "worker1" {
		t.Errorf("expected assignee 'worker1', got '%s'", got)
	}
}
// TestGetWorkerStatus_OnlyInProgressIssues verifies that open and closed
// issues are excluded from worker status: only the assignee of the
// in_progress issue should be reported.
func TestGetWorkerStatus_OnlyInProgressIssues(t *testing.T) {
	server, client, cleanup := setupTestServer(t)
	defer cleanup()
	ctx := context.Background()
	// Create issues with different statuses
	openIssue := &types.Issue{
		ID:        "bd-open",
		Title:     "Open task",
		Status:    types.StatusOpen,
		IssueType: types.TypeTask,
		Priority:  2,
		Assignee:  "worker1",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	inProgressIssue := &types.Issue{
		ID:        "bd-inprog",
		Title:     "In progress task",
		Status:    types.StatusInProgress,
		IssueType: types.TypeTask,
		Priority:  2,
		Assignee:  "worker2",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	closedIssue := &types.Issue{
		ID:        "bd-closed",
		Title:     "Closed task",
		Status:    types.StatusClosed,
		IssueType: types.TypeTask,
		Priority:  2,
		Assignee:  "worker3",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
		// ClosedAt must be set for a closed issue.
		ClosedAt: func() *time.Time { t := time.Now(); return &t }(),
	}
	for _, issue := range []*types.Issue{openIssue, inProgressIssue, closedIssue} {
		if err := server.storage.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("failed to create issue %s: %v", issue.ID, err)
		}
	}
	// Query worker status - should only return in_progress issues
	result, err := client.GetWorkerStatus(&GetWorkerStatusArgs{})
	if err != nil {
		t.Fatalf("GetWorkerStatus failed: %v", err)
	}
	if len(result.Workers) != 1 {
		t.Fatalf("expected 1 worker (only in_progress), got %d", len(result.Workers))
	}
	if result.Workers[0].Assignee != "worker2" {
		t.Errorf("expected assignee 'worker2', got '%s'", result.Workers[0].Assignee)
	}
}

View File

@@ -935,20 +935,6 @@ func (m *MemoryStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
continue
}
// Type filtering (gt-7xtn)
if filter.Type != "" {
if string(issue.IssueType) != filter.Type {
continue
}
} else {
// Exclude workflow types from ready work by default
// These are internal workflow items, not work for polecats to claim
switch issue.IssueType {
case types.TypeMergeRequest, types.TypeGate, types.TypeMolecule, types.TypeMessage:
continue
}
}
// Unassigned takes precedence over Assignee filter
if filter.Unassigned {
if issue.Assignee != "" {

View File

@@ -246,22 +246,3 @@ func (s *SQLiteStorage) rebuildBlockedCache(ctx context.Context, exec execer) er
func (s *SQLiteStorage) invalidateBlockedCache(ctx context.Context, exec execer) error {
return s.rebuildBlockedCache(ctx, exec)
}
// GetBlockedIssueIDs returns all issue IDs currently in the blocked cache
// (the blocked_issues_cache table maintained by rebuildBlockedCache).
func (s *SQLiteStorage) GetBlockedIssueIDs(ctx context.Context) ([]string, error) {
	rows, err := s.db.QueryContext(ctx, "SELECT issue_id FROM blocked_issues_cache")
	if err != nil {
		return nil, fmt.Errorf("failed to query blocked_issues_cache: %w", err)
	}
	defer rows.Close()
	var blocked []string
	for rows.Next() {
		var issueID string
		if scanErr := rows.Scan(&issueID); scanErr != nil {
			return nil, fmt.Errorf("failed to scan blocked issue ID: %w", scanErr)
		}
		blocked = append(blocked, issueID)
	}
	// Surface any iteration error from the cursor itself.
	return blocked, rows.Err()
}

View File

@@ -330,9 +330,6 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
}
if existingHash != issue.ContentHash {
// Pinned field fix (bd-phtv): Use COALESCE(NULLIF(?, 0), pinned) to preserve
// existing pinned=1 when incoming pinned=0 (which means field was absent in
// JSONL due to omitempty). This prevents auto-import from resetting pinned issues.
_, err = tx.ExecContext(ctx, `
UPDATE issues SET
content_hash = ?, title = ?, description = ?, design = ?,
@@ -340,7 +337,7 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
issue_type = ?, assignee = ?, estimated_minutes = ?,
updated_at = ?, closed_at = ?, external_ref = ?, source_repo = ?,
deleted_at = ?, deleted_by = ?, delete_reason = ?, original_type = ?,
sender = ?, ephemeral = ?, pinned = COALESCE(NULLIF(?, 0), pinned), is_template = ?,
sender = ?, ephemeral = ?, pinned = ?, is_template = ?,
await_type = ?, await_id = ?, timeout_ns = ?, waiters = ?
WHERE id = ?
`,

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,464 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/steveyegge/beads/internal/types"
)
// CreateTombstone converts an existing issue to a tombstone record.
// This is a soft-delete that preserves the issue in the database with status="tombstone".
// The issue will still appear in exports but be excluded from normal queries.
// Dependencies must be removed separately before calling this method.
//
// The whole conversion (status flip, audit event, dirty marker, blocked-cache
// rebuild) runs in a single transaction; the deferred Rollback is a no-op
// once Commit succeeds.
func (s *SQLiteStorage) CreateTombstone(ctx context.Context, id string, actor string, reason string) error {
	// Get the issue to preserve its original type
	issue, err := s.GetIssue(ctx, id)
	if err != nil {
		return fmt.Errorf("failed to get issue: %w", err)
	}
	if issue == nil {
		return fmt.Errorf("issue not found: %s", id)
	}
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	now := time.Now()
	originalType := string(issue.IssueType)
	// Convert issue to tombstone
	// Note: closed_at must be set to NULL because of CHECK constraint:
	// (status = 'closed') = (closed_at IS NOT NULL)
	_, err = tx.ExecContext(ctx, `
		UPDATE issues
		SET status = ?,
			closed_at = NULL,
			deleted_at = ?,
			deleted_by = ?,
			delete_reason = ?,
			original_type = ?,
			updated_at = ?
		WHERE id = ?
	`, types.StatusTombstone, now, actor, reason, originalType, now, id)
	if err != nil {
		return fmt.Errorf("failed to create tombstone: %w", err)
	}
	// Record tombstone creation event (audit trail of who deleted it and why)
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, id, "deleted", actor, reason)
	if err != nil {
		return fmt.Errorf("failed to record tombstone event: %w", err)
	}
	// Mark issue as dirty for incremental export (upsert keeps latest mark time)
	_, err = tx.ExecContext(ctx, `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
	`, id, now)
	if err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}
	// Invalidate blocked issues cache since status changed (bd-5qim)
	// Tombstone issues don't block others, so this affects blocking calculations
	if err := s.invalidateBlockedCache(ctx, tx); err != nil {
		return fmt.Errorf("failed to invalidate blocked cache: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return wrapDBError("commit tombstone transaction", err)
	}
	return nil
}
// DeleteIssue permanently removes an issue from the database, along with its
// dependencies (both directions), events, comments, and dirty marker.
// Returns an error if the issue does not exist. Unlike CreateTombstone, this
// is a hard delete - nothing is preserved for export.
func (s *SQLiteStorage) DeleteIssue(ctx context.Context, id string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	// Delete dependencies (both directions) - child rows go first so the
	// issues row can be removed last.
	_, err = tx.ExecContext(ctx, `DELETE FROM dependencies WHERE issue_id = ? OR depends_on_id = ?`, id, id)
	if err != nil {
		return fmt.Errorf("failed to delete dependencies: %w", err)
	}
	// Delete events
	_, err = tx.ExecContext(ctx, `DELETE FROM events WHERE issue_id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to delete events: %w", err)
	}
	// Delete comments (no FK cascade on this table) (bd-687g)
	_, err = tx.ExecContext(ctx, `DELETE FROM comments WHERE issue_id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to delete comments: %w", err)
	}
	// Delete from dirty_issues
	_, err = tx.ExecContext(ctx, `DELETE FROM dirty_issues WHERE issue_id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to delete dirty marker: %w", err)
	}
	// Delete the issue itself
	result, err := tx.ExecContext(ctx, `DELETE FROM issues WHERE id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to delete issue: %w", err)
	}
	// RowsAffected distinguishes "deleted" from "never existed".
	rowsAffected, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to check rows affected: %w", err)
	}
	if rowsAffected == 0 {
		return fmt.Errorf("issue not found: %s", id)
	}
	if err := tx.Commit(); err != nil {
		return wrapDBError("commit delete transaction", err)
	}
	// REMOVED (bd-c7af): Counter sync after deletion - no longer needed with hash IDs
	return nil
}
// DeleteIssuesResult contains statistics about a batch deletion operation.
type DeleteIssuesResult struct {
	// DeletedCount is the number of issues deleted (or that would be, in dry-run mode).
	DeletedCount int
	// DependenciesCount is the number of dependency edges touching the deleted issues.
	DependenciesCount int
	// LabelsCount is the number of label rows attached to the deleted issues.
	LabelsCount int
	// EventsCount is the number of event rows attached to the deleted issues.
	EventsCount int
	// OrphanedIssues lists dependents outside the deletion set that lose a dependency.
	OrphanedIssues []string
}
// DeleteIssues deletes multiple issues in a single transaction.
// If cascade is true, recursively deletes dependents.
// If cascade is false but force is true, deletes issues and orphans their dependents.
// If cascade and force are both false, returns an error if any issue has dependents.
// If dryRun is true, only computes statistics without deleting.
//
// Statistics are always populated so callers can preview the impact; the
// transaction is rolled back (via the deferred Rollback) on any error or dry run.
func (s *SQLiteStorage) DeleteIssues(ctx context.Context, ids []string, cascade bool, force bool, dryRun bool) (*DeleteIssuesResult, error) {
	if len(ids) == 0 {
		return &DeleteIssuesResult{}, nil
	}
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	idSet := buildIDSet(ids)
	result := &DeleteIssuesResult{}
	// Expand or validate the deletion set according to cascade/force policy.
	expandedIDs, err := s.resolveDeleteSet(ctx, tx, ids, idSet, cascade, force, result)
	if err != nil {
		return nil, wrapDBError("resolve delete set", err)
	}
	inClause, args := buildSQLInClause(expandedIDs)
	if err := s.populateDeleteStats(ctx, tx, inClause, args, result); err != nil {
		return nil, err
	}
	// Dry run: report stats without committing any change.
	if dryRun {
		return result, nil
	}
	if err := s.executeDelete(ctx, tx, inClause, args, result); err != nil {
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, fmt.Errorf("failed to commit transaction: %w", err)
	}
	// REMOVED (bd-c7af): Counter sync after deletion - no longer needed with hash IDs
	return result, nil
}
// buildIDSet converts a slice of issue IDs into a membership set.
// Duplicate IDs collapse to a single entry.
func buildIDSet(ids []string) map[string]bool {
	set := make(map[string]bool, len(ids))
	for i := range ids {
		set[ids[i]] = true
	}
	return set
}
// resolveDeleteSet decides which issues are actually deleted, applying the
// cascade/force policy. With cascade it expands ids to the transitive closure
// of dependents; with force it records (but permits) orphaned dependents;
// otherwise it rejects the delete when external dependents exist.
func (s *SQLiteStorage) resolveDeleteSet(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, cascade bool, force bool, result *DeleteIssuesResult) ([]string, error) {
	switch {
	case cascade:
		return s.expandWithDependents(ctx, tx, ids, idSet)
	case force:
		return ids, s.trackOrphanedIssues(ctx, tx, ids, idSet, result)
	default:
		return ids, s.validateNoDependents(ctx, tx, ids, idSet, result)
	}
}
// expandWithDependents returns ids plus every issue that transitively depends
// on them (the cascade deletion set). The idSet parameter is unused here.
func (s *SQLiteStorage) expandWithDependents(ctx context.Context, tx *sql.Tx, ids []string, _ map[string]bool) ([]string, error) {
	closure, err := s.findAllDependentsRecursive(ctx, tx, ids)
	if err != nil {
		return nil, fmt.Errorf("failed to find dependents: %w", err)
	}
	out := make([]string, 0, len(closure))
	for id := range closure {
		out = append(out, id)
	}
	return out, nil
}
// validateNoDependents fails the delete if any issue in ids has dependents
// outside the deletion set; those dependents are accumulated into
// result.OrphanedIssues for the error report.
func (s *SQLiteStorage) validateNoDependents(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, result *DeleteIssuesResult) error {
	for i := range ids {
		err := s.checkSingleIssueValidation(ctx, tx, ids[i], idSet, result)
		if err != nil {
			return wrapDBError("check dependents", err)
		}
	}
	return nil
}
// checkSingleIssueValidation verifies that issue id has no dependents outside
// idSet (the deletion set). External dependents are appended to
// result.OrphanedIssues, and an error is returned if any were found.
// The cheap COUNT(*) fast path avoids a row scan for the common
// no-dependents case.
func (s *SQLiteStorage) checkSingleIssueValidation(ctx context.Context, tx *sql.Tx, id string, idSet map[string]bool, result *DeleteIssuesResult) error {
	var depCount int
	err := tx.QueryRowContext(ctx,
		`SELECT COUNT(*) FROM dependencies WHERE depends_on_id = ?`, id).Scan(&depCount)
	if err != nil {
		return fmt.Errorf("failed to check dependents for %s: %w", id, err)
	}
	if depCount == 0 {
		return nil
	}
	rows, err := tx.QueryContext(ctx,
		`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to get dependents for %s: %w", id, err)
	}
	defer func() { _ = rows.Close() }()
	hasExternal := false
	for rows.Next() {
		var depID string
		if err := rows.Scan(&depID); err != nil {
			return fmt.Errorf("failed to scan dependent: %w", err)
		}
		// Dependents inside the deletion set are fine; only external ones
		// would be orphaned.
		if !idSet[depID] {
			hasExternal = true
			result.OrphanedIssues = append(result.OrphanedIssues, depID)
		}
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("failed to iterate dependents for %s: %w", id, err)
	}
	if hasExternal {
		return fmt.Errorf("issue %s has dependents not in deletion set; use --cascade to delete them or --force to orphan them", id)
	}
	return nil
}
// trackOrphanedIssues records (without rejecting) every dependent outside the
// deletion set; used by forced deletes so callers can see what was orphaned.
// The intermediate set deduplicates dependents shared by several deleted issues.
func (s *SQLiteStorage) trackOrphanedIssues(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, result *DeleteIssuesResult) error {
	orphans := map[string]bool{}
	for _, id := range ids {
		err := s.collectOrphansForID(ctx, tx, id, idSet, orphans)
		if err != nil {
			return wrapDBError("collect orphans", err)
		}
	}
	for orphan := range orphans {
		result.OrphanedIssues = append(result.OrphanedIssues, orphan)
	}
	return nil
}
// collectOrphansForID adds to orphanSet every issue that depends on id but is
// not itself in idSet (the deletion set). orphanSet is mutated in place so a
// dependent shared by multiple deleted issues is only recorded once.
func (s *SQLiteStorage) collectOrphansForID(ctx context.Context, tx *sql.Tx, id string, idSet map[string]bool, orphanSet map[string]bool) error {
	rows, err := tx.QueryContext(ctx,
		`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to get dependents for %s: %w", id, err)
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		var depID string
		if err := rows.Scan(&depID); err != nil {
			return fmt.Errorf("failed to scan dependent: %w", err)
		}
		if !idSet[depID] {
			orphanSet[depID] = true
		}
	}
	// Surface any cursor error from the iteration.
	return rows.Err()
}
// buildSQLInClause renders a "?,?,...,?" placeholder list for a SQL IN clause
// along with the matching argument slice. An empty input yields an empty
// clause and an empty (non-nil) argument slice.
func buildSQLInClause(ids []string) (string, []interface{}) {
	var sb strings.Builder
	args := make([]interface{}, 0, len(ids))
	for i, id := range ids {
		if i > 0 {
			sb.WriteByte(',')
		}
		sb.WriteByte('?')
		args = append(args, id)
	}
	return sb.String(), args
}
// populateDeleteStats fills result with counts of dependencies, labels, and
// events that would be affected by deleting the issues named in args.
// DeletedCount is simply the size of the deletion set. The dependencies query
// references the IN clause twice, so its argument list is doubled.
func (s *SQLiteStorage) populateDeleteStats(ctx context.Context, tx *sql.Tx, inClause string, args []interface{}, result *DeleteIssuesResult) error {
	// Arguments for the two-IN-clause dependencies query.
	depArgs := make([]interface{}, 0, 2*len(args))
	depArgs = append(depArgs, args...)
	depArgs = append(depArgs, args...)
	type countQuery struct {
		sql  string
		args []interface{}
		dest *int
	}
	queries := []countQuery{
		{fmt.Sprintf(`SELECT COUNT(*) FROM dependencies WHERE issue_id IN (%s) OR depends_on_id IN (%s)`, inClause, inClause), depArgs, &result.DependenciesCount},
		{fmt.Sprintf(`SELECT COUNT(*) FROM labels WHERE issue_id IN (%s)`, inClause), args, &result.LabelsCount},
		{fmt.Sprintf(`SELECT COUNT(*) FROM events WHERE issue_id IN (%s)`, inClause), args, &result.EventsCount},
	}
	for _, q := range queries {
		if err := tx.QueryRowContext(ctx, q.sql, q.args...).Scan(q.dest); err != nil {
			return fmt.Errorf("failed to count: %w", err)
		}
	}
	result.DeletedCount = len(args)
	return nil
}
// executeDelete converts the target issues to tombstones and removes their
// dependency edges. Despite the name, issues are soft-deleted (bd-3b4):
// only rows in the dependencies table are hard-deleted.
// result.DeletedCount is set to the number of issues actually converted.
func (s *SQLiteStorage) executeDelete(ctx context.Context, tx *sql.Tx, inClause string, args []interface{}, result *DeleteIssuesResult) error {
	// Note: This method now creates tombstones instead of hard-deleting (bd-3b4)
	// Only dependencies are deleted - issues are converted to tombstones
	// 1. Delete dependencies - tombstones don't block other issues
	_, err := tx.ExecContext(ctx,
		fmt.Sprintf(`DELETE FROM dependencies WHERE issue_id IN (%s) OR depends_on_id IN (%s)`, inClause, inClause),
		append(args, args...)...)
	if err != nil {
		return fmt.Errorf("failed to delete dependencies: %w", err)
	}
	// 2. Get issue types before converting to tombstones (need for original_type)
	issueTypes := make(map[string]string)
	rows, err := tx.QueryContext(ctx,
		fmt.Sprintf(`SELECT id, issue_type FROM issues WHERE id IN (%s)`, inClause),
		args...)
	if err != nil {
		return fmt.Errorf("failed to get issue types: %w", err)
	}
	for rows.Next() {
		var id, issueType string
		if err := rows.Scan(&id, &issueType); err != nil {
			_ = rows.Close() // #nosec G104 - error handling not critical in error path
			return fmt.Errorf("failed to scan issue type: %w", err)
		}
		issueTypes[id] = issueType
	}
	_ = rows.Close()
	// BUG FIX: the original never checked rows.Err(), so a cursor error could
	// silently truncate issueTypes and skip tombstoning some issues.
	if err := rows.Err(); err != nil {
		return fmt.Errorf("failed to iterate issue types: %w", err)
	}
	// 3. Convert issues to tombstones (only for issues that exist)
	// Note: closed_at must be set to NULL because of CHECK constraint:
	// (status = 'closed') = (closed_at IS NOT NULL)
	now := time.Now()
	deletedCount := 0
	for id, originalType := range issueTypes {
		execResult, err := tx.ExecContext(ctx, `
			UPDATE issues
			SET status = ?,
				closed_at = NULL,
				deleted_at = ?,
				deleted_by = ?,
				delete_reason = ?,
				original_type = ?,
				updated_at = ?
			WHERE id = ?
		`, types.StatusTombstone, now, "batch delete", "batch delete", originalType, now, id)
		if err != nil {
			return fmt.Errorf("failed to create tombstone for %s: %w", id, err)
		}
		rowsAffected, _ := execResult.RowsAffected()
		if rowsAffected == 0 {
			continue // Issue doesn't exist, skip
		}
		deletedCount++
		// Record tombstone creation event
		_, err = tx.ExecContext(ctx, `
			INSERT INTO events (issue_id, event_type, actor, comment)
			VALUES (?, ?, ?, ?)
		`, id, "deleted", "batch delete", "batch delete")
		if err != nil {
			return fmt.Errorf("failed to record tombstone event for %s: %w", id, err)
		}
		// Mark issue as dirty for incremental export
		_, err = tx.ExecContext(ctx, `
			INSERT INTO dirty_issues (issue_id, marked_at)
			VALUES (?, ?)
			ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
		`, id, now)
		if err != nil {
			return fmt.Errorf("failed to mark issue dirty for %s: %w", id, err)
		}
	}
	// 4. Invalidate blocked issues cache since statuses changed (bd-5qim)
	if err := s.invalidateBlockedCache(ctx, tx); err != nil {
		return fmt.Errorf("failed to invalidate blocked cache: %w", err)
	}
	result.DeletedCount = deletedCount
	return nil
}
// findAllDependentsRecursive finds all issues that depend on the given issues,
// recursively (the transitive closure over depends_on_id -> issue_id edges).
// The returned set includes the seed ids themselves.
func (s *SQLiteStorage) findAllDependentsRecursive(ctx context.Context, tx *sql.Tx, ids []string) (map[string]bool, error) {
	result := make(map[string]bool, len(ids))
	for _, id := range ids {
		result[id] = true
	}
	// Breadth-first traversal over the dependency graph.
	toProcess := make([]string, len(ids))
	copy(toProcess, ids)
	for len(toProcess) > 0 {
		current := toProcess[0]
		toProcess = toProcess[1:]
		// BUG FIX: the original used `defer rows.Close()` inside this loop,
		// which kept every cursor open until the function returned. The query
		// lives in a helper so each iteration's rows are closed promptly.
		dependents, err := s.queryDirectDependents(ctx, tx, current)
		if err != nil {
			return nil, err
		}
		for _, depID := range dependents {
			if !result[depID] {
				result[depID] = true
				toProcess = append(toProcess, depID)
			}
		}
	}
	return result, nil
}

// queryDirectDependents returns the ids of issues that directly depend on id.
func (s *SQLiteStorage) queryDirectDependents(ctx context.Context, tx *sql.Tx, id string) ([]string, error) {
	rows, err := tx.QueryContext(ctx,
		`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, id)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rows.Close() }()
	var dependents []string
	for rows.Next() {
		var depID string
		if err := rows.Scan(&depID); err != nil {
			return nil, err
		}
		dependents = append(dependents, depID)
	}
	return dependents, rows.Err()
}

View File

@@ -0,0 +1,50 @@
package sqlite
import (
"database/sql"
"encoding/json"
"time"
)
// parseNullableTimeString parses a nullable time string from database TEXT columns.
// The ncruces/go-sqlite3 driver only auto-converts TEXT→time.Time for columns declared
// as DATETIME/DATE/TIME/TIMESTAMP. For TEXT columns (like deleted_at), we must parse manually.
// Supports RFC3339, RFC3339Nano, and SQLite's native format.
func parseNullableTimeString(ns sql.NullString) *time.Time {
if !ns.Valid || ns.String == "" {
return nil
}
// Try RFC3339Nano first (more precise), then RFC3339, then SQLite format
for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} {
if t, err := time.Parse(layout, ns.String); err == nil {
return &t
}
}
return nil // Unparseable - shouldn't happen with valid data
}
// parseJSONStringArray parses a JSON string array from a database TEXT column.
// Returns nil for an empty string or invalid JSON.
func parseJSONStringArray(s string) []string {
	if len(s) == 0 {
		return nil
	}
	var out []string
	if err := json.Unmarshal([]byte(s), &out); err != nil {
		// Invalid JSON - shouldn't happen with valid data.
		return nil
	}
	return out
}
// formatJSONStringArray formats a string slice as JSON for database storage.
// Returns the empty string when the slice is nil/empty or marshaling fails.
func formatJSONStringArray(arr []string) string {
	if len(arr) == 0 {
		return ""
	}
	if data, err := json.Marshal(arr); err == nil {
		return string(data)
	}
	return ""
}

View File

@@ -0,0 +1,149 @@
package sqlite
import (
"context"
"fmt"
"time"
"github.com/steveyegge/beads/internal/types"
)
// UpdateIssueID updates an issue ID and all its text fields in a single transaction.
// Every table referencing the old ID (dependencies, events, labels, comments,
// dirty_issues, issue_snapshots, compaction_snapshots) is rewritten to the new
// ID, a 'renamed' event is recorded, and the issue is marked dirty for
// incremental export. Foreign keys are disabled for the duration because the
// referencing rows are updated after the issues row itself.
func (s *SQLiteStorage) UpdateIssueID(ctx context.Context, oldID, newID string, issue *types.Issue, actor string) error {
	// Get exclusive connection to ensure PRAGMA applies
	conn, err := s.db.Conn(ctx)
	if err != nil {
		return fmt.Errorf("failed to get connection: %w", err)
	}
	defer func() { _ = conn.Close() }()
	// Disable foreign keys on this specific connection
	_, err = conn.ExecContext(ctx, `PRAGMA foreign_keys = OFF`)
	if err != nil {
		return fmt.Errorf("failed to disable foreign keys: %w", err)
	}
	// BUG FIX: conn.Close() returns this connection to the pool, so without
	// re-enabling the pragma here, foreign-key enforcement stayed off for
	// whatever unrelated query picked the connection up next. Registered
	// after the Close defer so it runs before the connection is released.
	// (Best-effort: if ctx is already cancelled this cannot run - TODO
	// consider a background context if that proves to matter in practice.)
	defer func() { _, _ = conn.ExecContext(ctx, `PRAGMA foreign_keys = ON`) }()
	tx, err := conn.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	// Rename the issue row itself, refreshing all text fields from issue.
	result, err := tx.ExecContext(ctx, `
		UPDATE issues
		SET id = ?, title = ?, description = ?, design = ?, acceptance_criteria = ?, notes = ?, updated_at = ?
		WHERE id = ?
	`, newID, issue.Title, issue.Description, issue.Design, issue.AcceptanceCriteria, issue.Notes, time.Now(), oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue ID: %w", err)
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to get rows affected: %w", err)
	}
	if rows == 0 {
		return fmt.Errorf("issue not found: %s", oldID)
	}
	// Repoint every referencing table at the new ID.
	_, err = tx.ExecContext(ctx, `UPDATE dependencies SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue_id in dependencies: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE dependencies SET depends_on_id = ? WHERE depends_on_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update depends_on_id in dependencies: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE events SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update events: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE labels SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update labels: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE comments SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update comments: %w", err)
	}
	_, err = tx.ExecContext(ctx, `
		UPDATE dirty_issues SET issue_id = ? WHERE issue_id = ?
	`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update dirty_issues: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE issue_snapshots SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue_snapshots: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE compaction_snapshots SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update compaction_snapshots: %w", err)
	}
	// Mark the (renamed) issue dirty for incremental export.
	_, err = tx.ExecContext(ctx, `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
	`, newID, time.Now())
	if err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}
	// Record the rename in the audit trail (old_value -> new_value).
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, old_value, new_value)
		VALUES (?, 'renamed', ?, ?, ?)
	`, newID, actor, oldID, newID)
	if err != nil {
		return fmt.Errorf("failed to record rename event: %w", err)
	}
	return tx.Commit()
}
// RenameDependencyPrefix updates the prefix in all dependency records.
// GH#630: This was previously a no-op, causing dependencies to break after rename-prefix.
// Both id columns are rewritten with string surgery:
// newPrefix + (old id minus oldPrefix), for every id starting with oldPrefix.
func (s *SQLiteStorage) RenameDependencyPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	// Update issue_id column
	_, err := s.db.ExecContext(ctx, `
		UPDATE dependencies
		SET issue_id = ? || substr(issue_id, length(?) + 1)
		WHERE issue_id LIKE ? || '%'
	`, newPrefix, oldPrefix, oldPrefix)
	if err != nil {
		return fmt.Errorf("failed to update issue_id in dependencies: %w", err)
	}
	// Update depends_on_id column
	_, err = s.db.ExecContext(ctx, `
		UPDATE dependencies
		SET depends_on_id = ? || substr(depends_on_id, length(?) + 1)
		WHERE depends_on_id LIKE ? || '%'
	`, newPrefix, oldPrefix, oldPrefix)
	if err != nil {
		return fmt.Errorf("failed to update depends_on_id in dependencies: %w", err)
	}
	return nil
}
// RenameCounterPrefix is a no-op with hash-based IDs (bd-8e05).
// Kept for backward compatibility with the rename-prefix command; both
// parameters are intentionally ignored.
func (s *SQLiteStorage) RenameCounterPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	// Hash-based IDs don't use counters, so nothing to update
	return nil
}
// ResetCounter is a no-op with hash-based IDs (bd-8e05).
// Kept for backward compatibility; the prefix parameter is intentionally ignored.
func (s *SQLiteStorage) ResetCounter(ctx context.Context, prefix string) error {
	// Hash-based IDs don't use counters, so nothing to reset
	return nil
}

View File

@@ -0,0 +1,429 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/steveyegge/beads/internal/types"
)
// GetCloseReason retrieves the close reason from the most recent closed event
// for an issue. Returns "" (with nil error) when the issue has no closed event
// or the stored comment is NULL.
func (s *SQLiteStorage) GetCloseReason(ctx context.Context, issueID string) (string, error) {
	row := s.db.QueryRowContext(ctx, `
		SELECT comment FROM events
		WHERE issue_id = ? AND event_type = ?
		ORDER BY created_at DESC
		LIMIT 1
	`, issueID, types.EventClosed)
	var comment sql.NullString
	switch err := row.Scan(&comment); {
	case err == sql.ErrNoRows:
		return "", nil
	case err != nil:
		return "", fmt.Errorf("failed to get close reason: %w", err)
	}
	if !comment.Valid {
		return "", nil
	}
	return comment.String, nil
}
// GetCloseReasonsForIssues retrieves close reasons for multiple issues in a
// single query. Returns a map keyed by issue ID; issues with no closed event
// (or an empty/NULL comment) are simply absent from the map.
func (s *SQLiteStorage) GetCloseReasonsForIssues(ctx context.Context, issueIDs []string) (map[string]string, error) {
	result := make(map[string]string)
	if len(issueIDs) == 0 {
		return result, nil
	}
	// Build placeholders for IN clause; args[0] is the event type for the
	// subquery, followed by the issue IDs.
	placeholders := make([]string, len(issueIDs))
	args := make([]interface{}, len(issueIDs)+1)
	args[0] = types.EventClosed
	for i, id := range issueIDs {
		placeholders[i] = "?"
		args[i+1] = id
	}
	// Use a subquery to get the most recent closed event for each issue
	// #nosec G201 - safe SQL with controlled formatting
	query := fmt.Sprintf(`
		SELECT e.issue_id, e.comment
		FROM events e
		INNER JOIN (
			SELECT issue_id, MAX(created_at) as max_created_at
			FROM events
			WHERE event_type = ? AND issue_id IN (%s)
			GROUP BY issue_id
		) latest ON e.issue_id = latest.issue_id AND e.created_at = latest.max_created_at
		WHERE e.event_type = ?
	`, strings.Join(placeholders, ", "))
	// Append event_type again for the outer WHERE clause
	args = append(args, types.EventClosed)
	rows, err := s.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get close reasons: %w", err)
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		var issueID string
		var comment sql.NullString
		if err := rows.Scan(&issueID, &comment); err != nil {
			return nil, fmt.Errorf("failed to scan close reason: %w", err)
		}
		if comment.Valid && comment.String != "" {
			result[issueID] = comment.String
		}
	}
	// BUG FIX: the original never checked rows.Err(), so a cursor failure
	// could silently return a partial map.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate close reasons: %w", err)
	}
	return result, nil
}
// GetIssueByExternalRef retrieves an issue by external reference.
// Returns (nil, nil) when no issue matches. Labels are fetched in a second
// query and attached to the returned issue. The scan destination order must
// match the SELECT column list exactly.
func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef string) (*types.Issue, error) {
	var issue types.Issue
	// Nullable column holders; copied into issue only when Valid.
	var closedAt sql.NullTime
	var estimatedMinutes sql.NullInt64
	var assignee sql.NullString
	var externalRefCol sql.NullString
	var compactedAt sql.NullTime
	var originalSize sql.NullInt64
	var contentHash sql.NullString
	var compactedAtCommit sql.NullString
	var sourceRepo sql.NullString
	var closeReason sql.NullString
	var deletedAt sql.NullString // TEXT column, not DATETIME - must parse manually
	var deletedBy sql.NullString
	var deleteReason sql.NullString
	var originalType sql.NullString
	// Messaging fields (bd-kwro)
	var sender sql.NullString
	var wisp sql.NullInt64
	// Pinned field (bd-7h5)
	var pinned sql.NullInt64
	// Template field (beads-1ra)
	var isTemplate sql.NullInt64
	// Gate fields (bd-udsi)
	var awaitType sql.NullString
	var awaitID sql.NullString
	var timeoutNs sql.NullInt64
	var waiters sql.NullString
	err := s.db.QueryRowContext(ctx, `
		SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
		       status, priority, issue_type, assignee, estimated_minutes,
		       created_at, updated_at, closed_at, external_ref,
		       compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
		       deleted_at, deleted_by, delete_reason, original_type,
		       sender, ephemeral, pinned, is_template,
		       await_type, await_id, timeout_ns, waiters
		FROM issues
		WHERE external_ref = ?
	`, externalRef).Scan(
		&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
		&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
		&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
		&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRefCol,
		&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
		&deletedAt, &deletedBy, &deleteReason, &originalType,
		&sender, &wisp, &pinned, &isTemplate,
		&awaitType, &awaitID, &timeoutNs, &waiters,
	)
	if err == sql.ErrNoRows {
		// Not-found is not an error for this lookup.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get issue by external_ref: %w", err)
	}
	// Copy nullable columns into the issue struct.
	if contentHash.Valid {
		issue.ContentHash = contentHash.String
	}
	if closedAt.Valid {
		issue.ClosedAt = &closedAt.Time
	}
	if estimatedMinutes.Valid {
		mins := int(estimatedMinutes.Int64)
		issue.EstimatedMinutes = &mins
	}
	if assignee.Valid {
		issue.Assignee = assignee.String
	}
	if externalRefCol.Valid {
		issue.ExternalRef = &externalRefCol.String
	}
	if compactedAt.Valid {
		issue.CompactedAt = &compactedAt.Time
	}
	if compactedAtCommit.Valid {
		issue.CompactedAtCommit = &compactedAtCommit.String
	}
	if originalSize.Valid {
		issue.OriginalSize = int(originalSize.Int64)
	}
	if sourceRepo.Valid {
		issue.SourceRepo = sourceRepo.String
	}
	if closeReason.Valid {
		issue.CloseReason = closeReason.String
	}
	// deleted_at is stored as TEXT, so parse it ourselves.
	issue.DeletedAt = parseNullableTimeString(deletedAt)
	if deletedBy.Valid {
		issue.DeletedBy = deletedBy.String
	}
	if deleteReason.Valid {
		issue.DeleteReason = deleteReason.String
	}
	if originalType.Valid {
		issue.OriginalType = originalType.String
	}
	// Messaging fields (bd-kwro)
	if sender.Valid {
		issue.Sender = sender.String
	}
	if wisp.Valid && wisp.Int64 != 0 {
		issue.Wisp = true
	}
	// Pinned field (bd-7h5)
	if pinned.Valid && pinned.Int64 != 0 {
		issue.Pinned = true
	}
	// Template field (beads-1ra)
	if isTemplate.Valid && isTemplate.Int64 != 0 {
		issue.IsTemplate = true
	}
	// Gate fields (bd-udsi)
	if awaitType.Valid {
		issue.AwaitType = awaitType.String
	}
	if awaitID.Valid {
		issue.AwaitID = awaitID.String
	}
	if timeoutNs.Valid {
		issue.Timeout = time.Duration(timeoutNs.Int64)
	}
	if waiters.Valid && waiters.String != "" {
		issue.Waiters = parseJSONStringArray(waiters.String)
	}
	// Fetch labels for this issue
	labels, err := s.GetLabels(ctx, issue.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to get labels: %w", err)
	}
	issue.Labels = labels
	return &issue, nil
}
// SearchIssues finds issues matching query and filters.
// The free-text query matches title, description, or id (substring, LIKE);
// all filter clauses are ANDed together. Clauses and their arguments are
// appended in lockstep, so the order of the blocks below matters.
// Results are ordered by priority then recency, with an optional LIMIT.
func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
	// Check for external database file modifications (daemon mode)
	s.checkFreshness()
	// Hold read lock during database operations to prevent reconnect() from
	// closing the connection mid-query (GH#607 race condition fix)
	s.reconnectMu.RLock()
	defer s.reconnectMu.RUnlock()
	whereClauses := []string{}
	args := []interface{}{}
	// Free-text search across title, description, and id.
	if query != "" {
		whereClauses = append(whereClauses, "(title LIKE ? OR description LIKE ? OR id LIKE ?)")
		pattern := "%" + query + "%"
		args = append(args, pattern, pattern, pattern)
	}
	if filter.TitleSearch != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		pattern := "%" + filter.TitleSearch + "%"
		args = append(args, pattern)
	}
	// Pattern matching
	if filter.TitleContains != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		args = append(args, "%"+filter.TitleContains+"%")
	}
	if filter.DescriptionContains != "" {
		whereClauses = append(whereClauses, "description LIKE ?")
		args = append(args, "%"+filter.DescriptionContains+"%")
	}
	if filter.NotesContains != "" {
		whereClauses = append(whereClauses, "notes LIKE ?")
		args = append(args, "%"+filter.NotesContains+"%")
	}
	if filter.Status != nil {
		whereClauses = append(whereClauses, "status = ?")
		args = append(args, *filter.Status)
	} else if !filter.IncludeTombstones {
		// Exclude tombstones by default unless explicitly filtering for them (bd-1bu)
		whereClauses = append(whereClauses, "status != ?")
		args = append(args, types.StatusTombstone)
	}
	if filter.Priority != nil {
		whereClauses = append(whereClauses, "priority = ?")
		args = append(args, *filter.Priority)
	}
	// Priority ranges
	if filter.PriorityMin != nil {
		whereClauses = append(whereClauses, "priority >= ?")
		args = append(args, *filter.PriorityMin)
	}
	if filter.PriorityMax != nil {
		whereClauses = append(whereClauses, "priority <= ?")
		args = append(args, *filter.PriorityMax)
	}
	if filter.IssueType != nil {
		whereClauses = append(whereClauses, "issue_type = ?")
		args = append(args, *filter.IssueType)
	}
	if filter.Assignee != nil {
		whereClauses = append(whereClauses, "assignee = ?")
		args = append(args, *filter.Assignee)
	}
	// Date ranges (passed as RFC3339 strings for TEXT comparison)
	if filter.CreatedAfter != nil {
		whereClauses = append(whereClauses, "created_at > ?")
		args = append(args, filter.CreatedAfter.Format(time.RFC3339))
	}
	if filter.CreatedBefore != nil {
		whereClauses = append(whereClauses, "created_at < ?")
		args = append(args, filter.CreatedBefore.Format(time.RFC3339))
	}
	if filter.UpdatedAfter != nil {
		whereClauses = append(whereClauses, "updated_at > ?")
		args = append(args, filter.UpdatedAfter.Format(time.RFC3339))
	}
	if filter.UpdatedBefore != nil {
		whereClauses = append(whereClauses, "updated_at < ?")
		args = append(args, filter.UpdatedBefore.Format(time.RFC3339))
	}
	if filter.ClosedAfter != nil {
		whereClauses = append(whereClauses, "closed_at > ?")
		args = append(args, filter.ClosedAfter.Format(time.RFC3339))
	}
	if filter.ClosedBefore != nil {
		whereClauses = append(whereClauses, "closed_at < ?")
		args = append(args, filter.ClosedBefore.Format(time.RFC3339))
	}
	// Empty/null checks
	if filter.EmptyDescription {
		whereClauses = append(whereClauses, "(description IS NULL OR description = '')")
	}
	if filter.NoAssignee {
		whereClauses = append(whereClauses, "(assignee IS NULL OR assignee = '')")
	}
	if filter.NoLabels {
		whereClauses = append(whereClauses, "id NOT IN (SELECT DISTINCT issue_id FROM labels)")
	}
	// Label filtering: issue must have ALL specified labels
	if len(filter.Labels) > 0 {
		for _, label := range filter.Labels {
			whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM labels WHERE label = ?)")
			args = append(args, label)
		}
	}
	// Label filtering (OR): issue must have AT LEAST ONE of these labels
	if len(filter.LabelsAny) > 0 {
		placeholders := make([]string, len(filter.LabelsAny))
		for i, label := range filter.LabelsAny {
			placeholders[i] = "?"
			args = append(args, label)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (SELECT issue_id FROM labels WHERE label IN (%s))", strings.Join(placeholders, ", ")))
	}
	// ID filtering: match specific issue IDs
	if len(filter.IDs) > 0 {
		placeholders := make([]string, len(filter.IDs))
		for i, id := range filter.IDs {
			placeholders[i] = "?"
			args = append(args, id)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (%s)", strings.Join(placeholders, ", ")))
	}
	// Wisp filtering (bd-kwro.9)
	if filter.Wisp != nil {
		if *filter.Wisp {
			whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
		} else {
			whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
		}
	}
	// Pinned filtering (bd-7h5)
	if filter.Pinned != nil {
		if *filter.Pinned {
			whereClauses = append(whereClauses, "pinned = 1")
		} else {
			whereClauses = append(whereClauses, "(pinned = 0 OR pinned IS NULL)")
		}
	}
	// Template filtering (beads-1ra)
	if filter.IsTemplate != nil {
		if *filter.IsTemplate {
			whereClauses = append(whereClauses, "is_template = 1")
		} else {
			whereClauses = append(whereClauses, "(is_template = 0 OR is_template IS NULL)")
		}
	}
	// Parent filtering (bd-yqhh): filter children by parent issue
	if filter.ParentID != nil {
		whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM dependencies WHERE type = 'parent-child' AND depends_on_id = ?)")
		args = append(args, *filter.ParentID)
	}
	whereSQL := ""
	if len(whereClauses) > 0 {
		whereSQL = "WHERE " + strings.Join(whereClauses, " AND ")
	}
	// LIMIT placeholder goes last, matching its position in the final SQL.
	limitSQL := ""
	if filter.Limit > 0 {
		limitSQL = " LIMIT ?"
		args = append(args, filter.Limit)
	}
	// #nosec G201 - safe SQL with controlled formatting
	querySQL := fmt.Sprintf(`
		SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
		       status, priority, issue_type, assignee, estimated_minutes,
		       created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
		       deleted_at, deleted_by, delete_reason, original_type,
		       sender, ephemeral, pinned, is_template,
		       await_type, await_id, timeout_ns, waiters
		FROM issues
		%s
		ORDER BY priority ASC, created_at DESC
		%s
	`, whereSQL, limitSQL)
	rows, err := s.db.QueryContext(ctx, querySQL, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to search issues: %w", err)
	}
	defer func() { _ = rows.Close() }()
	return s.scanIssues(ctx, rows)
}

View File

@@ -33,14 +33,6 @@ func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
if filter.Type != "" {
whereClauses = append(whereClauses, "i.issue_type = ?")
args = append(args, filter.Type)
} else {
// Exclude workflow types from ready work by default (gt-7xtn)
// These are internal workflow items, not work for polecats to claim:
// - merge-request: processed by Refinery
// - gate: async wait conditions
// - molecule: workflow containers
// - message: mail/communication items
whereClauses = append(whereClauses, "i.issue_type NOT IN ('merge-request', 'gate', 'molecule', 'message')")
}
if filter.Priority != nil {

View File

@@ -200,12 +200,12 @@ func TestUnset(t *testing.T) {
t.Run("removes config value", func(t *testing.T) {
store := newTestStore(t)
defer store.Close()
// Set a value first
if err := Set(ctx, store, "beads-metadata"); err != nil {
t.Fatalf("Set() error = %v", err)
}
// Verify it's set
value, err := store.GetConfig(ctx, ConfigKey)
if err != nil {
@@ -214,12 +214,12 @@ func TestUnset(t *testing.T) {
if value != "beads-metadata" {
t.Errorf("GetConfig() = %q, want %q", value, "beads-metadata")
}
// Unset it
if err := Unset(ctx, store); err != nil {
t.Fatalf("Unset() error = %v", err)
}
// Verify it's gone
value, err = store.GetConfig(ctx, ConfigKey)
if err != nil {
@@ -230,152 +230,3 @@ func TestUnset(t *testing.T) {
}
})
}
// TestGetFromYAML verifies sync-branch resolution: the EnvVar override is
// consulted first, with config.yaml as the fallback.
func TestGetFromYAML(t *testing.T) {
	// Save and restore any existing env var. LookupEnv distinguishes
	// "unset" from "set to empty string", so the restore does not leave
	// a spurious empty variable behind (a plain defer os.Setenv would).
	origEnv, hadEnv := os.LookupEnv(EnvVar)
	defer func() {
		if hadEnv {
			os.Setenv(EnvVar, origEnv)
		} else {
			os.Unsetenv(EnvVar)
		}
	}()

	t.Run("returns empty when nothing configured", func(t *testing.T) {
		os.Unsetenv(EnvVar)

		branch := GetFromYAML()
		// GetFromYAML checks env var first, then config.yaml.
		// Without env var set, it should return what's in config.yaml (or empty).
		// We can't easily mock config.yaml here, so just verify no panic.
		_ = branch
	})

	t.Run("returns env var value when set", func(t *testing.T) {
		os.Setenv(EnvVar, "env-sync-branch")
		defer os.Unsetenv(EnvVar)

		branch := GetFromYAML()
		if branch != "env-sync-branch" {
			t.Errorf("GetFromYAML() = %q, want %q", branch, "env-sync-branch")
		}
	})
}
// TestIsConfigured verifies configuration detection driven by the EnvVar
// override (the config.yaml fallback cannot be mocked here).
func TestIsConfigured(t *testing.T) {
	// Save and restore any existing env var; LookupEnv preserves the
	// distinction between unset and empty so the restore is faithful.
	origEnv, hadEnv := os.LookupEnv(EnvVar)
	defer func() {
		if hadEnv {
			os.Setenv(EnvVar, origEnv)
		} else {
			os.Unsetenv(EnvVar)
		}
	}()

	t.Run("returns true when env var is set", func(t *testing.T) {
		os.Setenv(EnvVar, "test-branch")
		defer os.Unsetenv(EnvVar)

		if !IsConfigured() {
			t.Error("IsConfigured() = false when env var is set, want true")
		}
	})

	t.Run("behavior with no env var", func(t *testing.T) {
		os.Unsetenv(EnvVar)
		// Just verify no panic - actual value depends on config.yaml.
		_ = IsConfigured()
	})
}
// TestIsConfiguredWithDB verifies configuration detection that consults the
// environment first and then a database at the given path.
func TestIsConfiguredWithDB(t *testing.T) {
	// Save and restore any existing env var without clobbering an
	// originally-unset variable with an empty string.
	origEnv, hadEnv := os.LookupEnv(EnvVar)
	defer func() {
		if hadEnv {
			os.Setenv(EnvVar, origEnv)
		} else {
			os.Unsetenv(EnvVar)
		}
	}()

	t.Run("returns true when env var is set", func(t *testing.T) {
		os.Setenv(EnvVar, "test-branch")
		defer os.Unsetenv(EnvVar)

		if !IsConfiguredWithDB("") {
			t.Error("IsConfiguredWithDB() = false when env var is set, want true")
		}
	})

	t.Run("returns false for nonexistent database", func(t *testing.T) {
		os.Unsetenv(EnvVar)

		// Should return false because db doesn't exist.
		if IsConfiguredWithDB("/nonexistent/path/beads.db") {
			t.Error("IsConfiguredWithDB() = true for nonexistent db, want false")
		}
	})

	t.Run("returns false for empty path with no db found", func(t *testing.T) {
		os.Unsetenv(EnvVar)

		// When empty path is passed and beads.FindDatabasePath() returns empty,
		// IsConfiguredWithDB should return false. Run from an empty temp dir so
		// no database can be discovered. t.TempDir checks errors and cleans up;
		// Getwd/Chdir errors are now fatal instead of silently ignored.
		tmpDir := t.TempDir()
		origWd, err := os.Getwd()
		if err != nil {
			t.Fatalf("Getwd() error = %v", err)
		}
		if err := os.Chdir(tmpDir); err != nil {
			t.Fatalf("Chdir(%q) error = %v", tmpDir, err)
		}
		defer func() { _ = os.Chdir(origWd) }()

		// Should return false because no database exists.
		if IsConfiguredWithDB("") {
			t.Error("IsConfiguredWithDB('') with no db = true, want false")
		}
	})
}
// TestGetConfigFromDB verifies reading a single config value directly from a
// SQLite database file.
func TestGetConfigFromDB(t *testing.T) {
	t.Run("returns empty for nonexistent database", func(t *testing.T) {
		result := getConfigFromDB("/nonexistent/path/beads.db", ConfigKey)
		if result != "" {
			t.Errorf("getConfigFromDB() for nonexistent db = %q, want empty", result)
		}
	})

	t.Run("returns empty when key not found", func(t *testing.T) {
		// t.TempDir checks errors and cleans up automatically, unlike the
		// previous os.MkdirTemp call whose error was ignored.
		dbPath := t.TempDir() + "/beads.db"

		// Create a valid SQLite database with the config table.
		store, err := sqlite.New(context.Background(), "file:"+dbPath)
		if err != nil {
			t.Fatalf("Failed to create test database: %v", err)
		}
		store.Close()

		result := getConfigFromDB(dbPath, "nonexistent.key")
		if result != "" {
			t.Errorf("getConfigFromDB() for missing key = %q, want empty", result)
		}
	})

	t.Run("returns value when key exists", func(t *testing.T) {
		dbPath := t.TempDir() + "/beads.db"
		ctx := context.Background()

		// Use the same connection string format as getConfigFromDB expects.
		store, err := sqlite.New(ctx, "file:"+dbPath+"?_journal_mode=DELETE")
		if err != nil {
			t.Fatalf("Failed to create test database: %v", err)
		}
		// Set issue_prefix first (required by storage).
		if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
			store.Close()
			t.Fatalf("Failed to set issue_prefix: %v", err)
		}
		// Set the config value we're testing.
		if err := store.SetConfig(ctx, ConfigKey, "test-sync-branch"); err != nil {
			store.Close()
			t.Fatalf("Failed to set config: %v", err)
		}
		store.Close()

		result := getConfigFromDB(dbPath, ConfigKey)
		if result != "test-sync-branch" {
			t.Errorf("getConfigFromDB() = %q, want %q", result, "test-sync-branch")
		}
	})
}

View File

@@ -1,716 +0,0 @@
package syncbranch
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
// TestIsNonFastForwardError exercises the push-rejection classifier against
// representative git output samples: positives in several git phrasings,
// plus negatives (success, auth failures, empty output).
func TestIsNonFastForwardError(t *testing.T) {
	cases := []struct {
		name   string
		output string
		want   bool
	}{
		{
			name:   "non-fast-forward message",
			output: "error: failed to push some refs to 'origin'\n! [rejected] main -> main (non-fast-forward)",
			want:   true,
		},
		{
			name:   "fetch first message",
			output: "error: failed to push some refs to 'origin'\nhint: Updates were rejected because the remote contains work that you do\nhint: not have locally. This is usually caused by another repository pushing\nhint: to the same ref. You may want to first integrate the remote changes\nhint: (e.g., 'git pull ...') before pushing again.\nhint: See the 'Note about fast-forwards' in 'git push --help' for details.\nfetch first",
			want:   true,
		},
		{
			name:   "rejected behind message",
			output: "To github.com:user/repo.git\n! [rejected] main -> main (non-fast-forward)\nerror: failed to push some refs\nhint: rejected because behind remote",
			want:   true,
		},
		{
			name:   "normal push success",
			output: "Everything up-to-date",
			want:   false,
		},
		{
			name:   "authentication error",
			output: "fatal: Authentication failed for 'https://github.com/user/repo.git/'",
			want:   false,
		},
		{
			name:   "permission denied",
			output: "ERROR: Permission to user/repo.git denied to user.",
			want:   false,
		},
		{
			name:   "empty output",
			output: "",
			want:   false,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := isNonFastForwardError(tc.output); got != tc.want {
				t.Errorf("isNonFastForwardError(%q) = %v, want %v", tc.output, got, tc.want)
			}
		})
	}
}
// TestHasChangesInWorktree tests change detection in worktree
//
// Integration test: each subtest builds a real git repository via
// setupTestRepo and checks hasChangesInWorktree against clean, dirty,
// and untracked-file states.
func TestHasChangesInWorktree(t *testing.T) {
	// Shells out to git; skip under -short.
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("no changes in clean worktree", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Everything is committed: no staged, modified, or untracked files.
		hasChanges, err := hasChangesInWorktree(ctx, repoDir, jsonlPath)
		if err != nil {
			t.Fatalf("hasChangesInWorktree() error = %v", err)
		}
		if hasChanges {
			t.Error("hasChangesInWorktree() = true for clean worktree, want false")
		}
	})
	t.Run("detects uncommitted changes", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Modify file without committing
		writeFile(t, jsonlPath, `{"id":"test-1"}`+"\n"+`{"id":"test-2"}`)
		hasChanges, err := hasChangesInWorktree(ctx, repoDir, jsonlPath)
		if err != nil {
			t.Fatalf("hasChangesInWorktree() error = %v", err)
		}
		if !hasChanges {
			t.Error("hasChangesInWorktree() = false with uncommitted changes, want true")
		}
	})
	t.Run("detects new untracked files", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Add new file in .beads — untracked, never staged.
		writeFile(t, filepath.Join(repoDir, ".beads", "metadata.json"), `{}`)
		hasChanges, err := hasChangesInWorktree(ctx, repoDir, jsonlPath)
		if err != nil {
			t.Fatalf("hasChangesInWorktree() error = %v", err)
		}
		if !hasChanges {
			t.Error("hasChangesInWorktree() = false with new file, want true")
		}
	})
	t.Run("handles file outside .beads dir", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		jsonlPath := filepath.Join(repoDir, "issues.jsonl") // Not in .beads
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Modify file — change detection must work for a JSONL path at the
		// repo root as well, not only under .beads.
		writeFile(t, jsonlPath, `{"id":"test-1"}`+"\n"+`{"id":"test-2"}`)
		hasChanges, err := hasChangesInWorktree(ctx, repoDir, jsonlPath)
		if err != nil {
			t.Fatalf("hasChangesInWorktree() error = %v", err)
		}
		if !hasChanges {
			t.Error("hasChangesInWorktree() = false with modified file outside .beads, want true")
		}
	})
}
// TestCommitInWorktree tests committing changes in worktree
//
// Integration test: verifies commitInWorktree creates a commit with the
// requested message and that the commit includes sibling files in .beads.
func TestCommitInWorktree(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("commits staged changes", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Modify file
		writeFile(t, jsonlPath, `{"id":"test-1"}`+"\n"+`{"id":"test-2"}`)
		// Commit using our function
		err := commitInWorktree(ctx, repoDir, ".beads/issues.jsonl", "test commit message")
		if err != nil {
			t.Fatalf("commitInWorktree() error = %v", err)
		}
		// Verify commit was made: latest subject line must carry our message.
		output := getGitOutput(t, repoDir, "log", "-1", "--format=%s")
		if !strings.Contains(output, "test commit message") {
			t.Errorf("commit message = %q, want to contain 'test commit message'", output)
		}
	})
	t.Run("commits entire .beads directory", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Add multiple files
		writeFile(t, filepath.Join(repoDir, ".beads", "metadata.json"), `{"version":"1"}`)
		writeFile(t, jsonlPath, `{"id":"test-1"}`+"\n"+`{"id":"test-2"}`)
		err := commitInWorktree(ctx, repoDir, ".beads/issues.jsonl", "multi-file commit")
		if err != nil {
			t.Fatalf("commitInWorktree() error = %v", err)
		}
		// Verify both files were committed even though only the JSONL path
		// was passed in — the commit is expected to cover .beads as a whole.
		output := getGitOutput(t, repoDir, "diff", "--name-only", "HEAD~1")
		if !strings.Contains(output, "issues.jsonl") {
			t.Error("issues.jsonl not in commit")
		}
		if !strings.Contains(output, "metadata.json") {
			t.Error("metadata.json not in commit")
		}
	})
}
// TestCopyJSONLToMainRepo tests copying JSONL between worktree and main repo
func TestCopyJSONLToMainRepo(t *testing.T) {
	// mkBeadsDir creates the .beads subdirectory, failing the test on error.
	// (The original ignored MkdirTemp/MkdirAll errors, which could turn a
	// setup failure into a confusing copy failure.)
	mkBeadsDir := func(t *testing.T, root string) {
		t.Helper()
		dir := filepath.Join(root, ".beads")
		if err := os.MkdirAll(dir, 0750); err != nil {
			t.Fatalf("MkdirAll(%q) error = %v", dir, err)
		}
	}

	t.Run("copies JSONL file successfully", func(t *testing.T) {
		// t.TempDir checks errors and cleans up automatically.
		worktreeDir := t.TempDir()
		mainRepoDir := t.TempDir()
		mkBeadsDir(t, worktreeDir)
		mkBeadsDir(t, mainRepoDir)

		// Write content to worktree JSONL.
		worktreeContent := `{"id":"test-1","title":"Test Issue"}`
		if err := os.WriteFile(filepath.Join(worktreeDir, ".beads", "issues.jsonl"), []byte(worktreeContent), 0600); err != nil {
			t.Fatalf("Failed to write worktree JSONL: %v", err)
		}

		mainJSONLPath := filepath.Join(mainRepoDir, ".beads", "issues.jsonl")
		if err := copyJSONLToMainRepo(worktreeDir, ".beads/issues.jsonl", mainJSONLPath); err != nil {
			t.Fatalf("copyJSONLToMainRepo() error = %v", err)
		}

		// Verify content was copied byte-for-byte.
		copied, err := os.ReadFile(mainJSONLPath)
		if err != nil {
			t.Fatalf("Failed to read copied file: %v", err)
		}
		if string(copied) != worktreeContent {
			t.Errorf("copied content = %q, want %q", string(copied), worktreeContent)
		}
	})

	t.Run("returns nil when worktree JSONL does not exist", func(t *testing.T) {
		worktreeDir := t.TempDir()
		mainRepoDir := t.TempDir()

		// No .beads dirs, no source file: the copy must be a silent no-op.
		mainJSONLPath := filepath.Join(mainRepoDir, ".beads", "issues.jsonl")
		if err := copyJSONLToMainRepo(worktreeDir, ".beads/issues.jsonl", mainJSONLPath); err != nil {
			t.Errorf("copyJSONLToMainRepo() for nonexistent file = %v, want nil", err)
		}
	})

	t.Run("also copies metadata.json if present", func(t *testing.T) {
		worktreeDir := t.TempDir()
		mainRepoDir := t.TempDir()
		mkBeadsDir(t, worktreeDir)
		mkBeadsDir(t, mainRepoDir)

		// Write JSONL and metadata to worktree.
		if err := os.WriteFile(filepath.Join(worktreeDir, ".beads", "issues.jsonl"), []byte(`{}`), 0600); err != nil {
			t.Fatalf("Failed to write worktree JSONL: %v", err)
		}
		metadataContent := `{"prefix":"bd"}`
		if err := os.WriteFile(filepath.Join(worktreeDir, ".beads", "metadata.json"), []byte(metadataContent), 0600); err != nil {
			t.Fatalf("Failed to write metadata: %v", err)
		}

		mainJSONLPath := filepath.Join(mainRepoDir, ".beads", "issues.jsonl")
		if err := copyJSONLToMainRepo(worktreeDir, ".beads/issues.jsonl", mainJSONLPath); err != nil {
			t.Fatalf("copyJSONLToMainRepo() error = %v", err)
		}

		// Verify metadata was also copied.
		metadata, err := os.ReadFile(filepath.Join(mainRepoDir, ".beads", "metadata.json"))
		if err != nil {
			t.Fatalf("Failed to read metadata: %v", err)
		}
		if string(metadata) != metadataContent {
			t.Errorf("metadata content = %q, want %q", string(metadata), metadataContent)
		}
	})
}
// TestGetRemoteForBranch tests remote detection for branches: "origin" is
// the default, and an explicit branch.<name>.remote config entry wins.
func TestGetRemoteForBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()

	// prepare builds a throwaway repo with a single initial commit.
	prepare := func(t *testing.T) string {
		t.Helper()
		dir := setupTestRepo(t)
		writeFile(t, filepath.Join(dir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, dir, "add", ".")
		runGit(t, dir, "commit", "-m", "initial")
		return dir
	}

	t.Run("returns origin as default", func(t *testing.T) {
		dir := prepare(t)
		defer os.RemoveAll(dir)

		if remote := getRemoteForBranch(ctx, dir, "nonexistent-branch"); remote != "origin" {
			t.Errorf("getRemoteForBranch() = %q, want 'origin'", remote)
		}
	})

	t.Run("returns configured remote", func(t *testing.T) {
		dir := prepare(t)
		defer os.RemoveAll(dir)

		// Point a branch at a non-default remote via git config.
		runGit(t, dir, "config", "branch.test-branch.remote", "upstream")
		if remote := getRemoteForBranch(ctx, dir, "test-branch"); remote != "upstream" {
			t.Errorf("getRemoteForBranch() = %q, want 'upstream'", remote)
		}
	})
}
// TestGetRepoRoot tests repository root detection
//
// Integration test: exercises GetRepoRoot from the repo root, from a nested
// subdirectory, from a non-git directory, and from a linked worktree.
// NOTE(review): os.Getwd/os.Chdir errors are ignored throughout; a failed
// chdir would silently run assertions from the wrong directory — consider
// checking these (or t.Chdir where available).
func TestGetRepoRoot(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("returns repo root for regular repository", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Change to repo directory
		origWd, _ := os.Getwd()
		os.Chdir(repoDir)
		defer os.Chdir(origWd)
		root, err := GetRepoRoot(ctx)
		if err != nil {
			t.Fatalf("GetRepoRoot() error = %v", err)
		}
		// Resolve symlinks for comparison (temp dirs may be reached via
		// symlinked paths, so compare canonical forms).
		expectedRoot, _ := filepath.EvalSymlinks(repoDir)
		actualRoot, _ := filepath.EvalSymlinks(root)
		if actualRoot != expectedRoot {
			t.Errorf("GetRepoRoot() = %q, want %q", actualRoot, expectedRoot)
		}
	})
	t.Run("returns error for non-git directory", func(t *testing.T) {
		tmpDir, _ := os.MkdirTemp("", "non-git-*")
		defer os.RemoveAll(tmpDir)
		origWd, _ := os.Getwd()
		os.Chdir(tmpDir)
		defer os.Chdir(origWd)
		_, err := GetRepoRoot(ctx)
		if err == nil {
			t.Error("GetRepoRoot() expected error for non-git directory")
		}
	})
	t.Run("returns repo root from subdirectory", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Create and change to subdirectory — root detection must walk up.
		subDir := filepath.Join(repoDir, "subdir", "nested")
		os.MkdirAll(subDir, 0750)
		origWd, _ := os.Getwd()
		os.Chdir(subDir)
		defer os.Chdir(origWd)
		root, err := GetRepoRoot(ctx)
		if err != nil {
			t.Fatalf("GetRepoRoot() error = %v", err)
		}
		// Resolve symlinks for comparison
		expectedRoot, _ := filepath.EvalSymlinks(repoDir)
		actualRoot, _ := filepath.EvalSymlinks(root)
		if actualRoot != expectedRoot {
			t.Errorf("GetRepoRoot() from subdirectory = %q, want %q", actualRoot, expectedRoot)
		}
	})
	t.Run("handles worktree correctly", func(t *testing.T) {
		// Create main repo
		mainRepoDir := setupTestRepo(t)
		defer os.RemoveAll(mainRepoDir)
		// Create initial commit
		writeFile(t, filepath.Join(mainRepoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, mainRepoDir, "add", ".")
		runGit(t, mainRepoDir, "commit", "-m", "initial")
		// Create a worktree
		worktreeDir, _ := os.MkdirTemp("", "test-worktree-*")
		defer os.RemoveAll(worktreeDir)
		runGit(t, mainRepoDir, "worktree", "add", worktreeDir, "-b", "feature")
		// Test from worktree - should return main repo root
		origWd, _ := os.Getwd()
		os.Chdir(worktreeDir)
		defer os.Chdir(origWd)
		root, err := GetRepoRoot(ctx)
		if err != nil {
			t.Fatalf("GetRepoRoot() from worktree error = %v", err)
		}
		// Should return the main repo root, not the worktree
		expectedRoot, _ := filepath.EvalSymlinks(mainRepoDir)
		actualRoot, _ := filepath.EvalSymlinks(root)
		if actualRoot != expectedRoot {
			t.Errorf("GetRepoRoot() from worktree = %q, want main repo %q", actualRoot, expectedRoot)
		}
	})
}
// TestHasGitRemote tests remote detection
//
// Integration test: checks HasGitRemote (which inspects the current working
// directory) for repos with and without a remote, and for a non-git dir.
func TestHasGitRemote(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()

	// chdir switches the working directory for a subtest and restores it
	// afterwards, failing fast on error. (The original ignored Getwd/Chdir
	// errors, which would let the assertions run in the wrong directory.)
	chdir := func(t *testing.T, dir string) {
		t.Helper()
		origWd, err := os.Getwd()
		if err != nil {
			t.Fatalf("Getwd() error = %v", err)
		}
		if err := os.Chdir(dir); err != nil {
			t.Fatalf("Chdir(%q) error = %v", dir, err)
		}
		t.Cleanup(func() { _ = os.Chdir(origWd) })
	}

	t.Run("returns false for repo without remote", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")

		chdir(t, repoDir)
		if HasGitRemote(ctx) {
			t.Error("HasGitRemote() = true for repo without remote, want false")
		}
	})

	t.Run("returns true for repo with remote", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Add a remote
		runGit(t, repoDir, "remote", "add", "origin", "https://github.com/test/repo.git")

		chdir(t, repoDir)
		if !HasGitRemote(ctx) {
			t.Error("HasGitRemote() = false for repo with remote, want true")
		}
	})

	t.Run("returns false for non-git directory", func(t *testing.T) {
		// t.TempDir replaces the ignored-error MkdirTemp/RemoveAll pair.
		chdir(t, t.TempDir())
		if HasGitRemote(ctx) {
			t.Error("HasGitRemote() = true for non-git directory, want false")
		}
	})
}
// TestGetCurrentBranch tests current branch detection
//
// NOTE(review): os.Getwd/os.Chdir errors are ignored here; a failed chdir
// would run GetCurrentBranch from the wrong directory — consider checking.
func TestGetCurrentBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("returns current branch name", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		origWd, _ := os.Getwd()
		os.Chdir(repoDir)
		defer os.Chdir(origWd)
		branch, err := GetCurrentBranch(ctx)
		if err != nil {
			t.Fatalf("GetCurrentBranch() error = %v", err)
		}
		// The default branch is usually "master" or "main" depending on git config
		if branch != "master" && branch != "main" {
			// Could also be a user-defined default, just verify it's not empty
			if branch == "" {
				t.Error("GetCurrentBranch() returned empty string")
			}
		}
	})
	t.Run("returns correct branch after checkout", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		// Create initial commit
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// Create and checkout new branch
		runGit(t, repoDir, "checkout", "-b", "feature-branch")
		origWd, _ := os.Getwd()
		os.Chdir(repoDir)
		defer os.Chdir(origWd)
		branch, err := GetCurrentBranch(ctx)
		if err != nil {
			t.Fatalf("GetCurrentBranch() error = %v", err)
		}
		// Unlike the default-branch case above, the name is deterministic here.
		if branch != "feature-branch" {
			t.Errorf("GetCurrentBranch() = %q, want 'feature-branch'", branch)
		}
	})
}
// TestFormatVanishedIssues tests the forensic logging formatter: issues
// present locally but absent from the merged set must be listed, with a
// header, a total count, and long titles elided.
func TestFormatVanishedIssues(t *testing.T) {
	t.Run("formats vanished issues correctly", func(t *testing.T) {
		localIssues := map[string]issueSummary{
			"bd-1": {ID: "bd-1", Title: "First Issue"},
			"bd-2": {ID: "bd-2", Title: "Second Issue"},
			"bd-3": {ID: "bd-3", Title: "Third Issue"},
		}
		mergedIssues := map[string]issueSummary{
			"bd-1": {ID: "bd-1", Title: "First Issue"},
		}

		lines := formatVanishedIssues(localIssues, mergedIssues, 3, 1)
		// Join once and search substrings; none of the expected markers
		// contains a newline, so this is equivalent to a per-line scan.
		text := strings.Join(lines, "\n")

		if !strings.Contains(text, "Mass deletion forensic log") {
			t.Error("formatVanishedIssues() missing header")
		}
		foundBd2 := strings.Contains(text, "bd-2")
		foundBd3 := strings.Contains(text, "bd-3")
		if !foundBd2 || !foundBd3 {
			t.Errorf("formatVanishedIssues() missing vanished issues: bd-2=%v, bd-3=%v", foundBd2, foundBd3)
		}
		if !strings.Contains(text, "Total vanished: 2") {
			t.Error("formatVanishedIssues() missing total count")
		}
	})

	t.Run("truncates long titles", func(t *testing.T) {
		longTitle := strings.Repeat("A", 100)
		lines := formatVanishedIssues(
			map[string]issueSummary{"bd-1": {ID: "bd-1", Title: longTitle}},
			map[string]issueSummary{},
			1, 0,
		)
		// Locate the bd-1 entry; if it is still over-long it must at least
		// carry the "..." ellipsis marker.
		for _, line := range lines {
			if !strings.Contains(line, "bd-1") {
				continue
			}
			if len(line) > 80 && !strings.Contains(line, "...") {
				t.Error("formatVanishedIssues() should truncate long titles with '...'")
			}
			break
		}
	})
}
// TestCheckDivergence tests the public CheckDivergence function. Only the
// "remote branch absent" case is covered here; full divergence scenarios
// need a real remote.
func TestCheckDivergence(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()

	t.Run("returns no divergence when remote does not exist", func(t *testing.T) {
		dir := setupTestRepo(t)
		defer os.RemoveAll(dir)

		// Seed one commit so the repository has a HEAD.
		writeFile(t, filepath.Join(dir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, dir, "add", ".")
		runGit(t, dir, "commit", "-m", "initial")
		// Add remote but don't create the branch on it (repo is its own remote).
		runGit(t, dir, "remote", "add", "origin", dir)

		info, err := CheckDivergence(ctx, dir, "beads-sync")
		if err != nil {
			// Expected to fail since the remote branch doesn't exist.
			return
		}
		// If it succeeds, there is nothing to have diverged from.
		if info.IsDiverged {
			t.Error("CheckDivergence() should not report divergence when remote doesn't exist")
		}
	})
}
// runGitHelper executes a git command in dir and fails the enclosing test
// (with the command's combined output) if it exits non-zero.
func runGitHelper(t *testing.T, dir string, args ...string) {
	t.Helper()
	cmd := exec.Command("git", args...)
	cmd.Dir = dir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("git %v failed: %v\n%s", args, err, out)
	}
}

View File

@@ -1,416 +0,0 @@
package syncbranch
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
)
// TestCommitToSyncBranch tests the main commit function
//
// Integration test: builds a repo via setupTestRepoWithRemote, seeds a
// sync branch, then drives CommitToSyncBranch and inspects the result.
func TestCommitToSyncBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("commits changes to sync branch", func(t *testing.T) {
		// Setup: create a repo with a sync branch
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")
		// Create sync branch
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial sync branch commit")
		runGit(t, repoDir, "checkout", "master")
		// Write new content to commit — differs from what the sync branch holds.
		writeFile(t, jsonlPath, `{"id":"test-1"}`+"\n"+`{"id":"test-2"}`)
		result, err := CommitToSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
		if err != nil {
			t.Fatalf("CommitToSyncBranch() error = %v", err)
		}
		// Content changed, so a commit must have been created.
		if !result.Committed {
			t.Error("CommitToSyncBranch() Committed = false, want true")
		}
		if result.Branch != syncBranch {
			t.Errorf("CommitToSyncBranch() Branch = %q, want %q", result.Branch, syncBranch)
		}
		// Commit messages are expected to carry the "bd sync:" prefix.
		if !strings.Contains(result.Message, "bd sync:") {
			t.Errorf("CommitToSyncBranch() Message = %q, want to contain 'bd sync:'", result.Message)
		}
	})
	t.Run("returns not committed when no changes", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")
		// Create sync branch with content
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		runGit(t, repoDir, "checkout", "master")
		// Write the same content that's in the sync branch
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		// Commit with same content (no changes) — must be reported as a no-op.
		result, err := CommitToSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
		if err != nil {
			t.Fatalf("CommitToSyncBranch() error = %v", err)
		}
		if result.Committed {
			t.Error("CommitToSyncBranch() Committed = true when no changes, want false")
		}
	})
}
// TestPullFromSyncBranch tests pulling changes from sync branch
//
// These subtests use the repository itself as "origin" (via
// setupTestRepoWithRemote), so fetch/pull may legitimately fail in this
// environment; such failures are tolerated and only the successful paths
// are asserted. The previous version expressed "either outcome is fine"
// with empty conditional bodies (staticcheck SA9003); those are replaced
// with t.Logf so the outcome is visible in verbose runs.
func TestPullFromSyncBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()

	t.Run("handles sync branch not on remote", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")

		// Create local sync branch but don't set up remote tracking.
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "local sync")
		runGit(t, repoDir, "checkout", "master")

		// Pull must handle the missing remote branch: either an error from
		// the fetch or a graceful no-op result. Neither is a failure here.
		if _, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false); err != nil {
			t.Logf("PullFromSyncBranch() returned error (acceptable): %v", err)
		}
	})

	t.Run("pulls when already up to date", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")

		// Create sync branch and simulate it being tracked.
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, jsonlPath, `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "sync commit")
		// Set up a fake remote ref at the same commit.
		runGit(t, repoDir, "update-ref", "refs/remotes/origin/"+syncBranch, "HEAD")
		runGit(t, repoDir, "checkout", "master")

		// Pull when already at remote HEAD; the fetch step may fail with a
		// self-remote, which is acceptable.
		if _, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false); err != nil {
			t.Logf("PullFromSyncBranch() returned error (acceptable): %v", err)
		}
	})

	t.Run("copies JSONL to main repo after sync", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")

		// Create sync branch with content.
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, jsonlPath, `{"id":"sync-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "sync commit")
		runGit(t, repoDir, "update-ref", "refs/remotes/origin/"+syncBranch, "HEAD")
		runGit(t, repoDir, "checkout", "master")

		// Remove local JSONL to verify it gets copied back.
		os.Remove(jsonlPath)

		result, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
		if err != nil {
			return // Acceptable in test env
		}
		if result.Pulled {
			// Verify JSONL was copied to main repo.
			if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
				t.Error("PullFromSyncBranch() did not copy JSONL to main repo")
			}
		}
	})

	t.Run("handles fast-forward case", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")

		// Create sync branch with base commit.
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, jsonlPath, `{"id":"base"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "base")
		baseCommit := strings.TrimSpace(getGitOutput(t, repoDir, "rev-parse", "HEAD"))
		// Add another commit and set as remote.
		writeFile(t, jsonlPath, `{"id":"base"}`+"\n"+`{"id":"remote"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "remote commit")
		runGit(t, repoDir, "update-ref", "refs/remotes/origin/"+syncBranch, "HEAD")
		// Reset back to base (so remote is ahead).
		runGit(t, repoDir, "reset", "--hard", baseCommit)
		runGit(t, repoDir, "checkout", "master")

		// Pull should fast-forward; with a self-remote the fetch may fail,
		// in which case there is nothing to assert.
		if _, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false); err != nil {
			t.Logf("PullFromSyncBranch() returned error (acceptable): %v", err)
		}
	})
}
// TestResetToRemote tests resetting sync branch to remote state
// Note: Full remote tests are in cmd/bd tests; this tests the basic flow
func TestResetToRemote(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()

	t.Run("returns error when fetch fails", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		jsonlPath := filepath.Join(repoDir, ".beads", "issues.jsonl")

		// Create local sync branch without remote tracking.
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, jsonlPath, `{"id":"local-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "local commit")
		runGit(t, repoDir, "checkout", "master")

		// ResetToRemote normally fails because the remote branch doesn't
		// exist, but success is also acceptable (origin is the repo itself
		// and might resolve the branch). The previous version expressed
		// this with an empty if-body (staticcheck SA9003); log instead so
		// the outcome is visible in verbose runs.
		if err := ResetToRemote(ctx, repoDir, syncBranch, jsonlPath); err == nil {
			t.Log("ResetToRemote() succeeded without remote branch (acceptable)")
		}
	})
}
// TestPushSyncBranch tests the push function.
// Note: Full push tests require an actual remote; this tests basic error handling.
func TestPushSyncBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("handles missing worktree gracefully", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		// Create sync branch
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		runGit(t, repoDir, "checkout", "master")
		// PushSyncBranch must handle the worktree creation without panicking.
		// Push is expected to fail here (origin doesn't have the branch set
		// up), and other error flavors are acceptable in the test env, so
		// log the error instead of an empty conditional (staticcheck SA9003).
		if err := PushSyncBranch(ctx, repoDir, syncBranch); err != nil {
			t.Logf("PushSyncBranch returned error (acceptable in test env): %v", err)
		}
	})
}
// TestRunCmdWithTimeoutMessage exercises runCmdWithTimeoutMessage with both a
// succeeding and a failing command.
func TestRunCmdWithTimeoutMessage(t *testing.T) {
	ctx := context.Background()
	t.Run("runs command and returns output", func(t *testing.T) {
		echo := exec.CommandContext(ctx, "echo", "hello")
		out, err := runCmdWithTimeoutMessage(ctx, "test message", 5*time.Second, echo)
		if err != nil {
			t.Fatalf("runCmdWithTimeoutMessage() error = %v", err)
		}
		if got := string(out); !strings.Contains(got, "hello") {
			t.Errorf("runCmdWithTimeoutMessage() output = %q, want to contain 'hello'", got)
		}
	})
	t.Run("returns error for failing command", func(t *testing.T) {
		failing := exec.CommandContext(ctx, "false") // Always exits with 1
		if _, err := runCmdWithTimeoutMessage(ctx, "test message", 5*time.Second, failing); err == nil {
			t.Error("runCmdWithTimeoutMessage() expected error for failing command")
		}
	})
}
// TestPreemptiveFetchAndFastForward tests the pre-emptive fetch function.
func TestPreemptiveFetchAndFastForward(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("returns nil when remote branch does not exist", func(t *testing.T) {
		repo := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repo)
		// Create the sync branch locally but never push it.
		runGit(t, repo, "checkout", "-b", "beads-sync")
		writeFile(t, filepath.Join(repo, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repo, "add", ".")
		runGit(t, repo, "commit", "-m", "initial")
		if err := preemptiveFetchAndFastForward(ctx, repo, "beads-sync", "origin"); err != nil {
			t.Errorf("preemptiveFetchAndFastForward() error = %v, want nil (not an error when remote doesn't exist)", err)
		}
	})
	t.Run("no-op when local equals remote", func(t *testing.T) {
		repo := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repo)
		branch := "beads-sync"
		// Create the sync branch with a single commit.
		runGit(t, repo, "checkout", "-b", branch)
		writeFile(t, filepath.Join(repo, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repo, "add", ".")
		runGit(t, repo, "commit", "-m", "initial")
		// Point the remote-tracking ref at the same commit.
		runGit(t, repo, "update-ref", "refs/remotes/origin/"+branch, "HEAD")
		// Already in sync, so this should succeed; a fetch failure against
		// the self-remote is also acceptable in the test environment.
		if err := preemptiveFetchAndFastForward(ctx, repo, branch, "origin"); err != nil {
			return
		}
	})
}
// TestFetchAndRebaseInWorktree tests the fetch and rebase function.
func TestFetchAndRebaseInWorktree(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("returns error when fetch fails", func(t *testing.T) {
		repoDir := setupTestRepoWithRemote(t)
		defer os.RemoveAll(repoDir)
		syncBranch := "beads-sync"
		// Create sync branch locally
		runGit(t, repoDir, "checkout", "-b", syncBranch)
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "initial")
		// fetchAndRebaseInWorktree should fail since the remote doesn't
		// have the branch; with the self-remote fixture it may succeed,
		// which is also acceptable.
		err := fetchAndRebaseInWorktree(ctx, repoDir, syncBranch, "origin")
		if err == nil {
			return // test setup allowed it (self remote)
		}
		// Errors other than "fetch failed" are still acceptable in the
		// test env; log them instead of an empty branch (staticcheck SA9003).
		if !strings.Contains(err.Error(), "fetch failed") {
			t.Logf("fetchAndRebaseInWorktree returned non-fetch error (acceptable): %v", err)
		}
	})
}
// setupTestRepoWithRemote creates a temporary git repository with an initial
// commit, a .beads directory, and a self-referencing "origin" remote (the
// remote exists for configuration purposes only). The directory is registered
// with t.TempDir for automatic cleanup; callers may also remove it themselves.
func setupTestRepoWithRemote(t *testing.T) string {
	t.Helper()
	// t.TempDir handles creation and cleanup, replacing the manual
	// os.MkdirTemp + os.RemoveAll-on-error dance.
	tmpDir := t.TempDir()
	// Initialize git repo with a committer identity so commits succeed.
	runGit(t, tmpDir, "init")
	runGit(t, tmpDir, "config", "user.email", "test@test.com")
	runGit(t, tmpDir, "config", "user.name", "Test User")
	// Create initial commit
	writeFile(t, filepath.Join(tmpDir, "README.md"), "# Test Repo")
	runGit(t, tmpDir, "add", ".")
	runGit(t, tmpDir, "commit", "-m", "initial commit")
	// Create .beads directory
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0750); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Add a fake remote pointing at the repo itself (configuration only).
	runGit(t, tmpDir, "remote", "add", "origin", tmpDir)
	return tmpDir
}

View File

@@ -348,7 +348,7 @@ type Dependency struct {
DependsOnID string `json:"depends_on_id"`
Type DependencyType `json:"type"`
CreatedAt time.Time `json:"created_at"`
CreatedBy string `json:"created_by,omitempty"`
CreatedBy string `json:"created_by"`
// Metadata contains type-specific edge data (JSON blob)
// Examples: similarity scores, approval details, skill proficiency
Metadata string `json:"metadata,omitempty"`

109
skills/beads/README.md Normal file
View File

@@ -0,0 +1,109 @@
# Beads Skill for Claude Code
A comprehensive skill for using [beads](https://github.com/steveyegge/beads) (bd) issue tracking with Claude Code.
## What This Skill Does
This skill teaches Claude Code how to use bd effectively for:
- **Multi-session work tracking** - Persistent memory across conversation compactions
- **Dependency management** - Graph-based issue relationships
- **Session handoff** - Writing notes that survive context resets
- **Molecules and wisps** (v0.34.0+) - Reusable work templates and ephemeral workflows
## Installation
Copy the `beads/` directory to your Claude Code skills location:
```bash
# Global installation
cp -r beads ~/.claude/skills/
# Or project-local
cp -r beads .claude/skills/
```
## When Claude Uses This Skill
The skill activates when conversations involve:
- "multi-session", "complex dependencies", "resume after weeks"
- "project memory", "persistent context", "side quest tracking"
- Work that spans multiple days or compaction cycles
- Tasks too complex for simple TodoWrite lists
## File Structure
```
beads/
├── SKILL.md # Main skill file (Claude reads this first)
├── README.md # This file (for humans)
└── references/ # Detailed documentation (loaded on demand)
├── BOUNDARIES.md # When to use bd vs TodoWrite
├── CLI_BOOTSTRAP_ADMIN.md # CLI command reference
├── DEPENDENCIES.md # Dependency semantics (A blocks B vs B blocks A)
├── INTEGRATION_PATTERNS.md # TodoWrite and other tool integration
├── ISSUE_CREATION.md # When and how to create issues
├── MOLECULES.md # Protos, mols, wisps (v0.34.0+)
├── PATTERNS.md # Common usage patterns
├── RESUMABILITY.md # Writing notes for post-compaction recovery
├── STATIC_DATA.md # Using bd for reference databases
├── TROUBLESHOOTING.md # Common issues and fixes
└── WORKFLOWS.md # Step-by-step workflow guides
```
## Key Concepts
### bd vs TodoWrite
| Use bd when... | Use TodoWrite when... |
|----------------|----------------------|
| Work spans multiple sessions | Single-session tasks |
| Complex dependencies exist | Linear step-by-step work |
| Need to resume after weeks | Just need a quick checklist |
| Knowledge work with fuzzy boundaries | Clear, immediate tasks |
### The Dependency Direction Trap
`bd dep add A B` means **"A depends on B"** (B must complete before A can start).
```bash
# Want: "Setup must complete before Implementation"
bd dep add implementation setup # ✓ CORRECT
# NOT: bd dep add setup implementation # ✗ WRONG
```
### Surviving Compaction
When Claude's context gets compacted, conversation history is lost but bd state survives. Write notes as if explaining to a future Claude with zero context:
```bash
bd update issue-123 --notes "COMPLETED: JWT auth with RS256
KEY DECISION: RS256 over HS256 for key rotation
IN PROGRESS: Password reset flow
NEXT: Implement rate limiting"
```
## Requirements
- [bd CLI](https://github.com/steveyegge/beads) installed (`brew install steveyegge/beads/bd`)
- A git repository (bd requires git for sync)
- Initialized database (`bd init` in project root)
## Version Compatibility
- **v0.34.0+**: Full support including molecules, wisps, and cross-project dependencies
- **v0.15.0+**: Core functionality (dependencies, notes, status tracking)
- **Earlier versions**: Basic functionality but some features may be missing
## Contributing
This skill is maintained at [github.com/steveyegge/beads](https://github.com/steveyegge/beads) in the `skills/beads/` directory.
Issues and PRs welcome for:
- Documentation improvements
- New workflow patterns
- Bug fixes in examples
- Additional troubleshooting scenarios
## License
MIT (same as beads)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,407 @@
# Integration Patterns with Other Skills
How bd-issue-tracking integrates with TodoWrite, writing-plans, and other skills for optimal workflow.
## Contents
- [TodoWrite Integration](#todowrite-integration) - Temporal layering pattern
- [writing-plans Integration](#writing-plans-integration) - Detailed implementation plans
- [Cross-Skill Workflows](#cross-skill-workflows) - Using multiple skills together
- [Decision Framework](#decision-framework) - When to use which tool
---
## TodoWrite Integration
**Both tools complement each other at different timescales:**
### Temporal Layering Pattern
**TodoWrite** (short-term working memory - this hour):
- Tactical execution: "Review Section 3", "Expand Q&A answers"
- Marked completed as you go
- Present/future tense ("Review", "Expand", "Create")
- Ephemeral: Disappears when session ends
**Beads** (long-term episodic memory - this week/month):
- Strategic objectives: "Continue work on strategic planning document"
- Key decisions and outcomes in notes field
- Past tense in notes ("COMPLETED", "Discovered", "Blocked by")
- Persistent: Survives compaction and session boundaries
**Key insight**: TodoWrite = working copy for the current hour. Beads = project journal for the current month.
### The Handoff Pattern
1. **Session start**: Read bead → Create TodoWrite items for immediate actions
2. **During work**: Mark TodoWrite items completed as you go
3. **Reach milestone**: Update bead notes with outcomes + context
4. **Session end**: TodoWrite disappears, bead survives with enriched notes
**After compaction**: TodoWrite is gone forever, but bead notes reconstruct what happened.
### Example: TodoWrite tracks execution, Beads capture meaning
**TodoWrite (ephemeral execution view):**
```
[completed] Implement login endpoint
[in_progress] Add password hashing with bcrypt
[pending] Create session middleware
```
**Corresponding bead notes (persistent context):**
```bash
bd update issue-123 --notes "COMPLETED: Login endpoint with bcrypt password
hashing (12 rounds). KEY DECISION: Using JWT tokens (not sessions) for stateless
auth - simplifies horizontal scaling. IN PROGRESS: Session middleware implementation.
NEXT: Need user input on token expiry time (1hr vs 24hr trade-off)."
```
**What's different**:
- TodoWrite: Task names (what to do)
- Beads: Outcomes and decisions (what was learned, why it matters)
**Don't duplicate**: TodoWrite tracks execution, Beads captures meaning and context.
### When to Update Each Tool
**Update TodoWrite** (frequently):
- Mark task completed as you finish each one
- Add new tasks as you break down work
- Update in_progress when switching tasks
**Update Beads** (at milestones):
- Completed a significant piece of work
- Made a key decision that needs documentation
- Hit a blocker that pauses progress
- About to ask user for input
- Session token usage > 70%
- End of session
**Pattern**: TodoWrite changes every few minutes. Beads updates every hour or at natural breakpoints.
### Full Workflow Example
**Scenario**: Implement OAuth authentication (multi-session work)
**Session 1 - Planning**:
```bash
# Create bd issue
bd create "Implement OAuth authentication" -t feature -p 0 --design "
JWT tokens with refresh rotation.
See BOUNDARIES.md for bd vs TodoWrite decision.
"
# Mark in_progress
bd update oauth-1 --status in_progress
# Create TodoWrite for today's work
TodoWrite:
- [ ] Research OAuth 2.0 refresh token flow
- [ ] Design token schema
- [ ] Set up test environment
```
**End of Session 1**:
```bash
# Update bd with outcomes
bd update oauth-1 --notes "COMPLETED: Researched OAuth2 refresh flow. Decided on 7-day refresh tokens.
KEY DECISION: RS256 over HS256 (enables key rotation per security review).
IN PROGRESS: Need to set up test OAuth provider.
NEXT: Configure test provider, then implement token endpoint."
# TodoWrite disappears when session ends
```
**Session 2 - Implementation** (after compaction):
```bash
# Read bd to reconstruct context
bd show oauth-1
# See: COMPLETED research, NEXT is configure test provider
# Create fresh TodoWrite from NEXT
TodoWrite:
- [ ] Configure test OAuth provider
- [ ] Implement token endpoint
- [ ] Add basic tests
# Work proceeds...
# Update bd at milestone
bd update oauth-1 --notes "COMPLETED: Test provider configured, token endpoint implemented.
TESTS: 5 passing (token generation, validation, expiry).
IN PROGRESS: Adding refresh token rotation.
NEXT: Implement rotation, add rate limiting, security review."
```
**For complete decision criteria and boundaries, see:** [BOUNDARIES.md](BOUNDARIES.md)
---
## writing-plans Integration
**For complex multi-step features**, the design field in bd issues can link to detailed implementation plans that break work into bite-sized RED-GREEN-REFACTOR steps.
### When to Create Detailed Plans
**Use detailed plans for:**
- Complex features with multiple components
- Multi-session work requiring systematic breakdown
- Features where TDD discipline adds value (core logic, critical paths)
- Work that benefits from explicit task sequencing
**Skip detailed plans for:**
- Simple features (single function, straightforward logic)
- Exploratory work (API testing, pattern discovery)
- Infrastructure setup (configuration, wiring)
**The test:** If you can implement it in one session without a checklist, skip the detailed plan.
### Using the writing-plans Skill
When design field needs detailed breakdown, reference the **writing-plans** skill:
**Pattern:**
```bash
# Create issue with high-level design
bd create "Implement OAuth token refresh" --design "
Add JWT refresh token flow with rotation.
See docs/plans/2025-10-23-oauth-refresh-design.md for detailed plan.
"
# Then use writing-plans skill to create detailed plan
# The skill creates: docs/plans/YYYY-MM-DD-<feature-name>.md
```
**Detailed plan structure** (from writing-plans):
- Bite-sized tasks (2-5 minutes each)
- Explicit RED-GREEN-REFACTOR steps per task
- Exact file paths and complete code
- Verification commands with expected output
- Frequent commit points
**Example task from detailed plan:**
```markdown
### Task 1: Token Refresh Endpoint
**Files:**
- Create: `src/auth/refresh.py`
- Test: `tests/auth/test_refresh.py`
**Step 1: Write failing test**
```python
def test_refresh_token_returns_new_access_token():
refresh_token = create_valid_refresh_token()
response = refresh_endpoint(refresh_token)
assert response.status == 200
assert response.access_token is not None
```
**Step 2: Run test to verify it fails**
Run: `pytest tests/auth/test_refresh.py::test_refresh_token_returns_new_access_token -v`
Expected: FAIL with "refresh_endpoint not defined"
**Step 3: Implement minimal code**
[... exact implementation ...]
**Step 4: Verify test passes**
[... verification ...]
**Step 5: Commit**
```bash
git add tests/auth/test_refresh.py src/auth/refresh.py
git commit -m "feat: add token refresh endpoint"
```
```
### Integration with bd Workflow
**Three-layer structure**:
1. **bd issue**: Strategic objective + high-level design
2. **Detailed plan** (writing-plans): Step-by-step execution guide
3. **TodoWrite**: Current task within the plan
**During planning phase:**
1. Create bd issue with high-level design
2. If complex: Use writing-plans skill to create detailed plan
3. Link plan in design field: `See docs/plans/YYYY-MM-DD-<topic>.md`
**During execution phase:**
1. Open detailed plan (if exists)
2. Use TodoWrite to track current task within plan
3. Update bd notes at milestones, not per-task
4. Close bd issue when all plan tasks complete
**Don't duplicate:** Detailed plan = execution steps. BD notes = outcomes and decisions.
**Example bd notes after using detailed plan:**
```bash
bd update oauth-5 --notes "COMPLETED: Token refresh endpoint (5 tasks from plan: endpoint + rotation + tests)
KEY DECISION: 7-day refresh tokens (vs 30-day) - reduces risk of token theft
TESTS: All 12 tests passing (auth, rotation, expiry, error handling)"
```
### When NOT to Use Detailed Plans
**Red flags:**
- Feature is simple enough to implement in one pass
- Work is exploratory (discovering patterns, testing APIs)
- Infrastructure work (OAuth setup, MCP configuration)
- Would spend more time planning than implementing
**Rule of thumb:** Use detailed plans when systematic breakdown prevents mistakes, not for ceremony.
**Pattern summary**:
- **Simple feature**: bd issue only
- **Complex feature**: bd issue + TodoWrite
- **Very complex feature**: bd issue + writing-plans + TodoWrite
---
## Cross-Skill Workflows
### Pattern: Research Document with Strategic Planning
**Scenario**: User asks "Help me write a strategic planning document for Q4"
**Tools used**: bd-issue-tracking + developing-strategic-documents skill
**Workflow**:
1. Create bd issue for tracking:
```bash
bd create "Q4 strategic planning document" -t task -p 0
bd update strat-1 --status in_progress
```
2. Use developing-strategic-documents skill for research and writing
3. Update bd notes at milestones:
```bash
bd update strat-1 --notes "COMPLETED: Research phase (reviewed 5 competitor docs, 3 internal reports)
KEY DECISION: Focus on market expansion over cost optimization per exec input
IN PROGRESS: Drafting recommendations section
NEXT: Get exec review of draft recommendations before finalizing"
```
4. TodoWrite tracks immediate writing tasks:
```
- [ ] Draft recommendation 1: Market expansion
- [ ] Add supporting data from research
- [ ] Create budget estimates
```
**Why this works**: bd preserves context across sessions (document might take days), skill provides writing framework, TodoWrite tracks current work.
### Pattern: Multi-File Refactoring
**Scenario**: Refactor authentication system across 8 files
**Tools used**: bd-issue-tracking + systematic-debugging (if issues found)
**Workflow**:
1. Create epic and subtasks:
```bash
bd create "Refactor auth system to use JWT" -t epic -p 0
bd create "Update login endpoint" -t task
bd create "Update token validation" -t task
bd create "Update middleware" -t task
bd create "Update tests" -t task
# Link hierarchy
bd dep add auth-epic login-1 --type parent-child
bd dep add auth-epic validation-2 --type parent-child
bd dep add auth-epic middleware-3 --type parent-child
bd dep add auth-epic tests-4 --type parent-child
# Add ordering
bd dep add validation-2 login-1 # validation depends on login
bd dep add middleware-3 validation-2 # middleware depends on validation
bd dep add tests-4 middleware-3 # tests depend on middleware
```
2. Work through subtasks in order, using TodoWrite for each:
```
Current: login-1
TodoWrite:
- [ ] Update login route signature
- [ ] Add JWT generation
- [ ] Update tests
- [ ] Verify backward compatibility
```
3. Update bd notes as each completes:
```bash
bd close login-1 --reason "Updated to JWT. Tests passing. Backward compatible with session auth."
```
4. If issues discovered, use systematic-debugging skill + create blocker issues
**Why this works**: bd tracks dependencies and progress across files, TodoWrite focuses on current file, skills provide specialized frameworks when needed.
---
## Decision Framework
### Which Tool for Which Purpose?
| Need | Tool | Why |
|------|------|-----|
| Track today's execution | TodoWrite | Lightweight, shows current progress |
| Preserve context across sessions | bd | Survives compaction, persistent memory |
| Detailed implementation steps | writing-plans | RED-GREEN-REFACTOR breakdown |
| Research document structure | developing-strategic-documents | Domain-specific framework |
| Debug complex issue | systematic-debugging | Structured debugging protocol |
### Decision Tree
```
Is this work done in this session?
├─ Yes → Use TodoWrite only
└─ No → Use bd
├─ Simple feature → bd issue + TodoWrite
└─ Complex feature → bd issue + writing-plans + TodoWrite
Will conversation history get compacted?
├─ Likely → Use bd (context survives)
└─ Unlikely → TodoWrite is sufficient
Does work have dependencies or blockers?
├─ Yes → Use bd (tracks relationships)
└─ No → TodoWrite is sufficient
Is this specialized domain work?
├─ Research/writing → developing-strategic-documents
├─ Complex debugging → systematic-debugging
├─ Detailed implementation → writing-plans
└─ General tracking → bd + TodoWrite
```
### Integration Anti-Patterns
**Don't**:
- Duplicate TodoWrite tasks into bd notes (different purposes)
- Create bd issues for single-session linear work (use TodoWrite)
- Put detailed implementation steps in bd notes (use writing-plans)
- Update bd after every TodoWrite task (update at milestones)
- Use writing-plans for exploratory work (defeats the purpose)
**Do**:
- Update bd when changing tools or reaching milestones
- Use TodoWrite as "working copy" of bd's NEXT section
- Link between tools (bd design field → writing-plans file path)
- Choose the right level of formality for the work complexity
---
## Summary
**Key principle**: Each tool operates at a different timescale and level of detail.
- **TodoWrite**: Minutes to hours (current execution)
- **bd**: Hours to weeks (persistent context)
- **writing-plans**: Days to weeks (detailed breakdown)
- **Other skills**: As needed (domain frameworks)
**Integration pattern**: Use the lightest tool sufficient for the task, add heavier tools only when complexity demands it.
**For complete boundaries and decision criteria, see:** [BOUNDARIES.md](BOUNDARIES.md)

View File

@@ -0,0 +1,354 @@
# Molecules and Wisps Reference
This reference covers bd's molecular chemistry system for reusable work templates and ephemeral workflows.
## The Chemistry Metaphor
bd v0.34.0 introduces a chemistry-inspired workflow system:
| Phase | Name | Storage | Synced? | Use Case |
|-------|------|---------|---------|----------|
| **Solid** | Proto | `.beads/` | Yes | Reusable template (epic with `template` label) |
| **Liquid** | Mol | `.beads/` | Yes | Persistent instance (real issues from template) |
| **Vapor** | Wisp | `.beads-wisp/` | No | Ephemeral instance (operational work, no audit trail) |
**Phase transitions:**
- `spawn` / `pour`: Solid (proto) → Liquid (mol)
- `wisp create`: Solid (proto) → Vapor (wisp)
- `squash`: Vapor (wisp) → Digest (permanent summary)
- `burn`: Vapor (wisp) → Nothing (deleted, no trace)
- `distill`: Liquid (ad-hoc epic) → Solid (proto)
## When to Use Molecules
### Use Protos/Mols When:
- **Repeatable patterns** - Same workflow structure used multiple times (releases, reviews, onboarding)
- **Team knowledge capture** - Encoding tribal knowledge as executable templates
- **Audit trail matters** - Work that needs to be tracked and reviewed later
- **Cross-session persistence** - Work spanning multiple days/sessions
### Use Wisps When:
- **Operational loops** - Patrol cycles, health checks, routine monitoring
- **One-shot orchestration** - Temporary coordination that shouldn't clutter history
- **Diagnostic runs** - Debugging workflows with no archival value
- **High-frequency ephemeral work** - Would create noise in permanent database
**Key insight:** Wisps prevent database bloat from routine operations while still providing structure during execution.
---
## Proto Management
### Creating a Proto
Protos are epics with the `template` label. Create manually or distill from existing work:
```bash
# Manual creation
bd create "Release Workflow" --type epic --label template
bd create "Run tests for {{component}}" --type task
bd dep add task-id epic-id --type parent-child
# Distill from ad-hoc work (extracts template from existing epic)
bd mol distill bd-abc123 --as "Release Workflow" --var version=1.0.0
```
**Proto naming convention:** Use `mol-` prefix for clarity (e.g., `mol-release`, `mol-patrol`).
### Listing Protos
```bash
bd mol catalog # List all protos
bd mol catalog --json # Machine-readable
```
### Viewing Proto Structure
```bash
bd mol show mol-release # Show template structure and variables
bd mol show mol-release --json # Machine-readable
```
---
## Spawning Molecules
### Basic Spawn (Creates Wisp by Default)
```bash
bd mol spawn mol-patrol # Creates wisp (ephemeral)
bd mol spawn mol-feature --pour # Creates mol (persistent)
bd mol spawn mol-release --var version=2.0 # With variable substitution
```
**Chemistry shortcuts:**
```bash
bd pour mol-feature # Shortcut for spawn --pour
bd wisp create mol-patrol # Explicit wisp creation
```
### Spawn with Immediate Execution
```bash
bd mol run mol-release --var version=2.0
```
`bd mol run` does three things:
1. Spawns the molecule (persistent)
2. Assigns root issue to caller
3. Pins root issue for session recovery
**Use `mol run` when:** Starting durable work that should survive crashes. The pin ensures `bd ready` shows the work after restart.
### Spawn with Attachments
Attach additional protos in a single command:
```bash
bd mol spawn mol-feature --attach mol-testing --var name=auth
# Spawns mol-feature, then spawns mol-testing and bonds them
```
**Attach types:**
- `sequential` (default) - Attached runs after primary completes
- `parallel` - Attached runs alongside primary
- `conditional` - Attached runs only if primary fails
```bash
bd mol spawn mol-deploy --attach mol-rollback --attach-type conditional
```
---
## Bonding Molecules
### Bond Types
```bash
bd mol bond A B # Sequential: B runs after A
bd mol bond A B --type parallel # Parallel: B runs alongside A
bd mol bond A B --type conditional # Conditional: B runs if A fails
```
### Operand Combinations
| A | B | Result |
|---|---|--------|
| proto | proto | Compound proto (reusable template) |
| proto | mol | Spawn proto, attach to molecule |
| mol | proto | Spawn proto, attach to molecule |
| mol | mol | Join into compound molecule |
### Phase Control in Bonds
By default, spawned protos inherit target's phase. Override with flags:
```bash
# Found bug during wisp patrol? Persist it:
bd mol bond mol-critical-bug wisp-patrol --pour
# Need ephemeral diagnostic on persistent feature?
bd mol bond mol-temp-check bd-feature --wisp
```
### Custom Compound Names
```bash
bd mol bond mol-feature mol-deploy --as "Feature with Deploy"
```
---
## Wisp Lifecycle
### Creating Wisps
```bash
bd wisp create mol-patrol # From proto
bd mol spawn mol-patrol # Same (spawn defaults to wisp)
bd mol spawn mol-check --var target=db # With variables
```
### Listing Wisps
```bash
bd wisp list # List all wisps
bd wisp list --json # Machine-readable
```
### Ending Wisps
**Option 1: Squash (compress to digest)**
```bash
bd mol squash wisp-abc123 # Auto-generate summary
bd mol squash wisp-abc123 --summary "Completed patrol" # Agent-provided summary
bd mol squash wisp-abc123 --keep-children # Keep children, just create digest
bd mol squash wisp-abc123 --dry-run # Preview
```
Squash creates a permanent digest issue summarizing the wisp's work, then deletes the wisp children.
**Option 2: Burn (delete without trace)**
```bash
bd mol burn wisp-abc123 # Delete wisp, no digest
```
Use burn for routine work with no archival value.
### Garbage Collection
```bash
bd wisp gc # Clean up orphaned wisps
```
---
## Distilling Protos
Extract a reusable template from ad-hoc work:
```bash
bd mol distill bd-o5xe --as "Release Workflow"
bd mol distill bd-abc --var feature_name=auth-refactor --var version=1.0.0
```
**What distill does:**
1. Loads existing epic and all children
2. Clones structure as new proto (adds `template` label)
3. Replaces concrete values with `{{variable}}` placeholders
**Variable syntax (both work):**
```bash
--var branch=feature-auth # variable=value (recommended)
--var feature-auth=branch # value=variable (auto-detected)
```
**Use cases:**
- Team develops good workflow organically, wants to reuse it
- Capture tribal knowledge as executable templates
- Create starting point for similar future work
---
## Cross-Project Dependencies
### Concept
Projects can depend on capabilities shipped by other projects:
```bash
# Project A ships a capability
bd ship auth-api # Marks capability as available
# Project B depends on it
bd dep add bd-123 external:project-a:auth-api
```
### Shipping Capabilities
```bash
bd ship <capability> # Ship capability (requires closed issue)
bd ship <capability> --force # Ship even if issue not closed
bd ship <capability> --dry-run # Preview
```
**How it works:**
1. Find issue with `export:<capability>` label
2. Validate issue is closed
3. Add `provides:<capability>` label
### Depending on External Capabilities
```bash
bd dep add <issue> external:<project>:<capability>
```
The dependency is satisfied when the external project has a closed issue with `provides:<capability>` label.
**`bd ready` respects external deps:** Issues blocked by unsatisfied external dependencies won't appear in ready list.
---
## Common Patterns
### Pattern: Weekly Review Proto
```bash
# Create proto
bd create "Weekly Review" --type epic --label template
bd create "Review open issues" --type task
bd create "Update priorities" --type task
bd create "Archive stale work" --type task
# Link as children...
# Use each week
bd mol spawn mol-weekly-review --pour
```
### Pattern: Ephemeral Patrol Cycle
```bash
# Patrol proto exists
bd wisp create mol-patrol
# Execute patrol work...
# End patrol
bd mol squash wisp-abc123 --summary "Patrol complete: 3 issues found, 2 resolved"
```
### Pattern: Feature with Rollback
```bash
bd mol spawn mol-deploy --attach mol-rollback --attach-type conditional
# If deploy fails, rollback automatically becomes unblocked
```
### Pattern: Capture Tribal Knowledge
```bash
# After completing a good workflow organically
bd mol distill bd-release-epic --as "Release Process" --var version=X.Y.Z
# Now team can: bd mol spawn mol-release-process --var version=2.0.0
```
---
## CLI Quick Reference
| Command | Purpose |
|---------|---------|
| `bd mol catalog` | List available protos |
| `bd mol show <id>` | Show proto/mol structure |
| `bd mol spawn <proto>` | Create wisp from proto (default) |
| `bd mol spawn <proto> --pour` | Create persistent mol from proto |
| `bd mol run <proto>` | Spawn + assign + pin (durable execution) |
| `bd mol bond <A> <B>` | Combine protos or molecules |
| `bd mol distill <epic>` | Extract proto from ad-hoc work |
| `bd mol squash <wisp>` | Compress wisp children to digest |
| `bd mol burn <wisp>` | Delete wisp without trace |
| `bd pour <proto>` | Shortcut for `spawn --pour` |
| `bd wisp create <proto>` | Create ephemeral wisp |
| `bd wisp list` | List all wisps |
| `bd wisp gc` | Garbage collect orphaned wisps |
| `bd ship <capability>` | Publish capability for cross-project deps |
---
## Troubleshooting
**"Proto not found"**
- Check `bd mol catalog` for available protos
- Protos need `template` label on the epic
**"Variable not substituted"**
- Use `--var key=value` syntax
- Check proto for `{{key}}` placeholders with `bd mol show`
**"Wisp commands fail"**
- Wisps stored in `.beads-wisp/` (separate from `.beads/`)
- Check `bd wisp list` for active wisps
**"External dependency not satisfied"**
- Target project must have closed issue with `provides:<capability>` label
- Use `bd ship <capability>` in target project first

View File

@@ -0,0 +1,341 @@
# Common Usage Patterns
Practical patterns for using bd effectively across different scenarios.
## Contents
- [Knowledge Work Session](#knowledge-work-session) - Resume long-running research or writing tasks
- [Side Quest Handling](#side-quest-handling) - Capture discovered work without losing context
- [Multi-Session Project Resume](#multi-session-project-resume) - Pick up work after time away
- [Status Transitions](#status-transitions) - When to change issue status
- [Compaction Recovery](#compaction-recovery) - Resume after conversation history is lost
- [Issue Closure](#issue-closure) - Documenting completion properly
---
## Knowledge Work Session
**Scenario**: User asks "Help me write a proposal for expanding the analytics platform"
**What you see**:
```bash
$ bd ready
# Returns: bd-42 "Research analytics platform expansion proposal" (in_progress)
$ bd show bd-42
Notes: "COMPLETED: Reviewed current stack (Mixpanel, Amplitude)
IN PROGRESS: Drafting cost-benefit analysis section
NEXT: Need user input on budget constraints before finalizing recommendations"
```
**What you do**:
1. Read notes to understand current state
2. Create TodoWrite for immediate work:
```
- [ ] Draft cost-benefit analysis
- [ ] Ask user about budget constraints
- [ ] Finalize recommendations
```
3. Work on tasks, mark TodoWrite items completed
4. At milestone, update bd notes:
```bash
bd update bd-42 --notes "COMPLETED: Cost-benefit analysis drafted.
KEY DECISION: User confirmed $50k budget cap - ruled out enterprise options.
IN PROGRESS: Finalizing recommendations (Posthog + custom ETL).
NEXT: Get user review of draft before closing issue."
```
**Outcome**: TodoWrite disappears at session end, but bd notes preserve context for next session.
**Key insight**: Notes field captures the "why" and context, TodoWrite tracks the "doing" right now.
---
## Side Quest Handling
**Scenario**: During main task, discover a problem that needs attention.
**Pattern**:
1. Create issue immediately: `bd create "Found: inventory system needs refactoring"`
2. Link provenance: `bd dep add main-task new-issue --type discovered-from`
3. Assess urgency: blocker or can defer?
4. **If blocker**:
- `bd update main-task --status blocked`
- `bd update new-issue --status in_progress`
- Work on the blocker
5. **If deferrable**:
- Note in new issue's design field
- Continue main task
- New issue persists for later
**Why this works**: Captures context immediately (before forgetting), preserves relationship to main work, allows flexible prioritization.
**Example (with MCP):**
Working on "Implement checkout flow" (checkout-1), discover payment validation security hole:
1. Create bug issue: `mcp__plugin_beads_beads__create` with `{title: "Fix: payment validation bypasses card expiry check", type: "bug", priority: 0}`
2. Link discovery: `mcp__plugin_beads_beads__dep` with `{from_issue: "checkout-1", to_issue: "payment-bug-2", type: "discovered-from"}`
3. Block current work: `mcp__plugin_beads_beads__update` with `{issue_id: "checkout-1", status: "blocked", notes: "Blocked by payment-bug-2: security hole in validation"}`
4. Start new work: `mcp__plugin_beads_beads__update` with `{issue_id: "payment-bug-2", status: "in_progress"}`
(CLI: `bd create "Fix: payment validation..." -t bug -p 0` then `bd dep add` and `bd update` commands)
---
## Multi-Session Project Resume
**Scenario**: Starting work after days or weeks away from a project.
**Pattern (with MCP)**:
1. **Check what's ready**: Use `mcp__plugin_beads_beads__ready` to see available work
2. **Check what's stuck**: Use `mcp__plugin_beads_beads__blocked` to understand blockers
3. **Check recent progress**: Use `mcp__plugin_beads_beads__list` with `status:"closed"` to see completions
4. **Read detailed context**: Use `mcp__plugin_beads_beads__show` for the issue you'll work on
5. **Update status**: Use `mcp__plugin_beads_beads__update` with `status:"in_progress"`
6. **Begin work**: Create TodoWrite from notes field's NEXT section
(CLI: `bd ready`, `bd blocked`, `bd list --status closed`, `bd show <id>`, `bd update <id> --status in_progress`)
**Example**:
```bash
$ bd ready
Ready to work on (3):
auth-5: "Add OAuth refresh token rotation" (priority: 0)
api-12: "Document REST API endpoints" (priority: 1)
test-8: "Add integration tests for payment flow" (priority: 2)
$ bd show auth-5
Title: Add OAuth refresh token rotation
Status: open
Priority: 0 (critical)
Notes:
COMPLETED: Basic JWT auth working
IN PROGRESS: Need to add token refresh
NEXT: Implement rotation per OWASP guidelines (7-day refresh tokens)
BLOCKER: None - ready to proceed
$ bd update auth-5 --status in_progress
# Now create TodoWrite based on NEXT section
```
**For complete session start workflow with checklist, see:** [WORKFLOWS.md](WORKFLOWS.md#session-start)
---
## Status Transitions
Understanding when to change issue status.
### Status Lifecycle
```
open → in_progress → closed
  ↓             ↓
blocked      blocked
```
### When to Use Each Status
**open** (default):
- Issue created but not started
- Waiting for dependencies to clear
- Planned work not yet begun
- **Command**: Issues start as `open` by default
**in_progress**:
- Actively working on this issue right now
- Has been read and understood
- Making commits or changes related to this
- **Command**: `bd update issue-id --status in_progress`
- **When**: Start of work session on this issue
**blocked**:
- Cannot proceed due to external blocker
- Waiting for user input/decision
- Dependency not completed
- Technical blocker discovered
- **Command**: `bd update issue-id --status blocked`
- **When**: Hit a blocker, capture what blocks you in notes
- **Note**: Document blocker in notes field: "BLOCKER: Waiting for API key from ops team"
**closed**:
- Work completed and verified
- Tests passing
- Acceptance criteria met
- **Command**: `bd close issue-id --reason "Implemented with tests passing"`
- **When**: All work done, ready to move on
- **Note**: Issues remain in database, just marked complete
### Transition Examples
**Starting work**:
```bash
bd ready # See what's available
bd update auth-5 --status in_progress
# Begin working
```
**Hit a blocker**:
```bash
bd update auth-5 --status blocked --notes "BLOCKER: Need OAuth client ID from product team. Emailed Jane on 2025-10-23."
# Switch to different issue or create new work
```
**Unblocking**:
```bash
# Once blocker resolved
bd update auth-5 --status in_progress --notes "UNBLOCKED: Received OAuth credentials. Resuming implementation."
```
**Completing**:
```bash
bd close auth-5 --reason "Implemented OAuth refresh with 7-day rotation. Tests passing. PR #42 merged."
```
---
## Compaction Recovery
**Scenario**: Conversation history has been compacted. You need to resume work with zero conversation context.
**What survives compaction**:
- All bd issues and notes
- Complete work history
- Dependencies and relationships
**What's lost**:
- Conversation history
- TodoWrite lists
- Recent discussion
### Recovery Pattern
1. **Check in-progress work**:
```bash
bd list --status in_progress
```
2. **Read notes for context**:
```bash
bd show issue-id
# Read notes field - should explain current state
```
3. **Reconstruct TodoWrite from notes**:
- COMPLETED section: Done, skip
- IN PROGRESS section: Current state
- NEXT section: **This becomes your TodoWrite list**
4. **Report to user**:
```
"From bd notes: [summary of COMPLETED]. Currently [IN PROGRESS].
Next steps: [from NEXT]. Should I continue with that?"
```
### Example Recovery
**bd show returns**:
```
Issue: bd-42 "OAuth refresh token implementation"
Status: in_progress
Notes:
COMPLETED: Basic JWT validation working (RS256, 1hr access tokens)
KEY DECISION: 7-day refresh tokens per security review
IN PROGRESS: Implementing token rotation endpoint
NEXT: Add rate limiting (5 refresh attempts per 15min), then write tests
BLOCKER: None
```
**Recovery actions**:
1. Read notes, understand context
2. Create TodoWrite:
```
- [ ] Implement rate limiting on refresh endpoint
- [ ] Write tests for token rotation
- [ ] Verify security guidelines met
```
3. Report: "From notes: JWT validation is done with 7-day refresh tokens. Currently implementing rotation endpoint. Next: add rate limiting and tests. Should I continue?"
4. Resume work based on user response
**For complete compaction survival workflow, see:** [WORKFLOWS.md](WORKFLOWS.md#compaction-survival)
---
## Issue Closure
**Scenario**: Work is complete. How to close properly?
### Closure Checklist
Before closing, verify:
- [ ] **Acceptance criteria met**: All items checked off
- [ ] **Tests passing**: If applicable
- [ ] **Documentation updated**: If needed
- [ ] **Follow-up work filed**: New issues created for discovered work
- [ ] **Key decisions documented**: In notes field
### Closure Pattern
**Minimal closure** (simple tasks):
```bash
bd close task-123 --reason "Implemented feature X"
```
**Detailed closure** (complex work):
```bash
# Update notes with final state
bd update task-123 --notes "COMPLETED: OAuth refresh with 7-day rotation
KEY DECISION: RS256 over HS256 per security review
TESTS: 12 tests passing (auth, rotation, expiry, errors)
FOLLOW-UP: Filed perf-99 for token cleanup job"
# Close with summary
bd close task-123 --reason "Implemented OAuth refresh token rotation with rate limiting. All security guidelines met. Tests passing."
```
### Documenting Resolution (Outcome vs Design)
For issues where the outcome differed from initial design, use `--notes` to document what actually happened:
```bash
# Initial design was hypothesis - document actual outcome in notes
bd update bug-456 --notes "RESOLUTION: Not a bug - behavior is correct per OAuth spec. Documentation was unclear. Filed docs-789 to clarify auth flow in user guide."
bd close bug-456 --reason "Resolved: documentation issue, not bug"
```
**Pattern**: Design field = initial approach. Notes field = what actually happened (prefix with RESOLUTION: for clarity).
### Discovering Follow-up Work
When closing reveals new work:
```bash
# While closing auth feature, realize performance needs work
bd create "Optimize token lookup query" -t task -p 2
# Link the provenance
bd dep add auth-5 perf-99 --type discovered-from
# Now close original
bd close auth-5 --reason "OAuth refresh implemented. Discovered perf optimization needed (filed perf-99)."
```
**Why link with discovered-from**: Preserves the context of how you found the new work. Future you will appreciate knowing it came from the auth implementation.
---
## Pattern Summary
| Pattern | When to Use | Key Command | Preserves |
|---------|-------------|-------------|-----------|
| **Knowledge Work** | Long-running research, writing | `bd update --notes` | Context across sessions |
| **Side Quest** | Discovered during other work | `bd dep add --type discovered-from` | Relationship to original |
| **Multi-Session Resume** | Returning after time away | `bd ready`, `bd show` | Full project state |
| **Status Transitions** | Tracking work state | `bd update --status` | Current state |
| **Compaction Recovery** | History lost | Read notes field | All context in notes |
| **Issue Closure** | Completing work | `bd close --reason` | Decisions and outcomes |
**For detailed workflows with step-by-step checklists, see:** [WORKFLOWS.md](WORKFLOWS.md)

View File

@@ -0,0 +1,489 @@
# Troubleshooting Guide
Common issues encountered when using bd and how to resolve them.
## Interface-Specific Troubleshooting
**MCP tools (local environment):**
- MCP tools require bd daemon running
- Check daemon status: `bd daemon --status` (CLI)
- If MCP tools fail, verify daemon is running and restart if needed
- MCP tools automatically use daemon mode (no --no-daemon option)
**CLI (web environment or local):**
- CLI can use daemon mode (default) or direct mode (--no-daemon)
- Direct mode has 3-5 second sync delay
- Web environment: Install via `npm install -g @beads/cli`
- Web environment: Initialize via `bd init <prefix>` before first use
**Most issues below apply to both interfaces** - the underlying database and daemon behavior is the same.
## Contents
- [Dependencies Not Persisting](#dependencies-not-persisting)
- [Status Updates Not Visible](#status-updates-not-visible)
- [Daemon Won't Start](#daemon-wont-start)
- [Database Errors on Cloud Storage](#database-errors-on-cloud-storage)
- [JSONL File Not Created](#jsonl-file-not-created)
- [Version Requirements](#version-requirements)
---
## Dependencies Not Persisting
### Symptom
```bash
bd dep add issue-2 issue-1 --type blocks
# Reports: ✓ Added dependency
bd show issue-2
# Shows: No dependencies listed
```
### Root Cause (Fixed in v0.15.0+)
This was a **bug in bd** (GitHub issue #101) where the daemon ignored dependencies during issue creation. **Fixed in bd v0.15.0** (Oct 21, 2025).
### Resolution
**1. Check your bd version:**
```bash
bd version
```
**2. If version < 0.15.0, update bd:**
```bash
# Via Homebrew (macOS/Linux)
brew upgrade bd
# Via go install
go install github.com/steveyegge/beads/cmd/bd@latest
# Via package manager
# See https://github.com/steveyegge/beads#installing
```
**3. Restart daemon after upgrade:**
```bash
pkill -f "bd daemon" # Kill old daemon
bd daemon # Start new daemon with fix
```
**4. Test dependency creation:**
```bash
bd create "Test A" -t task
bd create "Test B" -t task
bd dep add <B-id> <A-id> --type blocks
bd show <B-id>
# Should show: "Depends on (1): → <A-id>"
```
### Still Not Working?
If dependencies still don't persist after updating:
1. **Check daemon is running:**
```bash
ps aux | grep "bd daemon"
```
2. **Try without --no-daemon flag:**
```bash
# Instead of: bd --no-daemon dep add ...
# Use: bd dep add ... (let daemon handle it)
```
3. **Check JSONL file:**
```bash
cat .beads/issues.jsonl | jq '.dependencies'
# Should show dependency array
```
4. **Report to beads GitHub** with:
- `bd version` output
- Operating system
- Reproducible test case
---
## Status Updates Not Visible
### Symptom
```bash
bd --no-daemon update issue-1 --status in_progress
# Reports: ✓ Updated issue: issue-1
bd show issue-1
# Shows: Status: open (not in_progress!)
```
### Root Cause
This is **expected behavior**, not a bug. Understanding requires knowing bd's architecture:
**BD Architecture:**
- **JSONL files** (`.beads/issues.jsonl`): Human-readable export format
- **SQLite database** (`.beads/*.db`): Source of truth for queries
- **Daemon**: Syncs JSONL ↔ SQLite every 5 minutes
**What `--no-daemon` actually does:**
- **Writes**: Go directly to JSONL file
- **Reads**: Still come from SQLite database
- **Sync delay**: Daemon imports JSONL → SQLite periodically
### Resolution
**Option 1: Use daemon mode (recommended)**
```bash
# Don't use --no-daemon for CRUD operations
bd update issue-1 --status in_progress
bd show issue-1
# ✓ Status reflects immediately
```
**Option 2: Wait for sync (if using --no-daemon)**
```bash
bd --no-daemon update issue-1 --status in_progress
# Wait for the daemon to sync (often 3-5 seconds, but it can take up to the
# daemon's sync interval — default 5m; see `bd daemon --interval`)
sleep 5
bd show issue-1
# ✓ Status should reflect now
```
**Option 3: Manual sync trigger**
```bash
bd --no-daemon update issue-1 --status in_progress
# Trigger sync by exporting/importing
bd export > /dev/null 2>&1 # Forces sync
bd show issue-1
```
### When to Use `--no-daemon`
**Use --no-daemon for:**
- Batch import scripts (performance)
- CI/CD environments (no persistent daemon)
- Testing/debugging
**Don't use --no-daemon for:**
- Interactive development
- Real-time status checks
- When you need immediate query results
---
## Daemon Won't Start
### Symptom
```bash
bd daemon
# Error: not in a git repository
# Hint: run 'git init' to initialize a repository
```
### Root Cause
bd daemon requires a **git repository** because it uses git for:
- Syncing issues to git remote (optional)
- Version control of `.beads/*.jsonl` files
- Commit history of issue changes
### Resolution
**Initialize git repository:**
```bash
# In your project directory
git init
bd daemon
# ✓ Daemon should start now
```
**Prevent git remote operations:**
```bash
# If you don't want daemon to pull from remote
bd daemon --global=false
```
**Flags:**
- `--global=false`: Don't sync with git remote
- `--interval=10m`: Custom sync interval (default: 5m)
- `--auto-commit=true`: Auto-commit JSONL changes
---
## Database Errors on Cloud Storage
### Symptom
```bash
# In directory: /Users/name/Google Drive/...
bd init myproject
# Error: disk I/O error (522)
# OR: Error: database is locked
```
### Root Cause
**SQLite incompatibility with cloud sync filesystems.**
Cloud services (Google Drive, Dropbox, OneDrive, iCloud) don't support:
- POSIX file locking (required by SQLite)
- Consistent file handles across sync operations
- Atomic write operations
This is a **known SQLite limitation**, not a bd bug.
### Resolution
**Move bd database to local filesystem:**
```bash
# Wrong location (cloud sync)
~/Google Drive/My Work/project/.beads/ # ✗ Will fail
# Correct location (local disk)
~/Repos/project/.beads/ # ✓ Works reliably
~/Projects/project/.beads/ # ✓ Works reliably
```
**Migration steps:**
1. **Move project to local disk:**
```bash
mv ~/Google\ Drive/project ~/Repos/project
cd ~/Repos/project
```
2. **Re-initialize bd (if needed):**
```bash
bd init myproject
```
3. **Import existing issues (if you had JSONL export):**
```bash
bd import < issues-backup.jsonl
```
**Alternative: Use global `~/.beads/` database**
If you must keep work on cloud storage:
```bash
# Don't initialize bd in cloud-synced directory
# Use global database instead
cd ~/Google\ Drive/project
bd create "My task"
# Uses ~/.beads/default.db (on local disk)
```
**Workaround limitations:**
- No per-project database isolation
- All projects share same issue prefix
- Manual tracking of which issues belong to which project
**Recommendation:** Keep code/projects on local disk, sync final deliverables to cloud.
---
## JSONL File Not Created
### Symptom
```bash
bd init myproject
bd --no-daemon create "Test" -t task
ls .beads/
# Only shows: .gitignore, myproject.db
# Missing: issues.jsonl
```
### Root Cause
**JSONL initialization coupling.** The `issues.jsonl` file is created by daemon on first startup, not by `bd init`.
### Resolution
**Start daemon once to initialize JSONL:**
```bash
bd daemon --global=false &
# Wait for initialization
sleep 2
# Now JSONL file exists
ls .beads/issues.jsonl
# ✓ File created
# Subsequent --no-daemon operations work
bd --no-daemon create "Task 1" -t task
cat .beads/issues.jsonl
# ✓ Shows task data
```
**Why this matters:**
- Daemon owns the JSONL export format
- First daemon run creates empty JSONL skeleton
- `--no-daemon` operations assume JSONL exists
**Pattern for batch scripts:**
```bash
#!/bin/bash
# Batch import script
bd init myproject
bd daemon --global=false & # Start daemon
sleep 3 # Wait for initialization
# Now safe to use --no-daemon for performance
for item in "${items[@]}"; do
bd --no-daemon create "$item" -t feature
done
# Daemon syncs JSONL → SQLite in background
sleep 5 # Wait for final sync
# Query results
bd stats
```
---
## Version Requirements
### Minimum Version for Dependency Persistence
**Issue:** Dependencies created but don't appear in `bd show` or dependency tree.
**Fix:** Upgrade to **bd v0.15.0+** (released Oct 2025)
**Check version:**
```bash
bd version
# Should show: bd version 0.15.0 or higher
```
**If using MCP plugin:**
```bash
# Update Claude Code beads plugin
claude plugin update beads
```
### Breaking Changes
**v0.15.0:**
- MCP parameter names changed from `from_id/to_id` to `issue_id/depends_on_id`
- Dependency creation now persists correctly in daemon mode
**v0.14.0:**
- Daemon architecture changes
- Auto-sync JSONL behavior introduced
---
## MCP-Specific Issues
### Dependencies Created Backwards
**Symptom:**
Using MCP tools, dependencies end up reversed from intended.
**Example:**
```python
# Want: "task-2 depends on task-1" (task-1 blocks task-2)
beads_add_dependency(issue_id="task-1", depends_on_id="task-2")
# Wrong! This makes task-1 depend on task-2
```
**Root Cause:**
Parameter confusion between old (`from_id/to_id`) and new (`issue_id/depends_on_id`) names.
**Resolution:**
**Correct MCP usage (bd v0.15.0+):**
```python
# Correct: task-2 depends on task-1
beads_add_dependency(
issue_id="task-2", # Issue that has dependency
depends_on_id="task-1", # Issue that must complete first
dep_type="blocks"
)
```
**Mnemonic:**
- `issue_id`: The issue that **waits**
- `depends_on_id`: The issue that **must finish first**
**Equivalent CLI:**
```bash
bd dep add task-2 task-1 --type blocks
# Meaning: task-2 depends on task-1
```
**Verify dependency direction:**
```bash
bd show task-2
# Should show: "Depends on: task-1"
# Not the other way around
```
---
## Getting Help
### Debug Checklist
Before reporting issues, collect this information:
```bash
# 1. Version
bd version
# 2. Daemon status
ps aux | grep "bd daemon"
# 3. Database location
echo $PWD/.beads/*.db
ls -la .beads/
# 4. Git status
git status
git log --oneline -1
# 5. JSONL contents (for dependency issues)
cat .beads/issues.jsonl | jq '.' | head -50
```
### Report to beads GitHub
If problems persist:
1. **Check existing issues:** https://github.com/steveyegge/beads/issues
2. **Create new issue** with:
- bd version (`bd version`)
- Operating system
- Debug checklist output (above)
- Minimal reproducible example
- Expected vs actual behavior
### Claude Code Skill Issues
If the **bd-issue-tracking skill** provides incorrect guidance:
1. **Check skill version:**
```bash
ls -la ~/.claude/skills/bd-issue-tracking/
head -20 ~/.claude/skills/bd-issue-tracking/SKILL.md
```
2. **Report via Claude Code feedback** or user's GitHub
---
## Quick Reference: Common Fixes
| Problem | Quick Fix |
|---------|-----------|
| Dependencies not saving | Upgrade to bd v0.15.0+ |
| Status updates lag | Use daemon mode (not `--no-daemon`) |
| Daemon won't start | Run `git init` first |
| Database errors on Google Drive | Move to local filesystem |
| JSONL file missing | Start daemon once: `bd daemon &` |
| Dependencies backwards (MCP) | Update to v0.15.0+, use `issue_id/depends_on_id` correctly |
---
## Related Documentation
- [CLI Reference](CLI_REFERENCE.md) - Complete command documentation
- [Dependencies Guide](DEPENDENCIES.md) - Understanding dependency types
- [Workflows](WORKFLOWS.md) - Step-by-step workflow guides
- [beads GitHub](https://github.com/steveyegge/beads) - Official documentation