Merge remote-tracking branch 'origin/main' into fix-jj-detached-head-upstream-check

# Conflicts:
#	cmd/bd/sync_git_test.go
This commit is contained in:
Phredrick Phool
2026-01-12 06:49:26 -06:00
205 changed files with 9409 additions and 1305 deletions

202
cmd/bd/actor_test.go Normal file
View File

@@ -0,0 +1,202 @@
package main
import (
"os"
"os/exec"
"strings"
"testing"
)
// TestGetActorWithGit tests the actor resolution fallback chain.
// Priority: --actor flag > BD_ACTOR env > BEADS_ACTOR env > git config user.name > $USER > "unknown"
// TestGetActorWithGit tests the actor resolution fallback chain.
// Priority: --actor flag > BD_ACTOR env > BEADS_ACTOR env > git config user.name > $USER > "unknown"
func TestGetActorWithGit(t *testing.T) {
	// Save original environment and actor variable.
	// os.LookupEnv is used (not Getenv) so that "unset" can be
	// distinguished from "set to empty string" when restoring.
	origActor := actor
	origBdActor, bdActorSet := os.LookupEnv("BD_ACTOR")
	origBeadsActor, beadsActorSet := os.LookupEnv("BEADS_ACTOR")
	origUser, userSet := os.LookupEnv("USER")
	// Cleanup after test: restore the package-level `actor` flag value and
	// put each env var back into its exact prior state (set vs unset).
	defer func() {
		actor = origActor
		if bdActorSet {
			os.Setenv("BD_ACTOR", origBdActor)
		} else {
			os.Unsetenv("BD_ACTOR")
		}
		if beadsActorSet {
			os.Setenv("BEADS_ACTOR", origBeadsActor)
		} else {
			os.Unsetenv("BEADS_ACTOR")
		}
		if userSet {
			os.Setenv("USER", origUser)
		} else {
			os.Unsetenv("USER")
		}
	}()
	// Helper to get current git user.name (may be empty if not configured
	// or if git is unavailable); the result decides which table cases can
	// actually run in this environment.
	getGitUserName := func() string {
		out, err := exec.Command("git", "config", "user.name").Output()
		if err != nil {
			return ""
		}
		return strings.TrimSpace(string(out))
	}
	gitUserName := getGitUserName()
	tests := []struct {
		name        string
		actorFlag   string // simulated --actor flag (package-level `actor` var)
		bdActor     string // BD_ACTOR env value; "" means unset
		beadsActor  string // BEADS_ACTOR env value; "" means unset
		user        string // USER env value; "" means unset
		expected    string
		skipIfNoGit bool // Skip if git user.name is not configured
	}{
		{
			name:       "actor flag takes priority",
			actorFlag:  "flag-actor",
			bdActor:    "bd-actor",
			beadsActor: "beads-actor",
			user:       "system-user",
			expected:   "flag-actor",
		},
		{
			name:       "BD_ACTOR takes priority when no flag",
			actorFlag:  "",
			bdActor:    "bd-actor",
			beadsActor: "beads-actor",
			user:       "system-user",
			expected:   "bd-actor",
		},
		{
			name:       "BEADS_ACTOR takes priority when no BD_ACTOR",
			actorFlag:  "",
			bdActor:    "",
			beadsActor: "beads-actor",
			user:       "system-user",
			expected:   "beads-actor",
		},
		{
			name:        "git config user.name used when no env vars",
			actorFlag:   "",
			bdActor:     "",
			beadsActor:  "",
			user:        "system-user",
			expected:    gitUserName, // Will be git user.name if configured
			skipIfNoGit: true,
		},
		{
			name:       "USER fallback when no git config",
			actorFlag:  "",
			bdActor:    "",
			beadsActor: "",
			user:       "fallback-user",
			expected:   "fallback-user",
			// Note: This test may fail if git user.name is configured
			// We handle this by checking the actual git config in the test
		},
		{
			name:       "unknown as final fallback",
			actorFlag:  "",
			bdActor:    "",
			beadsActor: "",
			user:       "",
			expected:   "unknown",
			// Note: This test may get git user.name instead if configured
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Skip tests that require git user.name to not be configured
			if tt.skipIfNoGit && gitUserName == "" {
				t.Skip("Skipping: git config user.name is not configured")
			}
			// For tests expecting USER or unknown, skip if git user.name is configured
			// because git takes priority over USER
			if (tt.expected == tt.user || tt.expected == "unknown") && gitUserName != "" && tt.bdActor == "" && tt.beadsActor == "" && tt.actorFlag == "" {
				t.Skipf("Skipping: git config user.name (%s) takes priority over expected %s", gitUserName, tt.expected)
			}
			// Set up test environment; unsetting (rather than setting to
			// "") mirrors how a real shell absence looks to the code.
			actor = tt.actorFlag
			if tt.bdActor != "" {
				os.Setenv("BD_ACTOR", tt.bdActor)
			} else {
				os.Unsetenv("BD_ACTOR")
			}
			if tt.beadsActor != "" {
				os.Setenv("BEADS_ACTOR", tt.beadsActor)
			} else {
				os.Unsetenv("BEADS_ACTOR")
			}
			if tt.user != "" {
				os.Setenv("USER", tt.user)
			} else {
				os.Unsetenv("USER")
			}
			// Call the function under test.
			result := getActorWithGit()
			// Check result against the expected resolution.
			if result != tt.expected {
				t.Errorf("getActorWithGit() = %q, want %q", result, tt.expected)
			}
		})
	}
}
// TestGetActorWithGit_PriorityOrder tests that the priority order is respected:
// --actor flag beats BD_ACTOR, which beats BEADS_ACTOR. The three checks run
// sequentially against shared env state, so their order matters.
func TestGetActorWithGit_PriorityOrder(t *testing.T) {
	// Save original state (flag variable plus both env vars, tracking
	// set-vs-unset so the deferred cleanup restores them exactly).
	origActor := actor
	origBdActor, bdActorSet := os.LookupEnv("BD_ACTOR")
	origBeadsActor, beadsActorSet := os.LookupEnv("BEADS_ACTOR")
	defer func() {
		actor = origActor
		if bdActorSet {
			os.Setenv("BD_ACTOR", origBdActor)
		} else {
			os.Unsetenv("BD_ACTOR")
		}
		if beadsActorSet {
			os.Setenv("BEADS_ACTOR", origBeadsActor)
		} else {
			os.Unsetenv("BEADS_ACTOR")
		}
	}()
	// Test: flag > BD_ACTOR > BEADS_ACTOR (all three set at once).
	actor = "from-flag"
	os.Setenv("BD_ACTOR", "from-bd-actor")
	os.Setenv("BEADS_ACTOR", "from-beads-actor")
	result := getActorWithGit()
	if result != "from-flag" {
		t.Errorf("Expected flag to take priority, got %q", result)
	}
	// Test: BD_ACTOR > BEADS_ACTOR (no flag; both env vars still set).
	actor = ""
	result = getActorWithGit()
	if result != "from-bd-actor" {
		t.Errorf("Expected BD_ACTOR to take priority over BEADS_ACTOR, got %q", result)
	}
	// Test: BEADS_ACTOR when BD_ACTOR is empty (unset, not just "").
	os.Unsetenv("BD_ACTOR")
	result = getActorWithGit()
	if result != "from-beads-actor" {
		t.Errorf("Expected BEADS_ACTOR to be used, got %q", result)
	}
}

View File

@@ -102,6 +102,39 @@ func canonicalizeIfRelative(path string) string {
return path
}
// detectPrefixFromJSONL extracts the issue prefix from JSONL data.
// Returns empty string if prefix cannot be detected.
// Used by cold-start bootstrap to initialize the database (GH#b09).
//
// The prefix is taken from the first line that parses as JSON with a
// non-empty "id" field: "gt-abc" -> "gt", "test-001" -> "test", and an
// ID with no hyphen is used whole.
func detectPrefixFromJSONL(jsonlData []byte) string {
	// Split on newlines manually instead of using bufio.Scanner: Scanner's
	// default 64KB token limit makes it stop (with an ignored ErrTooLong)
	// on JSONL lines carrying large embedded text, which would silently
	// lose the prefix even though later lines were fine.
	for _, raw := range bytes.Split(jsonlData, []byte("\n")) {
		line := bytes.TrimSpace(raw) // tolerate CRLF endings and stray padding
		if len(line) == 0 {
			continue
		}
		var issue struct {
			ID string `json:"id"`
		}
		// Skip lines that are not valid JSON (e.g. merge-conflict debris).
		if err := json.Unmarshal(line, &issue); err != nil {
			continue
		}
		if issue.ID == "" {
			continue
		}
		// Extract prefix from ID (e.g., "gt-abc" -> "gt", "test-001" -> "test").
		// A leading hyphen (idx 0) does not count as a separator.
		if prefix, _, found := strings.Cut(issue.ID, "-"); found && prefix != "" {
			return prefix
		}
		// No hyphen - use whole ID as prefix
		return issue.ID
	}
	return ""
}
// autoImportIfNewer checks if JSONL content changed (via hash) and imports if so
// Hash-based comparison is git-proof (mtime comparison fails after git pull).
// Uses collision detection to prevent silently overwriting local changes.
@@ -152,6 +185,34 @@ func autoImportIfNewer() {
debug.Logf("auto-import triggered (hash changed)")
// Check if database needs initialization (GH#b09 - cold-start bootstrap)
// If issue_prefix is not set, the DB is uninitialized and import will fail.
// Auto-detect and set the prefix to enable seamless cold-start recovery.
// Note: Use global store directly as cmdCtx.Store may not be synced yet (GH#b09)
if store != nil {
prefix, prefixErr := store.GetConfig(ctx, "issue_prefix")
if prefixErr != nil || prefix == "" {
// Database needs initialization - detect prefix from JSONL or directory
detectedPrefix := detectPrefixFromJSONL(jsonlData)
if detectedPrefix == "" {
// Fallback: detect from directory name
beadsDir := filepath.Dir(jsonlPath)
parentDir := filepath.Dir(beadsDir)
detectedPrefix = filepath.Base(parentDir)
if detectedPrefix == "." || detectedPrefix == "/" {
detectedPrefix = "bd"
}
}
detectedPrefix = strings.TrimRight(detectedPrefix, "-")
if setErr := store.SetConfig(ctx, "issue_prefix", detectedPrefix); setErr != nil {
fmt.Fprintf(os.Stderr, "Auto-import: failed to initialize database prefix: %v\n", setErr)
return
}
debug.Logf("auto-import: initialized database with prefix '%s'", detectedPrefix)
}
}
// Check for Git merge conflict markers
// Only match if they appear as standalone lines (not embedded in JSON strings)
lines := bytes.Split(jsonlData, []byte("\n"))

View File

@@ -122,6 +122,7 @@ create, update, show, or close operation).`,
Reason: reason,
Session: session,
SuggestNext: suggestNext,
Force: force,
}
resp, err := daemonClient.CloseIssue(closeArgs)
if err != nil {
@@ -191,6 +192,21 @@ create, update, show, or close operation).`,
continue
}
// Check if issue has open blockers (GH#962)
if !force {
blocked, blockers, err := result.Store.IsBlocked(ctx, result.ResolvedID)
if err != nil {
result.Close()
fmt.Fprintf(os.Stderr, "Error checking blockers for %s: %v\n", id, err)
continue
}
if blocked && len(blockers) > 0 {
result.Close()
fmt.Fprintf(os.Stderr, "cannot close %s: blocked by open issues %v (use --force to override)\n", id, blockers)
continue
}
}
if err := result.Store.CloseIssue(ctx, result.ResolvedID, reason, actor, session); err != nil {
result.Close()
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
@@ -240,6 +256,19 @@ create, update, show, or close operation).`,
continue
}
// Check if issue has open blockers (GH#962)
if !force {
blocked, blockers, err := store.IsBlocked(ctx, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error checking blockers for %s: %v\n", id, err)
continue
}
if blocked && len(blockers) > 0 {
fmt.Fprintf(os.Stderr, "cannot close %s: blocked by open issues %v (use --force to override)\n", id, blockers)
continue
}
}
if err := store.CloseIssue(ctx, id, reason, actor, session); err != nil {
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
continue
@@ -283,6 +312,21 @@ create, update, show, or close operation).`,
continue
}
// Check if issue has open blockers (GH#962)
if !force {
blocked, blockers, err := result.Store.IsBlocked(ctx, result.ResolvedID)
if err != nil {
result.Close()
fmt.Fprintf(os.Stderr, "Error checking blockers for %s: %v\n", id, err)
continue
}
if blocked && len(blockers) > 0 {
result.Close()
fmt.Fprintf(os.Stderr, "cannot close %s: blocked by open issues %v (use --force to override)\n", id, blockers)
continue
}
}
if err := result.Store.CloseIssue(ctx, result.ResolvedID, reason, actor, session); err != nil {
result.Close()
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)

View File

@@ -4,12 +4,12 @@ import (
"encoding/json"
"fmt"
"os"
"os/user"
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
)
@@ -95,7 +95,12 @@ Examples:
fmt.Printf("\nComments on %s:\n\n", issueID)
for _, comment := range comments {
fmt.Printf("[%s] %s at %s\n", comment.Author, comment.Text, comment.CreatedAt.Format("2006-01-02 15:04"))
fmt.Printf("[%s] at %s\n", comment.Author, comment.CreatedAt.Format("2006-01-02 15:04"))
rendered := ui.RenderMarkdown(comment.Text)
// TrimRight removes trailing newlines that Glamour adds, preventing extra blank lines
for _, line := range strings.Split(strings.TrimRight(rendered, "\n"), "\n") {
fmt.Printf(" %s\n", line)
}
fmt.Println()
}
},
@@ -132,20 +137,10 @@ Examples:
commentText = args[1]
}
// Get author from author flag, BD_ACTOR var, or system USER var
// Get author from author flag, or use git-aware default
author, _ := cmd.Flags().GetString("author")
if author == "" {
author = os.Getenv("BD_ACTOR")
if author == "" {
author = os.Getenv("USER")
}
if author == "" {
if u, err := user.Current(); err == nil {
author = u.Username
} else {
author = "unknown"
}
}
author = getActorWithGit()
}
var comment *types.Comment

View File

@@ -199,6 +199,7 @@ var createCmd = &cobra.Command{
ExternalRef: externalRefPtr,
Ephemeral: wisp,
CreatedBy: getActorWithGit(),
Owner: getOwner(),
MolType: molType,
RoleType: roleType,
Rig: agentRig,
@@ -423,6 +424,7 @@ var createCmd = &cobra.Command{
WaitsForGate: waitsForGate,
Ephemeral: wisp,
CreatedBy: getActorWithGit(),
Owner: getOwner(),
MolType: string(molType),
RoleType: roleType,
Rig: agentRig,
@@ -482,6 +484,7 @@ var createCmd = &cobra.Command{
EstimatedMinutes: estimatedMinutes,
Ephemeral: wisp,
CreatedBy: getActorWithGit(),
Owner: getOwner(),
MolType: molType,
RoleType: roleType,
Rig: agentRig,
@@ -735,8 +738,8 @@ func createInRig(cmd *cobra.Command, rigName, title, description, issueType stri
FatalError("cannot use --rig: %v", err)
}
// Resolve the target rig's beads directory
targetBeadsDir, _, err := routing.ResolveBeadsDirForRig(rigName, townBeadsDir)
// Resolve the target rig's beads directory and prefix
targetBeadsDir, targetPrefix, err := routing.ResolveBeadsDirForRig(rigName, townBeadsDir)
if err != nil {
FatalError("%v", err)
}
@@ -753,6 +756,13 @@ func createInRig(cmd *cobra.Command, rigName, title, description, issueType stri
}
}()
// Prepare prefix override from routes.jsonl for cross-rig creation
// Strip trailing hyphen - database stores prefix without it (e.g., "aops" not "aops-")
var prefixOverride string
if targetPrefix != "" {
prefixOverride = strings.TrimSuffix(targetPrefix, "-")
}
var externalRefPtr *string
if externalRef != "" {
externalRefPtr = &externalRef
@@ -808,6 +818,7 @@ func createInRig(cmd *cobra.Command, rigName, title, description, issueType stri
ExternalRef: externalRefPtr,
Ephemeral: wisp,
CreatedBy: getActorWithGit(),
Owner: getOwner(),
// Event fields (bd-xwvo fix)
EventKind: eventCategory,
Actor: eventActor,
@@ -820,6 +831,8 @@ func createInRig(cmd *cobra.Command, rigName, title, description, issueType stri
// Time scheduling fields (bd-xwvo fix)
DueAt: dueAt,
DeferUntil: deferUntil,
// Cross-rig routing: use route prefix instead of database config
PrefixOverride: prefixOverride,
}
if err := targetStore.CreateIssue(ctx, issue, actor); err != nil {

View File

@@ -35,14 +35,16 @@ The daemon will:
- Auto-import when remote changes detected
Common operations:
bd daemon --start Start the daemon (background)
bd daemon --start --foreground Start in foreground (for systemd/supervisord)
bd daemon --stop Stop a running daemon
bd daemon --stop-all Stop ALL running bd daemons
bd daemon --status Check if daemon is running
bd daemon --health Check daemon health and metrics
bd daemon start Start the daemon (background)
bd daemon start --foreground Start in foreground (for systemd/supervisord)
bd daemon stop Stop current workspace daemon
bd daemon status Show daemon status
bd daemon status --all Show all daemons with health check
bd daemon logs View daemon logs
bd daemon restart Restart daemon
bd daemon killall Stop all running daemons
Run 'bd daemon' with no flags to see available options.`,
Run 'bd daemon --help' to see all subcommands.`,
Run: func(cmd *cobra.Command, args []string) {
start, _ := cmd.Flags().GetBool("start")
stop, _ := cmd.Flags().GetBool("stop")
@@ -66,6 +68,25 @@ Run 'bd daemon' with no flags to see available options.`,
return
}
// Show deprecation warnings for flag-based actions (skip in JSON mode for agent ergonomics)
if !jsonOutput {
if start {
fmt.Fprintf(os.Stderr, "Warning: --start is deprecated, use 'bd daemon start' instead\n")
}
if stop {
fmt.Fprintf(os.Stderr, "Warning: --stop is deprecated, use 'bd daemon stop' instead\n")
}
if stopAll {
fmt.Fprintf(os.Stderr, "Warning: --stop-all is deprecated, use 'bd daemon killall' instead\n")
}
if status {
fmt.Fprintf(os.Stderr, "Warning: --status is deprecated, use 'bd daemon status' instead\n")
}
if health {
fmt.Fprintf(os.Stderr, "Warning: --health is deprecated, use 'bd daemon status --all' instead\n")
}
}
// If auto-commit/auto-push flags weren't explicitly provided, read from config
// GH#871: Read from config.yaml first (team-shared), then fall back to SQLite (legacy)
// (skip if --stop, --status, --health, --metrics)
@@ -219,16 +240,22 @@ Run 'bd daemon' with no flags to see available options.`,
}
func init() {
daemonCmd.Flags().Bool("start", false, "Start the daemon")
// Register subcommands (preferred interface)
daemonCmd.AddCommand(daemonStartCmd)
daemonCmd.AddCommand(daemonStatusCmd)
// Note: stop, restart, logs, killall, list, health subcommands are registered in daemons.go
// Legacy flags (deprecated - use subcommands instead)
daemonCmd.Flags().Bool("start", false, "Start the daemon (deprecated: use 'bd daemon start')")
daemonCmd.Flags().Duration("interval", 5*time.Second, "Sync check interval")
daemonCmd.Flags().Bool("auto-commit", false, "Automatically commit changes")
daemonCmd.Flags().Bool("auto-push", false, "Automatically push commits")
daemonCmd.Flags().Bool("auto-pull", false, "Automatically pull from remote (default: true when sync.branch configured)")
daemonCmd.Flags().Bool("local", false, "Run in local-only mode (no git required, no sync)")
daemonCmd.Flags().Bool("stop", false, "Stop running daemon")
daemonCmd.Flags().Bool("stop-all", false, "Stop all running bd daemons")
daemonCmd.Flags().Bool("status", false, "Show daemon status")
daemonCmd.Flags().Bool("health", false, "Check daemon health and metrics")
daemonCmd.Flags().Bool("stop", false, "Stop running daemon (deprecated: use 'bd daemon stop')")
daemonCmd.Flags().Bool("stop-all", false, "Stop all running bd daemons (deprecated: use 'bd daemon killall')")
daemonCmd.Flags().Bool("status", false, "Show daemon status (deprecated: use 'bd daemon status')")
daemonCmd.Flags().Bool("health", false, "Check daemon health (deprecated: use 'bd daemon status --all')")
daemonCmd.Flags().Bool("metrics", false, "Show detailed daemon metrics")
daemonCmd.Flags().String("log", "", "Log file path (default: .beads/daemon.log)")
daemonCmd.Flags().Bool("foreground", false, "Run in foreground (don't daemonize)")
@@ -464,7 +491,12 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Get workspace path (.beads directory) - beadsDir already defined above
// Get actual workspace root (parent of .beads)
workspacePath := filepath.Dir(beadsDir)
socketPath := filepath.Join(beadsDir, "bd.sock")
// Use short socket path to avoid Unix socket path length limits (macOS: 104 chars)
socketPath, err := rpc.EnsureSocketDir(rpc.ShortSocketPath(workspacePath))
if err != nil {
log.Error("failed to create socket directory", "error", err)
return
}
serverCtx, serverCancel := context.WithCancel(ctx)
defer serverCancel()

View File

@@ -457,13 +457,17 @@ func recordDaemonStartFailure() {
// getSocketPath returns the daemon socket path for the current database.
// If BD_SOCKET env var is set, that value wins (enables test isolation).
// Otherwise uses rpc.ShortSocketPath so long workspace paths don't exceed
// Unix socket path limits (macOS: 104 chars); such paths are relocated to
// /tmp/beads-{hash}/ (GH#1001).
func getSocketPath() string {
	// Environment override has highest priority (test isolation).
	if override := os.Getenv("BD_SOCKET"); override != "" {
		return override
	}
	// dbPath lives inside .beads/, so the workspace root is two levels up.
	workspace := filepath.Dir(filepath.Dir(dbPath))
	return rpc.ShortSocketPath(workspace)
}
// emitVerboseWarning prints a one-line warning when falling back to direct mode

View File

@@ -340,6 +340,11 @@ func TestDaemonAutostart_RestartDaemonForVersionMismatch_Stubbed(t *testing.T) {
t.Fatalf("getPIDFilePath: %v", err)
}
sock := getSocketPath()
// Create socket directory if needed (GH#1001 - socket may be in /tmp/beads-{hash}/)
sockDir := filepath.Dir(sock)
if err := os.MkdirAll(sockDir, 0o750); err != nil {
t.Fatalf("MkdirAll sockDir: %v", err)
}
if err := os.WriteFile(pidFile, []byte("999999\n"), 0o600); err != nil {
t.Fatalf("WriteFile pid: %v", err)
}

View File

@@ -7,6 +7,7 @@ import (
"strconv"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/rpc"
)
// ensureBeadsDir ensures the local beads directory exists (.beads in the current workspace)
@@ -60,13 +61,16 @@ func getEnvBool(key string, defaultValue bool) bool {
// getSocketPathForPID determines the socket path for a given PID file.
// If BD_SOCKET env var is set, uses that value instead.
// Uses rpc.ShortSocketPath to avoid Unix socket path length limits (macOS: 104 chars).
func getSocketPathForPID(pidFile string) string {
	// Environment override has highest priority (enables test isolation).
	if override := os.Getenv("BD_SOCKET"); override != "" {
		return override
	}
	// The PID file lives in <workspace>/.beads/, so the workspace root is
	// two directory levels above it.
	workspace := filepath.Dir(filepath.Dir(pidFile))
	return rpc.ShortSocketPath(workspace)
}
// getPIDFilePath returns the path to the daemon PID file

View File

@@ -4,6 +4,8 @@ import (
"path/filepath"
"strings"
"testing"
"github.com/steveyegge/beads/internal/rpc"
)
// TestSocketPathEnvOverride verifies that BD_SOCKET env var overrides default socket path.
@@ -53,6 +55,77 @@ func TestSocketPathDefaultBehavior(t *testing.T) {
}
}
// TestSocketPathForPIDLongPath verifies that long workspace paths use shortened socket paths.
// This fixes GH#1001 where pytest temp directories exceeded macOS's 104-byte socket path limit.
func TestSocketPathForPIDLongPath(t *testing.T) {
	t.Setenv("BD_SOCKET", "")
	// Build a workspace long enough that <workspace>/.beads/bd.sock would
	// blow past the 103-byte usable limit.
	workspace := "/" + strings.Repeat("a", 90) // 91 bytes
	pidPath := filepath.Join(workspace, ".beads", "daemon.pid")
	sock := getSocketPathForPID(pidPath)
	// The natural in-workspace socket path would be too long; it must not be used.
	if natural := filepath.Join(workspace, ".beads", "bd.sock"); sock == natural {
		t.Errorf("getSocketPathForPID should use short path for long workspaces, got natural path %q (%d bytes)",
			sock, len(sock))
	}
	// The shortened path must live under /tmp/beads-{hash}/ ...
	if !strings.HasPrefix(sock, "/tmp/beads-") {
		t.Errorf("getSocketPathForPID(%q) = %q, want path starting with /tmp/beads-", pidPath, sock)
	}
	// ... keep the conventional socket filename ...
	if !strings.HasSuffix(sock, "/bd.sock") {
		t.Errorf("getSocketPathForPID(%q) = %q, want path ending with /bd.sock", pidPath, sock)
	}
	// ... and actually fit within the limit.
	if len(sock) > 103 {
		t.Errorf("getSocketPathForPID returned path of %d bytes, want <= 103", len(sock))
	}
}
// TestSocketPathForPIDClientDaemonAgreement verifies that getSocketPathForPID
// returns the same path as rpc.ShortSocketPath for the same workspace.
// This is critical - if they disagree, the daemon listens on one path while
// the client tries to connect to another, causing connection failures.
// This test caught the GH#1001 bug where daemon.go used filepath.Join directly
// instead of rpc.ShortSocketPath.
func TestSocketPathForPIDClientDaemonAgreement(t *testing.T) {
	t.Setenv("BD_SOCKET", "")
	cases := []struct {
		name      string
		workspace string
	}{
		{"short_path", "/home/user/project"},
		{"medium_path", "/Users/testuser/Documents/projects/myapp"},
		{"long_path", "/" + strings.Repeat("a", 90)}, // Forces short socket path
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Path the daemon-management code derives from the PID file.
			viaPID := getSocketPathForPID(filepath.Join(tc.workspace, ".beads", "daemon.pid"))
			// Path the client derives directly from the workspace root.
			viaRPC := rpc.ShortSocketPath(tc.workspace)
			if viaPID != viaRPC {
				t.Errorf("socket path mismatch for workspace %q:\n getSocketPathForPID: %q\n rpc.ShortSocketPath: %q",
					tc.workspace, viaPID, viaRPC)
			}
		})
	}
}
// TestDaemonSocketIsolation demonstrates that two test instances can use different sockets.
// This is the key pattern for parallel test isolation.
func TestDaemonSocketIsolation(t *testing.T) {

151
cmd/bd/daemon_start.go Normal file
View File

@@ -0,0 +1,151 @@
package main
import (
"fmt"
"os"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/rpc"
)
// daemonStartCmd implements `bd daemon start`, the subcommand form of
// starting the background sync daemon (the legacy `bd daemon --start`
// flag is deprecated). It validates flags, refuses to double-start when
// a compatible daemon is already running (auto-replacing one whose
// version is incompatible), then hands off to startDaemon.
var daemonStartCmd = &cobra.Command{
	Use:   "start",
	Short: "Start the background daemon",
	Long: `Start the background daemon that automatically syncs issues with git remote.
The daemon will:
- Poll for changes at configurable intervals (default: 5 seconds)
- Export pending database changes to JSONL
- Auto-commit changes if --auto-commit flag set
- Auto-push commits if --auto-push flag set
- Pull remote changes periodically
- Auto-import when remote changes detected
Examples:
bd daemon start # Start with defaults
bd daemon start --auto-commit # Enable auto-commit
bd daemon start --auto-push # Enable auto-push (implies --auto-commit)
bd daemon start --foreground # Run in foreground (for systemd/supervisord)
bd daemon start --local # Local-only mode (no git sync)`,
	Run: func(cmd *cobra.Command, args []string) {
		// Read all flags up front. Errors are discarded because every flag
		// is registered in this file's init, so lookups cannot fail.
		interval, _ := cmd.Flags().GetDuration("interval")
		autoCommit, _ := cmd.Flags().GetBool("auto-commit")
		autoPush, _ := cmd.Flags().GetBool("auto-push")
		autoPull, _ := cmd.Flags().GetBool("auto-pull")
		localMode, _ := cmd.Flags().GetBool("local")
		logFile, _ := cmd.Flags().GetString("log")
		foreground, _ := cmd.Flags().GetBool("foreground")
		logLevel, _ := cmd.Flags().GetString("log-level")
		logJSON, _ := cmd.Flags().GetBool("log-json")
		// Load auto-commit/push/pull defaults from env vars, config, or sync-branch
		autoCommit, autoPush, autoPull = loadDaemonAutoSettings(cmd, autoCommit, autoPush, autoPull)
		if interval <= 0 {
			fmt.Fprintf(os.Stderr, "Error: interval must be positive (got %v)\n", interval)
			os.Exit(1)
		}
		pidFile, err := getPIDFilePath()
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		// Skip daemon-running check if we're the forked child (BD_DAEMON_FOREGROUND=1)
		if os.Getenv("BD_DAEMON_FOREGROUND") != "1" {
			// Check if daemon is already running
			if isRunning, pid := isDaemonRunning(pidFile); isRunning {
				// Check if running daemon has compatible version
				socketPath := getSocketPathForPID(pidFile)
				if client, err := rpc.TryConnectWithTimeout(socketPath, 1*time.Second); err == nil && client != nil {
					health, healthErr := client.Health()
					_ = client.Close()
					// If we can check version and it's compatible, exit
					if healthErr == nil && health.Compatible {
						fmt.Fprintf(os.Stderr, "Error: daemon already running (PID %d, version %s)\n", pid, health.Version)
						fmt.Fprintf(os.Stderr, "Use 'bd daemon stop' to stop it first\n")
						os.Exit(1)
					}
					// Version mismatch - auto-stop old daemon
					if healthErr == nil && !health.Compatible {
						fmt.Fprintf(os.Stderr, "Warning: daemon version mismatch (daemon: %s, client: %s)\n", health.Version, Version)
						fmt.Fprintf(os.Stderr, "Stopping old daemon and starting new one...\n")
						stopDaemon(pidFile)
					}
					// NOTE(review): if Health() itself errored we fall through
					// and start anyway — presumably the old daemon is dead or
					// unresponsive; confirm that's the intended recovery path.
				} else {
					// PID file says running but the socket is unreachable:
					// refuse to start rather than risk two daemons.
					fmt.Fprintf(os.Stderr, "Error: daemon already running (PID %d)\n", pid)
					fmt.Fprintf(os.Stderr, "Use 'bd daemon stop' to stop it first\n")
					os.Exit(1)
				}
			}
		}
		// Validate --local mode constraints: local mode has no git, so any
		// git-dependent automation flag is a hard error.
		if localMode {
			if autoCommit {
				fmt.Fprintf(os.Stderr, "Error: --auto-commit cannot be used with --local mode\n")
				fmt.Fprintf(os.Stderr, "Hint: --local mode runs without git, so commits are not possible\n")
				os.Exit(1)
			}
			if autoPush {
				fmt.Fprintf(os.Stderr, "Error: --auto-push cannot be used with --local mode\n")
				fmt.Fprintf(os.Stderr, "Hint: --local mode runs without git, so pushes are not possible\n")
				os.Exit(1)
			}
		}
		// Validate we're in a git repo (skip in local mode)
		if !localMode && !isGitRepo() {
			fmt.Fprintf(os.Stderr, "Error: not in a git repository\n")
			fmt.Fprintf(os.Stderr, "Hint: run 'git init' to initialize a repository, or use --local for local-only mode\n")
			os.Exit(1)
		}
		// Check for upstream if auto-push enabled
		if autoPush && !gitHasUpstream() {
			fmt.Fprintf(os.Stderr, "Error: no upstream configured (required for --auto-push)\n")
			fmt.Fprintf(os.Stderr, "Hint: git push -u origin <branch-name>\n")
			os.Exit(1)
		}
		// Warn if starting daemon in a git worktree. dbPath may not be set
		// yet when the command is run outside the usual entry path, so fall
		// back to discovery before warning.
		if dbPath == "" {
			if foundDB := beads.FindDatabasePath(); foundDB != "" {
				dbPath = foundDB
			}
		}
		if dbPath != "" {
			warnWorktreeDaemon(dbPath)
		}
		// Start daemon (announce the effective configuration first).
		if localMode {
			fmt.Printf("Starting bd daemon in LOCAL mode (interval: %v, no git sync)\n", interval)
		} else {
			fmt.Printf("Starting bd daemon (interval: %v, auto-commit: %v, auto-push: %v, auto-pull: %v)\n",
				interval, autoCommit, autoPush, autoPull)
		}
		if logFile != "" {
			fmt.Printf("Logging to: %s\n", logFile)
		}
		startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile, logLevel, logJSON)
	},
}
// init registers the flags for `bd daemon start`. Registration order is
// irrelevant: pflag sorts flags alphabetically in usage output.
func init() {
	// Timing and logging knobs.
	daemonStartCmd.Flags().Duration("interval", 5*time.Second, "Sync check interval")
	daemonStartCmd.Flags().String("log", "", "Log file path (default: .beads/daemon.log)")
	daemonStartCmd.Flags().String("log-level", "info", "Log level (debug, info, warn, error)")
	daemonStartCmd.Flags().Bool("log-json", false, "Output logs in JSON format")
	// Behaviour toggles, all defaulting to off.
	for _, f := range []struct{ name, usage string }{
		{"auto-commit", "Automatically commit changes"},
		{"auto-push", "Automatically push commits"},
		{"auto-pull", "Automatically pull from remote"},
		{"local", "Run in local-only mode (no git required, no sync)"},
		{"foreground", "Run in foreground (don't daemonize)"},
	} {
		daemonStartCmd.Flags().Bool(f.name, false, f.usage)
	}
}

444
cmd/bd/daemon_status.go Normal file
View File

@@ -0,0 +1,444 @@
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/daemon"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/ui"
)
// DaemonStatusReport is a single daemon status entry for JSON output.
// Most fields are omitempty so a "not running" report stays minimal.
type DaemonStatusReport struct {
	Workspace       string  `json:"workspace"`                  // workspace root path this daemon serves
	PID             int     `json:"pid,omitempty"`              // daemon process ID, if running
	Version         string  `json:"version,omitempty"`          // daemon's reported version
	Status          string  `json:"status"`                     // e.g. "running", "outdated", "not_running"
	Issue           string  `json:"issue,omitempty"`            // human-readable problem detail, if any
	Started         string  `json:"started,omitempty"`          // RFC3339 start time
	UptimeSeconds   float64 `json:"uptime_seconds,omitempty"`   // uptime derived from start time
	AutoCommit      bool    `json:"auto_commit,omitempty"`      // daemon auto-commits changes
	AutoPush        bool    `json:"auto_push,omitempty"`        // daemon auto-pushes commits
	AutoPull        bool    `json:"auto_pull,omitempty"`        // daemon auto-pulls from remote
	LocalMode       bool    `json:"local_mode,omitempty"`       // running without git sync
	SyncInterval    string  `json:"sync_interval,omitempty"`    // poll interval, formatted
	DaemonMode      string  `json:"daemon_mode,omitempty"`      // operating mode label
	LogPath         string  `json:"log_path,omitempty"`         // path to daemon.log, if present
	VersionMismatch bool    `json:"version_mismatch,omitempty"` // daemon version != CLI version
	IsCurrent       bool    `json:"is_current,omitempty"`       // entry is the current workspace's daemon
}
// DaemonStatusAllResponse is returned for --all mode: summary counters
// over every discovered daemon plus the per-daemon reports.
type DaemonStatusAllResponse struct {
	Total        int                  `json:"total"`        // total daemons discovered
	Healthy      int                  `json:"healthy"`      // responsive, version-compatible daemons
	Outdated     int                  `json:"outdated"`     // running but version-mismatched
	Stale        int                  `json:"stale"`        // PID file present but process questionable
	Unresponsive int                  `json:"unresponsive"` // process alive but RPC unreachable
	Daemons      []DaemonStatusReport `json:"daemons"`      // detailed per-daemon entries
}
// daemonStatusCmd implements `bd daemon status` and `bd daemon status --all`.
var daemonStatusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show daemon status",
	Long: `Show status of the current workspace's daemon, or all daemons with --all.
Examples:
bd daemon status # Current workspace daemon
bd daemon status --all # All running daemons`,
	Run: func(cmd *cobra.Command, args []string) {
		// --all fans out across every discovered daemon; the default
		// reports only the daemon for the current workspace.
		if all, _ := cmd.Flags().GetBool("all"); all {
			showAllDaemonsStatus(cmd)
			return
		}
		showCurrentDaemonStatus()
	},
}
// init registers the flags for `bd daemon status`.
func init() {
	daemonStatusCmd.Flags().Bool("all", false, "Show status of all daemons")
	// --search only applies in --all mode; it widens daemon discovery.
	daemonStatusCmd.Flags().StringSlice("search", nil, "Directories to search for daemons (with --all)")
}
// shortenPath replaces the user's home directory prefix with ~ for display.
// Only a whole path component counts as a match: with home "/home/bob",
// "/home/bob/x" shortens to "~/x" but "/home/bobby/x" is left alone.
// (The previous bare strings.HasPrefix check wrongly shortened sibling
// directories that merely share a string prefix with home.)
func shortenPath(p string) string {
	home, err := os.UserHomeDir()
	if err != nil || home == "" {
		return p
	}
	if p == home {
		return "~"
	}
	// Require a path-separator boundary immediately after the home prefix.
	if strings.HasPrefix(p, home) && len(p) > len(home) && p[len(home)] == filepath.Separator {
		return "~" + p[len(home):]
	}
	return p
}
// formatRelativeTime formats a time as relative (e.g., "2h ago")
func formatRelativeTime(t time.Time) string {
d := time.Since(t)
if d < time.Minute {
return "just now"
} else if d < time.Hour {
mins := int(d.Minutes())
if mins == 1 {
return "1m ago"
}
return fmt.Sprintf("%dm ago", mins)
} else if d < 24*time.Hour {
hours := int(d.Hours())
if hours == 1 {
return "1h ago"
}
return fmt.Sprintf("%dh ago", hours)
}
days := int(d.Hours() / 24)
if days == 1 {
return "1d ago"
}
return fmt.Sprintf("%dd ago", days)
}
// formatBoolIcon returns a styled checkmark for true and a muted dash for false.
func formatBoolIcon(enabled bool) string {
	if !enabled {
		return ui.RenderMuted("-")
	}
	return ui.RenderPass(ui.IconPass)
}
// renderDaemonStatusIcon renders a daemon status string with a semantic
// icon and styling: pass (green) for healthy, warn for degraded states,
// fail for unresponsive, muted for not running. Unknown statuses are
// returned unchanged.
func renderDaemonStatusIcon(status string) string {
	switch status {
	case "healthy", "running":
		// Healthy daemons display their literal status text.
		return ui.RenderPass(ui.IconPass + " " + status)
	case "outdated", "version_mismatch":
		// Both spellings collapse to the single label "outdated".
		return ui.RenderWarn(ui.IconWarn + " outdated")
	case "stale":
		return ui.RenderWarn(ui.IconWarn + " stale")
	case "unresponsive":
		return ui.RenderFail(ui.IconFail + " unresponsive")
	case "not_running":
		return ui.RenderMuted("○ not running")
	}
	return status
}
// showCurrentDaemonStatus prints a detailed status report for the current
// workspace's daemon — JSON when the global jsonOutput flag is set, styled
// human-readable text otherwise.
//
// Exits the process with status 1 if the PID-file path cannot be resolved;
// reports "not_running" and returns normally when no daemon is found.
func showCurrentDaemonStatus() {
	pidFile, err := getPIDFilePath()
	if err != nil {
		if jsonOutput {
			outputJSON(map[string]string{"error": err.Error()})
		} else {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		}
		os.Exit(1)
	}
	// Derive sibling paths from the PID file location: the socket lives next
	// to it inside .beads, and the workspace root is the parent of .beads.
	beadsDir := filepath.Dir(pidFile)
	socketPath := filepath.Join(beadsDir, "bd.sock")
	workspacePath := filepath.Dir(beadsDir)
	// Check if daemon is running
	isRunning, pid := isDaemonRunning(pidFile)
	if !isRunning {
		if jsonOutput {
			outputJSON(DaemonStatusReport{
				Workspace: workspacePath,
				Status:    "not_running",
			})
		} else {
			fmt.Printf("%s\n\n", renderDaemonStatusIcon("not_running"))
			fmt.Printf(" Workspace: %s\n", shortenPath(workspacePath))
			fmt.Printf("\n To start: bd daemon start\n")
		}
		return
	}
	// Get detailed status via RPC. Best-effort with a short timeout so status
	// stays snappy even if the daemon is wedged; rpcStatus stays nil on failure.
	var rpcStatus *rpc.StatusResponse
	if client, err := rpc.TryConnectWithTimeout(socketPath, 1*time.Second); err == nil && client != nil {
		if status, err := client.Status(); err == nil {
			rpcStatus = status
		}
		_ = client.Close()
	}
	// Get started time from PID file (its mtime approximates daemon start).
	var startedTime time.Time
	if info, err := os.Stat(pidFile); err == nil {
		startedTime = info.ModTime()
	}
	// Determine daemon version and check for mismatch against this CLI build.
	daemonVersion := ""
	versionMismatch := false
	if rpcStatus != nil {
		daemonVersion = rpcStatus.Version
		if daemonVersion != "" && daemonVersion != Version {
			versionMismatch = true
		}
	}
	// Determine status: a version mismatch downgrades "running" to "outdated".
	status := "running"
	issue := ""
	if versionMismatch {
		status = "outdated"
		issue = fmt.Sprintf("daemon %s != cli %s", daemonVersion, Version)
	}
	// Get log path; cleared when the log file does not exist so callers can
	// distinguish "no log" from a real path.
	logPath := filepath.Join(beadsDir, "daemon.log")
	if _, err := os.Stat(logPath); err != nil {
		logPath = ""
	}
	if jsonOutput {
		report := DaemonStatusReport{
			Workspace:       workspacePath,
			PID:             pid,
			Version:         daemonVersion,
			Status:          status,
			Issue:           issue,
			LogPath:         logPath,
			VersionMismatch: versionMismatch,
			IsCurrent:       true,
		}
		if !startedTime.IsZero() {
			report.Started = startedTime.Format(time.RFC3339)
		}
		// RPC-derived fields are only populated when the daemon answered.
		if rpcStatus != nil {
			report.UptimeSeconds = rpcStatus.UptimeSeconds
			report.AutoCommit = rpcStatus.AutoCommit
			report.AutoPush = rpcStatus.AutoPush
			report.AutoPull = rpcStatus.AutoPull
			report.LocalMode = rpcStatus.LocalMode
			report.SyncInterval = rpcStatus.SyncInterval
			report.DaemonMode = rpcStatus.DaemonMode
		}
		outputJSON(report)
		return
	}
	// Human-readable output with semantic styling
	// Status line
	versionStr := ""
	if daemonVersion != "" {
		versionStr = fmt.Sprintf(", v%s", daemonVersion)
	}
	if versionMismatch {
		fmt.Printf("%s (PID %d%s)\n", renderDaemonStatusIcon("outdated"), pid, versionStr)
		fmt.Printf(" %s\n\n", ui.RenderWarn(fmt.Sprintf("CLI version: %s", Version)))
	} else {
		fmt.Printf("%s (PID %d%s)\n\n", renderDaemonStatusIcon("running"), pid, versionStr)
	}
	// Details
	fmt.Printf(" Workspace: %s\n", shortenPath(workspacePath))
	if !startedTime.IsZero() {
		fmt.Printf(" Started: %s (%s)\n", startedTime.Format("2006-01-02 15:04:05"), formatRelativeTime(startedTime))
	}
	if rpcStatus != nil {
		fmt.Printf(" Mode: %s\n", rpcStatus.DaemonMode)
		fmt.Printf(" Interval: %s\n", rpcStatus.SyncInterval)
		// Compact sync flags display
		syncFlags := []string{}
		if rpcStatus.AutoCommit {
			syncFlags = append(syncFlags, ui.RenderPass(ui.IconPass)+" commit")
		}
		if rpcStatus.AutoPush {
			syncFlags = append(syncFlags, ui.RenderPass(ui.IconPass)+" push")
		}
		if rpcStatus.AutoPull {
			syncFlags = append(syncFlags, ui.RenderPass(ui.IconPass)+" pull")
		}
		if len(syncFlags) > 0 {
			fmt.Printf(" Sync: %s\n", strings.Join(syncFlags, " "))
		} else {
			fmt.Printf(" Sync: %s\n", ui.RenderMuted("none"))
		}
		if rpcStatus.LocalMode {
			fmt.Printf(" Local: %s\n", ui.RenderWarn("yes (no git sync)"))
		}
	}
	if logPath != "" {
		// Show relative path for log
		relLog := ".beads/daemon.log"
		fmt.Printf(" Log: %s\n", relLog)
	}
	// Show hint about other daemons. aliveCount includes the current daemon,
	// hence the aliveCount-1 when printing "other daemon(s)".
	daemons, err := daemon.DiscoverDaemons(nil)
	if err == nil {
		aliveCount := 0
		for _, d := range daemons {
			if d.Alive {
				aliveCount++
			}
		}
		if aliveCount > 1 {
			fmt.Printf("\n %s\n", ui.RenderMuted(fmt.Sprintf("%d other daemon(s) running (bd daemon status --all)", aliveCount-1)))
		}
	}
}
// showAllDaemonsStatus discovers every bd daemon on this machine and prints a
// summary table (or JSON when jsonOutput is set). Stale sockets found during
// discovery are cleaned up as a side effect. Exits with status 1 when
// discovery fails, or when any daemon is outdated/stale/unresponsive, so the
// exit code doubles as a health signal for scripts.
func showAllDaemonsStatus(cmd *cobra.Command) {
	searchRoots, _ := cmd.Flags().GetStringSlice("search")
	// Discover daemons
	daemons, err := daemon.DiscoverDaemons(searchRoots)
	if err != nil {
		if jsonOutput {
			outputJSON(map[string]string{"error": err.Error()})
		} else {
			fmt.Fprintf(os.Stderr, "Error discovering daemons: %v\n", err)
		}
		os.Exit(1)
	}
	// Auto-cleanup stale sockets (best-effort; cleanup errors are ignored).
	if cleaned, err := daemon.CleanupStaleSockets(daemons); err == nil && cleaned > 0 && !jsonOutput {
		fmt.Fprintf(os.Stderr, "Cleaned up %d stale socket(s)\n", cleaned)
	}
	// Get current workspace so its row can be marked with an arrow below.
	currentWorkspace := ""
	if pidFile, err := getPIDFilePath(); err == nil {
		beadsDir := filepath.Dir(pidFile)
		currentWorkspace = filepath.Dir(beadsDir)
	}
	currentVersion := Version
	// Classify each daemon: stale (not alive), outdated (version differs from
	// this CLI), or healthy.
	//
	// NOTE(review): unresponsiveCount is reported below but never incremented
	// in this loop — no branch classifies a daemon as "unresponsive". Confirm
	// whether that state (alive but failing RPC) is meant to be detected here.
	var reports []DaemonStatusReport
	healthyCount := 0
	outdatedCount := 0
	staleCount := 0
	unresponsiveCount := 0
	for _, d := range daemons {
		report := DaemonStatusReport{
			Workspace: d.WorkspacePath,
			PID:       d.PID,
			Version:   d.Version,
			IsCurrent: d.WorkspacePath == currentWorkspace,
		}
		if !d.Alive {
			report.Status = "stale"
			report.Issue = d.Error
			staleCount++
		} else if d.Version != "" && d.Version != currentVersion {
			report.Status = "outdated"
			report.Issue = fmt.Sprintf("daemon %s != cli %s", d.Version, currentVersion)
			report.VersionMismatch = true
			outdatedCount++
		} else {
			report.Status = "healthy"
			healthyCount++
		}
		reports = append(reports, report)
	}
	if jsonOutput {
		outputJSON(DaemonStatusAllResponse{
			Total:        len(reports),
			Healthy:      healthyCount,
			Outdated:     outdatedCount,
			Stale:        staleCount,
			Unresponsive: unresponsiveCount,
			Daemons:      reports,
		})
		return
	}
	// Human-readable output
	if len(reports) == 0 {
		fmt.Println("No daemons found")
		return
	}
	// Summary line: total plus a colored count for each non-zero category.
	fmt.Printf("Daemons: %d total", len(reports))
	if healthyCount > 0 {
		fmt.Printf(", %s", ui.RenderPass(fmt.Sprintf("%d healthy", healthyCount)))
	}
	if outdatedCount > 0 {
		fmt.Printf(", %s", ui.RenderWarn(fmt.Sprintf("%d outdated", outdatedCount)))
	}
	if staleCount > 0 {
		fmt.Printf(", %s", ui.RenderWarn(fmt.Sprintf("%d stale", staleCount)))
	}
	if unresponsiveCount > 0 {
		fmt.Printf(", %s", ui.RenderFail(fmt.Sprintf("%d unresponsive", unresponsiveCount)))
	}
	fmt.Println()
	fmt.Println()
	// Table
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
	_, _ = fmt.Fprintln(w, " WORKSPACE\tPID\tVERSION\tSTATUS")
	for _, r := range reports {
		workspace := shortenPath(r.Workspace)
		if workspace == "" {
			workspace = "(unknown)"
		}
		// Add arrow for current workspace
		prefix := " "
		if r.IsCurrent {
			prefix = ui.RenderAccent("→ ")
		}
		pidStr := "-"
		if r.PID != 0 {
			pidStr = fmt.Sprintf("%d", r.PID)
		}
		version := r.Version
		if version == "" {
			version = "-"
		}
		// Render status with icon and color
		var statusDisplay string
		switch r.Status {
		case "healthy":
			statusDisplay = ui.RenderPass(ui.IconPass + " healthy")
		case "outdated":
			statusDisplay = ui.RenderWarn(ui.IconWarn + " outdated")
			// Add version hint
			statusDisplay += ui.RenderMuted(fmt.Sprintf(" (cli: %s)", currentVersion))
		case "stale":
			statusDisplay = ui.RenderWarn(ui.IconWarn + " stale")
		case "unresponsive":
			statusDisplay = ui.RenderFail(ui.IconFail + " unresponsive")
		default:
			statusDisplay = r.Status
		}
		_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\n",
			prefix, workspace, pidStr, version, statusDisplay)
	}
	_ = w.Flush()
	// Exit with error if there are issues
	if outdatedCount > 0 || staleCount > 0 || unresponsiveCount > 0 {
		os.Exit(1)
	}
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"os"
"os/exec"
"regexp"
"strings"
@@ -802,30 +801,6 @@ func uniqueStrings(slice []string) []string {
return result
}
// getActorWithGit resolves the actor name recorded in the audit trail.
// Resolution order: the global actor value (populated from the --actor flag
// or the BD_ACTOR env var) > `git config user.name` > $USER > "unknown".
func getActorWithGit() string {
	// A meaningful value from the flag/env layer wins outright.
	if actor != "" && actor != "unknown" {
		return actor
	}
	// Next preference: the git identity configured for this repo/user.
	if out, err := exec.Command("git", "config", "user.name").Output(); err == nil {
		if name := strings.TrimSpace(string(out)); name != "" {
			return name
		}
	}
	// Last resort before giving up: the OS-level username.
	if envUser := os.Getenv("USER"); envUser != "" {
		return envUser
	}
	return "unknown"
}
func init() {
deleteCmd.Flags().BoolP("force", "f", false, "Actually delete (without this flag, shows preview)")
deleteCmd.Flags().String("from-file", "", "Read issue IDs from file (one per line)")

View File

@@ -45,6 +45,9 @@ func isChildOf(childID, parentID string) bool {
// warnIfCyclesExist checks for dependency cycles and prints a warning if found.
func warnIfCyclesExist(s storage.Storage) {
if s == nil {
return // Skip cycle check in daemon mode (daemon handles it)
}
cycles, err := s.DetectCycles(rootCtx)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: Failed to check for cycles: %v\n", err)

View File

@@ -3,6 +3,7 @@ package main
import (
"bytes"
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
@@ -139,3 +140,137 @@ func TestFallbackToDirectModeEnablesFlush(t *testing.T) {
t.Fatalf("expected JSONL export to contain neighbor issue ID %s", neighbor.ID)
}
}
// TestImportFromJSONLInlineAfterDaemonDisconnect verifies that importFromJSONLInline
// works after daemon disconnect when ensureStoreActive is called.
//
// This tests the fix for the bug where `bd sync --import-only` fails with
// "no database store available for inline import" when daemon mode was active.
//
// The bug occurs because:
//  1. PersistentPreRun connects to daemon and returns early (store = nil)
//  2. sync command closes daemon connection
//  3. sync --import-only calls importFromJSONLInline which requires store != nil
//  4. Without ensureStoreActive(), the store is never initialized
//
// The fix: call ensureStoreActive() after closing daemon in sync.go
func TestImportFromJSONLInlineAfterDaemonDisconnect(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Save and restore all global state so this test cannot leak into others.
	oldRootCtx := rootCtx
	rootCtx = ctx
	origDaemonClient := daemonClient
	origDaemonStatus := daemonStatus
	origStore := store
	origStoreActive := storeActive
	origDBPath := dbPath
	origAutoImport := autoImportEnabled
	defer func() {
		rootCtx = oldRootCtx
		if store != nil && store != origStore {
			_ = store.Close()
		}
		storeMutex.Lock()
		store = origStore
		storeActive = origStoreActive
		storeMutex.Unlock()
		daemonClient = origDaemonClient
		daemonStatus = origDaemonStatus
		dbPath = origDBPath
		autoImportEnabled = origAutoImport
	}()

	// Setup: Create temp directory with .beads structure
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0o755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "beads.db")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")

	// Create and seed the database
	setupStore := newTestStore(t, testDBPath)
	issue := &types.Issue{
		Title:     "Test Issue",
		IssueType: types.TypeTask,
		Priority:  2,
		Status:    types.StatusOpen,
	}
	if err := setupStore.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	issueID := issue.ID

	// Export to JSONL. Use a checked json.Encoder (one value per line) —
	// the previous version ignored Marshal/Write/Close errors, which could
	// silently produce a truncated fixture and a confusing downstream failure.
	issues, err := setupStore.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("failed to create JSONL: %v", err)
	}
	enc := json.NewEncoder(f)
	for _, iss := range issues {
		if err := enc.Encode(iss); err != nil {
			t.Fatalf("failed to encode issue to JSONL: %v", err)
		}
	}
	if err := f.Close(); err != nil {
		t.Fatalf("failed to close JSONL: %v", err)
	}

	// Close setup store
	if err := setupStore.Close(); err != nil {
		t.Fatalf("failed to close setup store: %v", err)
	}

	// Simulate daemon-connected state (as PersistentPreRun leaves it)
	dbPath = testDBPath
	storeMutex.Lock()
	store = nil
	storeActive = false
	storeMutex.Unlock()
	daemonClient = &rpc.Client{} // Non-nil means daemon was connected
	autoImportEnabled = false

	// Simulate what sync.go does: close daemon but DON'T initialize store.
	// This is the bug scenario.
	_ = daemonClient.Close()
	daemonClient = nil

	// BUG: Without ensureStoreActive(), importFromJSONLInline fails
	err = importFromJSONLInline(ctx, jsonlPath, false, false)
	if err == nil {
		t.Fatal("expected importFromJSONLInline to fail when store is nil")
	}
	if err.Error() != "no database store available for inline import" {
		t.Fatalf("unexpected error: %v", err)
	}

	// FIX: Call ensureStoreActive() after daemon disconnect
	if err := ensureStoreActive(); err != nil {
		t.Fatalf("ensureStoreActive failed: %v", err)
	}

	// Now importFromJSONLInline should work
	err = importFromJSONLInline(ctx, jsonlPath, false, false)
	if err != nil {
		t.Fatalf("importFromJSONLInline failed after ensureStoreActive: %v", err)
	}

	// Verify the import worked by checking the issue exists
	storeMutex.Lock()
	currentStore := store
	storeMutex.Unlock()
	imported, err := currentStore.GetIssue(ctx, issueID)
	if err != nil {
		t.Fatalf("failed to get imported issue: %v", err)
	}
	if imported.Title != "Test Issue" {
		t.Errorf("expected title 'Test Issue', got %q", imported.Title)
	}
}

View File

@@ -342,7 +342,12 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, configValuesCheck)
// Don't fail overall check for config value warnings, just warn
// Check 7b: JSONL integrity (malformed lines, missing IDs)
// Check 7b: Multi-repo custom types discovery (bd-9ji4z)
multiRepoTypesCheck := convertWithCategory(doctor.CheckMultiRepoTypes(path), doctor.CategoryData)
result.Checks = append(result.Checks, multiRepoTypesCheck)
// Don't fail overall check for multi-repo types, just informational
// Check 7c: JSONL integrity (malformed lines, missing IDs)
jsonlIntegrityCheck := convertWithCategory(doctor.CheckJSONLIntegrity(path), doctor.CategoryData)
result.Checks = append(result.Checks, jsonlIntegrityCheck)
if jsonlIntegrityCheck.Status == statusWarning || jsonlIntegrityCheck.Status == statusError {
@@ -445,6 +450,21 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, redirectTrackingCheck)
// Don't fail overall check for redirect tracking, just warn
// Check 14c: redirect target validity (target exists and has valid db)
redirectTargetCheck := convertWithCategory(doctor.CheckRedirectTargetValid(), doctor.CategoryGit)
result.Checks = append(result.Checks, redirectTargetCheck)
// Don't fail overall check for redirect target, just warn
// Check 14d: redirect target sync worktree (target has beads-sync if needed)
redirectTargetSyncCheck := convertWithCategory(doctor.CheckRedirectTargetSyncWorktree(), doctor.CategoryGit)
result.Checks = append(result.Checks, redirectTargetSyncCheck)
// Don't fail overall check for redirect target sync, just warn
// Check 14e: vestigial sync worktrees (unused worktrees in redirected repos)
vestigialWorktreesCheck := convertWithCategory(doctor.CheckNoVestigialSyncWorktrees(), doctor.CategoryGit)
result.Checks = append(result.Checks, vestigialWorktreesCheck)
// Don't fail overall check for vestigial worktrees, just warn
// Check 15: Git merge driver configuration
mergeDriverCheck := convertWithCategory(doctor.CheckMergeDriver(path), doctor.CategoryGit)
result.Checks = append(result.Checks, mergeDriverCheck)

View File

@@ -222,47 +222,60 @@ func CheckLefthookBdIntegration(path string) *HookIntegrationStatus {
}
// hasBdInCommands checks if any command's "run" field contains bd hooks run.
// Walks the lefthook structure: hookSection.commands.*.run
// Walks the lefthook structure for both syntaxes:
// - commands (map-based, older): hookSection.commands.*.run
// - jobs (array-based, v1.10.0+): hookSection.jobs[*].run
func hasBdInCommands(hookSection interface{}) bool {
sectionMap, ok := hookSection.(map[string]interface{})
if !ok {
return false
}
commands, ok := sectionMap["commands"]
if !ok {
return false
// Check "commands" syntax (map-based, older)
if commands, ok := sectionMap["commands"]; ok {
if commandsMap, ok := commands.(map[string]interface{}); ok {
for _, cmdConfig := range commandsMap {
if hasBdInRunField(cmdConfig) {
return true
}
}
}
}
commandsMap, ok := commands.(map[string]interface{})
if !ok {
return false
}
for _, cmdConfig := range commandsMap {
cmdMap, ok := cmdConfig.(map[string]interface{})
if !ok {
continue
}
runVal, ok := cmdMap["run"]
if !ok {
continue
}
runStr, ok := runVal.(string)
if !ok {
continue
}
if bdHookPattern.MatchString(runStr) {
return true
// Check "jobs" syntax (array-based, v1.10.0+)
if jobs, ok := sectionMap["jobs"]; ok {
if jobsList, ok := jobs.([]interface{}); ok {
for _, job := range jobsList {
if hasBdInRunField(job) {
return true
}
}
}
}
return false
}
// hasBdInRunField reports whether a single lefthook command/job config has
// "bd hooks run" in its "run" field.
func hasBdInRunField(config interface{}) bool {
	configMap, ok := config.(map[string]interface{})
	if !ok {
		return false
	}
	// Indexing a missing key yields a nil interface, so one comma-ok type
	// assertion covers both "no run key" and "run is not a string".
	run, ok := configMap["run"].(string)
	if !ok {
		return false
	}
	return bdHookPattern.MatchString(run)
}
// precommitConfigFiles lists pre-commit config files.
var precommitConfigFiles = []string{".pre-commit-config.yaml", ".pre-commit-config.yml"}

View File

@@ -46,6 +46,11 @@ beads.left.meta.json
beads.right.jsonl
beads.right.meta.json
# Sync state (local-only, per-machine)
# These files are machine-specific and should not be shared across clones
.sync.lock
sync_base.jsonl
# NOTE: Do NOT add negation patterns (e.g., !issues.jsonl) here.
# They would override fork protection in .git/info/exclude, allowing
# contributors to accidentally commit upstream issue databases.
@@ -64,6 +69,8 @@ var requiredPatterns = []string{
"*.db?*",
"redirect",
"last-touched",
".sync.lock",
"sync_base.jsonl",
}
// CheckGitignore checks if .beads/.gitignore is up to date
@@ -264,6 +271,255 @@ func FixRedirectTracking() error {
return nil
}
// CheckRedirectTargetValid verifies that the redirect target exists and has a
// valid beads database. This catches cases where the redirect points to a
// non-existent directory or one without a database.
//
// The path in .beads/redirect may be absolute or relative; relative paths are
// resolved against the current working directory (the parent of .beads).
func CheckRedirectTargetValid() DoctorCheck {
	redirectPath := filepath.Join(".beads", "redirect")
	// Check if redirect file exists
	data, err := os.ReadFile(redirectPath) // #nosec G304 - path is hardcoded
	if os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Redirect Target Valid",
			Status:  StatusOK,
			Message: "No redirect configured",
		}
	}
	if err != nil {
		return DoctorCheck{
			Name:    "Redirect Target Valid",
			Status:  StatusWarning,
			Message: "Cannot read redirect file",
			Detail:  err.Error(),
		}
	}
	// Parse redirect target
	target := strings.TrimSpace(string(data))
	if target == "" {
		return DoctorCheck{
			Name:    "Redirect Target Valid",
			Status:  StatusWarning,
			Message: "Redirect file is empty",
			Fix:     "Remove the empty redirect file or add a valid path",
		}
	}
	// Resolve the redirect path relative to the parent of .beads
	cwd, err := os.Getwd()
	if err != nil {
		return DoctorCheck{
			Name:    "Redirect Target Valid",
			Status:  StatusWarning,
			Message: "Cannot determine current directory",
		}
	}
	// BUGFIX: an absolute target must be used as-is; unconditionally joining
	// it onto cwd produced a bogus path like /repo/abs/target.
	resolvedTarget := filepath.Clean(target)
	if !filepath.IsAbs(target) {
		resolvedTarget = filepath.Clean(filepath.Join(cwd, target))
	}
	// Check if target directory exists
	info, err := os.Stat(resolvedTarget)
	if os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Redirect Target Valid",
			Status:  StatusError,
			Message: "Redirect target does not exist",
			Detail:  fmt.Sprintf("Target: %s", resolvedTarget),
			Fix:     "Fix the redirect path or create the target directory",
		}
	}
	if err != nil {
		return DoctorCheck{
			Name:    "Redirect Target Valid",
			Status:  StatusWarning,
			Message: "Cannot access redirect target",
			Detail:  err.Error(),
		}
	}
	if !info.IsDir() {
		return DoctorCheck{
			Name:    "Redirect Target Valid",
			Status:  StatusError,
			Message: "Redirect target is not a directory",
			Detail:  fmt.Sprintf("Target: %s", resolvedTarget),
		}
	}
	// Check for a valid beads database in the target: the canonical beads.db,
	// or failing that any *.db file (glob errors are deliberately ignored —
	// best-effort check).
	dbPath := filepath.Join(resolvedTarget, "beads.db")
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		matches, _ := filepath.Glob(filepath.Join(resolvedTarget, "*.db"))
		if len(matches) == 0 {
			return DoctorCheck{
				Name:    "Redirect Target Valid",
				Status:  StatusWarning,
				Message: "Redirect target has no beads database",
				Detail:  fmt.Sprintf("Target: %s", resolvedTarget),
				Fix:     "Run 'bd init' in the target directory or check redirect path",
			}
		}
	}
	return DoctorCheck{
		Name:    "Redirect Target Valid",
		Status:  StatusOK,
		Message: fmt.Sprintf("Redirect target valid: %s", resolvedTarget),
	}
}
// CheckRedirectTargetSyncWorktree verifies that the redirect target has a
// working beads-sync worktree. This matters for repos using sync-branch mode
// with redirects.
//
// The path in .beads/redirect may be absolute or relative; relative paths are
// resolved against the current working directory (the parent of .beads).
// Unreadable/empty redirect files return OK here — the companion check
// CheckRedirectTargetValid is responsible for warning about those.
func CheckRedirectTargetSyncWorktree() DoctorCheck {
	redirectPath := filepath.Join(".beads", "redirect")
	// Check if redirect file exists
	data, err := os.ReadFile(redirectPath) // #nosec G304 - path is hardcoded
	if os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Redirect Target Sync",
			Status:  StatusOK,
			Message: "No redirect configured",
		}
	}
	if err != nil {
		return DoctorCheck{
			Name:    "Redirect Target Sync",
			Status:  StatusOK, // Don't warn if we can't read - other check handles that
			Message: "N/A (cannot read redirect)",
		}
	}
	target := strings.TrimSpace(string(data))
	if target == "" {
		return DoctorCheck{
			Name:    "Redirect Target Sync",
			Status:  StatusOK,
			Message: "N/A (empty redirect)",
		}
	}
	// Resolve the target path
	cwd, err := os.Getwd()
	if err != nil {
		return DoctorCheck{
			Name:    "Redirect Target Sync",
			Status:  StatusOK,
			Message: "N/A (cannot determine cwd)",
		}
	}
	// BUGFIX: an absolute target must be used as-is; unconditionally joining
	// it onto cwd produced a bogus path.
	resolvedTarget := filepath.Clean(target)
	if !filepath.IsAbs(target) {
		resolvedTarget = filepath.Clean(filepath.Join(cwd, target))
	}
	// Check if the target has a sync-branch configured in config.yaml
	configPath := filepath.Join(resolvedTarget, "config.yaml")
	configData, err := os.ReadFile(configPath) // #nosec G304 - constructed from known path
	if err != nil {
		// No config.yaml means no sync-branch, which is fine
		return DoctorCheck{
			Name:    "Redirect Target Sync",
			Status:  StatusOK,
			Message: "N/A (target not using sync-branch mode)",
		}
	}
	// Simple substring check for sync-branch in config (no YAML parse needed).
	if !strings.Contains(string(configData), "sync-branch:") {
		return DoctorCheck{
			Name:    "Redirect Target Sync",
			Status:  StatusOK,
			Message: "N/A (target not using sync-branch mode)",
		}
	}
	// Target uses sync-branch - check for beads-sync worktree in the repo
	// containing the target. The target is inside a .beads dir, so the repo
	// root is the parent of the target directory.
	targetRepoRoot := filepath.Dir(resolvedTarget)
	worktreePath := filepath.Join(targetRepoRoot, ".beads-sync")
	if _, err := os.Stat(worktreePath); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Redirect Target Sync",
			Status:  StatusWarning,
			Message: "Redirect target missing beads-sync worktree",
			Detail:  fmt.Sprintf("Expected worktree at: %s", worktreePath),
			Fix:     fmt.Sprintf("Run 'bd sync' in %s to create the worktree", targetRepoRoot),
		}
	}
	return DoctorCheck{
		Name:    "Redirect Target Sync",
		Status:  StatusOK,
		Message: "Redirect target has beads-sync worktree",
	}
}
// CheckNoVestigialSyncWorktrees detects beads-sync worktrees in redirected
// repos that are unused. When a repo uses .beads/redirect, sync operations
// happen in the redirect target, so a local beads-sync worktree is dead
// weight.
func CheckNoVestigialSyncWorktrees() DoctorCheck {
	const checkName = "Vestigial Sync Worktrees"
	// Without a redirect this check does not apply.
	if _, err := os.Stat(filepath.Join(".beads", "redirect")); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusOK,
			Message: "N/A (no redirect configured)",
		}
	}
	cwd, err := os.Getwd()
	if err != nil {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusOK,
			Message: "N/A (cannot determine cwd)",
		}
	}
	// Ascend from cwd until a directory containing .git is found.
	gitRoot := cwd
	for {
		if _, err := os.Stat(filepath.Join(gitRoot, ".git")); err == nil {
			break
		}
		parent := filepath.Dir(gitRoot)
		if parent == gitRoot {
			// Hit the filesystem root without finding a repo.
			return DoctorCheck{
				Name:    checkName,
				Status:  StatusOK,
				Message: "N/A (not in git repository)",
			}
		}
		gitRoot = parent
	}
	syncWorktreePath := filepath.Join(gitRoot, ".beads-sync")
	if _, err := os.Stat(syncWorktreePath); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusOK,
			Message: "No vestigial sync worktrees found",
		}
	}
	// A local .beads-sync alongside a redirect is vestigial.
	return DoctorCheck{
		Name:    checkName,
		Status:  StatusWarning,
		Message: "Vestigial .beads-sync worktree found",
		Detail:  fmt.Sprintf("This repo uses redirect but has unused worktree at: %s", syncWorktreePath),
		Fix:     fmt.Sprintf("Remove with: rm -rf %s", syncWorktreePath),
	}
}
// CheckSyncBranchGitignore checks if git index flags are set on issues.jsonl when sync.branch is configured.
// Without these flags, the file appears modified in git status even though changes go to the sync branch.
// GH#797, GH#801, GH#870.

View File

@@ -1368,3 +1368,43 @@ func TestRequiredPatterns_ContainsRedirect(t *testing.T) {
t.Error("requiredPatterns should include 'redirect'")
}
}
// TestGitignoreTemplate_ContainsSyncStateFiles verifies that sync state files
// introduced in PR #918 (pull-first sync with 3-way merge) are gitignored.
// These files are machine-specific and should not be shared across clones.
// GH#974
func TestGitignoreTemplate_ContainsSyncStateFiles(t *testing.T) {
	// .sync.lock is the concurrency guard; sync_base.jsonl holds the
	// per-machine base state for the 3-way merge.
	for _, pattern := range []string{".sync.lock", "sync_base.jsonl"} {
		if !strings.Contains(GitignoreTemplate, pattern) {
			t.Errorf("GitignoreTemplate should contain '%s' pattern", pattern)
		}
	}
}
// TestRequiredPatterns_ContainsSyncStatePatterns verifies that bd doctor
// validates the presence of sync state patterns in .beads/.gitignore.
// GH#974
func TestRequiredPatterns_ContainsSyncStatePatterns(t *testing.T) {
	// Build a membership set once instead of scanning the slice per pattern.
	have := make(map[string]bool, len(requiredPatterns))
	for _, pattern := range requiredPatterns {
		have[pattern] = true
	}
	for _, expected := range []string{".sync.lock", "sync_base.jsonl"} {
		if !have[expected] {
			t.Errorf("requiredPatterns should include '%s'", expected)
		}
	}
}

View File

@@ -156,6 +156,7 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
name == "deletions.jsonl" ||
name == "interactions.jsonl" ||
name == "molecules.jsonl" ||
name == "sync_base.jsonl" ||
// Git merge conflict artifacts (e.g., issues.base.jsonl, issues.left.jsonl)
strings.Contains(lowerName, ".base.jsonl") ||
strings.Contains(lowerName, ".left.jsonl") ||

View File

@@ -290,9 +290,15 @@ func TestCheckLegacyJSONLFilename(t *testing.T) {
expectedStatus: "ok",
expectWarning: false,
},
{
name: "sync_base.jsonl ignored as system file (GH#1021)",
files: []string{"issues.jsonl", "sync_base.jsonl"},
expectedStatus: "ok",
expectWarning: false,
},
{
name: "all system files ignored together",
files: []string{"issues.jsonl", "deletions.jsonl", "interactions.jsonl", "molecules.jsonl"},
files: []string{"issues.jsonl", "deletions.jsonl", "interactions.jsonl", "molecules.jsonl", "sync_base.jsonl"},
expectedStatus: "ok",
expectWarning: false,
},

261
cmd/bd/doctor/multirepo.go Normal file
View File

@@ -0,0 +1,261 @@
package doctor
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/configfile"
)
// CheckMultiRepoTypes reports, for informational purposes, the custom issue
// types used by each child repo in a multi-repo setup. Under the federation
// trust model the parent config need not list child types; this check only
// escalates to a warning when hydrated issues use a type unknown to every
// config.
func CheckMultiRepoTypes(repoPath string) DoctorCheck {
	multiRepo := config.GetMultiRepoConfig()
	if multiRepo == nil || len(multiRepo.Additional) == 0 {
		return DoctorCheck{
			Name:     "Multi-Repo Types",
			Status:   StatusOK,
			Message:  "N/A (single-repo mode)",
			Category: CategoryData,
		}
	}

	// One detail line per child repo, listing its discovered custom types.
	details := make([]string, 0, len(multiRepo.Additional))
	for _, child := range multiRepo.Additional {
		if childTypes := discoverChildTypes(child); len(childTypes) > 0 {
			details = append(details, fmt.Sprintf(" %s: %s", child, strings.Join(childTypes, ", ")))
		} else {
			details = append(details, fmt.Sprintf(" %s: (no custom types)", child))
		}
	}

	// Hydrated issues whose type appears in no config at all warrant a warning.
	var warnings []string
	if unknownTypes := findUnknownTypesInHydratedIssues(repoPath, multiRepo); len(unknownTypes) > 0 {
		warnings = append(warnings, fmt.Sprintf("Issues with unknown types: %s", strings.Join(unknownTypes, ", ")))
	}

	status := StatusOK
	message := fmt.Sprintf("Discovered types from %d child repo(s)", len(multiRepo.Additional))
	if len(warnings) > 0 {
		status = StatusWarning
		message = fmt.Sprintf("Found %d type warning(s)", len(warnings))
		details = append(details, "", "Warnings:")
		details = append(details, warnings...)
	}

	return DoctorCheck{
		Name:     "Multi-Repo Types",
		Status:   status,
		Message:  message,
		Detail:   strings.Join(details, "\n"),
		Category: CategoryData,
	}
}
// discoverChildTypes reads custom types from a child repo's config or database.
// Returns nil if no custom types are found (not an error - child may not have
// any).
//
// Lookup order: the database config table first, then config.yaml.
func discoverChildTypes(repoPath string) []string {
	// BUGFIX: expand only "~" or "~/..." to the current user's home directory.
	// The previous check (any leading "~") also mangled "~otheruser/..."
	// into "$HOME/otheruser/...", which is the wrong path.
	if repoPath == "~" || strings.HasPrefix(repoPath, "~/") {
		if home, err := os.UserHomeDir(); err == nil {
			repoPath = filepath.Join(home, repoPath[1:])
		}
	}
	beadsDir := filepath.Join(repoPath, ".beads")
	// First try reading from database config table
	types, err := readTypesFromDB(beadsDir)
	if err == nil && len(types) > 0 {
		return types
	}
	// Fall back to reading from config.yaml
	types, err = readTypesFromYAML(beadsDir)
	if err == nil {
		return types
	}
	// No custom types found
	return nil
}
// readTypesFromDB reads types.custom from the database config table
func readTypesFromDB(beadsDir string) ([]string, error) {
// Get database path
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
return nil, err
}
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return nil, err
}
defer db.Close()
var typesStr string
err = db.QueryRow("SELECT value FROM config WHERE key = 'types.custom'").Scan(&typesStr)
if err != nil {
return nil, err
}
if typesStr == "" {
return nil, nil
}
// Parse comma-separated list
var types []string
for _, t := range strings.Split(typesStr, ",") {
t = strings.TrimSpace(t)
if t != "" {
types = append(types, t)
}
}
return types, nil
}
// readTypesFromYAML reads types.custom from config.yaml in beadsDir.
//
// Rather than pulling in a YAML parser (and to avoid viper global state), it
// does a minimal line-oriented scan: find a top-level "types:" section, then
// within it a "custom:" key. The value may be an inline array ([a, b, c]) or
// a comma-separated scalar ("a,b,c").
//
// Returns (nil, nil) when config.yaml exists but declares no custom types,
// and a non-nil error when the file is missing or unreadable.
//
// NOTE(review): a "custom:" key in YAML block-list form
// (custom:\n  - a\n  - b) would not be picked up by this scan — confirm the
// inline forms are the only ones bd writes.
func readTypesFromYAML(beadsDir string) ([]string, error) {
	configPath := filepath.Join(beadsDir, "config.yaml")
	if _, err := os.Stat(configPath); os.IsNotExist(err) {
		return nil, err
	}
	// Parse manually rather than via viper to avoid viper state issues.
	content, err := os.ReadFile(configPath) // #nosec G304 - path is controlled
	if err != nil {
		return nil, err
	}
	// Simple YAML parsing for types.custom:
	// looking for a "types:" section with a "custom:" key.
	lines := strings.Split(string(content), "\n")
	inTypes := false
	for _, line := range lines {
		trimmed := strings.TrimSpace(line)
		if strings.HasPrefix(trimmed, "types:") {
			inTypes = true
			continue
		}
		if inTypes && strings.HasPrefix(trimmed, "custom:") {
			// Parse the value after the key.
			value := strings.TrimPrefix(trimmed, "custom:")
			value = strings.TrimSpace(value)
			// Handle array format [a, b, c] or string format "a,b,c":
			// strip brackets and quotes, then split on commas.
			value = strings.Trim(value, "[]\"'")
			if value == "" {
				return nil, nil
			}
			var types []string
			for _, t := range strings.Split(value, ",") {
				t = strings.TrimSpace(t)
				t = strings.Trim(t, "\"'")
				if t != "" {
					types = append(types, t)
				}
			}
			return types, nil
		}
		// Exit the types section as soon as another top-level (unindented)
		// key appears.
		if inTypes && len(line) > 0 && line[0] != ' ' && line[0] != '\t' {
			break
		}
	}
	return nil, nil
}
// findUnknownTypesInHydratedIssues checks if any hydrated issues use types not
// found in any config. "Hydrated" means non-tombstone issues sourced from
// another repo (source_repo set and not ".").
//
// This is a best-effort doctor check: any database error yields nil rather
// than a failure, so callers never block on it.
func findUnknownTypesInHydratedIssues(repoPath string, multiRepo *config.MultiRepoConfig) []string {
	beadsDir := filepath.Join(repoPath, ".beads")
	// Resolve the database path (configured location, else canonical name).
	var dbPath string
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
		dbPath = cfg.DatabasePath(beadsDir)
	} else {
		dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return nil
	}
	db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
	if err != nil {
		return nil
	}
	defer db.Close()
	// Collect all known types (built-in + parent custom + all child custom).
	knownTypes := map[string]bool{
		"bug": true, "feature": true, "task": true, "epic": true, "chore": true,
		"message": true, "merge-request": true, "molecule": true, "gate": true, "event": true,
	}
	// Add the parent repo's custom types; a missing config row is fine.
	var parentTypes string
	if err := db.QueryRow("SELECT value FROM config WHERE key = 'types.custom'").Scan(&parentTypes); err == nil {
		for _, t := range strings.Split(parentTypes, ",") {
			if t = strings.TrimSpace(t); t != "" {
				knownTypes[t] = true
			}
		}
	}
	// Add custom types declared by each additional (child) repo.
	// Guard against a nil multi-repo config so this check is safe to call
	// from single-repo setups.
	if multiRepo != nil {
		for _, repoPathStr := range multiRepo.Additional {
			for _, t := range discoverChildTypes(repoPathStr) {
				knownTypes[t] = true
			}
		}
	}
	// Find hydrated issues whose type is not in knownTypes.
	rows, err := db.Query(`
		SELECT DISTINCT issue_type FROM issues
		WHERE status != 'tombstone' AND source_repo != '' AND source_repo != '.'
	`)
	if err != nil {
		return nil
	}
	defer rows.Close()
	var unknownTypes []string
	seen := make(map[string]bool)
	for rows.Next() {
		var issueType string
		if err := rows.Scan(&issueType); err != nil {
			continue
		}
		if !knownTypes[issueType] && !seen[issueType] {
			unknownTypes = append(unknownTypes, issueType)
			seen[issueType] = true
		}
	}
	// A partial scan could mislead the caller; stay consistent with the other
	// error paths and report nothing when iteration failed.
	if err := rows.Err(); err != nil {
		return nil
	}
	return unknownTypes
}

View File

@@ -75,11 +75,13 @@ Example:
}
// Count references for each issue
refCounts := countReferences(allIssues)
// Count structural relationships (children, dependencies) for duplicate groups
structuralScores := countStructuralRelationships(duplicateGroups)
// Prepare output
var mergeCommands []string
var mergeResults []map[string]interface{}
for _, group := range duplicateGroups {
target := chooseMergeTarget(group, refCounts)
target := chooseMergeTarget(group, refCounts, structuralScores)
sources := make([]string, 0, len(group)-1)
for _, issue := range group {
if issue.ID != target.ID {
@@ -110,7 +112,7 @@ Example:
if jsonOutput {
output := map[string]interface{}{
"duplicate_groups": len(duplicateGroups),
"groups": formatDuplicateGroupsJSON(duplicateGroups, refCounts),
"groups": formatDuplicateGroupsJSON(duplicateGroups, refCounts, structuralScores),
}
if autoMerge || dryRun {
output["merge_commands"] = mergeCommands
@@ -122,16 +124,20 @@ Example:
} else {
fmt.Printf("%s Found %d duplicate group(s):\n\n", ui.RenderWarn("🔍"), len(duplicateGroups))
for i, group := range duplicateGroups {
target := chooseMergeTarget(group, refCounts)
target := chooseMergeTarget(group, refCounts, structuralScores)
fmt.Printf("%s Group %d: %s\n", ui.RenderAccent("━━"), i+1, group[0].Title)
for _, issue := range group {
refs := refCounts[issue.ID]
depCount := 0
if score, ok := structuralScores[issue.ID]; ok {
depCount = score.dependentCount
}
marker := " "
if issue.ID == target.ID {
marker = ui.RenderPass("→ ")
}
fmt.Printf("%s%s (%s, P%d, %d references)\n",
marker, issue.ID, issue.Status, issue.Priority, refs)
fmt.Printf("%s%s (%s, P%d, %d dependents, %d refs)\n",
marker, issue.ID, issue.Status, issue.Priority, depCount, refs)
}
sources := make([]string, 0, len(group)-1)
for _, issue := range group {
@@ -190,6 +196,13 @@ func findDuplicateGroups(issues []*types.Issue) [][]*types.Issue {
}
return duplicates
}
// issueScore captures all factors used to choose which duplicate to keep
type issueScore struct {
dependentCount int // Issues that depend on this one (children, blocked-by) - highest priority
dependsOnCount int // Issues this one depends on
textRefs int // Text mentions in other issues' descriptions/notes
}
// countReferences counts how many times each issue is referenced in text fields
func countReferences(issues []*types.Issue) map[string]int {
counts := make(map[string]int)
@@ -211,36 +224,110 @@ func countReferences(issues []*types.Issue) map[string]int {
}
return counts
}
// countStructuralRelationships counts dependency relationships for issues in duplicate groups.
// Uses the efficient GetDependencyCounts batch query.
//
// Every issue in every group gets an entry in the returned map (zero-valued
// when no dependency data exists), so callers can index it without nil checks.
// NOTE(review): issueScore.textRefs is not populated here; text-reference
// counts are tracked separately via countReferences — confirm callers combine
// the two.
func countStructuralRelationships(groups [][]*types.Issue) map[string]*issueScore {
	scores := make(map[string]*issueScore)
	ctx := rootCtx
	// Collect all issue IDs from all groups
	var issueIDs []string
	for _, group := range groups {
		for _, issue := range group {
			issueIDs = append(issueIDs, issue.ID)
			scores[issue.ID] = &issueScore{}
		}
	}
	// Batch query for dependency counts
	depCounts, err := store.GetDependencyCounts(ctx, issueIDs)
	if err != nil {
		// On error, return empty scores - fallback to text refs only
		return scores
	}
	// Populate scores from dependency counts
	for id, counts := range depCounts {
		if score, ok := scores[id]; ok {
			score.dependentCount = counts.DependentCount // Issues that depend on this one (children, etc)
			score.dependsOnCount = counts.DependencyCount
		}
	}
	return scores
}
// chooseMergeTarget selects the best issue to merge into
// Priority: highest reference count, then lexicographically smallest ID
func chooseMergeTarget(group []*types.Issue, refCounts map[string]int) *types.Issue {
// Priority order:
// 1. Highest dependent count (children, blocked-by relationships) - most connected issue wins
// 2. Highest text reference count (mentions in descriptions/notes)
// 3. Lexicographically smallest ID (stable tiebreaker)
func chooseMergeTarget(group []*types.Issue, refCounts map[string]int, structuralScores map[string]*issueScore) *types.Issue {
if len(group) == 0 {
return nil
}
getScore := func(id string) (int, int) {
depCount := 0
if score, ok := structuralScores[id]; ok {
depCount = score.dependentCount
}
textRefs := refCounts[id]
return depCount, textRefs
}
target := group[0]
targetRefs := refCounts[target.ID]
targetDeps, targetRefs := getScore(target.ID)
for _, issue := range group[1:] {
issueRefs := refCounts[issue.ID]
if issueRefs > targetRefs || (issueRefs == targetRefs && issue.ID < target.ID) {
issueDeps, issueRefs := getScore(issue.ID)
// Compare by dependent count first (children/blocked-by)
if issueDeps > targetDeps {
target = issue
targetRefs = issueRefs
targetDeps, targetRefs = issueDeps, issueRefs
continue
}
if issueDeps < targetDeps {
continue
}
// Equal dependent count - compare by text references
if issueRefs > targetRefs {
target = issue
targetDeps, targetRefs = issueDeps, issueRefs
continue
}
if issueRefs < targetRefs {
continue
}
// Equal on both - use lexicographically smallest ID as tiebreaker
if issue.ID < target.ID {
target = issue
targetDeps, targetRefs = issueDeps, issueRefs
}
}
return target
}
// formatDuplicateGroupsJSON formats duplicate groups for JSON output
func formatDuplicateGroupsJSON(groups [][]*types.Issue, refCounts map[string]int) []map[string]interface{} {
func formatDuplicateGroupsJSON(groups [][]*types.Issue, refCounts map[string]int, structuralScores map[string]*issueScore) []map[string]interface{} {
var result []map[string]interface{}
for _, group := range groups {
target := chooseMergeTarget(group, refCounts)
target := chooseMergeTarget(group, refCounts, structuralScores)
issues := make([]map[string]interface{}, len(group))
for i, issue := range group {
depCount := 0
if score, ok := structuralScores[issue.ID]; ok {
depCount = score.dependentCount
}
issues[i] = map[string]interface{}{
"id": issue.ID,
"title": issue.Title,
"status": issue.Status,
"priority": issue.Priority,
"references": refCounts[issue.ID],
"dependents": depCount,
"is_merge_target": issue.ID == target.ID,
}
}

View File

@@ -86,13 +86,14 @@ func TestFindDuplicateGroups(t *testing.T) {
func TestChooseMergeTarget(t *testing.T) {
tests := []struct {
name string
group []*types.Issue
refCounts map[string]int
wantID string
name string
group []*types.Issue
refCounts map[string]int
structuralScores map[string]*issueScore
wantID string
}{
{
name: "choose by reference count",
name: "choose by reference count when no structural data",
group: []*types.Issue{
{ID: "bd-2", Title: "Task"},
{ID: "bd-1", Title: "Task"},
@@ -101,7 +102,8 @@ func TestChooseMergeTarget(t *testing.T) {
"bd-1": 5,
"bd-2": 0,
},
wantID: "bd-1",
structuralScores: map[string]*issueScore{},
wantID: "bd-1",
},
{
name: "choose by lexicographic order if same references",
@@ -113,7 +115,8 @@ func TestChooseMergeTarget(t *testing.T) {
"bd-1": 0,
"bd-2": 0,
},
wantID: "bd-1",
structuralScores: map[string]*issueScore{},
wantID: "bd-1",
},
{
name: "prefer higher references even with larger ID",
@@ -125,13 +128,46 @@ func TestChooseMergeTarget(t *testing.T) {
"bd-1": 1,
"bd-100": 10,
},
wantID: "bd-100",
structuralScores: map[string]*issueScore{},
wantID: "bd-100",
},
{
name: "prefer dependents over text references (GH#1022)",
group: []*types.Issue{
{ID: "HONEY-s2g1", Title: "P1 / Foundations"}, // Has 17 children
{ID: "HONEY-d0mw", Title: "P1 / Foundations"}, // Empty shell
},
refCounts: map[string]int{
"HONEY-s2g1": 0,
"HONEY-d0mw": 0,
},
structuralScores: map[string]*issueScore{
"HONEY-s2g1": {dependentCount: 17, dependsOnCount: 2, textRefs: 0},
"HONEY-d0mw": {dependentCount: 0, dependsOnCount: 0, textRefs: 0},
},
wantID: "HONEY-s2g1", // Should keep the one with children
},
{
name: "dependents beat text references",
group: []*types.Issue{
{ID: "bd-1", Title: "Task"}, // Has text refs but no deps
{ID: "bd-2", Title: "Task"}, // Has deps but no text refs
},
refCounts: map[string]int{
"bd-1": 100, // Lots of text references
"bd-2": 0,
},
structuralScores: map[string]*issueScore{
"bd-1": {dependentCount: 0, dependsOnCount: 0, textRefs: 100},
"bd-2": {dependentCount: 5, dependsOnCount: 0, textRefs: 0}, // 5 children/dependents
},
wantID: "bd-2", // Dependents take priority
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
target := chooseMergeTarget(tt.group, tt.refCounts)
target := chooseMergeTarget(tt.group, tt.refCounts, tt.structuralScores)
if target.ID != tt.wantID {
t.Errorf("chooseMergeTarget() = %v, want %v", target.ID, tt.wantID)
}

View File

@@ -130,8 +130,10 @@ Examples:
}
_ = tmpFile.Close()
// Open the editor
editorCmd := exec.Command(editor, tmpPath) //nolint:gosec // G204: editor from trusted $EDITOR/$VISUAL env or known defaults
// Open the editor - parse command and args (handles "vim -w" or "zeditor --wait")
editorParts := strings.Fields(editor)
editorArgs := append(editorParts[1:], tmpPath)
editorCmd := exec.Command(editorParts[0], editorArgs...) //nolint:gosec // G204: editor from trusted $EDITOR/$VISUAL env or known defaults
editorCmd.Stdin = os.Stdin
editorCmd.Stdout = os.Stdout
editorCmd.Stderr = os.Stderr

View File

@@ -33,51 +33,44 @@ type GraphLayout struct {
RootID string
}
var (
graphCompact bool
graphBox bool
graphAll bool
)
var graphCmd = &cobra.Command{
Use: "graph <issue-id>",
Use: "graph [issue-id]",
GroupID: "deps",
Short: "Display issue dependency graph",
Long: `Display an ASCII visualization of an issue's dependency graph.
Long: `Display a visualization of an issue's dependency graph.
For epics, shows all children and their dependencies.
For regular issues, shows the issue and its direct dependencies.
The graph shows execution order left-to-right:
- Leftmost nodes have no dependencies (can start immediately)
- Rightmost nodes depend on everything to their left
- Nodes in the same column can run in parallel
With --all, shows all open issues grouped by connected component.
Colors indicate status:
- White: open (ready to work)
- Yellow: in progress
- Red: blocked
- Green: closed`,
Args: cobra.ExactArgs(1),
Display formats:
--box (default) ASCII boxes showing layers, more detailed
--compact Tree format, one line per issue, more scannable
The graph shows execution order:
- Layer 0 / leftmost = no dependencies (can start immediately)
- Higher layers depend on lower layers
- Nodes in the same layer can run in parallel
Status icons: ○ open ◐ in_progress ● blocked ✓ closed ❄ deferred`,
Args: cobra.RangeArgs(0, 1),
Run: func(cmd *cobra.Command, args []string) {
ctx := rootCtx
var issueID string
// Resolve the issue ID
if daemonClient != nil {
resolveArgs := &rpc.ResolveIDArgs{ID: args[0]}
resp, err := daemonClient.ResolveID(resolveArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: issue '%s' not found\n", args[0])
os.Exit(1)
}
if err := json.Unmarshal(resp.Data, &issueID); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
} else if store != nil {
var err error
issueID, err = utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: issue '%s' not found\n", args[0])
os.Exit(1)
}
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
// Validate args
if graphAll && len(args) > 0 {
fmt.Fprintf(os.Stderr, "Error: cannot specify issue ID with --all flag\n")
os.Exit(1)
}
if !graphAll && len(args) == 0 {
fmt.Fprintf(os.Stderr, "Error: issue ID required (or use --all for all open issues)\n")
os.Exit(1)
}
@@ -92,6 +85,66 @@ Colors indicate status:
defer func() { _ = store.Close() }()
}
if store == nil {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
// Handle --all flag: show graph for all open issues
if graphAll {
subgraphs, err := loadAllGraphSubgraphs(ctx, store)
if err != nil {
fmt.Fprintf(os.Stderr, "Error loading all issues: %v\n", err)
os.Exit(1)
}
if len(subgraphs) == 0 {
fmt.Println("No open issues found")
return
}
if jsonOutput {
outputJSON(subgraphs)
return
}
// Render all subgraphs
for i, subgraph := range subgraphs {
layout := computeLayout(subgraph)
if graphCompact {
renderGraphCompact(layout, subgraph)
} else {
renderGraph(layout, subgraph)
}
if i < len(subgraphs)-1 {
fmt.Println(strings.Repeat("─", 60))
}
}
return
}
// Single issue mode
var issueID string
if daemonClient != nil {
resolveArgs := &rpc.ResolveIDArgs{ID: args[0]}
resp, err := daemonClient.ResolveID(resolveArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: issue '%s' not found\n", args[0])
os.Exit(1)
}
if err := json.Unmarshal(resp.Data, &issueID); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
} else {
var err error
issueID, err = utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: issue '%s' not found\n", args[0])
os.Exit(1)
}
}
// Load the subgraph
subgraph, err := loadGraphSubgraph(ctx, store, issueID)
if err != nil {
@@ -111,12 +164,19 @@ Colors indicate status:
return
}
// Render ASCII graph
renderGraph(layout, subgraph)
// Render graph - compact tree format or box format (default)
if graphCompact {
renderGraphCompact(layout, subgraph)
} else {
renderGraph(layout, subgraph)
}
},
}
func init() {
graphCmd.Flags().BoolVar(&graphAll, "all", false, "Show graph for all open issues")
graphCmd.Flags().BoolVar(&graphCompact, "compact", false, "Tree format, one line per issue, more scannable")
graphCmd.Flags().BoolVar(&graphBox, "box", true, "ASCII boxes showing layers (default)")
graphCmd.ValidArgsFunction = issueIDCompletion
rootCmd.AddCommand(graphCmd)
}
@@ -191,6 +251,157 @@ func loadGraphSubgraph(ctx context.Context, s storage.Storage, issueID string) (
return subgraph, nil
}
// loadAllGraphSubgraphs loads all open issues and groups them by connected component
// Each component is a subgraph of issues that share dependencies
func loadAllGraphSubgraphs(ctx context.Context, s storage.Storage) ([]*TemplateSubgraph, error) {
if s == nil {
return nil, fmt.Errorf("no database connection")
}
// Get all open issues (open, in_progress, blocked)
// We need to make multiple calls since IssueFilter takes a single status
var allIssues []*types.Issue
for _, status := range []types.Status{types.StatusOpen, types.StatusInProgress, types.StatusBlocked} {
statusCopy := status
issues, err := s.SearchIssues(ctx, "", types.IssueFilter{
Status: &statusCopy,
})
if err != nil {
return nil, fmt.Errorf("failed to search issues: %w", err)
}
allIssues = append(allIssues, issues...)
}
if len(allIssues) == 0 {
return nil, nil
}
// Build issue map
issueMap := make(map[string]*types.Issue)
for _, issue := range allIssues {
issueMap[issue.ID] = issue
}
// Load all dependencies between these issues
allDeps := make([]*types.Dependency, 0)
for _, issue := range allIssues {
deps, err := s.GetDependencyRecords(ctx, issue.ID)
if err != nil {
continue
}
for _, dep := range deps {
// Only include deps where both ends are in our issue set
if _, ok := issueMap[dep.DependsOnID]; ok {
allDeps = append(allDeps, dep)
}
}
}
// Build adjacency list for union-find
adj := make(map[string][]string)
for _, dep := range allDeps {
adj[dep.IssueID] = append(adj[dep.IssueID], dep.DependsOnID)
adj[dep.DependsOnID] = append(adj[dep.DependsOnID], dep.IssueID)
}
// Find connected components using BFS
visited := make(map[string]bool)
var components [][]string
for _, issue := range allIssues {
if visited[issue.ID] {
continue
}
// BFS to find all connected issues
var component []string
queue := []string{issue.ID}
visited[issue.ID] = true
for len(queue) > 0 {
current := queue[0]
queue = queue[1:]
component = append(component, current)
for _, neighbor := range adj[current] {
if !visited[neighbor] {
visited[neighbor] = true
queue = append(queue, neighbor)
}
}
}
components = append(components, component)
}
// Sort components by size (largest first) and then by priority of first issue
sort.Slice(components, func(i, j int) bool {
// First by size (descending)
if len(components[i]) != len(components[j]) {
return len(components[i]) > len(components[j])
}
// Then by priority of first issue (ascending = higher priority first)
issueI := issueMap[components[i][0]]
issueJ := issueMap[components[j][0]]
return issueI.Priority < issueJ.Priority
})
// Create subgraph for each component
var subgraphs []*TemplateSubgraph
for _, component := range components {
if len(component) == 0 {
continue
}
// Find the best "root" for this component
// Prefer: epics > highest priority > oldest
var root *types.Issue
for _, id := range component {
issue := issueMap[id]
if root == nil {
root = issue
continue
}
// Prefer epics
if issue.IssueType == types.TypeEpic && root.IssueType != types.TypeEpic {
root = issue
continue
}
if root.IssueType == types.TypeEpic && issue.IssueType != types.TypeEpic {
continue
}
// Prefer higher priority (lower number)
if issue.Priority < root.Priority {
root = issue
}
}
subgraph := &TemplateSubgraph{
Root: root,
IssueMap: make(map[string]*types.Issue),
}
for _, id := range component {
issue := issueMap[id]
subgraph.Issues = append(subgraph.Issues, issue)
subgraph.IssueMap[id] = issue
}
// Add dependencies for this component
for _, dep := range allDeps {
if _, inComponent := subgraph.IssueMap[dep.IssueID]; inComponent {
if _, depInComponent := subgraph.IssueMap[dep.DependsOnID]; depInComponent {
subgraph.Dependencies = append(subgraph.Dependencies, dep)
}
}
}
subgraphs = append(subgraphs, subgraph)
}
return subgraphs, nil
}
// computeLayout assigns layers to nodes using topological sort
func computeLayout(subgraph *TemplateSubgraph) *GraphLayout {
layout := &GraphLayout{
@@ -379,33 +590,155 @@ func renderGraph(layout *GraphLayout, subgraph *TemplateSubgraph) {
fmt.Printf(" Total: %d issues across %d layers\n\n", len(layout.Nodes), len(layout.Layers))
}
// renderGraphCompact renders the graph in compact tree format
// One line per issue, more scannable, uses tree connectors (├──, └──, │)
func renderGraphCompact(layout *GraphLayout, subgraph *TemplateSubgraph) {
if len(layout.Nodes) == 0 {
fmt.Println("Empty graph")
return
}
fmt.Printf("\n%s Dependency graph for %s (%d issues, %d layers)\n\n",
ui.RenderAccent("📊"), layout.RootID, len(layout.Nodes), len(layout.Layers))
// Legend
fmt.Println(" Status: ○ open ◐ in_progress ● blocked ✓ closed ❄ deferred")
fmt.Println()
// Build parent-child map from subgraph dependencies
children := make(map[string][]string) // parent -> children
childSet := make(map[string]bool) // track which issues are children
for _, dep := range subgraph.Dependencies {
if dep.Type == types.DepParentChild {
children[dep.DependsOnID] = append(children[dep.DependsOnID], dep.IssueID)
childSet[dep.IssueID] = true
}
}
// Sort children by priority then ID for consistent output
for parentID := range children {
sort.Slice(children[parentID], func(i, j int) bool {
nodeI := layout.Nodes[children[parentID][i]]
nodeJ := layout.Nodes[children[parentID][j]]
if nodeI.Issue.Priority != nodeJ.Issue.Priority {
return nodeI.Issue.Priority < nodeJ.Issue.Priority
}
return nodeI.Issue.ID < nodeJ.Issue.ID
})
}
// Render by layer with tree structure
for layerIdx, layer := range layout.Layers {
// Layer header
layerHeader := fmt.Sprintf("LAYER %d", layerIdx)
if layerIdx == 0 {
layerHeader += " (ready)"
}
fmt.Printf(" %s\n", ui.RenderAccent(layerHeader))
for i, id := range layer {
node := layout.Nodes[id]
isLast := i == len(layer)-1
// Format node line
line := formatCompactNode(node)
// Tree connector
connector := "├── "
if isLast {
connector = "└── "
}
fmt.Printf(" %s%s\n", connector, line)
// Render children (if this issue has children in the subgraph)
if childIDs, ok := children[id]; ok && len(childIDs) > 0 {
childPrefix := "│ "
if isLast {
childPrefix = " "
}
renderCompactChildren(layout, childIDs, children, childPrefix, 1)
}
}
fmt.Println()
}
}
// renderCompactChildren recursively renders children in tree format.
// prefix carries the accumulated tree guides ("│", spaces) from ancestor
// levels; children maps parent ID -> child IDs for the whole subgraph.
// NOTE(review): depth is only threaded through the recursion and never read
// here — confirm whether it is reserved for a depth limit.
func renderCompactChildren(layout *GraphLayout, childIDs []string, children map[string][]string, prefix string, depth int) {
	for i, childID := range childIDs {
		node := layout.Nodes[childID]
		// Skip IDs not present in the layout (e.g. filtered out of the subgraph).
		if node == nil {
			continue
		}
		isLast := i == len(childIDs)-1
		// Last sibling gets a corner connector; others get a tee.
		connector := "├── "
		if isLast {
			connector = "└── "
		}
		line := formatCompactNode(node)
		fmt.Printf(" %s%s%s\n", prefix, connector, line)
		// Recurse for nested children
		if grandchildren, ok := children[childID]; ok && len(grandchildren) > 0 {
			// Extend the prefix: a vertical guide while more siblings follow,
			// blank space after the last sibling.
			childPrefix := prefix
			if isLast {
				childPrefix += " "
			} else {
				childPrefix += "│ "
			}
			renderCompactChildren(layout, grandchildren, children, childPrefix, depth+1)
		}
	}
}
// formatCompactNode formats a single node for compact output.
// Format: STATUS_ICON ID PRIORITY Title
func formatCompactNode(node *GraphNode) string {
	status := string(node.Issue.Status)
	// Use shared status icon with semantic color
	statusIcon := ui.RenderStatusIcon(status)
	// Priority with icon
	priorityTag := ui.RenderPriority(node.Issue.Priority)
	// Title - truncate if too long
	title := truncateTitle(node.Issue.Title, 50)
	// Build line - apply status style to entire line for closed issues
	style := ui.GetStatusStyle(status)
	if node.Issue.Status == types.StatusClosed {
		// For closed issues the priority is re-rendered as plain "● P<n>"
		// (instead of priorityTag) so the closed style applies uniformly
		// to every segment of the line.
		return fmt.Sprintf("%s %s %s %s",
			statusIcon,
			style.Render(node.Issue.ID),
			style.Render(fmt.Sprintf("● P%d", node.Issue.Priority)),
			style.Render(title))
	}
	return fmt.Sprintf("%s %s %s %s", statusIcon, node.Issue.ID, priorityTag, title)
}
// renderNodeBox renders a single node as an ASCII box
// Uses semantic status styles from ui package for consistency
func renderNodeBox(node *GraphNode, width int) string {
// Status indicator
var statusIcon string
var titleStr string
title := truncateTitle(node.Issue.Title, width-4)
paddedTitle := padRight(title, width-4)
status := string(node.Issue.Status)
switch node.Issue.Status {
case types.StatusOpen:
statusIcon = "○"
titleStr = padRight(title, width-4)
case types.StatusInProgress:
statusIcon = "◐"
titleStr = ui.RenderWarn(padRight(title, width-4))
case types.StatusBlocked:
statusIcon = "●"
titleStr = ui.RenderFail(padRight(title, width-4))
case types.StatusDeferred:
statusIcon = "❄"
titleStr = ui.RenderAccent(padRight(title, width-4))
case types.StatusClosed:
statusIcon = "✓"
titleStr = ui.RenderPass(padRight(title, width-4))
default:
statusIcon = "?"
titleStr = padRight(title, width-4)
// Use shared status icon and style
statusIcon := ui.RenderStatusIcon(status)
style := ui.GetStatusStyle(status)
// Apply style to title for actionable statuses
var titleStr string
if node.Issue.Status == types.StatusOpen {
titleStr = paddedTitle // no color for open - available but not urgent
} else {
titleStr = style.Render(paddedTitle)
}
id := node.Issue.ID
@@ -438,6 +771,7 @@ func padRight(s string, width int) string {
}
// computeDependencyCounts calculates how many issues each issue blocks and is blocked by
// Excludes parent-child relationships and the root issue from counts to reduce cognitive noise
func computeDependencyCounts(subgraph *TemplateSubgraph) (blocks map[string]int, blockedBy map[string]int) {
blocks = make(map[string]int)
blockedBy = make(map[string]int)
@@ -446,61 +780,76 @@ func computeDependencyCounts(subgraph *TemplateSubgraph) (blocks map[string]int,
return blocks, blockedBy
}
rootID := ""
if subgraph.Root != nil {
rootID = subgraph.Root.ID
}
for _, dep := range subgraph.Dependencies {
if dep.Type == types.DepBlocks {
// dep.DependsOnID blocks dep.IssueID
// So dep.DependsOnID "blocks" count increases
blocks[dep.DependsOnID]++
// And dep.IssueID "blocked by" count increases
blockedBy[dep.IssueID]++
// Only count "blocks" dependencies (not parent-child, related, etc.)
if dep.Type != types.DepBlocks {
continue
}
// Skip if the blocker is the root issue - this is obvious from graph structure
// and showing "needs:1" when it's just the parent epic is cognitive noise
if dep.DependsOnID == rootID {
continue
}
// dep.DependsOnID blocks dep.IssueID
// So dep.DependsOnID "blocks" count increases
blocks[dep.DependsOnID]++
// And dep.IssueID "blocked by" count increases
blockedBy[dep.IssueID]++
}
return blocks, blockedBy
}
// renderNodeBoxWithDeps renders a node box with dependency information
// Uses semantic status styles from ui package for consistency across commands
// Design principle: only actionable states get color, closed items fade
func renderNodeBoxWithDeps(node *GraphNode, width int, blocksCount int, blockedByCount int) string {
// Status indicator
var statusIcon string
var titleStr string
title := truncateTitle(node.Issue.Title, width-4)
paddedTitle := padRight(title, width-4)
status := string(node.Issue.Status)
switch node.Issue.Status {
case types.StatusOpen:
statusIcon = "○"
titleStr = padRight(title, width-4)
case types.StatusInProgress:
statusIcon = "◐"
titleStr = ui.RenderWarn(padRight(title, width-4))
case types.StatusBlocked:
statusIcon = "●"
titleStr = ui.RenderFail(padRight(title, width-4))
case types.StatusDeferred:
statusIcon = "❄"
titleStr = ui.RenderAccent(padRight(title, width-4))
case types.StatusClosed:
statusIcon = "✓"
titleStr = ui.RenderPass(padRight(title, width-4))
default:
statusIcon = "?"
titleStr = padRight(title, width-4)
// Use shared status icon and style from ui package
statusIcon := ui.RenderStatusIcon(status)
style := ui.GetStatusStyle(status)
// Apply style to title for actionable statuses
var titleStr string
if node.Issue.Status == types.StatusOpen {
titleStr = paddedTitle // no color for open - available but not urgent
} else {
titleStr = style.Render(paddedTitle)
}
id := node.Issue.ID
// Build dependency info string
var depInfo string
// Build dependency info string - only show if meaningful counts exist
// Note: we build the plain text version first for padding, then apply colors
var depInfoPlain string
var depInfoStyled string
if blocksCount > 0 || blockedByCount > 0 {
parts := []string{}
plainParts := []string{}
styledParts := []string{}
if blocksCount > 0 {
parts = append(parts, fmt.Sprintf("blocks:%d", blocksCount))
plainText := fmt.Sprintf("blocks:%d", blocksCount)
plainParts = append(plainParts, plainText)
// Use semantic color for blocks indicator - attention-grabbing
styledParts = append(styledParts, ui.StatusBlockedStyle.Render(plainText))
}
if blockedByCount > 0 {
parts = append(parts, fmt.Sprintf("needs:%d", blockedByCount))
plainText := fmt.Sprintf("needs:%d", blockedByCount)
plainParts = append(plainParts, plainText)
// Use muted color for needs indicator - informational
styledParts = append(styledParts, ui.MutedStyle.Render(plainText))
}
depInfo = strings.Join(parts, " ")
depInfoPlain = strings.Join(plainParts, " ")
depInfoStyled = strings.Join(styledParts, " ")
}
// Build the box
@@ -509,8 +858,13 @@ func renderNodeBoxWithDeps(node *GraphNode, width int, blocksCount int, blockedB
idLine := fmt.Sprintf(" │ %s │", ui.RenderMuted(padRight(id, width-2)))
var result string
if depInfo != "" {
depLine := fmt.Sprintf(" │ %s │", ui.RenderAccent(padRight(depInfo, width-2)))
if depInfoPlain != "" {
// Pad based on plain text length, then render with styled version
padding := width - 2 - len([]rune(depInfoPlain))
if padding < 0 {
padding = 0
}
depLine := fmt.Sprintf(" │ %s%s │", depInfoStyled, strings.Repeat(" ", padding))
bottom := " └" + strings.Repeat("─", width) + "┘"
result = topBottom + "\n" + middle + "\n" + idLine + "\n" + depLine + "\n" + bottom
} else {

View File

@@ -466,21 +466,26 @@ NOTE: Import requires direct database access and does not work with daemon mode.
}
refCounts := countReferences(allIssues)
structuralScores := countStructuralRelationships(duplicateGroups)
fmt.Fprintf(os.Stderr, "Found %d duplicate group(s)\n\n", len(duplicateGroups))
for i, group := range duplicateGroups {
target := chooseMergeTarget(group, refCounts)
target := chooseMergeTarget(group, refCounts, structuralScores)
fmt.Fprintf(os.Stderr, "Group %d: %s\n", i+1, group[0].Title)
for _, issue := range group {
refs := refCounts[issue.ID]
depCount := 0
if score, ok := structuralScores[issue.ID]; ok {
depCount = score.dependentCount
}
marker := " "
if issue.ID == target.ID {
marker = "→ "
}
fmt.Fprintf(os.Stderr, " %s%s (%s, P%d, %d refs)\n",
marker, issue.ID, issue.Status, issue.Priority, refs)
fmt.Fprintf(os.Stderr, " %s%s (%s, P%d, %d dependents, %d refs)\n",
marker, issue.ID, issue.Status, issue.Priority, depCount, refs)
}
sources := make([]string, 0, len(group)-1)

View File

@@ -296,6 +296,42 @@ type VersionChange struct {
// versionChanges contains agent-actionable changes for recent versions
var versionChanges = []VersionChange{
{
Version: "0.47.1",
Date: "2026-01-12",
Changes: []string{
"NEW: bd list --ready flag - Show only issues with no blockers (bd-ihu31)",
"NEW: Markdown rendering in comments - Enhanced display for notes (#1019)",
"FIX: Nil pointer in wisp create - Prevent panic in molecule creation",
"FIX: Route prefix for rig issues - Use correct prefix when creating (#1028)",
"FIX: Duplicate merge target - Prefer issues with children/deps (GH#1022)",
"FIX: SQLite cache rebuild after rename-prefix (GH#1016)",
"FIX: MCP custom types - Support non-built-in types/statuses (#1023)",
"FIX: Hyphenated prefix validation - Support hyphens in prefixes (#1013)",
"FIX: Git worktree initialization - Prevent bd init in worktrees (#1026)",
},
},
{
Version: "0.47.0",
Date: "2026-01-11",
Changes: []string{
"NEW: Pull-first sync with 3-way merge - Reconciles local/remote before push (#918)",
"NEW: bd resolve-conflicts command - Mechanical JSONL conflict resolution (bd-7e7ddffa)",
"NEW: bd create --dry-run - Preview issue creation without side effects (bd-0hi7)",
"NEW: bd ready --gated - Find molecules waiting on gates (bd-lhalq)",
"NEW: Gate auto-discovery - Auto-discover workflow run ID in bd gate check (bd-fbkd)",
"NEW: Multi-repo custom types - bd doctor discovers types across repos (bd-62g22)",
"NEW: Stale DB handling - Read-only commands auto-import on stale DB (#977, #982)",
"NEW: Linear project filter - linear.project_id config for sync (#938)",
"FIX: Windows infinite loop in findLocalBeadsDir (GH#996)",
"FIX: bd init hangs on Windows when not in git repo (#991)",
"FIX: Daemon socket for deep paths - Long workspace paths now work (GH#1001)",
"FIX: Prevent closing issues with open blockers (GH#962)",
"FIX: bd edit parses EDITOR with args (GH#987)",
"FIX: Worktree/redirect handling - Skip restore when redirected (bd-lmqhe)",
"CHANGE: Daemon CLI refactored to subcommands (#1006)",
},
},
{
Version: "0.46.0",
Date: "2026-01-06",

View File

@@ -147,20 +147,31 @@ With --stealth: configures per-repository git settings for invisible beads usage
if isGitRepo() {
isWorktree = git.IsWorktree()
}
var beadsDir string
// Prevent initialization from within a worktree
if isWorktree {
// For worktrees, .beads should be in the main repository root
mainRepoRoot, err := git.GetMainRepoRoot()
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get main repository root: %v\n", err)
os.Exit(1)
}
beadsDir = filepath.Join(mainRepoRoot, ".beads")
} else {
// For regular repos, use current directory
beadsDir = filepath.Join(cwd, ".beads")
fmt.Fprintf(os.Stderr, "Error: cannot run 'bd init' from within a git worktree\n\n")
fmt.Fprintf(os.Stderr, "Git worktrees share the .beads database from the main repository.\n")
fmt.Fprintf(os.Stderr, "To fix this:\n\n")
fmt.Fprintf(os.Stderr, " 1. Initialize beads in the main repository:\n")
fmt.Fprintf(os.Stderr, " cd %s\n", mainRepoRoot)
fmt.Fprintf(os.Stderr, " bd init\n\n")
fmt.Fprintf(os.Stderr, " 2. Then create worktrees with beads support:\n")
fmt.Fprintf(os.Stderr, " bd worktree create <path> --branch <branch-name>\n\n")
fmt.Fprintf(os.Stderr, "For more information, see: https://github.com/steveyegge/beads/blob/main/docs/WORKTREES.md\n")
os.Exit(1)
}
var beadsDir string
// For regular repos, use current directory
beadsDir = filepath.Join(cwd, ".beads")
// Prevent nested .beads directories
// Check if current working directory is inside a .beads directory
if strings.Contains(filepath.Clean(cwd), string(filepath.Separator)+".beads"+string(filepath.Separator)) ||

View File

@@ -10,6 +10,7 @@ import (
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/syncbranch"
"github.com/steveyegge/beads/internal/ui"
)
@@ -71,8 +72,8 @@ func runTeamWizard(ctx context.Context, store storage.Storage) error {
fmt.Printf("\n%s Sync branch set to: %s\n", ui.RenderPass("✓"), syncBranch)
// Set sync.branch config
if err := store.SetConfig(ctx, "sync.branch", syncBranch); err != nil {
// Set sync.branch config (GH#923: use syncbranch.Set for validation)
if err := syncbranch.Set(ctx, store, syncBranch); err != nil {
return fmt.Errorf("failed to set sync branch: %w", err)
}

View File

@@ -466,16 +466,27 @@ func TestInitNoDbMode(t *testing.T) {
// Reset global state
origDBPath := dbPath
origNoDb := noDb
defer func() {
defer func() {
dbPath = origDBPath
noDb = origNoDb
}()
dbPath = ""
noDb = false
tmpDir := t.TempDir()
t.Chdir(tmpDir)
// Set BEADS_DIR to prevent git repo detection from finding project's .beads
origBeadsDir := os.Getenv("BEADS_DIR")
os.Setenv("BEADS_DIR", filepath.Join(tmpDir, ".beads"))
defer func() {
if origBeadsDir != "" {
os.Setenv("BEADS_DIR", origBeadsDir)
} else {
os.Unsetenv("BEADS_DIR")
}
}()
// Initialize with --no-db flag
rootCmd.SetArgs([]string{"init", "--no-db", "--no-daemon", "--prefix", "test", "--quiet"})

View File

@@ -20,6 +20,7 @@ import (
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/timeparsing"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
@@ -42,76 +43,131 @@ func pinIndicator(issue *types.Issue) string {
return ""
}
// Priority symbols for pretty output (GH#654)
var prioritySymbols = map[int]string{
0: "🔴", // P0 - Critical
1: "🟠", // P1 - High
2: "🟡", // P2 - Medium (default)
3: "🔵", // P3 - Low
4: "⚪", // P4 - Lowest
// renderPriorityTag renders the textual priority tag for pretty output.
// Coloring is delegated entirely to the shared ui package; by design only
// P0/P1 receive attention colors while P2-P4 remain neutral.
func renderPriorityTag(priority int) string {
	tag := ui.RenderPriority(priority)
	return tag
}
// Status symbols for pretty output (GH#654)
var statusSymbols = map[types.Status]string{
"open": "○",
"in_progress": "◐",
"blocked": "⊗",
"deferred": "◇",
"closed": "●",
// renderStatusIcon maps an issue status to its colored icon.
// It delegates to the shared ui.RenderStatusIcon helper so that status
// rendering stays consistent across all commands.
func renderStatusIcon(status types.Status) string {
	icon := ui.RenderStatusIcon(string(status))
	return icon
}
// formatPrettyIssue formats a single issue for pretty output
// Uses semantic colors: status icon colored, priority P0/P1 colored, rest neutral
func formatPrettyIssue(issue *types.Issue) string {
prioritySym := prioritySymbols[issue.Priority]
if prioritySym == "" {
prioritySym = "⚪"
}
statusSym := statusSymbols[issue.Status]
if statusSym == "" {
statusSym = "○"
}
// Use shared helpers from ui package
statusIcon := ui.RenderStatusIcon(string(issue.Status))
priorityTag := renderPriorityTag(issue.Priority)
// Type badge - only show for notable types
typeBadge := ""
switch issue.IssueType {
case "epic":
typeBadge = "[EPIC] "
case "feature":
typeBadge = "[FEAT] "
typeBadge = ui.TypeEpicStyle.Render("[epic]") + " "
case "bug":
typeBadge = "[BUG] "
typeBadge = ui.TypeBugStyle.Render("[bug]") + " "
}
return fmt.Sprintf("%s %s %s - %s%s", statusSym, prioritySym, issue.ID, typeBadge, issue.Title)
// Format: STATUS_ICON ID PRIORITY [Type] Title
// Priority uses ● icon with color, no brackets needed
// Closed issues: entire line is muted
if issue.Status == types.StatusClosed {
return fmt.Sprintf("%s %s %s %s%s",
statusIcon,
ui.RenderMuted(issue.ID),
ui.RenderMuted(fmt.Sprintf("● P%d", issue.Priority)),
ui.RenderMuted(string(issue.IssueType)),
ui.RenderMuted(" "+issue.Title))
}
return fmt.Sprintf("%s %s %s %s%s", statusIcon, issue.ID, priorityTag, typeBadge, issue.Title)
}
// buildIssueTree derives the parent-child tree for the given issues using
// only the dotted-ID hierarchy (e.g. "parent.1"); it is a convenience
// wrapper around buildIssueTreeWithDeps with no dependency records.
func buildIssueTree(issues []*types.Issue) (roots []*types.Issue, childrenMap map[string][]*types.Issue) {
	roots, childrenMap = buildIssueTreeWithDeps(issues, nil)
	return roots, childrenMap
}
// buildIssueTreeWithDeps builds the parent-child tree using dependency records.
// If allDeps is nil, it falls back to dotted ID hierarchy (e.g., "parent.1").
// Any dependency whose target is an epic is treated as a parent-child
// relationship, in addition to explicit parent-child dependency types.
//
// Fix: the previous text appended non-child issues to roots both inside the
// dotted-ID fallback loop AND in the final roots pass, duplicating every root.
// Roots are now collected exactly once, after all child detection is done.
func buildIssueTreeWithDeps(issues []*types.Issue, allDeps map[string][]*types.Dependency) (roots []*types.Issue, childrenMap map[string][]*types.Issue) {
	issueMap := make(map[string]*types.Issue, len(issues))
	childrenMap = make(map[string][]*types.Issue)
	isChild := make(map[string]bool)

	// Build issue map and identify epics up front.
	epicIDs := make(map[string]bool)
	for _, issue := range issues {
		issueMap[issue.ID] = issue
		if issue.IssueType == "epic" {
			epicIDs[issue.ID] = true
		}
	}

	// If we have dependency records, use them to find parent-child relationships.
	if allDeps != nil {
		for issueID, deps := range allDeps {
			for _, dep := range deps {
				parentID := dep.DependsOnID
				// Only include if both parent and child are in the issue set.
				child, childOk := issueMap[issueID]
				_, parentOk := issueMap[parentID]
				if !childOk || !parentOk {
					continue
				}
				// Treat as parent-child if:
				// 1. Explicit parent-child dependency type, OR
				// 2. Any dependency where the target is an epic.
				if dep.Type == types.DepParentChild || epicIDs[parentID] {
					childrenMap[parentID] = append(childrenMap[parentID], child)
					isChild[issueID] = true
				}
			}
		}
	}

	// Fallback: hierarchical subtask IDs (e.g., "parent.1") imply parentage
	// when the parent is present in the issue set.
	for _, issue := range issues {
		if isChild[issue.ID] {
			continue // already a child via dependency records
		}
		if strings.Contains(issue.ID, ".") {
			parts := strings.Split(issue.ID, ".")
			parentID := strings.Join(parts[:len(parts)-1], ".")
			if _, exists := issueMap[parentID]; exists {
				childrenMap[parentID] = append(childrenMap[parentID], issue)
				isChild[issue.ID] = true
			}
		}
	}

	// Roots are issues that aren't children of any other issue; collected
	// exactly once here, preserving the input order.
	for _, issue := range issues {
		if !isChild[issue.ID] {
			roots = append(roots, issue)
		}
	}
	return roots, childrenMap
}
// printPrettyTree recursively prints the issue tree
// Children are sorted by priority (P0 first) for intuitive reading
func printPrettyTree(childrenMap map[string][]*types.Issue, parentID string, prefix string) {
children := childrenMap[parentID]
// Sort children by priority (ascending: P0 before P1 before P2...)
slices.SortFunc(children, func(a, b *types.Issue) int {
return cmp.Compare(a.Priority, b.Priority)
})
for i, child := range children {
isLast := i == len(children)-1
connector := "├── "
@@ -129,7 +185,13 @@ func printPrettyTree(childrenMap map[string][]*types.Issue, parentID string, pre
}
// displayPrettyList displays issues in pretty tree format (GH#654)
// Uses buildIssueTree which only supports dotted ID hierarchy
func displayPrettyList(issues []*types.Issue, showHeader bool) {
displayPrettyListWithDeps(issues, showHeader, nil)
}
// displayPrettyListWithDeps displays issues in tree format using dependency data
func displayPrettyListWithDeps(issues []*types.Issue, showHeader bool, allDeps map[string][]*types.Dependency) {
if showHeader {
// Clear screen and show header
fmt.Print("\033[2J\033[H")
@@ -144,14 +206,11 @@ func displayPrettyList(issues []*types.Issue, showHeader bool) {
return
}
roots, childrenMap := buildIssueTree(issues)
roots, childrenMap := buildIssueTreeWithDeps(issues, allDeps)
for i, issue := range roots {
for _, issue := range roots {
fmt.Println(formatPrettyIssue(issue))
printPrettyTree(childrenMap, issue.ID, "")
if i < len(roots)-1 {
fmt.Println()
}
}
// Summary
@@ -169,7 +228,7 @@ func displayPrettyList(issues []*types.Issue, showHeader bool) {
}
fmt.Printf("Total: %d issues (%d open, %d in progress)\n", len(issues), openCount, inProgressCount)
fmt.Println()
fmt.Println("Legend: ○ open | ◐ in progress | ⊗ blocked | 🔴 P0 | 🟠 P1 | 🟡 P2 | 🔵 P3 | ⚪ P4")
fmt.Println("Status: ○ open ◐ in_progress blocked ✓ closed ❄ deferred")
}
// watchIssues starts watching for changes and re-displays (GH#654)
@@ -330,6 +389,8 @@ func formatAgentIssue(buf *strings.Builder, issue *types.Issue) {
}
// formatIssueCompact formats a single issue in compact format to a buffer
// Uses status icons for better scanability - consistent with bd graph
// Format: [icon] [pin] ID [Priority] [Type] @assignee [labels] - Title
func formatIssueCompact(buf *strings.Builder, issue *types.Issue, labels []string) {
labelsStr := ""
if len(labels) > 0 {
@@ -339,20 +400,25 @@ func formatIssueCompact(buf *strings.Builder, issue *types.Issue, labels []strin
if issue.Assignee != "" {
assigneeStr = fmt.Sprintf(" @%s", issue.Assignee)
}
status := string(issue.Status)
if status == "closed" {
line := fmt.Sprintf("%s%s [P%d] [%s] %s%s%s - %s",
pinIndicator(issue), issue.ID, issue.Priority,
issue.IssueType, status, assigneeStr, labelsStr, issue.Title)
// Get styled status icon
statusIcon := renderStatusIcon(issue.Status)
if issue.Status == types.StatusClosed {
// Closed issues: entire line muted (fades visually)
line := fmt.Sprintf("%s %s%s [P%d] [%s]%s%s - %s",
statusIcon, pinIndicator(issue), issue.ID, issue.Priority,
issue.IssueType, assigneeStr, labelsStr, issue.Title)
buf.WriteString(ui.RenderClosedLine(line))
buf.WriteString("\n")
} else {
buf.WriteString(fmt.Sprintf("%s%s [%s] [%s] %s%s%s - %s\n",
// Active issues: status icon + semantic colors for priority/type
buf.WriteString(fmt.Sprintf("%s %s%s [%s] [%s]%s%s - %s\n",
statusIcon,
pinIndicator(issue),
ui.RenderID(issue.ID),
ui.RenderPriority(issue.Priority),
ui.RenderType(string(issue.IssueType)),
ui.RenderStatus(status),
assigneeStr, labelsStr, issue.Title))
}
}
@@ -437,11 +503,16 @@ var listCmd = &cobra.Command{
// Pretty and watch flags (GH#654)
prettyFormat, _ := cmd.Flags().GetBool("pretty")
treeFormat, _ := cmd.Flags().GetBool("tree")
prettyFormat = prettyFormat || treeFormat // --tree is alias for --pretty
watchMode, _ := cmd.Flags().GetBool("watch")
// Pager control (bd-jdz3)
noPager, _ := cmd.Flags().GetBool("no-pager")
// Ready filter (bd-ihu31)
readyFlag, _ := cmd.Flags().GetBool("ready")
// Watch mode implies pretty format
if watchMode {
prettyFormat = true
@@ -473,13 +544,18 @@ var listCmd = &cobra.Command{
filter := types.IssueFilter{
Limit: effectiveLimit,
}
if status != "" && status != "all" {
// --ready flag: show only open issues (excludes hooked/in_progress/blocked/deferred) (bd-ihu31)
if readyFlag {
s := types.StatusOpen
filter.Status = &s
} else if status != "" && status != "all" {
s := types.Status(status)
filter.Status = &s
}
// Default to non-closed issues unless --all or explicit --status (GH#788)
if status == "" && !allFlag {
if status == "" && !allFlag && !readyFlag {
filter.ExcludeStatus = []types.Status{types.StatusClosed}
}
// Use Changed() to properly handle P0 (priority=0)
@@ -693,8 +769,13 @@ var listCmd = &cobra.Command{
// If daemon is running, use RPC
if daemonClient != nil {
// Determine effective status for RPC (--ready overrides to "open")
effectiveStatus := status
if readyFlag {
effectiveStatus = "open"
}
listArgs := &rpc.ListArgs{
Status: status,
Status: effectiveStatus,
IssueType: issueType,
Assignee: assignee,
Limit: effectiveLimit,
@@ -795,6 +876,9 @@ var listCmd = &cobra.Command{
}
listArgs.Overdue = filter.Overdue
// Pass through --allow-stale flag for resilient queries (bd-dpkdm)
listArgs.AllowStale = allowStale
resp, err := daemonClient.List(listArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -824,6 +908,33 @@ var listCmd = &cobra.Command{
// Apply sorting
sortIssues(issues, sortBy, reverse)
// Handle watch mode (GH#654)
if watchMode {
watchIssues(ctx, store, filter, sortBy, reverse)
return
}
// Handle pretty/tree format (GH#654)
if prettyFormat {
// Load dependencies for tree structure
// In daemon mode, open a read-only store to get dependencies
var allDeps map[string][]*types.Dependency
if store != nil {
allDeps, _ = store.GetAllDependencyRecords(ctx)
} else if dbPath != "" {
// Daemon mode: open read-only connection for tree deps
if roStore, err := sqlite.NewReadOnlyWithTimeout(ctx, dbPath, lockTimeout); err == nil {
allDeps, _ = roStore.GetAllDependencyRecords(ctx)
_ = roStore.Close()
}
}
displayPrettyListWithDeps(issues, false, allDeps)
if effectiveLimit > 0 && len(issues) == effectiveLimit {
fmt.Fprintf(os.Stderr, "\nShowing %d issues (use --limit 0 for all)\n", effectiveLimit)
}
return
}
// Build output in buffer for pager support (bd-jdz3)
var buf strings.Builder
if ui.IsAgentMode() {
@@ -891,7 +1002,9 @@ var listCmd = &cobra.Command{
// Handle pretty format (GH#654)
if prettyFormat {
displayPrettyList(issues, false)
// Load dependencies for tree structure
allDeps, _ := store.GetAllDependencyRecords(ctx)
displayPrettyListWithDeps(issues, false, allDeps)
// Show truncation hint if we hit the limit (GH#788)
if effectiveLimit > 0 && len(issues) == effectiveLimit {
fmt.Fprintf(os.Stderr, "\nShowing %d issues (use --limit 0 for all)\n", effectiveLimit)
@@ -1055,11 +1168,15 @@ func init() {
// Pretty and watch flags (GH#654)
listCmd.Flags().Bool("pretty", false, "Display issues in a tree format with status/priority symbols")
listCmd.Flags().Bool("tree", false, "Alias for --pretty: hierarchical tree format")
listCmd.Flags().BoolP("watch", "w", false, "Watch for changes and auto-update display (implies --pretty)")
// Pager control (bd-jdz3)
listCmd.Flags().Bool("no-pager", false, "Disable pager output")
// Ready filter: show only issues ready to be worked on (bd-ihu31)
listCmd.Flags().Bool("ready", false, "Show only ready issues (status=open, excludes hooked/in_progress/blocked/deferred)")
// Note: --json flag is defined as a persistent flag in main.go, not here
rootCmd.AddCommand(listCmd)
}

View File

@@ -57,8 +57,8 @@ func TestListFormatPrettyIssue_BadgesAndDefaults(t *testing.T) {
if !strings.Contains(out, "bd-1") || !strings.Contains(out, "Hello") {
t.Fatalf("unexpected output: %q", out)
}
if !strings.Contains(out, "[BUG]") {
t.Fatalf("expected BUG badge: %q", out)
if !strings.Contains(out, "[bug]") {
t.Fatalf("expected bug badge: %q", out)
}
}

View File

@@ -4,11 +4,13 @@ import (
"context"
"fmt"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime/pprof"
"runtime/trace"
"slices"
"strings"
"sync"
"syscall"
"time"
@@ -106,6 +108,62 @@ func isReadOnlyCommand(cmdName string) bool {
return readOnlyCommands[cmdName]
}
// getActorWithGit resolves the actor name recorded in audit trails.
// Resolution order: --actor flag > BD_ACTOR env > BEADS_ACTOR env >
// git config user.name > $USER > "unknown". Defaulting to the git identity
// gives developers sensible attribution with no extra configuration.
func getActorWithGit() string {
	// An explicit --actor flag always wins.
	if actor != "" {
		return actor
	}
	// Environment overrides: BD_ACTOR is primary; BEADS_ACTOR is an alias
	// kept for MCP/integration compatibility.
	for _, key := range []string{"BD_ACTOR", "BEADS_ACTOR"} {
		if v := os.Getenv(key); v != "" {
			return v
		}
	}
	// A git-native tool naturally defaults to the configured git identity.
	if out, err := exec.Command("git", "config", "user.name").Output(); err == nil {
		if name := strings.TrimSpace(string(out)); name != "" {
			return name
		}
	}
	// Last resorts: the system username, then a fixed placeholder.
	if user := os.Getenv("USER"); user != "" {
		return user
	}
	return "unknown"
}
// getOwner returns the human owner for CV attribution.
// Priority: GIT_AUTHOR_EMAIL env > git config user.email > "" (empty)
// This is the foundation for HOP CV (curriculum vitae) chains per Decision 008.
// Unlike actor (which tracks who executed), owner tracks the human responsible.
func getOwner() string {
// Check GIT_AUTHOR_EMAIL first - this is set during git commit operations
if authorEmail := os.Getenv("GIT_AUTHOR_EMAIL"); authorEmail != "" {
return authorEmail
}
// Fall back to git config user.email - the natural default
if out, err := exec.Command("git", "config", "user.email").Output(); err == nil {
if gitEmail := strings.TrimSpace(string(out)); gitEmail != "" {
return gitEmail
}
}
// Return empty if no email found (owner is optional)
return ""
}
func init() {
// Initialize viper configuration
if err := config.Initialize(); err != nil {
@@ -120,7 +178,7 @@ func init() {
// Register persistent flags
rootCmd.PersistentFlags().StringVar(&dbPath, "db", "", "Database path (default: auto-discover .beads/*.db)")
rootCmd.PersistentFlags().StringVar(&actor, "actor", "", "Actor name for audit trail (default: $BD_ACTOR or $USER)")
rootCmd.PersistentFlags().StringVar(&actor, "actor", "", "Actor name for audit trail (default: $BD_ACTOR, git user.name, $USER)")
rootCmd.PersistentFlags().BoolVar(&jsonOutput, "json", false, "Output in JSON format")
rootCmd.PersistentFlags().BoolVar(&noDaemon, "no-daemon", false, "Force direct storage mode, bypass daemon if running")
rootCmd.PersistentFlags().BoolVar(&noAutoFlush, "no-auto-flush", false, "Disable automatic JSONL sync after CRUD operations")
@@ -377,15 +435,7 @@ var rootCmd = &cobra.Command{
}
// Set actor for audit trail
if actor == "" {
if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
actor = bdActor
} else if user := os.Getenv("USER"); user != "" {
actor = user
} else {
actor = "unknown"
}
}
actor = getActorWithGit()
// Skip daemon and SQLite initialization - we're in memory mode
return
@@ -419,15 +469,7 @@ var rootCmd = &cobra.Command{
os.Exit(1)
}
// Set actor for audit trail
if actor == "" {
if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
actor = bdActor
} else if user := os.Getenv("USER"); user != "" {
actor = user
} else {
actor = "unknown"
}
}
actor = getActorWithGit()
return
}
}
@@ -442,7 +484,19 @@ var rootCmd = &cobra.Command{
isYamlOnlyConfigOp = true
}
}
if cmd.Name() != "import" && cmd.Name() != "setup" && !isYamlOnlyConfigOp {
// Allow read-only commands to auto-bootstrap from JSONL (GH#b09)
// This enables `bd --no-daemon show` after cold-start when DB is missing
canAutoBootstrap := false
if isReadOnlyCommand(cmd.Name()) && beadsDir != "" {
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
if _, err := os.Stat(jsonlPath); err == nil {
canAutoBootstrap = true
debug.Logf("cold-start bootstrap: JSONL exists, allowing auto-create for %s", cmd.Name())
}
}
if cmd.Name() != "import" && cmd.Name() != "setup" && !isYamlOnlyConfigOp && !canAutoBootstrap {
// No database found - provide context-aware error message
fmt.Fprintf(os.Stderr, "Error: no beads database found\n")
@@ -476,16 +530,7 @@ var rootCmd = &cobra.Command{
}
// Set actor for audit trail
// Priority: --actor flag > BD_ACTOR env > USER env > "unknown"
if actor == "" {
if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
actor = bdActor
} else if user := os.Getenv("USER"); user != "" {
actor = user
} else {
actor = "unknown"
}
}
actor = getActorWithGit()
// Track bd version changes
// Best-effort tracking - failures are silent
@@ -707,6 +752,7 @@ var rootCmd = &cobra.Command{
// Fall back to direct storage access
var err error
var needsBootstrap bool // Track if DB needs initial import (GH#b09)
if useReadOnly {
// Read-only mode: prevents file modifications (GH#804)
store, err = sqlite.NewReadOnlyWithTimeout(rootCtx, dbPath, lockTimeout)
@@ -715,6 +761,7 @@ var rootCmd = &cobra.Command{
// This handles the case where user runs "bd list" before "bd init"
debug.Logf("read-only open failed, falling back to read-write: %v", err)
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout)
needsBootstrap = true // New DB needs auto-import (GH#b09)
}
} else {
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout)
@@ -760,7 +807,9 @@ var rootCmd = &cobra.Command{
// Skip for delete command to prevent resurrection of deleted issues
// Skip if sync --dry-run to avoid modifying DB in dry-run mode
// Skip for read-only commands - they can't write anyway (GH#804)
if cmd.Name() != "import" && cmd.Name() != "delete" && autoImportEnabled && !useReadOnly {
// Exception: allow auto-import for read-only commands that fell back to
// read-write mode due to missing DB (needsBootstrap) - fixes GH#b09
if cmd.Name() != "import" && cmd.Name() != "delete" && autoImportEnabled && (!useReadOnly || needsBootstrap) {
// Check if this is sync command with --dry-run flag
if cmd.Name() == "sync" {
if dryRun, _ := cmd.Flags().GetBool("dry-run"); dryRun {

View File

@@ -64,17 +64,8 @@ func signalOrchestratorActivity() {
// Build command line from os.Args
cmdLine := strings.Join(os.Args, " ")
// Determine actor (use package-level var if set, else fall back to env)
actorName := actor
if actorName == "" {
if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
actorName = bdActor
} else if user := os.Getenv("USER"); user != "" {
actorName = user
} else {
actorName = "unknown"
}
}
// Determine actor (uses git config user.name as default)
actorName := getActorWithGit()
// Build activity signal
activity := struct {

View File

@@ -12,6 +12,7 @@ import (
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/syncbranch"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
@@ -933,8 +934,8 @@ func handleToSeparateBranch(branch string, dryRun bool) {
return
}
// Update sync.branch config
if err := store.SetConfig(ctx, "sync.branch", b); err != nil {
// Update sync.branch config (GH#923: use syncbranch.Set for validation)
if err := syncbranch.Set(ctx, store, b); err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
"error": "config_update_failed",

View File

@@ -155,12 +155,12 @@ func runMigrateSync(ctx context.Context, branchName string, dryRun, force bool)
fmt.Printf("→ Would create new branch '%s'\n", branchName)
}
// Use worktree-aware git directory detection
gitDir, err := git.GetGitDir()
// Use git-common-dir for worktree path to support bare repos and worktrees (GH#639)
gitCommonDir, err := git.GetGitCommonDir()
if err != nil {
return fmt.Errorf("not a git repository: %w", err)
}
worktreePath := filepath.Join(gitDir, "beads-worktrees", branchName)
worktreePath := filepath.Join(gitCommonDir, "beads-worktrees", branchName)
fmt.Printf("→ Would create worktree at: %s\n", worktreePath)
fmt.Println("\n=== END DRY RUN ===")
@@ -195,12 +195,12 @@ func runMigrateSync(ctx context.Context, branchName string, dryRun, force bool)
}
// Step 2: Create the worktree
// Use worktree-aware git directory detection
gitDir, err := git.GetGitDir()
// Use git-common-dir for worktree path to support bare repos and worktrees (GH#639)
gitCommonDir, err := git.GetGitCommonDir()
if err != nil {
return fmt.Errorf("not a git repository: %w", err)
}
worktreePath := filepath.Join(gitDir, "beads-worktrees", branchName)
worktreePath := filepath.Join(gitCommonDir, "beads-worktrees", branchName)
fmt.Printf("→ Creating worktree at %s...\n", worktreePath)
wtMgr := git.NewWorktreeManager(repoRoot)

View File

@@ -12,7 +12,7 @@ import (
)
var molBurnCmd = &cobra.Command{
Use: "burn <molecule-id>",
Use: "burn <molecule-id> [molecule-id...]",
Short: "Delete a molecule without creating a digest",
Long: `Burn a molecule, deleting it without creating a digest.
@@ -32,8 +32,9 @@ permanently lost. If you want to preserve a summary, use 'bd mol squash'.
Example:
bd mol burn bd-abc123 # Delete molecule with no trace
bd mol burn bd-abc123 --dry-run # Preview what would be deleted
bd mol burn bd-abc123 --force # Skip confirmation`,
Args: cobra.ExactArgs(1),
bd mol burn bd-abc123 --force # Skip confirmation
bd mol burn bd-a1 bd-b2 bd-c3 # Batch delete multiple wisps`,
Args: cobra.MinimumNArgs(1),
Run: runMolBurn,
}
@@ -44,6 +45,13 @@ type BurnResult struct {
DeletedCount int `json:"deleted_count"`
}
// BatchBurnResult holds aggregated results when burning multiple molecules
type BatchBurnResult struct {
	Results      []BurnResult `json:"results"`       // per-molecule outcomes, in processing order
	TotalDeleted int          `json:"total_deleted"` // total issues deleted across all molecules
	FailedCount  int          `json:"failed_count"`  // molecules that failed to resolve or load
}
func runMolBurn(cmd *cobra.Command, args []string) {
CheckReadonly("mol burn")
@@ -59,8 +67,18 @@ func runMolBurn(cmd *cobra.Command, args []string) {
dryRun, _ := cmd.Flags().GetBool("dry-run")
force, _ := cmd.Flags().GetBool("force")
moleculeID := args[0]
// Single ID: use original logic for backward compatibility
if len(args) == 1 {
burnSingleMolecule(ctx, args[0], dryRun, force)
return
}
// Multiple IDs: batch mode for efficiency
burnMultipleMolecules(ctx, args, dryRun, force)
}
// burnSingleMolecule handles the single molecule case (original behavior)
func burnSingleMolecule(ctx context.Context, moleculeID string, dryRun, force bool) {
// Resolve molecule ID in main store
resolvedID, err := utils.ResolvePartialID(ctx, store, moleculeID)
if err != nil {
@@ -85,6 +103,145 @@ func runMolBurn(cmd *cobra.Command, args []string) {
}
}
// burnMultipleMolecules handles batch deletion of multiple molecules efficiently.
// It resolves every ID first, splits them into wisps (ephemeral, batch-deleted
// in one call) and persistent molecules (deleted individually with tombstones),
// honors --dry-run and a single interactive confirmation, then reports results
// as JSON or human-readable text.
func burnMultipleMolecules(ctx context.Context, moleculeIDs []string, dryRun, force bool) {
	var wispIDs []string
	var persistentIDs []string
	var failedResolve []string
	// First pass: resolve and categorize all IDs.
	// Resolution failures are collected (not fatal) so the rest of the batch
	// can still be processed; warnings are suppressed in JSON mode.
	for _, moleculeID := range moleculeIDs {
		resolvedID, err := utils.ResolvePartialID(ctx, store, moleculeID)
		if err != nil {
			if !jsonOutput {
				fmt.Fprintf(os.Stderr, "Warning: failed to resolve %s: %v\n", moleculeID, err)
			}
			failedResolve = append(failedResolve, moleculeID)
			continue
		}
		issue, err := store.GetIssue(ctx, resolvedID)
		if err != nil {
			if !jsonOutput {
				fmt.Fprintf(os.Stderr, "Warning: failed to load %s: %v\n", resolvedID, err)
			}
			failedResolve = append(failedResolve, moleculeID)
			continue
		}
		// Ephemeral issues are wisps; everything else is persistent.
		if issue.Ephemeral {
			wispIDs = append(wispIDs, resolvedID)
		} else {
			persistentIDs = append(persistentIDs, resolvedID)
		}
	}
	// Nothing resolved at all: report and stop early.
	if len(wispIDs) == 0 && len(persistentIDs) == 0 {
		if jsonOutput {
			outputJSON(BatchBurnResult{FailedCount: len(failedResolve)})
		} else {
			fmt.Println("No valid molecules to burn")
		}
		return
	}
	// Dry run: preview the categorized plan without deleting anything.
	if dryRun {
		if !jsonOutput {
			fmt.Printf("\nDry run: would burn %d wisp(s) and %d persistent molecule(s)\n", len(wispIDs), len(persistentIDs))
			if len(wispIDs) > 0 {
				fmt.Printf("\nWisps to delete:\n")
				for _, id := range wispIDs {
					fmt.Printf(" - %s\n", id)
				}
			}
			if len(persistentIDs) > 0 {
				fmt.Printf("\nPersistent molecules to delete (will create tombstones):\n")
				for _, id := range persistentIDs {
					fmt.Printf(" - %s\n", id)
				}
			}
			if len(failedResolve) > 0 {
				fmt.Printf("\nFailed to resolve (%d):\n", len(failedResolve))
				for _, id := range failedResolve {
					fmt.Printf(" - %s\n", id)
				}
			}
		}
		return
	}
	// Confirm unless --force. JSON mode skips the prompt (non-interactive use).
	if !force && !jsonOutput {
		fmt.Printf("About to burn %d wisp(s) and %d persistent molecule(s)\n", len(wispIDs), len(persistentIDs))
		fmt.Printf("This will permanently delete all molecule data with no digest.\n")
		fmt.Printf("\nContinue? [y/N] ")
		var response string
		_, _ = fmt.Scanln(&response) // best-effort read; empty answer means "no"
		if response != "y" && response != "Y" {
			fmt.Println("Canceled.")
			return
		}
	}
	batchResult := BatchBurnResult{
		Results:     make([]BurnResult, 0),
		FailedCount: len(failedResolve),
	}
	// Batch delete all wisps in one call for efficiency.
	if len(wispIDs) > 0 {
		result, err := burnWisps(ctx, store, wispIDs)
		if err != nil {
			// Wisp burn failure is reported but does not abort the persistent pass.
			if !jsonOutput {
				fmt.Fprintf(os.Stderr, "Error burning wisps: %v\n", err)
			}
		} else {
			batchResult.TotalDeleted += result.DeletedCount
			batchResult.Results = append(batchResult.Results, *result)
		}
	}
	// Handle persistent molecules individually (they need subgraph loading).
	for _, id := range persistentIDs {
		subgraph, err := loadTemplateSubgraph(ctx, store, id)
		if err != nil {
			if !jsonOutput {
				fmt.Fprintf(os.Stderr, "Warning: failed to load subgraph for %s: %v\n", id, err)
			}
			batchResult.FailedCount++
			continue
		}
		var issueIDs []string
		for _, issue := range subgraph.Issues {
			issueIDs = append(issueIDs, issue.ID)
		}
		// Use deleteBatch for persistent molecules (creates tombstones).
		deleteBatch(nil, issueIDs, true, false, false, false, false, "mol burn")
		batchResult.TotalDeleted += len(issueIDs)
		batchResult.Results = append(batchResult.Results, BurnResult{
			MoleculeID:   id,
			DeletedIDs:   issueIDs,
			DeletedCount: len(issueIDs),
		})
	}
	// Schedule auto-flush so the JSONL export picks up the deletions.
	markDirtyAndScheduleFlush()
	if jsonOutput {
		outputJSON(batchResult)
		return
	}
	fmt.Printf("%s Burned %d molecule(s): %d issues deleted\n", ui.RenderPass("✓"), len(wispIDs)+len(persistentIDs), batchResult.TotalDeleted)
	if batchResult.FailedCount > 0 {
		fmt.Printf(" %d failed\n", batchResult.FailedCount)
	}
}
// burnWispMolecule handles wisp deletion (no tombstones, ephemeral-only)
func burnWispMolecule(ctx context.Context, resolvedID string, dryRun, force bool) {
// Load the molecule subgraph

225
cmd/bd/mol_ready_gated.go Normal file
View File

@@ -0,0 +1,225 @@
package main
import (
"context"
"fmt"
"os"
"sort"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
)
// GatedMolecule represents a molecule ready for gate-resume dispatch
type GatedMolecule struct {
	MoleculeID    string       `json:"molecule_id"`    // ID of the molecule waiting at a gate
	MoleculeTitle string       `json:"molecule_title"` // human-readable molecule title
	ClosedGate    *types.Issue `json:"closed_gate"`    // the gate bead that has closed (condition satisfied)
	ReadyStep     *types.Issue `json:"ready_step"`     // the step unblocked by the gate closing
}
// GatedReadyOutput is the JSON output for bd mol ready --gated
type GatedReadyOutput struct {
	Molecules []*GatedMolecule `json:"molecules"` // gate-ready molecules (never null in output; normalized to [])
	Count     int              `json:"count"`     // convenience count, equals len(Molecules)
}
// molReadyGatedCmd implements "bd mol ready --gated": discovery-based resume
// of molecules whose gate bead has closed and whose next step is unblocked.
var molReadyGatedCmd = &cobra.Command{
	Use:   "ready --gated",
	Short: "Find molecules ready for gate-resume dispatch",
	Long: `Find molecules where a gate has closed and the workflow is ready to resume.
This command discovers molecules waiting at a gate step where:
1. The molecule has a gate bead that blocks a step
2. The gate bead is now closed (condition satisfied)
3. The blocked step is now ready to proceed
4. No agent currently has this molecule hooked
This enables discovery-based resume without explicit waiter tracking.
The Deacon patrol uses this to find and dispatch gate-ready molecules.
Examples:
  bd mol ready --gated        # Find all gate-ready molecules
  bd mol ready --gated --json # JSON output for automation`,
	Run: runMolReadyGated,
}
// runMolReadyGated implements gate-resume discovery for `bd mol ready --gated`
// and `bd ready --gated`. It requires a direct store handle; the daemon path
// is rejected with a hint to rerun with --no-daemon.
func runMolReadyGated(cmd *cobra.Command, args []string) {
	ctx := rootCtx

	// Direct database access is mandatory for this query.
	if store == nil {
		switch {
		case daemonClient != nil:
			fmt.Fprintf(os.Stderr, "Error: mol ready --gated requires direct database access\n")
			fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon mol ready --gated\n")
		default:
			fmt.Fprintf(os.Stderr, "Error: no database connection\n")
		}
		os.Exit(1)
	}

	// Run the discovery query.
	gated, err := findGateReadyMolecules(ctx, store)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}

	if jsonOutput {
		out := GatedReadyOutput{Molecules: gated, Count: len(gated)}
		if out.Molecules == nil {
			// Serialize as [] rather than null for automation consumers.
			out.Molecules = []*GatedMolecule{}
		}
		outputJSON(out)
		return
	}

	// Human-readable output.
	if len(gated) == 0 {
		fmt.Printf("\n%s No molecules ready for gate-resume dispatch\n\n", ui.RenderWarn(""))
		return
	}
	fmt.Printf("\n%s Molecules ready for gate-resume dispatch (%d):\n\n",
		ui.RenderAccent(""), len(gated))
	for idx, m := range gated {
		fmt.Printf("%d. %s: %s\n", idx+1, ui.RenderID(m.MoleculeID), m.MoleculeTitle)
		if m.ClosedGate != nil {
			fmt.Printf(" Gate closed: %s (%s)\n", m.ClosedGate.ID, m.ClosedGate.AwaitType)
		}
		if m.ReadyStep != nil {
			fmt.Printf(" Ready step: %s - %s\n", m.ReadyStep.ID, m.ReadyStep.Title)
		}
		fmt.Println()
	}
	fmt.Println("To dispatch a molecule:")
	fmt.Println(" gt sling <agent> --mol <molecule-id>")
}
// findGateReadyMolecules finds molecules where a gate has closed and work can resume.
//
// Logic:
// 1. Find all closed gate beads
// 2. For each closed gate, find what step it was blocking
// 3. Check if that step is now ready (unblocked)
// 4. Find the parent molecule
// 5. Filter out molecules that are already hooked by someone
func findGateReadyMolecules(ctx context.Context, s storage.Storage) ([]*GatedMolecule, error) {
	// Step 1: every closed gate bead is a candidate resume point.
	gateType := types.TypeGate
	closedStatus := types.StatusClosed
	gates, err := s.SearchIssues(ctx, "", types.IssueFilter{
		IssueType: &gateType,
		Status:    &closedStatus,
		Limit:     100,
	})
	if err != nil {
		return nil, fmt.Errorf("searching closed gates: %w", err)
	}
	if len(gates) == 0 {
		return nil, nil
	}

	// Step 2: index the currently-ready work so step readiness is an O(1) check.
	readyWork, err := s.GetReadyWork(ctx, types.WorkFilter{Limit: 500})
	if err != nil {
		return nil, fmt.Errorf("getting ready work: %w", err)
	}
	readySet := make(map[string]bool, len(readyWork))
	for _, w := range readyWork {
		readySet[w.ID] = true
	}

	// Step 3: collect molecules that already have an agent attached.
	hookedStatus := types.StatusHooked
	hookedIssues, err := s.SearchIssues(ctx, "", types.IssueFilter{
		Status: &hookedStatus,
		Limit:  100,
	})
	if err != nil {
		// Non-fatal: proceed without the hooked filter.
		hookedIssues = nil
	}
	hooked := make(map[string]bool)
	for _, hi := range hookedIssues {
		// The hooked issue may itself be a molecule root.
		hooked[hi.ID] = true
		// A hooked step also claims its parent molecule.
		if parent := findParentMolecule(ctx, s, hi.ID); parent != "" {
			hooked[parent] = true
		}
	}

	// Step 4: walk each closed gate's dependents; a ready dependent inside an
	// unhooked molecule makes that molecule dispatchable.
	byMolecule := make(map[string]*GatedMolecule)
	for _, gate := range gates {
		// GetDependents returns issues where depends_on_id = gate.ID.
		dependents, err := s.GetDependents(ctx, gate.ID)
		if err != nil {
			continue
		}
		for _, dep := range dependents {
			if !readySet[dep.ID] {
				continue // step still blocked by something else
			}
			molID := findParentMolecule(ctx, s, dep.ID)
			if molID == "" || hooked[molID] {
				continue
			}
			if _, seen := byMolecule[molID]; seen {
				continue // dedupe: first gate/step pair wins per molecule
			}
			molIssue, err := s.GetIssue(ctx, molID)
			if err != nil || molIssue == nil {
				continue
			}
			byMolecule[molID] = &GatedMolecule{
				MoleculeID:    molID,
				MoleculeTitle: molIssue.Title,
				ClosedGate:    gate,
				ReadyStep:     dep,
			}
		}
	}

	// Deterministic ordering for output and tests. Keep the nil slice when
	// there were no matches (callers distinguish via len only, but the JSON
	// path relies on replacing nil explicitly).
	var molecules []*GatedMolecule
	for _, m := range byMolecule {
		molecules = append(molecules, m)
	}
	sort.Slice(molecules, func(i, j int) bool {
		return molecules[i].MoleculeID < molecules[j].MoleculeID
	})
	return molecules, nil
}
// init registers the gated-ready query as a `mol` subcommand.
func init() {
	// Note: --gated flag is registered in ready.go
	// Also add as a subcommand under mol for discoverability
	molCmd.AddCommand(molReadyGatedCmd)
}

View File

@@ -0,0 +1,439 @@
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// setupGatedTestDB creates a temporary file-based test database.
// It returns the open store and a cleanup function that closes the store
// and removes the backing directory.
func setupGatedTestDB(t *testing.T) (*sqlite.SQLiteStorage, func()) {
	t.Helper()
	ctx := context.Background()

	tmpDir, err := os.MkdirTemp("", "bd-test-gated-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}

	store, err := sqlite.New(ctx, filepath.Join(tmpDir, "test.db"))
	if err != nil {
		os.RemoveAll(tmpDir)
		t.Fatalf("Failed to create test database: %v", err)
	}

	// Beads requires an issue prefix before issues can be created.
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		store.Close()
		os.RemoveAll(tmpDir)
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}

	return store, func() {
		store.Close()
		os.RemoveAll(tmpDir)
	}
}
// =============================================================================
// mol ready --gated Tests (bd-lhalq: Gate-resume discovery)
// =============================================================================
// TestFindGateReadyMolecules_NoGates tests finding gate-ready molecules when no gates exist
func TestFindGateReadyMolecules_NoGates(t *testing.T) {
	ctx := context.Background()
	store, cleanup := setupGatedTestDB(t)
	defer cleanup()

	now := time.Now()
	// A plain molecule with one step and no gate beads anywhere.
	parent := &types.Issue{
		ID:        "test-mol-001",
		Title:     "Test Molecule",
		IssueType: types.TypeEpic,
		Status:    types.StatusInProgress,
		CreatedAt: now,
		UpdatedAt: now,
	}
	child := &types.Issue{
		ID:        "test-mol-001.step1",
		Title:     "Step 1",
		IssueType: types.TypeTask,
		Status:    types.StatusOpen,
		CreatedAt: now,
		UpdatedAt: now,
	}
	if err := store.CreateIssue(ctx, parent, "test"); err != nil {
		t.Fatalf("Failed to create molecule: %v", err)
	}
	if err := store.CreateIssue(ctx, child, "test"); err != nil {
		t.Fatalf("Failed to create step: %v", err)
	}

	// Parent-child relationship only; no blocking gate.
	link := &types.Dependency{IssueID: child.ID, DependsOnID: parent.ID, Type: types.DepParentChild}
	if err := store.AddDependency(ctx, link, "test"); err != nil {
		t.Fatalf("Failed to add dependency: %v", err)
	}

	got, err := findGateReadyMolecules(ctx, store)
	if err != nil {
		t.Fatalf("findGateReadyMolecules failed: %v", err)
	}
	if len(got) != 0 {
		t.Errorf("Expected 0 gate-ready molecules, got %d", len(got))
	}
}
// TestFindGateReadyMolecules_ClosedGate tests finding molecules with closed gates
func TestFindGateReadyMolecules_ClosedGate(t *testing.T) {
	ctx := context.Background()
	store, cleanup := setupGatedTestDB(t)
	defer cleanup()

	create := func(is *types.Issue, what string) {
		t.Helper()
		if err := store.CreateIssue(ctx, is, "test"); err != nil {
			t.Fatalf("Failed to create %s: %v", what, err)
		}
	}
	addDep := func(d *types.Dependency, what string) {
		t.Helper()
		if err := store.AddDependency(ctx, d, "test"); err != nil {
			t.Fatalf("Failed to add %s: %v", what, err)
		}
	}

	now := time.Now()
	// Structure under test:
	//   mol-002
	//   ├── gate-await-ci (closed)
	//   └── step1 (blocked by gate-await-ci, should become ready)
	mol := &types.Issue{
		ID:        "test-mol-002",
		Title:     "Test Molecule with Gate",
		IssueType: types.TypeEpic,
		Status:    types.StatusInProgress,
		CreatedAt: now,
		UpdatedAt: now,
	}
	gate := &types.Issue{
		ID:        "test-mol-002.gate-await-ci",
		Title:     "Gate: gh:run ci-workflow",
		IssueType: types.TypeGate,
		Status:    types.StatusClosed, // the gate's condition is satisfied
		AwaitType: "gh:run",
		AwaitID:   "ci-workflow",
		CreatedAt: now,
		UpdatedAt: now,
	}
	step := &types.Issue{
		ID:        "test-mol-002.step1",
		Title:     "Deploy after CI",
		IssueType: types.TypeTask,
		Status:    types.StatusOpen,
		CreatedAt: now,
		UpdatedAt: now,
	}
	create(mol, "molecule")
	create(gate, "gate")
	create(step, "step")
	addDep(&types.Dependency{IssueID: gate.ID, DependsOnID: mol.ID, Type: types.DepParentChild}, "gate parent-child")
	addDep(&types.Dependency{IssueID: step.ID, DependsOnID: mol.ID, Type: types.DepParentChild}, "step parent-child")
	// step depends on gate (the gate blocks the step)
	addDep(&types.Dependency{IssueID: step.ID, DependsOnID: gate.ID, Type: types.DepBlocks}, "blocking dependency")

	got, err := findGateReadyMolecules(ctx, store)
	if err != nil {
		t.Fatalf("findGateReadyMolecules failed: %v", err)
	}
	if len(got) != 1 {
		t.Errorf("Expected 1 gate-ready molecule, got %d", len(got))
		return
	}

	found := got[0]
	if found.MoleculeID != mol.ID {
		t.Errorf("Expected molecule ID %s, got %s", mol.ID, found.MoleculeID)
	}
	switch {
	case found.ClosedGate == nil:
		t.Error("Expected closed gate to be set")
	case found.ClosedGate.ID != gate.ID:
		t.Errorf("Expected closed gate ID %s, got %s", gate.ID, found.ClosedGate.ID)
	}
	switch {
	case found.ReadyStep == nil:
		t.Error("Expected ready step to be set")
	case found.ReadyStep.ID != step.ID:
		t.Errorf("Expected ready step ID %s, got %s", step.ID, found.ReadyStep.ID)
	}
}
// TestFindGateReadyMolecules_OpenGate tests that open gates don't trigger ready
func TestFindGateReadyMolecules_OpenGate(t *testing.T) {
	ctx := context.Background()
	store, cleanup := setupGatedTestDB(t)
	defer cleanup()

	create := func(is *types.Issue, what string) {
		t.Helper()
		if err := store.CreateIssue(ctx, is, "test"); err != nil {
			t.Fatalf("Failed to create %s: %v", what, err)
		}
	}
	addDep := func(d *types.Dependency, what string) {
		t.Helper()
		if err := store.AddDependency(ctx, d, "test"); err != nil {
			t.Fatalf("Failed to add %s: %v", what, err)
		}
	}

	now := time.Now()
	// Same shape as the closed-gate test, but the gate is still OPEN,
	// so nothing should be reported as resumable.
	mol := &types.Issue{
		ID:        "test-mol-003",
		Title:     "Test Molecule with Open Gate",
		IssueType: types.TypeEpic,
		Status:    types.StatusInProgress,
		CreatedAt: now,
		UpdatedAt: now,
	}
	gate := &types.Issue{
		ID:        "test-mol-003.gate-await-ci",
		Title:     "Gate: gh:run ci-workflow",
		IssueType: types.TypeGate,
		Status:    types.StatusOpen, // still waiting on the external condition
		AwaitType: "gh:run",
		AwaitID:   "ci-workflow",
		CreatedAt: now,
		UpdatedAt: now,
	}
	step := &types.Issue{
		ID:        "test-mol-003.step1",
		Title:     "Deploy after CI",
		IssueType: types.TypeTask,
		Status:    types.StatusOpen,
		CreatedAt: now,
		UpdatedAt: now,
	}
	create(mol, "molecule")
	create(gate, "gate")
	create(step, "step")
	addDep(&types.Dependency{IssueID: gate.ID, DependsOnID: mol.ID, Type: types.DepParentChild}, "gate parent-child")
	addDep(&types.Dependency{IssueID: step.ID, DependsOnID: mol.ID, Type: types.DepParentChild}, "step parent-child")
	addDep(&types.Dependency{IssueID: step.ID, DependsOnID: gate.ID, Type: types.DepBlocks}, "blocking dependency")

	got, err := findGateReadyMolecules(ctx, store)
	if err != nil {
		t.Fatalf("findGateReadyMolecules failed: %v", err)
	}
	if len(got) != 0 {
		t.Errorf("Expected 0 gate-ready molecules (gate is open), got %d", len(got))
	}
}
// TestFindGateReadyMolecules_HookedMolecule tests that hooked molecules are filtered out
func TestFindGateReadyMolecules_HookedMolecule(t *testing.T) {
	ctx := context.Background()
	store, cleanup := setupGatedTestDB(t)
	defer cleanup()

	create := func(is *types.Issue, what string) {
		t.Helper()
		if err := store.CreateIssue(ctx, is, "test"); err != nil {
			t.Fatalf("Failed to create %s: %v", what, err)
		}
	}
	addDep := func(d *types.Dependency, what string) {
		t.Helper()
		if err := store.AddDependency(ctx, d, "test"); err != nil {
			t.Fatalf("Failed to add %s: %v", what, err)
		}
	}

	now := time.Now()
	// The gate has closed, but the molecule is already hooked by an agent,
	// so discovery must skip it.
	mol := &types.Issue{
		ID:        "test-mol-004",
		Title:     "Test Hooked Molecule",
		IssueType: types.TypeEpic,
		Status:    types.StatusHooked, // an agent already owns this molecule
		CreatedAt: now,
		UpdatedAt: now,
	}
	gate := &types.Issue{
		ID:        "test-mol-004.gate-await-ci",
		Title:     "Gate: gh:run ci-workflow",
		IssueType: types.TypeGate,
		Status:    types.StatusClosed,
		AwaitType: "gh:run",
		AwaitID:   "ci-workflow",
		CreatedAt: now,
		UpdatedAt: now,
	}
	step := &types.Issue{
		ID:        "test-mol-004.step1",
		Title:     "Deploy after CI",
		IssueType: types.TypeTask,
		Status:    types.StatusOpen,
		CreatedAt: now,
		UpdatedAt: now,
	}
	create(mol, "molecule")
	create(gate, "gate")
	create(step, "step")
	addDep(&types.Dependency{IssueID: gate.ID, DependsOnID: mol.ID, Type: types.DepParentChild}, "gate parent-child")
	addDep(&types.Dependency{IssueID: step.ID, DependsOnID: mol.ID, Type: types.DepParentChild}, "step parent-child")
	addDep(&types.Dependency{IssueID: step.ID, DependsOnID: gate.ID, Type: types.DepBlocks}, "blocking dependency")

	got, err := findGateReadyMolecules(ctx, store)
	if err != nil {
		t.Fatalf("findGateReadyMolecules failed: %v", err)
	}
	if len(got) != 0 {
		t.Errorf("Expected 0 gate-ready molecules (molecule is hooked), got %d", len(got))
	}
}
// TestFindGateReadyMolecules_MultipleGates tests handling multiple closed gates
func TestFindGateReadyMolecules_MultipleGates(t *testing.T) {
	ctx := context.Background()
	store, cleanup := setupGatedTestDB(t)
	defer cleanup()

	// Two independent molecules, each parked behind its own closed gate.
	for n := 1; n <= 2; n++ {
		base := fmt.Sprintf("test-multi-%d", n)
		now := time.Now()
		root := &types.Issue{
			ID:        base,
			Title:     fmt.Sprintf("Multi Gate Mol %d", n),
			IssueType: types.TypeEpic,
			Status:    types.StatusInProgress,
			CreatedAt: now,
			UpdatedAt: now,
		}
		gate := &types.Issue{
			ID:        base + ".gate",
			Title:     "Gate: gh:run",
			IssueType: types.TypeGate,
			Status:    types.StatusClosed,
			AwaitType: "gh:run",
			CreatedAt: now,
			UpdatedAt: now,
		}
		step := &types.Issue{
			ID:        base + ".step1",
			Title:     "Step 1",
			IssueType: types.TypeTask,
			Status:    types.StatusOpen,
			CreatedAt: now,
			UpdatedAt: now,
		}
		if err := store.CreateIssue(ctx, root, "test"); err != nil {
			t.Fatalf("Failed to create molecule %d: %v", n, err)
		}
		if err := store.CreateIssue(ctx, gate, "test"); err != nil {
			t.Fatalf("Failed to create gate %d: %v", n, err)
		}
		if err := store.CreateIssue(ctx, step, "test"); err != nil {
			t.Fatalf("Failed to create step %d: %v", n, err)
		}

		// Wire up parent-child links plus the gate-blocks-step edge.
		edges := []struct {
			d    *types.Dependency
			what string
		}{
			{&types.Dependency{IssueID: gate.ID, DependsOnID: root.ID, Type: types.DepParentChild}, "gate parent-child"},
			{&types.Dependency{IssueID: step.ID, DependsOnID: root.ID, Type: types.DepParentChild}, "step parent-child"},
			{&types.Dependency{IssueID: step.ID, DependsOnID: gate.ID, Type: types.DepBlocks}, "blocking dep"},
		}
		for _, e := range edges {
			if err := store.AddDependency(ctx, e.d, "test"); err != nil {
				t.Fatalf("Failed to add %s %d: %v", e.what, n, err)
			}
		}
	}

	got, err := findGateReadyMolecules(ctx, store)
	if err != nil {
		t.Fatalf("findGateReadyMolecules failed: %v", err)
	}
	if len(got) != 2 {
		t.Errorf("Expected 2 gate-ready molecules, got %d", len(got))
	}
}

View File

@@ -23,8 +23,18 @@ var readyCmd = &cobra.Command{
Use --mol to filter to a specific molecule's steps:
bd ready --mol bd-patrol # Show ready steps within molecule
Use --gated to find molecules ready for gate-resume dispatch:
bd ready --gated # Find molecules where a gate closed
This is useful for agents executing molecules to see which steps can run next.`,
Run: func(cmd *cobra.Command, args []string) {
// Handle --gated flag (gate-resume discovery)
gated, _ := cmd.Flags().GetBool("gated")
if gated {
runMolReadyGated(cmd, args)
return
}
// Handle molecule-specific ready query
molID, _ := cmd.Flags().GetString("mol")
if molID != "" {
@@ -451,6 +461,7 @@ func init() {
readyCmd.Flags().String("mol-type", "", "Filter by molecule type: swarm, patrol, or work")
readyCmd.Flags().Bool("pretty", false, "Display issues in a tree format with status/priority symbols")
readyCmd.Flags().Bool("include-deferred", false, "Include issues with future defer_until timestamps")
readyCmd.Flags().Bool("gated", false, "Find molecules ready for gate-resume dispatch")
rootCmd.AddCommand(readyCmd)
blockedCmd.Flags().String("parent", "", "Filter to descendants of this bead/epic")
rootCmd.AddCommand(blockedCmd)

View File

@@ -283,7 +283,7 @@ func parseConflicts(content string) ([]conflictRegion, []string, error) {
}
// resolveConflict resolves a single conflict region using merge semantics
func resolveConflict(conflict conflictRegion, num int) ([]string, conflictResolutionInfo) {
func resolveConflict(conflict conflictRegion, _ int) ([]string, conflictResolutionInfo) {
info := conflictResolutionInfo{
LineRange: fmt.Sprintf("%d-%d", conflict.StartLine, conflict.EndLine),
LeftLabel: conflict.LeftLabel,

View File

@@ -96,6 +96,14 @@ func installClaude(env claudeEnv, project bool, stealth bool) error {
settings["hooks"] = hooks
}
// GH#955: Clean up any null values left by previous buggy removal
// Claude Code expects arrays, not null values
for key, val := range hooks {
if val == nil {
delete(hooks, key)
}
}
command := "bd prime"
if stealth {
command = "bd prime --stealth"
@@ -272,7 +280,8 @@ func removeHookCommand(hooks map[string]interface{}, event, command string) {
}
// Filter out bd prime hooks
var filtered []interface{}
// Initialize as empty slice (not nil) to avoid JSON null serialization
filtered := make([]interface{}, 0, len(eventHooks))
for _, hook := range eventHooks {
hookMap, ok := hook.(map[string]interface{})
if !ok {
@@ -304,7 +313,14 @@ func removeHookCommand(hooks map[string]interface{}, event, command string) {
}
}
hooks[event] = filtered
// GH#955: Delete the key entirely if no hooks remain, rather than
// leaving an empty array. This is cleaner and avoids potential
// issues with empty arrays in settings.
if len(filtered) == 0 {
delete(hooks, event)
} else {
hooks[event] = filtered
}
}
// hasBeadsHooks checks if a settings file has bd prime hooks

View File

@@ -281,6 +281,94 @@ func TestRemoveHookCommand(t *testing.T) {
}
}
// TestRemoveHookCommandNoNull verifies that removing all hooks deletes the key
// instead of setting it to null. GH#955: null values in hooks cause Claude Code to fail.
func TestRemoveHookCommandNoNull(t *testing.T) {
hooks := map[string]interface{}{
"SessionStart": []interface{}{
map[string]interface{}{
"matcher": "",
"hooks": []interface{}{
map[string]interface{}{
"type": "command",
"command": "bd prime",
},
},
},
},
}
removeHookCommand(hooks, "SessionStart", "bd prime")
// Key should be deleted, not set to null or empty array
if _, exists := hooks["SessionStart"]; exists {
t.Error("Expected SessionStart key to be deleted after removing all hooks")
}
// Verify JSON serialization doesn't produce null
data, err := json.Marshal(hooks)
if err != nil {
t.Fatalf("marshal: %v", err)
}
if strings.Contains(string(data), "null") {
t.Errorf("JSON contains null: %s", data)
}
}
// TestInstallClaudeCleanupNullHooks verifies that install cleans up existing null values.
// GH#955: null values left by previous buggy removal cause Claude Code to fail.
func TestInstallClaudeCleanupNullHooks(t *testing.T) {
env, stdout, _ := newClaudeTestEnv(t)
// Create settings file with null hooks (simulating the bug)
settingsPath := globalSettingsPath(env.homeDir)
writeSettings(t, settingsPath, map[string]interface{}{
"hooks": map[string]interface{}{
"SessionStart": nil,
"PreCompact": nil,
},
})
// Install should clean up null values and add proper hooks
err := installClaude(env, false, false)
if err != nil {
t.Fatalf("install failed: %v", err)
}
// Verify hooks were properly added
if !strings.Contains(stdout.String(), "Registered SessionStart hook") {
t.Error("Expected SessionStart hook to be registered")
}
// Read back the file and verify no null values
data, err := env.readFile(settingsPath)
if err != nil {
t.Fatalf("read settings: %v", err)
}
if strings.Contains(string(data), "null") {
t.Errorf("Settings file still contains null: %s", data)
}
// Verify it parses as valid Claude settings
var settings map[string]interface{}
if err := json.Unmarshal(data, &settings); err != nil {
t.Fatalf("parse settings: %v", err)
}
hooks, ok := settings["hooks"].(map[string]interface{})
if !ok {
t.Fatal("hooks section missing")
}
for _, event := range []string{"SessionStart", "PreCompact"} {
eventHooks, ok := hooks[event].([]interface{})
if !ok {
t.Errorf("%s should be an array, not nil or missing", event)
}
if len(eventHooks) == 0 {
t.Errorf("%s should have hooks", event)
}
}
}
func TestHasBeadsHooks(t *testing.T) {
tmpDir := t.TempDir()

View File

@@ -131,14 +131,14 @@ var showCmd = &cobra.Command{
allDetails = append(allDetails, details)
} else {
if displayIdx > 0 {
fmt.Println("\n" + strings.Repeat("─", 60))
fmt.Println("\n" + ui.RenderMuted(strings.Repeat("─", 60)))
}
fmt.Printf("\n%s: %s\n", ui.RenderAccent(issue.ID), issue.Title)
fmt.Printf("Status: %s\n", issue.Status)
fmt.Printf("Priority: P%d\n", issue.Priority)
fmt.Printf("Type: %s\n", issue.IssueType)
// Tufte-aligned header: STATUS_ICON ID · Title [Priority · STATUS]
fmt.Printf("\n%s\n", formatIssueHeader(issue))
// Metadata: Owner · Type | Created · Updated
fmt.Println(formatIssueMetadata(issue))
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("DESCRIPTION"), ui.RenderMarkdown(issue.Description))
}
fmt.Println()
displayIdx++
@@ -188,54 +188,17 @@ var showCmd = &cobra.Command{
}
if displayIdx > 0 {
fmt.Println("\n" + strings.Repeat("─", 60))
fmt.Println("\n" + ui.RenderMuted(strings.Repeat("─", 60)))
}
displayIdx++
// Format output (same as direct mode below)
tierEmoji := ""
statusSuffix := ""
switch issue.CompactionLevel {
case 1:
tierEmoji = " 🗜️"
statusSuffix = " (compacted L1)"
case 2:
tierEmoji = " 📦"
statusSuffix = " (compacted L2)"
}
// Tufte-aligned header: STATUS_ICON ID · Title [Priority · STATUS]
fmt.Printf("\n%s\n", formatIssueHeader(issue))
fmt.Printf("\n%s: %s%s\n", ui.RenderAccent(issue.ID), issue.Title, tierEmoji)
fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix)
if issue.CloseReason != "" {
fmt.Printf("Close reason: %s\n", issue.CloseReason)
}
if issue.ClosedBySession != "" {
fmt.Printf("Closed by session: %s\n", issue.ClosedBySession)
}
fmt.Printf("Priority: P%d\n", issue.Priority)
fmt.Printf("Type: %s\n", issue.IssueType)
if issue.Assignee != "" {
fmt.Printf("Assignee: %s\n", issue.Assignee)
}
if issue.EstimatedMinutes != nil {
fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
if issue.CreatedBy != "" {
fmt.Printf("Created by: %s\n", issue.CreatedBy)
}
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
if issue.DueAt != nil {
fmt.Printf("Due: %s\n", issue.DueAt.Format("2006-01-02 15:04"))
}
if issue.DeferUntil != nil {
fmt.Printf("Deferred until: %s\n", issue.DeferUntil.Format("2006-01-02 15:04"))
}
if issue.ExternalRef != nil && *issue.ExternalRef != "" {
fmt.Printf("External Ref: %s\n", *issue.ExternalRef)
}
// Metadata: Owner · Type | Created · Updated
fmt.Println(formatIssueMetadata(issue))
// Show compaction status
// Compaction info (if applicable)
if issue.CompactionLevel > 0 {
fmt.Println()
if issue.OriginalSize > 0 {
@@ -243,47 +206,40 @@ var showCmd = &cobra.Command{
saved := issue.OriginalSize - currentSize
if saved > 0 {
reduction := float64(saved) / float64(issue.OriginalSize) * 100
fmt.Printf("📊 Original: %d bytes | Compressed: %d bytes (%.0f%% reduction)\n",
fmt.Printf("📊 %d → %d bytes (%.0f%% reduction)\n",
issue.OriginalSize, currentSize, reduction)
}
}
tierEmoji2 := "🗜️"
if issue.CompactionLevel == 2 {
tierEmoji2 = "📦"
}
compactedDate := ""
if issue.CompactedAt != nil {
compactedDate = issue.CompactedAt.Format("2006-01-02")
}
fmt.Printf("%s Compacted: %s (Tier %d)\n", tierEmoji2, compactedDate, issue.CompactionLevel)
}
// Content sections
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("DESCRIPTION"), ui.RenderMarkdown(issue.Description))
}
if issue.Design != "" {
fmt.Printf("\nDesign:\n%s\n", issue.Design)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("DESIGN"), ui.RenderMarkdown(issue.Design))
}
if issue.Notes != "" {
fmt.Printf("\nNotes:\n%s\n", issue.Notes)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("NOTES"), ui.RenderMarkdown(issue.Notes))
}
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("ACCEPTANCE CRITERIA"), ui.RenderMarkdown(issue.AcceptanceCriteria))
}
if len(details.Labels) > 0 {
fmt.Printf("\nLabels: %v\n", details.Labels)
fmt.Printf("\n%s %s\n", ui.RenderBold("LABELS:"), strings.Join(details.Labels, ", "))
}
// Dependencies with semantic colors
if len(details.Dependencies) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(details.Dependencies))
fmt.Printf("\n%s\n", ui.RenderBold("DEPENDS ON"))
for _, dep := range details.Dependencies {
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
fmt.Println(formatDependencyLine("→", dep))
}
}
// Dependents grouped by type with semantic colors
if len(details.Dependents) > 0 {
// Group by dependency type for clarity
var blocks, children, related, discovered []*types.IssueWithDependencyMetadata
for _, dep := range details.Dependents {
switch dep.DependencyType {
@@ -301,37 +257,38 @@ var showCmd = &cobra.Command{
}
if len(children) > 0 {
fmt.Printf("\nChildren (%d):\n", len(children))
fmt.Printf("\n%s\n", ui.RenderBold("CHILDREN"))
for _, dep := range children {
fmt.Printf(" ↳ %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("↳", dep))
}
}
if len(blocks) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(blocks))
fmt.Printf("\n%s\n", ui.RenderBold("BLOCKS"))
for _, dep := range blocks {
fmt.Printf(" ← %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("←", dep))
}
}
if len(related) > 0 {
fmt.Printf("\nRelated (%d):\n", len(related))
fmt.Printf("\n%s\n", ui.RenderBold("RELATED"))
for _, dep := range related {
fmt.Printf(" ↔ %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("↔", dep))
}
}
if len(discovered) > 0 {
fmt.Printf("\nDiscovered (%d):\n", len(discovered))
fmt.Printf("\n%s\n", ui.RenderBold("DISCOVERED"))
for _, dep := range discovered {
fmt.Printf(" ◊ %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("◊", dep))
}
}
}
if len(details.Comments) > 0 {
fmt.Printf("\nComments (%d):\n", len(details.Comments))
fmt.Printf("\n%s\n", ui.RenderBold("COMMENTS"))
for _, comment := range details.Comments {
fmt.Printf(" [%s] %s\n", comment.Author, comment.CreatedAt.Format("2006-01-02 15:04"))
commentLines := strings.Split(comment.Text, "\n")
for _, line := range commentLines {
fmt.Printf(" %s %s\n", ui.RenderMuted(comment.CreatedAt.Format("2006-01-02")), comment.Author)
rendered := ui.RenderMarkdown(comment.Text)
// TrimRight removes trailing newlines that Glamour adds, preventing extra blank lines
for _, line := range strings.Split(strings.TrimRight(rendered, "\n"), "\n") {
fmt.Printf(" %s\n", line)
}
}
@@ -418,102 +375,55 @@ var showCmd = &cobra.Command{
}
if idx > 0 {
fmt.Println("\n" + strings.Repeat("─", 60))
fmt.Println("\n" + ui.RenderMuted(strings.Repeat("─", 60)))
}
// Add compaction emoji to title line
tierEmoji := ""
statusSuffix := ""
switch issue.CompactionLevel {
case 1:
tierEmoji = " 🗜️"
statusSuffix = " (compacted L1)"
case 2:
tierEmoji = " 📦"
statusSuffix = " (compacted L2)"
}
// Tufte-aligned header: STATUS_ICON ID · Title [Priority · STATUS]
fmt.Printf("\n%s\n", formatIssueHeader(issue))
fmt.Printf("\n%s: %s%s\n", ui.RenderAccent(issue.ID), issue.Title, tierEmoji)
fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix)
if issue.CloseReason != "" {
fmt.Printf("Close reason: %s\n", issue.CloseReason)
}
if issue.ClosedBySession != "" {
fmt.Printf("Closed by session: %s\n", issue.ClosedBySession)
}
fmt.Printf("Priority: P%d\n", issue.Priority)
fmt.Printf("Type: %s\n", issue.IssueType)
if issue.Assignee != "" {
fmt.Printf("Assignee: %s\n", issue.Assignee)
}
if issue.EstimatedMinutes != nil {
fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
if issue.CreatedBy != "" {
fmt.Printf("Created by: %s\n", issue.CreatedBy)
}
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
if issue.DueAt != nil {
fmt.Printf("Due: %s\n", issue.DueAt.Format("2006-01-02 15:04"))
}
if issue.DeferUntil != nil {
fmt.Printf("Deferred until: %s\n", issue.DeferUntil.Format("2006-01-02 15:04"))
}
if issue.ExternalRef != nil && *issue.ExternalRef != "" {
fmt.Printf("External Ref: %s\n", *issue.ExternalRef)
}
// Metadata: Owner · Type | Created · Updated
fmt.Println(formatIssueMetadata(issue))
// Show compaction status footer
// Compaction info (if applicable)
if issue.CompactionLevel > 0 {
tierEmoji := "🗜️"
if issue.CompactionLevel == 2 {
tierEmoji = "📦"
}
tierName := fmt.Sprintf("Tier %d", issue.CompactionLevel)
fmt.Println()
if issue.OriginalSize > 0 {
currentSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
saved := issue.OriginalSize - currentSize
if saved > 0 {
reduction := float64(saved) / float64(issue.OriginalSize) * 100
fmt.Printf("📊 Original: %d bytes | Compressed: %d bytes (%.0f%% reduction)\n",
fmt.Printf("📊 %d → %d bytes (%.0f%% reduction)\n",
issue.OriginalSize, currentSize, reduction)
}
}
compactedDate := ""
if issue.CompactedAt != nil {
compactedDate = issue.CompactedAt.Format("2006-01-02")
}
fmt.Printf("%s Compacted: %s (%s)\n", tierEmoji, compactedDate, tierName)
}
// Content sections
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("DESCRIPTION"), ui.RenderMarkdown(issue.Description))
}
if issue.Design != "" {
fmt.Printf("\nDesign:\n%s\n", issue.Design)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("DESIGN"), ui.RenderMarkdown(issue.Design))
}
if issue.Notes != "" {
fmt.Printf("\nNotes:\n%s\n", issue.Notes)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("NOTES"), ui.RenderMarkdown(issue.Notes))
}
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
fmt.Printf("\n%s\n%s\n", ui.RenderBold("ACCEPTANCE CRITERIA"), ui.RenderMarkdown(issue.AcceptanceCriteria))
}
// Show labels
labels, _ := issueStore.GetLabels(ctx, issue.ID)
if len(labels) > 0 {
fmt.Printf("\nLabels: %v\n", labels)
fmt.Printf("\n%s %s\n", ui.RenderBold("LABELS:"), strings.Join(labels, ", "))
}
// Show dependencies
// Show dependencies with semantic colors
deps, _ := issueStore.GetDependencies(ctx, issue.ID)
if len(deps) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(deps))
fmt.Printf("\n%s\n", ui.RenderBold("DEPENDS ON"))
for _, dep := range deps {
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
fmt.Println(formatSimpleDependencyLine("→", dep))
}
}
@@ -541,27 +451,27 @@ var showCmd = &cobra.Command{
}
if len(children) > 0 {
fmt.Printf("\nChildren (%d):\n", len(children))
fmt.Printf("\n%s\n", ui.RenderBold("CHILDREN"))
for _, dep := range children {
fmt.Printf(" ↳ %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("↳", dep))
}
}
if len(blocks) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(blocks))
fmt.Printf("\n%s\n", ui.RenderBold("BLOCKS"))
for _, dep := range blocks {
fmt.Printf(" ← %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("←", dep))
}
}
if len(related) > 0 {
fmt.Printf("\nRelated (%d):\n", len(related))
fmt.Printf("\n%s\n", ui.RenderBold("RELATED"))
for _, dep := range related {
fmt.Printf(" ↔ %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("↔", dep))
}
}
if len(discovered) > 0 {
fmt.Printf("\nDiscovered (%d):\n", len(discovered))
fmt.Printf("\n%s\n", ui.RenderBold("DISCOVERED"))
for _, dep := range discovered {
fmt.Printf(" ◊ %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatDependencyLine("◊", dep))
}
}
}
@@ -569,9 +479,9 @@ var showCmd = &cobra.Command{
// Fallback for non-SQLite storage
dependents, _ := issueStore.GetDependents(ctx, issue.ID)
if len(dependents) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(dependents))
fmt.Printf("\n%s\n", ui.RenderBold("BLOCKS"))
for _, dep := range dependents {
fmt.Printf(" ← %s: %s [P%d - %s]\n", dep.ID, dep.Title, dep.Priority, dep.Status)
fmt.Println(formatSimpleDependencyLine("←", dep))
}
}
}
@@ -579,9 +489,14 @@ var showCmd = &cobra.Command{
// Show comments
comments, _ := issueStore.GetIssueComments(ctx, issue.ID)
if len(comments) > 0 {
fmt.Printf("\nComments (%d):\n", len(comments))
fmt.Printf("\n%s\n", ui.RenderBold("COMMENTS"))
for _, comment := range comments {
fmt.Printf(" [%s at %s]\n %s\n\n", comment.Author, comment.CreatedAt.Format("2006-01-02 15:04"), comment.Text)
fmt.Printf(" %s %s\n", ui.RenderMuted(comment.CreatedAt.Format("2006-01-02")), comment.Author)
rendered := ui.RenderMarkdown(comment.Text)
// TrimRight removes trailing newlines that Glamour adds, preventing extra blank lines
for _, line := range strings.Split(strings.TrimRight(rendered, "\n"), "\n") {
fmt.Printf(" %s\n", line)
}
}
}
@@ -605,10 +520,178 @@ var showCmd = &cobra.Command{
// formatShortIssue returns a compact one-line representation of an issue
// Format: <id> [<status>] P<priority> <type>: <title>
// Format: STATUS_ICON ID PRIORITY [Type] Title
func formatShortIssue(issue *types.Issue) string {
return fmt.Sprintf("%s [%s] P%d %s: %s",
issue.ID, issue.Status, issue.Priority, issue.IssueType, issue.Title)
statusIcon := ui.RenderStatusIcon(string(issue.Status))
priorityTag := ui.RenderPriority(issue.Priority)
// Type badge only for notable types
typeBadge := ""
switch issue.IssueType {
case "epic":
typeBadge = ui.TypeEpicStyle.Render("[epic]") + " "
case "bug":
typeBadge = ui.TypeBugStyle.Render("[bug]") + " "
}
// Closed issues: entire line is muted
if issue.Status == types.StatusClosed {
return fmt.Sprintf("%s %s %s %s%s",
statusIcon,
ui.RenderMuted(issue.ID),
ui.RenderMuted(fmt.Sprintf("● P%d", issue.Priority)),
ui.RenderMuted(string(issue.IssueType)),
ui.RenderMuted(" "+issue.Title))
}
return fmt.Sprintf("%s %s %s %s%s", statusIcon, issue.ID, priorityTag, typeBadge, issue.Title)
}
// formatIssueHeader renders the single-line header for `bd show`.
// Layout: STATUS_ICON ID[TYPE] · Title[TIER] [Priority · STATUS]
// Every element carries its semantic color since the view focuses on one issue.
func formatIssueHeader(issue *types.Issue) string {
	status := string(issue.Status)

	// Type badge: only epics and bugs get an uppercase marker.
	var badge string
	switch issue.IssueType {
	case "epic":
		badge = " " + ui.TypeEpicStyle.Render("[EPIC]")
	case "bug":
		badge = " " + ui.TypeBugStyle.Render("[BUG]")
	}

	// Compaction marker mirrors the tier emojis used elsewhere in bd show.
	var tier string
	switch issue.CompactionLevel {
	case 1:
		tier = " 🗜️"
	case 2:
		tier = " 📦"
	}

	return fmt.Sprintf("%s %s%s · %s%s [%s · %s]",
		ui.RenderStatusIcon(status),
		ui.RenderAccent(issue.ID),
		badge,
		issue.Title,
		tier,
		ui.RenderPriority(issue.Priority),
		ui.GetStatusStyle(status).Render(strings.ToUpper(status)))
}
// formatIssueMetadata renders the grouped metadata block shown under the
// header in `bd show`.
//
// Line 1: Owner/Assignee/Type · Line 2: Created/Updated/Due/Deferred,
// then optional close-reason and external-ref lines. Parts within a line
// are joined with " · ".
func formatIssueMetadata(issue *types.Issue) string {
	var out []string

	// Identity line: owner and assignee are optional; type is always shown.
	var who []string
	if issue.CreatedBy != "" {
		who = append(who, fmt.Sprintf("Owner: %s", issue.CreatedBy))
	}
	if issue.Assignee != "" {
		who = append(who, fmt.Sprintf("Assignee: %s", issue.Assignee))
	}
	// Type gets a semantic color for the notable kinds.
	typeStr := string(issue.IssueType)
	switch issue.IssueType {
	case "epic":
		typeStr = ui.TypeEpicStyle.Render("epic")
	case "bug":
		typeStr = ui.TypeBugStyle.Render("bug")
	}
	who = append(who, fmt.Sprintf("Type: %s", typeStr))
	out = append(out, strings.Join(who, " · "))

	// Timestamp line: created/updated always, due/deferred when present.
	when := []string{
		fmt.Sprintf("Created: %s", issue.CreatedAt.Format("2006-01-02")),
		fmt.Sprintf("Updated: %s", issue.UpdatedAt.Format("2006-01-02")),
	}
	if issue.DueAt != nil {
		when = append(when, fmt.Sprintf("Due: %s", issue.DueAt.Format("2006-01-02")))
	}
	if issue.DeferUntil != nil {
		when = append(when, fmt.Sprintf("Deferred: %s", issue.DeferUntil.Format("2006-01-02")))
	}
	out = append(out, strings.Join(when, " · "))

	// Close reason appears only for closed issues; muted since the work is done.
	if issue.Status == types.StatusClosed && issue.CloseReason != "" {
		out = append(out, ui.RenderMuted(fmt.Sprintf("Close reason: %s", issue.CloseReason)))
	}
	// External reference, when set.
	if issue.ExternalRef != nil && *issue.ExternalRef != "" {
		out = append(out, fmt.Sprintf("External: %s", *issue.ExternalRef))
	}
	return strings.Join(out, "\n")
}
// formatDependencyLine renders one dependency row with semantic colors.
// Closed dependencies are fully muted: the work is complete and should not
// compete for attention.
func formatDependencyLine(prefix string, dep *types.IssueWithDependencyMetadata) string {
	icon := ui.GetStatusIcon(string(dep.Status))

	if dep.Status == types.StatusClosed {
		// Completed: mute every element of the row.
		return fmt.Sprintf(" %s %s %s: %s %s",
			prefix, icon,
			ui.RenderMuted(dep.ID),
			ui.RenderMuted(dep.Title),
			ui.RenderMuted(fmt.Sprintf("● P%d", dep.Priority)))
	}

	// Active: status-colored ID, optional type marker, semantic priority tag.
	var kind string
	switch dep.IssueType {
	case "epic":
		kind = ui.TypeEpicStyle.Render("(EPIC)") + " "
	case "bug":
		kind = ui.TypeBugStyle.Render("(BUG)") + " "
	}
	return fmt.Sprintf(" %s %s %s: %s%s %s",
		prefix, icon,
		ui.GetStatusStyle(string(dep.Status)).Render(dep.ID),
		kind, dep.Title,
		ui.RenderPriority(dep.Priority))
}
// formatSimpleDependencyLine renders a dependency row when no dependency
// metadata is available (fallback path). Closed rows are fully muted since
// the work is complete.
func formatSimpleDependencyLine(prefix string, dep *types.Issue) string {
	icon := ui.GetStatusIcon(string(dep.Status))

	if dep.Status == types.StatusClosed {
		// Completed: mute every element of the row.
		return fmt.Sprintf(" %s %s %s: %s %s",
			prefix, icon,
			ui.RenderMuted(dep.ID),
			ui.RenderMuted(dep.Title),
			ui.RenderMuted(fmt.Sprintf("● P%d", dep.Priority)))
	}

	// Active: status-colored ID plus semantic priority tag.
	return fmt.Sprintf(" %s %s %s: %s %s",
		prefix, icon,
		ui.GetStatusStyle(string(dep.Status)).Render(dep.ID),
		dep.Title,
		ui.RenderPriority(dep.Priority))
}
// showIssueRefs displays issues that reference the given issue(s), grouped by relationship type
@@ -747,13 +830,23 @@ func showIssueRefs(ctx context.Context, args []string, resolvedIDs []string, rou
}
// displayRefGroup displays a group of references with a given type
// Closed items get entire row muted - the work is done, no need for attention
func displayRefGroup(depType types.DependencyType, refs []*types.IssueWithDependencyMetadata) {
// Get emoji for type
emoji := getRefTypeEmoji(depType)
fmt.Printf("\n %s %s (%d):\n", emoji, depType, len(refs))
for _, ref := range refs {
// Color ID based on status
// Closed items: mute entire row since the work is complete
if ref.Status == types.StatusClosed {
fmt.Printf(" %s: %s %s\n",
ui.RenderMuted(ref.ID),
ui.RenderMuted(ref.Title),
ui.RenderMuted(fmt.Sprintf("[P%d - %s]", ref.Priority, ref.Status)))
continue
}
// Active items: color ID based on status
var idStr string
switch ref.Status {
case types.StatusOpen:
@@ -762,8 +855,6 @@ func displayRefGroup(depType types.DependencyType, refs []*types.IssueWithDepend
idStr = ui.StatusInProgressStyle.Render(ref.ID)
case types.StatusBlocked:
idStr = ui.StatusBlockedStyle.Render(ref.ID)
case types.StatusClosed:
idStr = ui.StatusClosedStyle.Render(ref.ID)
default:
idStr = ref.ID
}

View File

@@ -55,8 +55,8 @@ func TestShow_ExternalRef(t *testing.T) {
}
out := string(showOut)
if !strings.Contains(out, "External Ref:") {
t.Errorf("expected 'External Ref:' in output, got: %s", out)
if !strings.Contains(out, "External:") {
t.Errorf("expected 'External:' in output, got: %s", out)
}
if !strings.Contains(out, "https://example.com/spec.md") {
t.Errorf("expected external ref URL in output, got: %s", out)
@@ -108,7 +108,7 @@ func TestShow_NoExternalRef(t *testing.T) {
}
out := string(showOut)
if strings.Contains(out, "External Ref:") {
t.Errorf("expected no 'External Ref:' line for issue without external ref, got: %s", out)
if strings.Contains(out, "External:") {
t.Errorf("expected no 'External:' line for issue without external ref, got: %s", out)
}
}

View File

@@ -43,7 +43,15 @@ func ensureDatabaseFresh(ctx context.Context) error {
return nil
}
// Database is stale - refuse to operate
// Database is stale - auto-import to refresh (bd-9dao fix)
// For read-only commands running in --no-daemon mode, auto-import instead of
// returning an error. This allows commands like `bd show` to work after git pull.
if !noAutoImport {
autoImportIfNewer()
return nil
}
// Auto-import is disabled, refuse to operate
return fmt.Errorf(
"Database out of sync with JSONL. Run 'bd sync --import-only' to fix.\n\n"+
"The JSONL file has been updated (e.g., after 'git pull') but the database\n"+

373
cmd/bd/staleness_test.go Normal file
View File

@@ -0,0 +1,373 @@
package main
import (
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// TestEnsureDatabaseFresh_AutoImportsOnStale verifies that when the database
// is stale (JSONL is newer), ensureDatabaseFresh triggers auto-import instead
// of returning an error. This is the fix for bd-9dao.
func TestEnsureDatabaseFresh_AutoImportsOnStale(t *testing.T) {
	ctx := context.Background()
	// Build an isolated .beads workspace: a SQLite store plus a JSONL file.
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "bd.db")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	// Create database
	testStore, err := sqlite.New(ctx, testDBPath)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer testStore.Close()
	// Set prefix
	if err := testStore.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Set an old last_import_time to make DB appear stale
	// (the JSONL created below will then look newer than the last import —
	// presumably staleness compares these timestamps; confirm in staleness.go).
	oldTime := time.Now().Add(-1 * time.Hour)
	if err := testStore.SetMetadata(ctx, "last_import_time", oldTime.Format(time.RFC3339Nano)); err != nil {
		t.Fatalf("Failed to set metadata: %v", err)
	}
	// Create JSONL with a new issue that should be auto-imported
	jsonlIssue := &types.Issue{
		ID:        "test-stale-auto-bd9dao",
		Title:     "Should Auto Import on Stale DB",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create JSONL: %v", err)
	}
	encoder := json.NewEncoder(f)
	if err := encoder.Encode(jsonlIssue); err != nil {
		t.Fatalf("Failed to encode issue: %v", err)
	}
	f.Close()
	// Save and set global state
	// (ensureDatabaseFresh reads these package-level globals; the deferred
	// block restores every one so other tests see the original values.)
	oldNoAutoImport := noAutoImport
	oldAutoImportEnabled := autoImportEnabled
	oldStore := store
	oldDbPath := dbPath
	oldRootCtx := rootCtx
	oldStoreActive := storeActive
	oldAllowStale := allowStale
	noAutoImport = false     // Allow auto-import
	autoImportEnabled = true // Enable auto-import
	allowStale = false       // Don't skip staleness check
	store = testStore
	dbPath = testDBPath
	rootCtx = ctx
	storeActive = true
	defer func() {
		noAutoImport = oldNoAutoImport
		autoImportEnabled = oldAutoImportEnabled
		allowStale = oldAllowStale
		store = oldStore
		dbPath = oldDbPath
		rootCtx = oldRootCtx
		storeActive = oldStoreActive
	}()
	// Call ensureDatabaseFresh - should auto-import and return nil
	err = ensureDatabaseFresh(ctx)
	if err != nil {
		t.Errorf("ensureDatabaseFresh() returned error when it should have auto-imported: %v", err)
	}
	// Verify issue was auto-imported
	imported, err := testStore.GetIssue(ctx, "test-stale-auto-bd9dao")
	if err != nil {
		t.Fatalf("Failed to check for issue: %v", err)
	}
	if imported == nil {
		t.Error("ensureDatabaseFresh() did not auto-import when DB was stale - bd-9dao fix failed")
	}
}
// TestEnsureDatabaseFresh_NoAutoImportFlag verifies that when noAutoImport is
// true, ensureDatabaseFresh returns an error instead of auto-importing.
func TestEnsureDatabaseFresh_NoAutoImportFlag(t *testing.T) {
	ctx := context.Background()
	// Build an isolated .beads workspace: a SQLite store plus a JSONL file.
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "bd.db")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	// Create database
	testStore, err := sqlite.New(ctx, testDBPath)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer testStore.Close()
	// Set prefix
	if err := testStore.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Set an old last_import_time to make DB appear stale
	oldTime := time.Now().Add(-1 * time.Hour)
	if err := testStore.SetMetadata(ctx, "last_import_time", oldTime.Format(time.RFC3339Nano)); err != nil {
		t.Fatalf("Failed to set metadata: %v", err)
	}
	// Create JSONL with a new issue
	jsonlIssue := &types.Issue{
		ID:        "test-noauto-bd9dao",
		Title:     "Should NOT Auto Import",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create JSONL: %v", err)
	}
	encoder := json.NewEncoder(f)
	if err := encoder.Encode(jsonlIssue); err != nil {
		t.Fatalf("Failed to encode issue: %v", err)
	}
	f.Close()
	// Save and set global state
	// (restored in the deferred block so other tests are unaffected).
	oldNoAutoImport := noAutoImport
	oldAutoImportEnabled := autoImportEnabled
	oldStore := store
	oldDbPath := dbPath
	oldRootCtx := rootCtx
	oldStoreActive := storeActive
	oldAllowStale := allowStale
	noAutoImport = true       // Disable auto-import
	autoImportEnabled = false // Disable auto-import
	allowStale = false        // Don't skip staleness check
	store = testStore
	dbPath = testDBPath
	rootCtx = ctx
	storeActive = true
	defer func() {
		noAutoImport = oldNoAutoImport
		autoImportEnabled = oldAutoImportEnabled
		allowStale = oldAllowStale
		store = oldStore
		dbPath = oldDbPath
		rootCtx = oldRootCtx
		storeActive = oldStoreActive
	}()
	// Call ensureDatabaseFresh - should return error since noAutoImport is set
	err = ensureDatabaseFresh(ctx)
	if err == nil {
		t.Error("ensureDatabaseFresh() should have returned error when noAutoImport is true")
	}
	// Verify issue was NOT imported
	imported, err := testStore.GetIssue(ctx, "test-noauto-bd9dao")
	if err != nil {
		t.Fatalf("Failed to check for issue: %v", err)
	}
	if imported != nil {
		t.Error("ensureDatabaseFresh() imported despite noAutoImport=true")
	}
}
// TestEnsureDatabaseFresh_AllowStaleFlag verifies that when allowStale is
// true, ensureDatabaseFresh skips the check entirely.
func TestEnsureDatabaseFresh_AllowStaleFlag(t *testing.T) {
	ctx := context.Background()
	// Build an isolated .beads workspace: a SQLite store plus a JSONL file.
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "bd.db")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	// Create database
	testStore, err := sqlite.New(ctx, testDBPath)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer testStore.Close()
	// Set prefix
	if err := testStore.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Set an old last_import_time to make DB appear stale
	oldTime := time.Now().Add(-1 * time.Hour)
	if err := testStore.SetMetadata(ctx, "last_import_time", oldTime.Format(time.RFC3339Nano)); err != nil {
		t.Fatalf("Failed to set metadata: %v", err)
	}
	// Create JSONL with a new issue
	jsonlIssue := &types.Issue{
		ID:        "test-allowstale-bd9dao",
		Title:     "Should Skip Check",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create JSONL: %v", err)
	}
	encoder := json.NewEncoder(f)
	if err := encoder.Encode(jsonlIssue); err != nil {
		t.Fatalf("Failed to encode issue: %v", err)
	}
	f.Close()
	// Save and set global state
	// (restored in the deferred block so other tests are unaffected).
	oldNoAutoImport := noAutoImport
	oldAutoImportEnabled := autoImportEnabled
	oldStore := store
	oldDbPath := dbPath
	oldRootCtx := rootCtx
	oldStoreActive := storeActive
	oldAllowStale := allowStale
	noAutoImport = true // Disable auto-import (shouldn't matter with allowStale)
	autoImportEnabled = false
	allowStale = true // Skip staleness check entirely
	store = testStore
	dbPath = testDBPath
	rootCtx = ctx
	storeActive = true
	defer func() {
		noAutoImport = oldNoAutoImport
		autoImportEnabled = oldAutoImportEnabled
		allowStale = oldAllowStale
		store = oldStore
		dbPath = oldDbPath
		rootCtx = oldRootCtx
		storeActive = oldStoreActive
	}()
	// Call ensureDatabaseFresh - should return nil (skip check)
	err = ensureDatabaseFresh(ctx)
	if err != nil {
		t.Errorf("ensureDatabaseFresh() should have returned nil with allowStale=true: %v", err)
	}
	// Verify issue was NOT imported (check was skipped entirely)
	imported, err := testStore.GetIssue(ctx, "test-allowstale-bd9dao")
	if err != nil {
		t.Fatalf("Failed to check for issue: %v", err)
	}
	if imported != nil {
		t.Error("ensureDatabaseFresh() imported even though allowStale=true (should skip check entirely)")
	}
}
// TestEnsureDatabaseFresh_FreshDB verifies that when the database is fresh,
// ensureDatabaseFresh returns nil without doing anything.
func TestEnsureDatabaseFresh_FreshDB(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "bd.db")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	// Create JSONL first
	// (unlike the stale-DB tests, the JSONL exists before the DB metadata is
	// written, and last_import_time is set in the future below, so the DB
	// should be considered fresh).
	jsonlIssue := &types.Issue{
		ID:        "test-fresh-bd9dao",
		Title:     "Fresh DB Test",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create JSONL: %v", err)
	}
	encoder := json.NewEncoder(f)
	if err := encoder.Encode(jsonlIssue); err != nil {
		t.Fatalf("Failed to encode issue: %v", err)
	}
	f.Close()
	// Create database
	testStore, err := sqlite.New(ctx, testDBPath)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer testStore.Close()
	// Set prefix
	if err := testStore.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Set a future last_import_time to make DB appear fresh
	futureTime := time.Now().Add(1 * time.Hour)
	if err := testStore.SetMetadata(ctx, "last_import_time", futureTime.Format(time.RFC3339Nano)); err != nil {
		t.Fatalf("Failed to set metadata: %v", err)
	}
	// Save and set global state
	// (restored in the deferred block so other tests are unaffected).
	oldNoAutoImport := noAutoImport
	oldAutoImportEnabled := autoImportEnabled
	oldStore := store
	oldDbPath := dbPath
	oldRootCtx := rootCtx
	oldStoreActive := storeActive
	oldAllowStale := allowStale
	noAutoImport = false
	autoImportEnabled = true
	allowStale = false
	store = testStore
	dbPath = testDBPath
	rootCtx = ctx
	storeActive = true
	defer func() {
		noAutoImport = oldNoAutoImport
		autoImportEnabled = oldAutoImportEnabled
		allowStale = oldAllowStale
		store = oldStore
		dbPath = oldDbPath
		rootCtx = oldRootCtx
		storeActive = oldStoreActive
	}()
	// Call ensureDatabaseFresh - should return nil (DB is fresh)
	err = ensureDatabaseFresh(ctx)
	if err != nil {
		t.Errorf("ensureDatabaseFresh() should have returned nil for fresh DB: %v", err)
	}
}

View File

@@ -65,10 +65,24 @@ Use --merge to merge the sync branch back to main branch.`,
// (e.g., during recovery), the daemon's SQLite connection points to the old
// (deleted) file, causing export to return incomplete/corrupt data.
// Using direct mode ensures we always read from the current database file.
//
// GH#984: Must use fallbackToDirectMode() instead of just closing daemon.
// When connected to daemon, PersistentPreRun skips store initialization.
// Just closing daemon leaves store=nil, causing "no database store available"
// errors in post-checkout hook's `bd sync --import-only`.
if daemonClient != nil {
debug.Logf("sync: forcing direct mode for consistency")
_ = daemonClient.Close()
daemonClient = nil
if err := fallbackToDirectMode("sync requires direct database access"); err != nil {
FatalError("failed to initialize direct mode: %v", err)
}
}
// Initialize local store after daemon disconnect.
// When daemon was connected, PersistentPreRun returns early without initializing
// the store global. Commands like --import-only need the store, so we must
// initialize it here after closing the daemon connection.
if err := ensureStoreActive(); err != nil {
FatalError("failed to initialize store: %v", err)
}
// Resolve noGitHistory based on fromMain (fixes #417)

View File

@@ -463,6 +463,14 @@ func restoreBeadsDirFromBranch(ctx context.Context) error {
return fmt.Errorf("no .beads directory found")
}
// Skip restore when beads directory is redirected (bd-lmqhe)
// When redirected, the beads directory is in a different repo, so
// git checkout from the current repo won't work for paths outside it.
redirectInfo := beads.GetRedirectInfo()
if redirectInfo.IsRedirected {
return nil
}
// Restore .beads/ from HEAD (current branch's committed state)
// Using -- to ensure .beads/ is treated as a path, not a branch name
cmd := exec.CommandContext(ctx, "git", "checkout", "HEAD", "--", beadsDir) //nolint:gosec // G204: beadsDir from FindBeadsDir(), not user input

View File

@@ -1,12 +1,339 @@
package main
import (
"context"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/steveyegge/beads/internal/git"
)
// setupGitRepoWithBeads creates a temporary git repository with a .beads directory.
// Returns the repo path and cleanup function.
//
// The helper changes the process working directory into the repo (the code
// under test presumably discovers the repo from the cwd — verify against
// internal/git) and the returned cleanup restores the original directory.
// git.ResetCaches is called around init/chdir so any cached repo discovery
// from a prior test does not leak into this one.
func setupGitRepoWithBeads(t *testing.T) (repoPath string, cleanup func()) {
	t.Helper()
	tmpDir := t.TempDir()
	originalWd, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get working directory: %v", err)
	}
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to change to temp directory: %v", err)
	}
	git.ResetCaches()
	// Initialize git repo
	if err := exec.Command("git", "init", "--initial-branch=main").Run(); err != nil {
		_ = os.Chdir(originalWd)
		t.Fatalf("failed to init git repo: %v", err)
	}
	git.ResetCaches()
	// Configure git
	_ = exec.Command("git", "config", "user.email", "test@test.com").Run()
	_ = exec.Command("git", "config", "user.name", "Test User").Run()
	// Create .beads directory with issues.jsonl
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		_ = os.Chdir(originalWd)
		t.Fatalf("failed to create .beads directory: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	if err := os.WriteFile(jsonlPath, []byte(`{"id":"test-1"}`+"\n"), 0644); err != nil {
		_ = os.Chdir(originalWd)
		t.Fatalf("failed to write issues.jsonl: %v", err)
	}
	// Create initial commit
	_ = exec.Command("git", "add", ".beads").Run()
	if err := exec.Command("git", "commit", "-m", "initial").Run(); err != nil {
		_ = os.Chdir(originalWd)
		t.Fatalf("failed to create initial commit: %v", err)
	}
	cleanup = func() {
		_ = os.Chdir(originalWd)
		git.ResetCaches()
	}
	return tmpDir, cleanup
}
// setupRedirectedBeadsRepo creates two git repos: source with redirect, target with actual .beads.
// Returns source path, target path, and cleanup function.
//
// Layout on exit:
//
//	baseDir/target/.beads/issues.jsonl   (real beads data, committed)
//	baseDir/source/.beads/redirect       (points to ../target/.beads, committed)
//
// The process working directory is left inside the source repo; the returned
// cleanup restores the original directory and resets git caches.
func setupRedirectedBeadsRepo(t *testing.T) (sourcePath, targetPath string, cleanup func()) {
	t.Helper()
	baseDir := t.TempDir()
	originalWd, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get working directory: %v", err)
	}
	// Create target repo with actual .beads
	targetPath = filepath.Join(baseDir, "target")
	if err := os.MkdirAll(targetPath, 0755); err != nil {
		t.Fatalf("failed to create target directory: %v", err)
	}
	targetBeadsDir := filepath.Join(targetPath, ".beads")
	if err := os.MkdirAll(targetBeadsDir, 0755); err != nil {
		t.Fatalf("failed to create target .beads directory: %v", err)
	}
	// Initialize target as git repo
	// (cmd.Dir is set on every command because the cwd is not changed until
	// the very end of this helper).
	cmd := exec.Command("git", "init", "--initial-branch=main")
	cmd.Dir = targetPath
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to init target git repo: %v", err)
	}
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = targetPath
	_ = cmd.Run()
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = targetPath
	_ = cmd.Run()
	// Write issues.jsonl in target
	jsonlPath := filepath.Join(targetBeadsDir, "issues.jsonl")
	if err := os.WriteFile(jsonlPath, []byte(`{"id":"test-1"}`+"\n"), 0644); err != nil {
		t.Fatalf("failed to write issues.jsonl: %v", err)
	}
	// Commit in target
	cmd = exec.Command("git", "add", ".beads")
	cmd.Dir = targetPath
	_ = cmd.Run()
	cmd = exec.Command("git", "commit", "-m", "initial")
	cmd.Dir = targetPath
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to create initial commit in target: %v", err)
	}
	// Create source repo with redirect
	sourcePath = filepath.Join(baseDir, "source")
	if err := os.MkdirAll(sourcePath, 0755); err != nil {
		t.Fatalf("failed to create source directory: %v", err)
	}
	sourceBeadsDir := filepath.Join(sourcePath, ".beads")
	if err := os.MkdirAll(sourceBeadsDir, 0755); err != nil {
		t.Fatalf("failed to create source .beads directory: %v", err)
	}
	// Write redirect file pointing to target
	redirectPath := filepath.Join(sourceBeadsDir, "redirect")
	// Use relative path: ../target/.beads
	if err := os.WriteFile(redirectPath, []byte("../target/.beads\n"), 0644); err != nil {
		t.Fatalf("failed to write redirect file: %v", err)
	}
	// Initialize source as git repo
	cmd = exec.Command("git", "init", "--initial-branch=main")
	cmd.Dir = sourcePath
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to init source git repo: %v", err)
	}
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = sourcePath
	_ = cmd.Run()
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = sourcePath
	_ = cmd.Run()
	// Commit redirect in source
	cmd = exec.Command("git", "add", ".beads")
	cmd.Dir = sourcePath
	_ = cmd.Run()
	cmd = exec.Command("git", "commit", "-m", "initial with redirect")
	cmd.Dir = sourcePath
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to create initial commit in source: %v", err)
	}
	// Change to source directory
	if err := os.Chdir(sourcePath); err != nil {
		t.Fatalf("failed to change to source directory: %v", err)
	}
	git.ResetCaches()
	cleanup = func() {
		_ = os.Chdir(originalWd)
		git.ResetCaches()
	}
	return sourcePath, targetPath, cleanup
}
// TestGitHasBeadsChanges_NoChanges verifies that a freshly committed
// repository reports no pending .beads changes.
func TestGitHasBeadsChanges_NoChanges(t *testing.T) {
	_, cleanup := setupGitRepoWithBeads(t)
	defer cleanup()

	dirty, err := gitHasBeadsChanges(context.Background())
	if err != nil {
		t.Fatalf("gitHasBeadsChanges() error = %v", err)
	}
	if dirty {
		t.Error("expected no changes for clean repo")
	}
}
// TestGitHasBeadsChanges_WithChanges verifies that overwriting issues.jsonl
// after the initial commit is detected as a pending .beads change.
func TestGitHasBeadsChanges_WithChanges(t *testing.T) {
	repoPath, cleanup := setupGitRepoWithBeads(t)
	defer cleanup()

	// Overwrite issues.jsonl so the working tree diverges from HEAD.
	target := filepath.Join(repoPath, ".beads", "issues.jsonl")
	if err := os.WriteFile(target, []byte(`{"id":"test-2"}`+"\n"), 0644); err != nil {
		t.Fatalf("failed to modify issues.jsonl: %v", err)
	}

	dirty, err := gitHasBeadsChanges(context.Background())
	if err != nil {
		t.Fatalf("gitHasBeadsChanges() error = %v", err)
	}
	if !dirty {
		t.Error("expected changes for modified file")
	}
}
// TestGitHasBeadsChanges_WithRedirect_NoChanges verifies that a clean
// redirected repo reports no pending .beads changes.
func TestGitHasBeadsChanges_WithRedirect_NoChanges(t *testing.T) {
	ctx := context.Background()
	sourcePath, _, cleanup := setupRedirectedBeadsRepo(t)
	defer cleanup()
	// Set BEADS_DIR to point to source's .beads (which has the redirect).
	// Save/restore via LookupEnv so an originally-unset BEADS_DIR is unset
	// again afterwards, rather than being left behind as an empty string.
	oldBeadsDir, hadBeadsDir := os.LookupEnv("BEADS_DIR")
	os.Setenv("BEADS_DIR", filepath.Join(sourcePath, ".beads"))
	defer func() {
		if hadBeadsDir {
			os.Setenv("BEADS_DIR", oldBeadsDir)
		} else {
			os.Unsetenv("BEADS_DIR")
		}
	}()
	hasChanges, err := gitHasBeadsChanges(ctx)
	if err != nil {
		t.Fatalf("gitHasBeadsChanges() error = %v", err)
	}
	if hasChanges {
		t.Error("expected no changes for clean redirected repo")
	}
}
// TestGitHasBeadsChanges_WithRedirect_WithChanges verifies that modifying the
// redirect target's issues.jsonl is detected through the redirect.
func TestGitHasBeadsChanges_WithRedirect_WithChanges(t *testing.T) {
	ctx := context.Background()
	sourcePath, targetPath, cleanup := setupRedirectedBeadsRepo(t)
	defer cleanup()
	// Set BEADS_DIR to point to source's .beads (which has the redirect).
	// Save/restore via LookupEnv so an originally-unset BEADS_DIR is unset
	// again afterwards, rather than being left behind as an empty string.
	oldBeadsDir, hadBeadsDir := os.LookupEnv("BEADS_DIR")
	os.Setenv("BEADS_DIR", filepath.Join(sourcePath, ".beads"))
	defer func() {
		if hadBeadsDir {
			os.Setenv("BEADS_DIR", oldBeadsDir)
		} else {
			os.Unsetenv("BEADS_DIR")
		}
	}()
	// Modify the issues.jsonl file in target (where actual beads is)
	jsonlPath := filepath.Join(targetPath, ".beads", "issues.jsonl")
	if err := os.WriteFile(jsonlPath, []byte(`{"id":"test-2"}`+"\n"), 0644); err != nil {
		t.Fatalf("failed to modify issues.jsonl: %v", err)
	}
	hasChanges, err := gitHasBeadsChanges(ctx)
	if err != nil {
		t.Fatalf("gitHasBeadsChanges() error = %v", err)
	}
	if !hasChanges {
		t.Error("expected changes for modified file in redirected repo")
	}
}
// TestGitHasUncommittedBeadsChanges_NoChanges verifies that a freshly
// committed repository reports no uncommitted .beads changes.
func TestGitHasUncommittedBeadsChanges_NoChanges(t *testing.T) {
	_, cleanup := setupGitRepoWithBeads(t)
	defer cleanup()

	dirty, err := gitHasUncommittedBeadsChanges(context.Background())
	if err != nil {
		t.Fatalf("gitHasUncommittedBeadsChanges() error = %v", err)
	}
	if dirty {
		t.Error("expected no changes for clean repo")
	}
}
// TestGitHasUncommittedBeadsChanges_WithChanges verifies that overwriting
// issues.jsonl after the initial commit is seen as an uncommitted change.
func TestGitHasUncommittedBeadsChanges_WithChanges(t *testing.T) {
	repoPath, cleanup := setupGitRepoWithBeads(t)
	defer cleanup()

	// Overwrite issues.jsonl so the working tree diverges from HEAD.
	target := filepath.Join(repoPath, ".beads", "issues.jsonl")
	if err := os.WriteFile(target, []byte(`{"id":"test-2"}`+"\n"), 0644); err != nil {
		t.Fatalf("failed to modify issues.jsonl: %v", err)
	}

	dirty, err := gitHasUncommittedBeadsChanges(context.Background())
	if err != nil {
		t.Fatalf("gitHasUncommittedBeadsChanges() error = %v", err)
	}
	if !dirty {
		t.Error("expected changes for modified file")
	}
}
// TestGitHasUncommittedBeadsChanges_WithRedirect_NoChanges verifies that a
// clean redirected repo reports no uncommitted .beads changes.
func TestGitHasUncommittedBeadsChanges_WithRedirect_NoChanges(t *testing.T) {
	ctx := context.Background()
	sourcePath, _, cleanup := setupRedirectedBeadsRepo(t)
	defer cleanup()
	// Set BEADS_DIR to point to source's .beads (which has the redirect).
	// Save/restore via LookupEnv so an originally-unset BEADS_DIR is unset
	// again afterwards, rather than being left behind as an empty string.
	oldBeadsDir, hadBeadsDir := os.LookupEnv("BEADS_DIR")
	os.Setenv("BEADS_DIR", filepath.Join(sourcePath, ".beads"))
	defer func() {
		if hadBeadsDir {
			os.Setenv("BEADS_DIR", oldBeadsDir)
		} else {
			os.Unsetenv("BEADS_DIR")
		}
	}()
	hasChanges, err := gitHasUncommittedBeadsChanges(ctx)
	if err != nil {
		t.Fatalf("gitHasUncommittedBeadsChanges() error = %v", err)
	}
	if hasChanges {
		t.Error("expected no changes for clean redirected repo")
	}
}
// TestGitHasUncommittedBeadsChanges_WithRedirect_WithChanges verifies
// that modifying issues.jsonl in the redirect target is reported as an
// uncommitted change when BEADS_DIR points at the source repo's
// redirect-only .beads directory.
func TestGitHasUncommittedBeadsChanges_WithRedirect_WithChanges(t *testing.T) {
	ctx := context.Background()
	sourcePath, targetPath, cleanup := setupRedirectedBeadsRepo(t)
	defer cleanup()
	// Set BEADS_DIR to point to source's .beads (which has the redirect).
	// Use LookupEnv so an originally-unset variable is restored by
	// unsetting it rather than re-setting it to the empty string
	// (TestMain clears BEADS_DIR, so a bare Setenv restore would leak
	// BEADS_DIR="" into subsequent tests).
	origBeadsDir, hadBeadsDir := os.LookupEnv("BEADS_DIR")
	os.Setenv("BEADS_DIR", filepath.Join(sourcePath, ".beads"))
	defer func() {
		if hadBeadsDir {
			os.Setenv("BEADS_DIR", origBeadsDir)
		} else {
			os.Unsetenv("BEADS_DIR")
		}
	}()
	// Modify the issues.jsonl file in target (where actual beads is)
	jsonlPath := filepath.Join(targetPath, ".beads", "issues.jsonl")
	if err := os.WriteFile(jsonlPath, []byte(`{"id":"test-2"}`+"\n"), 0644); err != nil {
		t.Fatalf("failed to modify issues.jsonl: %v", err)
	}
	hasChanges, err := gitHasUncommittedBeadsChanges(ctx)
	if err != nil {
		t.Fatalf("gitHasUncommittedBeadsChanges() error = %v", err)
	}
	if !hasChanges {
		t.Error("expected changes for modified file in redirected repo")
	}
}
func TestParseGitStatusForBeadsChanges(t *testing.T) {
tests := []struct {
name string

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env sh
# bd-shim v1
# bd-hooks-version: 0.46.0
# bd-hooks-version: 0.47.1
#
# bd (beads) post-checkout hook - thin shim
#

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env sh
# bd-shim v1
# bd-hooks-version: 0.46.0
# bd-hooks-version: 0.47.1
#
# bd (beads) post-merge hook - thin shim
#

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env sh
# bd-shim v1
# bd-hooks-version: 0.46.0
# bd-hooks-version: 0.47.1
#
# bd (beads) pre-commit hook - thin shim
#

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env sh
# bd-shim v1
# bd-hooks-version: 0.46.0
# bd-hooks-version: 0.47.1
#
# bd (beads) pre-push hook - thin shim
#

View File

@@ -29,6 +29,17 @@ func TestMain(m *testing.M) {
}
}()
// Clear BEADS_DIR to prevent tests from accidentally picking up the project's
// .beads directory via git repo detection when there's a redirect file.
// Each test that needs a .beads directory should set BEADS_DIR explicitly.
origBeadsDir := os.Getenv("BEADS_DIR")
os.Unsetenv("BEADS_DIR")
defer func() {
if origBeadsDir != "" {
os.Setenv("BEADS_DIR", origBeadsDir)
}
}()
if os.Getenv("BEADS_TEST_GUARD_DISABLE") != "" {
os.Exit(m.Run())
}

View File

@@ -14,7 +14,7 @@ import (
var (
// Version is the current version of bd (overridden by ldflags at build time)
Version = "0.46.0"
Version = "0.47.1"
// Build can be set via ldflags at compile time
Build = "dev"
// Commit and branch the git revision the binary was built from (optional ldflag)

View File

@@ -118,7 +118,7 @@ func findOriginalBeadsDir() string {
}
// Walk up directory tree looking for .beads with redirect
for dir := cwd; dir != "/" && dir != "."; dir = filepath.Dir(dir) {
for dir := cwd; dir != "/" && dir != "."; {
beadsDir := filepath.Join(dir, ".beads")
if info, err := os.Stat(beadsDir); err == nil && info.IsDir() {
redirectFile := filepath.Join(beadsDir, beads.RedirectFileName)
@@ -128,6 +128,16 @@ func findOriginalBeadsDir() string {
// Found .beads without redirect - this is the actual location
return ""
}
// Move up one directory
parent := filepath.Dir(dir)
if parent == dir {
// Reached filesystem root (works on both Unix and Windows)
// On Unix: filepath.Dir("/") returns "/"
// On Windows: filepath.Dir("C:\\") returns "C:\\"
break
}
dir = parent
}
return ""

View File

@@ -211,11 +211,16 @@ func runWispCreate(cmd *cobra.Command, args []string) {
}
// Load the proto
// Note: GetIssue returns (nil, nil) for not-found, so check both
protoIssue, err := store.GetIssue(ctx, protoID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error loading proto %s: %v\n", protoID, err)
os.Exit(1)
}
if protoIssue == nil {
fmt.Fprintf(os.Stderr, "Error: proto not found: %s\n", protoID)
os.Exit(1)
}
if !isProtoIssue(protoIssue) {
fmt.Fprintf(os.Stderr, "Error: %s is not a proto (missing '%s' label)\n", protoID, MoleculeLabel)
os.Exit(1)
@@ -297,7 +302,8 @@ func isProtoIssue(issue *types.Issue) bool {
// resolvePartialIDDirect resolves a partial ID directly from store
func resolvePartialIDDirect(ctx context.Context, partial string) (string, error) {
// Try direct lookup first
if issue, err := store.GetIssue(ctx, partial); err == nil {
// Note: GetIssue returns (nil, nil) for not-found, so check both
if issue, err := store.GetIssue(ctx, partial); err == nil && issue != nil {
return issue.ID, nil
}
// Search by prefix

View File

@@ -86,6 +86,18 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
// Reset git caches after changing directory (required for IsWorktree to re-detect)
git.ResetCaches()
// Set BEADS_DIR to the test's .beads directory to prevent
// git repo detection from finding the project's .beads
origBeadsDir := os.Getenv("BEADS_DIR")
os.Setenv("BEADS_DIR", mainDir+"/.beads")
defer func() {
if origBeadsDir != "" {
os.Setenv("BEADS_DIR", origBeadsDir)
} else {
os.Unsetenv("BEADS_DIR")
}
}()
// No sync-branch configured
os.Unsetenv("BEADS_SYNC_BRANCH")
@@ -217,6 +229,18 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
// Reset git caches after changing directory
git.ResetCaches()
// Set BEADS_DIR to the test's .beads directory to prevent
// git repo detection from finding the project's .beads
origBeadsDir := os.Getenv("BEADS_DIR")
os.Setenv("BEADS_DIR", mainDir+"/.beads")
defer func() {
if origBeadsDir != "" {
os.Setenv("BEADS_DIR", origBeadsDir)
} else {
os.Unsetenv("BEADS_DIR")
}
}()
// Clear all daemon-related env vars
os.Unsetenv("BEADS_NO_DAEMON")
os.Unsetenv("BEADS_AUTO_START_DAEMON")