Merge pull request #752 from jordanhubbard/main

E2E / chaos testing - first PoC with just 48% code coverage
This commit is contained in:
Steve Yegge
2025-12-26 17:38:01 -08:00
committed by GitHub
63 changed files with 4854 additions and 516 deletions

View File

@@ -0,0 +1,426 @@
//go:build e2e
package main
import (
"bytes"
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// cliCoverageMutex serializes in-process CLI invocations: the bd command
// relies on package-level globals (rootCmd, store, os.Stdout/Stderr, env
// vars), so coverage tests must never run commands concurrently.
var cliCoverageMutex sync.Mutex
// runBDForCoverage executes the bd CLI in-process against the workspace in
// dir, capturing and returning its stdout and stderr. All invocations are
// serialized because the CLI depends on package-level globals. The helper:
//   - forces direct (no-daemon) mode and test mode via env vars,
//   - points BEADS_DIR/BEADS_DB/BD_DB at the temp workspace DB,
//   - sets BD_ACTOR/BEADS_ACTOR so operations record audit fields,
//   - resets global CLI/flag state afterwards to avoid cross-test bleed.
// It fails the test if the command returns an error.
func runBDForCoverage(t *testing.T, dir string, args ...string) (stdout string, stderr string) {
	t.Helper()
	cliCoverageMutex.Lock()
	defer cliCoverageMutex.Unlock()
	// Add --no-daemon to all commands except init.
	if len(args) > 0 && args[0] != "init" {
		args = append([]string{"--no-daemon"}, args...)
	}
	oldStdout := os.Stdout
	oldStderr := os.Stderr
	oldDir, _ := os.Getwd()
	oldArgs := os.Args
	if err := os.Chdir(dir); err != nil {
		t.Fatalf("chdir %s: %v", dir, err)
	}
	rOut, wOut, _ := os.Pipe()
	rErr, wErr, _ := os.Pipe()
	os.Stdout = wOut
	os.Stderr = wErr
	// Drain both pipes concurrently. os.Pipe has a finite kernel buffer
	// (typically 64KiB); copying only after the command finishes would
	// deadlock any command that prints more output than the buffer holds.
	var outBuf, errBuf bytes.Buffer
	var drain sync.WaitGroup
	drain.Add(2)
	go func() {
		defer drain.Done()
		_, _ = io.Copy(&outBuf, rOut)
	}()
	go func() {
		defer drain.Done()
		_, _ = io.Copy(&errBuf, rErr)
	}()
	// setEnv overrides an environment variable for the duration of this
	// invocation and records how to restore its previous state.
	var envRestores []func()
	setEnv := func(key, value string) {
		old, wasSet := os.LookupEnv(key)
		os.Setenv(key, value)
		envRestores = append(envRestores, func() {
			if wasSet {
				_ = os.Setenv(key, old)
			} else {
				os.Unsetenv(key)
			}
		})
	}
	defer func() {
		for _, restore := range envRestores {
			restore()
		}
	}()
	// Ensure direct mode and mark tests explicitly.
	setEnv("BEADS_NO_DAEMON", "1")
	setEnv("BEADS_TEST_MODE", "1")
	// Ensure all commands (including init) operate on the temp workspace DB.
	beadsDir := filepath.Join(dir, ".beads")
	db := filepath.Join(beadsDir, "beads.db")
	setEnv("BEADS_DIR", beadsDir)
	setEnv("BEADS_DB", db)
	setEnv("BD_DB", db)
	// Ensure actor is set so label operations record audit fields.
	setEnv("BD_ACTOR", "test-user")
	setEnv("BEADS_ACTOR", "test-user")
	rootCmd.SetArgs(args)
	os.Args = append([]string{"bd"}, args...)
	err := rootCmd.Execute()
	// Close and clean up all global state to prevent contamination between tests.
	if store != nil {
		store.Close()
		store = nil
	}
	if daemonClient != nil {
		daemonClient.Close()
		daemonClient = nil
	}
	// Reset all global flags and state (keep aligned with integration cli_fast_test).
	dbPath = ""
	actor = ""
	jsonOutput = false
	noDaemon = false
	noAutoFlush = false
	noAutoImport = false
	sandboxMode = false
	noDb = false
	autoFlushEnabled = true
	storeActive = false
	flushFailureCount = 0
	lastFlushError = nil
	if flushManager != nil {
		_ = flushManager.Shutdown()
		flushManager = nil
	}
	rootCtx = nil
	rootCancel = nil
	// Give SQLite time to release file locks.
	time.Sleep(10 * time.Millisecond)
	// Closing the write ends lets the drain goroutines hit EOF; wait for
	// them before reading the buffers so we never race a partial copy.
	_ = wOut.Close()
	_ = wErr.Close()
	drain.Wait()
	os.Stdout = oldStdout
	os.Stderr = oldStderr
	_ = os.Chdir(oldDir)
	os.Args = oldArgs
	rootCmd.SetArgs(nil)
	_ = rOut.Close()
	_ = rErr.Close()
	stdout = outBuf.String()
	stderr = errBuf.String()
	if err != nil {
		t.Fatalf("bd %v failed: %v\nStdout: %s\nStderr: %s", args, err, stdout, stderr)
	}
	return stdout, stderr
}
// extractJSONPayload strips any leading non-JSON noise (log lines, prompts)
// from s by returning the suffix beginning at the first '[' or '{'. If
// neither character occurs, s is returned unchanged.
func extractJSONPayload(s string) string {
	start := strings.IndexAny(s, "[{")
	if start < 0 {
		return s
	}
	return s[start:]
}
// parseCreatedIssueID decodes the JSON object printed by `bd create --json`
// and returns its "id" field, failing the test if the payload cannot be
// parsed or the id is missing.
func parseCreatedIssueID(t *testing.T, out string) string {
	t.Helper()
	var payload map[string]interface{}
	if err := json.Unmarshal([]byte(extractJSONPayload(out)), &payload); err != nil {
		t.Fatalf("parse create JSON: %v\n%s", err, out)
	}
	if id, ok := payload["id"].(string); ok && id != "" {
		return id
	}
	t.Fatalf("missing id in create output: %s", out)
	return "" // unreachable; Fatalf stops the test
}
// TestCoverage_ShowUpdateClose exercises the core issue lifecycle through the
// CLI end to end: create, label updates (add -> set -> add/remove), show in
// JSON and text form, multi-ID show, and close with a reason — verifying the
// emitted output at each step.
func TestCoverage_ShowUpdateClose(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping CLI coverage test in short mode")
	}
	dir := t.TempDir()
	runBDForCoverage(t, dir, "init", "--prefix", "test", "--quiet")
	out, _ := runBDForCoverage(t, dir, "create", "Show coverage issue", "-p", "1", "--json")
	id := parseCreatedIssueID(t, out)
	// Exercise update label flows (add -> set -> add/remove).
	runBDForCoverage(t, dir, "update", id, "--add-label", "old", "--json")
	runBDForCoverage(t, dir, "update", id, "--set-labels", "a,b", "--add-label", "c", "--remove-label", "a", "--json")
	runBDForCoverage(t, dir, "update", id, "--remove-label", "old", "--json")
	// Show JSON output and verify labels were applied.
	showOut, _ := runBDForCoverage(t, dir, "show", "--allow-stale", id, "--json")
	showPayload := extractJSONPayload(showOut)
	var details []map[string]interface{}
	if err := json.Unmarshal([]byte(showPayload), &details); err != nil {
		// Some commands may emit a single object; fall back to object parse.
		var single map[string]interface{}
		if err2 := json.Unmarshal([]byte(showPayload), &single); err2 != nil {
			t.Fatalf("parse show JSON: %v / %v\n%s", err, err2, showOut)
		}
		details = []map[string]interface{}{single}
	}
	if len(details) != 1 {
		t.Fatalf("expected 1 issue, got %d", len(details))
	}
	labelsAny, ok := details[0]["labels"]
	if !ok {
		t.Fatalf("expected labels in show output: %s", showOut)
	}
	// Re-marshal labels to JSON so the substring checks below work regardless
	// of the concrete decoded type ([]interface{} vs []string).
	labelsBytes, _ := json.Marshal(labelsAny)
	labelsStr := string(labelsBytes)
	if !strings.Contains(labelsStr, "b") || !strings.Contains(labelsStr, "c") {
		t.Fatalf("expected labels b and c, got %s", labelsStr)
	}
	if strings.Contains(labelsStr, "a") || strings.Contains(labelsStr, "old") {
		t.Fatalf("expected labels a and old to be absent, got %s", labelsStr)
	}
	// Show text output.
	showText, _ := runBDForCoverage(t, dir, "show", "--allow-stale", id)
	if !strings.Contains(showText, "Show coverage issue") {
		t.Fatalf("expected show output to contain title, got: %s", showText)
	}
	// Multi-ID show should print both issues.
	out2, _ := runBDForCoverage(t, dir, "create", "Second issue", "-p", "2", "--json")
	id2 := parseCreatedIssueID(t, out2)
	multi, _ := runBDForCoverage(t, dir, "show", "--allow-stale", id, id2)
	if !strings.Contains(multi, "Show coverage issue") || !strings.Contains(multi, "Second issue") {
		t.Fatalf("expected multi-show output to include both titles, got: %s", multi)
	}
	if !strings.Contains(multi, "─") {
		t.Fatalf("expected multi-show output to include a separator line, got: %s", multi)
	}
	// Close and verify JSON output.
	closeOut, _ := runBDForCoverage(t, dir, "close", id, "--reason", "Done", "--json")
	closePayload := extractJSONPayload(closeOut)
	var closed []map[string]interface{}
	if err := json.Unmarshal([]byte(closePayload), &closed); err != nil {
		t.Fatalf("parse close JSON: %v\n%s", err, closeOut)
	}
	if len(closed) != 1 {
		t.Fatalf("expected 1 closed issue, got %d", len(closed))
	}
	if status, _ := closed[0]["status"].(string); status != string(types.StatusClosed) {
		t.Fatalf("expected status closed, got %q", status)
	}
}
// TestCoverage_TemplateAndPinnedProtections verifies two CLI guard rails:
// (1) a pinned issue cannot be closed without --force, and (2) an issue
// flagged is_template (inserted directly via the sqlite store, since the CLI
// has no flag for it here) rejects both update and close, and its template
// flag survives a CLI `show` round trip.
func TestCoverage_TemplateAndPinnedProtections(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping CLI coverage test in short mode")
	}
	dir := t.TempDir()
	runBDForCoverage(t, dir, "init", "--prefix", "test", "--quiet")
	// Create a pinned issue and verify close requires --force.
	out, _ := runBDForCoverage(t, dir, "create", "Pinned issue", "-p", "1", "--json")
	pinnedID := parseCreatedIssueID(t, out)
	runBDForCoverage(t, dir, "update", pinnedID, "--status", string(types.StatusPinned), "--json")
	_, closeErr := runBDForCoverage(t, dir, "close", pinnedID, "--reason", "Done")
	if !strings.Contains(closeErr, "cannot close pinned issue") {
		t.Fatalf("expected pinned close to be rejected, stderr: %s", closeErr)
	}
	forceOut, _ := runBDForCoverage(t, dir, "close", pinnedID, "--force", "--reason", "Done", "--json")
	forcePayload := extractJSONPayload(forceOut)
	var closed []map[string]interface{}
	if err := json.Unmarshal([]byte(forcePayload), &closed); err != nil {
		t.Fatalf("parse close JSON: %v\n%s", err, forceOut)
	}
	if len(closed) != 1 {
		t.Fatalf("expected 1 closed issue, got %d", len(closed))
	}
	// Insert a template issue directly and verify update/close protect it.
	dbFile := filepath.Join(dir, ".beads", "beads.db")
	s, err := sqlite.New(context.Background(), dbFile)
	if err != nil {
		t.Fatalf("sqlite.New: %v", err)
	}
	ctx := context.Background()
	template := &types.Issue{
		Title:      "Template issue",
		Status:     types.StatusOpen,
		Priority:   2,
		IssueType:  types.TypeTask,
		IsTemplate: true,
	}
	if err := s.CreateIssue(ctx, template, "test-user"); err != nil {
		s.Close()
		t.Fatalf("CreateIssue: %v", err)
	}
	// Read back to confirm the flag was actually persisted before testing CLI behavior.
	created, err := s.GetIssue(ctx, template.ID)
	if err != nil {
		s.Close()
		t.Fatalf("GetIssue(template): %v", err)
	}
	if created == nil || !created.IsTemplate {
		s.Close()
		t.Fatalf("expected inserted issue to be IsTemplate=true, got %+v", created)
	}
	// Close the store before invoking the CLI so it can open the DB itself.
	_ = s.Close()
	showOut, _ := runBDForCoverage(t, dir, "show", "--allow-stale", template.ID, "--json")
	showPayload := extractJSONPayload(showOut)
	var showDetails []map[string]interface{}
	if err := json.Unmarshal([]byte(showPayload), &showDetails); err != nil {
		t.Fatalf("parse show JSON: %v\n%s", err, showOut)
	}
	if len(showDetails) != 1 {
		t.Fatalf("expected 1 issue from show, got %d", len(showDetails))
	}
	// Re-open the DB after running the CLI to confirm is_template persisted.
	s2, err := sqlite.New(context.Background(), dbFile)
	if err != nil {
		t.Fatalf("sqlite.New (reopen): %v", err)
	}
	postShow, err := s2.GetIssue(context.Background(), template.ID)
	_ = s2.Close()
	if err != nil {
		t.Fatalf("GetIssue(template, post-show): %v", err)
	}
	if postShow == nil || !postShow.IsTemplate {
		t.Fatalf("expected template to remain IsTemplate=true post-show, got %+v", postShow)
	}
	if v, ok := showDetails[0]["is_template"]; ok {
		if b, ok := v.(bool); !ok || !b {
			t.Fatalf("expected show JSON is_template=true, got %v", v)
		}
	} else {
		t.Fatalf("expected show JSON to include is_template=true, got: %s", showOut)
	}
	_, updErr := runBDForCoverage(t, dir, "update", template.ID, "--title", "New title")
	if !strings.Contains(updErr, "cannot update template") {
		t.Fatalf("expected template update to be rejected, stderr: %s", updErr)
	}
	_, closeTemplateErr := runBDForCoverage(t, dir, "close", template.ID, "--reason", "Done")
	if !strings.Contains(closeTemplateErr, "cannot close template") {
		t.Fatalf("expected template close to be rejected, stderr: %s", closeTemplateErr)
	}
}
// TestCoverage_ShowThread builds a three-message reply chain directly in the
// sqlite store (root <- reply1 <- reply2, linked via replies-to dependencies
// sharing the root's ThreadID) and verifies that `bd show --thread` renders
// the full thread with all three message IDs.
func TestCoverage_ShowThread(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping CLI coverage test in short mode")
	}
	dir := t.TempDir()
	runBDForCoverage(t, dir, "init", "--prefix", "test", "--quiet")
	dbFile := filepath.Join(dir, ".beads", "beads.db")
	s, err := sqlite.New(context.Background(), dbFile)
	if err != nil {
		t.Fatalf("sqlite.New: %v", err)
	}
	ctx := context.Background()
	root := &types.Issue{Title: "Root message", IssueType: types.TypeMessage, Status: types.StatusOpen, Sender: "alice", Assignee: "bob"}
	reply1 := &types.Issue{Title: "Re: Root", IssueType: types.TypeMessage, Status: types.StatusOpen, Sender: "bob", Assignee: "alice"}
	reply2 := &types.Issue{Title: "Re: Re: Root", IssueType: types.TypeMessage, Status: types.StatusOpen, Sender: "alice", Assignee: "bob"}
	if err := s.CreateIssue(ctx, root, "test-user"); err != nil {
		s.Close()
		t.Fatalf("CreateIssue root: %v", err)
	}
	if err := s.CreateIssue(ctx, reply1, "test-user"); err != nil {
		s.Close()
		t.Fatalf("CreateIssue reply1: %v", err)
	}
	if err := s.CreateIssue(ctx, reply2, "test-user"); err != nil {
		s.Close()
		t.Fatalf("CreateIssue reply2: %v", err)
	}
	// Link the replies: both dependencies carry the root's ID as ThreadID so
	// thread rendering can find the whole conversation from any member.
	if err := s.AddDependency(ctx, &types.Dependency{IssueID: reply1.ID, DependsOnID: root.ID, Type: types.DepRepliesTo, ThreadID: root.ID}, "test-user"); err != nil {
		s.Close()
		t.Fatalf("AddDependency reply1->root: %v", err)
	}
	if err := s.AddDependency(ctx, &types.Dependency{IssueID: reply2.ID, DependsOnID: reply1.ID, Type: types.DepRepliesTo, ThreadID: root.ID}, "test-user"); err != nil {
		s.Close()
		t.Fatalf("AddDependency reply2->reply1: %v", err)
	}
	// Close the store before invoking the CLI so it can open the DB itself.
	_ = s.Close()
	out, _ := runBDForCoverage(t, dir, "show", "--allow-stale", reply2.ID, "--thread")
	if !strings.Contains(out, "Thread") || !strings.Contains(out, "Total: 3 messages") {
		t.Fatalf("expected thread output, got: %s", out)
	}
	if !strings.Contains(out, root.ID) || !strings.Contains(out, reply1.ID) || !strings.Contains(out, reply2.ID) {
		t.Fatalf("expected thread output to include message IDs, got: %s", out)
	}
}

View File

@@ -30,36 +30,36 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
// Create "remote" repository
remoteDir := filepath.Join(tempDir, "remote")
if err := os.MkdirAll(remoteDir, 0750); err != nil {
t.Fatalf("Failed to create remote dir: %v", err)
}
// Initialize remote git repo
runGitCmd(t, remoteDir, "init", "--bare")
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create "clone1" repository (Agent A)
clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir)
// Initialize beads in clone1
clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err)
}
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close()
ctx := context.Background()
if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Create an open issue in clone1
issue := &types.Issue{
Title: "Test daemon auto-import",
@@ -73,39 +73,39 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
issueID := issue.ID
// Export to JSONL
jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
// Commit and push from clone1
runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Add test issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Create "clone2" repository (Agent B)
clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir)
// Initialize empty database in clone2
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close()
if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Import initial JSONL in clone2
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import: %v", err)
}
// Verify issue exists in clone2
initialIssue, err := clone2Store.GetIssue(ctx, issueID)
if err != nil {
@@ -114,27 +114,27 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
if initialIssue.Status != types.StatusOpen {
t.Errorf("Expected status open, got %s", initialIssue.Status)
}
// NOW THE CRITICAL TEST: Agent A closes the issue and pushes
t.Run("DaemonAutoImportsAfterGitPull", func(t *testing.T) {
// Agent A closes the issue
if err := clone1Store.CloseIssue(ctx, issueID, "Completed", "agent-a"); err != nil {
t.Fatalf("Failed to close issue: %v", err)
}
// Agent A exports to JSONL
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export after close: %v", err)
}
// Agent A commits and pushes
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Close issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B does git pull (updates JSONL on disk)
runGitCmd(t, clone2Dir, "pull")
// Wait for filesystem to settle after git operations
// Windows has lower filesystem timestamp precision (typically 100ms)
// and file I/O may be slower, so we need a longer delay
@@ -143,23 +143,23 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
} else {
time.Sleep(50 * time.Millisecond)
}
// Start daemon server in clone2
socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
os.Remove(socketPath) // Ensure clean state
server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath)
// Start server in background
serverCtx, serverCancel := context.WithCancel(context.Background())
defer serverCancel()
go func() {
if err := server.Start(serverCtx); err != nil {
t.Logf("Server error: %v", err)
}
}()
// Wait for server to be ready
for i := 0; i < 50; i++ {
time.Sleep(10 * time.Millisecond)
@@ -167,7 +167,7 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
break
}
}
// Simulate a daemon request (like "bd show <issue>")
// The daemon should auto-import the updated JSONL before responding
client, err := rpc.TryConnect(socketPath)
@@ -178,15 +178,15 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatal("Client is nil")
}
defer client.Close()
client.SetDatabasePath(clone2DBPath) // Route to correct database
// Make a request that triggers auto-import check
resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil {
t.Fatalf("Failed to get issue from daemon: %v", err)
}
// Parse response
var issue types.Issue
issueJSON, err := json.Marshal(resp.Data)
@@ -196,25 +196,25 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
if err := json.Unmarshal(issueJSON, &issue); err != nil {
t.Fatalf("Failed to unmarshal issue: %v", err)
}
status := issue.Status
// CRITICAL ASSERTION: Daemon should return CLOSED status from JSONL
// not stale OPEN status from SQLite
if status != types.StatusClosed {
t.Errorf("DAEMON AUTO-IMPORT FAILED: Expected status 'closed' but got '%s'", status)
t.Errorf("This means daemon is serving stale SQLite data instead of auto-importing JSONL")
// Double-check JSONL has correct status
jsonlData, _ := os.ReadFile(clone2JSONLPath)
t.Logf("JSONL content: %s", string(jsonlData))
// Double-check what's in SQLite
directIssue, _ := clone2Store.GetIssue(ctx, issueID)
t.Logf("SQLite status: %s", directIssue.Status)
}
})
// Additional test: Verify multiple rapid changes
t.Run("DaemonHandlesRapidUpdates", func(t *testing.T) {
// Agent A updates priority
@@ -223,18 +223,18 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
}, "agent-a"); err != nil {
t.Fatalf("Failed to update priority: %v", err)
}
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B pulls
runGitCmd(t, clone2Dir, "pull")
// Query via daemon - should see priority 0
// (Execute forces auto-import synchronously)
socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
@@ -243,18 +243,18 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatalf("Failed to connect to daemon: %v", err)
}
defer client.Close()
client.SetDatabasePath(clone2DBPath) // Route to correct database
resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil {
t.Fatalf("Failed to get issue from daemon: %v", err)
}
var issue types.Issue
issueJSON, _ := json.Marshal(resp.Data)
json.Unmarshal(issueJSON, &issue)
if issue.Priority != 0 {
t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority)
}
@@ -273,23 +273,23 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
// Setup remote and two clones
remoteDir := filepath.Join(tempDir, "remote")
os.MkdirAll(remoteDir, 0750)
runGitCmd(t, remoteDir, "init", "--bare")
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir)
clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir)
// Initialize beads in both clones
ctx := context.Background()
// Clone1 setup
clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
os.MkdirAll(clone1BeadsDir, 0750)
@@ -297,7 +297,7 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close()
clone1Store.SetMetadata(ctx, "issue_prefix", "test")
// Clone2 setup
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
os.MkdirAll(clone2BeadsDir, 0750)
@@ -305,7 +305,7 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close()
clone2Store.SetMetadata(ctx, "issue_prefix", "test")
// Agent A creates issue and pushes
issue2 := &types.Issue{
Title: "Shared issue",
@@ -317,18 +317,18 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
}
clone1Store.CreateIssue(ctx, issue2, "agent-a")
issueID := issue2.ID
clone1JSONLPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath)
runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Initial issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B pulls and imports
runGitCmd(t, clone2Dir, "pull")
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath)
// THE CORRUPTION SCENARIO:
// 1. Agent A closes the issue and pushes
clone1Store.CloseIssue(ctx, issueID, "Done", "agent-a")
@@ -336,31 +336,31 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Close issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// 2. Agent B does git pull (JSONL updated on disk)
runGitCmd(t, clone2Dir, "pull")
// Wait for filesystem to settle after git operations
time.Sleep(50 * time.Millisecond)
// 3. Agent B daemon exports STALE data (if auto-import doesn't work)
// This would overwrite Agent A's closure with old "open" status
// Start daemon in clone2
socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
os.Remove(socketPath)
server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath)
serverCtx, serverCancel := context.WithCancel(context.Background())
defer serverCancel()
go func() {
if err := server.Start(serverCtx); err != nil {
t.Logf("Server error: %v", err)
}
}()
// Wait for server
for i := 0; i < 50; i++ {
time.Sleep(10 * time.Millisecond)
@@ -368,43 +368,43 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
break
}
}
// Trigger daemon operation (should auto-import first)
client, err := rpc.TryConnect(socketPath)
if err != nil {
t.Fatalf("Failed to connect: %v", err)
}
defer client.Close()
client.SetDatabasePath(clone2DBPath)
resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil {
t.Fatalf("Failed to get issue: %v", err)
}
var issue types.Issue
issueJSON, _ := json.Marshal(resp.Data)
json.Unmarshal(issueJSON, &issue)
status := issue.Status
// If daemon didn't auto-import, this would be "open" (stale)
// With the fix, it should be "closed" (fresh from JSONL)
if status != types.StatusClosed {
t.Errorf("DATA CORRUPTION DETECTED: Daemon has stale status '%s' instead of 'closed'", status)
t.Error("If daemon exports this stale data, it will overwrite Agent A's changes on next push")
}
// Now simulate daemon export (which happens on timer)
// With auto-import working, this export should have fresh data
exportIssuesToJSONL(ctx, clone2Store, clone2JSONLPath)
// Read back JSONL to verify it has correct status
data, _ := os.ReadFile(clone2JSONLPath)
var exportedIssue types.Issue
json.NewDecoder(bytes.NewReader(data)).Decode(&exportedIssue)
if exportedIssue.Status != types.StatusClosed {
t.Errorf("CORRUPTION: Exported JSONL has wrong status '%s', would overwrite remote", exportedIssue.Status)
}

View File

@@ -31,6 +31,19 @@ var (
daemonStartFailures int
)
var (
executableFn = os.Executable
execCommandFn = exec.Command
openFileFn = os.OpenFile
findProcessFn = os.FindProcess
removeFileFn = os.Remove
configureDaemonProcessFn = configureDaemonProcess
waitForSocketReadinessFn = waitForSocketReadiness
startDaemonProcessFn = startDaemonProcess
isDaemonRunningFn = isDaemonRunning
sendStopSignalFn = sendStopSignal
)
// shouldAutoStartDaemon checks if daemon auto-start is enabled
func shouldAutoStartDaemon() bool {
// Check BEADS_NO_DAEMON first (escape hatch for single-user workflows)
@@ -53,7 +66,6 @@ func shouldAutoStartDaemon() bool {
return config.GetBool("auto-start-daemon") // Defaults to true
}
// restartDaemonForVersionMismatch stops the old daemon and starts a new one
// Returns true if restart was successful
func restartDaemonForVersionMismatch() bool {
@@ -67,17 +79,17 @@ func restartDaemonForVersionMismatch() bool {
// Check if daemon is running and stop it
forcedKill := false
if isRunning, pid := isDaemonRunning(pidFile); isRunning {
if isRunning, pid := isDaemonRunningFn(pidFile); isRunning {
debug.Logf("stopping old daemon (PID %d)", pid)
process, err := os.FindProcess(pid)
process, err := findProcessFn(pid)
if err != nil {
debug.Logf("failed to find process: %v", err)
return false
}
// Send stop signal
if err := sendStopSignal(process); err != nil {
if err := sendStopSignalFn(process); err != nil {
debug.Logf("failed to signal daemon: %v", err)
return false
}
@@ -85,14 +97,14 @@ func restartDaemonForVersionMismatch() bool {
// Wait for daemon to stop, then force kill
for i := 0; i < daemonShutdownAttempts; i++ {
time.Sleep(daemonShutdownPollInterval)
if isRunning, _ := isDaemonRunning(pidFile); !isRunning {
if isRunning, _ := isDaemonRunningFn(pidFile); !isRunning {
debug.Logf("old daemon stopped successfully")
break
}
}
// Force kill if still running
if isRunning, _ := isDaemonRunning(pidFile); isRunning {
if isRunning, _ := isDaemonRunningFn(pidFile); isRunning {
debug.Logf("force killing old daemon")
_ = process.Kill()
forcedKill = true
@@ -101,19 +113,19 @@ func restartDaemonForVersionMismatch() bool {
// Clean up stale socket and PID file after force kill or if not running
if forcedKill || !isDaemonRunningQuiet(pidFile) {
_ = os.Remove(socketPath)
_ = os.Remove(pidFile)
_ = removeFileFn(socketPath)
_ = removeFileFn(pidFile)
}
// Start new daemon with current binary version
exe, err := os.Executable()
exe, err := executableFn()
if err != nil {
debug.Logf("failed to get executable path: %v", err)
return false
}
args := []string{"daemon", "--start"}
cmd := exec.Command(exe, args...)
cmd := execCommandFn(exe, args...)
cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1")
// Set working directory to database directory so daemon finds correct DB
@@ -121,9 +133,9 @@ func restartDaemonForVersionMismatch() bool {
cmd.Dir = filepath.Dir(dbPath)
}
configureDaemonProcess(cmd)
configureDaemonProcessFn(cmd)
devNull, err := os.OpenFile(os.DevNull, os.O_RDWR, 0)
devNull, err := openFileFn(os.DevNull, os.O_RDWR, 0)
if err == nil {
cmd.Stdin = devNull
cmd.Stdout = devNull
@@ -140,7 +152,7 @@ func restartDaemonForVersionMismatch() bool {
go func() { _ = cmd.Wait() }()
// Wait for daemon to be ready using shared helper
if waitForSocketReadiness(socketPath, 5*time.Second) {
if waitForSocketReadinessFn(socketPath, 5*time.Second) {
debug.Logf("new daemon started successfully")
return true
}
@@ -153,7 +165,7 @@ func restartDaemonForVersionMismatch() bool {
// isDaemonRunningQuiet checks if daemon is running without output
func isDaemonRunningQuiet(pidFile string) bool {
isRunning, _ := isDaemonRunning(pidFile)
isRunning, _ := isDaemonRunningFn(pidFile)
return isRunning
}
@@ -185,7 +197,7 @@ func tryAutoStartDaemon(socketPath string) bool {
}
socketPath = determineSocketPath(socketPath)
return startDaemonProcess(socketPath)
return startDaemonProcessFn(socketPath)
}
func debugLog(msg string, args ...interface{}) {
@@ -269,21 +281,21 @@ func determineSocketPath(socketPath string) string {
}
func startDaemonProcess(socketPath string) bool {
binPath, err := os.Executable()
binPath, err := executableFn()
if err != nil {
binPath = os.Args[0]
}
args := []string{"daemon", "--start"}
cmd := exec.Command(binPath, args...)
cmd := execCommandFn(binPath, args...)
setupDaemonIO(cmd)
if dbPath != "" {
cmd.Dir = filepath.Dir(dbPath)
}
configureDaemonProcess(cmd)
configureDaemonProcessFn(cmd)
if err := cmd.Start(); err != nil {
recordDaemonStartFailure()
debugLog("failed to start daemon: %v", err)
@@ -292,7 +304,7 @@ func startDaemonProcess(socketPath string) bool {
go func() { _ = cmd.Wait() }()
if waitForSocketReadiness(socketPath, 5*time.Second) {
if waitForSocketReadinessFn(socketPath, 5*time.Second) {
recordDaemonStartSuccess()
return true
}
@@ -306,7 +318,7 @@ func startDaemonProcess(socketPath string) bool {
}
func setupDaemonIO(cmd *exec.Cmd) {
devNull, err := os.OpenFile(os.DevNull, os.O_RDWR, 0)
devNull, err := openFileFn(os.DevNull, os.O_RDWR, 0)
if err == nil {
cmd.Stdout = devNull
cmd.Stderr = devNull

View File

@@ -0,0 +1,331 @@
package main
import (
"bytes"
"context"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/steveyegge/beads/internal/config"
)
// tempSockDir returns a fresh temporary directory suitable for Unix domain
// sockets. It prefers /tmp (shorter paths help stay under OS socket-path
// length limits) and falls back to os.TempDir() on Windows or when /tmp is
// not available. The directory is removed when the test finishes.
func tempSockDir(t *testing.T) string {
	t.Helper()
	base := os.TempDir()
	if runtime.GOOS != windowsOS {
		if _, statErr := os.Stat("/tmp"); statErr == nil {
			base = "/tmp"
		}
	}
	dir, err := os.MkdirTemp(base, "bd-sock-*")
	if err != nil {
		t.Fatalf("MkdirTemp: %v", err)
	}
	t.Cleanup(func() { _ = os.RemoveAll(dir) })
	return dir
}
// startTestRPCServer spins up an in-process RPC daemon server backed by a
// fresh temp workspace (.beads dir, test.db store). It returns the server's
// socket path and a cleanup func that cancels the server context and stops
// the server; callers must invoke cleanup (typically via defer).
func startTestRPCServer(t *testing.T) (socketPath string, cleanup func()) {
	t.Helper()
	tmpDir := tempSockDir(t)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0o750); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	socketPath = filepath.Join(beadsDir, "bd.sock")
	db := filepath.Join(beadsDir, "test.db")
	store := newTestStore(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	log := newTestLogger()
	server, _, err := startRPCServer(ctx, socketPath, store, tmpDir, db, log)
	if err != nil {
		// Cancel the context even on failure so nothing leaks.
		cancel()
		t.Fatalf("startRPCServer: %v", err)
	}
	cleanup = func() {
		cancel()
		if server != nil {
			_ = server.Stop()
		}
	}
	return socketPath, cleanup
}
func captureStderr(t *testing.T, fn func()) string {
t.Helper()
old := os.Stderr
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("os.Pipe: %v", err)
}
os.Stderr = w
var buf bytes.Buffer
done := make(chan struct{})
go func() {
_, _ = io.Copy(&buf, r)
close(done)
}()
fn()
_ = w.Close()
os.Stderr = old
<-done
_ = r.Close()
return buf.String()
}
// TestDaemonAutostart_AcquireStartLock_CreatesAndCleansStale verifies that
// acquireStartLock creates a lock file recording this process's PID, and that
// a stale lock (holding a dead/invalid PID such as 0) is cleaned up and
// re-acquired.
func TestDaemonAutostart_AcquireStartLock_CreatesAndCleansStale(t *testing.T) {
	tmpDir := t.TempDir()
	lockPath := filepath.Join(tmpDir, "bd.sock.startlock")
	// Precondition: the lock must not exist yet, so reading it must fail.
	// (The original check here had an empty body and asserted nothing.)
	if pid, err := readPIDFromFile(lockPath); err == nil {
		t.Fatalf("expected lock file to be absent, but read PID %d", pid)
	}
	if !acquireStartLock(lockPath, filepath.Join(tmpDir, "bd.sock")) {
		t.Fatalf("expected acquireStartLock to succeed")
	}
	got, err := readPIDFromFile(lockPath)
	if err != nil {
		t.Fatalf("readPIDFromFile: %v", err)
	}
	if got != os.Getpid() {
		t.Fatalf("expected lock PID %d, got %d", os.Getpid(), got)
	}
	// Stale lock: dead/unreadable PID should be removed and recreated.
	if err := os.WriteFile(lockPath, []byte("0\n"), 0o600); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}
	if !acquireStartLock(lockPath, filepath.Join(tmpDir, "bd.sock")) {
		t.Fatalf("expected acquireStartLock to succeed on stale lock")
	}
	got, err = readPIDFromFile(lockPath)
	if err != nil {
		t.Fatalf("readPIDFromFile: %v", err)
	}
	if got != os.Getpid() {
		t.Fatalf("expected recreated lock PID %d, got %d", os.Getpid(), got)
	}
}
// TestDaemonAutostart_SocketHealthAndReadiness checks the three socket
// health probes against a live RPC server, then against a path where no
// socket was ever created.
func TestDaemonAutostart_SocketHealthAndReadiness(t *testing.T) {
	socketPath, cleanup := startTestRPCServer(t)
	defer cleanup()

	// All probes must succeed against the live server; evaluate lazily and
	// in order so a failure stops the sequence, as the original did.
	probes := []struct {
		name string
		ok   func() bool
	}{
		{"canDialSocket", func() bool { return canDialSocket(socketPath, 500*time.Millisecond) }},
		{"isDaemonHealthy", func() bool { return isDaemonHealthy(socketPath) }},
		{"waitForSocketReadiness", func() bool { return waitForSocketReadiness(socketPath, 500*time.Millisecond) }},
	}
	for _, p := range probes {
		if !p.ok() {
			t.Fatalf("expected %s to succeed", p.name)
		}
	}

	// A path with no socket behind it must fail fast / time out.
	missing := filepath.Join(tempSockDir(t), "missing.sock")
	if canDialSocket(missing, 50*time.Millisecond) {
		t.Fatalf("expected canDialSocket to fail")
	}
	if waitForSocketReadiness(missing, 200*time.Millisecond) {
		t.Fatalf("expected waitForSocketReadiness to time out")
	}
}
// TestDaemonAutostart_HandleExistingSocket verifies that an existing socket
// backed by a healthy daemon is accepted (returns true).
func TestDaemonAutostart_HandleExistingSocket(t *testing.T) {
	sock, cleanup := startTestRPCServer(t)
	defer cleanup()
	if ok := handleExistingSocket(sock); !ok {
		t.Fatalf("expected handleExistingSocket true for running daemon")
	}
}
// TestDaemonAutostart_HandleExistingSocket_StaleCleansUp plants a stale
// "socket" (a plain file) plus a PID file for a PID that cannot be alive,
// and verifies that handleExistingSocket rejects the socket and removes
// both artifacts.
func TestDaemonAutostart_HandleExistingSocket_StaleCleansUp(t *testing.T) {
	stateDir := filepath.Join(t.TempDir(), ".beads")
	if err := os.MkdirAll(stateDir, 0o750); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	sockPath := filepath.Join(stateDir, "bd.sock")
	pidPath := filepath.Join(stateDir, "daemon.pid")
	// A regular file where the socket should be marks it as stale.
	if err := os.WriteFile(sockPath, []byte("not-a-socket"), 0o600); err != nil {
		t.Fatalf("WriteFile socket: %v", err)
	}
	// PID 0 can never be a live daemon.
	if err := os.WriteFile(pidPath, []byte("0\n"), 0o600); err != nil {
		t.Fatalf("WriteFile pid: %v", err)
	}
	if handleExistingSocket(sockPath) {
		t.Fatalf("expected false for stale socket")
	}
	if _, err := os.Stat(sockPath); !os.IsNotExist(err) {
		t.Fatalf("expected socket removed")
	}
	if _, err := os.Stat(pidPath); !os.IsNotExist(err) {
		t.Fatalf("expected pidfile removed")
	}
}
// TestDaemonAutostart_TryAutoStartDaemon_EarlyExits covers the two fast
// paths of tryAutoStartDaemon: backoff after a recent failed start, and an
// immediate success when a healthy daemon is already listening.
func TestDaemonAutostart_TryAutoStartDaemon_EarlyExits(t *testing.T) {
	savedFailures := daemonStartFailures
	savedAttempt := lastDaemonStartAttempt
	defer func() {
		daemonStartFailures = savedFailures
		lastDaemonStartAttempt = savedAttempt
	}()

	// A recent failure puts us inside the backoff window: no restart.
	daemonStartFailures = 1
	lastDaemonStartAttempt = time.Now()
	if tryAutoStartDaemon(filepath.Join(t.TempDir(), "bd.sock")) {
		t.Fatalf("expected tryAutoStartDaemon to skip due to backoff")
	}

	// Clean slate with a healthy daemon behind the socket: short-circuit true.
	daemonStartFailures = 0
	lastDaemonStartAttempt = time.Time{}
	sock, cleanup := startTestRPCServer(t)
	defer cleanup()
	if !tryAutoStartDaemon(sock) {
		t.Fatalf("expected tryAutoStartDaemon true when daemon already healthy")
	}
}
// TestDaemonAutostart_MiscHelpers exercises small helpers:
// determineSocketPath identity behavior and getDebounceDuration's default
// vs. explicit configuration.
func TestDaemonAutostart_MiscHelpers(t *testing.T) {
	if got := determineSocketPath("/x"); got != "/x" {
		t.Fatalf("determineSocketPath should be identity")
	}
	if err := config.Initialize(); err != nil {
		t.Fatalf("config.Initialize: %v", err)
	}
	prev := config.GetDuration("flush-debounce")
	defer config.Set("flush-debounce", prev)

	// A zero debounce falls back to the 5s default.
	config.Set("flush-debounce", 0)
	if got := getDebounceDuration(); got != 5*time.Second {
		t.Fatalf("expected default debounce 5s, got %v", got)
	}
	// An explicit debounce is honored as configured.
	config.Set("flush-debounce", 2*time.Second)
	if got := getDebounceDuration(); got != 2*time.Second {
		t.Fatalf("expected debounce 2s, got %v", got)
	}
}
// TestDaemonAutostart_EmitVerboseWarning checks which fallback reasons
// produce a stderr warning and which stay silent.
func TestDaemonAutostart_EmitVerboseWarning(t *testing.T) {
	saved := daemonStatus
	defer func() { daemonStatus = saved }()
	daemonStatus.SocketPath = "/tmp/bd.sock"

	cases := []struct {
		reason     string
		wantOutput bool
	}{
		{FallbackConnectFailed, true},
		{FallbackHealthFailed, true},
		{FallbackAutoStartDisabled, true},
		{FallbackAutoStartFailed, true},
		{FallbackDaemonUnsupported, true},
		{FallbackWorktreeSafety, false},
		{FallbackFlagNoDaemon, false},
	}
	for _, tc := range cases {
		t.Run(tc.reason, func(t *testing.T) {
			daemonStatus.FallbackReason = tc.reason
			got := captureStderr(t, emitVerboseWarning)
			switch {
			case tc.wantOutput && got == "":
				t.Fatalf("expected output")
			case !tc.wantOutput && got != "":
				t.Fatalf("expected no output, got %q", got)
			}
		})
	}
}
// TestDaemonAutostart_StartDaemonProcess_Stubbed verifies startDaemonProcess
// succeeds when the exec and readiness seams are stubbed out, without ever
// spawning a real daemon.
func TestDaemonAutostart_StartDaemonProcess_Stubbed(t *testing.T) {
	savedExec := execCommandFn
	savedWait := waitForSocketReadinessFn
	savedCfg := configureDaemonProcessFn
	defer func() {
		execCommandFn = savedExec
		waitForSocketReadinessFn = savedWait
		configureDaemonProcessFn = savedCfg
	}()
	// "Spawn" the test binary with a never-matching -test.run pattern so the
	// child exits immediately; report the socket as ready without waiting.
	execCommandFn = func(string, ...string) *exec.Cmd {
		return exec.Command(os.Args[0], "-test.run=^$")
	}
	waitForSocketReadinessFn = func(string, time.Duration) bool { return true }
	configureDaemonProcessFn = func(*exec.Cmd) {}
	if ok := startDaemonProcess(filepath.Join(t.TempDir(), "bd.sock")); !ok {
		t.Fatalf("expected startDaemonProcess true when readiness stubbed")
	}
}
// TestDaemonAutostart_RestartDaemonForVersionMismatch_Stubbed verifies that
// restartDaemonForVersionMismatch reports success and cleans up stale PID
// and socket files, with all process-spawning seams stubbed so no real
// daemon is ever forked.
func TestDaemonAutostart_RestartDaemonForVersionMismatch_Stubbed(t *testing.T) {
	// Save and restore every package-level seam this test overrides.
	oldExec := execCommandFn
	oldWait := waitForSocketReadinessFn
	oldRun := isDaemonRunningFn
	oldCfg := configureDaemonProcessFn
	defer func() {
		execCommandFn = oldExec
		waitForSocketReadinessFn = oldWait
		isDaemonRunningFn = oldRun
		configureDaemonProcessFn = oldCfg
	}()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0o750); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	// Point the package-level dbPath at the temp workspace; presumably
	// getPIDFilePath/getSocketPath derive their locations from it — TODO
	// confirm against their definitions.
	oldDB := dbPath
	defer func() { dbPath = oldDB }()
	dbPath = filepath.Join(beadsDir, "test.db")
	pidFile, err := getPIDFilePath()
	if err != nil {
		t.Fatalf("getPIDFilePath: %v", err)
	}
	sock := getSocketPath()
	// Plant stale artifacts: a PID unlikely to be a live process and a plain
	// file where the socket would be.
	if err := os.WriteFile(pidFile, []byte("999999\n"), 0o600); err != nil {
		t.Fatalf("WriteFile pid: %v", err)
	}
	if err := os.WriteFile(sock, []byte("stale"), 0o600); err != nil {
		t.Fatalf("WriteFile sock: %v", err)
	}
	// Stub the restart machinery: "spawn" the test binary with a no-match
	// -test.run pattern (exits immediately), report readiness at once, and
	// report that no daemon is currently running.
	execCommandFn = func(string, ...string) *exec.Cmd {
		return exec.Command(os.Args[0], "-test.run=^$")
	}
	waitForSocketReadinessFn = func(string, time.Duration) bool { return true }
	isDaemonRunningFn = func(string) (bool, int) { return false, 0 }
	configureDaemonProcessFn = func(*exec.Cmd) {}
	if !restartDaemonForVersionMismatch() {
		t.Fatalf("expected restartDaemonForVersionMismatch true when stubbed")
	}
	// The stale PID file and socket must have been cleaned up by the restart.
	if _, err := os.Stat(pidFile); !os.IsNotExist(err) {
		t.Fatalf("expected pidfile removed")
	}
	if _, err := os.Stat(sock); !os.IsNotExist(err) {
		t.Fatalf("expected socket removed")
	}
}

View File

@@ -157,23 +157,26 @@ func TestDebouncer_MultipleSequentialTriggerCycles(t *testing.T) {
})
t.Cleanup(debouncer.Cancel)
debouncer.Trigger()
time.Sleep(40 * time.Millisecond)
if got := atomic.LoadInt32(&count); got != 1 {
t.Errorf("first cycle: got %d, want 1", got)
awaitCount := func(want int32) {
deadline := time.Now().Add(500 * time.Millisecond)
for time.Now().Before(deadline) {
if got := atomic.LoadInt32(&count); got >= want {
return
}
time.Sleep(5 * time.Millisecond)
}
got := atomic.LoadInt32(&count)
t.Fatalf("timeout waiting for count=%d (got %d)", want, got)
}
debouncer.Trigger()
time.Sleep(40 * time.Millisecond)
if got := atomic.LoadInt32(&count); got != 2 {
t.Errorf("second cycle: got %d, want 2", got)
}
awaitCount(1)
debouncer.Trigger()
time.Sleep(40 * time.Millisecond)
if got := atomic.LoadInt32(&count); got != 3 {
t.Errorf("third cycle: got %d, want 3", got)
}
awaitCount(2)
debouncer.Trigger()
awaitCount(3)
}
func TestDebouncer_CancelImmediatelyAfterTrigger(t *testing.T) {

View File

@@ -48,12 +48,12 @@ func TestSyncBranchCommitAndPush_NotConfigured(t *testing.T) {
// Create test issue
issue := &types.Issue{
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -122,12 +122,12 @@ func TestSyncBranchCommitAndPush_Success(t *testing.T) {
// Create test issue
issue := &types.Issue{
Title: "Test sync branch issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test sync branch issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -228,12 +228,12 @@ func TestSyncBranchCommitAndPush_EnvOverridesDB(t *testing.T) {
// Create test issue and export JSONL
issue := &types.Issue{
Title: "Env override issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Env override issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -303,12 +303,12 @@ func TestSyncBranchCommitAndPush_NoChanges(t *testing.T) {
}
issue := &types.Issue{
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -380,12 +380,12 @@ func TestSyncBranchCommitAndPush_WorktreeHealthCheck(t *testing.T) {
}
issue := &types.Issue{
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -497,7 +497,7 @@ func TestSyncBranchPull_Success(t *testing.T) {
if err := os.MkdirAll(remoteDir, 0755); err != nil {
t.Fatalf("Failed to create remote dir: %v", err)
}
runGitCmd(t, remoteDir, "init", "--bare")
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create clone1 (will push changes)
clone1Dir := filepath.Join(tmpDir, "clone1")
@@ -528,12 +528,12 @@ func TestSyncBranchPull_Success(t *testing.T) {
// Create issue in clone1
issue := &types.Issue{
Title: "Test sync pull issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test sync pull issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store1.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -639,7 +639,7 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
tmpDir := t.TempDir()
remoteDir := filepath.Join(tmpDir, "remote")
os.MkdirAll(remoteDir, 0755)
runGitCmd(t, remoteDir, "init", "--bare")
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Clone1: Agent A
clone1Dir := filepath.Join(tmpDir, "clone1")
@@ -660,12 +660,12 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
// Agent A creates issue
issue := &types.Issue{
Title: "E2E test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "E2E test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
store1.CreateIssue(ctx, issue, "agent-a")
issueID := issue.ID
@@ -914,7 +914,7 @@ func TestSyncBranchMultipleConcurrentClones(t *testing.T) {
tmpDir := t.TempDir()
remoteDir := filepath.Join(tmpDir, "remote")
os.MkdirAll(remoteDir, 0755)
runGitCmd(t, remoteDir, "init", "--bare")
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
syncBranch := "beads-sync"
@@ -1454,7 +1454,7 @@ func TestGitPushFromWorktree_FetchRebaseRetry(t *testing.T) {
// Create a "remote" bare repository
remoteDir := t.TempDir()
runGitCmd(t, remoteDir, "init", "--bare")
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create first clone (simulates another developer's clone)
clone1Dir := t.TempDir()
@@ -1524,7 +1524,7 @@ func TestGitPushFromWorktree_FetchRebaseRetry(t *testing.T) {
// Now try to push from worktree - this should trigger the fetch-rebase-retry logic
// because the remote has commits that the local worktree doesn't have
err := gitPushFromWorktree(ctx, worktreePath, "beads-sync")
err := gitPushFromWorktree(ctx, worktreePath, "beads-sync", "")
if err != nil {
t.Fatalf("gitPushFromWorktree failed: %v (expected fetch-rebase-retry to succeed)", err)
}

View File

@@ -8,6 +8,7 @@ import (
"context"
"encoding/json"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
@@ -897,11 +898,7 @@ func setupDaemonTestEnvForDelete(t *testing.T) (context.Context, context.CancelF
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf("[daemon] "+format, args...)
},
}
log := daemonLogger{logger: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{Level: slog.LevelInfo}))}
server, _, err := startRPCServer(ctx, socketPath, testStore, tmpDir, testDBPath, log)
if err != nil {

View File

@@ -43,8 +43,8 @@ type doctorResult struct {
Checks []doctorCheck `json:"checks"`
OverallOK bool `json:"overall_ok"`
CLIVersion string `json:"cli_version"`
Timestamp string `json:"timestamp,omitempty"` // bd-9cc: ISO8601 timestamp for historical tracking
Platform map[string]string `json:"platform,omitempty"` // bd-9cc: platform info for debugging
Timestamp string `json:"timestamp,omitempty"` // bd-9cc: ISO8601 timestamp for historical tracking
Platform map[string]string `json:"platform,omitempty"` // bd-9cc: platform info for debugging
}
var (
@@ -353,6 +353,42 @@ func applyFixesInteractive(path string, issues []doctorCheck) {
// applyFixList applies a list of fixes and reports results
func applyFixList(path string, fixes []doctorCheck) {
// Apply fixes in a dependency-aware order.
// Rough dependency chain:
// permissions/daemon cleanup → config sanity → DB integrity/migrations → DB↔JSONL sync.
order := []string{
"Permissions",
"Daemon Health",
"Database Config",
"JSONL Config",
"Database Integrity",
"Database",
"Schema Compatibility",
"JSONL Integrity",
"DB-JSONL Sync",
}
priority := make(map[string]int, len(order))
for i, name := range order {
priority[name] = i
}
slices.SortStableFunc(fixes, func(a, b doctorCheck) int {
pa, oka := priority[a.Name]
if !oka {
pa = 1000
}
pb, okb := priority[b.Name]
if !okb {
pb = 1000
}
if pa < pb {
return -1
}
if pa > pb {
return 1
}
return 0
})
fixedCount := 0
errorCount := 0
@@ -373,6 +409,8 @@ func applyFixList(path string, fixes []doctorCheck) {
err = fix.Permissions(path)
case "Database":
err = fix.DatabaseVersion(path)
case "Database Integrity":
err = fix.DatabaseIntegrity(path)
case "Schema Compatibility":
err = fix.SchemaCompatibility(path)
case "Repo Fingerprint":
@@ -387,6 +425,8 @@ func applyFixList(path string, fixes []doctorCheck) {
err = fix.DatabaseConfig(path)
case "JSONL Config":
err = fix.LegacyJSONLConfig(path)
case "JSONL Integrity":
err = fix.JSONLIntegrity(path)
case "Deletions Manifest":
err = fix.MigrateTombstones(path)
case "Untracked Files":
@@ -687,6 +727,13 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, configValuesCheck)
// Don't fail overall check for config value warnings, just warn
// Check 7b: JSONL integrity (malformed lines, missing IDs)
jsonlIntegrityCheck := convertWithCategory(doctor.CheckJSONLIntegrity(path), doctor.CategoryData)
result.Checks = append(result.Checks, jsonlIntegrityCheck)
if jsonlIntegrityCheck.Status == statusWarning || jsonlIntegrityCheck.Status == statusError {
result.OverallOK = false
}
// Check 8: Daemon health
daemonCheck := convertWithCategory(doctor.CheckDaemonStatus(path, Version), doctor.CategoryRuntime)
result.Checks = append(result.Checks, daemonCheck)
@@ -750,6 +797,16 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, mergeDriverCheck)
// Don't fail overall check for merge driver, just warn
// Check 15a: Git working tree cleanliness (AGENTS.md hygiene)
gitWorkingTreeCheck := convertWithCategory(doctor.CheckGitWorkingTree(path), doctor.CategoryGit)
result.Checks = append(result.Checks, gitWorkingTreeCheck)
// Don't fail overall check for dirty working tree, just warn
// Check 15b: Git upstream sync (ahead/behind/diverged)
gitUpstreamCheck := convertWithCategory(doctor.CheckGitUpstream(path), doctor.CategoryGit)
result.Checks = append(result.Checks, gitUpstreamCheck)
// Don't fail overall check for upstream drift, just warn
// Check 16: Metadata.json version tracking (bd-u4sb)
metadataCheck := convertWithCategory(doctor.CheckMetadataVersionTracking(path, Version), doctor.CategoryMetadata)
result.Checks = append(result.Checks, metadataCheck)

View File

@@ -316,6 +316,10 @@ func checkMetadataConfigValues(repoPath string) []string {
// Validate jsonl_export filename
if cfg.JSONLExport != "" {
switch cfg.JSONLExport {
case "deletions.jsonl", "interactions.jsonl", "molecules.jsonl":
issues = append(issues, fmt.Sprintf("metadata.json jsonl_export: %q is a system file and should not be configured as a JSONL export (expected issues.jsonl)", cfg.JSONLExport))
}
if strings.Contains(cfg.JSONLExport, string(os.PathSeparator)) || strings.Contains(cfg.JSONLExport, "/") {
issues = append(issues, fmt.Sprintf("metadata.json jsonl_export: %q should be a filename, not a path", cfg.JSONLExport))
}
@@ -353,7 +357,7 @@ func checkDatabaseConfigValues(repoPath string) []string {
}
// Open database in read-only mode
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return issues // Can't open database, skip
}

View File

@@ -213,6 +213,21 @@ func TestCheckMetadataConfigValues(t *testing.T) {
t.Error("expected issues for wrong jsonl extension")
}
})
t.Run("jsonl_export cannot be system file", func(t *testing.T) {
metadataContent := `{
"database": "beads.db",
"jsonl_export": "interactions.jsonl"
}`
if err := os.WriteFile(filepath.Join(beadsDir, "metadata.json"), []byte(metadataContent), 0644); err != nil {
t.Fatalf("failed to write metadata.json: %v", err)
}
issues := checkMetadataConfigValues(tmpDir)
if len(issues) == 0 {
t.Error("expected issues for system jsonl_export")
}
})
}
func contains(s, substr string) bool {

View File

@@ -155,9 +155,9 @@ func CheckSchemaCompatibility(path string) DoctorCheck {
}
}
// Open database (bd-ckvw: This will run migrations and schema probe)
// Open database (bd-ckvw: schema probe)
// Note: We can't use the global 'store' because doctor can check arbitrary paths
db, err := sql.Open("sqlite3", "file:"+dbPath+"?_pragma=foreign_keys(ON)&_pragma=busy_timeout(30000)")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Schema Compatibility",
@@ -244,13 +244,14 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
}
// Open database in read-only mode for integrity check
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Failed to open database for integrity check",
Detail: err.Error(),
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
}
}
defer db.Close()
@@ -264,6 +265,7 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
Status: StatusError,
Message: "Failed to run integrity check",
Detail: err.Error(),
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
}
}
defer rows.Close()
@@ -292,22 +294,37 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
Status: StatusError,
Message: "Database corruption detected",
Detail: strings.Join(results, "; "),
Fix: "Database may need recovery. Export with 'bd export' if possible, then restore from backup or reinitialize",
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
}
}
// CheckDatabaseJSONLSync checks if database and JSONL are in sync
func CheckDatabaseJSONLSync(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
// Find JSONL file
var jsonlPath string
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
testPath := filepath.Join(beadsDir, name)
if _, err := os.Stat(testPath); err == nil {
jsonlPath = testPath
break
// Resolve database path (respects metadata.json override).
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
}
// Find JSONL file (respects metadata.json override when set).
jsonlPath := ""
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
p := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
}
if jsonlPath == "" {
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
testPath := filepath.Join(beadsDir, name)
if _, err := os.Stat(testPath); err == nil {
jsonlPath = testPath
break
}
}
}
@@ -333,7 +350,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
jsonlCount, jsonlPrefixes, jsonlErr := CountJSONLIssues(jsonlPath)
// Single database open for all queries (instead of 3 separate opens)
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
// Database can't be opened. If JSONL has issues, suggest recovery.
if jsonlErr == nil && jsonlCount > 0 {
@@ -390,11 +407,16 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
// Use JSONL error if we got it earlier
if jsonlErr != nil {
fixMsg := "Run 'bd doctor --fix' to attempt recovery"
if strings.Contains(jsonlErr.Error(), "malformed") {
fixMsg = "Run 'bd doctor --fix' to back up and regenerate the JSONL from the database"
}
return DoctorCheck{
Name: "DB-JSONL Sync",
Status: StatusWarning,
Message: "Unable to read JSONL file",
Detail: jsonlErr.Error(),
Fix: fixMsg,
}
}
@@ -501,7 +523,7 @@ func FixDBJSONLSync(path string) error {
// getDatabaseVersionFromPath reads the database version from the given path
func getDatabaseVersionFromPath(dbPath string) string {
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return "unknown"
}

View File

@@ -12,6 +12,13 @@ import (
// This prevents fork bombs when tests call functions that execute bd subcommands.
var ErrTestBinary = fmt.Errorf("running as test binary - cannot execute bd subcommands")
// newBdCmd builds a command for a child bd invocation that always runs in
// direct (no-daemon) mode, enforced both via the --no-daemon flag and the
// BEADS_NO_DAEMON environment variable.
func newBdCmd(bdBinary string, args ...string) *exec.Cmd {
	argv := make([]string, 0, len(args)+1)
	argv = append(argv, "--no-daemon")
	argv = append(argv, args...)
	cmd := exec.Command(bdBinary, argv...) // #nosec G204 -- bdBinary from validated executable path
	cmd.Env = append(os.Environ(), "BEADS_NO_DAEMON=1")
	return cmd
}
// getBdBinary returns the path to the bd binary to use for fix operations.
// It prefers the current executable to avoid command injection attacks.
// Returns ErrTestBinary if running as a test binary to prevent fork bombs.

View File

@@ -3,7 +3,6 @@ package fix
import (
"fmt"
"os"
"os/exec"
"path/filepath"
)
@@ -36,7 +35,7 @@ func Daemon(path string) error {
}
// Run bd daemons killall to clean up stale daemons
cmd := exec.Command(bdBinary, "daemons", "killall") // #nosec G204 -- bdBinary from validated executable path
cmd := newBdCmd(bdBinary, "daemons", "killall")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -32,6 +32,13 @@ func DatabaseConfig(path string) error {
fixed := false
// Never treat system JSONL files as a JSONL export configuration.
if isSystemJSONLFilename(cfg.JSONLExport) {
fmt.Printf(" Updating jsonl_export: %s → issues.jsonl\n", cfg.JSONLExport)
cfg.JSONLExport = "issues.jsonl"
fixed = true
}
// Check if configured JSONL exists
if cfg.JSONLExport != "" {
jsonlPath := cfg.JSONLPath(beadsDir)
@@ -99,7 +106,15 @@ func findActualJSONLFile(beadsDir string) string {
strings.Contains(lowerName, ".orig") ||
strings.Contains(lowerName, ".bak") ||
strings.Contains(lowerName, "~") ||
strings.HasPrefix(lowerName, "backup_") {
strings.HasPrefix(lowerName, "backup_") ||
// System files are not JSONL exports.
name == "deletions.jsonl" ||
name == "interactions.jsonl" ||
name == "molecules.jsonl" ||
// Git merge conflict artifacts (e.g., issues.base.jsonl, issues.left.jsonl)
strings.Contains(lowerName, ".base.jsonl") ||
strings.Contains(lowerName, ".left.jsonl") ||
strings.Contains(lowerName, ".right.jsonl") {
continue
}
@@ -121,6 +136,15 @@ func findActualJSONLFile(beadsDir string) string {
return candidates[0]
}
// isSystemJSONLFilename reports whether name is one of the internal JSONL
// files (deletions/interactions/molecules) that must never be treated as an
// issues export.
func isSystemJSONLFilename(name string) bool {
	return name == "deletions.jsonl" ||
		name == "interactions.jsonl" ||
		name == "molecules.jsonl"
}
// LegacyJSONLConfig migrates from legacy beads.jsonl to canonical issues.jsonl.
// This renames the file, updates metadata.json, and updates .gitattributes if present.
// bd-6xd: issues.jsonl is the canonical filename

View File

@@ -220,3 +220,53 @@ func TestLegacyJSONLConfig_UpdatesGitattributes(t *testing.T) {
t.Errorf("Expected .gitattributes to reference issues.jsonl, got: %q", string(content))
}
}
// TestFindActualJSONLFile_SkipsSystemFiles ensures system JSONL files are never treated as JSONL exports.
func TestFindActualJSONLFile_SkipsSystemFiles(t *testing.T) {
	dir := t.TempDir()
	write := func(name string) {
		t.Helper()
		if err := os.WriteFile(filepath.Join(dir, name), []byte(`{"id":"x"}`), 0644); err != nil {
			t.Fatal(err)
		}
	}
	// Only a system file present → no export candidates at all.
	write("interactions.jsonl")
	if got := findActualJSONLFile(dir); got != "" {
		t.Fatalf("expected empty result, got %q", got)
	}
	// System file plus legacy export → the legacy export wins.
	write("beads.jsonl")
	if got := findActualJSONLFile(dir); got != "beads.jsonl" {
		t.Fatalf("expected beads.jsonl, got %q", got)
	}
}
// TestDatabaseConfigFix_RejectsSystemJSONLExport verifies that the
// DatabaseConfig fixer rewrites a jsonl_export pointing at a system file
// back to the canonical issues.jsonl.
func TestDatabaseConfigFix_RejectsSystemJSONLExport(t *testing.T) {
	workspace := t.TempDir()
	beadsDir := filepath.Join(workspace, ".beads")
	if err := os.Mkdir(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	if err := os.WriteFile(filepath.Join(beadsDir, "interactions.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
		t.Fatalf("Failed to create interactions.jsonl: %v", err)
	}
	// Persist a config that wrongly points jsonl_export at a system file.
	badCfg := &configfile.Config{Database: "beads.db", JSONLExport: "interactions.jsonl"}
	if err := badCfg.Save(beadsDir); err != nil {
		t.Fatalf("Failed to save config: %v", err)
	}
	if err := DatabaseConfig(workspace); err != nil {
		t.Fatalf("DatabaseConfig failed: %v", err)
	}
	fixed, err := configfile.Load(beadsDir)
	if err != nil {
		t.Fatalf("Failed to load updated config: %v", err)
	}
	if fixed.JSONLExport != "issues.jsonl" {
		t.Fatalf("expected issues.jsonl, got %q", fixed.JSONLExport)
	}
}

View File

@@ -0,0 +1,116 @@
package fix
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
)
// DatabaseIntegrity attempts to recover from database corruption by:
// 1. Backing up the corrupt database (and WAL/SHM if present)
// 2. Re-initializing the database from the working tree JSONL export
//
// This is intentionally conservative: it will not delete JSONL, and it preserves the
// original DB as a backup for forensic recovery.
func DatabaseIntegrity(path string) error {
	if err := validateBeadsWorkspace(path); err != nil {
		return err
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("failed to resolve path: %w", err)
	}
	beadsDir := filepath.Join(absPath, ".beads")
	// Best-effort: stop any running daemon to reduce the chance of DB file locks.
	_ = Daemon(absPath)
	// Resolve database path (respects metadata.json database override).
	var dbPath string
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
		dbPath = cfg.DatabasePath(beadsDir)
	} else {
		dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	}
	// Find JSONL source of truth: prefer the configured export (ignoring
	// system files like deletions.jsonl), then fall back to the canonical
	// and legacy filenames.
	jsonlPath := ""
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
		if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
			candidate := cfg.JSONLPath(beadsDir)
			if _, err := os.Stat(candidate); err == nil {
				jsonlPath = candidate
			}
		}
	}
	if jsonlPath == "" {
		for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
			candidate := filepath.Join(beadsDir, name)
			if _, err := os.Stat(candidate); err == nil {
				jsonlPath = candidate
				break
			}
		}
	}
	if jsonlPath == "" {
		// Without a JSONL export there is nothing to rebuild from; bail out
		// rather than discard the only copy of the data.
		return fmt.Errorf("cannot auto-recover: no JSONL export found in %s", beadsDir)
	}
	// Back up corrupt DB and its sidecar files.
	ts := time.Now().UTC().Format("20060102T150405Z")
	backupDB := dbPath + "." + ts + ".corrupt.backup.db"
	if err := moveFile(dbPath, backupDB); err != nil {
		// Retry once after attempting to kill daemons again (helps on platforms with strict file locks).
		_ = Daemon(absPath)
		if err2 := moveFile(dbPath, backupDB); err2 != nil {
			// Prefer the original error (more likely root cause).
			return fmt.Errorf("failed to back up database: %w", err)
		}
	}
	// Move SQLite sidecars (WAL/SHM/journal) alongside the backup, best effort.
	for _, suffix := range []string{"-wal", "-shm", "-journal"} {
		sidecar := dbPath + suffix
		if _, err := os.Stat(sidecar); err == nil {
			_ = moveFile(sidecar, backupDB+suffix) // best effort
		}
	}
	// Rebuild by importing from the working tree JSONL into a fresh database.
	bdBinary, err := getBdBinary()
	if err != nil {
		return err
	}
	// Use import (not init) so we always hydrate from the working tree JSONL, not git-tracked blobs.
	args := []string{"--db", dbPath, "import", "-i", jsonlPath, "--force", "--no-git-history"}
	cmd := newBdCmd(bdBinary, args...)
	cmd.Dir = absPath
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		// Best-effort rollback: attempt to restore the original DB, while preserving the backup.
		failedTS := time.Now().UTC().Format("20060102T150405Z")
		if _, statErr := os.Stat(dbPath); statErr == nil {
			// Move the half-built DB aside first so the restore below cannot
			// collide with it.
			failedDB := dbPath + "." + failedTS + ".failed.init.db"
			_ = moveFile(dbPath, failedDB)
			for _, suffix := range []string{"-wal", "-shm", "-journal"} {
				_ = moveFile(dbPath+suffix, failedDB+suffix)
			}
		}
		// Copy (not move) the backup back so the backup itself survives for
		// forensic recovery even after the restore.
		_ = copyFile(backupDB, dbPath)
		for _, suffix := range []string{"-wal", "-shm", "-journal"} {
			if _, statErr := os.Stat(backupDB + suffix); statErr == nil {
				_ = copyFile(backupDB+suffix, dbPath+suffix)
			}
		}
		return fmt.Errorf("failed to rebuild database from JSONL: %w (backup: %s)", err, backupDB)
	}
	return nil
}

57
cmd/bd/doctor/fix/fs.go Normal file
View File

@@ -0,0 +1,57 @@
package fix
import (
"errors"
"fmt"
"io"
"os"
"syscall"
)
// Filesystem operations are indirected through package-level variables so
// tests can inject failure modes (e.g. cross-device rename, ENOSPC on open)
// without depending on a real filesystem boundary.
var (
	renameFile = os.Rename
	removeFile = os.Remove
	openFileRO = os.Open
	openFileRW = os.OpenFile
)
// moveFile renames src to dst, falling back to copy+remove when the rename
// crosses a filesystem boundary (EXDEV). Any other rename error is returned
// unchanged.
func moveFile(src, dst string) error {
	err := renameFile(src, dst)
	if err == nil {
		return nil
	}
	if !isEXDEV(err) {
		return err
	}
	// Cross-device: copy the contents, then remove the original.
	if copyErr := copyFile(src, dst); copyErr != nil {
		return copyErr
	}
	if rmErr := removeFile(src); rmErr != nil {
		return fmt.Errorf("failed to remove source after copy: %w", rmErr)
	}
	return nil
}
// copyFile copies src to dst, creating or truncating dst with 0644
// permissions. The final explicit Close surfaces flush errors; the deferred
// close is a no-op safety net for the early-return paths.
func copyFile(src, dst string) error {
	in, err := openFileRO(src) // #nosec G304 -- src is within the workspace
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := openFileRW(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer func() { _ = out.Close() }()

	if _, err = io.Copy(out, in); err != nil {
		return err
	}
	return out.Close()
}
func isEXDEV(err error) bool {
var linkErr *os.LinkError
if errors.As(err, &linkErr) {
return errors.Is(linkErr.Err, syscall.EXDEV)
}
return errors.Is(err, syscall.EXDEV)
}

View File

@@ -0,0 +1,71 @@
package fix
import (
"errors"
"os"
"path/filepath"
"syscall"
"testing"
)
// TestMoveFile_EXDEV_FallsBackToCopy verifies that when rename reports a
// cross-device error, moveFile falls back to copy+remove: the source is
// deleted and the destination carries the original contents.
func TestMoveFile_EXDEV_FallsBackToCopy(t *testing.T) {
	workDir := t.TempDir()
	srcPath := filepath.Join(workDir, "src.txt")
	dstPath := filepath.Join(workDir, "dst.txt")
	if err := os.WriteFile(srcPath, []byte("hello"), 0644); err != nil {
		t.Fatal(err)
	}
	// Stub rename to always report EXDEV, forcing the fallback path.
	savedRename := renameFile
	defer func() { renameFile = savedRename }()
	renameFile = func(oldpath, newpath string) error {
		return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
	}
	if err := moveFile(srcPath, dstPath); err != nil {
		t.Fatalf("moveFile failed: %v", err)
	}
	if _, err := os.Stat(srcPath); !os.IsNotExist(err) {
		t.Fatalf("expected src to be removed, stat err=%v", err)
	}
	contents, err := os.ReadFile(dstPath)
	if err != nil {
		t.Fatalf("read dst: %v", err)
	}
	if string(contents) != "hello" {
		t.Fatalf("dst contents=%q", string(contents))
	}
}
// TestMoveFile_EXDEV_CopyFails_LeavesSource verifies that when the copy
// fallback fails (simulated ENOSPC on destination open), moveFile returns
// the error and the source file is left untouched.
func TestMoveFile_EXDEV_CopyFails_LeavesSource(t *testing.T) {
	workDir := t.TempDir()
	srcPath := filepath.Join(workDir, "src.txt")
	dstPath := filepath.Join(workDir, "dst.txt")
	if err := os.WriteFile(srcPath, []byte("hello"), 0644); err != nil {
		t.Fatal(err)
	}
	savedRename, savedOpenRW := renameFile, openFileRW
	defer func() {
		renameFile = savedRename
		openFileRW = savedOpenRW
	}()
	// Force the EXDEV fallback, then make the destination open fail.
	renameFile = func(oldpath, newpath string) error {
		return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
	}
	openFileRW = func(name string, flag int, perm os.FileMode) (*os.File, error) {
		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOSPC}
	}
	err := moveFile(srcPath, dstPath)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !errors.Is(err, syscall.ENOSPC) {
		t.Fatalf("expected ENOSPC, got %v", err)
	}
	if _, err := os.Stat(srcPath); err != nil {
		t.Fatalf("expected src to remain, stat err=%v", err)
	}
}

View File

@@ -28,7 +28,7 @@ func GitHooks(path string) error {
}
// Run bd hooks install
cmd := exec.Command(bdBinary, "hooks", "install") // #nosec G204 -- bdBinary from validated executable path
cmd := newBdCmd(bdBinary, "hooks", "install")
cmd.Dir = path // Set working directory without changing process dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -0,0 +1,87 @@
package fix
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/utils"
)
// JSONLIntegrity backs up a malformed JSONL export and regenerates it from the database.
// This is safe only when a database exists and is readable.
//
// Repair sequence (the order matters for rollback safety):
//  1. resolve the database and JSONL paths from metadata.json, with fallbacks
//  2. move the corrupt JSONL aside to a timestamped .corrupt.backup.jsonl
//  3. re-export from the database via `bd export`
//  4. on export failure, restore the original JSONL but keep the backup
func JSONLIntegrity(path string) error {
	if err := validateBeadsWorkspace(path); err != nil {
		return err
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("failed to resolve path: %w", err)
	}
	beadsDir := filepath.Join(absPath, ".beads")
	// Resolve db path.
	dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
		dbPath = cfg.DatabasePath(beadsDir)
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		// Without a database there is nothing to regenerate from.
		return fmt.Errorf("cannot auto-repair JSONL: no database found")
	}
	// Resolve JSONL export path.
	// Prefer the configured export (skipping system side files like
	// deletions.jsonl), and only use it if it actually exists on disk.
	jsonlPath := ""
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
		if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
			p := cfg.JSONLPath(beadsDir)
			if _, err := os.Stat(p); err == nil {
				jsonlPath = p
			}
		}
	}
	if jsonlPath == "" {
		// Fall back to best-effort discovery within .beads/.
		p := utils.FindJSONLInDir(beadsDir)
		if _, err := os.Stat(p); err == nil {
			jsonlPath = p
		}
	}
	if jsonlPath == "" {
		return fmt.Errorf("cannot auto-repair JSONL: no JSONL file found")
	}
	// Back up the JSONL.
	ts := time.Now().UTC().Format("20060102T150405Z")
	backup := jsonlPath + "." + ts + ".corrupt.backup.jsonl"
	if err := moveFile(jsonlPath, backup); err != nil {
		return fmt.Errorf("failed to back up JSONL: %w", err)
	}
	binary, err := getBdBinary()
	if err != nil {
		// Could not locate bd: undo the backup move before bailing out.
		_ = moveFile(backup, jsonlPath)
		return err
	}
	// Re-export from DB.
	cmd := newBdCmd(binary, "--db", dbPath, "export", "-o", jsonlPath, "--force")
	cmd.Dir = absPath
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		// Best-effort rollback: restore the original JSONL, but keep the backup.
		// A partially written export (if any) is preserved under a
		// .failed.regen.jsonl name for post-mortem inspection.
		failedTS := time.Now().UTC().Format("20060102T150405Z")
		if _, statErr := os.Stat(jsonlPath); statErr == nil {
			failed := jsonlPath + "." + failedTS + ".failed.regen.jsonl"
			_ = moveFile(jsonlPath, failed)
		}
		// Copy (not move) so the timestamped backup survives for the user.
		_ = copyFile(backup, jsonlPath)
		return fmt.Errorf("failed to regenerate JSONL from database: %w (backup: %s)", err, backup)
	}
	return nil
}

View File

@@ -3,8 +3,10 @@ package fix
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
)
// DatabaseVersion fixes database version mismatches by running bd migrate,
@@ -23,12 +25,15 @@ func DatabaseVersion(path string) error {
// Check if database exists - if not, run init instead of migrate (bd-4h9)
beadsDir := filepath.Join(path, ".beads")
dbPath := filepath.Join(beadsDir, "beads.db")
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
}
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
// No database - this is a fresh clone, run bd init
fmt.Println("→ No database found, running 'bd init' to hydrate from JSONL...")
cmd := exec.Command(bdBinary, "init") // #nosec G204 -- bdBinary from validated executable path
cmd := newBdCmd(bdBinary, "--db", dbPath, "init")
cmd.Dir = path
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -41,8 +46,8 @@ func DatabaseVersion(path string) error {
}
// Database exists - run bd migrate
cmd := exec.Command(bdBinary, "migrate") // #nosec G204 -- bdBinary from validated executable path
cmd.Dir = path // Set working directory without changing process dir
cmd := newBdCmd(bdBinary, "--db", dbPath, "migrate")
cmd.Dir = path // Set working directory without changing process dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -3,7 +3,6 @@ package fix
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
@@ -31,9 +30,9 @@ func readLineUnbuffered() (string, error) {
// RepoFingerprint fixes repo fingerprint mismatches by prompting the user
// for which action to take. This is interactive because the consequences
// differ significantly between options:
// 1. Update repo ID (if URL changed or bd upgraded)
// 2. Reinitialize database (if wrong database was copied)
// 3. Skip (do nothing)
// 1. Update repo ID (if URL changed or bd upgraded)
// 2. Reinitialize database (if wrong database was copied)
// 3. Skip (do nothing)
func RepoFingerprint(path string) error {
// Validate workspace
if err := validateBeadsWorkspace(path); err != nil {
@@ -67,7 +66,7 @@ func RepoFingerprint(path string) error {
case "1":
// Run bd migrate --update-repo-id
fmt.Println(" → Running 'bd migrate --update-repo-id'...")
cmd := exec.Command(bdBinary, "migrate", "--update-repo-id") // #nosec G204 -- bdBinary from validated executable path
cmd := newBdCmd(bdBinary, "migrate", "--update-repo-id")
cmd.Dir = path
cmd.Stdin = os.Stdin // Allow user to respond to migrate's confirmation prompt
cmd.Stdout = os.Stdout
@@ -105,7 +104,7 @@ func RepoFingerprint(path string) error {
_ = os.Remove(dbPath + "-shm")
fmt.Println(" → Running 'bd init'...")
cmd := exec.Command(bdBinary, "init", "--quiet") // #nosec G204 -- bdBinary from validated executable path
cmd := newBdCmd(bdBinary, "init", "--quiet")
cmd.Dir = path
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -0,0 +1,52 @@
package fix
import (
"fmt"
"os"
"strings"
"time"
)
func sqliteConnString(path string, readOnly bool) string {
path = strings.TrimSpace(path)
if path == "" {
return ""
}
busy := 30 * time.Second
if v := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); v != "" {
if d, err := time.ParseDuration(v); err == nil {
busy = d
}
}
busyMs := int64(busy / time.Millisecond)
if strings.HasPrefix(path, "file:") {
conn := path
sep := "?"
if strings.Contains(conn, "?") {
sep = "&"
}
if readOnly && !strings.Contains(conn, "mode=") {
conn += sep + "mode=ro"
sep = "&"
}
if !strings.Contains(conn, "_pragma=busy_timeout") {
conn += fmt.Sprintf("%s_pragma=busy_timeout(%d)", sep, busyMs)
sep = "&"
}
if !strings.Contains(conn, "_pragma=foreign_keys") {
conn += sep + "_pragma=foreign_keys(ON)"
sep = "&"
}
if !strings.Contains(conn, "_time_format=") {
conn += sep + "_time_format=sqlite"
}
return conn
}
if readOnly {
return fmt.Sprintf("file:%s?mode=ro&_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
}
return fmt.Sprintf("file:%s?_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
_ "github.com/ncruces/go-sqlite3/driver"
@@ -38,13 +37,23 @@ func DBJSONLSync(path string) error {
// Find JSONL file
var jsonlPath string
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
p := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
}
if jsonlPath == "" {
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
if _, err := os.Stat(issuesJSONL); err == nil {
jsonlPath = issuesJSONL
} else if _, err := os.Stat(beadsJSONL); err == nil {
jsonlPath = beadsJSONL
if _, err := os.Stat(issuesJSONL); err == nil {
jsonlPath = issuesJSONL
} else if _, err := os.Stat(beadsJSONL); err == nil {
jsonlPath = beadsJSONL
}
}
// Check if both database and JSONL exist
@@ -102,21 +111,36 @@ func DBJSONLSync(path string) error {
return err
}
// Run the appropriate sync command
var cmd *exec.Cmd
if syncDirection == "export" {
// Export DB to JSONL file (must specify -o to write to file, not stdout)
jsonlOutputPath := filepath.Join(beadsDir, "issues.jsonl")
cmd = exec.Command(bdBinary, "export", "-o", jsonlOutputPath, "--force") // #nosec G204 -- bdBinary from validated executable path
} else {
cmd = exec.Command(bdBinary, "sync", "--import-only") // #nosec G204 -- bdBinary from validated executable path
jsonlOutputPath := jsonlPath
exportCmd := newBdCmd(bdBinary, "--db", dbPath, "export", "-o", jsonlOutputPath, "--force")
exportCmd.Dir = path // Set working directory without changing process dir
exportCmd.Stdout = os.Stdout
exportCmd.Stderr = os.Stderr
if err := exportCmd.Run(); err != nil {
return fmt.Errorf("failed to export database to JSONL: %w", err)
}
// Staleness check uses last_import_time. After exporting, JSONL mtime is newer,
// so mark the DB as fresh by running a no-op import (skip existing issues).
markFreshCmd := newBdCmd(bdBinary, "--db", dbPath, "import", "-i", jsonlOutputPath, "--force", "--skip-existing", "--no-git-history")
markFreshCmd.Dir = path
markFreshCmd.Stdout = os.Stdout
markFreshCmd.Stderr = os.Stderr
if err := markFreshCmd.Run(); err != nil {
return fmt.Errorf("failed to mark database as fresh after export: %w", err)
}
return nil
}
cmd.Dir = path // Set working directory without changing process dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
importCmd := newBdCmd(bdBinary, "--db", dbPath, "sync", "--import-only")
importCmd.Dir = path // Set working directory without changing process dir
importCmd.Stdout = os.Stdout
importCmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
if err := importCmd.Run(); err != nil {
return fmt.Errorf("failed to sync database with JSONL: %w", err)
}
@@ -125,7 +149,7 @@ func DBJSONLSync(path string) error {
// countDatabaseIssues counts the number of issues in the database.
func countDatabaseIssues(dbPath string) (int, error) {
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return 0, fmt.Errorf("failed to open database: %w", err)
}

View File

@@ -32,8 +32,7 @@ func SyncBranchConfig(path string) error {
}
// Set sync.branch using bd config set
// #nosec G204 - bdBinary is controlled by getBdBinary() which returns os.Executable()
setCmd := exec.Command(bdBinary, "config", "set", "sync.branch", currentBranch)
setCmd := newBdCmd(bdBinary, "config", "set", "sync.branch", currentBranch)
setCmd.Dir = path
if output, err := setCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set sync.branch: %w\nOutput: %s", err, string(output))

View File

@@ -233,5 +233,5 @@ func ChildParentDependencies(path string) error {
// openDB opens a SQLite database for read-write access
func openDB(dbPath string) (*sql.DB, error) {
return sql.Open("sqlite3", dbPath)
return sql.Open("sqlite3", sqliteConnString(dbPath, false))
}

View File

@@ -78,6 +78,173 @@ func CheckGitHooks() DoctorCheck {
}
}
// CheckGitWorkingTree checks if the git working tree is clean.
// This helps prevent leaving work stranded (AGENTS.md: keep git state clean).
func CheckGitWorkingTree(path string) DoctorCheck {
	const checkName = "Git Working Tree"
	// A failing rev-parse means path is not inside a git repository.
	probe := exec.Command("git", "rev-parse", "--git-dir")
	probe.Dir = path
	if err := probe.Run(); err != nil {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusOK,
			Message: "N/A (not a git repository)",
		}
	}
	statusCmd := exec.Command("git", "status", "--porcelain")
	statusCmd.Dir = path
	raw, err := statusCmd.Output()
	if err != nil {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusWarning,
			Message: "Unable to check git status",
			Detail:  err.Error(),
			Fix:     "Run 'git status' and commit/stash changes before syncing",
		}
	}
	porcelain := strings.TrimSpace(string(raw))
	if porcelain == "" {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusOK,
			Message: "Clean",
		}
	}
	// Show a small sample of paths for quick debugging.
	entries := strings.Split(porcelain, "\n")
	const maxShown = 8
	if len(entries) > maxShown {
		entries = append(entries[:maxShown], "…")
	}
	return DoctorCheck{
		Name:    checkName,
		Status:  StatusWarning,
		Message: "Uncommitted changes present",
		Detail:  strings.Join(entries, "\n"),
		Fix:     "Commit or stash changes, then follow AGENTS.md: git pull --rebase && git push",
	}
}
// CheckGitUpstream checks whether the current branch is up to date with its upstream.
// This catches common "forgot to pull/push" failure modes (AGENTS.md: pull --rebase, push).
//
// Outcomes: OK when not a repo or in sync with upstream; Warning for
// detached HEAD, no upstream, ahead, behind, diverged, or when the
// comparison itself fails.
func CheckGitUpstream(path string) DoctorCheck {
	cmd := exec.Command("git", "rev-parse", "--git-dir")
	cmd.Dir = path
	if err := cmd.Run(); err != nil {
		// Not a git repository: nothing to compare.
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusOK,
			Message: "N/A (not a git repository)",
		}
	}
	// Detect detached HEAD.
	cmd = exec.Command("git", "symbolic-ref", "--short", "HEAD")
	cmd.Dir = path
	branchOut, err := cmd.Output()
	if err != nil {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: "Detached HEAD (no branch)",
			Fix:     "Check out a branch before syncing",
		}
	}
	branch := strings.TrimSpace(string(branchOut))
	// rev-parse @{u} fails when the branch has no upstream configured.
	cmd = exec.Command("git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}")
	cmd.Dir = path
	upOut, err := cmd.Output()
	if err != nil {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("No upstream configured for %s", branch),
			Fix:     fmt.Sprintf("Set upstream then push: git push -u origin %s", branch),
		}
	}
	upstream := strings.TrimSpace(string(upOut))
	// Count commits unique to each side to classify ahead/behind/diverged.
	ahead, aheadErr := gitRevListCount(path, "@{u}..HEAD")
	behind, behindErr := gitRevListCount(path, "HEAD..@{u}")
	if aheadErr != nil || behindErr != nil {
		detailParts := []string{}
		if aheadErr != nil {
			detailParts = append(detailParts, "ahead: "+aheadErr.Error())
		}
		if behindErr != nil {
			detailParts = append(detailParts, "behind: "+behindErr.Error())
		}
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("Unable to compare with upstream (%s)", upstream),
			Detail:  strings.Join(detailParts, "; "),
			Fix:     "Run 'git fetch' then check: git status -sb",
		}
	}
	if ahead == 0 && behind == 0 {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusOK,
			Message: fmt.Sprintf("Up to date (%s)", upstream),
			Detail:  fmt.Sprintf("Branch: %s", branch),
		}
	}
	if ahead > 0 && behind == 0 {
		// Local commits not yet pushed.
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("Ahead of upstream by %d commit(s)", ahead),
			Detail:  fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
			Fix:     "Run 'git push' (AGENTS.md: git pull --rebase && git push)",
		}
	}
	if behind > 0 && ahead == 0 {
		// Upstream commits not yet pulled.
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("Behind upstream by %d commit(s)", behind),
			Detail:  fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
			Fix:     "Run 'git pull --rebase' (then re-run bd sync / bd doctor)",
		}
	}
	// Both counts nonzero: histories have diverged.
	return DoctorCheck{
		Name:    "Git Upstream",
		Status:  StatusWarning,
		Message: fmt.Sprintf("Diverged from upstream (ahead %d, behind %d)", ahead, behind),
		Detail:  fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
		Fix:     "Run 'git pull --rebase' then 'git push'",
	}
}
// gitRevListCount returns the number of commits in the given revision
// range (e.g. "@{u}..HEAD") for the repository at path. Empty output
// counts as zero.
func gitRevListCount(path string, rangeExpr string) (int, error) {
	cmd := exec.Command("git", "rev-list", "--count", rangeExpr) // #nosec G204 -- fixed args
	cmd.Dir = path
	raw, err := cmd.Output()
	if err != nil {
		return 0, err
	}
	text := strings.TrimSpace(string(raw))
	if text == "" {
		return 0, nil
	}
	var count int
	if _, err := fmt.Sscanf(text, "%d", &count); err != nil {
		return 0, err
	}
	return count, nil
}
// CheckSyncBranchHookCompatibility checks if pre-push hook is compatible with sync-branch mode.
// When sync-branch is configured, the pre-push hook must have the sync-branch bypass logic
// (added in version 0.29.0). Without it, users experience circular "bd sync" failures (issue #532).
@@ -662,5 +829,5 @@ func CheckOrphanedIssues(path string) DoctorCheck {
// openDBReadOnly opens a SQLite database in read-only mode
func openDBReadOnly(dbPath string) (*sql.DB, error) {
return sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
return sql.Open("sqlite3", sqliteConnString(dbPath, true))
}

View File

@@ -0,0 +1,176 @@
package doctor
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
func mkTmpDirInTmp(t *testing.T, prefix string) string {
t.Helper()
dir, err := os.MkdirTemp("/tmp", prefix)
if err != nil {
// Fallback for platforms without /tmp (e.g. Windows).
dir, err = os.MkdirTemp("", prefix)
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
}
t.Cleanup(func() { _ = os.RemoveAll(dir) })
return dir
}
// runGit executes git with the given arguments in dir, failing the test
// immediately on any error and returning the combined stdout/stderr.
func runGit(t *testing.T, dir string, args ...string) string {
	t.Helper()
	gitCmd := exec.Command("git", args...)
	gitCmd.Dir = dir
	combined, err := gitCmd.CombinedOutput()
	if err != nil {
		t.Fatalf("git %v failed: %v\n%s", args, err, string(combined))
	}
	return string(combined)
}
// initRepo prepares dir as a beads git workspace for tests: creates the
// .beads/ marker directory and initializes a git repository on the named
// branch with a local test identity (so commits work without global config).
func initRepo(t *testing.T, dir string, branch string) {
	t.Helper()
	// Workspace marker expected by the doctor checks.
	_ = os.MkdirAll(filepath.Join(dir, ".beads"), 0755)
	runGit(t, dir, "init", "-b", branch)
	runGit(t, dir, "config", "user.email", "test@test.com")
	runGit(t, dir, "config", "user.name", "Test User")
}
// commitFile writes content to name inside dir (creating parent
// directories as needed), stages it, and commits it with msg.
func commitFile(t *testing.T, dir, name, content, msg string) {
	t.Helper()
	target := filepath.Join(dir, name)
	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
		t.Fatalf("mkdir: %v", err)
	}
	if err := os.WriteFile(target, []byte(content), 0644); err != nil {
		t.Fatalf("write file: %v", err)
	}
	runGit(t, dir, "add", name)
	runGit(t, dir, "commit", "-m", msg)
}
// TestCheckGitWorkingTree exercises the working-tree check for its three
// observable states: not a repo, clean tree, and dirty tree.
func TestCheckGitWorkingTree(t *testing.T) {
	t.Run("not a git repo", func(t *testing.T) {
		ws := mkTmpDirInTmp(t, "bd-git-nt-*")
		res := CheckGitWorkingTree(ws)
		if res.Status != StatusOK {
			t.Fatalf("status=%q want %q", res.Status, StatusOK)
		}
		if !strings.Contains(res.Message, "N/A") {
			t.Fatalf("message=%q want N/A", res.Message)
		}
	})
	t.Run("clean", func(t *testing.T) {
		ws := mkTmpDirInTmp(t, "bd-git-clean-*")
		initRepo(t, ws, "main")
		commitFile(t, ws, "README.md", "# test\n", "initial")
		res := CheckGitWorkingTree(ws)
		if res.Status != StatusOK {
			t.Fatalf("status=%q want %q (msg=%q)", res.Status, StatusOK, res.Message)
		}
	})
	t.Run("dirty", func(t *testing.T) {
		ws := mkTmpDirInTmp(t, "bd-git-dirty-*")
		initRepo(t, ws, "main")
		commitFile(t, ws, "README.md", "# test\n", "initial")
		// An untracked file is enough to dirty the tree.
		if err := os.WriteFile(filepath.Join(ws, "dirty.txt"), []byte("x"), 0644); err != nil {
			t.Fatalf("write dirty file: %v", err)
		}
		res := CheckGitWorkingTree(ws)
		if res.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", res.Status, StatusWarning, res.Message)
		}
	})
}
// TestCheckGitUpstream exercises CheckGitUpstream against real local git
// repositories in four states: no upstream configured, in sync with the
// upstream, ahead by one commit, and behind by one commit.
func TestCheckGitUpstream(t *testing.T) {
	t.Run("no upstream", func(t *testing.T) {
		dir := mkTmpDirInTmp(t, "bd-git-up-*")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		// No remote/upstream configured, so @{u} resolution fails.
		check := CheckGitUpstream(dir)
		if check.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
		}
		if !strings.Contains(check.Message, "No upstream") {
			t.Fatalf("message=%q want to mention upstream", check.Message)
		}
	})
	t.Run("up to date", func(t *testing.T) {
		dir := mkTmpDirInTmp(t, "bd-git-up2-*")
		remote := mkTmpDirInTmp(t, "bd-git-remote-*")
		runGit(t, remote, "init", "--bare")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		runGit(t, dir, "remote", "add", "origin", remote)
		// push -u sets the upstream and leaves both sides identical.
		runGit(t, dir, "push", "-u", "origin", "main")
		check := CheckGitUpstream(dir)
		if check.Status != StatusOK {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusOK, check.Message)
		}
	})
	t.Run("ahead of upstream", func(t *testing.T) {
		dir := mkTmpDirInTmp(t, "bd-git-ahead-*")
		remote := mkTmpDirInTmp(t, "bd-git-remote2-*")
		runGit(t, remote, "init", "--bare")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		runGit(t, dir, "remote", "add", "origin", remote)
		runGit(t, dir, "push", "-u", "origin", "main")
		// One extra local commit puts the branch ahead of origin/main.
		commitFile(t, dir, "file2.txt", "x", "local commit")
		check := CheckGitUpstream(dir)
		if check.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
		}
		if !strings.Contains(check.Message, "Ahead") {
			t.Fatalf("message=%q want to mention ahead", check.Message)
		}
	})
	t.Run("behind upstream", func(t *testing.T) {
		dir := mkTmpDirInTmp(t, "bd-git-behind-*")
		remote := mkTmpDirInTmp(t, "bd-git-remote3-*")
		runGit(t, remote, "init", "--bare")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		runGit(t, dir, "remote", "add", "origin", remote)
		runGit(t, dir, "push", "-u", "origin", "main")
		// Advance remote via another clone.
		clone := mkTmpDirInTmp(t, "bd-git-clone-*")
		runGit(t, clone, "clone", remote, ".")
		runGit(t, clone, "config", "user.email", "test@test.com")
		runGit(t, clone, "config", "user.name", "Test User")
		commitFile(t, clone, "remote.txt", "y", "remote commit")
		runGit(t, clone, "push", "origin", "main")
		// Update tracking refs.
		runGit(t, dir, "fetch", "origin")
		check := CheckGitUpstream(dir)
		if check.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
		}
		if !strings.Contains(check.Message, "Behind") {
			t.Fatalf("message=%q want to mention behind", check.Message)
		}
	})
}

View File

@@ -106,7 +106,7 @@ func CheckPermissions(path string) DoctorCheck {
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if _, err := os.Stat(dbPath); err == nil {
// Try to open database
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Permissions",
@@ -118,7 +118,7 @@ func CheckPermissions(path string) DoctorCheck {
_ = db.Close() // Intentionally ignore close error
// Try a write test
db, err = sql.Open("sqlite", dbPath)
db, err = sql.Open("sqlite", sqliteConnString(dbPath, true))
if err == nil {
_, err = db.Exec("SELECT 1")
_ = db.Close() // Intentionally ignore close error

View File

@@ -51,7 +51,7 @@ func CheckIDFormat(path string) DoctorCheck {
}
// Open database
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Issue IDs",
@@ -121,7 +121,7 @@ func CheckDependencyCycles(path string) DoctorCheck {
}
// Open database to check for cycles
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Dependency Cycles",
@@ -216,7 +216,7 @@ func CheckTombstones(path string) DoctorCheck {
}
}
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Tombstones",
@@ -420,7 +420,7 @@ func CheckRepoFingerprint(path string) DoctorCheck {
}
// Open database
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",

View File

@@ -0,0 +1,123 @@
package doctor
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/utils"
)
// CheckJSONLIntegrity scans the workspace's JSONL issues export for
// malformed lines (unparseable JSON or records missing an "id" field).
// It returns OK when no JSONL file exists or every line parses; otherwise
// it reports an error-status check with repair guidance — pointing at
// 'bd doctor --fix' when a database is available to regenerate from.
func CheckJSONLIntegrity(path string) DoctorCheck {
	beadsDir := filepath.Join(path, ".beads")
	// Resolve JSONL path: prefer the configured export (ignoring system
	// side files such as deletions.jsonl), and only use it if present.
	jsonlPath := ""
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
		if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
			p := cfg.JSONLPath(beadsDir)
			if _, err := os.Stat(p); err == nil {
				jsonlPath = p
			}
		}
	}
	if jsonlPath == "" {
		// Fall back to a best-effort discovery within .beads/.
		p := utils.FindJSONLInDir(beadsDir)
		if _, err := os.Stat(p); err == nil {
			jsonlPath = p
		}
	}
	if jsonlPath == "" {
		return DoctorCheck{Name: "JSONL Integrity", Status: StatusOK, Message: "N/A (no JSONL file)"}
	}
	// Best-effort scan for malformed lines.
	f, err := os.Open(jsonlPath) // #nosec G304 -- jsonlPath is within the workspace
	if err != nil {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusWarning,
			Message: "Unable to read JSONL file",
			Detail:  err.Error(),
		}
	}
	defer f.Close()
	var malformed int
	var examples []string
	scanner := bufio.NewScanner(f)
	// Issue records can exceed bufio.Scanner's default 64KiB line limit
	// (long descriptions); without a larger buffer the scan would abort
	// with "token too long" and report a spurious warning. Allow up to
	// 16MiB per line.
	scanner.Buffer(make([]byte, 0, 64*1024), 16*1024*1024)
	lineNo := 0
	for scanner.Scan() {
		lineNo++
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		// A valid record must at minimum be JSON with a non-empty id.
		var v struct {
			ID string `json:"id"`
		}
		if err := json.Unmarshal([]byte(line), &v); err != nil || v.ID == "" {
			malformed++
			// Keep a handful of examples for the Detail output.
			if len(examples) < 5 {
				if err != nil {
					examples = append(examples, fmt.Sprintf("line %d: %v", lineNo, err))
				} else {
					examples = append(examples, fmt.Sprintf("line %d: missing id", lineNo))
				}
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusWarning,
			Message: "Unable to scan JSONL file",
			Detail:  err.Error(),
		}
	}
	if malformed == 0 {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusOK,
			Message: fmt.Sprintf("%s looks valid", filepath.Base(jsonlPath)),
		}
	}
	// If we have a database, we can auto-repair by re-exporting from DB.
	dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
		dbPath = cfg.DatabasePath(beadsDir)
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusError,
			Message: fmt.Sprintf("%s has %d malformed line(s)", filepath.Base(jsonlPath), malformed),
			Detail:  strings.Join(examples, "\n"),
			Fix:     "Restore the JSONL file from git or from a backup (no database available for auto-repair).",
		}
	}
	return DoctorCheck{
		Name:    "JSONL Integrity",
		Status:  StatusError,
		Message: fmt.Sprintf("%s has %d malformed line(s)", filepath.Base(jsonlPath), malformed),
		Detail:  strings.Join(examples, "\n"),
		Fix:     "Run 'bd doctor --fix' to back up the JSONL and regenerate it from the database.",
	}
}
// isSystemJSONLFilename reports whether name is one of bd's internal
// JSONL side files, which must never be treated as the issues export.
func isSystemJSONLFilename(name string) bool {
	switch name {
	case "deletions.jsonl", "interactions.jsonl", "molecules.jsonl":
		return true
	}
	return false
}

View File

@@ -0,0 +1,43 @@
package doctor
import (
"os"
"path/filepath"
"testing"
)
// TestCheckJSONLIntegrity_MalformedLine verifies that a JSONL file with a
// non-JSON line is reported as an error with repair guidance when a
// database file is present.
func TestCheckJSONLIntegrity_MalformedLine(t *testing.T) {
	workspace := t.TempDir()
	beadsDir := filepath.Join(workspace, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	// One valid record followed by one malformed line.
	jsonl := filepath.Join(beadsDir, "issues.jsonl")
	if err := os.WriteFile(jsonl, []byte("{\"id\":\"t-1\"}\n{not json}\n"), 0644); err != nil {
		t.Fatal(err)
	}
	// Ensure DB exists so check suggests auto-repair.
	dbStub := filepath.Join(beadsDir, "beads.db")
	if err := os.WriteFile(dbStub, []byte("x"), 0644); err != nil {
		t.Fatal(err)
	}
	result := CheckJSONLIntegrity(workspace)
	if result.Status != StatusError {
		t.Fatalf("expected StatusError, got %v (%s)", result.Status, result.Message)
	}
	if result.Fix == "" {
		t.Fatalf("expected Fix guidance")
	}
}
// TestCheckJSONLIntegrity_NoJSONL verifies the check reports OK when the
// workspace has no JSONL export at all.
func TestCheckJSONLIntegrity_NoJSONL(t *testing.T) {
	workspace := t.TempDir()
	if err := os.MkdirAll(filepath.Join(workspace, ".beads"), 0755); err != nil {
		t.Fatal(err)
	}
	result := CheckJSONLIntegrity(workspace)
	if result.Status != StatusOK {
		t.Fatalf("expected StatusOK, got %v (%s)", result.Status, result.Message)
	}
}

View File

@@ -53,7 +53,7 @@ func CheckLegacyBeadsSlashCommands(repoPath string) DoctorCheck {
Name: "Legacy Commands",
Status: "warning",
Message: fmt.Sprintf("Old beads integration detected in %s", strings.Join(filesWithLegacyCommands, ", ")),
Detail: "Found: /beads:* slash command references (deprecated)\n" +
Detail: "Found: /beads:* slash command references (deprecated)\n" +
" These commands are token-inefficient (~10.5k tokens per session)",
Fix: "Migrate to bd prime hooks for better token efficiency:\n" +
"\n" +
@@ -104,7 +104,7 @@ func CheckAgentDocumentation(repoPath string) DoctorCheck {
Name: "Agent Documentation",
Status: "warning",
Message: "No agent documentation found",
Detail: "Missing: AGENTS.md or CLAUDE.md\n" +
Detail: "Missing: AGENTS.md or CLAUDE.md\n" +
" Documenting workflow helps AI agents work more effectively",
Fix: "Add agent documentation:\n" +
" • Run 'bd onboard' to create AGENTS.md with workflow guidance\n" +
@@ -187,7 +187,7 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
Name: "JSONL Files",
Status: "warning",
Message: fmt.Sprintf("Multiple JSONL files found: %s", strings.Join(realJSONLFiles, ", ")),
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
" Only one JSONL file should be used per repository.",
Fix: "Determine which file is current and remove the others:\n" +
" 1. Check 'bd stats' to see which file is being used\n" +
@@ -235,7 +235,7 @@ func CheckLegacyJSONLConfig(repoPath string) DoctorCheck {
Name: "JSONL Config",
Status: "warning",
Message: "Using legacy beads.jsonl filename",
Detail: "The canonical filename is now issues.jsonl (bd-6xd).\n" +
Detail: "The canonical filename is now issues.jsonl (bd-6xd).\n" +
" Legacy beads.jsonl is still supported but should be migrated.",
Fix: "Run 'bd doctor --fix' to auto-migrate, or manually:\n" +
" 1. git mv .beads/beads.jsonl .beads/issues.jsonl\n" +
@@ -251,7 +251,7 @@ func CheckLegacyJSONLConfig(repoPath string) DoctorCheck {
Status: "warning",
Message: "Config references beads.jsonl but issues.jsonl exists",
Detail: "metadata.json says beads.jsonl but the actual file is issues.jsonl",
Fix: "Run 'bd doctor --fix' to update the configuration",
Fix: "Run 'bd doctor --fix' to update the configuration",
}
}
}
@@ -303,6 +303,16 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
// Check if configured JSONL exists
if cfg.JSONLExport != "" {
if cfg.JSONLExport == "deletions.jsonl" || cfg.JSONLExport == "interactions.jsonl" || cfg.JSONLExport == "molecules.jsonl" {
return DoctorCheck{
Name: "Database Config",
Status: "error",
Message: fmt.Sprintf("Invalid jsonl_export %q (system file)", cfg.JSONLExport),
Detail: "metadata.json jsonl_export must reference the git-tracked issues export (typically issues.jsonl), not a system log file.",
Fix: "Run 'bd doctor --fix' to reset metadata.json jsonl_export to issues.jsonl, then commit the change.",
}
}
jsonlPath := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
// Check if other .jsonl files exist
@@ -315,7 +325,15 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
lowerName := strings.ToLower(name)
if !strings.Contains(lowerName, "backup") &&
!strings.Contains(lowerName, ".orig") &&
!strings.Contains(lowerName, ".bak") {
!strings.Contains(lowerName, ".bak") &&
!strings.Contains(lowerName, "~") &&
!strings.HasPrefix(lowerName, "backup_") &&
name != "deletions.jsonl" &&
name != "interactions.jsonl" &&
name != "molecules.jsonl" &&
!strings.Contains(lowerName, ".base.jsonl") &&
!strings.Contains(lowerName, ".left.jsonl") &&
!strings.Contains(lowerName, ".right.jsonl") {
otherJSONLs = append(otherJSONLs, name)
}
}
@@ -421,7 +439,7 @@ func CheckFreshClone(repoPath string) DoctorCheck {
Name: "Fresh Clone",
Status: "warning",
Message: fmt.Sprintf("Fresh clone detected (%d issues in %s, no database)", issueCount, jsonlName),
Detail: "This appears to be a freshly cloned repository.\n" +
Detail: "This appears to be a freshly cloned repository.\n" +
" The JSONL file contains issues but no local database exists.\n" +
" Run 'bd init' to create the database and import existing issues.",
Fix: fmt.Sprintf("Run '%s' to initialize the database and import issues", fixCmd),

View File

@@ -410,6 +410,49 @@ func TestCheckLegacyJSONLConfig(t *testing.T) {
}
}
// TestCheckDatabaseConfig_IgnoresSystemJSONLs verifies that the presence of a
// system log file (interactions.jsonl) in .beads does not trip the database
// config check when the configured issues export is simply absent.
func TestCheckDatabaseConfig_IgnoresSystemJSONLs(t *testing.T) {
	workspace := t.TempDir()
	dotBeads := filepath.Join(workspace, ".beads")
	if mkErr := os.Mkdir(dotBeads, 0750); mkErr != nil {
		t.Fatal(mkErr)
	}
	// Configure issues.jsonl, but only create interactions.jsonl.
	write := func(name, contents string) {
		if wErr := os.WriteFile(filepath.Join(dotBeads, name), []byte(contents), 0644); wErr != nil {
			t.Fatal(wErr)
		}
	}
	write("metadata.json", `{"database":"beads.db","jsonl_export":"issues.jsonl"}`)
	write("interactions.jsonl", `{"id":"x"}`)
	result := CheckDatabaseConfig(workspace)
	if result.Status != "ok" {
		t.Fatalf("expected ok, got %s: %s\n%s", result.Status, result.Message, result.Detail)
	}
}
// TestCheckDatabaseConfig_SystemJSONLExportIsError verifies that configuring
// jsonl_export to point at a system log file (interactions.jsonl) is reported
// as an error by CheckDatabaseConfig.
func TestCheckDatabaseConfig_SystemJSONLExportIsError(t *testing.T) {
	workspace := t.TempDir()
	dotBeads := filepath.Join(workspace, ".beads")
	if mkErr := os.Mkdir(dotBeads, 0750); mkErr != nil {
		t.Fatal(mkErr)
	}
	write := func(name, contents string) {
		if wErr := os.WriteFile(filepath.Join(dotBeads, name), []byte(contents), 0644); wErr != nil {
			t.Fatal(wErr)
		}
	}
	// Deliberately mis-point the export at a system file.
	write("metadata.json", `{"database":"beads.db","jsonl_export":"interactions.jsonl"}`)
	write("interactions.jsonl", `{"id":"x"}`)
	result := CheckDatabaseConfig(workspace)
	if result.Status != "error" {
		t.Fatalf("expected error, got %s: %s", result.Status, result.Message)
	}
}
func TestCheckFreshClone(t *testing.T) {
tests := []struct {
name string

View File

@@ -0,0 +1,54 @@
package doctor
import (
"fmt"
"os"
"strings"
"time"
)
// sqliteConnString builds a SQLite connection string for path, attaching the
// pragmas the doctor relies on: busy_timeout (taken from BD_LOCK_TIMEOUT when
// parseable, 30s otherwise), foreign_keys(ON), and _time_format=sqlite. When
// readOnly is true a mode=ro parameter is added unless the caller's URI
// already carries a mode. An empty (or whitespace-only) path yields "".
func sqliteConnString(path string, readOnly bool) string {
	trimmed := strings.TrimSpace(path)
	if trimmed == "" {
		return ""
	}
	// Best-effort: honor the same env var viper uses (BD_LOCK_TIMEOUT).
	timeout := 30 * time.Second
	if raw := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); raw != "" {
		if parsed, err := time.ParseDuration(raw); err == nil {
			timeout = parsed
		}
	}
	timeoutMs := int64(timeout / time.Millisecond)
	if !strings.HasPrefix(trimmed, "file:") {
		// Plain filesystem path: emit a complete URI with all pragmas.
		if readOnly {
			return fmt.Sprintf("file:%s?mode=ro&_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", trimmed, timeoutMs)
		}
		return fmt.Sprintf("file:%s?_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", trimmed, timeoutMs)
	}
	// Already a URI: append only the parameters that are absent.
	var b strings.Builder
	b.WriteString(trimmed)
	next := "?"
	if strings.Contains(trimmed, "?") {
		next = "&"
	}
	addParam := func(param string) {
		b.WriteString(next)
		b.WriteString(param)
		next = "&"
	}
	if readOnly && !strings.Contains(trimmed, "mode=") {
		addParam("mode=ro")
	}
	if !strings.Contains(trimmed, "_pragma=busy_timeout") {
		addParam(fmt.Sprintf("_pragma=busy_timeout(%d)", timeoutMs))
	}
	if !strings.Contains(trimmed, "_pragma=foreign_keys") {
		addParam("_pragma=foreign_keys(ON)")
	}
	if !strings.Contains(trimmed, "_time_format=") {
		addParam("_time_format=sqlite")
	}
	return b.String()
}

View File

@@ -0,0 +1,378 @@
//go:build chaos
package main
import (
"bytes"
"context"
"database/sql"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
_ "github.com/ncruces/go-sqlite3/driver"
)
// TestDoctorRepair_CorruptDatabase_NotADatabase_RebuildFromJSONL overwrites
// the SQLite file with non-database bytes and verifies that
// `bd doctor --fix --yes` recovers (per the subsequent clean `bd doctor` run),
// using the exported issues.jsonl as the available source of truth.
func TestDoctorRepair_CorruptDatabase_NotADatabase_RebuildFromJSONL(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	// Seed the workspace: init, one issue, and a JSONL export for repair to use.
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	// Make the DB unreadable.
	if err := os.WriteFile(dbPath, []byte("not a database"), 0644); err != nil {
		t.Fatalf("corrupt db: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
		t.Fatalf("bd doctor --fix failed: %v", err)
	}
	// A plain doctor run must now succeed against the repaired workspace.
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor"); err != nil {
		t.Fatalf("bd doctor after fix failed: %v\n%s", err, out)
	}
}
// TestDoctorRepair_CorruptDatabase_NoJSONL_FixFails corrupts the database with
// no JSONL export available and verifies that `bd doctor --fix --yes` refuses
// to auto-recover, and that the failure path does not rewrite metadata.json to
// point jsonl_export at a system file.
func TestDoctorRepair_CorruptDatabase_NoJSONL_FixFails(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-nojsonl-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	// Some workflows keep JSONL in sync automatically; force it to be missing.
	// Errors are ignored deliberately: the files may not exist at all.
	_ = os.Remove(filepath.Join(ws, ".beads", "issues.jsonl"))
	_ = os.Remove(filepath.Join(ws, ".beads", "beads.jsonl"))
	// Corrupt without providing JSONL source-of-truth.
	if err := os.Truncate(dbPath, 64); err != nil {
		t.Fatalf("truncate db: %v", err)
	}
	out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes")
	if err == nil {
		t.Fatalf("expected bd doctor --fix to fail without JSONL")
	}
	if !strings.Contains(out, "cannot auto-recover") {
		t.Fatalf("expected auto-recover error, got:\n%s", out)
	}
	// Ensure we don't mis-configure jsonl_export to a system file during failure.
	// metadata.json may legitimately be unreadable/absent; only inspect on success.
	metadata, readErr := os.ReadFile(filepath.Join(ws, ".beads", "metadata.json"))
	if readErr == nil {
		if strings.Contains(string(metadata), "interactions.jsonl") {
			t.Fatalf("unexpected metadata.json jsonl_export set to interactions.jsonl:\n%s", string(metadata))
		}
	}
}
// TestDoctorRepair_CorruptDatabase_BacksUpSidecars corrupts the database while
// WAL/SHM/journal sidecar files are present and verifies that repair leaves a
// *.corrupt.backup.db copy in .beads. Sidecar backup is treated as
// best-effort: only the backup DB itself is strictly required.
func TestDoctorRepair_CorruptDatabase_BacksUpSidecars(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-sidecars-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	// Seed the workspace and export JSONL so repair has a rebuild source.
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	// Ensure sidecars exist so we can verify they get moved with the backup.
	for _, suffix := range []string{"-wal", "-shm", "-journal"} {
		if err := os.WriteFile(dbPath+suffix, []byte("x"), 0644); err != nil {
			t.Fatalf("write sidecar %s: %v", suffix, err)
		}
	}
	if err := os.Truncate(dbPath, 64); err != nil {
		t.Fatalf("truncate db: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
		t.Fatalf("bd doctor --fix failed: %v", err)
	}
	// Verify a backup exists, and at least one sidecar got moved.
	entries, err := os.ReadDir(filepath.Join(ws, ".beads"))
	if err != nil {
		t.Fatalf("readdir: %v", err)
	}
	var backup string
	for _, e := range entries {
		if strings.Contains(e.Name(), ".corrupt.backup.db") {
			backup = filepath.Join(ws, ".beads", e.Name())
			break
		}
	}
	if backup == "" {
		t.Fatalf("expected backup db in .beads, found none")
	}
	wal := backup + "-wal"
	if _, err := os.Stat(wal); err != nil {
		// At minimum, the backup DB itself should exist; sidecar backup is best-effort.
		if _, err2 := os.Stat(backup); err2 != nil {
			t.Fatalf("backup db missing: %v", err2)
		}
	}
}
// TestDoctorRepair_CorruptDatabase_WithRunningDaemon_FixSucceeds corrupts the
// database while a bd daemon is running against the same workspace and
// verifies that `bd doctor --fix --yes` still succeeds, and that the daemon
// can be killed and reaped cleanly afterwards (repair must not wedge it).
func TestDoctorRepair_CorruptDatabase_WithRunningDaemon_FixSucceeds(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-daemon-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	cmd := startDaemonForChaosTest(t, bdExe, ws, dbPath)
	// Safety net: never let the daemon outlive the test, even on failure paths.
	defer func() {
		if cmd.Process != nil && (cmd.ProcessState == nil || !cmd.ProcessState.Exited()) {
			_ = cmd.Process.Kill()
			_, _ = cmd.Process.Wait()
		}
	}()
	// Corrupt the DB.
	if err := os.WriteFile(dbPath, []byte("not a database"), 0644); err != nil {
		t.Fatalf("corrupt db: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
		t.Fatalf("bd doctor --fix failed: %v", err)
	}
	// Ensure we can cleanly stop the daemon afterwards (repair shouldn't wedge it).
	if cmd.Process != nil {
		_ = cmd.Process.Kill()
		done := make(chan error, 1)
		go func() { done <- cmd.Wait() }()
		// Bounded wait: a daemon that ignores the kill fails the test.
		select {
		case <-time.After(3 * time.Second):
			t.Fatalf("expected daemon to exit when killed")
		case <-done:
			// ok
		}
	}
}
// TestDoctorRepair_JSONLIntegrity_MalformedLine_ReexportFromDB appends a
// malformed line to issues.jsonl while the database stays healthy, then
// verifies that `bd doctor --fix --yes` leaves a JSONL file without the
// injected garbage line (i.e. it was regenerated from the intact DB).
func TestDoctorRepair_JSONLIntegrity_MalformedLine_ReexportFromDB(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-jsonl-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	// Corrupt JSONL (leave DB intact).
	f, err := os.OpenFile(jsonlPath, os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		t.Fatalf("open jsonl: %v", err)
	}
	if _, err := f.WriteString("{not json}\n"); err != nil {
		_ = f.Close()
		t.Fatalf("append corrupt jsonl: %v", err)
	}
	_ = f.Close()
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
		t.Fatalf("bd doctor --fix failed: %v", err)
	}
	// The repaired export must not contain the injected garbage.
	data, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("read jsonl: %v", err)
	}
	if strings.Contains(string(data), "{not json}") {
		t.Fatalf("expected JSONL to be regenerated without corrupt line")
	}
}
// TestDoctorRepair_DatabaseIntegrity_DBWriteLocked_ImportFailsFast holds an
// open write transaction on the SQLite database in-process and verifies that
// `bd import` run with a short BD_LOCK_TIMEOUT fails quickly with a
// lock/busy/timeout error rather than hanging.
func TestDoctorRepair_DatabaseIntegrity_DBWriteLocked_ImportFailsFast(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-db-locked-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	// Lock the DB for writes in-process.
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		t.Fatalf("open db: %v", err)
	}
	defer db.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("begin tx: %v", err)
	}
	// The uncommitted INSERT keeps the write lock held for the life of tx.
	if _, err := tx.Exec("INSERT INTO issues (id, title, status) VALUES ('lock-test', 'Lock Test', 'open')"); err != nil {
		_ = tx.Rollback()
		t.Fatalf("insert lock row: %v", err)
	}
	defer func() { _ = tx.Rollback() }()
	// The 5s context deadline distinguishes "failed fast" from "hung".
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	out, err := runBDWithEnv(ctx, bdExe, ws, dbPath, map[string]string{
		"BD_LOCK_TIMEOUT": "200ms",
	}, "import", "-i", jsonlPath, "--force", "--skip-existing", "--no-git-history")
	if err == nil {
		t.Fatalf("expected bd import to fail under DB write lock")
	}
	if ctx.Err() == context.DeadlineExceeded {
		t.Fatalf("import exceeded timeout (likely hung); output:\n%s", out)
	}
	// Accept any of the common lock-failure phrasings.
	low := strings.ToLower(out)
	if !strings.Contains(low, "locked") && !strings.Contains(low, "busy") && !strings.Contains(low, "timeout") {
		t.Fatalf("expected lock/busy/timeout error, got:\n%s", out)
	}
}
// TestDoctorRepair_CorruptDatabase_ReadOnlyBeadsDir_PermissionsFixMakesWritable
// corrupts the database and then chmods .beads to read-only; `bd doctor --fix`
// is expected to restore write permission (Permissions auto-fix) and succeed,
// leaving the owner-write bit set on .beads.
func TestDoctorRepair_CorruptDatabase_ReadOnlyBeadsDir_PermissionsFixMakesWritable(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-readonly-*")
	beadsDir := filepath.Join(ws, ".beads")
	dbPath := filepath.Join(beadsDir, "beads.db")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	// Corrupt the DB.
	if err := os.Truncate(dbPath, 64); err != nil {
		t.Fatalf("truncate db: %v", err)
	}
	// Make .beads read-only; the Permissions fix should make it writable again.
	if err := os.Chmod(beadsDir, 0555); err != nil {
		t.Fatalf("chmod beads dir: %v", err)
	}
	// Restore permissions unconditionally so temp-dir cleanup can remove files.
	t.Cleanup(func() { _ = os.Chmod(beadsDir, 0755) })
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
		t.Fatalf("expected bd doctor --fix to succeed (permissions auto-fix), got: %v\n%s", err, out)
	}
	info, err := os.Stat(beadsDir)
	if err != nil {
		t.Fatalf("stat beads dir: %v", err)
	}
	// Owner-write bit (0200) must be back on after the fix.
	if info.Mode().Perm()&0200 == 0 {
		t.Fatalf("expected .beads to be writable after permissions fix, mode=%v", info.Mode().Perm())
	}
}
// startDaemonForChaosTest launches `bd daemon --start --foreground` for the
// given workspace and waits up to 8 seconds for the daemon's unix socket
// (.beads/bd.sock) to appear. On success it returns the running *exec.Cmd;
// the caller is responsible for killing and waiting on it. On timeout the
// daemon is killed and the test fails with the captured stdout/stderr.
func startDaemonForChaosTest(t *testing.T, bdExe, ws, dbPath string) *exec.Cmd {
	t.Helper()
	cmd := exec.Command(bdExe, "--db", dbPath, "daemon", "--start", "--foreground", "--local", "--interval", "10m")
	cmd.Dir = ws
	// Buffer output so a startup failure can be reported below.
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	// Inherit environment, but explicitly ensure daemon mode is allowed.
	env := make([]string, 0, len(os.Environ())+1)
	for _, e := range os.Environ() {
		if strings.HasPrefix(e, "BEADS_NO_DAEMON=") {
			continue
		}
		env = append(env, e)
	}
	cmd.Env = env
	if err := cmd.Start(); err != nil {
		t.Fatalf("start daemon: %v", err)
	}
	// Wait for socket to appear.
	sock := filepath.Join(ws, ".beads", "bd.sock")
	deadline := time.Now().Add(8 * time.Second)
	for time.Now().Before(deadline) {
		if _, err := os.Stat(sock); err == nil {
			// Put the process back into the caller's control.
			// NOTE(review): reassigning Stdout/Stderr after Start has no
			// effect — the streams were wired when Start ran, so daemon
			// output continues to go into the buffers above. Harmless, but
			// the apparent intent (discarding further output) is not
			// achieved; confirm and clean up.
			cmd.Stdout = io.Discard
			cmd.Stderr = io.Discard
			return cmd
		}
		time.Sleep(50 * time.Millisecond)
	}
	_ = cmd.Process.Kill()
	_ = cmd.Wait()
	t.Fatalf("daemon failed to start (no socket: %s)\nstdout:\n%s\nstderr:\n%s", sock, stdout.String(), stderr.String())
	return nil
}
// runBDWithEnv invokes the bd binary against dbPath inside dir with extra
// environment overrides, returning combined stdout+stderr. All commands
// except "init" are forced into direct (--no-daemon) mode, and
// BEADS_NO_DAEMON/BEADS_DIR pin the run to the workspace's .beads directory.
// Entries in env are appended last (intended to take precedence).
func runBDWithEnv(ctx context.Context, exe, dir, dbPath string, env map[string]string, args ...string) (string, error) {
	argv := make([]string, 0, len(args)+3)
	argv = append(argv, "--db", dbPath)
	if len(args) > 0 && args[0] != "init" {
		argv = append(argv, "--no-daemon")
	}
	argv = append(argv, args...)
	cmd := exec.CommandContext(ctx, exe, argv...)
	cmd.Dir = dir
	environ := os.Environ()
	environ = append(environ,
		"BEADS_NO_DAEMON=1",
		"BEADS_DIR="+filepath.Join(dir, ".beads"),
	)
	for key, val := range env {
		environ = append(environ, key+"="+val)
	}
	cmd.Env = environ
	combined, runErr := cmd.CombinedOutput()
	return string(combined), runErr
}

View File

@@ -0,0 +1,151 @@
package main
import (
"encoding/json"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
)
// buildBDForTest compiles the bd binary from the current package into a
// per-test temporary directory and returns the executable path. Build
// failures abort the test with the compiler output.
func buildBDForTest(t *testing.T) string {
	t.Helper()
	name := "bd"
	if runtime.GOOS == "windows" {
		name += ".exe"
	}
	exe := filepath.Join(t.TempDir(), name)
	build := exec.Command("go", "build", "-o", exe, ".")
	if out, err := build.CombinedOutput(); err != nil {
		t.Fatalf("go build failed: %v\n%s", err, string(out))
	}
	return exe
}
// mkTmpDirInTmp creates a temporary directory under /tmp (falling back to the
// platform default when /tmp is unavailable, e.g. on Windows) and registers a
// cleanup that removes it when the test finishes.
func mkTmpDirInTmp(t *testing.T, prefix string) string {
	t.Helper()
	dir, err := os.MkdirTemp("/tmp", prefix)
	if err != nil {
		// Fallback for platforms without /tmp (e.g. Windows).
		if dir, err = os.MkdirTemp("", prefix); err != nil {
			t.Fatalf("failed to create temp dir: %v", err)
		}
	}
	t.Cleanup(func() { _ = os.RemoveAll(dir) })
	return dir
}
// runBDSideDB runs the bd binary with an explicit --db path inside dir and
// returns its combined output. Every command except "init" is forced into
// direct (--no-daemon) mode, and BEADS_NO_DAEMON/BEADS_DIR pin the run to the
// workspace's .beads directory.
func runBDSideDB(t *testing.T, exe, dir, dbPath string, args ...string) (string, error) {
	t.Helper()
	directMode := len(args) > 0 && args[0] != "init"
	argv := make([]string, 0, len(args)+3)
	argv = append(argv, "--db", dbPath)
	if directMode {
		argv = append(argv, "--no-daemon")
	}
	argv = append(argv, args...)
	cmd := exec.Command(exe, argv...)
	cmd.Dir = dir
	cmd.Env = append(os.Environ(),
		"BEADS_NO_DAEMON=1",
		"BEADS_DIR="+filepath.Join(dir, ".beads"),
	)
	raw, runErr := cmd.CombinedOutput()
	return string(raw), runErr
}
// TestDoctorRepair_CorruptDatabase_RebuildFromJSONL is the end-to-end repair
// scenario: truncate the SQLite file, confirm `bd doctor --json` reports a
// Database Integrity error, run `bd doctor --fix --yes`, then verify doctor
// reports overall_ok and `bd list` still returns the originally created issue.
func TestDoctorRepair_CorruptDatabase_RebuildFromJSONL(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow repair test in short mode")
	}
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-repair-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	// Seed the workspace: init, one issue, JSONL export as repair source.
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	// Corrupt the SQLite file (truncate) and verify doctor reports an integrity error.
	if err := os.Truncate(dbPath, 128); err != nil {
		t.Fatalf("truncate db: %v", err)
	}
	out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--json")
	if err == nil {
		t.Fatalf("expected bd doctor to fail on corrupt db")
	}
	// Output may carry non-JSON preamble; locate the JSON payload first.
	jsonStart := strings.Index(out, "{")
	if jsonStart < 0 {
		t.Fatalf("doctor output missing JSON: %s", out)
	}
	var before doctorResult
	if err := json.Unmarshal([]byte(out[jsonStart:]), &before); err != nil {
		t.Fatalf("unmarshal doctor json: %v\n%s", err, out)
	}
	var foundIntegrity bool
	for _, c := range before.Checks {
		if c.Name == "Database Integrity" {
			foundIntegrity = true
			if c.Status != statusError {
				t.Fatalf("Database Integrity status=%q want %q", c.Status, statusError)
			}
		}
	}
	if !foundIntegrity {
		t.Fatalf("Database Integrity check not found")
	}
	// Attempt auto-repair.
	out, err = runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes")
	if err != nil {
		t.Fatalf("bd doctor --fix failed: %v\n%s", err, out)
	}
	// Doctor should now pass.
	out, err = runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--json")
	if err != nil {
		t.Fatalf("bd doctor after fix failed: %v\n%s", err, out)
	}
	jsonStart = strings.Index(out, "{")
	if jsonStart < 0 {
		t.Fatalf("doctor output missing JSON: %s", out)
	}
	var after doctorResult
	if err := json.Unmarshal([]byte(out[jsonStart:]), &after); err != nil {
		t.Fatalf("unmarshal doctor json: %v\n%s", err, out)
	}
	if !after.OverallOK {
		t.Fatalf("expected overall_ok=true after repair")
	}
	// Data should still be present.
	out, err = runBDSideDB(t, bdExe, ws, dbPath, "list", "--json")
	if err != nil {
		t.Fatalf("bd list failed after repair: %v\n%s", err, out)
	}
	jsonStart = strings.Index(out, "[")
	if jsonStart < 0 {
		t.Fatalf("list output missing JSON array: %s", out)
	}
	var issues []map[string]any
	if err := json.Unmarshal([]byte(out[jsonStart:]), &issues); err != nil {
		t.Fatalf("unmarshal list json: %v\n%s", err, out)
	}
	// Exactly the one issue created above must survive the rebuild.
	if len(issues) != 1 {
		t.Fatalf("expected 1 issue after repair, got %d", len(issues))
	}
}

View File

@@ -156,7 +156,7 @@ Examples:
_ = daemonClient.Close()
daemonClient = nil
}
// Note: We used to check database file timestamps here, but WAL files
// get created when opening the DB, making timestamp checks unreliable.
// Instead, we check issue counts after loading (see below).
@@ -168,7 +168,7 @@ Examples:
fmt.Fprintf(os.Stderr, "Error: no database path found\n")
os.Exit(1)
}
store, err = sqlite.New(rootCtx, dbPath)
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -302,20 +302,20 @@ Examples:
// Safety check: prevent exporting stale database that would lose issues
if output != "" && !force {
debug.Logf("Debug: checking staleness - output=%s, force=%v\n", output, force)
// Read existing JSONL to get issue IDs
jsonlIDs, err := getIssueIDsFromJSONL(output)
if err != nil && !os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "Warning: failed to read existing JSONL for staleness check: %v\n", err)
}
if err == nil && len(jsonlIDs) > 0 {
// Build set of DB issue IDs
dbIDs := make(map[string]bool)
for _, issue := range issues {
dbIDs[issue.ID] = true
}
// Check if JSONL has any issues that DB doesn't have
var missingIDs []string
for id := range jsonlIDs {
@@ -323,17 +323,17 @@ Examples:
missingIDs = append(missingIDs, id)
}
}
debug.Logf("Debug: JSONL has %d issues, DB has %d issues, missing %d\n",
debug.Logf("Debug: JSONL has %d issues, DB has %d issues, missing %d\n",
len(jsonlIDs), len(issues), len(missingIDs))
if len(missingIDs) > 0 {
slices.Sort(missingIDs)
fmt.Fprintf(os.Stderr, "Error: refusing to export stale database that would lose issues\n")
fmt.Fprintf(os.Stderr, " Database has %d issues\n", len(issues))
fmt.Fprintf(os.Stderr, " JSONL has %d issues\n", len(jsonlIDs))
fmt.Fprintf(os.Stderr, " Export would lose %d issue(s):\n", len(missingIDs))
// Show first 10 missing issues
showCount := len(missingIDs)
if showCount > 10 {
@@ -345,7 +345,7 @@ Examples:
if len(missingIDs) > 10 {
fmt.Fprintf(os.Stderr, " ... and %d more\n", len(missingIDs)-10)
}
fmt.Fprintf(os.Stderr, "\n")
fmt.Fprintf(os.Stderr, "This usually means:\n")
fmt.Fprintf(os.Stderr, " 1. You need to run 'bd import -i %s' to sync the latest changes\n", output)
@@ -434,8 +434,8 @@ Examples:
skippedCount := 0
for _, issue := range issues {
if err := encoder.Encode(issue); err != nil {
fmt.Fprintf(os.Stderr, "Error encoding issue %s: %v\n", issue.ID, err)
os.Exit(1)
fmt.Fprintf(os.Stderr, "Error encoding issue %s: %v\n", issue.ID, err)
os.Exit(1)
}
exportedIDs = append(exportedIDs, issue.ID)
@@ -495,19 +495,19 @@ Examples:
}
}
// Verify JSONL file integrity after export
actualCount, err := countIssuesInJSONL(finalPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Export verification failed: %v\n", err)
os.Exit(1)
}
if actualCount != len(exportedIDs) {
fmt.Fprintf(os.Stderr, "Error: Export verification failed\n")
fmt.Fprintf(os.Stderr, " Expected: %d issues\n", len(exportedIDs))
fmt.Fprintf(os.Stderr, " JSONL file: %d lines\n", actualCount)
fmt.Fprintf(os.Stderr, " Mismatch indicates export failed to write all issues\n")
os.Exit(1)
}
// Verify JSONL file integrity after export
actualCount, err := countIssuesInJSONL(finalPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Export verification failed: %v\n", err)
os.Exit(1)
}
if actualCount != len(exportedIDs) {
fmt.Fprintf(os.Stderr, "Error: Export verification failed\n")
fmt.Fprintf(os.Stderr, " Expected: %d issues\n", len(exportedIDs))
fmt.Fprintf(os.Stderr, " JSONL file: %d lines\n", actualCount)
fmt.Fprintf(os.Stderr, " Mismatch indicates export failed to write all issues\n")
os.Exit(1)
}
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
// Only do this when exporting to default JSONL path (not arbitrary outputs)
@@ -520,9 +520,9 @@ Examples:
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
}
}
}
}
// Output statistics if JSON format requested
// Output statistics if JSON format requested
if jsonOutput {
stats := map[string]interface{}{
"success": true,

View File

@@ -26,36 +26,36 @@ func TestGitPullSyncIntegration(t *testing.T) {
// Create temp directory for test repositories
tempDir := t.TempDir()
// Create "remote" repository
remoteDir := filepath.Join(tempDir, "remote")
if err := os.MkdirAll(remoteDir, 0750); err != nil {
t.Fatalf("Failed to create remote dir: %v", err)
}
// Initialize remote git repo
runGitCmd(t, remoteDir, "init", "--bare")
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create "clone1" repository
clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir)
// Initialize beads in clone1
clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err)
}
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close()
ctx := context.Background()
if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Create and close an issue in clone1
issue := &types.Issue{
Title: "Test sync issue",
@@ -69,80 +69,80 @@ func TestGitPullSyncIntegration(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
issueID := issue.ID
// Close the issue
if err := clone1Store.CloseIssue(ctx, issueID, "Test completed", "test-user"); err != nil {
t.Fatalf("Failed to close issue: %v", err)
}
// Export to JSONL
jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
// Commit and push from clone1
runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Add closed issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Create "clone2" repository
clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir)
// Initialize empty database in clone2
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close()
if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Import the existing JSONL (simulating initial sync)
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import: %v", err)
}
// Verify issue exists and is closed
verifyIssueClosed(t, clone2Store, issueID)
// Note: We don't commit in clone2 - it stays clean as a read-only consumer
// Now test git pull scenario: Clone1 makes a change (update priority)
if err := clone1Store.UpdateIssue(ctx, issueID, map[string]interface{}{
"priority": 0,
}, "test-user"); err != nil {
t.Fatalf("Failed to update issue: %v", err)
}
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export after update: %v", err)
}
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Clone2 pulls the change
runGitCmd(t, clone2Dir, "pull")
// Test auto-import in non-daemon mode
t.Run("NonDaemonAutoImport", func(t *testing.T) {
// Use a temporary local store for this test
localStore := newTestStore(t, clone2DBPath)
defer localStore.Close()
// Manually import to simulate auto-import behavior
startTime := time.Now()
if err := importJSONLToStore(ctx, localStore, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to auto-import: %v", err)
}
elapsed := time.Since(startTime)
// Verify priority was updated
issue, err := localStore.GetIssue(ctx, issueID)
if err != nil {
@@ -151,13 +151,13 @@ func TestGitPullSyncIntegration(t *testing.T) {
if issue.Priority != 0 {
t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority)
}
// Verify performance: import should be fast
if elapsed > 100*time.Millisecond {
t.Logf("Info: import took %v", elapsed)
}
})
// Test bd sync --import-only command
t.Run("BdSyncCommand", func(t *testing.T) {
// Make another change in clone1 (change priority back to 1)
@@ -166,27 +166,27 @@ func TestGitPullSyncIntegration(t *testing.T) {
}, "test-user"); err != nil {
t.Fatalf("Failed to update issue: %v", err)
}
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Clone2 pulls
runGitCmd(t, clone2Dir, "pull")
// Use a fresh store for import
syncStore := newTestStore(t, clone2DBPath)
defer syncStore.Close()
// Manually trigger import via in-process equivalent
if err := importJSONLToStore(ctx, syncStore, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import via sync: %v", err)
}
// Verify priority was updated back to 1
issue, err := syncStore.GetIssue(ctx, issueID)
if err != nil {
@@ -214,7 +214,7 @@ func configureGit(t *testing.T, dir string) {
runGitCmd(t, dir, "config", "user.email", "test@example.com")
runGitCmd(t, dir, "config", "user.name", "Test User")
runGitCmd(t, dir, "config", "pull.rebase", "false")
// Create .gitignore to prevent test database files from being tracked
gitignorePath := filepath.Join(dir, ".gitignore")
gitignoreContent := `# Test database files
@@ -233,7 +233,7 @@ func exportIssuesToJSONL(ctx context.Context, store *sqlite.SQLiteStorage, jsonl
if err != nil {
return err
}
// Populate dependencies
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
@@ -244,20 +244,20 @@ func exportIssuesToJSONL(ctx context.Context, store *sqlite.SQLiteStorage, jsonl
labels, _ := store.GetLabels(ctx, issue.ID)
issue.Labels = labels
}
f, err := os.Create(jsonlPath)
if err != nil {
return err
}
defer f.Close()
encoder := json.NewEncoder(f)
for _, issue := range issues {
if err := encoder.Encode(issue); err != nil {
return err
}
}
return nil
}
@@ -266,7 +266,7 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
if err != nil {
return err
}
// Use the autoimport package's AutoImportIfNewer function
// For testing, we'll directly parse and import
var issues []*types.Issue
@@ -278,7 +278,7 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
}
issues = append(issues, &issue)
}
// Import each issue
for _, issue := range issues {
existing, _ := store.GetIssue(ctx, issue.ID)
@@ -298,12 +298,12 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
}
}
}
// Set last_import_time metadata so staleness check works
if err := store.SetMetadata(ctx, "last_import_time", time.Now().Format(time.RFC3339)); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,107 @@
package main
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// TestTouchDatabaseFile_UsesJSONLMtime checks that TouchDatabaseFile bumps the
// database file's mtime so it is not older than the JSONL file's mtime, even
// when the JSONL timestamp lies slightly in the future.
func TestTouchDatabaseFile_UsesJSONLMtime(t *testing.T) {
	dir := t.TempDir()
	dbFile := filepath.Join(dir, "beads.db")
	jsonlFile := filepath.Join(dir, "issues.jsonl")
	if err := os.WriteFile(dbFile, []byte(""), 0o600); err != nil {
		t.Fatalf("WriteFile db: %v", err)
	}
	if err := os.WriteFile(jsonlFile, []byte("{}\n"), 0o600); err != nil {
		t.Fatalf("WriteFile jsonl: %v", err)
	}
	// Push the JSONL mtime ahead of "now" to force a visible gap.
	future := time.Now().Add(2 * time.Second)
	if err := os.Chtimes(jsonlFile, future, future); err != nil {
		t.Fatalf("Chtimes jsonl: %v", err)
	}
	if err := TouchDatabaseFile(dbFile, jsonlFile); err != nil {
		t.Fatalf("TouchDatabaseFile: %v", err)
	}
	stat, statErr := os.Stat(dbFile)
	if statErr != nil {
		t.Fatalf("Stat db: %v", statErr)
	}
	if stat.ModTime().Before(future) {
		t.Fatalf("db mtime %v should be >= jsonl mtime %v", stat.ModTime(), future)
	}
}
// TestImportDetectPrefixFromIssues checks prefix detection over issue IDs:
// nil input yields the empty string, and the majority prefix wins.
func TestImportDetectPrefixFromIssues(t *testing.T) {
	if detectPrefixFromIssues(nil) != "" {
		t.Fatalf("expected empty")
	}
	in := []*types.Issue{
		{ID: "test-1"},
		{ID: "test-2"},
		{ID: "other-1"},
	}
	if got := detectPrefixFromIssues(in); got != "test" {
		t.Fatalf("got %q, want %q", got, "test")
	}
}
// TestCountLines verifies that countLines counts every newline-terminated
// line, including blank ones.
func TestCountLines(t *testing.T) {
	path := filepath.Join(t.TempDir(), "f.txt")
	if err := os.WriteFile(path, []byte("a\n\nb\n"), 0o600); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}
	if got := countLines(path); got != 3 {
		t.Fatalf("countLines=%d, want 3", got)
	}
}
// TestCheckUncommittedChanges_Warns checks that a dirty issues.jsonl only
// triggers a warning when the import result is empty; a result with created
// issues suppresses the warning.
func TestCheckUncommittedChanges_Warns(t *testing.T) {
	_, cleanup := setupGitRepo(t)
	defer cleanup()

	write := func(data string) {
		if err := os.WriteFile("issues.jsonl", []byte(data), 0o600); err != nil {
			t.Fatalf("WriteFile: %v", err)
		}
	}

	write("{\"id\":\"test-1\"}\n")
	_ = execCmd(t, "git", "add", "issues.jsonl")
	_ = execCmd(t, "git", "commit", "-m", "add issues")

	// Modify without committing.
	write("{\"id\":\"test-1\"}\n{\"id\":\"test-2\"}\n")

	warn := captureStderr(t, func() {
		checkUncommittedChanges("issues.jsonl", &ImportResult{})
	})
	if !strings.Contains(warn, "uncommitted changes") {
		t.Fatalf("expected warning, got: %q", warn)
	}

	noWarn := captureStderr(t, func() {
		checkUncommittedChanges("issues.jsonl", &ImportResult{Created: 1})
	})
	if noWarn != "" {
		t.Fatalf("expected no warning, got: %q", noWarn)
	}
}
// execCmd runs an external command, failing the test immediately on any
// error, and returns the combined stdout/stderr output on success.
func execCmd(t *testing.T, name string, args ...string) string {
	t.Helper()
	cmd := exec.Command(name, args...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("%s %v failed: %v\n%s", name, args, err, out)
	}
	return string(out)
}

116
cmd/bd/list_helpers_test.go Normal file
View File

@@ -0,0 +1,116 @@
package main
import (
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
func TestListParseTimeFlag(t *testing.T) {
cases := []string{
"2025-12-26",
"2025-12-26T12:34:56",
"2025-12-26 12:34:56",
time.DateOnly,
time.RFC3339,
}
for _, c := range cases {
// Just make sure we accept the expected formats.
var s string
switch c {
case time.DateOnly:
s = "2025-12-26"
case time.RFC3339:
s = "2025-12-26T12:34:56Z"
default:
s = c
}
got, err := parseTimeFlag(s)
if err != nil {
t.Fatalf("parseTimeFlag(%q) error: %v", s, err)
}
if got.Year() != 2025 {
t.Fatalf("parseTimeFlag(%q) year=%d, want 2025", s, got.Year())
}
}
if _, err := parseTimeFlag("not-a-date"); err == nil {
t.Fatalf("expected error")
}
}
func TestListPinIndicator(t *testing.T) {
if pinIndicator(&types.Issue{Pinned: true}) == "" {
t.Fatalf("expected pin indicator")
}
if pinIndicator(&types.Issue{Pinned: false}) != "" {
t.Fatalf("expected empty pin indicator")
}
}
func TestListFormatPrettyIssue_BadgesAndDefaults(t *testing.T) {
iss := &types.Issue{ID: "bd-1", Title: "Hello", Status: "wat", Priority: 99, IssueType: "bug"}
out := formatPrettyIssue(iss)
if !strings.Contains(out, "bd-1") || !strings.Contains(out, "Hello") {
t.Fatalf("unexpected output: %q", out)
}
if !strings.Contains(out, "[BUG]") {
t.Fatalf("expected BUG badge: %q", out)
}
}
// TestListBuildIssueTree_ParentChildByDotID checks that dotted IDs establish
// parent/child links and that a child whose parent is absent becomes a root.
func TestListBuildIssueTree_ParentChildByDotID(t *testing.T) {
	mk := func(id, title string) *types.Issue {
		return &types.Issue{ID: id, Title: title, Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	}
	parent := mk("bd-1", "Parent")
	child := mk("bd-1.1", "Child")
	orphan := mk("bd-2.1", "Orphan")

	roots, children := buildIssueTree([]*types.Issue{child, parent, orphan})

	if len(children["bd-1"]) != 1 || children["bd-1"][0].ID != "bd-1.1" {
		t.Fatalf("expected bd-1 to have bd-1.1 child: %+v", children)
	}
	if len(roots) != 2 {
		t.Fatalf("expected 2 roots (parent + orphan), got %d", len(roots))
	}
}
// TestListSortIssues_ClosedNilLast sorts by close time and expects the most
// recently closed issue first and never-closed (nil ClosedAt) issues last.
func TestListSortIssues_ClosedNilLast(t *testing.T) {
	older := time.Now().Add(-2 * time.Hour)
	newer := time.Now().Add(-1 * time.Hour)
	issues := []*types.Issue{
		{ID: "bd-3", ClosedAt: nil},
		{ID: "bd-1", ClosedAt: &older},
		{ID: "bd-2", ClosedAt: &newer},
	}

	sortIssues(issues, "closed", false)

	for i, want := range []string{"bd-2", "bd-1", "bd-3"} {
		if issues[i].ID != want {
			t.Fatalf("unexpected order: %s, %s, %s", issues[0].ID, issues[1].ID, issues[2].ID)
		}
	}
}
// TestListDisplayPrettyList covers both the empty case ("No issues found")
// and a populated list, including a dotted child ID and the total footer.
func TestListDisplayPrettyList(t *testing.T) {
	empty := captureStdout(t, func() error {
		displayPrettyList(nil, false)
		return nil
	})
	if !strings.Contains(empty, "No issues found") {
		t.Fatalf("unexpected output: %q", empty)
	}

	issues := []*types.Issue{
		{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask},
		{ID: "bd-2", Title: "B", Status: types.StatusInProgress, Priority: 1, IssueType: types.TypeFeature},
		{ID: "bd-1.1", Title: "C", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask},
	}
	full := captureStdout(t, func() error {
		displayPrettyList(issues, false)
		return nil
	})
	for _, want := range []string{"bd-1", "bd-1.1", "Total:"} {
		if !strings.Contains(full, want) {
			t.Fatalf("unexpected output: %q", full)
		}
	}
}

View File

@@ -623,6 +623,13 @@ var rootCmd = &cobra.Command{
FallbackReason: FallbackNone,
}
// Doctor should always run in direct mode. It's specifically used to diagnose and
// repair daemon/DB issues, so attempting to connect to (or auto-start) a daemon
// can add noise and timeouts.
if cmd.Name() == "doctor" {
noDaemon = true
}
// Try to connect to daemon first (unless --no-daemon flag is set or worktree safety check fails)
if noDaemon {
daemonStatus.FallbackReason = FallbackFlagNoDaemon
@@ -917,8 +924,14 @@ var rootCmd = &cobra.Command{
if store != nil {
_ = store.Close()
}
if profileFile != nil { pprof.StopCPUProfile(); _ = profileFile.Close() }
if traceFile != nil { trace.Stop(); _ = traceFile.Close() }
if profileFile != nil {
pprof.StopCPUProfile()
_ = profileFile.Close()
}
if traceFile != nil {
trace.Stop()
_ = traceFile.Close()
}
// Cancel the signal context to clean up resources
if rootCancel != nil {

View File

@@ -9,6 +9,7 @@ import (
"runtime"
"testing"
"github.com/steveyegge/beads/internal/git"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
@@ -90,6 +91,7 @@ func testFreshCloneAutoImport(t *testing.T) {
// Test checkGitForIssues detects issues.jsonl
t.Chdir(dir)
git.ResetCaches()
count, path, gitRef := checkGitForIssues()
if count != 1 {
@@ -169,6 +171,7 @@ func testDatabaseRemovalScenario(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches()
// Test checkGitForIssues finds issues.jsonl (canonical name)
count, path, gitRef := checkGitForIssues()
@@ -247,6 +250,7 @@ func testLegacyFilenameSupport(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches()
// Test checkGitForIssues finds issues.jsonl
count, path, gitRef := checkGitForIssues()
@@ -323,6 +327,7 @@ func testPrecedenceTest(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches()
// Test checkGitForIssues prefers issues.jsonl
count, path, _ := checkGitForIssues()
@@ -369,6 +374,7 @@ func testInitSafetyCheck(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches()
// Create empty database (simulating failed import)
dbPath := filepath.Join(beadsDir, "test.db")

View File

@@ -748,8 +748,8 @@ var updateCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error getting %s: %v\n", id, err)
continue
}
if issue != nil && issue.IsTemplate {
fmt.Fprintf(os.Stderr, "Error: cannot update template %s: templates are read-only; use 'bd molecule instantiate' to create a work item\n", id)
if err := validateIssueUpdatable(id, issue); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
continue
}
@@ -768,48 +768,21 @@ var updateCmd = &cobra.Command{
}
// Handle label operations
// Set labels (replaces all existing labels)
if setLabels, ok := updates["set_labels"].([]string); ok && len(setLabels) > 0 {
// Get current labels
currentLabels, err := store.GetLabels(ctx, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting labels for %s: %v\n", id, err)
var setLabels, addLabels, removeLabels []string
if v, ok := updates["set_labels"].([]string); ok {
setLabels = v
}
if v, ok := updates["add_labels"].([]string); ok {
addLabels = v
}
if v, ok := updates["remove_labels"].([]string); ok {
removeLabels = v
}
if len(setLabels) > 0 || len(addLabels) > 0 || len(removeLabels) > 0 {
if err := applyLabelUpdates(ctx, store, id, actor, setLabels, addLabels, removeLabels); err != nil {
fmt.Fprintf(os.Stderr, "Error updating labels for %s: %v\n", id, err)
continue
}
// Remove all current labels
for _, label := range currentLabels {
if err := store.RemoveLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error removing label %s from %s: %v\n", label, id, err)
continue
}
}
// Add new labels
for _, label := range setLabels {
if err := store.AddLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error setting label %s on %s: %v\n", label, id, err)
continue
}
}
}
// Add labels
if addLabels, ok := updates["add_labels"].([]string); ok {
for _, label := range addLabels {
if err := store.AddLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error adding label %s to %s: %v\n", label, id, err)
continue
}
}
}
// Remove labels
if removeLabels, ok := updates["remove_labels"].([]string); ok {
for _, label := range removeLabels {
if err := store.RemoveLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error removing label %s from %s: %v\n", label, id, err)
continue
}
}
}
// Run update hook (bd-kwro.8)
@@ -1084,14 +1057,8 @@ var closeCmd = &cobra.Command{
if showErr == nil {
var issue types.Issue
if json.Unmarshal(showResp.Data, &issue) == nil {
// Check if issue is a template (beads-1ra): templates are read-only
if issue.IsTemplate {
fmt.Fprintf(os.Stderr, "Error: cannot close template %s: templates are read-only\n", id)
continue
}
// Check if issue is pinned (bd-6v2)
if !force && issue.Status == types.StatusPinned {
fmt.Fprintf(os.Stderr, "Error: cannot close pinned issue %s (use --force to override)\n", id)
if err := validateIssueClosable(id, &issue, force); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
continue
}
}
@@ -1169,20 +1136,11 @@ var closeCmd = &cobra.Command{
// Get issue for checks
issue, _ := store.GetIssue(ctx, id)
// Check if issue is a template (beads-1ra): templates are read-only
if issue != nil && issue.IsTemplate {
fmt.Fprintf(os.Stderr, "Error: cannot close template %s: templates are read-only\n", id)
if err := validateIssueClosable(id, issue, force); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
continue
}
// Check if issue is pinned (bd-6v2)
if !force {
if issue != nil && issue.Status == types.StatusPinned {
fmt.Fprintf(os.Stderr, "Error: cannot close pinned issue %s (use --force to override)\n", id)
continue
}
}
if err := store.CloseIssue(ctx, id, reason, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
continue
@@ -1427,15 +1385,13 @@ func findRepliesTo(ctx context.Context, issueID string, daemonClient *rpc.Client
return ""
}
// Direct mode - query storage
if sqliteStore, ok := store.(*sqlite.SQLiteStorage); ok {
deps, err := sqliteStore.GetDependenciesWithMetadata(ctx, issueID)
if err != nil {
return ""
}
for _, dep := range deps {
if dep.DependencyType == types.DepRepliesTo {
return dep.ID
}
deps, err := store.GetDependencyRecords(ctx, issueID)
if err != nil {
return ""
}
for _, dep := range deps {
if dep.Type == types.DepRepliesTo {
return dep.DependsOnID
}
}
return ""
@@ -1484,7 +1440,25 @@ func findReplies(ctx context.Context, issueID string, daemonClient *rpc.Client,
}
return replies
}
return nil
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
return nil
}
var replies []*types.Issue
for childID, deps := range allDeps {
for _, dep := range deps {
if dep.Type == types.DepRepliesTo && dep.DependsOnID == issueID {
issue, _ := store.GetIssue(ctx, childID)
if issue != nil {
replies = append(replies, issue)
}
}
}
}
return replies
}
func init() {

View File

@@ -0,0 +1,68 @@
package main
import (
"context"
"fmt"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
// validateIssueUpdatable returns an error when the issue must not be
// modified (templates are read-only). A nil issue is treated as updatable so
// callers can defer not-found handling to the storage layer.
func validateIssueUpdatable(id string, issue *types.Issue) error {
	switch {
	case issue == nil:
		return nil
	case issue.IsTemplate:
		return fmt.Errorf("Error: cannot update template %s: templates are read-only; use 'bd molecule instantiate' to create a work item", id)
	default:
		return nil
	}
}
// validateIssueClosable returns an error when the issue must not be closed:
// templates are always read-only, and pinned issues require force=true. A
// nil issue is treated as closable.
func validateIssueClosable(id string, issue *types.Issue, force bool) error {
	if issue == nil {
		return nil
	}
	if issue.IsTemplate {
		return fmt.Errorf("Error: cannot close template %s: templates are read-only", id)
	}
	if issue.Status == types.StatusPinned && !force {
		return fmt.Errorf("Error: cannot close pinned issue %s (use --force to override)", id)
	}
	return nil
}
// applyLabelUpdates applies the three label operations for an issue in a
// fixed order: set (replace all existing labels), then add, then remove.
// The first storage error aborts the remaining operations.
func applyLabelUpdates(ctx context.Context, st storage.Storage, issueID, actor string, setLabels, addLabels, removeLabels []string) error {
	// "Set" wipes the current labels before installing the replacement set.
	if len(setLabels) > 0 {
		current, err := st.GetLabels(ctx, issueID)
		if err != nil {
			return err
		}
		for _, old := range current {
			if err := st.RemoveLabel(ctx, issueID, old, actor); err != nil {
				return err
			}
		}
		for _, next := range setLabels {
			if err := st.AddLabel(ctx, issueID, next, actor); err != nil {
				return err
			}
		}
	}
	// Additions are applied after any replacement so they always survive it.
	for _, label := range addLabels {
		if err := st.AddLabel(ctx, issueID, label, actor); err != nil {
			return err
		}
	}
	// Removals run last and therefore win over set/add for the same label.
	for _, label := range removeLabels {
		if err := st.RemoveLabel(ctx, issueID, label, actor); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -0,0 +1,139 @@
package main
import (
"context"
"testing"
"github.com/steveyegge/beads/internal/storage/memory"
"github.com/steveyegge/beads/internal/types"
)
func TestValidateIssueUpdatable(t *testing.T) {
if err := validateIssueUpdatable("x", nil); err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if err := validateIssueUpdatable("x", &types.Issue{IsTemplate: false}); err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if err := validateIssueUpdatable("bd-1", &types.Issue{IsTemplate: true}); err == nil {
t.Fatalf("expected error")
}
}
// TestValidateIssueClosable: nil issues are closable, templates never are,
// and pinned issues close only with force.
func TestValidateIssueClosable(t *testing.T) {
	if err := validateIssueClosable("x", nil, false); err != nil {
		t.Fatalf("expected nil error, got %v", err)
	}
	if err := validateIssueClosable("bd-1", &types.Issue{IsTemplate: true}, false); err == nil {
		t.Fatalf("expected template close error")
	}
	pinned := &types.Issue{Status: types.StatusPinned}
	if err := validateIssueClosable("bd-2", pinned, false); err == nil {
		t.Fatalf("expected pinned close error")
	}
	if err := validateIssueClosable("bd-2", pinned, true); err != nil {
		t.Fatalf("expected pinned close to succeed with force, got %v", err)
	}
}
// TestApplyLabelUpdates_SetAddRemove runs all three operations at once:
// set replaces {old1,old2} with {a,b}, add contributes {b,c}, and remove
// drops a. The surviving labels must be exactly {b,c}.
func TestApplyLabelUpdates_SetAddRemove(t *testing.T) {
	ctx := context.Background()
	st := memory.New("")
	if err := st.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	issue := &types.Issue{Title: "x", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	if err := st.CreateIssue(ctx, issue, "tester"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	_ = st.AddLabel(ctx, issue.ID, "old1", "tester")
	_ = st.AddLabel(ctx, issue.ID, "old2", "tester")

	err := applyLabelUpdates(ctx, st, issue.ID, "tester", []string{"a", "b"}, []string{"b", "c"}, []string{"a"})
	if err != nil {
		t.Fatalf("applyLabelUpdates: %v", err)
	}

	labels, _ := st.GetLabels(ctx, issue.ID)
	if len(labels) != 2 {
		t.Fatalf("expected 2 labels, got %v", labels)
	}
	// Order is not guaranteed, so collect membership instead.
	seen := make(map[string]bool, len(labels))
	for _, l := range labels {
		switch l {
		case "old1", "old2", "a":
			t.Fatalf("unexpected label %q in %v", l, labels)
		default:
			seen[l] = true
		}
	}
	if !seen["b"] || !seen["c"] {
		t.Fatalf("expected labels b and c, got %v", labels)
	}
}
// TestApplyLabelUpdates_AddRemoveOnly exercises add/remove without a set
// operation: starting from {a}, adding b and removing a leaves exactly {b}.
func TestApplyLabelUpdates_AddRemoveOnly(t *testing.T) {
	ctx := context.Background()
	st := memory.New("")
	issue := &types.Issue{Title: "x", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	if err := st.CreateIssue(ctx, issue, "tester"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	_ = st.AddLabel(ctx, issue.ID, "a", "tester")

	if err := applyLabelUpdates(ctx, st, issue.ID, "tester", nil, []string{"b"}, []string{"a"}); err != nil {
		t.Fatalf("applyLabelUpdates: %v", err)
	}
	got, _ := st.GetLabels(ctx, issue.ID)
	if len(got) != 1 || got[0] != "b" {
		t.Fatalf("expected [b], got %v", got)
	}
}
// TestFindRepliesToAndReplies_WorksWithMemoryStorage builds a small message
// thread (root <- reply1 <- reply2) via replies-to dependencies and checks
// both directions: finding a reply's parent and listing direct replies.
func TestFindRepliesToAndReplies_WorksWithMemoryStorage(t *testing.T) {
	ctx := context.Background()
	st := memory.New("")
	if err := st.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}

	// mkMsg creates one message-type issue in storage.
	mkMsg := func(title, sender, assignee string) *types.Issue {
		iss := &types.Issue{Title: title, Status: types.StatusOpen, Priority: 2, IssueType: types.TypeMessage, Sender: sender, Assignee: assignee}
		if err := st.CreateIssue(ctx, iss, "tester"); err != nil {
			t.Fatalf("CreateIssue(%s): %v", title, err)
		}
		return iss
	}
	// link records child replies-to parent.
	link := func(child, parent *types.Issue) {
		dep := &types.Dependency{IssueID: child.ID, DependsOnID: parent.ID, Type: types.DepRepliesTo}
		if err := st.AddDependency(ctx, dep, "tester"); err != nil {
			t.Fatalf("AddDependency(%s->%s): %v", child.ID, parent.ID, err)
		}
	}

	root := mkMsg("root", "a", "b")
	reply1 := mkMsg("r1", "b", "a")
	reply2 := mkMsg("r2", "a", "b")
	link(reply1, root)
	link(reply2, reply1)

	if got := findRepliesTo(ctx, root.ID, nil, st); got != "" {
		t.Fatalf("expected root replies-to to be empty, got %q", got)
	}
	if got := findRepliesTo(ctx, reply2.ID, nil, st); got != reply1.ID {
		t.Fatalf("expected reply2 parent %q, got %q", reply1.ID, got)
	}

	if replies := findReplies(ctx, root.ID, nil, st); len(replies) != 1 || replies[0].ID != reply1.ID {
		t.Fatalf("expected root replies [%s], got %+v", reply1.ID, replies)
	}
	if replies := findReplies(ctx, reply1.ID, nil, st); len(replies) != 1 || replies[0].ID != reply2.ID {
		t.Fatalf("expected reply1 replies [%s], got %+v", reply2.ID, replies)
	}
}

View File

@@ -0,0 +1,71 @@
package main
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/steveyegge/beads/internal/config"
)
// TestBuildGitCommitArgs_ConfigOptions sets git.author and git.no-gpg-sign
// in config and verifies both, plus the commit message flag, appear in the
// generated git commit argument list.
func TestBuildGitCommitArgs_ConfigOptions(t *testing.T) {
	if err := config.Initialize(); err != nil {
		t.Fatalf("config.Initialize: %v", err)
	}
	config.Set("git.author", "Test User <test@example.com>")
	config.Set("git.no-gpg-sign", true)

	args := buildGitCommitArgs("/repo", "hello", "--", ".beads")
	joined := strings.Join(args, " ")

	for _, want := range []string{"--author", "--no-gpg-sign", "-m hello"} {
		if !strings.Contains(joined, want) {
			t.Fatalf("expected %s in args: %v", want, args)
		}
	}
}
// TestGitCommitBeadsDir_PathspecDoesNotCommitOtherStagedFiles ensures that
// gitCommitBeadsDir commits only the .beads pathspec: an unrelated file that
// was staged beforehand must still be staged afterwards.
func TestGitCommitBeadsDir_PathspecDoesNotCommitOtherStagedFiles(t *testing.T) {
	_, cleanup := setupGitRepo(t)
	defer cleanup()
	if err := config.Initialize(); err != nil {
		t.Fatalf("config.Initialize: %v", err)
	}
	if err := os.MkdirAll(".beads", 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}

	// Stage an unrelated file before running gitCommitBeadsDir.
	if err := os.WriteFile("other.txt", []byte("x\n"), 0o600); err != nil {
		t.Fatalf("WriteFile other: %v", err)
	}
	_ = exec.Command("git", "add", "other.txt").Run()

	// Create a beads sync file to commit.
	if err := os.WriteFile(filepath.Join(".beads", "issues.jsonl"), []byte("{\"id\":\"test-1\"}\n"), 0o600); err != nil {
		t.Fatalf("WriteFile issues: %v", err)
	}

	if err := gitCommitBeadsDir(context.Background(), "beads commit"); err != nil {
		t.Fatalf("gitCommitBeadsDir: %v", err)
	}

	// other.txt should still be staged after the beads-only commit.
	staged, err := exec.Command("git", "diff", "--cached", "--name-only").CombinedOutput()
	if err != nil {
		t.Fatalf("git diff --cached: %v\n%s", err, staged)
	}
	if strings.TrimSpace(string(staged)) != "other.txt" {
		t.Fatalf("expected other.txt still staged, got: %q", staged)
	}
}

View File

@@ -0,0 +1,118 @@
package main
import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"testing"
	"time"
)
// Guardrail: ensure the cmd/bd test suite does not touch the real repo .beads state.
// Disable with BEADS_TEST_GUARD_DISABLE=1 (useful when running tests while actively using beads).
// TestMain wraps the package's tests with a guardrail: it snapshots the
// repo-level .beads state files before running and compares them afterwards,
// forcing a failing exit code if any watched file was created, deleted, or
// modified by the test suite.
func TestMain(m *testing.M) {
	// Escape hatch for developers actively using beads while running tests.
	if os.Getenv("BEADS_TEST_GUARD_DISABLE") != "" {
		os.Exit(m.Run())
	}
	// If we can't locate a repo root, or it has no .beads directory, there
	// is nothing to guard — run the tests normally.
	repoRoot := findRepoRoot()
	if repoRoot == "" {
		os.Exit(m.Run())
	}
	repoBeadsDir := filepath.Join(repoRoot, ".beads")
	if _, err := os.Stat(repoBeadsDir); err != nil {
		os.Exit(m.Run())
	}
	// Files whose mutation would indicate a test leaked into real state:
	// the SQLite DB and its sidecar files, JSONL exports, and daemon
	// runtime artifacts.
	watch := []string{
		"beads.db",
		"beads.db-wal",
		"beads.db-shm",
		"beads.db-journal",
		"issues.jsonl",
		"beads.jsonl",
		"metadata.json",
		"interactions.jsonl",
		"deletions.jsonl",
		"molecules.jsonl",
		"daemon.lock",
		"daemon.pid",
		"bd.sock",
	}
	before := snapshotFiles(repoBeadsDir, watch)
	code := m.Run()
	after := snapshotFiles(repoBeadsDir, watch)
	// On any difference, report and fail — but never mask a non-zero exit
	// code the tests themselves produced.
	if diff := diffSnapshots(before, after); diff != "" {
		fmt.Fprintf(os.Stderr, "ERROR: test suite modified repo .beads state:\n%s\n", diff)
		if code == 0 {
			code = 1
		}
	}
	os.Exit(code)
}
type fileSnap struct {
exists bool
size int64
modUnix int64
}
func snapshotFiles(dir string, names []string) map[string]fileSnap {
out := make(map[string]fileSnap, len(names))
for _, name := range names {
p := filepath.Join(dir, name)
info, err := os.Stat(p)
if err != nil {
out[name] = fileSnap{exists: false}
continue
}
out[name] = fileSnap{exists: true, size: info.Size(), modUnix: info.ModTime().UnixNano()}
}
return out
}
func diffSnapshots(before, after map[string]fileSnap) string {
var out string
for name, b := range before {
a := after[name]
if b.exists != a.exists {
out += fmt.Sprintf("- %s: exists %v → %v\n", name, b.exists, a.exists)
continue
}
if !b.exists {
continue
}
if b.size != a.size || b.modUnix != a.modUnix {
out += fmt.Sprintf("- %s: size %d → %d, mtime %s → %s\n",
name,
b.size,
a.size,
time.Unix(0, b.modUnix).UTC().Format(time.RFC3339Nano),
time.Unix(0, a.modUnix).UTC().Format(time.RFC3339Nano),
)
}
}
return out
}
func findRepoRoot() string {
wd, err := os.Getwd()
if err != nil {
return ""
}
for i := 0; i < 25; i++ {
if _, err := os.Stat(filepath.Join(wd, "go.mod")); err == nil {
return wd
}
parent := filepath.Dir(wd)
if parent == wd {
break
}
wd = parent
}
return ""
}

View File

@@ -5,6 +5,8 @@ import (
"os/exec"
"testing"
"time"
"github.com/steveyegge/beads/internal/git"
)
// waitFor repeatedly evaluates pred until it returns true or timeout expires.
@@ -42,6 +44,7 @@ func setupGitRepo(t *testing.T) (repoPath string, cleanup func()) {
_ = os.Chdir(originalWd)
t.Fatalf("failed to init git repo: %v", err)
}
git.ResetCaches()
// Configure git
_ = exec.Command("git", "config", "user.email", "test@test.com").Run()
@@ -85,6 +88,7 @@ func setupGitRepoWithBranch(t *testing.T, branch string) (repoPath string, clean
_ = os.Chdir(originalWd)
t.Fatalf("failed to init git repo: %v", err)
}
git.ResetCaches()
// Configure git
_ = exec.Command("git", "config", "user.email", "test@test.com").Run()

View File

@@ -7,6 +7,7 @@ import (
"testing"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/git"
// Import SQLite driver for test database creation
_ "github.com/ncruces/go-sqlite3/driver"
@@ -78,6 +79,7 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// No sync-branch configured
os.Unsetenv("BEADS_SYNC_BRANCH")
@@ -113,6 +115,7 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {
@@ -144,6 +147,7 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {
@@ -194,6 +198,7 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Clear all daemon-related env vars
os.Unsetenv("BEADS_NO_DAEMON")
@@ -227,6 +232,7 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {
@@ -260,6 +266,7 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {