tests: add chaos doctor repair coverage and stabilize git init

Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com>
This commit is contained in:
Jordan Hubbard
2025-12-25 21:50:13 -04:00
parent 1184bd1e59
commit b089aaa0d6
8 changed files with 323 additions and 193 deletions

View File

@@ -30,36 +30,36 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
defer os.RemoveAll(tempDir) defer os.RemoveAll(tempDir)
// Create "remote" repository // Create "remote" repository
remoteDir := filepath.Join(tempDir, "remote") remoteDir := filepath.Join(tempDir, "remote")
if err := os.MkdirAll(remoteDir, 0750); err != nil { if err := os.MkdirAll(remoteDir, 0750); err != nil {
t.Fatalf("Failed to create remote dir: %v", err) t.Fatalf("Failed to create remote dir: %v", err)
} }
// Initialize remote git repo // Initialize remote git repo
runGitCmd(t, remoteDir, "init", "--bare") runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create "clone1" repository (Agent A) // Create "clone1" repository (Agent A)
clone1Dir := filepath.Join(tempDir, "clone1") clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir) runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir) configureGit(t, clone1Dir)
// Initialize beads in clone1 // Initialize beads in clone1
clone1BeadsDir := filepath.Join(clone1Dir, ".beads") clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil { if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err) t.Fatalf("Failed to create .beads dir: %v", err)
} }
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db") clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
clone1Store := newTestStore(t, clone1DBPath) clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close() defer clone1Store.Close()
ctx := context.Background() ctx := context.Background()
if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil { if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err) t.Fatalf("Failed to set prefix: %v", err)
} }
// Create an open issue in clone1 // Create an open issue in clone1
issue := &types.Issue{ issue := &types.Issue{
Title: "Test daemon auto-import", Title: "Test daemon auto-import",
@@ -73,39 +73,39 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
} }
issueID := issue.ID issueID := issue.ID
// Export to JSONL // Export to JSONL
jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl") jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil { if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err) t.Fatalf("Failed to export: %v", err)
} }
// Commit and push from clone1 // Commit and push from clone1
runGitCmd(t, clone1Dir, "add", ".beads") runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Add test issue") runGitCmd(t, clone1Dir, "commit", "-m", "Add test issue")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// Create "clone2" repository (Agent B) // Create "clone2" repository (Agent B)
clone2Dir := filepath.Join(tempDir, "clone2") clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir) runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir) configureGit(t, clone2Dir)
// Initialize empty database in clone2 // Initialize empty database in clone2
clone2BeadsDir := filepath.Join(clone2Dir, ".beads") clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db") clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
clone2Store := newTestStore(t, clone2DBPath) clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close() defer clone2Store.Close()
if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil { if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err) t.Fatalf("Failed to set prefix: %v", err)
} }
// Import initial JSONL in clone2 // Import initial JSONL in clone2
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl") clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil { if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import: %v", err) t.Fatalf("Failed to import: %v", err)
} }
// Verify issue exists in clone2 // Verify issue exists in clone2
initialIssue, err := clone2Store.GetIssue(ctx, issueID) initialIssue, err := clone2Store.GetIssue(ctx, issueID)
if err != nil { if err != nil {
@@ -114,27 +114,27 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
if initialIssue.Status != types.StatusOpen { if initialIssue.Status != types.StatusOpen {
t.Errorf("Expected status open, got %s", initialIssue.Status) t.Errorf("Expected status open, got %s", initialIssue.Status)
} }
// NOW THE CRITICAL TEST: Agent A closes the issue and pushes // NOW THE CRITICAL TEST: Agent A closes the issue and pushes
t.Run("DaemonAutoImportsAfterGitPull", func(t *testing.T) { t.Run("DaemonAutoImportsAfterGitPull", func(t *testing.T) {
// Agent A closes the issue // Agent A closes the issue
if err := clone1Store.CloseIssue(ctx, issueID, "Completed", "agent-a"); err != nil { if err := clone1Store.CloseIssue(ctx, issueID, "Completed", "agent-a"); err != nil {
t.Fatalf("Failed to close issue: %v", err) t.Fatalf("Failed to close issue: %v", err)
} }
// Agent A exports to JSONL // Agent A exports to JSONL
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil { if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export after close: %v", err) t.Fatalf("Failed to export after close: %v", err)
} }
// Agent A commits and pushes // Agent A commits and pushes
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl") runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Close issue") runGitCmd(t, clone1Dir, "commit", "-m", "Close issue")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B does git pull (updates JSONL on disk) // Agent B does git pull (updates JSONL on disk)
runGitCmd(t, clone2Dir, "pull") runGitCmd(t, clone2Dir, "pull")
// Wait for filesystem to settle after git operations // Wait for filesystem to settle after git operations
// Windows has lower filesystem timestamp precision (typically 100ms) // Windows has lower filesystem timestamp precision (typically 100ms)
// and file I/O may be slower, so we need a longer delay // and file I/O may be slower, so we need a longer delay
@@ -143,23 +143,23 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
} else { } else {
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
} }
// Start daemon server in clone2 // Start daemon server in clone2
socketPath := filepath.Join(clone2BeadsDir, "bd.sock") socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
os.Remove(socketPath) // Ensure clean state os.Remove(socketPath) // Ensure clean state
server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath) server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath)
// Start server in background // Start server in background
serverCtx, serverCancel := context.WithCancel(context.Background()) serverCtx, serverCancel := context.WithCancel(context.Background())
defer serverCancel() defer serverCancel()
go func() { go func() {
if err := server.Start(serverCtx); err != nil { if err := server.Start(serverCtx); err != nil {
t.Logf("Server error: %v", err) t.Logf("Server error: %v", err)
} }
}() }()
// Wait for server to be ready // Wait for server to be ready
for i := 0; i < 50; i++ { for i := 0; i < 50; i++ {
time.Sleep(10 * time.Millisecond) time.Sleep(10 * time.Millisecond)
@@ -167,7 +167,7 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
break break
} }
} }
// Simulate a daemon request (like "bd show <issue>") // Simulate a daemon request (like "bd show <issue>")
// The daemon should auto-import the updated JSONL before responding // The daemon should auto-import the updated JSONL before responding
client, err := rpc.TryConnect(socketPath) client, err := rpc.TryConnect(socketPath)
@@ -178,15 +178,15 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatal("Client is nil") t.Fatal("Client is nil")
} }
defer client.Close() defer client.Close()
client.SetDatabasePath(clone2DBPath) // Route to correct database client.SetDatabasePath(clone2DBPath) // Route to correct database
// Make a request that triggers auto-import check // Make a request that triggers auto-import check
resp, err := client.Execute("show", map[string]string{"id": issueID}) resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil { if err != nil {
t.Fatalf("Failed to get issue from daemon: %v", err) t.Fatalf("Failed to get issue from daemon: %v", err)
} }
// Parse response // Parse response
var issue types.Issue var issue types.Issue
issueJSON, err := json.Marshal(resp.Data) issueJSON, err := json.Marshal(resp.Data)
@@ -196,25 +196,25 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
if err := json.Unmarshal(issueJSON, &issue); err != nil { if err := json.Unmarshal(issueJSON, &issue); err != nil {
t.Fatalf("Failed to unmarshal issue: %v", err) t.Fatalf("Failed to unmarshal issue: %v", err)
} }
status := issue.Status status := issue.Status
// CRITICAL ASSERTION: Daemon should return CLOSED status from JSONL // CRITICAL ASSERTION: Daemon should return CLOSED status from JSONL
// not stale OPEN status from SQLite // not stale OPEN status from SQLite
if status != types.StatusClosed { if status != types.StatusClosed {
t.Errorf("DAEMON AUTO-IMPORT FAILED: Expected status 'closed' but got '%s'", status) t.Errorf("DAEMON AUTO-IMPORT FAILED: Expected status 'closed' but got '%s'", status)
t.Errorf("This means daemon is serving stale SQLite data instead of auto-importing JSONL") t.Errorf("This means daemon is serving stale SQLite data instead of auto-importing JSONL")
// Double-check JSONL has correct status // Double-check JSONL has correct status
jsonlData, _ := os.ReadFile(clone2JSONLPath) jsonlData, _ := os.ReadFile(clone2JSONLPath)
t.Logf("JSONL content: %s", string(jsonlData)) t.Logf("JSONL content: %s", string(jsonlData))
// Double-check what's in SQLite // Double-check what's in SQLite
directIssue, _ := clone2Store.GetIssue(ctx, issueID) directIssue, _ := clone2Store.GetIssue(ctx, issueID)
t.Logf("SQLite status: %s", directIssue.Status) t.Logf("SQLite status: %s", directIssue.Status)
} }
}) })
// Additional test: Verify multiple rapid changes // Additional test: Verify multiple rapid changes
t.Run("DaemonHandlesRapidUpdates", func(t *testing.T) { t.Run("DaemonHandlesRapidUpdates", func(t *testing.T) {
// Agent A updates priority // Agent A updates priority
@@ -223,18 +223,18 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
}, "agent-a"); err != nil { }, "agent-a"); err != nil {
t.Fatalf("Failed to update priority: %v", err) t.Fatalf("Failed to update priority: %v", err)
} }
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil { if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err) t.Fatalf("Failed to export: %v", err)
} }
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl") runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority") runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B pulls // Agent B pulls
runGitCmd(t, clone2Dir, "pull") runGitCmd(t, clone2Dir, "pull")
// Query via daemon - should see priority 0 // Query via daemon - should see priority 0
// (Execute forces auto-import synchronously) // (Execute forces auto-import synchronously)
socketPath := filepath.Join(clone2BeadsDir, "bd.sock") socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
@@ -243,18 +243,18 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatalf("Failed to connect to daemon: %v", err) t.Fatalf("Failed to connect to daemon: %v", err)
} }
defer client.Close() defer client.Close()
client.SetDatabasePath(clone2DBPath) // Route to correct database client.SetDatabasePath(clone2DBPath) // Route to correct database
resp, err := client.Execute("show", map[string]string{"id": issueID}) resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil { if err != nil {
t.Fatalf("Failed to get issue from daemon: %v", err) t.Fatalf("Failed to get issue from daemon: %v", err)
} }
var issue types.Issue var issue types.Issue
issueJSON, _ := json.Marshal(resp.Data) issueJSON, _ := json.Marshal(resp.Data)
json.Unmarshal(issueJSON, &issue) json.Unmarshal(issueJSON, &issue)
if issue.Priority != 0 { if issue.Priority != 0 {
t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority) t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority)
} }
@@ -273,23 +273,23 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
defer os.RemoveAll(tempDir) defer os.RemoveAll(tempDir)
// Setup remote and two clones // Setup remote and two clones
remoteDir := filepath.Join(tempDir, "remote") remoteDir := filepath.Join(tempDir, "remote")
os.MkdirAll(remoteDir, 0750) os.MkdirAll(remoteDir, 0750)
runGitCmd(t, remoteDir, "init", "--bare") runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
clone1Dir := filepath.Join(tempDir, "clone1") clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir) runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir) configureGit(t, clone1Dir)
clone2Dir := filepath.Join(tempDir, "clone2") clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir) runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir) configureGit(t, clone2Dir)
// Initialize beads in both clones // Initialize beads in both clones
ctx := context.Background() ctx := context.Background()
// Clone1 setup // Clone1 setup
clone1BeadsDir := filepath.Join(clone1Dir, ".beads") clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
os.MkdirAll(clone1BeadsDir, 0750) os.MkdirAll(clone1BeadsDir, 0750)
@@ -297,7 +297,7 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
clone1Store := newTestStore(t, clone1DBPath) clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close() defer clone1Store.Close()
clone1Store.SetMetadata(ctx, "issue_prefix", "test") clone1Store.SetMetadata(ctx, "issue_prefix", "test")
// Clone2 setup // Clone2 setup
clone2BeadsDir := filepath.Join(clone2Dir, ".beads") clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
os.MkdirAll(clone2BeadsDir, 0750) os.MkdirAll(clone2BeadsDir, 0750)
@@ -305,7 +305,7 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
clone2Store := newTestStore(t, clone2DBPath) clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close() defer clone2Store.Close()
clone2Store.SetMetadata(ctx, "issue_prefix", "test") clone2Store.SetMetadata(ctx, "issue_prefix", "test")
// Agent A creates issue and pushes // Agent A creates issue and pushes
issue2 := &types.Issue{ issue2 := &types.Issue{
Title: "Shared issue", Title: "Shared issue",
@@ -317,18 +317,18 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
} }
clone1Store.CreateIssue(ctx, issue2, "agent-a") clone1Store.CreateIssue(ctx, issue2, "agent-a")
issueID := issue2.ID issueID := issue2.ID
clone1JSONLPath := filepath.Join(clone1BeadsDir, "issues.jsonl") clone1JSONLPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath) exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath)
runGitCmd(t, clone1Dir, "add", ".beads") runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Initial issue") runGitCmd(t, clone1Dir, "commit", "-m", "Initial issue")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B pulls and imports // Agent B pulls and imports
runGitCmd(t, clone2Dir, "pull") runGitCmd(t, clone2Dir, "pull")
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl") clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath) importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath)
// THE CORRUPTION SCENARIO: // THE CORRUPTION SCENARIO:
// 1. Agent A closes the issue and pushes // 1. Agent A closes the issue and pushes
clone1Store.CloseIssue(ctx, issueID, "Done", "agent-a") clone1Store.CloseIssue(ctx, issueID, "Done", "agent-a")
@@ -336,31 +336,31 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl") runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Close issue") runGitCmd(t, clone1Dir, "commit", "-m", "Close issue")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// 2. Agent B does git pull (JSONL updated on disk) // 2. Agent B does git pull (JSONL updated on disk)
runGitCmd(t, clone2Dir, "pull") runGitCmd(t, clone2Dir, "pull")
// Wait for filesystem to settle after git operations // Wait for filesystem to settle after git operations
time.Sleep(50 * time.Millisecond) time.Sleep(50 * time.Millisecond)
// 3. Agent B daemon exports STALE data (if auto-import doesn't work) // 3. Agent B daemon exports STALE data (if auto-import doesn't work)
// This would overwrite Agent A's closure with old "open" status // This would overwrite Agent A's closure with old "open" status
// Start daemon in clone2 // Start daemon in clone2
socketPath := filepath.Join(clone2BeadsDir, "bd.sock") socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
os.Remove(socketPath) os.Remove(socketPath)
server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath) server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath)
serverCtx, serverCancel := context.WithCancel(context.Background()) serverCtx, serverCancel := context.WithCancel(context.Background())
defer serverCancel() defer serverCancel()
go func() { go func() {
if err := server.Start(serverCtx); err != nil { if err := server.Start(serverCtx); err != nil {
t.Logf("Server error: %v", err) t.Logf("Server error: %v", err)
} }
}() }()
// Wait for server // Wait for server
for i := 0; i < 50; i++ { for i := 0; i < 50; i++ {
time.Sleep(10 * time.Millisecond) time.Sleep(10 * time.Millisecond)
@@ -368,43 +368,43 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
break break
} }
} }
// Trigger daemon operation (should auto-import first) // Trigger daemon operation (should auto-import first)
client, err := rpc.TryConnect(socketPath) client, err := rpc.TryConnect(socketPath)
if err != nil { if err != nil {
t.Fatalf("Failed to connect: %v", err) t.Fatalf("Failed to connect: %v", err)
} }
defer client.Close() defer client.Close()
client.SetDatabasePath(clone2DBPath) client.SetDatabasePath(clone2DBPath)
resp, err := client.Execute("show", map[string]string{"id": issueID}) resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil { if err != nil {
t.Fatalf("Failed to get issue: %v", err) t.Fatalf("Failed to get issue: %v", err)
} }
var issue types.Issue var issue types.Issue
issueJSON, _ := json.Marshal(resp.Data) issueJSON, _ := json.Marshal(resp.Data)
json.Unmarshal(issueJSON, &issue) json.Unmarshal(issueJSON, &issue)
status := issue.Status status := issue.Status
// If daemon didn't auto-import, this would be "open" (stale) // If daemon didn't auto-import, this would be "open" (stale)
// With the fix, it should be "closed" (fresh from JSONL) // With the fix, it should be "closed" (fresh from JSONL)
if status != types.StatusClosed { if status != types.StatusClosed {
t.Errorf("DATA CORRUPTION DETECTED: Daemon has stale status '%s' instead of 'closed'", status) t.Errorf("DATA CORRUPTION DETECTED: Daemon has stale status '%s' instead of 'closed'", status)
t.Error("If daemon exports this stale data, it will overwrite Agent A's changes on next push") t.Error("If daemon exports this stale data, it will overwrite Agent A's changes on next push")
} }
// Now simulate daemon export (which happens on timer) // Now simulate daemon export (which happens on timer)
// With auto-import working, this export should have fresh data // With auto-import working, this export should have fresh data
exportIssuesToJSONL(ctx, clone2Store, clone2JSONLPath) exportIssuesToJSONL(ctx, clone2Store, clone2JSONLPath)
// Read back JSONL to verify it has correct status // Read back JSONL to verify it has correct status
data, _ := os.ReadFile(clone2JSONLPath) data, _ := os.ReadFile(clone2JSONLPath)
var exportedIssue types.Issue var exportedIssue types.Issue
json.NewDecoder(bytes.NewReader(data)).Decode(&exportedIssue) json.NewDecoder(bytes.NewReader(data)).Decode(&exportedIssue)
if exportedIssue.Status != types.StatusClosed { if exportedIssue.Status != types.StatusClosed {
t.Errorf("CORRUPTION: Exported JSONL has wrong status '%s', would overwrite remote", exportedIssue.Status) t.Errorf("CORRUPTION: Exported JSONL has wrong status '%s', would overwrite remote", exportedIssue.Status)
} }

View File

@@ -48,12 +48,12 @@ func TestSyncBranchCommitAndPush_NotConfigured(t *testing.T) {
// Create test issue // Create test issue
issue := &types.Issue{ issue := &types.Issue{
Title: "Test issue", Title: "Test issue",
Status: types.StatusOpen, Status: types.StatusOpen,
Priority: 1, Priority: 1,
IssueType: types.TypeTask, IssueType: types.TypeTask,
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
if err := store.CreateIssue(ctx, issue, "test"); err != nil { if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
@@ -122,12 +122,12 @@ func TestSyncBranchCommitAndPush_Success(t *testing.T) {
// Create test issue // Create test issue
issue := &types.Issue{ issue := &types.Issue{
Title: "Test sync branch issue", Title: "Test sync branch issue",
Status: types.StatusOpen, Status: types.StatusOpen,
Priority: 1, Priority: 1,
IssueType: types.TypeTask, IssueType: types.TypeTask,
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
if err := store.CreateIssue(ctx, issue, "test"); err != nil { if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
@@ -228,12 +228,12 @@ func TestSyncBranchCommitAndPush_EnvOverridesDB(t *testing.T) {
// Create test issue and export JSONL // Create test issue and export JSONL
issue := &types.Issue{ issue := &types.Issue{
Title: "Env override issue", Title: "Env override issue",
Status: types.StatusOpen, Status: types.StatusOpen,
Priority: 1, Priority: 1,
IssueType: types.TypeTask, IssueType: types.TypeTask,
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
if err := store.CreateIssue(ctx, issue, "test"); err != nil { if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
@@ -303,12 +303,12 @@ func TestSyncBranchCommitAndPush_NoChanges(t *testing.T) {
} }
issue := &types.Issue{ issue := &types.Issue{
Title: "Test issue", Title: "Test issue",
Status: types.StatusOpen, Status: types.StatusOpen,
Priority: 1, Priority: 1,
IssueType: types.TypeTask, IssueType: types.TypeTask,
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
if err := store.CreateIssue(ctx, issue, "test"); err != nil { if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
@@ -380,12 +380,12 @@ func TestSyncBranchCommitAndPush_WorktreeHealthCheck(t *testing.T) {
} }
issue := &types.Issue{ issue := &types.Issue{
Title: "Test issue", Title: "Test issue",
Status: types.StatusOpen, Status: types.StatusOpen,
Priority: 1, Priority: 1,
IssueType: types.TypeTask, IssueType: types.TypeTask,
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
if err := store.CreateIssue(ctx, issue, "test"); err != nil { if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
@@ -497,7 +497,7 @@ func TestSyncBranchPull_Success(t *testing.T) {
if err := os.MkdirAll(remoteDir, 0755); err != nil { if err := os.MkdirAll(remoteDir, 0755); err != nil {
t.Fatalf("Failed to create remote dir: %v", err) t.Fatalf("Failed to create remote dir: %v", err)
} }
runGitCmd(t, remoteDir, "init", "--bare") runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create clone1 (will push changes) // Create clone1 (will push changes)
clone1Dir := filepath.Join(tmpDir, "clone1") clone1Dir := filepath.Join(tmpDir, "clone1")
@@ -528,12 +528,12 @@ func TestSyncBranchPull_Success(t *testing.T) {
// Create issue in clone1 // Create issue in clone1
issue := &types.Issue{ issue := &types.Issue{
Title: "Test sync pull issue", Title: "Test sync pull issue",
Status: types.StatusOpen, Status: types.StatusOpen,
Priority: 1, Priority: 1,
IssueType: types.TypeTask, IssueType: types.TypeTask,
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
if err := store1.CreateIssue(ctx, issue, "test"); err != nil { if err := store1.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
@@ -639,7 +639,7 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
tmpDir := t.TempDir() tmpDir := t.TempDir()
remoteDir := filepath.Join(tmpDir, "remote") remoteDir := filepath.Join(tmpDir, "remote")
os.MkdirAll(remoteDir, 0755) os.MkdirAll(remoteDir, 0755)
runGitCmd(t, remoteDir, "init", "--bare") runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Clone1: Agent A // Clone1: Agent A
clone1Dir := filepath.Join(tmpDir, "clone1") clone1Dir := filepath.Join(tmpDir, "clone1")
@@ -660,12 +660,12 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
// Agent A creates issue // Agent A creates issue
issue := &types.Issue{ issue := &types.Issue{
Title: "E2E test issue", Title: "E2E test issue",
Status: types.StatusOpen, Status: types.StatusOpen,
Priority: 1, Priority: 1,
IssueType: types.TypeTask, IssueType: types.TypeTask,
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
store1.CreateIssue(ctx, issue, "agent-a") store1.CreateIssue(ctx, issue, "agent-a")
issueID := issue.ID issueID := issue.ID
@@ -914,7 +914,7 @@ func TestSyncBranchMultipleConcurrentClones(t *testing.T) {
tmpDir := t.TempDir() tmpDir := t.TempDir()
remoteDir := filepath.Join(tmpDir, "remote") remoteDir := filepath.Join(tmpDir, "remote")
os.MkdirAll(remoteDir, 0755) os.MkdirAll(remoteDir, 0755)
runGitCmd(t, remoteDir, "init", "--bare") runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
syncBranch := "beads-sync" syncBranch := "beads-sync"
@@ -1454,7 +1454,7 @@ func TestGitPushFromWorktree_FetchRebaseRetry(t *testing.T) {
// Create a "remote" bare repository // Create a "remote" bare repository
remoteDir := t.TempDir() remoteDir := t.TempDir()
runGitCmd(t, remoteDir, "init", "--bare") runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create first clone (simulates another developer's clone) // Create first clone (simulates another developer's clone)
clone1Dir := t.TempDir() clone1Dir := t.TempDir()
@@ -1524,7 +1524,7 @@ func TestGitPushFromWorktree_FetchRebaseRetry(t *testing.T) {
// Now try to push from worktree - this should trigger the fetch-rebase-retry logic // Now try to push from worktree - this should trigger the fetch-rebase-retry logic
// because the remote has commits that the local worktree doesn't have // because the remote has commits that the local worktree doesn't have
err := gitPushFromWorktree(ctx, worktreePath, "beads-sync") err := gitPushFromWorktree(ctx, worktreePath, "beads-sync", "")
if err != nil { if err != nil {
t.Fatalf("gitPushFromWorktree failed: %v (expected fetch-rebase-retry to succeed)", err) t.Fatalf("gitPushFromWorktree failed: %v (expected fetch-rebase-retry to succeed)", err)
} }

View File

@@ -8,6 +8,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"io" "io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@@ -897,11 +898,7 @@ func setupDaemonTestEnvForDelete(t *testing.T) (context.Context, context.CancelF
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
log := daemonLogger{ log := daemonLogger{logger: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{Level: slog.LevelInfo}))}
logFunc: func(format string, args ...interface{}) {
t.Logf("[daemon] "+format, args...)
},
}
server, _, err := startRPCServer(ctx, socketPath, testStore, tmpDir, testDBPath, log) server, _, err := startRPCServer(ctx, socketPath, testStore, tmpDir, testDBPath, log)
if err != nil { if err != nil {

View File

@@ -12,7 +12,11 @@ func mkTmpDirInTmp(t *testing.T, prefix string) string {
t.Helper() t.Helper()
dir, err := os.MkdirTemp("/tmp", prefix) dir, err := os.MkdirTemp("/tmp", prefix)
if err != nil { if err != nil {
t.Fatalf("failed to create temp dir: %v", err) // Fallback for platforms without /tmp (e.g. Windows).
dir, err = os.MkdirTemp("", prefix)
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
} }
t.Cleanup(func() { _ = os.RemoveAll(dir) }) t.Cleanup(func() { _ = os.RemoveAll(dir) })
return dir return dir

View File

@@ -0,0 +1,125 @@
//go:build chaos
package main
import (
"os"
"path/filepath"
"strings"
"testing"
)
func TestDoctorRepair_CorruptDatabase_NotADatabase_RebuildFromJSONL(t *testing.T) {
bdExe := buildBDForTest(t)
ws := mkTmpDirInTmp(t, "bd-doctor-chaos-*")
dbPath := filepath.Join(ws, ".beads", "beads.db")
jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
t.Fatalf("bd init failed: %v", err)
}
if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
t.Fatalf("bd create failed: %v", err)
}
if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
t.Fatalf("bd export failed: %v", err)
}
// Make the DB unreadable.
if err := os.WriteFile(dbPath, []byte("not a database"), 0644); err != nil {
t.Fatalf("corrupt db: %v", err)
}
if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
t.Fatalf("bd doctor --fix failed: %v", err)
}
if out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor"); err != nil {
t.Fatalf("bd doctor after fix failed: %v\n%s", err, out)
}
}
// TestDoctorRepair_CorruptDatabase_NoJSONL_FixFails verifies that
// `bd doctor --fix` refuses to "repair" a corrupt database when there is no
// JSONL export to rebuild from, and reports that it cannot auto-recover.
func TestDoctorRepair_CorruptDatabase_NoJSONL_FixFails(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-nojsonl-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v\n%s", err, out)
	}
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v\n%s", err, out)
	}
	// Some workflows keep JSONL in sync automatically; force it to be missing.
	_ = os.Remove(filepath.Join(ws, ".beads", "issues.jsonl"))
	_ = os.Remove(filepath.Join(ws, ".beads", "beads.jsonl"))
	// Corrupt without providing a JSONL source-of-truth.
	if err := os.Truncate(dbPath, 64); err != nil {
		t.Fatalf("truncate db: %v", err)
	}
	out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes")
	if err == nil {
		t.Fatalf("expected bd doctor --fix to fail without JSONL, output:\n%s", out)
	}
	if !strings.Contains(out, "cannot auto-recover") {
		t.Fatalf("expected auto-recover error, got:\n%s", out)
	}
}
// TestDoctorRepair_CorruptDatabase_BacksUpSidecars verifies that when
// `bd doctor --fix` replaces a corrupt database, it preserves the old DB as a
// ".corrupt.backup.db" file in .beads and (best-effort) moves SQLite sidecar
// files (-wal/-shm/-journal) alongside it.
func TestDoctorRepair_CorruptDatabase_BacksUpSidecars(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-sidecars-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	// Seed a workspace with one issue and an up-to-date JSONL export.
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v\n%s", err, out)
	}
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v\n%s", err, out)
	}
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v\n%s", err, out)
	}
	// Ensure sidecars exist so we can verify they get moved with the backup.
	for _, suffix := range []string{"-wal", "-shm", "-journal"} {
		if err := os.WriteFile(dbPath+suffix, []byte("x"), 0644); err != nil {
			t.Fatalf("write sidecar %s: %v", suffix, err)
		}
	}
	if err := os.Truncate(dbPath, 64); err != nil {
		t.Fatalf("truncate db: %v", err)
	}
	if out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
		t.Fatalf("bd doctor --fix failed: %v\n%s", err, out)
	}
	// Verify a backup exists, and at least one sidecar got moved.
	entries, err := os.ReadDir(filepath.Join(ws, ".beads"))
	if err != nil {
		t.Fatalf("readdir: %v", err)
	}
	var backup string
	for _, e := range entries {
		if strings.Contains(e.Name(), ".corrupt.backup.db") {
			backup = filepath.Join(ws, ".beads", e.Name())
			break
		}
	}
	if backup == "" {
		t.Fatalf("expected backup db in .beads, found none")
	}
	wal := backup + "-wal"
	if _, err := os.Stat(wal); err != nil {
		// At minimum, the backup DB itself should exist; sidecar backup is best-effort.
		if _, err2 := os.Stat(backup); err2 != nil {
			t.Fatalf("backup db missing: %v", err2)
		}
	}
}

View File

@@ -31,7 +31,11 @@ func mkTmpDirInTmp(t *testing.T, prefix string) string {
t.Helper() t.Helper()
dir, err := os.MkdirTemp("/tmp", prefix) dir, err := os.MkdirTemp("/tmp", prefix)
if err != nil { if err != nil {
t.Fatalf("failed to create temp dir: %v", err) // Fallback for platforms without /tmp (e.g. Windows).
dir, err = os.MkdirTemp("", prefix)
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
} }
t.Cleanup(func() { _ = os.RemoveAll(dir) }) t.Cleanup(func() { _ = os.RemoveAll(dir) })
return dir return dir

View File

@@ -26,36 +26,36 @@ func TestGitPullSyncIntegration(t *testing.T) {
// Create temp directory for test repositories // Create temp directory for test repositories
tempDir := t.TempDir() tempDir := t.TempDir()
// Create "remote" repository // Create "remote" repository
remoteDir := filepath.Join(tempDir, "remote") remoteDir := filepath.Join(tempDir, "remote")
if err := os.MkdirAll(remoteDir, 0750); err != nil { if err := os.MkdirAll(remoteDir, 0750); err != nil {
t.Fatalf("Failed to create remote dir: %v", err) t.Fatalf("Failed to create remote dir: %v", err)
} }
// Initialize remote git repo // Initialize remote git repo
runGitCmd(t, remoteDir, "init", "--bare") runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
// Create "clone1" repository // Create "clone1" repository
clone1Dir := filepath.Join(tempDir, "clone1") clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir) runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir) configureGit(t, clone1Dir)
// Initialize beads in clone1 // Initialize beads in clone1
clone1BeadsDir := filepath.Join(clone1Dir, ".beads") clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil { if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err) t.Fatalf("Failed to create .beads dir: %v", err)
} }
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db") clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
clone1Store := newTestStore(t, clone1DBPath) clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close() defer clone1Store.Close()
ctx := context.Background() ctx := context.Background()
if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil { if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err) t.Fatalf("Failed to set prefix: %v", err)
} }
// Create and close an issue in clone1 // Create and close an issue in clone1
issue := &types.Issue{ issue := &types.Issue{
Title: "Test sync issue", Title: "Test sync issue",
@@ -69,80 +69,80 @@ func TestGitPullSyncIntegration(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
} }
issueID := issue.ID issueID := issue.ID
// Close the issue // Close the issue
if err := clone1Store.CloseIssue(ctx, issueID, "Test completed", "test-user"); err != nil { if err := clone1Store.CloseIssue(ctx, issueID, "Test completed", "test-user"); err != nil {
t.Fatalf("Failed to close issue: %v", err) t.Fatalf("Failed to close issue: %v", err)
} }
// Export to JSONL // Export to JSONL
jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl") jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil { if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err) t.Fatalf("Failed to export: %v", err)
} }
// Commit and push from clone1 // Commit and push from clone1
runGitCmd(t, clone1Dir, "add", ".beads") runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Add closed issue") runGitCmd(t, clone1Dir, "commit", "-m", "Add closed issue")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// Create "clone2" repository // Create "clone2" repository
clone2Dir := filepath.Join(tempDir, "clone2") clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir) runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir) configureGit(t, clone2Dir)
// Initialize empty database in clone2 // Initialize empty database in clone2
clone2BeadsDir := filepath.Join(clone2Dir, ".beads") clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db") clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
clone2Store := newTestStore(t, clone2DBPath) clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close() defer clone2Store.Close()
if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil { if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err) t.Fatalf("Failed to set prefix: %v", err)
} }
// Import the existing JSONL (simulating initial sync) // Import the existing JSONL (simulating initial sync)
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl") clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil { if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import: %v", err) t.Fatalf("Failed to import: %v", err)
} }
// Verify issue exists and is closed // Verify issue exists and is closed
verifyIssueClosed(t, clone2Store, issueID) verifyIssueClosed(t, clone2Store, issueID)
// Note: We don't commit in clone2 - it stays clean as a read-only consumer // Note: We don't commit in clone2 - it stays clean as a read-only consumer
// Now test git pull scenario: Clone1 makes a change (update priority) // Now test git pull scenario: Clone1 makes a change (update priority)
if err := clone1Store.UpdateIssue(ctx, issueID, map[string]interface{}{ if err := clone1Store.UpdateIssue(ctx, issueID, map[string]interface{}{
"priority": 0, "priority": 0,
}, "test-user"); err != nil { }, "test-user"); err != nil {
t.Fatalf("Failed to update issue: %v", err) t.Fatalf("Failed to update issue: %v", err)
} }
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil { if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export after update: %v", err) t.Fatalf("Failed to export after update: %v", err)
} }
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl") runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority") runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// Clone2 pulls the change // Clone2 pulls the change
runGitCmd(t, clone2Dir, "pull") runGitCmd(t, clone2Dir, "pull")
// Test auto-import in non-daemon mode // Test auto-import in non-daemon mode
t.Run("NonDaemonAutoImport", func(t *testing.T) { t.Run("NonDaemonAutoImport", func(t *testing.T) {
// Use a temporary local store for this test // Use a temporary local store for this test
localStore := newTestStore(t, clone2DBPath) localStore := newTestStore(t, clone2DBPath)
defer localStore.Close() defer localStore.Close()
// Manually import to simulate auto-import behavior // Manually import to simulate auto-import behavior
startTime := time.Now() startTime := time.Now()
if err := importJSONLToStore(ctx, localStore, clone2DBPath, clone2JSONLPath); err != nil { if err := importJSONLToStore(ctx, localStore, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to auto-import: %v", err) t.Fatalf("Failed to auto-import: %v", err)
} }
elapsed := time.Since(startTime) elapsed := time.Since(startTime)
// Verify priority was updated // Verify priority was updated
issue, err := localStore.GetIssue(ctx, issueID) issue, err := localStore.GetIssue(ctx, issueID)
if err != nil { if err != nil {
@@ -151,13 +151,13 @@ func TestGitPullSyncIntegration(t *testing.T) {
if issue.Priority != 0 { if issue.Priority != 0 {
t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority) t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority)
} }
// Verify performance: import should be fast // Verify performance: import should be fast
if elapsed > 100*time.Millisecond { if elapsed > 100*time.Millisecond {
t.Logf("Info: import took %v", elapsed) t.Logf("Info: import took %v", elapsed)
} }
}) })
// Test bd sync --import-only command // Test bd sync --import-only command
t.Run("BdSyncCommand", func(t *testing.T) { t.Run("BdSyncCommand", func(t *testing.T) {
// Make another change in clone1 (change priority back to 1) // Make another change in clone1 (change priority back to 1)
@@ -166,27 +166,27 @@ func TestGitPullSyncIntegration(t *testing.T) {
}, "test-user"); err != nil { }, "test-user"); err != nil {
t.Fatalf("Failed to update issue: %v", err) t.Fatalf("Failed to update issue: %v", err)
} }
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil { if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err) t.Fatalf("Failed to export: %v", err)
} }
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl") runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority") runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master") runGitCmd(t, clone1Dir, "push", "origin", "master")
// Clone2 pulls // Clone2 pulls
runGitCmd(t, clone2Dir, "pull") runGitCmd(t, clone2Dir, "pull")
// Use a fresh store for import // Use a fresh store for import
syncStore := newTestStore(t, clone2DBPath) syncStore := newTestStore(t, clone2DBPath)
defer syncStore.Close() defer syncStore.Close()
// Manually trigger import via in-process equivalent // Manually trigger import via in-process equivalent
if err := importJSONLToStore(ctx, syncStore, clone2DBPath, clone2JSONLPath); err != nil { if err := importJSONLToStore(ctx, syncStore, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import via sync: %v", err) t.Fatalf("Failed to import via sync: %v", err)
} }
// Verify priority was updated back to 1 // Verify priority was updated back to 1
issue, err := syncStore.GetIssue(ctx, issueID) issue, err := syncStore.GetIssue(ctx, issueID)
if err != nil { if err != nil {
@@ -214,7 +214,7 @@ func configureGit(t *testing.T, dir string) {
runGitCmd(t, dir, "config", "user.email", "test@example.com") runGitCmd(t, dir, "config", "user.email", "test@example.com")
runGitCmd(t, dir, "config", "user.name", "Test User") runGitCmd(t, dir, "config", "user.name", "Test User")
runGitCmd(t, dir, "config", "pull.rebase", "false") runGitCmd(t, dir, "config", "pull.rebase", "false")
// Create .gitignore to prevent test database files from being tracked // Create .gitignore to prevent test database files from being tracked
gitignorePath := filepath.Join(dir, ".gitignore") gitignorePath := filepath.Join(dir, ".gitignore")
gitignoreContent := `# Test database files gitignoreContent := `# Test database files
@@ -233,7 +233,7 @@ func exportIssuesToJSONL(ctx context.Context, store *sqlite.SQLiteStorage, jsonl
if err != nil { if err != nil {
return err return err
} }
// Populate dependencies // Populate dependencies
allDeps, err := store.GetAllDependencyRecords(ctx) allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil { if err != nil {
@@ -244,20 +244,20 @@ func exportIssuesToJSONL(ctx context.Context, store *sqlite.SQLiteStorage, jsonl
labels, _ := store.GetLabels(ctx, issue.ID) labels, _ := store.GetLabels(ctx, issue.ID)
issue.Labels = labels issue.Labels = labels
} }
f, err := os.Create(jsonlPath) f, err := os.Create(jsonlPath)
if err != nil { if err != nil {
return err return err
} }
defer f.Close() defer f.Close()
encoder := json.NewEncoder(f) encoder := json.NewEncoder(f)
for _, issue := range issues { for _, issue := range issues {
if err := encoder.Encode(issue); err != nil { if err := encoder.Encode(issue); err != nil {
return err return err
} }
} }
return nil return nil
} }
@@ -266,7 +266,7 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
if err != nil { if err != nil {
return err return err
} }
// Use the autoimport package's AutoImportIfNewer function // Use the autoimport package's AutoImportIfNewer function
// For testing, we'll directly parse and import // For testing, we'll directly parse and import
var issues []*types.Issue var issues []*types.Issue
@@ -278,7 +278,7 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
} }
issues = append(issues, &issue) issues = append(issues, &issue)
} }
// Import each issue // Import each issue
for _, issue := range issues { for _, issue := range issues {
existing, _ := store.GetIssue(ctx, issue.ID) existing, _ := store.GetIssue(ctx, issue.ID)
@@ -298,12 +298,12 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
} }
} }
} }
// Set last_import_time metadata so staleness check works // Set last_import_time metadata so staleness check works
if err := store.SetMetadata(ctx, "last_import_time", time.Now().Format(time.RFC3339)); err != nil { if err := store.SetMetadata(ctx, "last_import_time", time.Now().Format(time.RFC3339)); err != nil {
return err return err
} }
return nil return nil
} }

View File

@@ -48,10 +48,10 @@ func TestMain(m *testing.M) {
fmt.Fprintf(os.Stderr, "Failed to build bd binary: %v\n%s\n", err, out) fmt.Fprintf(os.Stderr, "Failed to build bd binary: %v\n%s\n", err, out)
os.Exit(1) os.Exit(1)
} }
// Optimize git for tests // Optimize git for tests
os.Setenv("GIT_CONFIG_NOSYSTEM", "1") os.Setenv("GIT_CONFIG_NOSYSTEM", "1")
os.Exit(m.Run()) os.Exit(m.Run())
} }
@@ -85,35 +85,35 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
} }
t.Parallel() t.Parallel()
tmpDir := testutil.TempDirInMemory(t) tmpDir := testutil.TempDirInMemory(t)
bdPath := getBDPath() bdPath := getBDPath()
if _, err := os.Stat(bdPath); err != nil { if _, err := os.Stat(bdPath); err != nil {
t.Fatalf("bd binary not found at %s", bdPath) t.Fatalf("bd binary not found at %s", bdPath)
} }
// Setup remote and 3 clones // Setup remote and 3 clones
remoteDir := setupBareRepo(t, tmpDir) remoteDir := setupBareRepo(t, tmpDir)
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath) cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath) cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
cloneC := setupClone(t, tmpDir, remoteDir, "C", bdPath) cloneC := setupClone(t, tmpDir, remoteDir, "C", bdPath)
// Each clone creates unique issue (different content = different hash ID) // Each clone creates unique issue (different content = different hash ID)
createIssueInClone(t, cloneA, "Issue from clone A") createIssueInClone(t, cloneA, "Issue from clone A")
createIssueInClone(t, cloneB, "Issue from clone B") createIssueInClone(t, cloneB, "Issue from clone B")
createIssueInClone(t, cloneC, "Issue from clone C") createIssueInClone(t, cloneC, "Issue from clone C")
// Sync all clones once (hash IDs prevent collisions, don't need multiple rounds) // Sync all clones once (hash IDs prevent collisions, don't need multiple rounds)
for _, clone := range []string{cloneA, cloneB, cloneC} { for _, clone := range []string{cloneA, cloneB, cloneC} {
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync") runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
} }
// Verify all clones have all 3 issues // Verify all clones have all 3 issues
expectedTitles := map[string]bool{ expectedTitles := map[string]bool{
"Issue from clone A": true, "Issue from clone A": true,
"Issue from clone B": true, "Issue from clone B": true,
"Issue from clone C": true, "Issue from clone C": true,
} }
allConverged := true allConverged := true
for name, dir := range map[string]string{"A": cloneA, "B": cloneB, "C": cloneC} { for name, dir := range map[string]string{"A": cloneA, "B": cloneB, "C": cloneC} {
titles := getTitlesFromClone(t, dir) titles := getTitlesFromClone(t, dir)
@@ -122,7 +122,7 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
allConverged = false allConverged = false
} }
} }
if allConverged { if allConverged {
t.Log("✓ All 3 clones converged with hash-based IDs") t.Log("✓ All 3 clones converged with hash-based IDs")
} else { } else {
@@ -138,26 +138,26 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
} }
t.Parallel() t.Parallel()
tmpDir := testutil.TempDirInMemory(t) tmpDir := testutil.TempDirInMemory(t)
bdPath := getBDPath() bdPath := getBDPath()
if _, err := os.Stat(bdPath); err != nil { if _, err := os.Stat(bdPath); err != nil {
t.Fatalf("bd binary not found at %s", bdPath) t.Fatalf("bd binary not found at %s", bdPath)
} }
// Setup remote and 2 clones // Setup remote and 2 clones
remoteDir := setupBareRepo(t, tmpDir) remoteDir := setupBareRepo(t, tmpDir)
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath) cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath) cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
// Both clones create identical issue (same content = same hash ID) // Both clones create identical issue (same content = same hash ID)
createIssueInClone(t, cloneA, "Identical issue") createIssueInClone(t, cloneA, "Identical issue")
createIssueInClone(t, cloneB, "Identical issue") createIssueInClone(t, cloneB, "Identical issue")
// Sync both clones once (hash IDs handle dedup automatically) // Sync both clones once (hash IDs handle dedup automatically)
for _, clone := range []string{cloneA, cloneB} { for _, clone := range []string{cloneA, cloneB} {
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync") runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
} }
// Verify both clones have exactly 1 issue (deduplication worked) // Verify both clones have exactly 1 issue (deduplication worked)
for name, dir := range map[string]string{"A": cloneA, "B": cloneB} { for name, dir := range map[string]string{"A": cloneA, "B": cloneB} {
titles := getTitlesFromClone(t, dir) titles := getTitlesFromClone(t, dir)
@@ -168,7 +168,7 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
t.Errorf("Clone %s missing expected issue: %v", name, sortedKeys(titles)) t.Errorf("Clone %s missing expected issue: %v", name, sortedKeys(titles))
} }
} }
t.Log("✓ Identical content deduplicated correctly with hash-based IDs") t.Log("✓ Identical content deduplicated correctly with hash-based IDs")
} }
@@ -177,36 +177,36 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
func setupBareRepo(t *testing.T, tmpDir string) string { func setupBareRepo(t *testing.T, tmpDir string) string {
t.Helper() t.Helper()
remoteDir := filepath.Join(tmpDir, "remote.git") remoteDir := filepath.Join(tmpDir, "remote.git")
runCmd(t, tmpDir, "git", "init", "--bare", remoteDir) runCmd(t, tmpDir, "git", "init", "--bare", "-b", "master", remoteDir)
tempClone := filepath.Join(tmpDir, "temp-init") tempClone := filepath.Join(tmpDir, "temp-init")
runCmd(t, tmpDir, "git", "clone", remoteDir, tempClone) runCmd(t, tmpDir, "git", "clone", remoteDir, tempClone)
runCmd(t, tempClone, "git", "commit", "--allow-empty", "-m", "Initial commit") runCmd(t, tempClone, "git", "commit", "--allow-empty", "-m", "Initial commit")
runCmd(t, tempClone, "git", "push", "origin", "master") runCmd(t, tempClone, "git", "push", "origin", "master")
return remoteDir return remoteDir
} }
func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string { func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
t.Helper() t.Helper()
cloneDir := filepath.Join(tmpDir, "clone-"+strings.ToLower(name)) cloneDir := filepath.Join(tmpDir, "clone-"+strings.ToLower(name))
// Use shallow, shared clones for speed // Use shallow, shared clones for speed
runCmd(t, tmpDir, "git", "clone", "--shared", "--depth=1", "--no-tags", remoteDir, cloneDir) runCmd(t, tmpDir, "git", "clone", "--shared", "--depth=1", "--no-tags", remoteDir, cloneDir)
// Disable hooks to avoid overhead // Disable hooks to avoid overhead
emptyHooks := filepath.Join(cloneDir, ".empty-hooks") emptyHooks := filepath.Join(cloneDir, ".empty-hooks")
os.MkdirAll(emptyHooks, 0755) os.MkdirAll(emptyHooks, 0755)
runCmd(t, cloneDir, "git", "config", "core.hooksPath", emptyHooks) runCmd(t, cloneDir, "git", "config", "core.hooksPath", emptyHooks)
// Speed configs // Speed configs
runCmd(t, cloneDir, "git", "config", "gc.auto", "0") runCmd(t, cloneDir, "git", "config", "gc.auto", "0")
runCmd(t, cloneDir, "git", "config", "core.fsync", "false") runCmd(t, cloneDir, "git", "config", "core.fsync", "false")
runCmd(t, cloneDir, "git", "config", "commit.gpgSign", "false") runCmd(t, cloneDir, "git", "config", "commit.gpgSign", "false")
bdCmd := getBDCommand() bdCmd := getBDCommand()
copyFile(t, bdPath, filepath.Join(cloneDir, filepath.Base(bdCmd))) copyFile(t, bdPath, filepath.Join(cloneDir, filepath.Base(bdCmd)))
if name == "A" { if name == "A" {
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test") runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
runCmd(t, cloneDir, "git", "add", ".beads") runCmd(t, cloneDir, "git", "add", ".beads")
@@ -216,7 +216,7 @@ func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
runCmd(t, cloneDir, "git", "pull", "origin", "master") runCmd(t, cloneDir, "git", "pull", "origin", "master")
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test") runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
} }
return cloneDir return cloneDir
} }
@@ -231,13 +231,13 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
"BEADS_NO_DAEMON": "1", "BEADS_NO_DAEMON": "1",
"BD_NO_AUTO_IMPORT": "1", "BD_NO_AUTO_IMPORT": "1",
}, getBDCommand(), "list", "--json") }, getBDCommand(), "list", "--json")
jsonStart := strings.Index(listJSON, "[") jsonStart := strings.Index(listJSON, "[")
if jsonStart == -1 { if jsonStart == -1 {
return make(map[string]bool) return make(map[string]bool)
} }
listJSON = listJSON[jsonStart:] listJSON = listJSON[jsonStart:]
var issues []struct { var issues []struct {
Title string `json:"title"` Title string `json:"title"`
} }
@@ -245,7 +245,7 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
t.Logf("Failed to parse JSON: %v", err) t.Logf("Failed to parse JSON: %v", err)
return make(map[string]bool) return make(map[string]bool)
} }
titles := make(map[string]bool) titles := make(map[string]bool)
for _, issue := range issues { for _, issue := range issues {
titles[issue.Title] = true titles[issue.Title] = true
@@ -280,7 +280,7 @@ func installGitHooks(t *testing.T, repoDir string) {
hooksDir := filepath.Join(repoDir, ".git", "hooks") hooksDir := filepath.Join(repoDir, ".git", "hooks")
// Ensure POSIX-style path for sh scripts (even on Windows) // Ensure POSIX-style path for sh scripts (even on Windows)
bdCmd := strings.ReplaceAll(getBDCommand(), "\\", "/") bdCmd := strings.ReplaceAll(getBDCommand(), "\\", "/")
preCommit := fmt.Sprintf(`#!/bin/sh preCommit := fmt.Sprintf(`#!/bin/sh
%s --no-daemon export -o .beads/issues.jsonl >/dev/null 2>&1 || true %s --no-daemon export -o .beads/issues.jsonl >/dev/null 2>&1 || true
git add .beads/issues.jsonl >/dev/null 2>&1 || true git add .beads/issues.jsonl >/dev/null 2>&1 || true