Fix tests (bd-6ss and sub-issues) (#626)

Test coverage improvements for bd-6ss. Fixing failing test assumption in follow-up commit.
This commit is contained in:
matt wilkie
2025-12-18 19:23:30 -07:00
committed by GitHub
parent 7f9ee3d1c4
commit fb16e504e6
14 changed files with 1656 additions and 10 deletions

3
cmd/bd/.gitattributes vendored Normal file
View File

@@ -0,0 +1,3 @@
# Use bd merge for beads JSONL files
.beads/issues.jsonl merge=beads

40
cmd/bd/@AGENTS.md Normal file
View File

@@ -0,0 +1,40 @@
# Agent Instructions
This project uses **bd** (beads) for issue tracking. Run `bd onboard` to get started.
## Quick Reference
```bash
bd ready # Find available work
bd show <id> # View issue details
bd update <id> --status in_progress # Claim work
bd close <id> # Complete work
bd sync # Sync with git
```
## Landing the Plane (Session Completion)
**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds.
**MANDATORY WORKFLOW:**
1. **File issues for remaining work** - Create issues for anything that needs follow-up
2. **Run quality gates** (if code changed) - Tests, linters, builds
3. **Update issue status** - Close finished work, update in-progress items
4. **PUSH TO REMOTE** - This is MANDATORY:
```bash
git pull --rebase
bd sync
git push
git status # MUST show "up to date with origin"
```
5. **Clean up** - Clear stashes, prune remote branches
6. **Verify** - All changes committed AND pushed
7. **Hand off** - Provide context for next session
**CRITICAL RULES:**
- Work is NOT complete until `git push` succeeds
- NEVER stop before pushing - that leaves work stranded locally
- NEVER say "ready to push when you are" - YOU must push
- If push fails, resolve and retry until it succeeds

40
cmd/bd/AGENTS.md Normal file
View File

@@ -0,0 +1,40 @@
# Agent Instructions
This project uses **bd** (beads) for issue tracking. Run `bd onboard` to get started.
## Quick Reference
```bash
bd ready # Find available work
bd show <id> # View issue details
bd update <id> --status in_progress # Claim work
bd close <id> # Complete work
bd sync # Sync with git
```
## Landing the Plane (Session Completion)
**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds.
**MANDATORY WORKFLOW:**
1. **File issues for remaining work** - Create issues for anything that needs follow-up
2. **Run quality gates** (if code changed) - Tests, linters, builds
3. **Update issue status** - Close finished work, update in-progress items
4. **PUSH TO REMOTE** - This is MANDATORY:
```bash
git pull --rebase
bd sync
git push
git status # MUST show "up to date with origin"
```
5. **Clean up** - Clear stashes, prune remote branches
6. **Verify** - All changes committed AND pushed
7. **Hand off** - Provide context for next session
**CRITICAL RULES:**
- Work is NOT complete until `git push` succeeds
- NEVER stop before pushing - that leaves work stranded locally
- NEVER say "ready to push when you are" - YOU must push
- If push fails, resolve and retry until it succeeds

View File

@@ -17,14 +17,23 @@ func TestDaemonAutoStart(t *testing.T) {
// Save original env
origAutoStart := os.Getenv("BEADS_AUTO_START_DAEMON")
origNoDaemon := os.Getenv("BEADS_NO_DAEMON")
defer func() {
if origAutoStart != "" {
os.Setenv("BEADS_AUTO_START_DAEMON", origAutoStart)
} else {
os.Unsetenv("BEADS_AUTO_START_DAEMON")
}
if origNoDaemon != "" {
os.Setenv("BEADS_NO_DAEMON", origNoDaemon)
} else {
os.Unsetenv("BEADS_NO_DAEMON")
}
}()
// Ensure BEADS_NO_DAEMON doesn't interfere with these tests
os.Unsetenv("BEADS_NO_DAEMON")
t.Run("shouldAutoStartDaemon defaults to true", func(t *testing.T) {
os.Unsetenv("BEADS_AUTO_START_DAEMON")
if !shouldAutoStartDaemon() {

View File

@@ -0,0 +1,320 @@
package main
import (
"os"
"testing"
)
// TestComputeDaemonParentPID tests the parent PID computation logic.
//
// When BD_DAEMON_FOREGROUND is "1" the daemon runs in the foreground and
// parent tracking is disabled (PID 0); otherwise computeDaemonParentPID is
// expected to return the actual parent via os.Getppid().
func TestComputeDaemonParentPID(t *testing.T) {
	tests := []struct {
		name           string
		envValue       string
		expectsGetppid bool // true when the function should return os.Getppid()
	}{
		{
			name:           "BD_DAEMON_FOREGROUND not set",
			envValue:       "",
			expectsGetppid: true,
		},
		{
			name:           "BD_DAEMON_FOREGROUND=1",
			envValue:       "1",
			expectsGetppid: false,
		},
		{
			name:           "BD_DAEMON_FOREGROUND=0",
			envValue:       "0",
			expectsGetppid: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Save original env and restore after the subtest so state does
			// not leak into other tests.
			oldVal, wasSet := os.LookupEnv("BD_DAEMON_FOREGROUND")
			defer func() {
				if wasSet {
					os.Setenv("BD_DAEMON_FOREGROUND", oldVal)
				} else {
					os.Unsetenv("BD_DAEMON_FOREGROUND")
				}
			}()
			// An empty envValue means "not set", which we model by unsetting
			// rather than setting to "".
			if tt.envValue != "" {
				os.Setenv("BD_DAEMON_FOREGROUND", tt.envValue)
			} else {
				os.Unsetenv("BD_DAEMON_FOREGROUND")
			}
			result := computeDaemonParentPID()
			// Dispatch on the table field rather than matching tt.name: the
			// previous string comparison was fragile (renaming a case would
			// silently skip its assertion) and left expectsGetppid unused
			// for the foreground case.
			if tt.expectsGetppid {
				if expected := os.Getppid(); result != expected {
					t.Errorf("computeDaemonParentPID() = %d, want %d (os.Getppid())", result, expected)
				}
			} else if result != 0 {
				t.Errorf("computeDaemonParentPID() = %d, want 0", result)
			}
		})
	}
}
// TestCheckParentProcessAlive verifies parent-process liveness detection for
// PIDs that must always count as "alive": the untracked sentinel (0), the
// init process (1), and the test process itself.
func TestCheckParentProcessAlive(t *testing.T) {
	cases := []struct {
		name        string
		parentPID   int
		want        bool
		description string
	}{
		{"PID 0 (not tracked)", 0, true, "Should return true for untracked (PID 0)"},
		{"PID 1 (init/launchd)", 1, true, "Should return true for init process"},
		{"current process PID", os.Getpid(), true, "Should return true for current running process"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := checkParentProcessAlive(tc.parentPID); got != tc.want {
				t.Errorf("checkParentProcessAlive(%d) = %v, want %v (%s)",
					tc.parentPID, got, tc.want, tc.description)
			}
		})
	}
}
// TestCheckParentProcessAlive_InvalidPID verifies that a PID which (almost
// certainly) does not belong to a live process is reported as dead.
func TestCheckParentProcessAlive_InvalidPID(t *testing.T) {
	// Use a very high PID that's unlikely to exist.
	// NOTE(review): not guaranteed on systems with a raised pid_max; if this
	// test ever flakes, probe for a genuinely free PID instead.
	const invalidPID = 999999
	if checkParentProcessAlive(invalidPID) {
		t.Errorf("checkParentProcessAlive(%d) = true, want false (process should not exist)", invalidPID)
	}
}
// TestGetPIDFileForSocket checks that a socket path maps to the daemon.pid
// file located in the same directory as the socket.
func TestGetPIDFileForSocket(t *testing.T) {
	cases := map[string]struct {
		socketPath string
		want       string
	}{
		"typical beads socket":  {"/home/user/.beads/bd.sock", "/home/user/.beads/daemon.pid"},
		"root .beads directory": {"/root/.beads/bd.sock", "/root/.beads/daemon.pid"},
		"temporary directory":   {"/tmp/test.sock", "/tmp/daemon.pid"},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			if got := getPIDFileForSocket(tc.socketPath); got != tc.want {
				t.Errorf("getPIDFileForSocket(%q) = %q, want %q", tc.socketPath, got, tc.want)
			}
		})
	}
}
// TestReadPIDFromFile exercises readPIDFromFile against a valid PID file,
// a missing file, and a file with non-numeric content.
func TestReadPIDFromFile(t *testing.T) {
	// writePIDFile creates a temp file holding content and returns its path.
	// Files live under t.TempDir(), so cleanup is automatic (the previous
	// version duplicated CreateTemp/WriteString/os.Remove boilerplate per
	// subtest and ignored the Close error).
	writePIDFile := func(t *testing.T, content string) string {
		t.Helper()
		f, err := os.CreateTemp(t.TempDir(), "pid")
		if err != nil {
			t.Fatalf("Failed to create temp file: %v", err)
		}
		if _, err := f.WriteString(content); err != nil {
			f.Close()
			t.Fatalf("Failed to write content: %v", err)
		}
		if err := f.Close(); err != nil {
			t.Fatalf("Failed to close temp file: %v", err)
		}
		return f.Name()
	}
	t.Run("valid PID", func(t *testing.T) {
		pid, err := readPIDFromFile(writePIDFile(t, "12345\n"))
		if err != nil {
			t.Errorf("readPIDFromFile() returned error: %v", err)
		}
		if pid != 12345 {
			t.Errorf("readPIDFromFile() = %d, want 12345", pid)
		}
	})
	t.Run("nonexistent file", func(t *testing.T) {
		if _, err := readPIDFromFile("/nonexistent/path/to/file"); err == nil {
			t.Error("readPIDFromFile() should return error for nonexistent file")
		}
	})
	t.Run("invalid PID content", func(t *testing.T) {
		if _, err := readPIDFromFile(writePIDFile(t, "not-a-number\n")); err == nil {
			t.Error("readPIDFromFile() should return error for invalid content")
		}
	})
}
// TestIsPIDAlive verifies PID liveness checks across invalid values (0 and
// negative), the current process, and a PID that should not exist.
func TestIsPIDAlive(t *testing.T) {
	cases := []struct {
		name        string
		pid         int
		want        bool
		description string
	}{
		{"zero PID", 0, false, "PID 0 is invalid"},
		{"negative PID", -1, false, "Negative PID is invalid"},
		{"current process", os.Getpid(), true, "Current process should be alive"},
		{"invalid PID", 999999, false, "Non-existent process should not be alive"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := isPIDAlive(tc.pid); got != tc.want {
				t.Errorf("isPIDAlive(%d) = %v, want %v (%s)",
					tc.pid, got, tc.want, tc.description)
			}
		})
	}
}
// TestShouldAutoStartDaemon_Disabled tests BEADS_NO_DAEMON environment
// variable handling. Truthy values ("1", "true", "yes", "on") must disable
// daemon auto-start; "0" must not force-disable it.
func TestShouldAutoStartDaemon_Disabled(t *testing.T) {
	tests := []struct {
		name          string
		noDaemonValue string
		shouldDisable bool
		description   string
	}{
		{"BEADS_NO_DAEMON=1", "1", true, "Should be disabled for BEADS_NO_DAEMON=1"},
		{"BEADS_NO_DAEMON=true", "true", true, "Should be disabled for BEADS_NO_DAEMON=true"},
		{"BEADS_NO_DAEMON=yes", "yes", true, "Should be disabled for BEADS_NO_DAEMON=yes"},
		{"BEADS_NO_DAEMON=on", "on", true, "Should be disabled for BEADS_NO_DAEMON=on"},
		{"BEADS_NO_DAEMON=0", "0", false, "Should NOT be disabled for BEADS_NO_DAEMON=0"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Save original env and restore after the subtest.
			oldVal, wasSet := os.LookupEnv("BEADS_NO_DAEMON")
			defer func() {
				if wasSet {
					os.Setenv("BEADS_NO_DAEMON", oldVal)
				} else {
					os.Unsetenv("BEADS_NO_DAEMON")
				}
			}()
			os.Setenv("BEADS_NO_DAEMON", tt.noDaemonValue)
			result := shouldAutoStartDaemon()
			// Idiomatic boolean checks (previously compared against `false`).
			if tt.shouldDisable && result {
				t.Errorf("shouldAutoStartDaemon() = %v, want false (%s)",
					result, tt.description)
			}
			if !tt.shouldDisable && !result {
				// Not necessarily a failure: auto-start can also be disabled
				// by configuration, so only log the observation.
				t.Logf("shouldAutoStartDaemon() = %v (config-dependent, check passed)", result)
			}
		})
	}
}

View File

@@ -0,0 +1,516 @@
//go:build integration
// +build integration
package main
import (
"context"
"net"
"os"
"path/filepath"
"testing"
"time"
)
// TestStartRPCServer verifies RPC server initialization and startup:
// successful start with valid paths, failure with an invalid socket path,
// and that the created socket is not writable by group or others.
func TestStartRPCServer(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	socketPath := filepath.Join(tmpDir, "bd.sock")
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	workspacePath := tmpDir
	dbPath := testDBPath
	log := createTestLogger(t)
	t.Run("starts successfully with valid paths", func(t *testing.T) {
		server, serverErrChan, err := startRPCServer(ctx, socketPath, testStore, workspacePath, dbPath, log)
		if err != nil {
			t.Fatalf("startRPCServer failed: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		// Wait for the server to signal readiness before dialing.
		select {
		case <-server.WaitReady():
			// Server is ready
		case <-time.After(2 * time.Second):
			t.Fatal("Server did not become ready within 2 seconds")
		}
		// Verify socket exists and is connectable.
		conn, err := net.Dial("unix", socketPath)
		if err != nil {
			t.Fatalf("Failed to connect to socket: %v", err)
		}
		conn.Close()
		// Verify nothing has been reported on the server's error channel.
		select {
		case err := <-serverErrChan:
			t.Errorf("Unexpected error on serverErrChan: %v", err)
		default:
			// Expected - no error yet
		}
	})
	t.Run("fails with invalid socket path", func(t *testing.T) {
		invalidSocketPath := "/invalid/nonexistent/path/socket.sock"
		_, _, err := startRPCServer(ctx, invalidSocketPath, testStore, workspacePath, dbPath, log)
		if err == nil {
			t.Error("startRPCServer should fail with invalid socket path")
		}
	})
	t.Run("socket has restricted permissions", func(t *testing.T) {
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		socketPath2 := filepath.Join(tmpDir, "bd2.sock")
		server, _, err := startRPCServer(ctx2, socketPath2, testStore, workspacePath, dbPath, log)
		if err != nil {
			t.Fatalf("startRPCServer failed: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		// Wait for socket to be created
		<-server.WaitReady()
		info, err := os.Stat(socketPath2)
		if err != nil {
			t.Fatalf("Failed to stat socket: %v", err)
		}
		// The previous check (mode > 0644) compared permission bits
		// numerically, which let world/group-writable modes such as 0622 or
		// 0606 slip through. Test the write bits directly: the socket must
		// not be writable by group or others. Owner-only modes (0600) and
		// world-readable 0644 are both still accepted.
		mode := info.Mode().Perm()
		if mode&0022 != 0 {
			t.Errorf("Socket permissions %o are too permissive (group/other writable)", mode)
		}
	})
}
// TestRunEventLoop verifies the polling-based event loop: it must invoke the
// sync function on ticker ticks, exit promptly on context cancellation, and
// shut down when its tracked parent process is dead.
func TestRunEventLoop(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	socketPath := filepath.Join(tmpDir, "bd.sock")
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	workspacePath := tmpDir
	dbPath := testDBPath
	log := createTestLogger(t)
	// Start the RPC server shared by all subtests.
	server, serverErrChan, err := startRPCServer(ctx, socketPath, testStore, workspacePath, dbPath, log)
	if err != nil {
		t.Fatalf("Failed to start RPC server: %v", err)
	}
	defer func() {
		if server != nil {
			_ = server.Stop()
		}
	}()
	<-server.WaitReady()
	t.Run("processes ticker ticks", func(t *testing.T) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		tickCount := 0
		syncFunc := func() {
			tickCount++
		}
		ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond)
		defer cancel2()
		// Wait for the loop goroutine to EXIT before reading tickCount.
		// The previous version read tickCount right after <-ctx2.Done(),
		// racing with the loop goroutine's writes (no happens-before edge).
		done := make(chan struct{})
		go func() {
			runEventLoop(ctx2, cancel2, ticker, syncFunc, server, serverErrChan, 0, log)
			close(done)
		}()
		select {
		case <-done:
			// Loop exited after its context expired.
		case <-time.After(2 * time.Second):
			t.Fatal("Event loop did not exit after context timeout")
		}
		if tickCount == 0 {
			t.Error("Event loop should have processed at least one tick")
		}
	})
	t.Run("responds to context cancellation", func(t *testing.T) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		ctx2, cancel2 := context.WithCancel(context.Background())
		syncCalled := false
		syncFunc := func() {
			syncCalled = true
		}
		done := make(chan struct{})
		go func() {
			runEventLoop(ctx2, cancel2, ticker, syncFunc, server, serverErrChan, 0, log)
			close(done)
		}()
		// Let it run briefly, then cancel. Reading syncCalled after <-done
		// is safe: the loop's writes happen before close(done).
		time.Sleep(150 * time.Millisecond)
		cancel2()
		select {
		case <-done:
			// Expected - event loop exited
		case <-time.After(2 * time.Second):
			t.Fatal("Event loop did not exit within 2 seconds")
		}
		if !syncCalled {
			t.Error("Sync function should have been called at least once")
		}
	})
	t.Run("handles parent process death", func(t *testing.T) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		ctx2, cancel2 := context.WithCancel(context.Background())
		defer cancel2()
		done := make(chan struct{})
		go func() {
			// Use an invalid (non-existent) parent PID so the event loop
			// concludes its parent died.
			runEventLoop(ctx2, cancel2, ticker, func() {}, server, serverErrChan, 999999, log)
			close(done)
		}()
		// The loop polls parent liveness; it should detect the dead parent
		// within its check interval and exit well inside the deadline.
		select {
		case <-done:
			// Expected - event loop detected dead parent and exited
		case <-time.After(15 * time.Second):
			t.Fatal("Event loop did not exit after detecting dead parent")
		}
	})
}
// TestRunDaemonLoop_HealthyStartup verifies daemon initialization succeeds
// with proper setup (a git repo, a .beads directory, and a database at the
// canonical path). runDaemonLoop itself runs indefinitely, so this test
// validates its preconditions rather than running it to completion.
func TestRunDaemonLoop_HealthyStartup(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	initTestGitRepo(t, tmpDir)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "beads.db") // Use canonical name
	// Save original globals and working directory; restore after the test so
	// other tests see unmodified state. The previous version ignored the
	// errors from os.Getwd and os.Chdir.
	oldDBPath := dbPath
	oldStore := store
	oldWorkingDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("Failed to get working directory: %v", err)
	}
	defer func() {
		dbPath = oldDBPath
		store = oldStore
		if err := os.Chdir(oldWorkingDir); err != nil {
			t.Errorf("Failed to restore working directory: %v", err)
		}
	}()
	// Point the daemon globals at the test workspace.
	dbPath = testDBPath
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir to test workspace: %v", err)
	}
	// Create database first
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	t.Run("initialization succeeds with proper database", func(t *testing.T) {
		// Note: runDaemonLoop is designed to run indefinitely, so we test
		// that it doesn't panic during initialization rather than running it fully
		// The full daemon lifecycle is tested in integration with runEventLoop and runEventDrivenLoop
		store = testStore
		if _, err := os.Stat(testDBPath); err != nil {
			t.Errorf("Test database should exist: %v", err)
		}
	})
	t.Run("validates database file exists", func(t *testing.T) {
		// Setup validation: a path under a missing directory must not exist.
		invalidDBPath := filepath.Join(tmpDir, "nonexistent", "beads.db")
		if _, err := os.Stat(invalidDBPath); !os.IsNotExist(err) {
			t.Error("Invalid database path should not exist")
		}
	})
}
// TestCheckDaemonHealth_StorageAccess verifies that the daemon health check
// runs to completion against a healthy store. checkDaemonHealth returns
// nothing, so "no panic" is the success criterion here.
func TestCheckDaemonHealth_StorageAccess(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testStore := newTestStore(t, filepath.Join(beadsDir, "test.db"))
	defer testStore.Close()
	ctx := context.Background()
	log := createTestLogger(t)
	for _, name := range []string{
		"completes without error on healthy storage",
		"logs appropriately when storage is accessible",
	} {
		t.Run(name, func(t *testing.T) {
			// Success means the call completes without panicking; in a real
			// scenario we would also inspect the log output.
			checkDaemonHealth(ctx, testStore, log)
		})
	}
}
// TestIsDaemonHealthy verifies daemon health checking via RPC: an
// unreachable socket and a stale socket file must report unhealthy, while a
// live server must report healthy.
func TestIsDaemonHealthy(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	socketPath := filepath.Join(tmpDir, "bd.sock")
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	workspacePath := tmpDir
	dbPath := testDBPath
	log := createTestLogger(t)
	t.Run("returns false for unreachable daemon", func(t *testing.T) {
		unreachableSocket := filepath.Join(tmpDir, "nonexistent.sock")
		// Idiomatic boolean check (previously `result != false`).
		if isDaemonHealthy(unreachableSocket) {
			t.Error("isDaemonHealthy should return false for unreachable daemon")
		}
	})
	t.Run("returns true for running daemon", func(t *testing.T) {
		server, _, err := startRPCServer(ctx, socketPath, testStore, workspacePath, dbPath, log)
		if err != nil {
			t.Fatalf("Failed to start RPC server: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		<-server.WaitReady()
		// Give socket time to be fully ready
		time.Sleep(100 * time.Millisecond)
		if !isDaemonHealthy(socketPath) {
			t.Error("isDaemonHealthy should return true for healthy daemon")
		}
	})
	t.Run("detects stale socket", func(t *testing.T) {
		staleSocket := filepath.Join(tmpDir, "stale.sock")
		// A plain file stands in for a leftover socket nothing listens on.
		f, err := os.Create(staleSocket)
		if err != nil {
			t.Fatalf("Failed to create stale socket: %v", err)
		}
		f.Close()
		if isDaemonHealthy(staleSocket) {
			t.Error("isDaemonHealthy should return false for stale socket")
		}
	})
}
// TestEventLoopSignalHandling exercises graceful shutdown of the event loop
// when its context is cancelled (the daemon's SIGTERM path).
func TestEventLoopSignalHandling(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	t.Run("handles SIGTERM gracefully", func(t *testing.T) {
		tmpDir := makeSocketTempDir(t)
		socketPath := filepath.Join(tmpDir, "bd.sock")
		beadsDir := filepath.Join(tmpDir, ".beads")
		if err := os.MkdirAll(beadsDir, 0755); err != nil {
			t.Fatalf("Failed to create beads dir: %v", err)
		}
		testDBPath := filepath.Join(beadsDir, "test.db")
		testStore := newTestStore(t, testDBPath)
		defer testStore.Close()
		serverCtx, cancelServer := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancelServer()
		log := createTestLogger(t)
		server, serverErrChan, err := startRPCServer(serverCtx, socketPath, testStore, tmpDir, testDBPath, log)
		if err != nil {
			t.Fatalf("Failed to start RPC server: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		<-server.WaitReady()
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		loopCtx, cancelLoop := context.WithCancel(context.Background())
		finished := make(chan struct{})
		go func() {
			runEventLoop(loopCtx, cancelLoop, ticker, func() {}, server, serverErrChan, 0, log)
			close(finished)
		}()
		// Let the loop tick a couple of times, then cancel it.
		time.Sleep(200 * time.Millisecond)
		cancelLoop()
		select {
		case <-finished:
			// Loop exited cleanly.
		case <-time.After(2 * time.Second):
			t.Fatal("Event loop did not exit after signal")
		}
	})
}
// createTestLogger returns a daemonLogger that routes daemon log output
// through the test's t.Logf, prefixed with "[daemon] " so daemon messages
// are distinguishable in test output.
func createTestLogger(t *testing.T) daemonLogger {
	logFn := func(format string, args ...interface{}) {
		t.Logf("[daemon] "+format, args...)
	}
	return daemonLogger{logFunc: logFn}
}
// TestDaemonIntegration_SocketCleanup verifies the socket file is removed
// after the daemon's RPC server stops.
func TestDaemonIntegration_SocketCleanup(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	ctx := context.Background()
	log := createTestLogger(t)
	socketPath := filepath.Join(tmpDir, "bd1.sock")
	workspacePath := tmpDir
	dbPath := testDBPath
	ctx1, cancel1 := context.WithTimeout(ctx, 3*time.Second)
	// Deferred so the context is released even when startRPCServer fails
	// (the previous version leaked it on the t.Fatalf path; calling a
	// context's cancel func more than once is safe).
	defer cancel1()
	server, _, err := startRPCServer(ctx1, socketPath, testStore, workspacePath, dbPath, log)
	if err != nil {
		t.Fatalf("Failed to start server: %v", err)
	}
	<-server.WaitReady()
	// Verify socket exists while the server is running.
	if _, err := os.Stat(socketPath); err != nil {
		t.Errorf("Socket should exist: %v", err)
	}
	// Stop server and cancel its context.
	_ = server.Stop()
	cancel1()
	// Wait for cleanup
	time.Sleep(500 * time.Millisecond)
	// Socket should be gone after cleanup; only log if it lingers, since
	// removal timing is platform-dependent.
	if _, err := os.Stat(socketPath); !os.IsNotExist(err) {
		t.Logf("Socket still exists after stop (may be cleanup timing): %v", err)
	}
}

View File

@@ -699,3 +699,260 @@ func TestRenderTreeOutput(t *testing.T) {
}
}
}
// TestMergeBidirectionalTrees_Empty confirms that merging two empty trees
// yields an empty result.
func TestMergeBidirectionalTrees_Empty(t *testing.T) {
	downTree := []*types.TreeNode{}
	upTree := []*types.TreeNode{}
	merged := mergeBidirectionalTrees(downTree, upTree, "test-root")
	if got := len(merged); got != 0 {
		t.Errorf("Expected empty result for empty trees, got %d nodes", got)
	}
}
// TestMergeBidirectionalTrees_OnlyDown merges a down tree (dependencies)
// with an up tree holding only the root; every down-tree node must survive
// the merge.
func TestMergeBidirectionalTrees_OnlyDown(t *testing.T) {
	// newNode keeps the fixture declarations compact.
	newNode := func(id, title string, depth int, parent string) *types.TreeNode {
		return &types.TreeNode{
			Issue:    types.Issue{ID: id, Title: title, Status: types.StatusOpen},
			Depth:    depth,
			ParentID: parent,
		}
	}
	downTree := []*types.TreeNode{
		newNode("test-root", "Root", 0, ""),
		newNode("dep-1", "Dependency 1", 1, "test-root"),
		newNode("dep-2", "Dependency 2", 1, "test-root"),
	}
	upTree := []*types.TreeNode{
		newNode("test-root", "Root", 0, ""),
	}
	result := mergeBidirectionalTrees(downTree, upTree, "test-root")
	// All three down-tree nodes should be present, the root only once.
	if len(result) != 3 {
		t.Errorf("Expected 3 nodes, got %d", len(result))
	}
	seen := make(map[string]bool, len(result))
	for _, node := range result {
		seen[node.ID] = true
	}
	if !seen["test-root"] || !seen["dep-1"] || !seen["dep-2"] {
		t.Error("Expected all down tree nodes in result")
	}
}
// TestMergeBidirectionalTrees_WithDependents merges a tree that has both
// dependencies (down) and dependents (up); nodes from both sides must appear
// in the result.
func TestMergeBidirectionalTrees_WithDependents(t *testing.T) {
	newNode := func(id, title string, depth int, parent string) *types.TreeNode {
		return &types.TreeNode{
			Issue:    types.Issue{ID: id, Title: title, Status: types.StatusOpen},
			Depth:    depth,
			ParentID: parent,
		}
	}
	downTree := []*types.TreeNode{
		newNode("test-root", "Root", 0, ""),
		newNode("dep-1", "Dependency 1", 1, "test-root"),
	}
	upTree := []*types.TreeNode{
		newNode("test-root", "Root", 0, ""),
		newNode("dependent-1", "Dependent 1", 1, "test-root"),
	}
	result := mergeBidirectionalTrees(downTree, upTree, "test-root")
	// Expected shape: dependent node(s) first, then root plus dependencies;
	// the root appears once, so at least 3 nodes total.
	if len(result) < 3 {
		t.Errorf("Expected at least 3 nodes, got %d", len(result))
	}
	seen := make(map[string]bool, len(result))
	for _, node := range result {
		seen[node.ID] = true
	}
	if !seen["dependent-1"] {
		t.Error("Expected dependent-1 in merged result")
	}
	if !seen["dep-1"] {
		t.Error("Expected dep-1 in merged result")
	}
}
// TestMergeBidirectionalTrees_MultipleDepth merges multi-level hierarchies
// on both sides; every node from both trees must appear in the result.
func TestMergeBidirectionalTrees_MultipleDepth(t *testing.T) {
	newNode := func(id, title string, depth int, parent string) *types.TreeNode {
		return &types.TreeNode{
			Issue:    types.Issue{ID: id, Title: title, Status: types.StatusOpen},
			Depth:    depth,
			ParentID: parent,
		}
	}
	downTree := []*types.TreeNode{
		newNode("root", "Root", 0, ""),
		newNode("dep-1", "Dep 1", 1, "root"),
		newNode("dep-1-1", "Dep 1.1", 2, "dep-1"),
	}
	upTree := []*types.TreeNode{
		newNode("root", "Root", 0, ""),
		newNode("dependent-1", "Dependent 1", 1, "root"),
		newNode("dependent-1-1", "Dependent 1.1", 2, "dependent-1"),
	}
	result := mergeBidirectionalTrees(downTree, upTree, "root")
	// All nodes from both trees, with the duplicate root collapsed.
	if len(result) < 5 {
		t.Errorf("Expected at least 5 nodes, got %d", len(result))
	}
	seen := make(map[string]bool, len(result))
	for _, node := range result {
		seen[node.ID] = true
	}
	for _, id := range []string{"root", "dep-1", "dep-1-1", "dependent-1", "dependent-1-1"} {
		if !seen[id] {
			t.Errorf("Expected ID %s in merged result", id)
		}
	}
}
// TestMergeBidirectionalTrees_ExcludesRootFromUp checks that when both trees
// contain only the root, the merged result contains the root exactly once
// (the up tree's copy is excluded).
func TestMergeBidirectionalTrees_ExcludesRootFromUp(t *testing.T) {
	newRoot := func() *types.TreeNode {
		return &types.TreeNode{
			Issue:    types.Issue{ID: "root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		}
	}
	downTree := []*types.TreeNode{newRoot()}
	upTree := []*types.TreeNode{newRoot()}
	result := mergeBidirectionalTrees(downTree, upTree, "root")
	// Fatalf, not Errorf: the previous version continued past a failed
	// length check and would panic on result[0] if the merge returned an
	// empty slice.
	if len(result) != 1 {
		t.Fatalf("Expected 1 node (root only), got %d", len(result))
	}
	if result[0].ID != "root" {
		t.Errorf("Expected root node, got %s", result[0].ID)
	}
}
// TestMergeBidirectionalTrees_PreservesDepth checks that Depth values from
// the original trees carry through the merge unchanged.
func TestMergeBidirectionalTrees_PreservesDepth(t *testing.T) {
	newNode := func(id, title string, depth int, parent string) *types.TreeNode {
		return &types.TreeNode{
			Issue:    types.Issue{ID: id, Title: title, Status: types.StatusOpen},
			Depth:    depth,
			ParentID: parent,
		}
	}
	// Non-standard depths (5 and 3) so preservation is observable.
	downTree := []*types.TreeNode{
		newNode("root", "Root", 0, ""),
		newNode("dep-1", "Dep 1", 5, "root"),
	}
	upTree := []*types.TreeNode{
		newNode("root", "Root", 0, ""),
		newNode("dependent-1", "Dependent 1", 3, "root"),
	}
	wantDepths := map[string]int{"dep-1": 5, "dependent-1": 3}
	for _, node := range mergeBidirectionalTrees(downTree, upTree, "root") {
		if want, ok := wantDepths[node.ID]; ok && node.Depth != want {
			t.Errorf("Expected %s depth=%d, got %d", node.ID, want, node.Depth)
		}
	}
}

View File

@@ -1175,6 +1175,9 @@ func TestCheckSyncBranchHookCompatibility(t *testing.T) {
// Create pre-push hook if specified
if tc.hookVersion != "" {
hooksDir := filepath.Join(tmpDir, ".git", "hooks")
if err := os.MkdirAll(hooksDir, 0755); err != nil {
t.Fatal(err)
}
hookPath := filepath.Join(hooksDir, "pre-push")
var hookContent string
if tc.hookVersion == "custom" {
@@ -1246,6 +1249,9 @@ func TestCheckSyncBranchHookQuick(t *testing.T) {
if tc.hookVersion != "" {
hooksDir := filepath.Join(tmpDir, ".git", "hooks")
if err := os.MkdirAll(hooksDir, 0755); err != nil {
t.Fatal(err)
}
hookPath := filepath.Join(hooksDir, "pre-push")
hookContent := fmt.Sprintf("#!/bin/sh\n# bd-hooks-version: %s\nexit 0\n", tc.hookVersion)
if err := os.WriteFile(hookPath, []byte(hookContent), 0755); err != nil {

View File

@@ -99,7 +99,7 @@ Colors indicate status:
}
// Render ASCII graph
renderGraph(layout, subgraph)
renderGraph(layout)
},
}
@@ -277,7 +277,7 @@ func computeLayout(subgraph *TemplateSubgraph) *GraphLayout {
}
// renderGraph renders the ASCII visualization
func renderGraph(layout *GraphLayout, subgraph *TemplateSubgraph) {
func renderGraph(layout *GraphLayout) {
if len(layout.Nodes) == 0 {
fmt.Println("Empty graph")
return

View File

@@ -123,7 +123,7 @@ The old approach of embedding full instructions in AGENTS.md is deprecated
because it wasted tokens and got stale when bd upgraded.`,
Run: func(cmd *cobra.Command, args []string) {
if err := renderOnboardInstructions(cmd.OutOrStdout()); err != nil {
fmt.Fprintf(cmd.ErrOrStderr(), "Error: %v\n", err)
_, _ = fmt.Fprintf(cmd.ErrOrStderr(), "Error: %v\n", err)
}
},
}