Merge main into fix/ci-errors, resolve graph.go conflict

This commit is contained in:
Steve Yegge
2025-12-18 18:30:47 -08:00
15 changed files with 1666 additions and 65 deletions

4
.beads/.gitignore vendored
View File

@@ -18,10 +18,6 @@ bd.sock
db.sqlite db.sqlite
bd.db bd.db
# Legacy deletions manifest (replaced by tombstones)
deletions.jsonl
deletions.jsonl.migrated
# Merge artifacts (temporary files from 3-way merge) # Merge artifacts (temporary files from 3-way merge)
beads.base.jsonl beads.base.jsonl
beads.base.meta.json beads.base.meta.json

3
cmd/bd/.gitattributes vendored Normal file
View File

@@ -0,0 +1,3 @@
# Use bd merge for beads JSONL files
.beads/issues.jsonl merge=beads

40
cmd/bd/@AGENTS.md Normal file
View File

@@ -0,0 +1,40 @@
# Agent Instructions
This project uses **bd** (beads) for issue tracking. Run `bd onboard` to get started.
## Quick Reference
```bash
bd ready # Find available work
bd show <id> # View issue details
bd update <id> --status in_progress # Claim work
bd close <id> # Complete work
bd sync # Sync with git
```
## Landing the Plane (Session Completion)
**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds.
**MANDATORY WORKFLOW:**
1. **File issues for remaining work** - Create issues for anything that needs follow-up
2. **Run quality gates** (if code changed) - Tests, linters, builds
3. **Update issue status** - Close finished work, update in-progress items
4. **PUSH TO REMOTE** - This is MANDATORY:
```bash
git pull --rebase
bd sync
git push
git status # MUST show "up to date with origin"
```
5. **Clean up** - Clear stashes, prune remote branches
6. **Verify** - All changes committed AND pushed
7. **Hand off** - Provide context for next session
**CRITICAL RULES:**
- Work is NOT complete until `git push` succeeds
- NEVER stop before pushing - that leaves work stranded locally
- NEVER say "ready to push when you are" - YOU must push
- If push fails, resolve and retry until it succeeds

40
cmd/bd/AGENTS.md Normal file
View File

@@ -0,0 +1,40 @@
# Agent Instructions
This project uses **bd** (beads) for issue tracking. Run `bd onboard` to get started.
## Quick Reference
```bash
bd ready # Find available work
bd show <id> # View issue details
bd update <id> --status in_progress # Claim work
bd close <id> # Complete work
bd sync # Sync with git
```
## Landing the Plane (Session Completion)
**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until `git push` succeeds.
**MANDATORY WORKFLOW:**
1. **File issues for remaining work** - Create issues for anything that needs follow-up
2. **Run quality gates** (if code changed) - Tests, linters, builds
3. **Update issue status** - Close finished work, update in-progress items
4. **PUSH TO REMOTE** - This is MANDATORY:
```bash
git pull --rebase
bd sync
git push
git status # MUST show "up to date with origin"
```
5. **Clean up** - Clear stashes, prune remote branches
6. **Verify** - All changes committed AND pushed
7. **Hand off** - Provide context for next session
**CRITICAL RULES:**
- Work is NOT complete until `git push` succeeds
- NEVER stop before pushing - that leaves work stranded locally
- NEVER say "ready to push when you are" - YOU must push
- If push fails, resolve and retry until it succeeds

View File

@@ -17,14 +17,23 @@ func TestDaemonAutoStart(t *testing.T) {
// Save original env // Save original env
origAutoStart := os.Getenv("BEADS_AUTO_START_DAEMON") origAutoStart := os.Getenv("BEADS_AUTO_START_DAEMON")
origNoDaemon := os.Getenv("BEADS_NO_DAEMON")
defer func() { defer func() {
if origAutoStart != "" { if origAutoStart != "" {
os.Setenv("BEADS_AUTO_START_DAEMON", origAutoStart) os.Setenv("BEADS_AUTO_START_DAEMON", origAutoStart)
} else { } else {
os.Unsetenv("BEADS_AUTO_START_DAEMON") os.Unsetenv("BEADS_AUTO_START_DAEMON")
} }
if origNoDaemon != "" {
os.Setenv("BEADS_NO_DAEMON", origNoDaemon)
} else {
os.Unsetenv("BEADS_NO_DAEMON")
}
}() }()
// Ensure BEADS_NO_DAEMON doesn't interfere with these tests
os.Unsetenv("BEADS_NO_DAEMON")
t.Run("shouldAutoStartDaemon defaults to true", func(t *testing.T) { t.Run("shouldAutoStartDaemon defaults to true", func(t *testing.T) {
os.Unsetenv("BEADS_AUTO_START_DAEMON") os.Unsetenv("BEADS_AUTO_START_DAEMON")
if !shouldAutoStartDaemon() { if !shouldAutoStartDaemon() {

View File

@@ -0,0 +1,320 @@
package main
import (
"os"
"testing"
)
// TestComputeDaemonParentPID tests the parent PID computation logic:
// when BD_DAEMON_FOREGROUND=1 the daemon is untracked (parent PID 0);
// in every other case the current parent PID (os.Getppid()) is used.
func TestComputeDaemonParentPID(t *testing.T) {
	tests := []struct {
		name           string
		envValue       string // value for BD_DAEMON_FOREGROUND; "" means unset
		expectsGetppid bool   // true when the result should equal os.Getppid()
	}{
		{
			name:           "BD_DAEMON_FOREGROUND not set",
			envValue:       "",
			expectsGetppid: true,
		},
		{
			name:           "BD_DAEMON_FOREGROUND=1",
			envValue:       "1",
			expectsGetppid: false,
		},
		{
			name:           "BD_DAEMON_FOREGROUND=0",
			envValue:       "0",
			expectsGetppid: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Save original env and restore it when the subtest ends.
			oldVal, wasSet := os.LookupEnv("BD_DAEMON_FOREGROUND")
			defer func() {
				if wasSet {
					os.Setenv("BD_DAEMON_FOREGROUND", oldVal)
				} else {
					os.Unsetenv("BD_DAEMON_FOREGROUND")
				}
			}()
			// Set test env
			if tt.envValue != "" {
				os.Setenv("BD_DAEMON_FOREGROUND", tt.envValue)
			} else {
				os.Unsetenv("BD_DAEMON_FOREGROUND")
			}
			result := computeDaemonParentPID()
			// Branch on the table field rather than matching on tt.name
			// (the original compared tt.name == "BD_DAEMON_FOREGROUND=1",
			// which silently breaks if a case is renamed or added). The
			// unused placeholder expectedPID field is removed.
			if tt.expectsGetppid {
				// When BD_DAEMON_FOREGROUND is not "1", we should get os.Getppid()
				expectedPID := os.Getppid()
				if result != expectedPID {
					t.Errorf("computeDaemonParentPID() = %d, want %d (os.Getppid())", result, expectedPID)
				}
			} else if result != 0 {
				t.Errorf("computeDaemonParentPID() = %d, want 0", result)
			}
		})
	}
}
// TestCheckParentProcessAlive exercises checkParentProcessAlive with PIDs
// that must always be treated as alive: the untracked sentinel (0), the
// init/launchd process (1), and this test's own process.
func TestCheckParentProcessAlive(t *testing.T) {
	cases := []struct {
		name        string
		parentPID   int
		expected    bool
		description string
	}{
		{"PID 0 (not tracked)", 0, true, "Should return true for untracked (PID 0)"},
		{"PID 1 (init/launchd)", 1, true, "Should return true for init process"},
		{"current process PID", os.Getpid(), true, "Should return true for current running process"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := checkParentProcessAlive(tc.parentPID)
			if got != tc.expected {
				t.Errorf("checkParentProcessAlive(%d) = %v, want %v (%s)",
					tc.parentPID, got, tc.expected, tc.description)
			}
		})
	}
}
// TestCheckParentProcessAlive_InvalidPID tests with a PID that should not
// correspond to any live process. (The comment previously referenced a
// different test name, TestCheckParentProcessAlive_DeadProcess.)
func TestCheckParentProcessAlive_InvalidPID(t *testing.T) {
	// Use a very high PID that's unlikely to exist
	invalidPID := 999999
	result := checkParentProcessAlive(invalidPID)
	// This should return false since the process doesn't exist.
	// (Idiomatic check; was `result == true`.)
	if result {
		t.Errorf("checkParentProcessAlive(%d) = true, want false (process should not exist)", invalidPID)
	}
}
// TestGetPIDFileForSocket checks that a daemon socket path is mapped to a
// daemon.pid file living in the same directory as the socket.
func TestGetPIDFileForSocket(t *testing.T) {
	cases := []struct {
		name       string
		socketPath string
		expected   string
	}{
		{"typical beads socket", "/home/user/.beads/bd.sock", "/home/user/.beads/daemon.pid"},
		{"root .beads directory", "/root/.beads/bd.sock", "/root/.beads/daemon.pid"},
		{"temporary directory", "/tmp/test.sock", "/tmp/daemon.pid"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := getPIDFileForSocket(tc.socketPath); got != tc.expected {
				t.Errorf("getPIDFileForSocket(%q) = %q, want %q", tc.socketPath, got, tc.expected)
			}
		})
	}
}
// TestReadPIDFromFile covers reading a PID back from disk: a well-formed
// file, a missing file, and a file holding non-numeric content.
func TestReadPIDFromFile(t *testing.T) {
	t.Run("valid PID", func(t *testing.T) {
		// Create a temporary file
		f, err := os.CreateTemp("", "pid")
		if err != nil {
			t.Fatalf("Failed to create temp file: %v", err)
		}
		name := f.Name()
		t.Cleanup(func() { os.Remove(name) })
		// Write a PID
		if _, err = f.WriteString("12345\n"); err != nil {
			t.Fatalf("Failed to write PID: %v", err)
		}
		f.Close()
		// Read it back
		got, err := readPIDFromFile(name)
		if err != nil {
			t.Errorf("readPIDFromFile() returned error: %v", err)
		}
		if got != 12345 {
			t.Errorf("readPIDFromFile() = %d, want 12345", got)
		}
	})
	t.Run("nonexistent file", func(t *testing.T) {
		if _, err := readPIDFromFile("/nonexistent/path/to/file"); err == nil {
			t.Error("readPIDFromFile() should return error for nonexistent file")
		}
	})
	t.Run("invalid PID content", func(t *testing.T) {
		f, err := os.CreateTemp("", "pid")
		if err != nil {
			t.Fatalf("Failed to create temp file: %v", err)
		}
		name := f.Name()
		t.Cleanup(func() { os.Remove(name) })
		if _, err = f.WriteString("not-a-number\n"); err != nil {
			t.Fatalf("Failed to write content: %v", err)
		}
		f.Close()
		if _, err = readPIDFromFile(name); err == nil {
			t.Error("readPIDFromFile() should return error for invalid content")
		}
	})
}
// TestIsPIDAlive checks liveness detection for invalid PIDs (zero and
// negative), the current process, and a PID that should not exist.
func TestIsPIDAlive(t *testing.T) {
	cases := []struct {
		name        string
		pid         int
		expected    bool
		description string
	}{
		{"zero PID", 0, false, "PID 0 is invalid"},
		{"negative PID", -1, false, "Negative PID is invalid"},
		{"current process", os.Getpid(), true, "Current process should be alive"},
		{"invalid PID", 999999, false, "Non-existent process should not be alive"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := isPIDAlive(tc.pid)
			if got != tc.expected {
				t.Errorf("isPIDAlive(%d) = %v, want %v (%s)",
					tc.pid, got, tc.expected, tc.description)
			}
		})
	}
}
// TestShouldAutoStartDaemon_Disabled tests BEADS_NO_DAEMON environment
// variable handling: truthy values ("1", "true", "yes", "on") must disable
// daemon auto-start, while "0" must not force-disable it.
func TestShouldAutoStartDaemon_Disabled(t *testing.T) {
	tests := []struct {
		name          string
		noDaemonValue string
		shouldDisable bool
		description   string
	}{
		{
			name:          "BEADS_NO_DAEMON=1",
			noDaemonValue: "1",
			shouldDisable: true,
			description:   "Should be disabled for BEADS_NO_DAEMON=1",
		},
		{
			name:          "BEADS_NO_DAEMON=true",
			noDaemonValue: "true",
			shouldDisable: true,
			description:   "Should be disabled for BEADS_NO_DAEMON=true",
		},
		{
			name:          "BEADS_NO_DAEMON=yes",
			noDaemonValue: "yes",
			shouldDisable: true,
			description:   "Should be disabled for BEADS_NO_DAEMON=yes",
		},
		{
			name:          "BEADS_NO_DAEMON=on",
			noDaemonValue: "on",
			shouldDisable: true,
			description:   "Should be disabled for BEADS_NO_DAEMON=on",
		},
		{
			name:          "BEADS_NO_DAEMON=0",
			noDaemonValue: "0",
			shouldDisable: false,
			description:   "Should NOT be disabled for BEADS_NO_DAEMON=0",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Save original env and restore it when the subtest ends.
			oldVal, wasSet := os.LookupEnv("BEADS_NO_DAEMON")
			defer func() {
				if wasSet {
					os.Setenv("BEADS_NO_DAEMON", oldVal)
				} else {
					os.Unsetenv("BEADS_NO_DAEMON")
				}
			}()
			// Set test env
			os.Setenv("BEADS_NO_DAEMON", tt.noDaemonValue)
			result := shouldAutoStartDaemon()
			// Idiomatic boolean checks (was: `result != false` and
			// `result == false`, flagged by staticcheck S1002).
			if tt.shouldDisable && result {
				t.Errorf("shouldAutoStartDaemon() = %v, want false (%s)",
					result, tt.description)
			}
			if !tt.shouldDisable && !result {
				// Auto-start may legitimately be off for other reasons
				// (e.g. configuration), so only log rather than fail.
				t.Logf("shouldAutoStartDaemon() = %v (config-dependent, check passed)", result)
			}
		})
	}
}

View File

@@ -0,0 +1,516 @@
//go:build integration
// +build integration
package main
import (
	"context"
	"net"
	"os"
	"path/filepath"
	"sync/atomic"
	"testing"
	"time"
)
// TestStartRPCServer verifies RPC server initialization and startup:
// a server started on a valid Unix socket becomes ready and accepts
// connections, an invalid socket path fails fast, and the created socket
// carries restricted file permissions.
func TestStartRPCServer(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	// Workspace layout: temp dir with a .beads/ subdirectory holding the DB.
	tmpDir := makeSocketTempDir(t)
	socketPath := filepath.Join(tmpDir, "bd.sock")
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	// Bound the whole test so a hung server cannot stall the suite.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	workspacePath := tmpDir
	dbPath := testDBPath // local shadow of the package-level dbPath used by other tests
	log := createTestLogger(t)
	t.Run("starts successfully with valid paths", func(t *testing.T) {
		server, serverErrChan, err := startRPCServer(ctx, socketPath, testStore, workspacePath, dbPath, log)
		if err != nil {
			t.Fatalf("startRPCServer failed: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		// Verify server is ready
		select {
		case <-server.WaitReady():
			// Server is ready
		case <-time.After(2 * time.Second):
			t.Fatal("Server did not become ready within 2 seconds")
		}
		// Verify socket exists and is connectable
		conn, err := net.Dial("unix", socketPath)
		if err != nil {
			t.Fatalf("Failed to connect to socket: %v", err)
		}
		conn.Close()
		// Verify no error on channel (non-blocking receive: nothing queued
		// means no startup error has been reported yet).
		select {
		case err := <-serverErrChan:
			t.Errorf("Unexpected error on serverErrChan: %v", err)
		default:
			// Expected - no error yet
		}
	})
	t.Run("fails with invalid socket path", func(t *testing.T) {
		invalidSocketPath := "/invalid/nonexistent/path/socket.sock"
		_, _, err := startRPCServer(ctx, invalidSocketPath, testStore, workspacePath, dbPath, log)
		if err == nil {
			t.Error("startRPCServer should fail with invalid socket path")
		}
	})
	t.Run("socket has restricted permissions", func(t *testing.T) {
		// Fresh context and socket path so this subtest is independent of
		// the first server instance.
		ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel2()
		socketPath2 := filepath.Join(tmpDir, "bd2.sock")
		server, _, err := startRPCServer(ctx2, socketPath2, testStore, workspacePath, dbPath, log)
		if err != nil {
			t.Fatalf("startRPCServer failed: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		// Wait for socket to be created
		<-server.WaitReady()
		info, err := os.Stat(socketPath2)
		if err != nil {
			t.Fatalf("Failed to stat socket: %v", err)
		}
		// Check permissions (should be 0600 or similar restricted)
		mode := info.Mode().Perm()
		// On Unix, should be 0600 (owner read/write only)
		// Accept 0600 or similar restricted permissions
		if mode > 0644 {
			t.Errorf("Socket permissions %o are too permissive", mode)
		}
	})
}
// TestRunEventLoop verifies the polling-based event loop: it must invoke
// the sync callback on ticker ticks, exit promptly on context cancellation,
// and shut down once its parent process disappears.
func TestRunEventLoop(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	socketPath := filepath.Join(tmpDir, "bd.sock")
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	workspacePath := tmpDir
	dbPath := testDBPath
	log := createTestLogger(t)
	// Start RPC server shared by all subtests.
	server, serverErrChan, err := startRPCServer(ctx, socketPath, testStore, workspacePath, dbPath, log)
	if err != nil {
		t.Fatalf("Failed to start RPC server: %v", err)
	}
	defer func() {
		if server != nil {
			_ = server.Stop()
		}
	}()
	<-server.WaitReady()
	t.Run("processes ticker ticks", func(t *testing.T) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		// tickCount is written by the event-loop goroutine and read by the
		// test goroutine; the original plain int was a data race under
		// `go test -race`, and was read before the loop goroutine had
		// necessarily exited. Use an atomic and wait for the goroutine.
		var tickCount atomic.Int64
		syncFunc := func() {
			tickCount.Add(1)
		}
		// Run event loop in goroutine with short timeout
		ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond)
		defer cancel2()
		done := make(chan struct{})
		go func() {
			runEventLoop(ctx2, cancel2, ticker, syncFunc, server, serverErrChan, 0, log)
			close(done)
		}()
		// Wait for the context to expire, then for the loop goroutine to
		// actually exit before inspecting the counter.
		<-ctx2.Done()
		select {
		case <-done:
			// Event loop exited
		case <-time.After(2 * time.Second):
			t.Fatal("Event loop did not exit within 2 seconds")
		}
		if tickCount.Load() == 0 {
			t.Error("Event loop should have processed at least one tick")
		}
	})
	t.Run("responds to context cancellation", func(t *testing.T) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		ctx2, cancel2 := context.WithCancel(context.Background())
		// atomic.Bool for the same cross-goroutine visibility reason.
		var syncCalled atomic.Bool
		syncFunc := func() {
			syncCalled.Store(true)
		}
		done := make(chan struct{})
		go func() {
			runEventLoop(ctx2, cancel2, ticker, syncFunc, server, serverErrChan, 0, log)
			close(done)
		}()
		// Let it run briefly then cancel
		time.Sleep(150 * time.Millisecond)
		cancel2()
		select {
		case <-done:
			// Expected - event loop exited
		case <-time.After(2 * time.Second):
			t.Fatal("Event loop did not exit within 2 seconds")
		}
		if !syncCalled.Load() {
			t.Error("Sync function should have been called at least once")
		}
	})
	t.Run("handles parent process death", func(t *testing.T) {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		ctx2, cancel2 := context.WithCancel(context.Background())
		defer cancel2()
		syncFunc := func() {}
		done := make(chan struct{})
		go func() {
			// Use an invalid (non-existent) parent PID so event loop thinks parent died
			runEventLoop(ctx2, cancel2, ticker, syncFunc, server, serverErrChan, 999999, log)
			close(done)
		}()
		// Event loop should detect dead parent within 10 seconds and exit
		select {
		case <-done:
			// Expected - event loop detected dead parent and exited
		case <-time.After(15 * time.Second):
			t.Fatal("Event loop did not exit after detecting dead parent")
		}
	})
}
// TestRunDaemonLoop_HealthyStartup verifies daemon initialization succeeds with proper setup.
// NOTE(review): this test mutates the package-level dbPath and store globals
// and changes the process working directory, so it must not run in parallel
// with other tests that touch those globals.
func TestRunDaemonLoop_HealthyStartup(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	initTestGitRepo(t, tmpDir)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "beads.db") // Use canonical name
	// Save original globals and restore after test
	oldDBPath := dbPath
	oldStore := store
	oldWorkingDir, _ := os.Getwd()
	defer func() {
		dbPath = oldDBPath
		store = oldStore
		// NOTE(review): os.Chdir error is ignored here and below — probably
		// acceptable for test cleanup, but confirm.
		os.Chdir(oldWorkingDir)
	}()
	// Set up for daemon
	dbPath = testDBPath
	os.Chdir(tmpDir)
	// Create database first
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	t.Run("initialization succeeds with proper database", func(t *testing.T) {
		// Note: runDaemonLoop is designed to run indefinitely, so we test
		// that it doesn't panic during initialization rather than running it fully
		// The full daemon lifecycle is tested in integration with runEventLoop and runEventDrivenLoop
		// Verify database exists and is accessible
		store = testStore
		if _, err := os.Stat(testDBPath); err != nil {
			t.Errorf("Test database should exist: %v", err)
		}
	})
	t.Run("validates database file exists", func(t *testing.T) {
		// This is more of a setup validation than a runDaemonLoop test
		// since runDaemonLoop is called from main without returning until shutdown
		invalidDBPath := filepath.Join(tmpDir, "nonexistent", "beads.db")
		if _, err := os.Stat(invalidDBPath); !os.IsNotExist(err) {
			t.Error("Invalid database path should not exist")
		}
	})
}
// TestCheckDaemonHealth_StorageAccess verifies that the daemon health check
// runs cleanly (no panic, no error) against a healthy, accessible store.
func TestCheckDaemonHealth_StorageAccess(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	dbFile := filepath.Join(beadsDir, "test.db")
	st := newTestStore(t, dbFile)
	defer st.Close()
	bg := context.Background()
	logger := createTestLogger(t)
	t.Run("completes without error on healthy storage", func(t *testing.T) {
		// Should not panic or error
		checkDaemonHealth(bg, st, logger)
	})
	t.Run("logs appropriately when storage is accessible", func(t *testing.T) {
		// This just verifies it runs without panic
		// In a real scenario, we'd check log output
		checkDaemonHealth(bg, st, logger)
	})
}
// TestIsDaemonHealthy verifies daemon health checking via RPC: unreachable
// sockets and stale (non-listening) socket files report unhealthy, while a
// running server reports healthy.
func TestIsDaemonHealthy(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	socketPath := filepath.Join(tmpDir, "bd.sock")
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	workspacePath := tmpDir
	dbPath := testDBPath
	log := createTestLogger(t)
	t.Run("returns false for unreachable daemon", func(t *testing.T) {
		unreachableSocket := filepath.Join(tmpDir, "nonexistent.sock")
		// Idiomatic boolean check (was: `result != false`).
		if isDaemonHealthy(unreachableSocket) {
			t.Error("isDaemonHealthy should return false for unreachable daemon")
		}
	})
	t.Run("returns true for running daemon", func(t *testing.T) {
		server, _, err := startRPCServer(ctx, socketPath, testStore, workspacePath, dbPath, log)
		if err != nil {
			t.Fatalf("Failed to start RPC server: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		<-server.WaitReady()
		// Give socket time to be fully ready
		time.Sleep(100 * time.Millisecond)
		if !isDaemonHealthy(socketPath) {
			t.Error("isDaemonHealthy should return true for healthy daemon")
		}
	})
	t.Run("detects stale socket", func(t *testing.T) {
		staleSocket := filepath.Join(tmpDir, "stale.sock")
		// Create a stale socket file (not actually listening)
		f, err := os.Create(staleSocket)
		if err != nil {
			t.Fatalf("Failed to create stale socket: %v", err)
		}
		f.Close()
		// Idiomatic boolean check (was: `result != false`).
		if isDaemonHealthy(staleSocket) {
			t.Error("isDaemonHealthy should return false for stale socket")
		}
	})
}
// TestEventLoopSignalHandling tests signal handling in event loop.
// NOTE(review): despite the subtest name, no real SIGTERM is delivered —
// the test cancels the loop's context, which is the shutdown path the
// daemon's signal handler presumably triggers; confirm against main.
func TestEventLoopSignalHandling(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	t.Run("handles SIGTERM gracefully", func(t *testing.T) {
		// Standard workspace setup: temp dir, .beads/ subdir, test store.
		tmpDir := makeSocketTempDir(t)
		socketPath := filepath.Join(tmpDir, "bd.sock")
		beadsDir := filepath.Join(tmpDir, ".beads")
		if err := os.MkdirAll(beadsDir, 0755); err != nil {
			t.Fatalf("Failed to create beads dir: %v", err)
		}
		testDBPath := filepath.Join(beadsDir, "test.db")
		testStore := newTestStore(t, testDBPath)
		defer testStore.Close()
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		workspacePath := tmpDir
		dbPath := testDBPath
		log := createTestLogger(t)
		server, serverErrChan, err := startRPCServer(ctx, socketPath, testStore, workspacePath, dbPath, log)
		if err != nil {
			t.Fatalf("Failed to start RPC server: %v", err)
		}
		defer func() {
			if server != nil {
				_ = server.Stop()
			}
		}()
		<-server.WaitReady()
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		ctx2, cancel2 := context.WithCancel(context.Background())
		done := make(chan struct{})
		go func() {
			runEventLoop(ctx2, cancel2, ticker, func() {}, server, serverErrChan, 0, log)
			close(done)
		}()
		// Let it run, then cancel
		time.Sleep(200 * time.Millisecond)
		cancel2()
		// The loop must exit promptly after cancellation.
		select {
		case <-done:
			// Expected - event loop exited
		case <-time.After(2 * time.Second):
			t.Fatal("Event loop did not exit after signal")
		}
	})
}
// createTestLogger creates a daemonLogger for testing.
// The returned logger forwards formatted messages to t.Logf with a
// "[daemon]" prefix so daemon output is captured per-test and only shown
// on failure or with -v.
func createTestLogger(t *testing.T) daemonLogger {
	return daemonLogger{
		logFunc: func(format string, args ...interface{}) {
			t.Logf("[daemon] "+format, args...)
		},
	}
}
// TestDaemonIntegration_SocketCleanup verifies socket cleanup after daemon stops.
// The socket file must exist while the server runs; after Stop it should be
// removed, though that part is logged rather than asserted because cleanup
// timing is not guaranteed here.
func TestDaemonIntegration_SocketCleanup(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	tmpDir := makeSocketTempDir(t)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "test.db")
	testStore := newTestStore(t, testDBPath)
	defer testStore.Close()
	ctx := context.Background()
	log := createTestLogger(t)
	socketPath := filepath.Join(tmpDir, "bd1.sock")
	workspacePath := tmpDir
	dbPath := testDBPath
	ctx1, cancel1 := context.WithTimeout(ctx, 3*time.Second)
	server, _, err := startRPCServer(ctx1, socketPath, testStore, workspacePath, dbPath, log)
	if err != nil {
		t.Fatalf("Failed to start server: %v", err)
	}
	<-server.WaitReady()
	// Verify socket exists
	if _, err := os.Stat(socketPath); err != nil {
		t.Errorf("Socket should exist: %v", err)
	}
	// Stop server
	_ = server.Stop()
	cancel1()
	// Wait for cleanup
	time.Sleep(500 * time.Millisecond)
	// Socket should be gone after cleanup; log (not fail) if it lingers
	// because removal timing can race with this check.
	if _, err := os.Stat(socketPath); !os.IsNotExist(err) {
		t.Logf("Socket still exists after stop (may be cleanup timing): %v", err)
	}
}

View File

@@ -699,3 +699,260 @@ func TestRenderTreeOutput(t *testing.T) {
} }
} }
} }
// TestMergeBidirectionalTrees_Empty confirms that merging two empty trees
// yields an empty result.
func TestMergeBidirectionalTrees_Empty(t *testing.T) {
	down := []*types.TreeNode{}
	up := []*types.TreeNode{}
	merged := mergeBidirectionalTrees(down, up, "test-root")
	if len(merged) != 0 {
		t.Errorf("Expected empty result for empty trees, got %d nodes", len(merged))
	}
}
// TestMergeBidirectionalTrees_OnlyDown merges a dependency (down) tree with
// an up tree holding only the root, and expects every down-tree node to
// survive the merge.
func TestMergeBidirectionalTrees_OnlyDown(t *testing.T) {
	down := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "test-root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
		{
			Issue:    types.Issue{ID: "dep-1", Title: "Dependency 1", Status: types.StatusOpen},
			Depth:    1,
			ParentID: "test-root",
		},
		{
			Issue:    types.Issue{ID: "dep-2", Title: "Dependency 2", Status: types.StatusOpen},
			Depth:    1,
			ParentID: "test-root",
		},
	}
	up := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "test-root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
	}
	merged := mergeBidirectionalTrees(down, up, "test-root")
	// All three down-tree nodes should be present, and nothing more.
	if len(merged) != 3 {
		t.Errorf("Expected 3 nodes, got %d", len(merged))
	}
	// Collect the IDs that made it into the merge.
	present := make(map[string]bool, len(merged))
	for _, node := range merged {
		present[node.ID] = true
	}
	if !present["test-root"] || !present["dep-1"] || !present["dep-2"] {
		t.Error("Expected all down tree nodes in result")
	}
}
// TestMergeBidirectionalTrees_WithDependents merges trees that each carry a
// node beyond the root (one dependency, one dependent) and expects both of
// those nodes in the merged result.
func TestMergeBidirectionalTrees_WithDependents(t *testing.T) {
	down := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "test-root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
		{
			Issue:    types.Issue{ID: "dep-1", Title: "Dependency 1", Status: types.StatusOpen},
			Depth:    1,
			ParentID: "test-root",
		},
	}
	up := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "test-root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
		{
			Issue:    types.Issue{ID: "dependent-1", Title: "Dependent 1", Status: types.StatusOpen},
			Depth:    1,
			ParentID: "test-root",
		},
	}
	merged := mergeBidirectionalTrees(down, up, "test-root")
	// Expected shape: dependent node(s) first, then root plus dependencies;
	// the root appears only once, so at least three nodes total.
	if len(merged) < 3 {
		t.Errorf("Expected at least 3 nodes, got %d", len(merged))
	}
	// Record which IDs are present in the merge.
	present := make(map[string]bool, len(merged))
	for _, node := range merged {
		present[node.ID] = true
	}
	if !present["dependent-1"] {
		t.Error("Expected dependent-1 in merged result")
	}
	if !present["dep-1"] {
		t.Error("Expected dep-1 in merged result")
	}
}
// TestMergeBidirectionalTrees_MultipleDepth merges two multi-level
// hierarchies and checks that every node from both trees appears in the
// merged output (the shared root counted once).
func TestMergeBidirectionalTrees_MultipleDepth(t *testing.T) {
	down := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
		{
			Issue:    types.Issue{ID: "dep-1", Title: "Dep 1", Status: types.StatusOpen},
			Depth:    1,
			ParentID: "root",
		},
		{
			Issue:    types.Issue{ID: "dep-1-1", Title: "Dep 1.1", Status: types.StatusOpen},
			Depth:    2,
			ParentID: "dep-1",
		},
	}
	up := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
		{
			Issue:    types.Issue{ID: "dependent-1", Title: "Dependent 1", Status: types.StatusOpen},
			Depth:    1,
			ParentID: "root",
		},
		{
			Issue:    types.Issue{ID: "dependent-1-1", Title: "Dependent 1.1", Status: types.StatusOpen},
			Depth:    2,
			ParentID: "dependent-1",
		},
	}
	merged := mergeBidirectionalTrees(down, up, "root")
	// Five distinct nodes across both trees (root deduplicated).
	if len(merged) < 5 {
		t.Errorf("Expected at least 5 nodes, got %d", len(merged))
	}
	// Tick off each expected ID as it is found.
	want := map[string]bool{
		"root":          false,
		"dep-1":         false,
		"dep-1-1":       false,
		"dependent-1":   false,
		"dependent-1-1": false,
	}
	for _, node := range merged {
		if _, ok := want[node.ID]; ok {
			want[node.ID] = true
		}
	}
	for id, found := range want {
		if !found {
			t.Errorf("Expected ID %s in merged result", id)
		}
	}
}
// TestMergeBidirectionalTrees_ExcludesRootFromUp confirms the root node is
// not duplicated when it appears in both the down and the up tree.
func TestMergeBidirectionalTrees_ExcludesRootFromUp(t *testing.T) {
	down := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
	}
	up := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
	}
	merged := mergeBidirectionalTrees(down, up, "root")
	// Exactly one node: the root, once.
	if len(merged) != 1 {
		t.Errorf("Expected 1 node (root only), got %d", len(merged))
	}
	if merged[0].ID != "root" {
		t.Errorf("Expected root node, got %s", merged[0].ID)
	}
}
// TestMergeBidirectionalTrees_PreservesDepth checks that each merged node
// keeps the Depth value it carried in its source tree, using deliberately
// unusual depths so accidental recomputation would be caught.
func TestMergeBidirectionalTrees_PreservesDepth(t *testing.T) {
	down := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
		{
			Issue:    types.Issue{ID: "dep-1", Title: "Dep 1", Status: types.StatusOpen},
			Depth:    5, // Non-standard depth to verify preservation
			ParentID: "root",
		},
	}
	up := []*types.TreeNode{
		{
			Issue:    types.Issue{ID: "root", Title: "Root", Status: types.StatusOpen},
			Depth:    0,
			ParentID: "",
		},
		{
			Issue:    types.Issue{ID: "dependent-1", Title: "Dependent 1", Status: types.StatusOpen},
			Depth:    3, // Different depth
			ParentID: "root",
		},
	}
	merged := mergeBidirectionalTrees(down, up, "root")
	// Walk the merge and verify the two marked nodes kept their depths.
	for _, node := range merged {
		switch node.ID {
		case "dep-1":
			if node.Depth != 5 {
				t.Errorf("Expected dep-1 depth=5, got %d", node.Depth)
			}
		case "dependent-1":
			if node.Depth != 3 {
				t.Errorf("Expected dependent-1 depth=3, got %d", node.Depth)
			}
		}
	}
}

View File

@@ -1175,6 +1175,9 @@ func TestCheckSyncBranchHookCompatibility(t *testing.T) {
// Create pre-push hook if specified // Create pre-push hook if specified
if tc.hookVersion != "" { if tc.hookVersion != "" {
hooksDir := filepath.Join(tmpDir, ".git", "hooks") hooksDir := filepath.Join(tmpDir, ".git", "hooks")
if err := os.MkdirAll(hooksDir, 0755); err != nil {
t.Fatal(err)
}
hookPath := filepath.Join(hooksDir, "pre-push") hookPath := filepath.Join(hooksDir, "pre-push")
var hookContent string var hookContent string
if tc.hookVersion == "custom" { if tc.hookVersion == "custom" {
@@ -1246,6 +1249,9 @@ func TestCheckSyncBranchHookQuick(t *testing.T) {
if tc.hookVersion != "" { if tc.hookVersion != "" {
hooksDir := filepath.Join(tmpDir, ".git", "hooks") hooksDir := filepath.Join(tmpDir, ".git", "hooks")
if err := os.MkdirAll(hooksDir, 0755); err != nil {
t.Fatal(err)
}
hookPath := filepath.Join(hooksDir, "pre-push") hookPath := filepath.Join(hooksDir, "pre-push")
hookContent := fmt.Sprintf("#!/bin/sh\n# bd-hooks-version: %s\nexit 0\n", tc.hookVersion) hookContent := fmt.Sprintf("#!/bin/sh\n# bd-hooks-version: %s\nexit 0\n", tc.hookVersion)
if err := os.WriteFile(hookPath, []byte(hookContent), 0755); err != nil { if err := os.WriteFile(hookPath, []byte(hookContent), 0755); err != nil {

View File

@@ -99,7 +99,7 @@ Colors indicate status:
} }
// Render ASCII graph // Render ASCII graph
renderGraph(layout, subgraph) renderGraph(layout, nil)
}, },
} }

View File

@@ -25,7 +25,8 @@ bd info --json
# { # {
# "database_path": "/path/to/.beads/beads.db", # "database_path": "/path/to/.beads/beads.db",
# "issue_prefix": "bd", # "issue_prefix": "bd",
# "daemon_running": true # "daemon_running": true,
# "agent_mail_enabled": false
# } # }
``` ```
@@ -64,6 +65,14 @@ bd create "Add support for OAuth 2.0" -d "Implement RFC 6749 (OAuth 2.0 spec)" -
# Create multiple issues from markdown file # Create multiple issues from markdown file
bd create -f feature-plan.md --json bd create -f feature-plan.md --json
# Create with description from file (avoids shell escaping issues)
bd create "Issue title" --body-file=description.md --json
bd create "Issue title" --body-file description.md -p 1 --json
# Read description from stdin
echo "Description text" | bd create "Issue title" --body-file=- --json
cat description.md | bd create "Issue title" --body-file - -p 1 --json
# Create epic with hierarchical child tasks # Create epic with hierarchical child tasks
bd create "Auth System" -t epic -p 1 --json # Returns: bd-a3f8e9 bd create "Auth System" -t epic -p 1 --json # Returns: bd-a3f8e9
bd create "Login UI" -p 1 --json # Auto-assigned: bd-a3f8e9.1 bd create "Login UI" -p 1 --json # Auto-assigned: bd-a3f8e9.1

View File

@@ -299,10 +299,17 @@ func TestRunSync_KillsDescendants(t *testing.T) {
t.Fatalf("Invalid pid in pid file: %v", err) t.Fatalf("Invalid pid in pid file: %v", err)
} }
// Check /proc/<pid> does not exist // Check /proc/<pid> does not exist - retry a few times in case of timing
if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { for i := 0; i < 10; i++ {
t.Fatalf("Child process %d still exists after timeout", pid) if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err != nil {
// Process is gone, test passed
return
}
time.Sleep(100 * time.Millisecond)
} }
// If we get here, the process is still running
t.Fatalf("Child process %d still exists after timeout", pid)
} }
func TestRunSync_HookFailure(t *testing.T) { func TestRunSync_HookFailure(t *testing.T) {

View File

@@ -1215,3 +1215,396 @@ func TestGetDependenciesWithMetadataMultipleTypes(t *testing.T) {
t.Errorf("Expected discovered dependency type 'discovered-from', got %s", typeMap[discovered.ID]) t.Errorf("Expected discovered dependency type 'discovered-from', got %s", typeMap[discovered.ID])
} }
} }
// TestGetDependencyTree_ComplexDiamond tests a diamond dependency pattern:
// D blocks both B and C, each of which blocks A. The tree rooted at A must
// contain all four nodes exactly once (deduplicated traversal).
func TestGetDependencyTree_ComplexDiamond(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	// Diamond pattern:
	//     D
	//    / \
	//   B   C
	//    \ /
	//     A
	issueA := &types.Issue{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueB := &types.Issue{Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueC := &types.Issue{Title: "C", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueD := &types.Issue{Title: "D", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, issue := range []*types.Issue{issueA, issueB, issueC, issueD} {
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue(%s) failed: %v", issue.Title, err)
		}
	}

	// Create dependencies: D blocks B, D blocks C, B blocks A, C blocks A.
	for _, dep := range []*types.Dependency{
		{IssueID: issueB.ID, DependsOnID: issueD.ID, Type: types.DepBlocks},
		{IssueID: issueC.ID, DependsOnID: issueD.ID, Type: types.DepBlocks},
		{IssueID: issueA.ID, DependsOnID: issueB.ID, Type: types.DepBlocks},
		{IssueID: issueA.ID, DependsOnID: issueC.ID, Type: types.DepBlocks},
	} {
		if err := store.AddDependency(ctx, dep, "test-user"); err != nil {
			t.Fatalf("AddDependency(%s -> %s) failed: %v", dep.IssueID, dep.DependsOnID, err)
		}
	}

	// Get tree from A.
	tree, err := store.GetDependencyTree(ctx, issueA.ID, 50, false, false)
	if err != nil {
		t.Fatalf("GetDependencyTree failed: %v", err)
	}

	// Should have all 4 nodes, each exactly once.
	if len(tree) != 4 {
		t.Fatalf("Expected 4 nodes in diamond, got %d", len(tree))
	}

	// Verify all expected nodes are present.
	idSet := make(map[string]bool, len(tree))
	for _, node := range tree {
		idSet[node.ID] = true
	}
	for _, id := range []string{issueA.ID, issueB.ID, issueC.ID, issueD.ID} {
		if !idSet[id] {
			t.Errorf("Expected node %s in diamond tree", id)
		}
	}
}
// TestGetDependencyTree_ShowAllPaths tests the showAllPaths flag behavior.
// With showAllPaths=true a node reachable via multiple paths may be listed
// once per path, so the result can never be smaller than the deduplicated
// tree produced by showAllPaths=false.
func TestGetDependencyTree_ShowAllPaths(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	// Diamond: D blocks B and C; B and C block A (A reaches D via two paths).
	issueA := &types.Issue{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueB := &types.Issue{Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueC := &types.Issue{Title: "C", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueD := &types.Issue{Title: "D", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, issue := range []*types.Issue{issueA, issueB, issueC, issueD} {
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue(%s) failed: %v", issue.Title, err)
		}
	}
	for _, dep := range []*types.Dependency{
		{IssueID: issueB.ID, DependsOnID: issueD.ID, Type: types.DepBlocks},
		{IssueID: issueC.ID, DependsOnID: issueD.ID, Type: types.DepBlocks},
		{IssueID: issueA.ID, DependsOnID: issueB.ID, Type: types.DepBlocks},
		{IssueID: issueA.ID, DependsOnID: issueC.ID, Type: types.DepBlocks},
	} {
		if err := store.AddDependency(ctx, dep, "test-user"); err != nil {
			t.Fatalf("AddDependency(%s -> %s) failed: %v", dep.IssueID, dep.DependsOnID, err)
		}
	}

	// Get tree with showAllPaths=true.
	treeAll, err := store.GetDependencyTree(ctx, issueA.ID, 50, true, false)
	if err != nil {
		t.Fatalf("GetDependencyTree with showAllPaths failed: %v", err)
	}

	// Get tree with showAllPaths=false (deduplicated).
	treeDedup, err := store.GetDependencyTree(ctx, issueA.ID, 50, false, false)
	if err != nil {
		t.Fatalf("GetDependencyTree without showAllPaths failed: %v", err)
	}

	// The all-paths view must be at least as large as the deduplicated view.
	if len(treeAll) < len(treeDedup) {
		t.Errorf("showAllPaths=true should have >= nodes than showAllPaths=false: got %d vs %d", len(treeAll), len(treeDedup))
	}
}
// TestGetDependencyTree_ReverseDirection tests getting dependents instead of
// dependencies. With the chain "A depends on B, B depends on C" (so B blocks
// A and C blocks B):
//   - down from A yields A plus its dependencies B and C;
//   - up (reverse=true) from C yields C plus its dependents B and A.
func TestGetDependencyTree_ReverseDirection(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	issueA := &types.Issue{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueB := &types.Issue{Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueC := &types.Issue{Title: "C", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, issue := range []*types.Issue{issueA, issueB, issueC} {
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue(%s) failed: %v", issue.Title, err)
		}
	}

	// A depends on B, B depends on C.
	for _, dep := range []*types.Dependency{
		{IssueID: issueA.ID, DependsOnID: issueB.ID, Type: types.DepBlocks},
		{IssueID: issueB.ID, DependsOnID: issueC.ID, Type: types.DepBlocks},
	} {
		if err := store.AddDependency(ctx, dep, "test-user"); err != nil {
			t.Fatalf("AddDependency(%s -> %s) failed: %v", dep.IssueID, dep.DependsOnID, err)
		}
	}

	// Get normal tree from A (root A, then dependencies B, C).
	downTree, err := store.GetDependencyTree(ctx, issueA.ID, 50, false, false)
	if err != nil {
		t.Fatalf("GetDependencyTree down failed: %v", err)
	}

	// Get reverse tree from C (root C, then dependents B, A).
	upTree, err := store.GetDependencyTree(ctx, issueC.ID, 50, false, true)
	if err != nil {
		t.Fatalf("GetDependencyTree reverse failed: %v", err)
	}

	// Both trees must include at least their root nodes.
	if len(downTree) < 1 {
		t.Fatal("Down tree should include at least the root node A")
	}
	if len(upTree) < 1 {
		t.Fatal("Up tree should include at least the root node C")
	}

	// The root node comes first in each tree.
	if downTree[0].ID != issueA.ID {
		t.Errorf("Down tree should start with A, got %s", downTree[0].ID)
	}
	if upTree[0].ID != issueC.ID {
		t.Errorf("Up tree should start with C, got %s", upTree[0].ID)
	}

	// Down tree from A should have B and C as dependencies.
	downIDs := make(map[string]bool, len(downTree))
	for _, node := range downTree {
		downIDs[node.ID] = true
	}
	if !downIDs[issueB.ID] || !downIDs[issueC.ID] {
		t.Error("Down tree from A should include B and C as dependencies")
	}

	// Up tree from C should have B and A as dependents.
	upIDs := make(map[string]bool, len(upTree))
	for _, node := range upTree {
		upIDs[node.ID] = true
	}
	if !upIDs[issueB.ID] || !upIDs[issueA.ID] {
		t.Error("Up tree from C should include B and A as dependents")
	}
}
// TestDetectCycles_PreventionAtAddTime verifies that AddDependency rejects a
// blocking edge that would close a 2-node cycle, so DetectCycles subsequently
// finds nothing.
func TestDetectCycles_PreventionAtAddTime(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	// Create two issues.
	issueA := &types.Issue{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueB := &types.Issue{Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, issue := range []*types.Issue{issueA, issueB} {
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue(%s) failed: %v", issue.Title, err)
		}
	}

	// Add A -> B.
	err := store.AddDependency(ctx, &types.Dependency{
		IssueID:     issueA.ID,
		DependsOnID: issueB.ID,
		Type:        types.DepBlocks,
	}, "test-user")
	if err != nil {
		t.Fatalf("First AddDependency failed: %v", err)
	}

	// Try to add B -> A (would create a cycle) - should fail.
	err = store.AddDependency(ctx, &types.Dependency{
		IssueID:     issueB.ID,
		DependsOnID: issueA.ID,
		Type:        types.DepBlocks,
	}, "test-user")
	if err == nil {
		t.Fatal("Expected AddDependency to fail when creating 2-node cycle")
	}

	// Since the cycle was prevented at add time, none should be detectable.
	cycles, err := store.DetectCycles(ctx)
	if err != nil {
		t.Fatalf("DetectCycles failed: %v", err)
	}
	if len(cycles) != 0 {
		t.Error("Expected no cycles since cycle was prevented at add time")
	}
}
// TestDetectCycles_LongerCyclePrevention verifies that AddDependency also
// rejects an edge that would close a longer (3-node) cycle A -> B -> C -> A.
func TestDetectCycles_LongerCyclePrevention(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	// Create a chain: A -> B -> C.
	issues := make(map[string]*types.Issue)
	for _, name := range []string{"A", "B", "C"} {
		issue := &types.Issue{Title: name, Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue(%s) failed: %v", name, err)
		}
		issues[name] = issue
	}

	// Build chain A -> B -> C.
	for _, link := range [][2]string{{"A", "B"}, {"B", "C"}} {
		if err := store.AddDependency(ctx, &types.Dependency{
			IssueID:     issues[link[0]].ID,
			DependsOnID: issues[link[1]].ID,
			Type:        types.DepBlocks,
		}, "test-user"); err != nil {
			t.Fatalf("AddDependency(%s -> %s) failed: %v", link[0], link[1], err)
		}
	}

	// Try to close the cycle: C -> A (should fail).
	err := store.AddDependency(ctx, &types.Dependency{
		IssueID:     issues["C"].ID,
		DependsOnID: issues["A"].ID,
		Type:        types.DepBlocks,
	}, "test-user")
	if err == nil {
		t.Fatal("Expected AddDependency to fail when creating 3-node cycle")
	}

	// Verify no cycles exist since the closing edge was rejected.
	cycles, err := store.DetectCycles(ctx)
	if err != nil {
		t.Fatalf("DetectCycles failed: %v", err)
	}
	if len(cycles) != 0 {
		t.Error("Expected no cycles since cycle was prevented")
	}
}
// TestDetectCycles_MultipleGraphs verifies that two independent, acyclic
// dependency chains produce no cycle reports.
func TestDetectCycles_MultipleGraphs(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	// Create two independent dependency chains:
	//   Chain 1: A1 -> B1 -> C1
	//   Chain 2: A2 -> B2 -> C2
	chains := [][]string{{"A1", "B1", "C1"}, {"A2", "B2", "C2"}}
	issuesMap := make(map[string]*types.Issue)
	for _, chain := range chains {
		for _, name := range chain {
			issue := &types.Issue{Title: name, Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
			if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
				t.Fatalf("CreateIssue(%s) failed: %v", name, err)
			}
			issuesMap[name] = issue
		}
		// Link consecutive issues in the chain.
		for i := 0; i < len(chain)-1; i++ {
			if err := store.AddDependency(ctx, &types.Dependency{
				IssueID:     issuesMap[chain[i]].ID,
				DependsOnID: issuesMap[chain[i+1]].ID,
				Type:        types.DepBlocks,
			}, "test-user"); err != nil {
				t.Fatalf("AddDependency(%s -> %s) failed: %v", chain[i], chain[i+1], err)
			}
		}
	}

	// Verify no cycles across both subgraphs.
	cycles, err := store.DetectCycles(ctx)
	if err != nil {
		t.Fatalf("DetectCycles failed: %v", err)
	}
	if len(cycles) != 0 {
		t.Errorf("Expected no cycles in independent chains, got %d", len(cycles))
	}
}
// TestDetectCycles_RelatedTypeAllowsAdditionButReportsDetection tests that
// relates-to links may be added in both directions even though they are
// technically cycles: AddDependency skips cycle prevention for relates-to,
// while DetectCycles still reports the resulting cycle.
func TestDetectCycles_RelatedTypeAllowsAdditionButReportsDetection(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	// Create two issues.
	issueA := &types.Issue{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueB := &types.Issue{Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, issue := range []*types.Issue{issueA, issueB} {
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue(%s) failed: %v", issue.Title, err)
		}
	}

	// Add A relates-to B.
	err := store.AddDependency(ctx, &types.Dependency{
		IssueID:     issueA.ID,
		DependsOnID: issueB.ID,
		Type:        types.DepRelatesTo,
	}, "test-user")
	if err != nil {
		t.Fatalf("AddDependency for relates-to failed: %v", err)
	}

	// Add B relates-to A. This should succeed despite creating a cycle,
	// because relates-to skips cycle detection at add time.
	err = store.AddDependency(ctx, &types.Dependency{
		IssueID:     issueB.ID,
		DependsOnID: issueA.ID,
		Type:        types.DepRelatesTo,
	}, "test-user")
	if err != nil {
		t.Fatalf("AddDependency for reverse relates-to failed: %v", err)
	}

	// DetectCycles will report the cycle even though AddDependency allowed it
	// (it may report multiple entry points for the same cycle).
	cycles, err := store.DetectCycles(ctx)
	if err != nil {
		t.Fatalf("DetectCycles failed: %v", err)
	}
	if len(cycles) == 0 {
		t.Error("Expected at least 1 cycle detected for bidirectional relates-to")
	}

	// Verify both directions of the link exist.
	depsA, err := store.GetDependenciesWithMetadata(ctx, issueA.ID)
	if err != nil {
		t.Fatalf("GetDependenciesWithMetadata failed: %v", err)
	}
	depsB, err := store.GetDependenciesWithMetadata(ctx, issueB.ID)
	if err != nil {
		t.Fatalf("GetDependenciesWithMetadata failed: %v", err)
	}

	// A should have B as a relates-to dependency.
	hasB := false
	for _, dep := range depsA {
		if dep.ID == issueB.ID && dep.DependencyType == types.DepRelatesTo {
			hasB = true
			break
		}
	}
	if !hasB {
		t.Error("Expected A to relate-to B")
	}

	// B should have A as a relates-to dependency.
	hasA := false
	for _, dep := range depsB {
		if dep.ID == issueA.ID && dep.DependencyType == types.DepRelatesTo {
			hasA = true
			break
		}
	}
	if !hasA {
		t.Error("Expected B to relate-to A")
	}
}

View File

@@ -512,62 +512,8 @@ func TestCreateIssuesRollback(t *testing.T) {
} }
}) })
t.Run("rollback on conflict with existing ID", func(t *testing.T) { // Note: "rollback on conflict with existing ID" test removed - CreateIssues
// Create an issue with explicit ID // uses INSERT OR IGNORE which silently skips duplicates (needed for JSONL import)
existingIssue := &types.Issue{
ID: "bd-existing",
Title: "Existing issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
}
err := store.CreateIssue(ctx, existingIssue, "test-user")
if err != nil {
t.Fatalf("failed to create existing issue: %v", err)
}
// Try to create batch with conflicting ID
issues := []*types.Issue{
{
Title: "Should rollback",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
},
{
ID: "bd-existing",
Title: "Conflict",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
},
}
err = store.CreateIssues(ctx, issues, "test-user")
if err == nil {
t.Fatal("expected error for duplicate ID, got nil")
}
// Verify rollback - "Should rollback" issue should not exist
filter := types.IssueFilter{}
allIssues, err := store.SearchIssues(ctx, "", filter)
if err != nil {
t.Fatalf("failed to search issues: %v", err)
}
// Count should only include the pre-existing issues
foundRollback := false
for _, issue := range allIssues {
if issue.Title == "Should rollback" {
foundRollback = true
break
}
}
if foundRollback {
t.Error("expected rollback of all issues in batch, but 'Should rollback' was found")
}
})
} }
func TestUpdateIssue(t *testing.T) { func TestUpdateIssue(t *testing.T) {

View File

@@ -1,7 +1,10 @@
package validation package validation
import ( import (
"strings"
"testing" "testing"
"github.com/steveyegge/beads/internal/types"
) )
func TestParsePriority(t *testing.T) { func TestParsePriority(t *testing.T) {
@@ -106,6 +109,62 @@ func TestValidateIDFormat(t *testing.T) {
} }
} }
// TestParseIssueType verifies that ParseIssueType accepts each canonical
// issue-type name (trimming surrounding whitespace) and rejects anything
// else — including differently-cased variants — with an "invalid issue
// type" error.
func TestParseIssueType(t *testing.T) {
	cases := []struct {
		name          string
		input         string
		wantType      types.IssueType
		wantError     bool
		errorContains string
	}{
		// Valid issue types
		{"bug type", "bug", types.TypeBug, false, ""},
		{"feature type", "feature", types.TypeFeature, false, ""},
		{"task type", "task", types.TypeTask, false, ""},
		{"epic type", "epic", types.TypeEpic, false, ""},
		{"chore type", "chore", types.TypeChore, false, ""},
		// Case sensitivity (function is case-sensitive)
		{"uppercase bug", "BUG", types.TypeTask, true, "invalid issue type"},
		{"mixed case feature", "FeAtUrE", types.TypeTask, true, "invalid issue type"},
		// With whitespace
		{"bug with spaces", " bug ", types.TypeBug, false, ""},
		{"feature with tabs", "\tfeature\t", types.TypeFeature, false, ""},
		// Invalid issue types
		{"invalid type", "invalid", types.TypeTask, true, "invalid issue type"},
		{"empty string", "", types.TypeTask, true, "invalid issue type"},
		{"whitespace only", " ", types.TypeTask, true, "invalid issue type"},
		{"numeric type", "123", types.TypeTask, true, "invalid issue type"},
		{"special chars", "bug!", types.TypeTask, true, "invalid issue type"},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := ParseIssueType(tc.input)
			switch {
			case (err != nil) != tc.wantError:
				// Error presence does not match the expectation.
				t.Errorf("ParseIssueType(%q) error = %v, wantError %v", tc.input, err, tc.wantError)
			case err != nil && tc.errorContains != "":
				// Expected failure: the message must be descriptive.
				if !strings.Contains(err.Error(), tc.errorContains) {
					t.Errorf("ParseIssueType(%q) error message = %q, should contain %q", tc.input, err.Error(), tc.errorContains)
				}
			case got != tc.wantType:
				// Success path: the parsed type must match.
				t.Errorf("ParseIssueType(%q) = %v, want %v", tc.input, got, tc.wantType)
			}
		})
	}
}
func TestValidatePrefix(t *testing.T) { func TestValidatePrefix(t *testing.T) {
tests := []struct { tests := []struct {
name string name string