bd sync: 2025-12-23 13:49:07

This commit is contained in:
Steve Yegge
2025-12-23 13:49:07 -08:00
parent 37ec967619
commit 7b671662aa
28 changed files with 1192 additions and 5622 deletions

View File

@@ -1427,237 +1427,6 @@ func TestIsWispDatabase(t *testing.T) {
}
}
// TestFindDatabaseInBeadsDir tests the database discovery within a .beads directory
func TestFindDatabaseInBeadsDir(t *testing.T) {
	tests := []struct {
		name         string
		files        []string // files to create inside the temp .beads dir
		configJSON   string   // optional config.json contents
		expectDB     string   // expected basename of the discovered DB ("" = none expected)
		warnOnIssues bool
	}{
		{
			name:     "canonical beads.db only",
			files:    []string{"beads.db"},
			expectDB: "beads.db",
		},
		{
			name:     "legacy bd.db only",
			files:    []string{"bd.db"},
			expectDB: "bd.db",
		},
		{
			name:     "prefers beads.db over other db files",
			files:    []string{"custom.db", "beads.db", "other.db"},
			expectDB: "beads.db",
		},
		{
			name:     "skips backup files",
			files:    []string{"beads.backup.db", "real.db"},
			expectDB: "real.db",
		},
		{
			name:     "skips vc.db",
			files:    []string{"vc.db", "beads.db"},
			expectDB: "beads.db",
		},
		{
			name:     "no db files returns empty",
			files:    []string{"readme.txt", "config.yaml"},
			expectDB: "",
		},
		{
			name:     "only backup files returns empty",
			files:    []string{"beads.backup.db", "vc.db"},
			expectDB: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// t.TempDir is cleaned up automatically by the test framework,
			// matching the style used by the other tests in this package
			// (the old os.MkdirTemp + defer os.RemoveAll is redundant).
			tmpDir := t.TempDir()
			// Create test files
			for _, file := range tt.files {
				path := filepath.Join(tmpDir, file)
				if err := os.WriteFile(path, []byte{}, 0644); err != nil {
					t.Fatal(err)
				}
			}
			// Write config.json if specified
			if tt.configJSON != "" {
				configPath := filepath.Join(tmpDir, "config.json")
				if err := os.WriteFile(configPath, []byte(tt.configJSON), 0644); err != nil {
					t.Fatal(err)
				}
			}
			result := findDatabaseInBeadsDir(tmpDir, tt.warnOnIssues)
			if tt.expectDB == "" {
				if result != "" {
					t.Errorf("findDatabaseInBeadsDir() = %q, want empty string", result)
				}
			} else {
				expected := filepath.Join(tmpDir, tt.expectDB)
				if result != expected {
					t.Errorf("findDatabaseInBeadsDir() = %q, want %q", result, expected)
				}
			}
		})
	}
}
// TestFindAllDatabases tests the multi-database discovery
func TestFindAllDatabases(t *testing.T) {
	// t.Setenv records the original value (or its absence) and restores it
	// during cleanup; the explicit Unsetenv then leaves BEADS_DIR fully
	// unset for the duration of the test. This replaces the manual
	// save/defer-restore boilerplate.
	t.Setenv("BEADS_DIR", "")
	os.Unsetenv("BEADS_DIR")
	// t.TempDir is removed by the framework *after* t.Chdir's cleanup has
	// restored the working directory. The old `defer os.RemoveAll(tmpDir)`
	// ran while the cwd was still inside tmpDir (defers run before
	// t.Chdir's cleanup), which fails on Windows.
	tmpDir := t.TempDir()
	// Create .beads directory with database
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	dbPath := filepath.Join(beadsDir, "beads.db")
	if err := os.WriteFile(dbPath, []byte{}, 0644); err != nil {
		t.Fatal(err)
	}
	// Create subdirectory and change to it
	subDir := filepath.Join(tmpDir, "sub", "nested")
	if err := os.MkdirAll(subDir, 0755); err != nil {
		t.Fatal(err)
	}
	t.Chdir(subDir)
	// FindAllDatabases should find the parent .beads
	result := FindAllDatabases()
	if len(result) == 0 {
		t.Error("FindAllDatabases() returned empty slice, expected at least one database")
	} else {
		// Resolve symlinks before comparing (e.g. macOS /tmp -> /private/tmp).
		resultResolved, _ := filepath.EvalSymlinks(result[0].Path)
		dbPathResolved, _ := filepath.EvalSymlinks(dbPath)
		if resultResolved != dbPathResolved {
			t.Errorf("FindAllDatabases()[0].Path = %q, want %q", result[0].Path, dbPath)
		}
	}
}
// TestFindAllDatabases_NoDatabase tests FindAllDatabases when no database exists
func TestFindAllDatabases_NoDatabase(t *testing.T) {
	// t.Setenv restores the original value (or unset state) on cleanup; the
	// explicit Unsetenv leaves BEADS_DIR fully unset for the test.
	t.Setenv("BEADS_DIR", "")
	os.Unsetenv("BEADS_DIR")
	// Create temp directory without .beads. t.TempDir's removal runs after
	// t.Chdir has restored the working directory.
	tmpDir := t.TempDir()
	t.Chdir(tmpDir)
	// FindAllDatabases should return empty slice (not nil)
	result := FindAllDatabases()
	if result == nil {
		t.Error("FindAllDatabases() returned nil, expected empty slice")
	}
	if len(result) != 0 {
		t.Errorf("FindAllDatabases() returned %d databases, expected 0", len(result))
	}
}
// TestFindAllDatabases_StopsAtFirst tests that FindAllDatabases stops at first .beads found
func TestFindAllDatabases_StopsAtFirst(t *testing.T) {
	// t.Setenv restores the original value (or unset state) on cleanup; the
	// explicit Unsetenv leaves BEADS_DIR fully unset for the test.
	t.Setenv("BEADS_DIR", "")
	os.Unsetenv("BEADS_DIR")
	// t.TempDir cleanup runs after t.Chdir has restored the working
	// directory, unlike the old defer os.RemoveAll which could try to
	// delete the current directory on Windows.
	tmpDir := t.TempDir()
	// Create parent .beads
	parentBeadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(parentBeadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(parentBeadsDir, "beads.db"), []byte{}, 0644); err != nil {
		t.Fatal(err)
	}
	// Create child project with its own .beads
	childDir := filepath.Join(tmpDir, "child")
	childBeadsDir := filepath.Join(childDir, ".beads")
	if err := os.MkdirAll(childBeadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	childDBPath := filepath.Join(childBeadsDir, "beads.db")
	if err := os.WriteFile(childDBPath, []byte{}, 0644); err != nil {
		t.Fatal(err)
	}
	// Change to child directory
	t.Chdir(childDir)
	// FindAllDatabases should return only the child's database (stops at first)
	result := FindAllDatabases()
	if len(result) != 1 {
		t.Errorf("FindAllDatabases() returned %d databases, expected 1 (should stop at first)", len(result))
	}
	if len(result) > 0 {
		// Resolve symlinks before comparing (e.g. macOS /tmp -> /private/tmp).
		resultResolved, _ := filepath.EvalSymlinks(result[0].Path)
		childDBResolved, _ := filepath.EvalSymlinks(childDBPath)
		if resultResolved != childDBResolved {
			t.Errorf("FindAllDatabases() found %q, expected child database %q", result[0].Path, childDBPath)
		}
	}
}
// TestEnsureWispGitignore tests that EnsureWispGitignore correctly
// adds the wisp directory to .gitignore
func TestEnsureWispGitignore(t *testing.T) {

View File

@@ -1,507 +0,0 @@
package beads
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
// TestCanonicalizeGitURL tests URL normalization for various git URL formats
//
// Covered behaviors (as asserted by the table below): .git suffix and
// trailing-slash stripping, host lowercasing with path case preserved,
// default-port elision (443/22/80) with custom ports kept, and
// leading/trailing whitespace trimming.
func TestCanonicalizeGitURL(t *testing.T) {
	tests := []struct {
		name     string // subtest name
		input    string // raw remote URL as git would report it
		expected string // canonical host/path form
	}{
		// HTTPS URLs
		{
			name:     "https basic",
			input:    "https://github.com/user/repo",
			expected: "github.com/user/repo",
		},
		{
			name:     "https with .git suffix",
			input:    "https://github.com/user/repo.git",
			expected: "github.com/user/repo",
		},
		{
			name:     "https with trailing slash",
			input:    "https://github.com/user/repo/",
			expected: "github.com/user/repo",
		},
		{
			name:     "https uppercase host",
			input:    "https://GitHub.COM/User/Repo.git",
			expected: "github.com/User/Repo",
		},
		{
			name:     "https with port 443",
			input:    "https://github.com:443/user/repo.git",
			expected: "github.com/user/repo",
		},
		{
			name:     "https with custom port",
			input:    "https://gitlab.company.com:8443/user/repo.git",
			expected: "gitlab.company.com:8443/user/repo",
		},
		// SSH URLs (protocol style)
		{
			name:     "ssh protocol basic",
			input:    "ssh://git@github.com/user/repo.git",
			expected: "github.com/user/repo",
		},
		{
			name:     "ssh with port 22",
			input:    "ssh://git@github.com:22/user/repo.git",
			expected: "github.com/user/repo",
		},
		{
			name:     "ssh with custom port",
			input:    "ssh://git@gitlab.company.com:2222/user/repo.git",
			expected: "gitlab.company.com:2222/user/repo",
		},
		// SCP-style URLs (git@host:path)
		{
			name:     "scp style basic",
			input:    "git@github.com:user/repo.git",
			expected: "github.com/user/repo",
		},
		{
			name:     "scp style without .git",
			input:    "git@github.com:user/repo",
			expected: "github.com/user/repo",
		},
		{
			name:     "scp style uppercase host",
			input:    "git@GITHUB.COM:User/Repo.git",
			expected: "github.com/User/Repo",
		},
		{
			name:     "scp style with trailing slash",
			input:    "git@github.com:user/repo/",
			expected: "github.com/user/repo",
		},
		{
			name:     "scp style deep path",
			input:    "git@gitlab.com:org/team/project/repo.git",
			expected: "gitlab.com/org/team/project/repo",
		},
		// HTTP URLs (less common but valid)
		{
			name:     "http basic",
			input:    "http://github.com/user/repo.git",
			expected: "github.com/user/repo",
		},
		{
			name:     "http with port 80",
			input:    "http://github.com:80/user/repo.git",
			expected: "github.com/user/repo",
		},
		// Git protocol
		{
			name:     "git protocol",
			input:    "git://github.com/user/repo.git",
			expected: "github.com/user/repo",
		},
		// Whitespace handling
		{
			name:     "with leading whitespace",
			input:    " https://github.com/user/repo.git",
			expected: "github.com/user/repo",
		},
		{
			name:     "with trailing whitespace",
			input:    "https://github.com/user/repo.git ",
			expected: "github.com/user/repo",
		},
		{
			name:     "with newline",
			input:    "https://github.com/user/repo.git\n",
			expected: "github.com/user/repo",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := canonicalizeGitURL(tt.input)
			if err != nil {
				t.Fatalf("canonicalizeGitURL(%q) error = %v", tt.input, err)
			}
			if result != tt.expected {
				t.Errorf("canonicalizeGitURL(%q) = %q, want %q", tt.input, result, tt.expected)
			}
		})
	}
}
// TestCanonicalizeGitURL_LocalPath tests that local paths are handled
func TestCanonicalizeGitURL_LocalPath(t *testing.T) {
	// A temp directory stands in for a local repository path.
	localPath := t.TempDir()
	got, err := canonicalizeGitURL(localPath)
	if err != nil {
		t.Fatalf("canonicalizeGitURL(%q) error = %v", localPath, err)
	}
	// The canonical form must never contain backslashes, even on Windows.
	if strings.Contains(got, "\\") {
		t.Errorf("canonicalizeGitURL(%q) = %q, should use forward slashes", localPath, got)
	}
}
// TestCanonicalizeGitURL_WindowsPath tests Windows path detection
func TestCanonicalizeGitURL_WindowsPath(t *testing.T) {
	// This tests the Windows path detection logic (C:/)
	// The function should NOT treat "C:/foo/bar" as an scp-style URL
	tests := []struct {
		input    string
		expected string
	}{
		// These are NOT scp-style URLs - they're Windows paths
		{"C:/Users/test/repo", "C:/Users/test/repo"},
		{"D:/projects/myrepo", "D:/projects/myrepo"},
	}
	for _, tt := range tests {
		result, err := canonicalizeGitURL(tt.input)
		if err != nil {
			t.Fatalf("canonicalizeGitURL(%q) error = %v", tt.input, err)
		}
		// BUG FIX: the expected field was populated but never compared; the
		// original only asserted that the result contained a slash, so a
		// mangled path would have passed. Assert the full canonical form.
		if result != tt.expected {
			t.Errorf("canonicalizeGitURL(%q) = %q, want %q", tt.input, result, tt.expected)
		}
	}
}
// TestComputeRepoID_WithRemote tests ComputeRepoID when remote.origin.url exists
func TestComputeRepoID_WithRemote(t *testing.T) {
	// Create temporary directory for test repo
	tmpDir := t.TempDir()
	// Initialize git repo; skip (not fail) when git is not installed.
	cmd := exec.Command("git", "init")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Skipf("git not available: %v", err)
	}
	// Configure git user (errors intentionally ignored; identity is not
	// required for the remote-based ID but keeps git quiet on CI).
	cmd = exec.Command("git", "config", "user.email", "test@example.com")
	cmd.Dir = tmpDir
	_ = cmd.Run()
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = tmpDir
	_ = cmd.Run()
	// Set remote.origin.url
	cmd = exec.Command("git", "remote", "add", "origin", "https://github.com/user/test-repo.git")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("git remote add failed: %v", err)
	}
	// Change to repo dir (restored automatically by t.Chdir's cleanup)
	t.Chdir(tmpDir)
	// ComputeRepoID should return a consistent hash
	result1, err := ComputeRepoID()
	if err != nil {
		t.Fatalf("ComputeRepoID() error = %v", err)
	}
	// Should be a 32-character hex string (16 bytes)
	if len(result1) != 32 {
		t.Errorf("ComputeRepoID() = %q, expected 32 character hex string", result1)
	}
	// Should be consistent across calls
	result2, err := ComputeRepoID()
	if err != nil {
		t.Fatalf("ComputeRepoID() second call error = %v", err)
	}
	if result1 != result2 {
		t.Errorf("ComputeRepoID() not consistent: %q vs %q", result1, result2)
	}
}
// TestComputeRepoID_NoRemote tests ComputeRepoID when no remote exists
func TestComputeRepoID_NoRemote(t *testing.T) {
	// Stand up a bare-bones repo with no remote configured.
	repoDir := t.TempDir()
	gitInit := exec.Command("git", "init")
	gitInit.Dir = repoDir
	if err := gitInit.Run(); err != nil {
		t.Skipf("git not available: %v", err)
	}
	t.Chdir(repoDir)
	// With no remote, ComputeRepoID falls back to using the local path.
	id, err := ComputeRepoID()
	if err != nil {
		t.Fatalf("ComputeRepoID() error = %v", err)
	}
	// The fallback still yields a 32-character hex ID.
	if len(id) != 32 {
		t.Errorf("ComputeRepoID() = %q, expected 32 character hex string", id)
	}
}
// TestComputeRepoID_NotGitRepo tests ComputeRepoID when not in a git repo
func TestComputeRepoID_NotGitRepo(t *testing.T) {
	// Create temporary directory that is NOT a git repo
	tmpDir := t.TempDir()
	t.Chdir(tmpDir)
	// ComputeRepoID should return an error
	_, err := ComputeRepoID()
	if err == nil {
		// BUG FIX: must be Fatal, not Error - the Contains check below
		// would dereference a nil error and panic if execution continued.
		t.Fatal("ComputeRepoID() expected error for non-git directory, got nil")
	}
	if !strings.Contains(err.Error(), "not a git repository") {
		t.Errorf("ComputeRepoID() error = %q, expected 'not a git repository'", err.Error())
	}
}
// TestComputeRepoID_DifferentRemotesSameCanonical tests that different URL formats
// for the same repo produce the same ID
func TestComputeRepoID_DifferentRemotesSameCanonical(t *testing.T) {
	// Three spellings of the same repository; after canonicalization they
	// should all hash to the same repo ID.
	remotes := []string{
		"https://github.com/user/repo.git",
		"git@github.com:user/repo.git",
		"ssh://git@github.com/user/repo.git",
	}
	var ids []string
	for _, remote := range remotes {
		// Fresh repo per remote so each iteration is independent.
		tmpDir := t.TempDir()
		// Initialize git repo
		cmd := exec.Command("git", "init")
		cmd.Dir = tmpDir
		if err := cmd.Run(); err != nil {
			t.Skipf("git not available: %v", err)
		}
		// Set remote
		cmd = exec.Command("git", "remote", "add", "origin", remote)
		cmd.Dir = tmpDir
		if err := cmd.Run(); err != nil {
			t.Fatalf("git remote add failed for %q: %v", remote, err)
		}
		t.Chdir(tmpDir)
		id, err := ComputeRepoID()
		if err != nil {
			t.Fatalf("ComputeRepoID() for remote %q error = %v", remote, err)
		}
		ids = append(ids, id)
	}
	// All IDs should be the same since they point to the same canonical repo
	for i := 1; i < len(ids); i++ {
		if ids[i] != ids[0] {
			t.Errorf("ComputeRepoID() produced different IDs for same repo:\n remote[0]=%q id=%s\n remote[%d]=%q id=%s",
				remotes[0], ids[0], i, remotes[i], ids[i])
		}
	}
}
// TestGetCloneID_Basic tests GetCloneID returns a consistent ID
func TestGetCloneID_Basic(t *testing.T) {
	// Stand up a fresh git repo and run from inside it.
	repoDir := t.TempDir()
	gitInit := exec.Command("git", "init")
	gitInit.Dir = repoDir
	if err := gitInit.Run(); err != nil {
		t.Skipf("git not available: %v", err)
	}
	t.Chdir(repoDir)
	first, err := GetCloneID()
	if err != nil {
		t.Fatalf("GetCloneID() error = %v", err)
	}
	// Should be a 16-character hex string (8 bytes)
	if len(first) != 16 {
		t.Errorf("GetCloneID() = %q, expected 16 character hex string", first)
	}
	// A second call must yield the exact same ID.
	second, err := GetCloneID()
	if err != nil {
		t.Fatalf("GetCloneID() second call error = %v", err)
	}
	if first != second {
		t.Errorf("GetCloneID() not consistent: %q vs %q", first, second)
	}
}
// TestGetCloneID_DifferentDirs tests GetCloneID produces different IDs for different clones
func TestGetCloneID_DifferentDirs(t *testing.T) {
	// Map from clone ID to the directory that produced it, used to detect
	// collisions across iterations.
	ids := make(map[string]string)
	for i := 0; i < 3; i++ {
		tmpDir := t.TempDir()
		// Initialize git repo
		cmd := exec.Command("git", "init")
		cmd.Dir = tmpDir
		if err := cmd.Run(); err != nil {
			t.Skipf("git not available: %v", err)
		}
		t.Chdir(tmpDir)
		id, err := GetCloneID()
		if err != nil {
			t.Fatalf("GetCloneID() error = %v", err)
		}
		// Each clone should have a unique ID
		if prev, exists := ids[id]; exists {
			t.Errorf("GetCloneID() produced duplicate ID %q for dirs %q and %q", id, prev, tmpDir)
		}
		ids[id] = tmpDir
	}
}
// TestGetCloneID_NotGitRepo tests GetCloneID when not in a git repo
func TestGetCloneID_NotGitRepo(t *testing.T) {
	// Create temporary directory that is NOT a git repo
	tmpDir := t.TempDir()
	t.Chdir(tmpDir)
	// GetCloneID should return an error
	_, err := GetCloneID()
	if err == nil {
		// BUG FIX: must be Fatal, not Error - the Contains check below
		// would dereference a nil error and panic if execution continued.
		t.Fatal("GetCloneID() expected error for non-git directory, got nil")
	}
	if !strings.Contains(err.Error(), "not a git repository") {
		t.Errorf("GetCloneID() error = %q, expected 'not a git repository'", err.Error())
	}
}
// TestGetCloneID_IncludesHostname tests that GetCloneID includes hostname
// to differentiate the same path on different machines
func TestGetCloneID_IncludesHostname(t *testing.T) {
	// This test verifies the concept - we can't actually test different hostnames
	// but we can verify that the same path produces the same ID on this machine
	tmpDir := t.TempDir()
	// Initialize git repo
	cmd := exec.Command("git", "init")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Skipf("git not available: %v", err)
	}
	t.Chdir(tmpDir)
	// Hostname error deliberately ignored; only used for the log message.
	hostname, _ := os.Hostname()
	id, err := GetCloneID()
	if err != nil {
		t.Fatalf("GetCloneID() error = %v", err)
	}
	// Just verify we got a valid ID - we can't test different hostnames
	// but the implementation includes hostname in the hash
	// (NOTE(review): that claim is per the function's documented design,
	// not verifiable from this test alone).
	if len(id) != 16 {
		t.Errorf("GetCloneID() = %q, expected 16 character hex string (hostname=%s)", id, hostname)
	}
}
// TestGetCloneID_Worktree tests GetCloneID in a worktree
func TestGetCloneID_Worktree(t *testing.T) {
	// Create temporary directory for test
	tmpDir := t.TempDir()
	// Initialize main git repo
	mainRepoDir := filepath.Join(tmpDir, "main-repo")
	if err := os.MkdirAll(mainRepoDir, 0755); err != nil {
		t.Fatal(err)
	}
	cmd := exec.Command("git", "init")
	cmd.Dir = mainRepoDir
	if err := cmd.Run(); err != nil {
		t.Skipf("git not available: %v", err)
	}
	// Configure git user (required for the commit below on clean CI hosts)
	cmd = exec.Command("git", "config", "user.email", "test@example.com")
	cmd.Dir = mainRepoDir
	_ = cmd.Run()
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = mainRepoDir
	_ = cmd.Run()
	// Create initial commit (required for worktree)
	dummyFile := filepath.Join(mainRepoDir, "README.md")
	if err := os.WriteFile(dummyFile, []byte("# Test\n"), 0644); err != nil {
		t.Fatal(err)
	}
	cmd = exec.Command("git", "add", "README.md")
	cmd.Dir = mainRepoDir
	_ = cmd.Run()
	cmd = exec.Command("git", "commit", "-m", "Initial commit")
	cmd.Dir = mainRepoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("git commit failed: %v", err)
	}
	// Create a worktree
	worktreeDir := filepath.Join(tmpDir, "worktree")
	cmd = exec.Command("git", "worktree", "add", worktreeDir, "HEAD")
	cmd.Dir = mainRepoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("git worktree add failed: %v", err)
	}
	// BUG FIX: register removal via t.Cleanup, not defer. A defer runs
	// before t.Chdir's cleanup restores the working directory, so the
	// removal executed while the cwd was still inside worktreeDir, which
	// fails on Windows. Registering here - before the t.Chdir calls below -
	// makes this cleanup run after theirs (LIFO), with the cwd restored.
	t.Cleanup(func() {
		cmd := exec.Command("git", "worktree", "remove", worktreeDir)
		cmd.Dir = mainRepoDir
		_ = cmd.Run()
	})
	// Get IDs from both locations
	t.Chdir(mainRepoDir)
	mainID, err := GetCloneID()
	if err != nil {
		t.Fatalf("GetCloneID() in main repo error = %v", err)
	}
	t.Chdir(worktreeDir)
	worktreeID, err := GetCloneID()
	if err != nil {
		t.Fatalf("GetCloneID() in worktree error = %v", err)
	}
	// Worktree should have a DIFFERENT ID than main repo
	// because they're different paths (different clones conceptually)
	if mainID == worktreeID {
		t.Errorf("GetCloneID() returned same ID for main repo and worktree - should be different")
	}
}

View File

@@ -1,732 +0,0 @@
package compact
import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/anthropics/anthropic-sdk-go/option"
	"github.com/steveyegge/beads/internal/storage/sqlite"
	"github.com/steveyegge/beads/internal/types"
)
// setupTestStore creates a test SQLite store for unit tests
//
// The database file lives under t.TempDir() so it is removed automatically.
// The store is pre-seeded with the config keys the compactor requires;
// callers are responsible for calling store.Close().
func setupTestStore(t *testing.T) *sqlite.SQLiteStorage {
	t.Helper()
	tmpDB := t.TempDir() + "/test.db"
	store, err := sqlite.New(context.Background(), tmpDB)
	if err != nil {
		t.Fatalf("failed to create storage: %v", err)
	}
	ctx := context.Background()
	// Set issue_prefix to prevent "database not initialized" errors
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue_prefix: %v", err)
	}
	// Use 7 days minimum for Tier 1 compaction
	if err := store.SetConfig(ctx, "compact_tier1_days", "7"); err != nil {
		t.Fatalf("failed to set config: %v", err)
	}
	// Limit dependency traversal depth for Tier 1 compaction.
	if err := store.SetConfig(ctx, "compact_tier1_dep_levels", "2"); err != nil {
		t.Fatalf("failed to set config: %v", err)
	}
	return store
}
// createTestIssue creates a closed issue eligible for compaction
//
// The issue is closed 8 days ago - past the 7-day compact_tier1_days
// threshold set by setupTestStore - and carries enough text in its
// Description/Design/Notes/AcceptanceCriteria fields for a summary to be
// meaningfully shorter.
func createTestIssue(t *testing.T, store *sqlite.SQLiteStorage, id string) *types.Issue {
	t.Helper()
	ctx := context.Background()
	// Fall back to "bd" if the prefix was never configured.
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	// Issue closed 8 days ago (beyond 7-day threshold for Tier 1)
	closedAt := now.Add(-8 * 24 * time.Hour)
	issue := &types.Issue{
		ID:    id,
		Title: "Test Issue",
		Description: `Implemented a comprehensive authentication system for the application.
The system includes JWT token generation, refresh token handling, password hashing with bcrypt,
rate limiting on login attempts, and session management.`,
		Design: `Authentication Flow:
1. User submits credentials
2. Server validates against database
3. On success, generate JWT with user claims`,
		Notes:              "Performance considerations and testing strategy notes.",
		AcceptanceCriteria: "- Users can register\n- Users can login\n- Protected endpoints work",
		Status:             types.StatusClosed,
		Priority:           2,
		IssueType:          types.TypeTask,
		CreatedAt:          now.Add(-48 * time.Hour),
		UpdatedAt:          now.Add(-24 * time.Hour),
		ClosedAt:           &closedAt,
	}
	if err := store.CreateIssue(ctx, issue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	return issue
}
// TestNew_WithConfig verifies that an explicitly supplied Config is
// applied as-is by New.
func TestNew_WithConfig(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	cfg := &Config{
		Concurrency: 10,
		DryRun:      true,
	}
	compactor, err := New(store, "", cfg)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if got := compactor.config.Concurrency; got != 10 {
		t.Errorf("expected concurrency 10, got %d", got)
	}
	if !compactor.config.DryRun {
		t.Error("expected DryRun to be true")
	}
}
// TestNew_DefaultConcurrency verifies the concurrency default used when
// no Config is supplied at all.
func TestNew_DefaultConcurrency(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	compactor, err := New(store, "", nil)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if got := compactor.config.Concurrency; got != defaultConcurrency {
		t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, got)
	}
}
// TestNew_ZeroConcurrency verifies that a zero Concurrency is replaced
// with the package default.
func TestNew_ZeroConcurrency(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	compactor, err := New(store, "", &Config{Concurrency: 0, DryRun: true})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Zero concurrency should be replaced with default
	if got := compactor.config.Concurrency; got != defaultConcurrency {
		t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, got)
	}
}
// TestNew_NegativeConcurrency verifies that a negative Concurrency is
// replaced with the package default.
func TestNew_NegativeConcurrency(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	compactor, err := New(store, "", &Config{Concurrency: -5, DryRun: true})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Negative concurrency should be replaced with default
	if got := compactor.config.Concurrency; got != defaultConcurrency {
		t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, got)
	}
}
// TestNew_WithAPIKey verifies that an explicitly supplied API key is
// stored in the compactor config.
func TestNew_WithAPIKey(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	// Clear env var to test explicit key
	t.Setenv("ANTHROPIC_API_KEY", "")
	cfg := &Config{
		DryRun: true, // DryRun so we don't actually need a valid key
	}
	compactor, err := New(store, "test-api-key", cfg)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if got := compactor.config.APIKey; got != "test-api-key" {
		t.Errorf("expected api key 'test-api-key', got '%s'", got)
	}
}
// TestNew_NoAPIKeyFallsToDryRun verifies the fallback to dry-run mode
// when neither an explicit key nor ANTHROPIC_API_KEY is available.
func TestNew_NoAPIKeyFallsToDryRun(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	// Clear env var
	t.Setenv("ANTHROPIC_API_KEY", "")
	// Ask for a real client (DryRun false) with no key available anywhere.
	compactor, err := New(store, "", &Config{DryRun: false})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Should fall back to DryRun when no API key
	if !compactor.config.DryRun {
		t.Error("expected DryRun to be true when no API key provided")
	}
}
// TestNew_AuditSettings verifies that AuditEnabled and Actor from the
// Config are propagated to the haiku client created by New.
func TestNew_AuditSettings(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	// Provide a key via the environment so New builds a real haiku client.
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	config := &Config{
		AuditEnabled: true,
		Actor:        "test-actor",
	}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if c.haiku == nil {
		t.Fatal("expected haiku client to be created")
	}
	if !c.haiku.auditEnabled {
		t.Error("expected auditEnabled to be true")
	}
	if c.haiku.auditActor != "test-actor" {
		t.Errorf("expected auditActor 'test-actor', got '%s'", c.haiku.auditActor)
	}
}
// TestCompactTier1_DryRun verifies that dry-run mode reports what would
// happen via a "dry-run:" error and leaves the issue untouched.
func TestCompactTier1_DryRun(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue := createTestIssue(t, store, "bd-1")
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx := context.Background()
	err = c.CompactTier1(ctx, issue.ID)
	// Dry-run signals "would have compacted" through an error value.
	if err == nil {
		t.Fatal("expected dry-run error, got nil")
	}
	if !strings.HasPrefix(err.Error(), "dry-run:") {
		t.Errorf("expected dry-run error prefix, got: %v", err)
	}
	// Verify issue was not modified
	afterIssue, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if afterIssue.Description != issue.Description {
		t.Error("dry-run should not modify issue")
	}
}
// TestCompactTier1_IneligibleOpenIssue verifies that an open (non-closed)
// issue is rejected with a "not eligible" error.
func TestCompactTier1_IneligibleOpenIssue(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	ctx := context.Background()
	// Fall back to "bd" if the prefix was never configured.
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	// Open issue with no ClosedAt - ineligible for Tier 1 compaction.
	issue := &types.Issue{
		ID:          "bd-open",
		Title:       "Open Issue",
		Description: "Should not be compacted",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	if err := store.CreateIssue(ctx, issue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	err = c.CompactTier1(ctx, issue.ID)
	if err == nil {
		t.Fatal("expected error for ineligible issue, got nil")
	}
	if !strings.Contains(err.Error(), "not eligible") {
		t.Errorf("expected 'not eligible' error, got: %v", err)
	}
}
// TestCompactTier1_NonexistentIssue verifies that compacting an unknown
// issue ID returns an error.
func TestCompactTier1_NonexistentIssue(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	compactor, err := New(store, "", &Config{DryRun: true})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	if err := compactor.CompactTier1(context.Background(), "bd-nonexistent"); err == nil {
		t.Fatal("expected error for nonexistent issue")
	}
}
// TestCompactTier1_ContextCanceled verifies that CompactTier1 honors
// context cancellation and surfaces context.Canceled.
func TestCompactTier1_ContextCanceled(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue := createTestIssue(t, store, "bd-cancel")
	config := &Config{DryRun: true}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // Cancel immediately
	err = c.CompactTier1(ctx, issue.ID)
	if err == nil {
		t.Fatal("expected error for canceled context")
	}
	// errors.Is instead of ==: still passes if the implementation wraps
	// the cancellation error with %w anywhere along the call chain.
	if !errors.Is(err, context.Canceled) {
		t.Errorf("expected context.Canceled, got: %v", err)
	}
}
// TestCompactTier1Batch_EmptyList verifies that batch compaction of an
// empty ID list is a no-op that returns nil results.
func TestCompactTier1Batch_EmptyList(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	compactor, err := New(store, "", &Config{DryRun: true})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	results, err := compactor.CompactTier1Batch(context.Background(), []string{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if results != nil {
		t.Errorf("expected nil results for empty list, got: %v", results)
	}
}
// TestCompactTier1Batch_DryRun verifies that a dry-run batch returns one
// result per issue, each with a populated OriginalSize and no error.
func TestCompactTier1Batch_DryRun(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue1 := createTestIssue(t, store, "bd-batch-1")
	issue2 := createTestIssue(t, store, "bd-batch-2")
	config := &Config{DryRun: true, Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ctx := context.Background()
	results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	// In batch mode a dry-run result carries no error (unlike the single
	// CompactTier1 call, which signals dry-run via an error value).
	for _, result := range results {
		if result.Err != nil {
			t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
		}
		if result.OriginalSize == 0 {
			t.Errorf("expected non-zero original size for %s", result.IssueID)
		}
	}
}
// TestCompactTier1Batch_MixedEligibility verifies that a batch containing
// one eligible (closed) and one ineligible (open) issue reports a per-issue
// error only for the ineligible one.
func TestCompactTier1Batch_MixedEligibility(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	closedIssue := createTestIssue(t, store, "bd-closed")
	ctx := context.Background()
	// Fall back to "bd" if the prefix was never configured.
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	// Open issue - ineligible for Tier 1 compaction.
	openIssue := &types.Issue{
		ID:          "bd-open",
		Title:       "Open Issue",
		Description: "Should not be compacted",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	if err := store.CreateIssue(ctx, openIssue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	config := &Config{DryRun: true, Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	results, err := c.CompactTier1Batch(ctx, []string{closedIssue.ID, openIssue.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	// Results may arrive in any order (concurrent workers), so match by ID.
	var foundClosed, foundOpen bool
	for _, result := range results {
		switch result.IssueID {
		case openIssue.ID:
			foundOpen = true
			if result.Err == nil {
				t.Error("expected error for ineligible issue")
			}
		case closedIssue.ID:
			foundClosed = true
			if result.Err != nil {
				t.Errorf("unexpected error for eligible issue: %v", result.Err)
			}
		}
	}
	if !foundClosed || !foundOpen {
		t.Error("missing expected results")
	}
}
// TestCompactTier1Batch_NonexistentIssue verifies that a batch with one
// valid and one unknown ID reports exactly one success and one failure.
func TestCompactTier1Batch_NonexistentIssue(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	closedIssue := createTestIssue(t, store, "bd-closed")
	compactor, err := New(store, "", &Config{DryRun: true, Concurrency: 2})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	ids := []string{closedIssue.ID, "bd-nonexistent"}
	results, err := compactor.CompactTier1Batch(context.Background(), ids)
	if err != nil {
		t.Fatalf("batch operation failed: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	// Tally per-issue outcomes: the unknown ID fails, the closed issue succeeds.
	var successCount, errorCount int
	for _, r := range results {
		if r.Err != nil {
			errorCount++
			continue
		}
		successCount++
	}
	if successCount != 1 {
		t.Errorf("expected 1 success, got %d", successCount)
	}
	if errorCount != 1 {
		t.Errorf("expected 1 error, got %d", errorCount)
	}
}
// TestCompactTier1_WithMockAPI verifies a full (non-dry-run) Tier 1
// compaction against a mocked Anthropic API: the description is replaced
// by the summary and the Design/Notes/AcceptanceCriteria fields are cleared.
func TestCompactTier1_WithMockAPI(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue := createTestIssue(t, store, "bd-mock-api")
	// Create mock server that returns a short summary
	// (shaped like an Anthropic Messages API response).
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Encode error deliberately unchecked in this test-only handler.
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":    "msg_test123",
			"type":  "message",
			"role":  "assistant",
			"model": "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{
				{
					"type": "text",
					"text": "**Summary:** Short summary.\n\n**Key Decisions:** None.\n\n**Resolution:** Done.",
				},
			},
		})
	}))
	defer server.Close()
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	// Create compactor with mock API
	config := &Config{Concurrency: 1}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Replace the haiku client with one pointing to mock server
	// (retries disabled so a handler bug fails fast).
	c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create mock haiku client: %v", err)
	}
	ctx := context.Background()
	err = c.CompactTier1(ctx, issue.ID)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Verify issue was updated
	afterIssue, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if afterIssue.Description == issue.Description {
		t.Error("description should have been updated")
	}
	if afterIssue.Design != "" {
		t.Error("design should be cleared")
	}
	if afterIssue.Notes != "" {
		t.Error("notes should be cleared")
	}
	if afterIssue.AcceptanceCriteria != "" {
		t.Error("acceptance criteria should be cleared")
	}
}
// TestCompactTier1_SummaryNotShorter verifies the size guard: when the
// generated summary would be larger than the original content,
// CompactTier1 fails with a "would increase size" error and leaves the
// issue untouched.
func TestCompactTier1_SummaryNotShorter(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	// Create issue with very short content
	ctx := context.Background()
	prefix, _ := store.GetConfig(ctx, "issue_prefix")
	if prefix == "" {
		prefix = "bd"
	}
	now := time.Now()
	// Closed 8 days ago — presumably beyond the compaction age
	// threshold; confirm against CompactTier1's eligibility rules.
	closedAt := now.Add(-8 * 24 * time.Hour)
	issue := &types.Issue{
		ID:          "bd-short",
		Title:       "Short",
		Description: "X", // Very short description
		Status:      types.StatusClosed,
		Priority:    2,
		IssueType:   types.TypeTask,
		CreatedAt:   now.Add(-48 * time.Hour),
		UpdatedAt:   now.Add(-24 * time.Hour),
		ClosedAt:    &closedAt,
	}
	if err := store.CreateIssue(ctx, issue, prefix); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Create mock server that returns a longer summary
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":    "msg_test123",
			"type":  "message",
			"role":  "assistant",
			"model": "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{
				{
					"type": "text",
					"text": "**Summary:** This is a much longer summary that exceeds the original content length.\n\n**Key Decisions:** Multiple decisions.\n\n**Resolution:** Complete.",
				},
			},
		})
	}))
	defer server.Close()
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	config := &Config{Concurrency: 1}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Point the haiku client at the mock server; retries disabled.
	c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create mock haiku client: %v", err)
	}
	err = c.CompactTier1(ctx, issue.ID)
	if err == nil {
		t.Fatal("expected error when summary is longer")
	}
	if !strings.Contains(err.Error(), "would increase size") {
		t.Errorf("expected 'would increase size' error, got: %v", err)
	}
	// Verify issue was NOT modified (kept original)
	afterIssue, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if afterIssue.Description != issue.Description {
		t.Error("description should not have been modified when summary is longer")
	}
}
// TestCompactTier1Batch_WithMockAPI compacts two issues in one batch
// (concurrency 2) against a stubbed API and verifies every result is a
// success with a non-zero, strictly reduced compacted size.
func TestCompactTier1Batch_WithMockAPI(t *testing.T) {
	store := setupTestStore(t)
	defer store.Close()
	issue1 := createTestIssue(t, store, "bd-batch-mock-1")
	issue2 := createTestIssue(t, store, "bd-batch-mock-2")
	// Create mock server
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":    "msg_test123",
			"type":  "message",
			"role":  "assistant",
			"model": "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{
				{
					"type": "text",
					"text": "**Summary:** Compacted.\n\n**Key Decisions:** None.\n\n**Resolution:** Done.",
				},
			},
		})
	}))
	defer server.Close()
	t.Setenv("ANTHROPIC_API_KEY", "test-key")
	// Concurrency 2 exercises the parallel path of the batch API.
	config := &Config{Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}
	// Point the haiku client at the mock server; retries disabled.
	c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create mock haiku client: %v", err)
	}
	ctx := context.Background()
	results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}
	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}
	for _, result := range results {
		if result.Err != nil {
			t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
		}
		if result.CompactedSize == 0 {
			t.Errorf("expected non-zero compacted size for %s", result.IssueID)
		}
		if result.CompactedSize >= result.OriginalSize {
			t.Errorf("expected size reduction for %s: %d → %d", result.IssueID, result.OriginalSize, result.CompactedSize)
		}
	}
}
// TestResult_Fields checks that a Result struct literal retains each
// field value it was constructed with.
func TestResult_Fields(t *testing.T) {
	res := &Result{
		IssueID:       "bd-1",
		OriginalSize:  100,
		CompactedSize: 50,
	}
	if got := res.IssueID; got != "bd-1" {
		t.Errorf("expected IssueID 'bd-1', got '%s'", got)
	}
	if got := res.OriginalSize; got != 100 {
		t.Errorf("expected OriginalSize 100, got %d", got)
	}
	if got := res.CompactedSize; got != 50 {
		t.Errorf("expected CompactedSize 50, got %d", got)
	}
	if res.Err != nil {
		t.Errorf("expected nil Err, got %v", res.Err)
	}
}
// TestConfig_Fields checks that a Config struct literal retains each
// field value it was constructed with.
func TestConfig_Fields(t *testing.T) {
	cfg := &Config{
		APIKey:       "test-key",
		Concurrency:  10,
		DryRun:       true,
		AuditEnabled: true,
		Actor:        "test-actor",
	}
	if got := cfg.APIKey; got != "test-key" {
		t.Errorf("expected APIKey 'test-key', got '%s'", got)
	}
	if got := cfg.Concurrency; got != 10 {
		t.Errorf("expected Concurrency 10, got %d", got)
	}
	if !cfg.DryRun {
		t.Error("expected DryRun true")
	}
	if !cfg.AuditEnabled {
		t.Error("expected AuditEnabled true")
	}
	if got := cfg.Actor; got != "test-actor" {
		t.Errorf("expected Actor 'test-actor', got '%s'", got)
	}
}

View File

@@ -1,171 +0,0 @@
package compact
import (
"os"
"os/exec"
"path/filepath"
"regexp"
"testing"
)
// TestGetCurrentCommitHash_InGitRepo verifies that, when run from
// inside a git checkout (the beads repo itself), GetCurrentCommitHash
// returns a full 40-character lowercase hex SHA.
func TestGetCurrentCommitHash_InGitRepo(t *testing.T) {
	// This test runs in the actual beads repo, so it should return a valid hash
	hash := GetCurrentCommitHash()
	// Should be a 40-character hex string
	if len(hash) != 40 {
		t.Errorf("expected 40-char hash, got %d chars: %s", len(hash), hash)
	}
	// Should be valid hex
	matched, err := regexp.MatchString("^[0-9a-f]{40}$", hash)
	if err != nil {
		t.Fatalf("regex error: %v", err)
	}
	if !matched {
		t.Errorf("expected hex hash, got: %s", hash)
	}
}
// TestGetCurrentCommitHash_NotInGitRepo verifies the empty-string
// fallback when the working directory is not inside any git repository.
// NOTE(review): assumes the system temp dir is not itself under a git
// repo — confirm on unusual CI setups.
func TestGetCurrentCommitHash_NotInGitRepo(t *testing.T) {
	// Save current directory
	originalDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get cwd: %v", err)
	}
	// Create a temporary directory that is NOT a git repo
	tmpDir := t.TempDir()
	// Change to the temp directory
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to chdir to temp dir: %v", err)
	}
	defer func() {
		// Restore original directory
		if err := os.Chdir(originalDir); err != nil {
			t.Fatalf("failed to restore cwd: %v", err)
		}
	}()
	// Should return empty string when not in a git repo
	hash := GetCurrentCommitHash()
	if hash != "" {
		t.Errorf("expected empty string outside git repo, got: %s", hash)
	}
}
// TestGetCurrentCommitHash_NewGitRepo builds a brand-new git repo with
// a single commit, chdirs into it, and checks that GetCurrentCommitHash
// returns the same 40-char hash as `git rev-parse HEAD`.
func TestGetCurrentCommitHash_NewGitRepo(t *testing.T) {
	// Save current directory
	originalDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get cwd: %v", err)
	}
	// Create a temporary directory
	tmpDir := t.TempDir()
	// Initialize a new git repo
	cmd := exec.Command("git", "init")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to init git repo: %v", err)
	}
	// Configure git user for the commit
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to set git email: %v", err)
	}
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to set git name: %v", err)
	}
	// Create a file and commit it
	testFile := filepath.Join(tmpDir, "test.txt")
	if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
		t.Fatalf("failed to write test file: %v", err)
	}
	cmd = exec.Command("git", "add", ".")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to git add: %v", err)
	}
	cmd = exec.Command("git", "commit", "-m", "test commit")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to git commit: %v", err)
	}
	// Change to the new git repo
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to chdir to git repo: %v", err)
	}
	defer func() {
		// Restore original directory
		if err := os.Chdir(originalDir); err != nil {
			t.Fatalf("failed to restore cwd: %v", err)
		}
	}()
	// Should return a valid hash
	hash := GetCurrentCommitHash()
	if len(hash) != 40 {
		t.Errorf("expected 40-char hash, got %d chars: %s", len(hash), hash)
	}
	// Verify it matches git rev-parse output
	cmd = exec.Command("git", "rev-parse", "HEAD")
	cmd.Dir = tmpDir
	out, err := cmd.Output()
	if err != nil {
		t.Fatalf("failed to run git rev-parse: %v", err)
	}
	expected := string(out)
	// Trim the trailing newline defensively; blindly slicing
	// expected[:len(expected)-1] would panic on empty output.
	if n := len(expected); n > 0 && expected[n-1] == '\n' {
		expected = expected[:n-1]
	}
	if hash != expected {
		t.Errorf("hash mismatch: got %s, expected %s", hash, expected)
	}
}
// TestGetCurrentCommitHash_EmptyGitRepo verifies that a git repo with
// no commits (no HEAD yet) yields an empty string rather than an error
// or a partial hash.
func TestGetCurrentCommitHash_EmptyGitRepo(t *testing.T) {
	// Save current directory
	originalDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get cwd: %v", err)
	}
	// Create a temporary directory
	tmpDir := t.TempDir()
	// Initialize a new git repo but don't commit anything
	cmd := exec.Command("git", "init")
	cmd.Dir = tmpDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to init git repo: %v", err)
	}
	// Change to the empty git repo
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("failed to chdir to git repo: %v", err)
	}
	defer func() {
		// Restore original directory
		if err := os.Chdir(originalDir); err != nil {
			t.Fatalf("failed to restore cwd: %v", err)
		}
	}()
	// Should return empty string for repo with no commits
	hash := GetCurrentCommitHash()
	if hash != "" {
		t.Errorf("expected empty string for empty git repo, got: %s", hash)
	}
}

View File

@@ -38,7 +38,7 @@ type HaikuClient struct {
}
// NewHaikuClient creates a new Haiku API client. Env var ANTHROPIC_API_KEY takes precedence over explicit apiKey.
func NewHaikuClient(apiKey string, opts ...option.RequestOption) (*HaikuClient, error) {
func NewHaikuClient(apiKey string) (*HaikuClient, error) {
envKey := os.Getenv("ANTHROPIC_API_KEY")
if envKey != "" {
apiKey = envKey
@@ -47,10 +47,7 @@ func NewHaikuClient(apiKey string, opts ...option.RequestOption) (*HaikuClient,
return nil, fmt.Errorf("%w: set ANTHROPIC_API_KEY environment variable or provide via config", ErrAPIKeyRequired)
}
// Build options: API key first, then any additional options (for testing)
allOpts := []option.RequestOption{option.WithAPIKey(apiKey)}
allOpts = append(allOpts, opts...)
client := anthropic.NewClient(allOpts...)
client := anthropic.NewClient(option.WithAPIKey(apiKey))
tier1Tmpl, err := template.New("tier1").Parse(tier1PromptTemplate)
if err != nil {

View File

@@ -2,18 +2,11 @@ package compact
import (
"context"
"encoding/json"
"errors"
"net"
"net/http"
"net/http/httptest"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
"github.com/steveyegge/beads/internal/types"
)
@@ -196,399 +189,3 @@ func TestIsRetryable(t *testing.T) {
})
}
}
// mockTimeoutError is a minimal net.Error stand-in used to exercise
// timeout-based retry classification in tests.
type mockTimeoutError struct {
	timeout bool
}

// Error returns a fixed description for the mock error.
func (e *mockTimeoutError) Error() string {
	return "mock timeout error"
}

// Timeout reports whether this mock represents a timeout condition.
func (e *mockTimeoutError) Timeout() bool {
	return e.timeout
}

// Temporary always reports false.
func (e *mockTimeoutError) Temporary() bool {
	return false
}
// TestIsRetryable_NetworkTimeout verifies that isRetryable treats
// net.Error timeouts as retryable and non-timeout network errors as
// permanent failures.
func TestIsRetryable_NetworkTimeout(t *testing.T) {
	// Network timeout should be retryable
	timeoutErr := &mockTimeoutError{timeout: true}
	if !isRetryable(timeoutErr) {
		t.Error("network timeout error should be retryable")
	}
	// Non-timeout network error should not be retryable
	nonTimeoutErr := &mockTimeoutError{timeout: false}
	if isRetryable(nonTimeoutErr) {
		t.Error("non-timeout network error should not be retryable")
	}
}
// TestIsRetryable_APIErrors table-tests isRetryable against Anthropic
// API status codes: 429 and 5xx are retryable, other 4xx are not.
func TestIsRetryable_APIErrors(t *testing.T) {
	tests := []struct {
		name       string
		statusCode int
		expected   bool
	}{
		{"rate limit 429", 429, true},
		{"server error 500", 500, true},
		{"server error 502", 502, true},
		{"server error 503", 503, true},
		{"bad request 400", 400, false},
		{"unauthorized 401", 401, false},
		{"forbidden 403", 403, false},
		{"not found 404", 404, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			apiErr := &anthropic.Error{StatusCode: tt.statusCode}
			got := isRetryable(apiErr)
			if got != tt.expected {
				t.Errorf("isRetryable(API error %d) = %v, want %v", tt.statusCode, got, tt.expected)
			}
		})
	}
}
// createMockAnthropicServer creates a mock server that returns Anthropic API responses
func createMockAnthropicServer(handler http.HandlerFunc) *httptest.Server {
return httptest.NewServer(handler)
}
// mockAnthropicResponse builds a map mirroring the JSON envelope of a
// successful Anthropic Messages API reply that carries a single text
// content block with the given text.
func mockAnthropicResponse(text string) map[string]interface{} {
	resp := map[string]interface{}{
		"id":            "msg_test123",
		"type":          "message",
		"role":          "assistant",
		"model":         "claude-3-5-haiku-20241022",
		"stop_reason":   "end_turn",
		"stop_sequence": nil,
	}
	// Fixed token counts; tests only need a plausible usage object.
	resp["usage"] = map[string]int{
		"input_tokens":  100,
		"output_tokens": 50,
	}
	resp["content"] = []map[string]interface{}{
		{"type": "text", "text": text},
	}
	return resp
}
// TestSummarizeTier1_MockAPI sends one issue through SummarizeTier1
// against a stubbed endpoint, validating both the outgoing request
// (POST to .../messages) and that the summary text comes back intact.
func TestSummarizeTier1_MockAPI(t *testing.T) {
	// Create mock server that returns a valid summary
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		// Verify request method and path
		if r.Method != "POST" {
			t.Errorf("expected POST, got %s", r.Method)
		}
		if !strings.HasSuffix(r.URL.Path, "/messages") {
			t.Errorf("expected /messages path, got %s", r.URL.Path)
		}
		w.Header().Set("Content-Type", "application/json")
		resp := mockAnthropicResponse("**Summary:** Fixed auth bug.\n\n**Key Decisions:** Used OAuth.\n\n**Resolution:** Complete.")
		json.NewEncoder(w).Encode(resp)
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-1",
		Title:       "Fix authentication bug",
		Description: "OAuth login was broken",
		Status:      types.StatusClosed,
	}
	ctx := context.Background()
	result, err := client.SummarizeTier1(ctx, issue)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !strings.Contains(result, "**Summary:**") {
		t.Error("result should contain Summary section")
	}
	if !strings.Contains(result, "Fixed auth bug") {
		t.Error("result should contain summary text")
	}
}
// TestSummarizeTier1_APIError verifies that a 400 response from the API
// surfaces from SummarizeTier1 as a non-retryable error.
func TestSummarizeTier1_APIError(t *testing.T) {
	// Create mock server that returns an error
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusBadRequest)
		json.NewEncoder(w).Encode(map[string]interface{}{
			"type": "error",
			"error": map[string]interface{}{
				"type":    "invalid_request_error",
				"message": "Invalid API key",
			},
		})
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-1",
		Title:       "Test",
		Description: "Test",
		Status:      types.StatusClosed,
	}
	ctx := context.Background()
	_, err = client.SummarizeTier1(ctx, issue)
	if err == nil {
		t.Fatal("expected error from API")
	}
	if !strings.Contains(err.Error(), "non-retryable") {
		t.Errorf("expected non-retryable error, got: %v", err)
	}
}
// TestCallWithRetry_RetriesOn429 drives callWithRetry through two
// consecutive 429 responses followed by a success, and asserts exactly
// three attempts were made.
func TestCallWithRetry_RetriesOn429(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		// Handler may be hit concurrently across retries; count atomically.
		attempt := atomic.AddInt32(&attempts, 1)
		if attempt <= 2 {
			// First two attempts return 429
			w.WriteHeader(http.StatusTooManyRequests)
			json.NewEncoder(w).Encode(map[string]interface{}{
				"type": "error",
				"error": map[string]interface{}{
					"type":    "rate_limit_error",
					"message": "Rate limited",
				},
			})
			return
		}
		// Third attempt succeeds
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(mockAnthropicResponse("Success after retries"))
	})
	defer server.Close()
	// Disable SDK's internal retries to test our retry logic only
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	// Use short backoff for testing
	client.initialBackoff = 10 * time.Millisecond
	ctx := context.Background()
	result, err := client.callWithRetry(ctx, "test prompt")
	if err != nil {
		t.Fatalf("expected success after retries, got: %v", err)
	}
	if result != "Success after retries" {
		t.Errorf("expected 'Success after retries', got: %s", result)
	}
	if attempts != 3 {
		t.Errorf("expected 3 attempts, got: %d", attempts)
	}
}
// TestCallWithRetry_RetriesOn500 verifies that a single 500 response is
// retried and the subsequent success is returned to the caller.
func TestCallWithRetry_RetriesOn500(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		attempt := atomic.AddInt32(&attempts, 1)
		if attempt == 1 {
			// First attempt returns 500
			w.WriteHeader(http.StatusInternalServerError)
			json.NewEncoder(w).Encode(map[string]interface{}{
				"type": "error",
				"error": map[string]interface{}{
					"type":    "api_error",
					"message": "Internal server error",
				},
			})
			return
		}
		// Second attempt succeeds
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(mockAnthropicResponse("Recovered from 500"))
	})
	defer server.Close()
	// Disable SDK's internal retries to test our retry logic only
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	// Use short backoff for testing
	client.initialBackoff = 10 * time.Millisecond
	ctx := context.Background()
	result, err := client.callWithRetry(ctx, "test prompt")
	if err != nil {
		t.Fatalf("expected success after retry, got: %v", err)
	}
	if result != "Recovered from 500" {
		t.Errorf("expected 'Recovered from 500', got: %s", result)
	}
}
// TestCallWithRetry_ExhaustsRetries verifies that with maxRetries=2 a
// permanently rate-limited endpoint produces a "failed after" error
// after exactly 3 attempts (initial + 2 retries).
func TestCallWithRetry_ExhaustsRetries(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&attempts, 1)
		// Always return 429
		w.WriteHeader(http.StatusTooManyRequests)
		json.NewEncoder(w).Encode(map[string]interface{}{
			"type": "error",
			"error": map[string]interface{}{
				"type":    "rate_limit_error",
				"message": "Rate limited",
			},
		})
	})
	defer server.Close()
	// Disable SDK's internal retries to test our retry logic only
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	// Keep backoff tiny so the exhaustion path stays fast.
	client.initialBackoff = 1 * time.Millisecond
	client.maxRetries = 2
	ctx := context.Background()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected error after exhausting retries")
	}
	if !strings.Contains(err.Error(), "failed after") {
		t.Errorf("expected 'failed after' error, got: %v", err)
	}
	// Initial attempt + 2 retries = 3 total
	if attempts != 3 {
		t.Errorf("expected 3 attempts, got: %d", attempts)
	}
}
// TestCallWithRetry_NoRetryOn400 verifies that a 400 response fails
// immediately with a non-retryable error and no second attempt.
func TestCallWithRetry_NoRetryOn400(t *testing.T) {
	var attempts int32
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt32(&attempts, 1)
		w.WriteHeader(http.StatusBadRequest)
		json.NewEncoder(w).Encode(map[string]interface{}{
			"type": "error",
			"error": map[string]interface{}{
				"type":    "invalid_request_error",
				"message": "Bad request",
			},
		})
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	client.initialBackoff = 10 * time.Millisecond
	ctx := context.Background()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected error for bad request")
	}
	if !strings.Contains(err.Error(), "non-retryable") {
		t.Errorf("expected non-retryable error, got: %v", err)
	}
	if attempts != 1 {
		t.Errorf("expected only 1 attempt for non-retryable error, got: %d", attempts)
	}
}
// TestCallWithRetry_ContextTimeout verifies that a handler slower than
// the caller's context deadline yields context.DeadlineExceeded.
func TestCallWithRetry_ContextTimeout(t *testing.T) {
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		// Delay longer than context timeout
		time.Sleep(200 * time.Millisecond)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(mockAnthropicResponse("too late"))
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	// 50ms deadline vs a 200ms handler guarantees the timeout path.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected timeout error")
	}
	if !errors.Is(err, context.DeadlineExceeded) {
		t.Errorf("expected context.DeadlineExceeded, got: %v", err)
	}
}
// TestCallWithRetry_EmptyContent verifies that a well-formed response
// whose content array is empty produces a "no content blocks" error.
func TestCallWithRetry_EmptyContent(t *testing.T) {
	server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Return response with empty content array
		json.NewEncoder(w).Encode(map[string]interface{}{
			"id":      "msg_test123",
			"type":    "message",
			"role":    "assistant",
			"model":   "claude-3-5-haiku-20241022",
			"content": []map[string]interface{}{},
		})
	})
	defer server.Close()
	client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
	if err != nil {
		t.Fatalf("failed to create client: %v", err)
	}
	ctx := context.Background()
	_, err = client.callWithRetry(ctx, "test prompt")
	if err == nil {
		t.Fatal("expected error for empty content")
	}
	if !strings.Contains(err.Error(), "no content blocks") {
		t.Errorf("expected 'no content blocks' error, got: %v", err)
	}
}
// TestBytesWriter verifies that bytesWriter appends each Write call to
// its buffer and reports the full length of the chunk as consumed.
func TestBytesWriter(t *testing.T) {
	w := &bytesWriter{}
	for _, chunk := range []string{"hello", " world"} {
		n, err := w.Write([]byte(chunk))
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if n != len(chunk) {
			t.Errorf("expected n=%d, got %d", len(chunk), n)
		}
	}
	if string(w.buf) != "hello world" {
		t.Errorf("expected 'hello world', got '%s'", string(w.buf))
	}
}
// Compile-time assertion that mockTimeoutError satisfies net.Error.
var _ net.Error = (*mockTimeoutError)(nil)

View File

@@ -306,43 +306,6 @@ func ResolveExternalProjectPath(projectName string) string {
return path
}
// HookEntry represents a single config-based hook declared under the
// hooks section of .beads/config.yaml.
type HookEntry struct {
	Command string `yaml:"command" mapstructure:"command"` // Shell command to run (required)
	Name    string `yaml:"name" mapstructure:"name"`       // Optional display name used in warnings
}
// GetCloseHooks returns the on_close hooks from config.
//
// Hooks live under the "hooks.on_close" key as a list of entries with a
// required "command" field and an optional "name". Entries without a
// command are skipped. Returns nil when config is uninitialized or the
// key is absent.
func GetCloseHooks() []HookEntry {
	if v == nil {
		return nil
	}
	raw := v.Get("hooks.on_close")
	if raw == nil {
		return nil
	}
	// Handle slice of maps (from YAML parsing)
	rawSlice, ok := raw.([]interface{})
	if !ok {
		return nil
	}
	var hooks []HookEntry
	for _, item := range rawSlice {
		var entry HookEntry
		switch m := item.(type) {
		case map[string]interface{}:
			entry.Command, _ = m["command"].(string)
			entry.Name, _ = m["name"].(string)
		case map[interface{}]interface{}:
			// Some YAML decoders (yaml.v2) produce interface-keyed maps;
			// previously these entries were silently dropped.
			entry.Command, _ = m["command"].(string)
			entry.Name, _ = m["name"].(string)
		default:
			continue
		}
		if entry.Command != "" {
			hooks = append(hooks, entry)
		}
	}
	return hooks
}
// GetIdentity resolves the user's identity for messaging.
// Priority chain:
// 1. flagValue (if non-empty, from --identity flag)

View File

@@ -1,66 +0,0 @@
// Package hooks provides a hook system for extensibility.
// This file implements config-based hooks defined in .beads/config.yaml.
package hooks
import (
"context"
"fmt"
"os"
"os/exec"
"strconv"
"time"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/types"
)
// RunConfigCloseHooks executes all on_close hooks from config.yaml.
// Hook commands receive issue data via environment variables:
//   - BEAD_ID: Issue ID (e.g., bd-abc1)
//   - BEAD_TITLE: Issue title
//   - BEAD_TYPE: Issue type (task, bug, feature, etc.)
//   - BEAD_PRIORITY: Priority (0-4)
//   - BEAD_CLOSE_REASON: Close reason if provided
//
// Hooks run synchronously but failures are logged as warnings and don't
// block the close operation.
func RunConfigCloseHooks(ctx context.Context, issue *types.Issue) {
	hooks := config.GetCloseHooks()
	if len(hooks) == 0 {
		return
	}
	// Build environment variables for hooks (inherits the parent env).
	env := append(os.Environ(),
		"BEAD_ID="+issue.ID,
		"BEAD_TITLE="+issue.Title,
		"BEAD_TYPE="+string(issue.IssueType),
		"BEAD_PRIORITY="+strconv.Itoa(issue.Priority),
		"BEAD_CLOSE_REASON="+issue.CloseReason,
	)
	// Each hook gets its own 10-second budget.
	timeout := 10 * time.Second
	for _, hook := range hooks {
		hookCtx, cancel := context.WithTimeout(ctx, timeout)
		// #nosec G204 -- command comes from user's config file
		cmd := exec.CommandContext(hookCtx, "sh", "-c", hook.Command)
		cmd.Env = env
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		err := cmd.Run()
		// cancel is called immediately (not deferred) so timeout contexts
		// don't accumulate across loop iterations.
		cancel()
		if err != nil {
			// Log warning but don't fail the close
			name := hook.Name
			if name == "" {
				name = hook.Command
			}
			fmt.Fprintf(os.Stderr, "Warning: close hook %q failed: %v\n", name, err)
		}
	}
}

View File

@@ -1,271 +0,0 @@
package hooks
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/types"
)
// TestRunConfigCloseHooks_NoHooks verifies the no-op path: with no
// config.yaml present, RunConfigCloseHooks returns without panicking.
func TestRunConfigCloseHooks_NoHooks(t *testing.T) {
	// Create a temp dir without any config
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config so it picks up the (empty) temp dir state.
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{ID: "bd-test", Title: "Test Issue"}
	ctx := context.Background()
	// Should not panic with no hooks
	RunConfigCloseHooks(ctx, issue)
}
// TestRunConfigCloseHooks_ExecutesCommand verifies that a configured
// on_close hook runs and receives BEAD_ID/BEAD_TITLE via environment,
// by having the hook write them to a file that is then inspected.
func TestRunConfigCloseHooks_ExecutesCommand(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	outputFile := filepath.Join(tmpDir, "hook_output.txt")
	// Create config.yaml with a close hook
	configContent := `hooks:
  on_close:
    - name: test-hook
      command: echo "$BEAD_ID $BEAD_TITLE" > ` + outputFile + `
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-abc1",
		Title:       "Test Issue",
		IssueType:   types.TypeBug,
		Priority:    1,
		CloseReason: "Fixed",
	}
	ctx := context.Background()
	RunConfigCloseHooks(ctx, issue)
	// Wait for hook to complete (defensive; hooks run synchronously)
	time.Sleep(100 * time.Millisecond)
	// Verify output
	output, err := os.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Failed to read output file: %v", err)
	}
	expected := "bd-abc1 Test Issue"
	if !strings.Contains(string(output), expected) {
		t.Errorf("Hook output = %q, want to contain %q", string(output), expected)
	}
}
// TestRunConfigCloseHooks_EnvVars verifies that every documented hook
// environment variable (ID, TYPE, PRIORITY, CLOSE_REASON) reaches the
// hook command with the expected values.
func TestRunConfigCloseHooks_EnvVars(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	outputFile := filepath.Join(tmpDir, "env_output.txt")
	// Create config.yaml with a close hook that outputs all env vars
	configContent := `hooks:
  on_close:
    - name: env-check
      command: echo "ID=$BEAD_ID TYPE=$BEAD_TYPE PRIORITY=$BEAD_PRIORITY REASON=$BEAD_CLOSE_REASON" > ` + outputFile + `
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{
		ID:          "bd-xyz9",
		Title:       "Bug Fix",
		IssueType:   types.TypeFeature,
		Priority:    2,
		CloseReason: "Completed",
	}
	ctx := context.Background()
	RunConfigCloseHooks(ctx, issue)
	// Wait for hook to complete (defensive; hooks run synchronously)
	time.Sleep(100 * time.Millisecond)
	// Verify output contains all env vars
	output, err := os.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Failed to read output file: %v", err)
	}
	outputStr := string(output)
	checks := []string{
		"ID=bd-xyz9",
		"TYPE=feature",
		"PRIORITY=2",
		"REASON=Completed",
	}
	for _, check := range checks {
		if !strings.Contains(outputStr, check) {
			t.Errorf("Hook output = %q, want to contain %q", outputStr, check)
		}
	}
}
// TestRunConfigCloseHooks_HookFailure verifies the failure-isolation
// contract: a hook that exits non-zero neither panics nor prevents
// later hooks in the list from running.
func TestRunConfigCloseHooks_HookFailure(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	successFile := filepath.Join(tmpDir, "success.txt")
	// Create config.yaml with a failing hook followed by a succeeding one
	configContent := `hooks:
  on_close:
    - name: failing-hook
      command: exit 1
    - name: success-hook
      command: echo "success" > ` + successFile + `
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	issue := &types.Issue{ID: "bd-test", Title: "Test"}
	ctx := context.Background()
	// Should not panic even with failing hook
	RunConfigCloseHooks(ctx, issue)
	// Wait for hooks to complete (defensive; hooks run synchronously)
	time.Sleep(100 * time.Millisecond)
	// Verify second hook still ran
	output, err := os.ReadFile(successFile)
	if err != nil {
		t.Fatalf("Second hook should have run despite first failing: %v", err)
	}
	if !strings.Contains(string(output), "success") {
		t.Error("Second hook did not produce expected output")
	}
}
// TestGetCloseHooks verifies parsing of hooks.on_close from
// config.yaml: named entries, ordering, and an unnamed entry whose
// Name defaults to empty.
func TestGetCloseHooks(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Create config.yaml with multiple hooks
	configContent := `hooks:
  on_close:
    - name: first-hook
      command: echo first
    - name: second-hook
      command: echo second
    - command: echo unnamed
`
	configPath := filepath.Join(beadsDir, "config.yaml")
	if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
		t.Fatalf("Failed to write config: %v", err)
	}
	// Change to the temp dir and initialize config
	oldWd, _ := os.Getwd()
	defer func() { _ = os.Chdir(oldWd) }()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir: %v", err)
	}
	// Re-initialize config
	if err := config.Initialize(); err != nil {
		t.Fatalf("Failed to initialize config: %v", err)
	}
	hooks := config.GetCloseHooks()
	if len(hooks) != 3 {
		t.Fatalf("Expected 3 hooks, got %d", len(hooks))
	}
	if hooks[0].Name != "first-hook" || hooks[0].Command != "echo first" {
		t.Errorf("First hook = %+v, want name=first-hook, command=echo first", hooks[0])
	}
	if hooks[1].Name != "second-hook" || hooks[1].Command != "echo second" {
		t.Errorf("Second hook = %+v, want name=second-hook, command=echo second", hooks[1])
	}
	if hooks[2].Name != "" || hooks[2].Command != "echo unnamed" {
		t.Errorf("Third hook = %+v, want name='', command=echo unnamed", hooks[2])
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,464 +0,0 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/steveyegge/beads/internal/types"
)
// CreateTombstone converts an existing issue to a tombstone record.
// This is a soft-delete that preserves the issue in the database with status="tombstone".
// The issue will still appear in exports but be excluded from normal queries.
// Dependencies must be removed separately before calling this method.
//
// Parameters:
//   - id: issue to tombstone; an error is returned if it does not exist
//   - actor: who performed the deletion (recorded on the row and event)
//   - reason: free-text justification (recorded on the row and event)
//
// The status flip, audit event, dirty-issue marker, and blocked-cache
// invalidation are performed in a single transaction: all or nothing.
func (s *SQLiteStorage) CreateTombstone(ctx context.Context, id string, actor string, reason string) error {
	// Get the issue to preserve its original type
	issue, err := s.GetIssue(ctx, id)
	if err != nil {
		return fmt.Errorf("failed to get issue: %w", err)
	}
	if issue == nil {
		return fmt.Errorf("issue not found: %s", id)
	}
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	// Rollback is a harmless no-op once Commit has succeeded.
	defer func() { _ = tx.Rollback() }()
	now := time.Now()
	originalType := string(issue.IssueType)
	// Convert issue to tombstone
	// Note: closed_at must be set to NULL because of CHECK constraint:
	// (status = 'closed') = (closed_at IS NOT NULL)
	_, err = tx.ExecContext(ctx, `
		UPDATE issues
		SET status = ?,
		    closed_at = NULL,
		    deleted_at = ?,
		    deleted_by = ?,
		    delete_reason = ?,
		    original_type = ?,
		    updated_at = ?
		WHERE id = ?
	`, types.StatusTombstone, now, actor, reason, originalType, now, id)
	if err != nil {
		return fmt.Errorf("failed to create tombstone: %w", err)
	}
	// Record tombstone creation event
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, id, "deleted", actor, reason)
	if err != nil {
		return fmt.Errorf("failed to record tombstone event: %w", err)
	}
	// Mark issue as dirty for incremental export
	_, err = tx.ExecContext(ctx, `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
	`, id, now)
	if err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}
	// Invalidate blocked issues cache since status changed (bd-5qim)
	// Tombstone issues don't block others, so this affects blocking calculations
	if err := s.invalidateBlockedCache(ctx, tx); err != nil {
		return fmt.Errorf("failed to invalidate blocked cache: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return wrapDBError("commit tombstone transaction", err)
	}
	return nil
}
// DeleteIssue permanently removes an issue from the database, along with all
// rows that reference it (dependencies, events, comments, dirty markers).
// Everything runs in one transaction, so a failure leaves the database
// untouched. Returns an error if the issue does not exist.
func (s *SQLiteStorage) DeleteIssue(ctx context.Context, id string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	// Purge referencing rows before the issue row itself.
	// Comments have no FK cascade on this table (bd-687g).
	cleanup := []struct {
		what  string
		query string
		args  []interface{}
	}{
		{"dependencies", `DELETE FROM dependencies WHERE issue_id = ? OR depends_on_id = ?`, []interface{}{id, id}},
		{"events", `DELETE FROM events WHERE issue_id = ?`, []interface{}{id}},
		{"comments", `DELETE FROM comments WHERE issue_id = ?`, []interface{}{id}},
		{"dirty marker", `DELETE FROM dirty_issues WHERE issue_id = ?`, []interface{}{id}},
	}
	for _, step := range cleanup {
		if _, err := tx.ExecContext(ctx, step.query, step.args...); err != nil {
			return fmt.Errorf("failed to delete %s: %w", step.what, err)
		}
	}
	// Delete the issue itself; a zero row count means it never existed.
	res, err := tx.ExecContext(ctx, `DELETE FROM issues WHERE id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to delete issue: %w", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to check rows affected: %w", err)
	}
	if affected == 0 {
		return fmt.Errorf("issue not found: %s", id)
	}
	if err := tx.Commit(); err != nil {
		return wrapDBError("commit delete transaction", err)
	}
	// REMOVED (bd-c7af): Counter sync after deletion - no longer needed with hash IDs
	return nil
}
// DeleteIssuesResult contains statistics about a batch deletion operation.
type DeleteIssuesResult struct {
	DeletedCount      int      // number of issues deleted (or that would be, in dry-run)
	DependenciesCount int      // dependency rows touching the deletion set (either direction)
	LabelsCount       int      // label rows attached to issues in the deletion set
	EventsCount       int      // event rows attached to issues in the deletion set
	OrphanedIssues    []string // dependents left without their dependency (force mode)
}
// DeleteIssues deletes multiple issues in a single transaction.
// If cascade is true, recursively deletes dependents.
// If cascade is false but force is true, deletes issues and orphans their dependents.
// If cascade and force are both false, returns an error if any issue has dependents.
// If dryRun is true, only computes statistics without deleting.
func (s *SQLiteStorage) DeleteIssues(ctx context.Context, ids []string, cascade bool, force bool, dryRun bool) (*DeleteIssuesResult, error) {
	if len(ids) == 0 {
		// Nothing requested: report zero counts without touching the database.
		return &DeleteIssuesResult{}, nil
	}
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	requested := buildIDSet(ids)
	stats := &DeleteIssuesResult{}
	// Work out the final set of issues to delete per the cascade/force policy.
	targets, err := s.resolveDeleteSet(ctx, tx, ids, requested, cascade, force, stats)
	if err != nil {
		return nil, wrapDBError("resolve delete set", err)
	}
	placeholders, sqlArgs := buildSQLInClause(targets)
	if err := s.populateDeleteStats(ctx, tx, placeholders, sqlArgs, stats); err != nil {
		return nil, err
	}
	if dryRun {
		// Dry run: statistics only; the deferred Rollback discards the tx.
		return stats, nil
	}
	if err := s.executeDelete(ctx, tx, placeholders, sqlArgs, stats); err != nil {
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, fmt.Errorf("failed to commit transaction: %w", err)
	}
	// REMOVED (bd-c7af): Counter sync after deletion - no longer needed with hash IDs
	return stats, nil
}
// buildIDSet converts a list of issue IDs into a membership set for O(1) lookups.
func buildIDSet(ids []string) map[string]bool {
	set := make(map[string]bool, len(ids))
	for i := range ids {
		set[ids[i]] = true
	}
	return set
}
// resolveDeleteSet computes the final list of issue IDs to delete based on
// the cascade/force flags, recording orphaned dependents in result where
// the policy allows them.
func (s *SQLiteStorage) resolveDeleteSet(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, cascade bool, force bool, result *DeleteIssuesResult) ([]string, error) {
	switch {
	case cascade:
		// Cascade: grow the set to include every transitive dependent.
		return s.expandWithDependents(ctx, tx, ids, idSet)
	case force:
		// Force: keep the set as-is but remember which dependents get orphaned.
		return ids, s.trackOrphanedIssues(ctx, tx, ids, idSet, result)
	default:
		// Neither flag: fail if deletion would orphan anything.
		return ids, s.validateNoDependents(ctx, tx, ids, idSet, result)
	}
}
// expandWithDependents returns the transitive closure of the given IDs plus
// everything that depends on them (used by cascade deletion).
func (s *SQLiteStorage) expandWithDependents(ctx context.Context, tx *sql.Tx, ids []string, _ map[string]bool) ([]string, error) {
	closure, err := s.findAllDependentsRecursive(ctx, tx, ids)
	if err != nil {
		return nil, fmt.Errorf("failed to find dependents: %w", err)
	}
	out := make([]string, 0, len(closure))
	for issueID := range closure {
		out = append(out, issueID)
	}
	return out, nil
}
// validateNoDependents returns an error if any of the issues has a dependent
// outside the deletion set; offending dependents are recorded in result.
func (s *SQLiteStorage) validateNoDependents(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, result *DeleteIssuesResult) error {
	for i := range ids {
		if err := s.checkSingleIssueValidation(ctx, tx, ids[i], idSet, result); err != nil {
			return wrapDBError("check dependents", err)
		}
	}
	return nil
}
// checkSingleIssueValidation verifies that issue id can be deleted without
// orphaning dependents. Dependents that are themselves in the deletion set
// (idSet) are fine; any dependent outside the set is appended to
// result.OrphanedIssues and produces an error directing the caller to
// --cascade or --force.
func (s *SQLiteStorage) checkSingleIssueValidation(ctx context.Context, tx *sql.Tx, id string, idSet map[string]bool, result *DeleteIssuesResult) error {
	// Cheap COUNT first: skip the row scan entirely when nothing depends on id.
	var depCount int
	err := tx.QueryRowContext(ctx,
		`SELECT COUNT(*) FROM dependencies WHERE depends_on_id = ?`, id).Scan(&depCount)
	if err != nil {
		return fmt.Errorf("failed to check dependents for %s: %w", id, err)
	}
	if depCount == 0 {
		return nil
	}
	rows, err := tx.QueryContext(ctx,
		`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to get dependents for %s: %w", id, err)
	}
	defer func() { _ = rows.Close() }()
	hasExternal := false
	for rows.Next() {
		var depID string
		if err := rows.Scan(&depID); err != nil {
			return fmt.Errorf("failed to scan dependent: %w", err)
		}
		// A dependent outside the deletion set would be left orphaned.
		if !idSet[depID] {
			hasExternal = true
			result.OrphanedIssues = append(result.OrphanedIssues, depID)
		}
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("failed to iterate dependents for %s: %w", id, err)
	}
	if hasExternal {
		return fmt.Errorf("issue %s has dependents not in deletion set; use --cascade to delete them or --force to orphan them", id)
	}
	return nil
}
// trackOrphanedIssues records (in result.OrphanedIssues) every dependent of
// the issues being deleted that is not itself part of the deletion set.
// A set is used internally so each orphan is reported once.
func (s *SQLiteStorage) trackOrphanedIssues(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, result *DeleteIssuesResult) error {
	orphans := make(map[string]bool)
	for _, issueID := range ids {
		if err := s.collectOrphansForID(ctx, tx, issueID, idSet, orphans); err != nil {
			return wrapDBError("collect orphans", err)
		}
	}
	for orphan := range orphans {
		result.OrphanedIssues = append(result.OrphanedIssues, orphan)
	}
	return nil
}
// collectOrphansForID adds to orphanSet every issue that depends on id but is
// not itself scheduled for deletion (i.e. absent from idSet).
func (s *SQLiteStorage) collectOrphansForID(ctx context.Context, tx *sql.Tx, id string, idSet map[string]bool, orphanSet map[string]bool) error {
	rows, err := tx.QueryContext(ctx,
		`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, id)
	if err != nil {
		return fmt.Errorf("failed to get dependents for %s: %w", id, err)
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		var dependent string
		if err := rows.Scan(&dependent); err != nil {
			return fmt.Errorf("failed to scan dependent: %w", err)
		}
		if !idSet[dependent] {
			orphanSet[dependent] = true
		}
	}
	return rows.Err()
}
// buildSQLInClause returns a comma-joined list of "?" placeholders and the
// matching argument slice for use in a SQL IN (...) clause.
func buildSQLInClause(ids []string) (string, []interface{}) {
	marks := make([]string, 0, len(ids))
	args := make([]interface{}, 0, len(ids))
	for _, id := range ids {
		marks = append(marks, "?")
		args = append(args, id)
	}
	return strings.Join(marks, ","), args
}
// populateDeleteStats fills result with counts of the rows the pending
// deletion will touch (dependencies, labels, events) plus the issue count.
// inClause is a comma-separated "?" placeholder list and args the matching
// issue IDs. The dependencies query references the IN list twice, so its
// arguments must be doubled.
//
// FIX: the previous version doubled arguments with append(args, args...)
// (which aliases and can overwrite the caller's slice if it has spare
// capacity) and selected the doubled case by comparing destination pointers
// (c.dest == &result.DependenciesCount), which silently breaks if fields are
// reordered. Arguments are now doubled into a fresh slice and bound to each
// query explicitly.
func (s *SQLiteStorage) populateDeleteStats(ctx context.Context, tx *sql.Tx, inClause string, args []interface{}, result *DeleteIssuesResult) error {
	// Double the args into a new slice; never append to the caller's slice.
	doubled := make([]interface{}, 0, len(args)*2)
	doubled = append(doubled, args...)
	doubled = append(doubled, args...)
	counts := []struct {
		query string
		args  []interface{}
		dest  *int
	}{
		{fmt.Sprintf(`SELECT COUNT(*) FROM dependencies WHERE issue_id IN (%s) OR depends_on_id IN (%s)`, inClause, inClause), doubled, &result.DependenciesCount},
		{fmt.Sprintf(`SELECT COUNT(*) FROM labels WHERE issue_id IN (%s)`, inClause), args, &result.LabelsCount},
		{fmt.Sprintf(`SELECT COUNT(*) FROM events WHERE issue_id IN (%s)`, inClause), args, &result.EventsCount},
	}
	for _, c := range counts {
		if err := tx.QueryRowContext(ctx, c.query, c.args...).Scan(c.dest); err != nil {
			return fmt.Errorf("failed to count: %w", err)
		}
	}
	// DeletedCount mirrors the number of targeted IDs; executeDelete refines
	// it to rows actually converted when not in dry-run mode.
	result.DeletedCount = len(args)
	return nil
}
// executeDelete converts the issues identified by inClause/args into
// tombstone records inside the caller's transaction and sets
// result.DeletedCount to the number of rows actually converted.
// Note: This method now creates tombstones instead of hard-deleting (bd-3b4).
// Only dependencies are deleted - issues are converted to tombstones.
//
// FIX: the row iteration over issue types now checks rows.Err() after the
// loop (previously ignored, so a mid-iteration error could silently shrink
// the tombstone set), and the doubled argument list is built in a fresh
// slice instead of append(args, args...), which can alias the caller's slice.
func (s *SQLiteStorage) executeDelete(ctx context.Context, tx *sql.Tx, inClause string, args []interface{}, result *DeleteIssuesResult) error {
	// 1. Delete dependencies - tombstones don't block other issues.
	// The IN list appears twice, so bind the args twice (fresh slice).
	doubled := make([]interface{}, 0, len(args)*2)
	doubled = append(doubled, args...)
	doubled = append(doubled, args...)
	_, err := tx.ExecContext(ctx,
		fmt.Sprintf(`DELETE FROM dependencies WHERE issue_id IN (%s) OR depends_on_id IN (%s)`, inClause, inClause),
		doubled...)
	if err != nil {
		return fmt.Errorf("failed to delete dependencies: %w", err)
	}
	// 2. Get issue types before converting to tombstones (need for original_type)
	issueTypes := make(map[string]string)
	rows, err := tx.QueryContext(ctx,
		fmt.Sprintf(`SELECT id, issue_type FROM issues WHERE id IN (%s)`, inClause),
		args...)
	if err != nil {
		return fmt.Errorf("failed to get issue types: %w", err)
	}
	for rows.Next() {
		var id, issueType string
		if err := rows.Scan(&id, &issueType); err != nil {
			_ = rows.Close() // #nosec G104 - error handling not critical in error path
			return fmt.Errorf("failed to scan issue type: %w", err)
		}
		issueTypes[id] = issueType
	}
	// Surface iteration errors (previously dropped).
	if err := rows.Err(); err != nil {
		_ = rows.Close()
		return fmt.Errorf("failed to iterate issue types: %w", err)
	}
	_ = rows.Close()
	// 3. Convert issues to tombstones (only for issues that exist)
	// Note: closed_at must be set to NULL because of CHECK constraint:
	//   (status = 'closed') = (closed_at IS NOT NULL)
	now := time.Now()
	deletedCount := 0
	for id, originalType := range issueTypes {
		execResult, err := tx.ExecContext(ctx, `
			UPDATE issues
			SET status = ?,
				closed_at = NULL,
				deleted_at = ?,
				deleted_by = ?,
				delete_reason = ?,
				original_type = ?,
				updated_at = ?
			WHERE id = ?
		`, types.StatusTombstone, now, "batch delete", "batch delete", originalType, now, id)
		if err != nil {
			return fmt.Errorf("failed to create tombstone for %s: %w", id, err)
		}
		rowsAffected, _ := execResult.RowsAffected()
		if rowsAffected == 0 {
			continue // Issue doesn't exist, skip
		}
		deletedCount++
		// Record tombstone creation event
		_, err = tx.ExecContext(ctx, `
			INSERT INTO events (issue_id, event_type, actor, comment)
			VALUES (?, ?, ?, ?)
		`, id, "deleted", "batch delete", "batch delete")
		if err != nil {
			return fmt.Errorf("failed to record tombstone event for %s: %w", id, err)
		}
		// Mark issue as dirty for incremental export
		_, err = tx.ExecContext(ctx, `
			INSERT INTO dirty_issues (issue_id, marked_at)
			VALUES (?, ?)
			ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
		`, id, now)
		if err != nil {
			return fmt.Errorf("failed to mark issue dirty for %s: %w", id, err)
		}
	}
	// 4. Invalidate blocked issues cache since statuses changed (bd-5qim)
	if err := s.invalidateBlockedCache(ctx, tx); err != nil {
		return fmt.Errorf("failed to invalidate blocked cache: %w", err)
	}
	result.DeletedCount = deletedCount
	return nil
}
// findAllDependentsRecursive finds all issues that depend on the given
// issues, recursively (BFS over reverse-dependency edges). The returned set
// includes the input ids themselves.
//
// FIX: the previous version used `defer rows.Close()` inside the BFS loop,
// which accumulated one open result set per processed issue until the
// function returned. Rows are now closed explicitly at the end of each
// iteration.
func (s *SQLiteStorage) findAllDependentsRecursive(ctx context.Context, tx *sql.Tx, ids []string) (map[string]bool, error) {
	result := make(map[string]bool, len(ids))
	for _, id := range ids {
		result[id] = true
	}
	queue := make([]string, len(ids))
	copy(queue, ids)
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		rows, err := tx.QueryContext(ctx,
			`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, current)
		if err != nil {
			return nil, err
		}
		for rows.Next() {
			var depID string
			if err := rows.Scan(&depID); err != nil {
				_ = rows.Close()
				return nil, err
			}
			// Enqueue each newly-discovered dependent exactly once.
			if !result[depID] {
				result[depID] = true
				queue = append(queue, depID)
			}
		}
		if err := rows.Err(); err != nil {
			_ = rows.Close()
			return nil, err
		}
		// Close promptly so result sets don't pile up across iterations.
		if err := rows.Close(); err != nil {
			return nil, err
		}
	}
	return result, nil
}

View File

@@ -1,50 +0,0 @@
package sqlite
import (
"database/sql"
"encoding/json"
"time"
)
// parseNullableTimeString parses a nullable time string from database TEXT columns.
// The ncruces/go-sqlite3 driver only auto-converts TEXT→time.Time for columns declared
// as DATETIME/DATE/TIME/TIMESTAMP. For TEXT columns (like deleted_at), we must parse manually.
// Supports RFC3339, RFC3339Nano, and SQLite's native format.
func parseNullableTimeString(ns sql.NullString) *time.Time {
if !ns.Valid || ns.String == "" {
return nil
}
// Try RFC3339Nano first (more precise), then RFC3339, then SQLite format
for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} {
if t, err := time.Parse(layout, ns.String); err == nil {
return &t
}
}
return nil // Unparseable - shouldn't happen with valid data
}
// parseJSONStringArray parses a JSON string array from a database TEXT column.
// Returns nil for an empty string or invalid JSON.
func parseJSONStringArray(s string) []string {
	if len(s) == 0 {
		return nil
	}
	var out []string
	if err := json.Unmarshal([]byte(s), &out); err != nil {
		// Invalid JSON - shouldn't happen with valid data.
		return nil
	}
	return out
}
// formatJSONStringArray formats a string slice as JSON for database storage.
// Returns "" for a nil/empty slice (stored as empty TEXT rather than "[]").
func formatJSONStringArray(arr []string) string {
	if len(arr) == 0 {
		return ""
	}
	encoded, err := json.Marshal(arr)
	if err != nil {
		// Marshal of []string cannot realistically fail; be defensive anyway.
		return ""
	}
	return string(encoded)
}

View File

@@ -1,149 +0,0 @@
package sqlite
import (
"context"
"fmt"
"time"
"github.com/steveyegge/beads/internal/types"
)
// UpdateIssueID updates an issue ID and all its text fields in a single
// transaction, rewriting every table that references the old ID
// (dependencies, events, labels, comments, dirty_issues, snapshots) and
// recording a 'renamed' event.
//
// FIX: foreign keys are disabled on a pooled connection for the duration of
// the rename, but the previous version never re-enabled them before
// conn.Close() returned the connection to the pool — any later statement on
// that pooled connection would silently run without FK enforcement. A
// deferred PRAGMA now restores foreign_keys = ON (it runs after the tx
// rollback/commit defer, before the connection is released).
func (s *SQLiteStorage) UpdateIssueID(ctx context.Context, oldID, newID string, issue *types.Issue, actor string) error {
	// Get exclusive connection to ensure PRAGMA applies
	conn, err := s.db.Conn(ctx)
	if err != nil {
		return fmt.Errorf("failed to get connection: %w", err)
	}
	defer func() { _ = conn.Close() }()
	// Disable foreign keys on this specific connection
	_, err = conn.ExecContext(ctx, `PRAGMA foreign_keys = OFF`)
	if err != nil {
		return fmt.Errorf("failed to disable foreign keys: %w", err)
	}
	// Restore FK enforcement before the connection goes back to the pool.
	defer func() { _, _ = conn.ExecContext(ctx, `PRAGMA foreign_keys = ON`) }()
	tx, err := conn.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	result, err := tx.ExecContext(ctx, `
		UPDATE issues
		SET id = ?, title = ?, description = ?, design = ?, acceptance_criteria = ?, notes = ?, updated_at = ?
		WHERE id = ?
	`, newID, issue.Title, issue.Description, issue.Design, issue.AcceptanceCriteria, issue.Notes, time.Now(), oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue ID: %w", err)
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to get rows affected: %w", err)
	}
	if rows == 0 {
		return fmt.Errorf("issue not found: %s", oldID)
	}
	// Rewrite every referencing table to point at the new ID.
	_, err = tx.ExecContext(ctx, `UPDATE dependencies SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue_id in dependencies: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE dependencies SET depends_on_id = ? WHERE depends_on_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update depends_on_id in dependencies: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE events SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update events: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE labels SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update labels: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE comments SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update comments: %w", err)
	}
	_, err = tx.ExecContext(ctx, `
		UPDATE dirty_issues SET issue_id = ? WHERE issue_id = ?
	`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update dirty_issues: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE issue_snapshots SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue_snapshots: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE compaction_snapshots SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update compaction_snapshots: %w", err)
	}
	// Mark the renamed issue dirty for incremental export.
	_, err = tx.ExecContext(ctx, `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
	`, newID, time.Now())
	if err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}
	// Audit trail: record the rename with old/new values.
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, old_value, new_value)
		VALUES (?, 'renamed', ?, ?, ?)
	`, newID, actor, oldID, newID)
	if err != nil {
		return fmt.Errorf("failed to record rename event: %w", err)
	}
	return tx.Commit()
}
// RenameDependencyPrefix updates the prefix in all dependency records,
// rewriting both the issue_id and depends_on_id columns from oldPrefix to
// newPrefix via string surgery in SQL (substr past the old prefix length).
// GH#630: This was previously a no-op, causing dependencies to break after rename-prefix
//
// NOTE(review): the two UPDATEs run as separate statements, not in one
// transaction — a failure between them leaves half-renamed rows. Confirm
// whether callers wrap this in a larger transaction before relying on it.
func (s *SQLiteStorage) RenameDependencyPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	// Update issue_id column
	_, err := s.db.ExecContext(ctx, `
		UPDATE dependencies
		SET issue_id = ? || substr(issue_id, length(?) + 1)
		WHERE issue_id LIKE ? || '%'
	`, newPrefix, oldPrefix, oldPrefix)
	if err != nil {
		return fmt.Errorf("failed to update issue_id in dependencies: %w", err)
	}
	// Update depends_on_id column
	_, err = s.db.ExecContext(ctx, `
		UPDATE dependencies
		SET depends_on_id = ? || substr(depends_on_id, length(?) + 1)
		WHERE depends_on_id LIKE ? || '%'
	`, newPrefix, oldPrefix, oldPrefix)
	if err != nil {
		return fmt.Errorf("failed to update depends_on_id in dependencies: %w", err)
	}
	return nil
}
// RenameCounterPrefix is a no-op with hash-based IDs (bd-8e05).
// Kept for backward compatibility with the rename-prefix command; it always
// succeeds without touching the database.
func (s *SQLiteStorage) RenameCounterPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	// Hash-based IDs don't use counters, so nothing to update
	return nil
}
// ResetCounter is a no-op with hash-based IDs (bd-8e05).
// Kept for backward compatibility; it always succeeds without touching the
// database.
func (s *SQLiteStorage) ResetCounter(ctx context.Context, prefix string) error {
	// Hash-based IDs don't use counters, so nothing to reset
	return nil
}

View File

@@ -1,429 +0,0 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/steveyegge/beads/internal/types"
)
// GetCloseReason retrieves the close reason from the most recent closed
// event for an issue. Returns "" (no error) when the issue has no closed
// event or the event carries no comment.
func (s *SQLiteStorage) GetCloseReason(ctx context.Context, issueID string) (string, error) {
	var reason sql.NullString
	scanErr := s.db.QueryRowContext(ctx, `
		SELECT comment FROM events
		WHERE issue_id = ? AND event_type = ?
		ORDER BY created_at DESC
		LIMIT 1
	`, issueID, types.EventClosed).Scan(&reason)
	switch {
	case scanErr == sql.ErrNoRows:
		return "", nil
	case scanErr != nil:
		return "", fmt.Errorf("failed to get close reason: %w", scanErr)
	case reason.Valid:
		return reason.String, nil
	default:
		return "", nil
	}
}
// GetCloseReasonsForIssues retrieves close reasons for multiple issues in a
// single query. The result maps issue ID → comment of its most recent
// closed event; issues with no closed event or an empty comment are omitted.
func (s *SQLiteStorage) GetCloseReasonsForIssues(ctx context.Context, issueIDs []string) (map[string]string, error) {
	result := make(map[string]string)
	if len(issueIDs) == 0 {
		return result, nil
	}
	// Build placeholders for IN clause.
	// args[0] is the event type for the inner WHERE; the IDs follow it, and
	// the event type is appended again below for the outer WHERE — the order
	// here must match the '?' order in the query text.
	placeholders := make([]string, len(issueIDs))
	args := make([]interface{}, len(issueIDs)+1)
	args[0] = types.EventClosed
	for i, id := range issueIDs {
		placeholders[i] = "?"
		args[i+1] = id
	}
	// Use a subquery to get the most recent closed event for each issue
	// (group by issue, take MAX(created_at), then join back for the comment).
	// #nosec G201 - safe SQL with controlled formatting
	query := fmt.Sprintf(`
		SELECT e.issue_id, e.comment
		FROM events e
		INNER JOIN (
			SELECT issue_id, MAX(created_at) as max_created_at
			FROM events
			WHERE event_type = ? AND issue_id IN (%s)
			GROUP BY issue_id
		) latest ON e.issue_id = latest.issue_id AND e.created_at = latest.max_created_at
		WHERE e.event_type = ?
	`, strings.Join(placeholders, ", "))
	// Append event_type again for the outer WHERE clause
	args = append(args, types.EventClosed)
	rows, err := s.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get close reasons: %w", err)
	}
	defer func() { _ = rows.Close() }()
	for rows.Next() {
		var issueID string
		var comment sql.NullString
		if err := rows.Scan(&issueID, &comment); err != nil {
			return nil, fmt.Errorf("failed to scan close reason: %w", err)
		}
		// Skip NULL/empty comments so callers see only meaningful reasons.
		if comment.Valid && comment.String != "" {
			result[issueID] = comment.String
		}
	}
	return result, nil
}
// GetIssueByExternalRef retrieves an issue by external reference.
// Returns (nil, nil) when no issue has the given external_ref. Nullable
// columns are scanned into sql.Null* locals and copied onto the Issue only
// when valid; labels are fetched in a follow-up query.
//
// NOTE: the Scan target order below must exactly match the SELECT column
// list — keep the two in sync when adding fields.
func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef string) (*types.Issue, error) {
	var issue types.Issue
	var closedAt sql.NullTime
	var estimatedMinutes sql.NullInt64
	var assignee sql.NullString
	var externalRefCol sql.NullString
	var compactedAt sql.NullTime
	var originalSize sql.NullInt64
	var contentHash sql.NullString
	var compactedAtCommit sql.NullString
	var sourceRepo sql.NullString
	var closeReason sql.NullString
	var deletedAt sql.NullString // TEXT column, not DATETIME - must parse manually
	var deletedBy sql.NullString
	var deleteReason sql.NullString
	var originalType sql.NullString
	// Messaging fields (bd-kwro)
	var sender sql.NullString
	var wisp sql.NullInt64
	// Pinned field (bd-7h5)
	var pinned sql.NullInt64
	// Template field (beads-1ra)
	var isTemplate sql.NullInt64
	// Gate fields (bd-udsi)
	var awaitType sql.NullString
	var awaitID sql.NullString
	var timeoutNs sql.NullInt64
	var waiters sql.NullString
	err := s.db.QueryRowContext(ctx, `
		SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes,
			created_at, updated_at, closed_at, external_ref,
			compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
			deleted_at, deleted_by, delete_reason, original_type,
			sender, ephemeral, pinned, is_template,
			await_type, await_id, timeout_ns, waiters
		FROM issues
		WHERE external_ref = ?
	`, externalRef).Scan(
		&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
		&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
		&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
		&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRefCol,
		&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
		&deletedAt, &deletedBy, &deleteReason, &originalType,
		&sender, &wisp, &pinned, &isTemplate,
		&awaitType, &awaitID, &timeoutNs, &waiters,
	)
	if err == sql.ErrNoRows {
		// Not-found is not an error for this lookup.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get issue by external_ref: %w", err)
	}
	// Copy nullable columns onto the struct only when present.
	if contentHash.Valid {
		issue.ContentHash = contentHash.String
	}
	if closedAt.Valid {
		issue.ClosedAt = &closedAt.Time
	}
	if estimatedMinutes.Valid {
		mins := int(estimatedMinutes.Int64)
		issue.EstimatedMinutes = &mins
	}
	if assignee.Valid {
		issue.Assignee = assignee.String
	}
	if externalRefCol.Valid {
		issue.ExternalRef = &externalRefCol.String
	}
	if compactedAt.Valid {
		issue.CompactedAt = &compactedAt.Time
	}
	if compactedAtCommit.Valid {
		issue.CompactedAtCommit = &compactedAtCommit.String
	}
	if originalSize.Valid {
		issue.OriginalSize = int(originalSize.Int64)
	}
	if sourceRepo.Valid {
		issue.SourceRepo = sourceRepo.String
	}
	if closeReason.Valid {
		issue.CloseReason = closeReason.String
	}
	// deleted_at is stored as TEXT, so the driver returns a string to parse.
	issue.DeletedAt = parseNullableTimeString(deletedAt)
	if deletedBy.Valid {
		issue.DeletedBy = deletedBy.String
	}
	if deleteReason.Valid {
		issue.DeleteReason = deleteReason.String
	}
	if originalType.Valid {
		issue.OriginalType = originalType.String
	}
	// Messaging fields (bd-kwro)
	if sender.Valid {
		issue.Sender = sender.String
	}
	if wisp.Valid && wisp.Int64 != 0 {
		issue.Wisp = true
	}
	// Pinned field (bd-7h5)
	if pinned.Valid && pinned.Int64 != 0 {
		issue.Pinned = true
	}
	// Template field (beads-1ra)
	if isTemplate.Valid && isTemplate.Int64 != 0 {
		issue.IsTemplate = true
	}
	// Gate fields (bd-udsi)
	if awaitType.Valid {
		issue.AwaitType = awaitType.String
	}
	if awaitID.Valid {
		issue.AwaitID = awaitID.String
	}
	if timeoutNs.Valid {
		issue.Timeout = time.Duration(timeoutNs.Int64)
	}
	if waiters.Valid && waiters.String != "" {
		issue.Waiters = parseJSONStringArray(waiters.String)
	}
	// Fetch labels for this issue
	labels, err := s.GetLabels(ctx, issue.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to get labels: %w", err)
	}
	issue.Labels = labels
	return &issue, nil
}
// SearchIssues finds issues matching query and filters.
// query, when non-empty, is matched (LIKE, substring) against title,
// description, and id; every populated field of filter adds a further AND
// clause. Results are ordered by priority then newest-first creation time.
// The whereClauses and args slices are built in lockstep — each appended
// clause must append its bound values in the same order.
func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
	// Check for external database file modifications (daemon mode)
	s.checkFreshness()
	// Hold read lock during database operations to prevent reconnect() from
	// closing the connection mid-query (GH#607 race condition fix)
	s.reconnectMu.RLock()
	defer s.reconnectMu.RUnlock()
	whereClauses := []string{}
	args := []interface{}{}
	if query != "" {
		whereClauses = append(whereClauses, "(title LIKE ? OR description LIKE ? OR id LIKE ?)")
		pattern := "%" + query + "%"
		args = append(args, pattern, pattern, pattern)
	}
	if filter.TitleSearch != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		pattern := "%" + filter.TitleSearch + "%"
		args = append(args, pattern)
	}
	// Pattern matching
	if filter.TitleContains != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		args = append(args, "%"+filter.TitleContains+"%")
	}
	if filter.DescriptionContains != "" {
		whereClauses = append(whereClauses, "description LIKE ?")
		args = append(args, "%"+filter.DescriptionContains+"%")
	}
	if filter.NotesContains != "" {
		whereClauses = append(whereClauses, "notes LIKE ?")
		args = append(args, "%"+filter.NotesContains+"%")
	}
	if filter.Status != nil {
		whereClauses = append(whereClauses, "status = ?")
		args = append(args, *filter.Status)
	} else if !filter.IncludeTombstones {
		// Exclude tombstones by default unless explicitly filtering for them (bd-1bu)
		whereClauses = append(whereClauses, "status != ?")
		args = append(args, types.StatusTombstone)
	}
	if filter.Priority != nil {
		whereClauses = append(whereClauses, "priority = ?")
		args = append(args, *filter.Priority)
	}
	// Priority ranges
	if filter.PriorityMin != nil {
		whereClauses = append(whereClauses, "priority >= ?")
		args = append(args, *filter.PriorityMin)
	}
	if filter.PriorityMax != nil {
		whereClauses = append(whereClauses, "priority <= ?")
		args = append(args, *filter.PriorityMax)
	}
	if filter.IssueType != nil {
		whereClauses = append(whereClauses, "issue_type = ?")
		args = append(args, *filter.IssueType)
	}
	if filter.Assignee != nil {
		whereClauses = append(whereClauses, "assignee = ?")
		args = append(args, *filter.Assignee)
	}
	// Date ranges (bounds are exclusive: strict > / <)
	if filter.CreatedAfter != nil {
		whereClauses = append(whereClauses, "created_at > ?")
		args = append(args, filter.CreatedAfter.Format(time.RFC3339))
	}
	if filter.CreatedBefore != nil {
		whereClauses = append(whereClauses, "created_at < ?")
		args = append(args, filter.CreatedBefore.Format(time.RFC3339))
	}
	if filter.UpdatedAfter != nil {
		whereClauses = append(whereClauses, "updated_at > ?")
		args = append(args, filter.UpdatedAfter.Format(time.RFC3339))
	}
	if filter.UpdatedBefore != nil {
		whereClauses = append(whereClauses, "updated_at < ?")
		args = append(args, filter.UpdatedBefore.Format(time.RFC3339))
	}
	if filter.ClosedAfter != nil {
		whereClauses = append(whereClauses, "closed_at > ?")
		args = append(args, filter.ClosedAfter.Format(time.RFC3339))
	}
	if filter.ClosedBefore != nil {
		whereClauses = append(whereClauses, "closed_at < ?")
		args = append(args, filter.ClosedBefore.Format(time.RFC3339))
	}
	// Empty/null checks
	if filter.EmptyDescription {
		whereClauses = append(whereClauses, "(description IS NULL OR description = '')")
	}
	if filter.NoAssignee {
		whereClauses = append(whereClauses, "(assignee IS NULL OR assignee = '')")
	}
	if filter.NoLabels {
		whereClauses = append(whereClauses, "id NOT IN (SELECT DISTINCT issue_id FROM labels)")
	}
	// Label filtering: issue must have ALL specified labels
	// (one subquery clause per label, ANDed together)
	if len(filter.Labels) > 0 {
		for _, label := range filter.Labels {
			whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM labels WHERE label = ?)")
			args = append(args, label)
		}
	}
	// Label filtering (OR): issue must have AT LEAST ONE of these labels
	if len(filter.LabelsAny) > 0 {
		placeholders := make([]string, len(filter.LabelsAny))
		for i, label := range filter.LabelsAny {
			placeholders[i] = "?"
			args = append(args, label)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (SELECT issue_id FROM labels WHERE label IN (%s))", strings.Join(placeholders, ", ")))
	}
	// ID filtering: match specific issue IDs
	if len(filter.IDs) > 0 {
		placeholders := make([]string, len(filter.IDs))
		for i, id := range filter.IDs {
			placeholders[i] = "?"
			args = append(args, id)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (%s)", strings.Join(placeholders, ", ")))
	}
	// Wisp filtering (bd-kwro.9)
	if filter.Wisp != nil {
		if *filter.Wisp {
			whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
		} else {
			whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
		}
	}
	// Pinned filtering (bd-7h5)
	if filter.Pinned != nil {
		if *filter.Pinned {
			whereClauses = append(whereClauses, "pinned = 1")
		} else {
			whereClauses = append(whereClauses, "(pinned = 0 OR pinned IS NULL)")
		}
	}
	// Template filtering (beads-1ra)
	if filter.IsTemplate != nil {
		if *filter.IsTemplate {
			whereClauses = append(whereClauses, "is_template = 1")
		} else {
			whereClauses = append(whereClauses, "(is_template = 0 OR is_template IS NULL)")
		}
	}
	// Parent filtering (bd-yqhh): filter children by parent issue
	if filter.ParentID != nil {
		whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM dependencies WHERE type = 'parent-child' AND depends_on_id = ?)")
		args = append(args, *filter.ParentID)
	}
	whereSQL := ""
	if len(whereClauses) > 0 {
		whereSQL = "WHERE " + strings.Join(whereClauses, " AND ")
	}
	limitSQL := ""
	if filter.Limit > 0 {
		limitSQL = " LIMIT ?"
		args = append(args, filter.Limit)
	}
	// #nosec G201 - safe SQL with controlled formatting
	querySQL := fmt.Sprintf(`
		SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes,
			created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
			deleted_at, deleted_by, delete_reason, original_type,
			sender, ephemeral, pinned, is_template,
			await_type, await_id, timeout_ns, waiters
		FROM issues
		%s
		ORDER BY priority ASC, created_at DESC
		%s
	`, whereSQL, limitSQL)
	rows, err := s.db.QueryContext(ctx, querySQL, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to search issues: %w", err)
	}
	defer func() { _ = rows.Close() }()
	return s.scanIssues(ctx, rows)
}