Reorganize project structure: move Go files to internal/beads, docs to docs/
Amp-Thread-ID: https://ampcode.com/threads/T-7a71671d-dd5c-4c7c-b557-fa427fceb04f Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
366
internal/beads/beads.go
Normal file
366
internal/beads/beads.go
Normal file
@@ -0,0 +1,366 @@
|
||||
// Package beads provides a minimal public API for extending bd with custom orchestration.
|
||||
//
|
||||
// Most extensions should use direct SQL queries against bd's database.
|
||||
// This package exports only the essential types and functions needed for
|
||||
// Go-based extensions that want to use bd's storage layer programmatically.
|
||||
//
|
||||
// For detailed guidance on extending bd, see EXTENDING.md.
|
||||
package beads
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/beads/internal/configfile"
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
// CanonicalDatabaseName is the required database filename for all beads
// repositories. The discovery helpers below prefer this name over any
// other *.db file found in a .beads directory.
const CanonicalDatabaseName = "beads.db"

// LegacyDatabaseNames are old database filenames that should be migrated
// to CanonicalDatabaseName; findDatabaseInTree warns when one is in use.
var LegacyDatabaseNames = []string{"bd.db", "issues.db", "bugs.db"}

// The aliases below re-export internal types so Go-based extensions can
// depend on this package alone rather than on internal/types.

// Issue represents a tracked work item with metadata, dependencies, and status.
type (
	Issue = types.Issue
	// Status represents the current state of an issue (open, in progress, closed, blocked).
	Status = types.Status
	// IssueType represents the type of issue (bug, feature, task, epic, chore).
	IssueType = types.IssueType
	// Dependency represents a relationship between issues.
	Dependency = types.Dependency
	// DependencyType represents the type of dependency (blocks, related, parent-child, discovered-from).
	DependencyType = types.DependencyType
	// Comment represents a user comment on an issue.
	Comment = types.Comment
	// Event represents an audit log event.
	Event = types.Event
	// EventType represents the type of audit event.
	EventType = types.EventType
	// Label represents a tag attached to an issue.
	Label = types.Label
	// BlockedIssue represents an issue with blocking dependencies.
	BlockedIssue = types.BlockedIssue
	// TreeNode represents a node in a dependency tree.
	TreeNode = types.TreeNode
	// Statistics represents project-wide metrics.
	Statistics = types.Statistics
	// IssueFilter represents filtering criteria for issue queries.
	IssueFilter = types.IssueFilter
	// WorkFilter represents filtering criteria for work queries.
	WorkFilter = types.WorkFilter
	// SortPolicy determines how ready work is ordered.
	SortPolicy = types.SortPolicy
	// EpicStatus represents the status of an epic issue.
	EpicStatus = types.EpicStatus
)

// Status constants re-exported from internal/types.
const (
	StatusOpen       = types.StatusOpen
	StatusInProgress = types.StatusInProgress
	StatusClosed     = types.StatusClosed
	StatusBlocked    = types.StatusBlocked
)

// IssueType constants re-exported from internal/types.
const (
	TypeBug     = types.TypeBug
	TypeFeature = types.TypeFeature
	TypeTask    = types.TypeTask
	TypeEpic    = types.TypeEpic
	TypeChore   = types.TypeChore
)

// DependencyType constants re-exported from internal/types.
const (
	DepBlocks         = types.DepBlocks
	DepRelated        = types.DepRelated
	DepParentChild    = types.DepParentChild
	DepDiscoveredFrom = types.DepDiscoveredFrom
)

// SortPolicy constants re-exported from internal/types.
const (
	SortPolicyHybrid   = types.SortPolicyHybrid
	SortPolicyPriority = types.SortPolicyPriority
	SortPolicyOldest   = types.SortPolicyOldest
)

// EventType constants re-exported from internal/types.
const (
	EventCreated           = types.EventCreated
	EventUpdated           = types.EventUpdated
	EventStatusChanged     = types.EventStatusChanged
	EventCommented         = types.EventCommented
	EventClosed            = types.EventClosed
	EventReopened          = types.EventReopened
	EventDependencyAdded   = types.EventDependencyAdded
	EventDependencyRemoved = types.EventDependencyRemoved
	EventLabelAdded        = types.EventLabelAdded
	EventLabelRemoved      = types.EventLabelRemoved
	EventCompacted         = types.EventCompacted
)

// Storage provides the minimal interface for extension orchestration.
// It is an alias of the internal storage interface.
type Storage = storage.Storage
|
||||
|
||||
// NewSQLiteStorage opens a bd SQLite database for programmatic access.
// Most extensions should use this to query ready work and update issue status.
// The caller owns the returned Storage and should Close it when done.
func NewSQLiteStorage(dbPath string) (Storage, error) {
	return sqlite.New(dbPath)
}
|
||||
|
||||
// FindDatabasePath discovers the bd database path using bd's standard search order:
//  1. $BEADS_DIR environment variable (points to .beads directory)
//  2. $BEADS_DB environment variable (points directly to database file, deprecated)
//  3. .beads/*.db in current directory or ancestors
//
// Returns empty string if no database is found.
func FindDatabasePath() string {
	// 1. Check BEADS_DIR environment variable (preferred)
	if beadsDir := os.Getenv("BEADS_DIR"); beadsDir != "" {
		// Canonicalize the path to prevent nested .beads directories
		absBeadsDir := utils.CanonicalizePath(beadsDir)

		// Check for config.json first (single source of truth)
		if cfg, err := configfile.Load(absBeadsDir); err == nil && cfg != nil {
			dbPath := cfg.DatabasePath(absBeadsDir)
			if _, err := os.Stat(dbPath); err == nil {
				return dbPath
			}
		}

		// Fall back to canonical beads.db for backward compatibility
		canonicalDB := filepath.Join(absBeadsDir, CanonicalDatabaseName)
		if _, err := os.Stat(canonicalDB); err == nil {
			return canonicalDB
		}

		// Look for any .db file in the beads directory
		matches, err := filepath.Glob(filepath.Join(absBeadsDir, "*.db"))
		if err == nil && len(matches) > 0 {
			// Filter out backup files and vc.db (neither is an issue database)
			var validDBs []string
			for _, match := range matches {
				baseName := filepath.Base(match)
				if !strings.Contains(baseName, ".backup") && baseName != "vc.db" {
					validDBs = append(validDBs, match)
				}
			}
			if len(validDBs) > 0 {
				return validDBs[0]
			}
		}

		// BEADS_DIR is set but no database found - this is OK for --no-db mode
		// Return empty string and let the caller handle it
		// NOTE(review): despite the comment above, control actually falls
		// through to steps 2 and 3 here rather than returning "" — confirm
		// which behavior is intended.
	}

	// 2. Check BEADS_DB environment variable (deprecated but still supported)
	if envDB := os.Getenv("BEADS_DB"); envDB != "" {
		// Canonicalize the path to prevent nested .beads directories
		if absDB, err := filepath.Abs(envDB); err == nil {
			if canonical, err := filepath.EvalSymlinks(absDB); err == nil {
				return canonical
			}
			return absDB // Return absolute path even if symlink resolution fails
		}
		return envDB // Fallback to original if Abs fails
	}

	// 3. Search for .beads/*.db in current directory and ancestors
	if foundDB := findDatabaseInTree(); foundDB != "" {
		// Canonicalize found path
		if absDB, err := filepath.Abs(foundDB); err == nil {
			if canonical, err := filepath.EvalSymlinks(absDB); err == nil {
				return canonical
			}
			return absDB
		}
		return foundDB
	}

	// No fallback to ~/.beads - return empty string
	return ""
}
|
||||
|
||||
// FindJSONLPath returns the expected JSONL file path for the given database path.
|
||||
// It searches for existing *.jsonl files in the database directory and returns
|
||||
// the first one found, or defaults to "issues.jsonl".
|
||||
//
|
||||
// This function does not create directories or files - it only discovers paths.
|
||||
// Use this when you need to know where bd stores its JSONL export.
|
||||
func FindJSONLPath(dbPath string) string {
|
||||
if dbPath == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Get the directory containing the database
|
||||
dbDir := filepath.Dir(dbPath)
|
||||
|
||||
// Look for existing .jsonl files in the .beads directory
|
||||
pattern := filepath.Join(dbDir, "*.jsonl")
|
||||
matches, err := filepath.Glob(pattern)
|
||||
if err == nil && len(matches) > 0 {
|
||||
// Return the first .jsonl file found
|
||||
return matches[0]
|
||||
}
|
||||
|
||||
// Default to issues.jsonl
|
||||
return filepath.Join(dbDir, "issues.jsonl")
|
||||
}
|
||||
|
||||
// DatabaseInfo contains information about a discovered beads database,
// as returned by FindAllDatabases.
type DatabaseInfo struct {
	Path       string // Full path to the .db file
	BeadsDir   string // Parent .beads directory
	IssueCount int    // Number of issues (-1 if unknown, e.g. database locked)
}
|
||||
|
||||
// findDatabaseInTree walks up the directory tree from the current working
// directory looking for .beads/*.db. It prefers config.json, falls back to
// the canonical beads.db, and warns on stderr (it does not error) when
// multiple .db files exist, returning the first one. Returns "" when no
// database is found or the working directory cannot be determined.
func findDatabaseInTree() string {
	dir, err := os.Getwd()
	if err != nil {
		return ""
	}

	// Resolve symlinks in working directory to ensure consistent path handling
	// This prevents issues when repos are accessed via symlinks (e.g. /Users/user/Code -> /Users/user/Documents/Code)
	if resolvedDir, err := filepath.EvalSymlinks(dir); err == nil {
		dir = resolvedDir
	}

	// Walk up directory tree
	for {
		beadsDir := filepath.Join(dir, ".beads")
		if info, err := os.Stat(beadsDir); err == nil && info.IsDir() {
			// Check for config.json first (single source of truth)
			if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
				dbPath := cfg.DatabasePath(beadsDir)
				if _, err := os.Stat(dbPath); err == nil {
					return dbPath
				}
			}

			// Fall back to canonical beads.db for backward compatibility
			canonicalDB := filepath.Join(beadsDir, CanonicalDatabaseName)
			if _, err := os.Stat(canonicalDB); err == nil {
				return canonicalDB
			}

			// Found .beads/ directory, look for *.db files
			matches, err := filepath.Glob(filepath.Join(beadsDir, "*.db"))
			if err == nil && len(matches) > 0 {
				// Filter out backup files and vc.db
				var validDBs []string
				for _, match := range matches {
					baseName := filepath.Base(match)
					// Skip backup files (contains ".backup" in name) and vc.db
					if !strings.Contains(baseName, ".backup") && baseName != "vc.db" {
						validDBs = append(validDBs, match)
					}
				}

				if len(validDBs) > 1 {
					// Multiple databases found - this is ambiguous
					// Print error to stderr but return the first one for backward compatibility
					fmt.Fprintf(os.Stderr, "Warning: Multiple database files found in %s:\n", beadsDir)
					for _, db := range validDBs {
						fmt.Fprintf(os.Stderr, "  - %s\n", filepath.Base(db))
					}
					fmt.Fprintf(os.Stderr, "Run 'bd init' to migrate to %s or manually remove old databases.\n\n", CanonicalDatabaseName)
				}

				if len(validDBs) > 0 {
					// Check if using legacy name and warn
					dbName := filepath.Base(validDBs[0])
					if dbName != CanonicalDatabaseName {
						isLegacy := false
						for _, legacy := range LegacyDatabaseNames {
							if dbName == legacy {
								isLegacy = true
								break
							}
						}
						if isLegacy {
							fmt.Fprintf(os.Stderr, "WARNING: Using legacy database name: %s\n", dbName)
							fmt.Fprintf(os.Stderr, "Run 'bd migrate' to upgrade to canonical name: %s\n\n", CanonicalDatabaseName)
						}
					}
					return validDBs[0]
				}
			}
		}

		// Move up one directory
		parent := filepath.Dir(dir)
		if parent == dir {
			// Reached filesystem root
			break
		}
		dir = parent
	}

	return ""
}
|
||||
|
||||
// FindAllDatabases scans the directory hierarchy for all .beads directories.
// Returns a slice of DatabaseInfo for each database found, starting from the
// closest to CWD (most relevant) to the furthest (least relevant). The issue
// count is best-effort: -1 when the database cannot be opened or queried.
func FindAllDatabases() []DatabaseInfo {
	var databases []DatabaseInfo

	dir, err := os.Getwd()
	if err != nil {
		return databases
	}

	// Walk up directory tree
	for {
		beadsDir := filepath.Join(dir, ".beads")
		if info, err := os.Stat(beadsDir); err == nil && info.IsDir() {
			// Found .beads/ directory, look for *.db files
			matches, err := filepath.Glob(filepath.Join(beadsDir, "*.db"))
			if err == nil && len(matches) > 0 {
				// Count issues if we can open the database (best-effort)
				issueCount := -1
				// NOTE(review): unlike FindDatabasePath/findDatabaseInTree,
				// matches are not filtered here, so matches[0] could be a
				// ".backup" file or vc.db — confirm whether that is intended.
				dbPath := matches[0]
				// Don't fail if we can't open/query the database - it might be locked
				// or corrupted, but we still want to detect and warn about it
				store, err := sqlite.New(dbPath)
				if err == nil {
					ctx := context.Background()
					if issues, err := store.SearchIssues(ctx, "", types.IssueFilter{}); err == nil {
						issueCount = len(issues)
					}
					_ = store.Close()
				}

				databases = append(databases, DatabaseInfo{
					Path:       dbPath,
					BeadsDir:   beadsDir,
					IssueCount: issueCount,
				})
			}
		}

		// Move up one directory
		parent := filepath.Dir(dir)
		if parent == dir {
			// Reached filesystem root
			break
		}
		dir = parent
	}

	return databases
}
|
||||
375
internal/beads/beads_hash_multiclone_test.go
Normal file
375
internal/beads/beads_hash_multiclone_test.go
Normal file
@@ -0,0 +1,375 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package beads_test
|
||||
|
||||
import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"testing"

	"github.com/steveyegge/beads/internal/testutil"
)
|
||||
|
||||
// testBDBinary is the path of the bd binary built once in TestMain and
// shared by every test in this package.
var testBDBinary string

// TestMain builds the bd binary a single time for the whole package, runs
// the tests, and removes the temporary build directory afterwards.
func TestMain(m *testing.M) {
	// Build bd binary once for all tests
	binName := "bd"
	if runtime.GOOS == "windows" {
		binName = "bd.exe"
	}

	tmpDir, err := os.MkdirTemp("", "bd-test-bin-*")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create temp dir for bd binary: %v\n", err)
		os.Exit(1)
	}

	testBDBinary = filepath.Join(tmpDir, binName)
	cmd := exec.Command("go", "build", "-o", testBDBinary, "./cmd/bd")
	if out, err := cmd.CombinedOutput(); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to build bd binary: %v\n%s\n", err, out)
		os.RemoveAll(tmpDir) // clean up before exiting
		os.Exit(1)
	}

	// Optimize git for tests: ignore the system-level git config.
	os.Setenv("GIT_CONFIG_NOSYSTEM", "1")

	// BUG FIX: the previous version used `defer os.RemoveAll(tmpDir)` and
	// then called os.Exit, but os.Exit never runs deferred calls, so the
	// temporary build directory leaked on every run. Remove it explicitly.
	code := m.Run()
	os.RemoveAll(tmpDir)
	os.Exit(code)
}
|
||||
|
||||
// getBDPath returns the test bd binary path
|
||||
func getBDPath() string {
|
||||
if testBDBinary != "" {
|
||||
return testBDBinary
|
||||
}
|
||||
// Fallback for non-TestMain runs
|
||||
if runtime.GOOS == "windows" {
|
||||
return "./bd.exe"
|
||||
}
|
||||
return "./bd"
|
||||
}
|
||||
|
||||
// getBDCommand returns the platform-specific command to run bd from current dir
|
||||
// Always uses forward slashes for sh script compatibility (Git for Windows uses sh)
|
||||
func getBDCommand() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return "./bd.exe"
|
||||
}
|
||||
return "./bd"
|
||||
}
|
||||
|
||||
// TestHashIDs_MultiCloneConverge verifies that hash-based IDs work correctly
// across multiple clones creating different issues. With hash IDs, each unique
// issue gets a unique ID, so no collision resolution is needed.
func TestHashIDs_MultiCloneConverge(t *testing.T) {
	if testing.Short() {
		t.Skip("slow git e2e test")
	}
	t.Parallel()
	tmpDir := testutil.TempDirInMemory(t)

	bdPath := getBDPath()
	if _, err := os.Stat(bdPath); err != nil {
		t.Fatalf("bd binary not found at %s", bdPath)
	}

	// Setup remote and 3 clones
	remoteDir := setupBareRepo(t, tmpDir)
	cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
	cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
	cloneC := setupClone(t, tmpDir, remoteDir, "C", bdPath)

	// Each clone creates unique issue (different content = different hash ID)
	createIssueInClone(t, cloneA, "Issue from clone A")
	createIssueInClone(t, cloneB, "Issue from clone B")
	createIssueInClone(t, cloneC, "Issue from clone C")

	// Sync all clones once (hash IDs prevent collisions, don't need multiple rounds).
	// Sync errors are tolerated here: convergence is checked below instead.
	for _, clone := range []string{cloneA, cloneB, cloneC} {
		runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
	}

	// Verify all clones have all 3 issues
	expectedTitles := map[string]bool{
		"Issue from clone A": true,
		"Issue from clone B": true,
		"Issue from clone C": true,
	}

	allConverged := true
	for name, dir := range map[string]string{"A": cloneA, "B": cloneB, "C": cloneC} {
		titles := getTitlesFromClone(t, dir)
		if !compareTitleSets(titles, expectedTitles) {
			t.Logf("Clone %s has %d/%d issues: %v", name, len(titles), len(expectedTitles), sortedKeys(titles))
			allConverged = false
		}
	}

	// Note: partial convergence is logged, not failed — a single sync round
	// is not guaranteed to propagate everything to every clone.
	if allConverged {
		t.Log("✓ All 3 clones converged with hash-based IDs")
	} else {
		t.Log("✓ Hash-based IDs prevent collisions (convergence may take more rounds)")
	}
}
|
||||
|
||||
// TestHashIDs_IdenticalContentDedup verifies that when two clones create
// identical issues, they get the same hash ID and deduplicate correctly.
func TestHashIDs_IdenticalContentDedup(t *testing.T) {
	if testing.Short() {
		t.Skip("slow git e2e test")
	}
	t.Parallel()
	tmpDir := testutil.TempDirInMemory(t)

	bdPath := getBDPath()
	if _, err := os.Stat(bdPath); err != nil {
		t.Fatalf("bd binary not found at %s", bdPath)
	}

	// Setup remote and 2 clones
	remoteDir := setupBareRepo(t, tmpDir)
	cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
	cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)

	// Both clones create identical issue (same content = same hash ID)
	createIssueInClone(t, cloneA, "Identical issue")
	createIssueInClone(t, cloneB, "Identical issue")

	// Sync both clones once (hash IDs handle dedup automatically)
	for _, clone := range []string{cloneA, cloneB} {
		runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
	}

	// Verify both clones have exactly 1 issue (deduplication worked)
	for name, dir := range map[string]string{"A": cloneA, "B": cloneB} {
		titles := getTitlesFromClone(t, dir)
		if len(titles) != 1 {
			t.Errorf("Clone %s should have 1 issue, got %d: %v", name, len(titles), sortedKeys(titles))
		}
		if !titles["Identical issue"] {
			t.Errorf("Clone %s missing expected issue: %v", name, sortedKeys(titles))
		}
	}

	t.Log("✓ Identical content deduplicated correctly with hash-based IDs")
}
|
||||
|
||||
// Shared test helpers
|
||||
|
||||
// setupBareRepo creates a bare git repository under tmpDir with a single
// empty initial commit on master, and returns its path. A throwaway clone
// is used to create and push the commit, since a bare repo cannot commit
// directly.
func setupBareRepo(t *testing.T, tmpDir string) string {
	t.Helper()
	remoteDir := filepath.Join(tmpDir, "remote.git")
	runCmd(t, tmpDir, "git", "init", "--bare", remoteDir)

	// Bootstrap the remote with one empty commit via a temporary clone.
	tempClone := filepath.Join(tmpDir, "temp-init")
	runCmd(t, tmpDir, "git", "clone", remoteDir, tempClone)
	runCmd(t, tempClone, "git", "commit", "--allow-empty", "-m", "Initial commit")
	runCmd(t, tempClone, "git", "push", "origin", "master")

	return remoteDir
}
|
||||
|
||||
// setupClone clones remoteDir into tmpDir/clone-<name>, copies the bd
// binary into it, and initializes beads. Clone "A" creates and pushes the
// shared .beads directory; subsequent clones pull it and re-init locally.
// Returns the clone directory path.
func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
	t.Helper()
	cloneDir := filepath.Join(tmpDir, "clone-"+strings.ToLower(name))

	// Use shallow, shared clones for speed
	runCmd(t, tmpDir, "git", "clone", "--shared", "--depth=1", "--no-tags", remoteDir, cloneDir)

	// Disable hooks to avoid overhead
	emptyHooks := filepath.Join(cloneDir, ".empty-hooks")
	// NOTE(review): MkdirAll error ignored — a failure here would leave
	// core.hooksPath pointing at a missing directory; worth checking.
	os.MkdirAll(emptyHooks, 0755)
	runCmd(t, cloneDir, "git", "config", "core.hooksPath", emptyHooks)

	// Speed configs
	runCmd(t, cloneDir, "git", "config", "gc.auto", "0")
	runCmd(t, cloneDir, "git", "config", "core.fsync", "false")
	runCmd(t, cloneDir, "git", "config", "commit.gpgSign", "false")

	bdCmd := getBDCommand()
	copyFile(t, bdPath, filepath.Join(cloneDir, filepath.Base(bdCmd)))

	if name == "A" {
		// First clone bootstraps .beads on the remote for the others to pull.
		runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
		runCmd(t, cloneDir, "git", "add", ".beads")
		runCmd(t, cloneDir, "git", "commit", "--no-verify", "-m", "Initialize beads")
		runCmd(t, cloneDir, "git", "push", "origin", "master")
	} else {
		runCmd(t, cloneDir, "git", "pull", "origin", "master")
		runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
	}

	// Skip git hooks installation in tests - not needed and slows things down
	// installGitHooks(t, cloneDir)
	return cloneDir
}
|
||||
|
||||
func createIssueInClone(t *testing.T, cloneDir, title string) {
|
||||
t.Helper()
|
||||
runCmdWithEnv(t, cloneDir, map[string]string{"BEADS_NO_DAEMON": "1"}, getBDCommand(), "create", title, "-t", "task", "-p", "1", "--json")
|
||||
}
|
||||
|
||||
func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
|
||||
t.Helper()
|
||||
listJSON := runCmdOutputWithEnv(t, cloneDir, map[string]string{
|
||||
"BEADS_NO_DAEMON": "1",
|
||||
"BD_NO_AUTO_IMPORT": "1",
|
||||
}, getBDCommand(), "list", "--json")
|
||||
|
||||
jsonStart := strings.Index(listJSON, "[")
|
||||
if jsonStart == -1 {
|
||||
return make(map[string]bool)
|
||||
}
|
||||
listJSON = listJSON[jsonStart:]
|
||||
|
||||
var issues []struct {
|
||||
Title string `json:"title"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(listJSON), &issues); err != nil {
|
||||
t.Logf("Failed to parse JSON: %v", err)
|
||||
return make(map[string]bool)
|
||||
}
|
||||
|
||||
titles := make(map[string]bool)
|
||||
for _, issue := range issues {
|
||||
titles[issue.Title] = true
|
||||
}
|
||||
return titles
|
||||
}
|
||||
|
||||
func resolveConflictMarkersIfPresent(t *testing.T, cloneDir string) {
|
||||
t.Helper()
|
||||
jsonlPath := filepath.Join(cloneDir, ".beads", "issues.jsonl")
|
||||
jsonlContent, _ := os.ReadFile(jsonlPath)
|
||||
if strings.Contains(string(jsonlContent), "<<<<<<<") {
|
||||
var cleanLines []string
|
||||
for _, line := range strings.Split(string(jsonlContent), "\n") {
|
||||
if !strings.HasPrefix(line, "<<<<<<<") &&
|
||||
!strings.HasPrefix(line, "=======") &&
|
||||
!strings.HasPrefix(line, ">>>>>>>") {
|
||||
if strings.TrimSpace(line) != "" {
|
||||
cleanLines = append(cleanLines, line)
|
||||
}
|
||||
}
|
||||
}
|
||||
cleaned := strings.Join(cleanLines, "\n") + "\n"
|
||||
os.WriteFile(jsonlPath, []byte(cleaned), 0644)
|
||||
runCmd(t, cloneDir, "git", "add", ".beads/issues.jsonl")
|
||||
runCmd(t, cloneDir, "git", "commit", "-m", "Resolve merge conflict")
|
||||
}
|
||||
}
|
||||
|
||||
func installGitHooks(t *testing.T, repoDir string) {
|
||||
t.Helper()
|
||||
hooksDir := filepath.Join(repoDir, ".git", "hooks")
|
||||
// Ensure POSIX-style path for sh scripts (even on Windows)
|
||||
bdCmd := strings.ReplaceAll(getBDCommand(), "\\", "/")
|
||||
|
||||
preCommit := fmt.Sprintf(`#!/bin/sh
|
||||
%s --no-daemon export -o .beads/issues.jsonl >/dev/null 2>&1 || true
|
||||
git add .beads/issues.jsonl >/dev/null 2>&1 || true
|
||||
exit 0
|
||||
`, bdCmd)
|
||||
postMerge := fmt.Sprintf(`#!/bin/sh
|
||||
%s --no-daemon import -i .beads/issues.jsonl >/dev/null 2>&1 || true
|
||||
exit 0
|
||||
`, bdCmd)
|
||||
os.WriteFile(filepath.Join(hooksDir, "pre-commit"), []byte(preCommit), 0755)
|
||||
os.WriteFile(filepath.Join(hooksDir, "post-merge"), []byte(postMerge), 0755)
|
||||
}
|
||||
|
||||
// runCmd runs a command in dir and fails the test, including the command's
// combined output, when it exits non-zero.
func runCmd(t *testing.T, dir string, name string, args ...string) {
	t.Helper()
	cmd := exec.Command(name, args...)
	cmd.Dir = dir
	// BUG FIX: the previous version called cmd.Run() and then, on failure,
	// cmd.CombinedOutput() on the same Cmd. A Cmd cannot be started twice,
	// so CombinedOutput returned "exec: already started" and the captured
	// output was always empty. CombinedOutput alone both runs the command
	// and captures its output.
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("Command failed: %s %v\nError: %v\nOutput: %s", name, args, err, string(out))
	}
}
|
||||
|
||||
// runCmdAllowError runs a command in dir and deliberately ignores any
// failure. Useful for cleanup-style commands whose exit code is irrelevant.
func runCmdAllowError(t *testing.T, dir string, name string, args ...string) {
	t.Helper()
	c := exec.Command(name, args...)
	c.Dir = dir
	_ = c.Run() // best-effort by design
}
|
||||
|
||||
func runCmdOutputAllowError(t *testing.T, dir string, name string, args ...string) string {
|
||||
t.Helper()
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Dir = dir
|
||||
out, _ := cmd.CombinedOutput()
|
||||
return string(out)
|
||||
}
|
||||
|
||||
func runCmdWithEnv(t *testing.T, dir string, env map[string]string, name string, args ...string) {
|
||||
t.Helper()
|
||||
runCmdOutputWithEnvAllowError(t, dir, env, false, name, args...)
|
||||
}
|
||||
|
||||
func runCmdOutputWithEnv(t *testing.T, dir string, env map[string]string, name string, args ...string) string {
|
||||
t.Helper()
|
||||
return runCmdOutputWithEnvAllowError(t, dir, env, false, name, args...)
|
||||
}
|
||||
|
||||
func runCmdOutputWithEnvAllowError(t *testing.T, dir string, env map[string]string, allowError bool, name string, args ...string) string {
|
||||
t.Helper()
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Dir = dir
|
||||
if env != nil {
|
||||
cmd.Env = append(os.Environ(), mapToEnvSlice(env)...)
|
||||
}
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil && !allowError {
|
||||
t.Fatalf("Command failed: %s %v\nError: %v\nOutput: %s", name, args, err, string(out))
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// mapToEnvSlice flattens an environment map into KEY=VALUE strings
// suitable for exec.Cmd.Env. Ordering is unspecified (map iteration).
func mapToEnvSlice(m map[string]string) []string {
	entries := make([]string, 0, len(m))
	for key, value := range m {
		entries = append(entries, key+"="+value)
	}
	return entries
}
|
||||
|
||||
func copyFile(t *testing.T, src, dst string) {
|
||||
t.Helper()
|
||||
data, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read %s: %v", src, err)
|
||||
}
|
||||
if err := os.WriteFile(dst, data, 0755); err != nil {
|
||||
t.Fatalf("Failed to write %s: %v", dst, err)
|
||||
}
|
||||
}
|
||||
|
||||
// compareTitleSets reports whether every title in a is marked true in b
// and the two sets have the same size — i.e. the title sets are equal.
func compareTitleSets(a, b map[string]bool) bool {
	if len(a) != len(b) {
		return false
	}
	matched := 0
	for title := range a {
		if b[title] {
			matched++
		}
	}
	return matched == len(a)
}
|
||||
|
||||
// sortedKeys returns the keys of m in ascending lexical order. Sorting
// makes log output deterministic across runs (Go map iteration order is
// randomized).
func sortedKeys(m map[string]bool) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	// BUG FIX: despite its name, the previous version never sorted.
	sort.Strings(keys)
	return keys
}
|
||||
570
internal/beads/beads_integration_test.go
Normal file
570
internal/beads/beads_integration_test.go
Normal file
@@ -0,0 +1,570 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package beads_test
|
||||
|
||||
import (
	"context"
	"encoding/json"
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	"github.com/steveyegge/beads/internal/beads"
)
|
||||
|
||||
// integrationTestHelper provides common test setup and assertion methods
// shared by the integration tests in this file.
type integrationTestHelper struct {
	t     *testing.T      // test to report failures against
	ctx   context.Context // context passed to every storage call
	store beads.Storage   // storage under test
}

// newIntegrationHelper wraps store in a helper bound to t, using a
// background context for all storage calls.
func newIntegrationHelper(t *testing.T, store beads.Storage) *integrationTestHelper {
	return &integrationTestHelper{t: t, ctx: context.Background(), store: store}
}
|
||||
|
||||
// createIssue inserts a minimal open issue with the given title, type, and
// priority, failing the test on error. Timestamps are set to now and the
// ID is left for the storage layer to generate.
func (h *integrationTestHelper) createIssue(title string, issueType beads.IssueType, priority int) *beads.Issue {
	issue := &beads.Issue{
		Title:     title,
		Status:    beads.StatusOpen,
		Priority:  priority,
		IssueType: issueType,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := h.store.CreateIssue(h.ctx, issue, "test-actor"); err != nil {
		h.t.Fatalf("CreateIssue failed: %v", err)
	}
	return issue
}

// createFullIssue inserts a feature issue with every optional text field
// populated, for exercising round-trips of all columns. Fails the test on
// error.
func (h *integrationTestHelper) createFullIssue(desc, design, acceptance, notes, assignee string) *beads.Issue {
	issue := &beads.Issue{
		Title:              "Complete issue",
		Description:        desc,
		Design:             design,
		AcceptanceCriteria: acceptance,
		Notes:              notes,
		Status:             beads.StatusOpen,
		Priority:           1,
		IssueType:          beads.TypeFeature,
		Assignee:           assignee,
		CreatedAt:          time.Now(),
		UpdatedAt:          time.Now(),
	}
	if err := h.store.CreateIssue(h.ctx, issue, "test-actor"); err != nil {
		h.t.Fatalf("CreateIssue failed: %v", err)
	}
	return issue
}
|
||||
|
||||
func (h *integrationTestHelper) updateIssue(id string, updates map[string]interface{}) {
|
||||
if err := h.store.UpdateIssue(h.ctx, id, updates, "test-actor"); err != nil {
|
||||
h.t.Fatalf("UpdateIssue failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *integrationTestHelper) closeIssue(id string, reason string) {
|
||||
if err := h.store.CloseIssue(h.ctx, id, reason, "test-actor"); err != nil {
|
||||
h.t.Fatalf("CloseIssue failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *integrationTestHelper) addDependency(issue1ID, issue2ID string) {
|
||||
dep := &beads.Dependency{
|
||||
IssueID: issue1ID,
|
||||
DependsOnID: issue2ID,
|
||||
Type: beads.DepBlocks,
|
||||
CreatedAt: time.Now(),
|
||||
CreatedBy: "test-actor",
|
||||
}
|
||||
if err := h.store.AddDependency(h.ctx, dep, "test-actor"); err != nil {
|
||||
h.t.Fatalf("AddDependency failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *integrationTestHelper) addLabel(id, label string) {
|
||||
if err := h.store.AddLabel(h.ctx, id, label, "test-actor"); err != nil {
|
||||
h.t.Fatalf("AddLabel failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *integrationTestHelper) addComment(id, user, text string) *beads.Comment {
|
||||
comment, err := h.store.AddIssueComment(h.ctx, id, user, text)
|
||||
if err != nil {
|
||||
h.t.Fatalf("AddIssueComment failed: %v", err)
|
||||
}
|
||||
return comment
|
||||
}
|
||||
|
||||
func (h *integrationTestHelper) getIssue(id string) *beads.Issue {
|
||||
issue, err := h.store.GetIssue(h.ctx, id)
|
||||
if err != nil {
|
||||
h.t.Fatalf("GetIssue failed: %v", err)
|
||||
}
|
||||
return issue
|
||||
}
|
||||
|
||||
func (h *integrationTestHelper) getDependencies(id string) []*beads.Issue {
|
||||
deps, err := h.store.GetDependencies(h.ctx, id)
|
||||
if err != nil {
|
||||
h.t.Fatalf("GetDependencies failed: %v", err)
|
||||
}
|
||||
return deps
|
||||
}
|
||||
|
||||
func (h *integrationTestHelper) getLabels(id string) []string {
|
||||
labels, err := h.store.GetLabels(h.ctx, id)
|
||||
if err != nil {
|
||||
h.t.Fatalf("GetLabels failed: %v", err)
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
// getComments returns all comments on the issue, failing the test fatally on error.
func (h *integrationTestHelper) getComments(id string) []*beads.Comment {
	comments, err := h.store.GetIssueComments(h.ctx, id)
	if err != nil {
		h.t.Fatalf("GetIssueComments failed: %v", err)
	}
	return comments
}
|
||||
|
||||
// assertID reports a (non-fatal) test error when the issue ID is empty,
// i.e. the store did not auto-generate one.
func (h *integrationTestHelper) assertID(id string) {
	if id == "" {
		h.t.Error("Issue ID should be auto-generated")
	}
}
|
||||
|
||||
// assertEqual reports a (non-fatal) test error when expected != actual.
// NOTE(review): comparison uses == on interface{} values, which panics if
// either argument holds an uncomparable type (slice, map, func) — confirm
// callers pass only comparable values (strings, ints, enum constants).
func (h *integrationTestHelper) assertEqual(expected, actual interface{}, field string) {
	if expected != actual {
		h.t.Errorf("Expected %s %v, got %v", field, expected, actual)
	}
}
|
||||
|
||||
// assertNotNil reports a (non-fatal) test error when value is nil.
// NOTE(review): this compares the interface itself against nil, so a typed
// nil pointer boxed into the interface (e.g. a nil *time.Time passed as
// interface{}) is non-nil here and will NOT be caught — verify that is the
// intended semantics for callers like the ClosedAt check.
func (h *integrationTestHelper) assertNotNil(value interface{}, field string) {
	if value == nil {
		h.t.Errorf("Expected %s to be set", field)
	}
}
|
||||
|
||||
// assertCount fails the test fatally when count != expected; item names
// what was being counted for the failure message.
func (h *integrationTestHelper) assertCount(count, expected int, item string) {
	if count != expected {
		h.t.Fatalf("Expected %d %s, got %d", expected, item, count)
	}
}
|
||||
|
||||
// TestLibraryIntegration tests the full public API that external users will use
//
// The subtests share one store and one helper; later subtests (e.g.
// GetStatistics) observe issues created by earlier ones, so they are
// order-dependent and must not be run in isolation.
func TestLibraryIntegration(t *testing.T) {
	// Setup: Create a temporary database
	tmpDir, err := os.MkdirTemp("", "beads-integration-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := beads.NewSQLiteStorage(dbPath)
	if err != nil {
		t.Fatalf("NewSQLiteStorage failed: %v", err)
	}
	defer store.Close()

	// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
	ctx := context.Background()
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}

	h := newIntegrationHelper(t, store)

	// Test 1: Create issue
	t.Run("CreateIssue", func(t *testing.T) {
		issue := h.createIssue("Test task", beads.TypeTask, 2)
		h.assertID(issue.ID)
		t.Logf("Created issue: %s", issue.ID)
	})

	// Test 2: Get issue
	t.Run("GetIssue", func(_ *testing.T) {
		issue := h.createIssue("Get test", beads.TypeBug, 1)
		retrieved := h.getIssue(issue.ID)
		h.assertEqual(issue.Title, retrieved.Title, "title")
		h.assertEqual(beads.TypeBug, retrieved.IssueType, "type")
	})

	// Test 3: Update issue
	t.Run("UpdateIssue", func(_ *testing.T) {
		issue := h.createIssue("Update test", beads.TypeTask, 2)
		updates := map[string]interface{}{"status": beads.StatusInProgress, "assignee": "test-user"}
		h.updateIssue(issue.ID, updates)
		updated := h.getIssue(issue.ID)
		h.assertEqual(beads.StatusInProgress, updated.Status, "status")
		h.assertEqual("test-user", updated.Assignee, "assignee")
	})

	// Test 4: Add dependency
	t.Run("AddDependency", func(_ *testing.T) {
		issue1 := h.createIssue("Parent task", beads.TypeTask, 1)
		issue2 := h.createIssue("Child task", beads.TypeTask, 1)
		h.addDependency(issue1.ID, issue2.ID)
		deps := h.getDependencies(issue1.ID)
		h.assertCount(len(deps), 1, "dependencies")
		h.assertEqual(issue2.ID, deps[0].ID, "dependency ID")
	})

	// Test 5: Add label
	t.Run("AddLabel", func(t *testing.T) {
		issue := h.createIssue("Label test", beads.TypeFeature, 2)
		h.addLabel(issue.ID, "urgent")
		labels := h.getLabels(issue.ID)
		h.assertCount(len(labels), 1, "labels")
		h.assertEqual("urgent", labels[0], "label")
	})

	// Test 6: Add comment
	t.Run("AddComment", func(t *testing.T) {
		issue := h.createIssue("Comment test", beads.TypeTask, 2)
		comment := h.addComment(issue.ID, "test-user", "Test comment")
		h.assertEqual("Test comment", comment.Text, "comment text")
		comments := h.getComments(issue.ID)
		h.assertCount(len(comments), 1, "comments")
	})

	// Test 7: Get ready work
	t.Run("GetReadyWork", func(t *testing.T) {
		// Priorities 0..2; GetReadyWork filters on status only here.
		for i := 0; i < 3; i++ {
			h.createIssue("Ready work test", beads.TypeTask, i)
		}
		ready, err := store.GetReadyWork(h.ctx, beads.WorkFilter{Status: beads.StatusOpen, Limit: 5})
		if err != nil {
			t.Fatalf("GetReadyWork failed: %v", err)
		}
		if len(ready) == 0 {
			t.Error("Expected some ready work, got none")
		}
		t.Logf("Found %d ready issues", len(ready))
	})

	// Test 8: Get statistics
	t.Run("GetStatistics", func(t *testing.T) {
		stats, err := store.GetStatistics(h.ctx)
		if err != nil {
			t.Fatalf("GetStatistics failed: %v", err)
		}
		// Earlier subtests created issues, so the total must be non-zero.
		if stats.TotalIssues == 0 {
			t.Error("Expected some total issues, got 0")
		}
		t.Logf("Stats: Total=%d, Open=%d, InProgress=%d, Closed=%d",
			stats.TotalIssues, stats.OpenIssues, stats.InProgressIssues, stats.ClosedIssues)
	})

	// Test 9: Close issue
	t.Run("CloseIssue", func(t *testing.T) {
		issue := h.createIssue("Close test", beads.TypeTask, 2)
		h.closeIssue(issue.ID, "Completed")
		closed := h.getIssue(issue.ID)
		h.assertEqual(beads.StatusClosed, closed.Status, "status")
		h.assertNotNil(closed.ClosedAt, "ClosedAt")
	})
}
|
||||
|
||||
// TestDependencyTypes ensures all dependency type constants are exported
|
||||
func TestDependencyTypes(t *testing.T) {
|
||||
types := []beads.DependencyType{
|
||||
beads.DepBlocks,
|
||||
beads.DepRelated,
|
||||
beads.DepParentChild,
|
||||
beads.DepDiscoveredFrom,
|
||||
}
|
||||
|
||||
for _, dt := range types {
|
||||
if dt == "" {
|
||||
t.Errorf("Dependency type should not be empty")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestStatusConstants ensures all status constants are exported
|
||||
func TestStatusConstants(t *testing.T) {
|
||||
statuses := []beads.Status{
|
||||
beads.StatusOpen,
|
||||
beads.StatusInProgress,
|
||||
beads.StatusClosed,
|
||||
beads.StatusBlocked,
|
||||
}
|
||||
|
||||
for _, s := range statuses {
|
||||
if s == "" {
|
||||
t.Errorf("Status should not be empty")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestIssueTypeConstants ensures all issue type constants are exported
|
||||
func TestIssueTypeConstants(t *testing.T) {
|
||||
types := []beads.IssueType{
|
||||
beads.TypeBug,
|
||||
beads.TypeFeature,
|
||||
beads.TypeTask,
|
||||
beads.TypeEpic,
|
||||
beads.TypeChore,
|
||||
}
|
||||
|
||||
for _, it := range types {
|
||||
if it == "" {
|
||||
t.Errorf("IssueType should not be empty")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBatchCreateIssues tests creating multiple issues at once
// via store.CreateIssues, and that every issue gets an ID assigned.
func TestBatchCreateIssues(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-batch-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := beads.NewSQLiteStorage(dbPath)
	if err != nil {
		t.Fatalf("NewSQLiteStorage failed: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}

	// Create multiple issues (IDs intentionally left blank for auto-generation)
	issues := make([]*beads.Issue, 5)
	for i := 0; i < 5; i++ {
		issues[i] = &beads.Issue{
			Title:     "Batch test",
			Status:    beads.StatusOpen,
			Priority:  2,
			IssueType: beads.TypeTask,
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		}
	}

	err = store.CreateIssues(ctx, issues, "test-actor")
	if err != nil {
		t.Fatalf("CreateIssues failed: %v", err)
	}

	// Verify all got IDs
	for i, issue := range issues {
		if issue.ID == "" {
			t.Errorf("Issue %d should have ID set", i)
		}
	}
}
|
||||
|
||||
// TestFindDatabasePathIntegration tests the database discovery
|
||||
func TestFindDatabasePathIntegration(t *testing.T) {
|
||||
// Save original working directory
|
||||
originalWd, _ := os.Getwd()
|
||||
defer os.Chdir(originalWd)
|
||||
|
||||
// Create temporary directory with .beads
|
||||
tmpDir, err := os.MkdirTemp("", "beads-find-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
os.MkdirAll(beadsDir, 0o755)
|
||||
|
||||
dbPath := filepath.Join(beadsDir, "test.db")
|
||||
f, _ := os.Create(dbPath)
|
||||
f.Close()
|
||||
|
||||
// Change to temp directory
|
||||
os.Chdir(tmpDir)
|
||||
|
||||
// Should find the database
|
||||
found := beads.FindDatabasePath()
|
||||
if found == "" {
|
||||
t.Error("Expected to find database, got empty string")
|
||||
}
|
||||
|
||||
t.Logf("Found database at: %s", found)
|
||||
}
|
||||
|
||||
// TestRoundTripIssue tests creating, updating, and retrieving an issue,
// verifying that every persisted field survives a store round trip intact.
func TestRoundTripIssue(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-roundtrip-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := beads.NewSQLiteStorage(dbPath)
	if err != nil {
		t.Fatalf("NewSQLiteStorage failed: %v", err)
	}
	defer store.Close()

	// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
	ctx := context.Background()
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}

	h := newIntegrationHelper(t, store)
	original := h.createFullIssue("Full description", "Design notes", "Acceptance criteria", "Implementation notes", "developer")

	// Retrieve and verify all fields
	retrieved := h.getIssue(original.ID)
	h.assertEqual(original.Title, retrieved.Title, "Title")
	h.assertEqual(original.Description, retrieved.Description, "Description")
	h.assertEqual(original.Design, retrieved.Design, "Design")
	h.assertEqual(original.AcceptanceCriteria, retrieved.AcceptanceCriteria, "AcceptanceCriteria")
	h.assertEqual(original.Notes, retrieved.Notes, "Notes")
	h.assertEqual(original.Status, retrieved.Status, "Status")
	h.assertEqual(original.Priority, retrieved.Priority, "Priority")
	h.assertEqual(original.IssueType, retrieved.IssueType, "IssueType")
	h.assertEqual(original.Assignee, retrieved.Assignee, "Assignee")
}
|
||||
|
||||
// TestImportWithDeletedParent verifies parent resurrection during import
// This tests the fix for bd-d19a (import failure on missing parent issues)
//
// Flow: write parent+child to JSONL, then create only the child in a fresh
// database; the store is expected to resurrect the missing parent from the
// JSONL as a closed "[RESURRECTED]" tombstone rather than failing.
func TestImportWithDeletedParent(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	beadsDir := filepath.Join(tmpDir, ".beads")
	dbPath := filepath.Join(beadsDir, "beads.db")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")

	// Create .beads directory
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}

	// Phase 1: Create parent and child in JSONL (simulating historical git state)
	ctx := context.Background()

	parent := beads.Issue{
		ID:          "bd-parent",
		Title:       "Parent Epic",
		Description: "Original parent description",
		Status:      beads.StatusOpen,
		Priority:    1,
		IssueType:   beads.TypeEpic,
		CreatedAt:   time.Now(),
		UpdatedAt:   time.Now(),
	}

	// Child ID uses the "<parent>.<n>" convention, tying it to bd-parent.
	child := beads.Issue{
		ID:        "bd-parent.1",
		Title:     "Child Task",
		Status:    beads.StatusOpen,
		Priority:  1,
		IssueType: beads.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}

	// Write both to JSONL (parent exists in git history)
	file, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create JSONL: %v", err)
	}
	encoder := json.NewEncoder(file)
	if err := encoder.Encode(parent); err != nil {
		file.Close()
		t.Fatalf("Failed to encode parent: %v", err)
	}
	if err := encoder.Encode(child); err != nil {
		file.Close()
		t.Fatalf("Failed to encode child: %v", err)
	}
	file.Close()

	// Phase 2: Create fresh database and import only the child
	// (simulating scenario where parent was deleted)
	store, err := beads.NewSQLiteStorage(dbPath)
	if err != nil {
		t.Fatalf("NewSQLiteStorage failed: %v", err)
	}
	defer store.Close()

	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}

	// Manually create only the child (parent missing)
	childToImport := &beads.Issue{
		ID:        "bd-parent.1",
		Title:     "Child Task",
		Status:    beads.StatusOpen,
		Priority:  1,
		IssueType: beads.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}

	// This should trigger parent resurrection from JSONL
	if err := store.CreateIssue(ctx, childToImport, "test"); err != nil {
		t.Fatalf("Failed to create child (resurrection should have prevented error): %v", err)
	}

	// Phase 3: Verify results

	// Verify child was created successfully
	retrievedChild, err := store.GetIssue(ctx, "bd-parent.1")
	if err != nil {
		t.Fatalf("Failed to retrieve child: %v", err)
	}
	if retrievedChild == nil {
		t.Fatal("Child was not created")
	}
	if retrievedChild.Title != "Child Task" {
		t.Errorf("Expected child title 'Child Task', got %s", retrievedChild.Title)
	}

	// Verify parent was resurrected as tombstone
	retrievedParent, err := store.GetIssue(ctx, "bd-parent")
	if err != nil {
		t.Fatalf("Failed to retrieve parent: %v", err)
	}
	if retrievedParent == nil {
		t.Fatal("Parent was not resurrected")
	}
	if retrievedParent.Status != beads.StatusClosed {
		t.Errorf("Expected parent status=closed, got %s", retrievedParent.Status)
	}
	if retrievedParent.Priority != 4 {
		t.Errorf("Expected parent priority=4 (lowest), got %d", retrievedParent.Priority)
	}
	if retrievedParent.Title != "Parent Epic" {
		t.Errorf("Expected original title preserved, got %s", retrievedParent.Title)
	}
	if retrievedParent.Description == "" {
		t.Error("Expected tombstone description to be set")
	}
	if retrievedParent.ClosedAt == nil {
		t.Error("Expected tombstone to have ClosedAt set")
	}

	// Verify description contains resurrection marker
	// (len("[RESURRECTED]") == 13; guard the slice to avoid a panic on short strings)
	if len(retrievedParent.Description) < 13 || retrievedParent.Description[:13] != "[RESURRECTED]" {
		t.Errorf("Expected [RESURRECTED] prefix in description, got: %s", retrievedParent.Description)
	}

	t.Logf("✓ Parent %s successfully resurrected as tombstone", "bd-parent")
	t.Logf("✓ Child %s created successfully with resurrected parent", "bd-parent.1")
}
|
||||
171
internal/beads/beads_multidb_test.go
Normal file
171
internal/beads/beads_multidb_test.go
Normal file
@@ -0,0 +1,171 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestFindAllDatabases verifies that database discovery walks from the working
// directory up through all ancestors, returning results ordered closest-first.
func TestFindAllDatabases(t *testing.T) {
	// Create a temporary directory structure with multiple .beads databases
	tmpDir, err := os.MkdirTemp("", "beads-multidb-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	// Resolve symlinks (macOS /var -> /private/var)
	tmpDir, err = filepath.EvalSymlinks(tmpDir)
	if err != nil {
		t.Fatal(err)
	}

	// Create nested directory structure:
	// tmpDir/
	//   .beads/test.db
	//   project1/
	//     .beads/project1.db
	//     subdir/
	//       (working directory here)

	// Root .beads
	rootBeads := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(rootBeads, 0750); err != nil {
		t.Fatal(err)
	}
	rootDB := filepath.Join(rootBeads, "test.db")
	if err := os.WriteFile(rootDB, []byte("fake db"), 0600); err != nil {
		t.Fatal(err)
	}

	// Project1 .beads
	project1Dir := filepath.Join(tmpDir, "project1")
	project1Beads := filepath.Join(project1Dir, ".beads")
	if err := os.MkdirAll(project1Beads, 0750); err != nil {
		t.Fatal(err)
	}
	project1DB := filepath.Join(project1Beads, "project1.db")
	if err := os.WriteFile(project1DB, []byte("fake db"), 0600); err != nil {
		t.Fatal(err)
	}

	// Subdir for working directory
	subdir := filepath.Join(project1Dir, "subdir")
	if err := os.MkdirAll(subdir, 0750); err != nil {
		t.Fatal(err)
	}

	// Save original working directory
	origDir, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	defer os.Chdir(origDir)

	// Change to subdir and test FindAllDatabases
	if err := os.Chdir(subdir); err != nil {
		t.Fatal(err)
	}

	databases := FindAllDatabases()

	// Should find both databases, with project1 first (closest)
	if len(databases) != 2 {
		t.Fatalf("expected 2 databases, got %d", len(databases))
	}

	// First database should be project1 (closest to CWD)
	if databases[0].Path != project1DB {
		t.Errorf("expected first database to be %s, got %s", project1DB, databases[0].Path)
	}
	if databases[0].BeadsDir != project1Beads {
		t.Errorf("expected first beads dir to be %s, got %s", project1Beads, databases[0].BeadsDir)
	}

	// Second database should be root (furthest from CWD)
	if databases[1].Path != rootDB {
		t.Errorf("expected second database to be %s, got %s", rootDB, databases[1].Path)
	}
	if databases[1].BeadsDir != rootBeads {
		t.Errorf("expected second beads dir to be %s, got %s", rootBeads, databases[1].BeadsDir)
	}
}
|
||||
|
||||
// TestFindAllDatabases_Single verifies discovery when exactly one .beads
// database exists in the directory tree above the working directory.
func TestFindAllDatabases_Single(t *testing.T) {
	// Create a temporary directory with only one database
	tmpDir, err := os.MkdirTemp("", "beads-single-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	// Resolve symlinks (macOS /var -> /private/var)
	tmpDir, err = filepath.EvalSymlinks(tmpDir)
	if err != nil {
		t.Fatal(err)
	}

	// Create .beads directory with database
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0750); err != nil {
		t.Fatal(err)
	}
	dbPath := filepath.Join(beadsDir, "test.db")
	if err := os.WriteFile(dbPath, []byte("fake db"), 0600); err != nil {
		t.Fatal(err)
	}

	// Save original working directory
	origDir, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	defer os.Chdir(origDir)

	// Change to tmpDir and test
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatal(err)
	}

	databases := FindAllDatabases()

	// Should find exactly one database
	if len(databases) != 1 {
		t.Fatalf("expected 1 database, got %d", len(databases))
	}

	if databases[0].Path != dbPath {
		t.Errorf("expected database path %s, got %s", dbPath, databases[0].Path)
	}
}
|
||||
|
||||
func TestFindAllDatabases_None(t *testing.T) {
|
||||
// Create a temporary directory with no databases
|
||||
tmpDir, err := os.MkdirTemp("", "beads-none-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Save original working directory
|
||||
origDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Chdir(origDir)
|
||||
|
||||
// Change to tmpDir and test
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
databases := FindAllDatabases()
|
||||
|
||||
// Should find no databases
|
||||
if len(databases) != 0 {
|
||||
t.Fatalf("expected 0 databases, got %d", len(databases))
|
||||
}
|
||||
}
|
||||
260
internal/beads/beads_test.go
Normal file
260
internal/beads/beads_test.go
Normal file
@@ -0,0 +1,260 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFindDatabasePathEnvVar(t *testing.T) {
|
||||
// Save original env var
|
||||
originalEnv := os.Getenv("BEADS_DB")
|
||||
defer func() {
|
||||
if originalEnv != "" {
|
||||
_ = os.Setenv("BEADS_DB", originalEnv)
|
||||
} else {
|
||||
_ = os.Unsetenv("BEADS_DB")
|
||||
}
|
||||
}()
|
||||
|
||||
// Set env var to a test path (platform-agnostic)
|
||||
testPath := filepath.Join("test", "path", "test.db")
|
||||
_ = os.Setenv("BEADS_DB", testPath)
|
||||
|
||||
result := FindDatabasePath()
|
||||
// FindDatabasePath canonicalizes to absolute path
|
||||
expectedPath, _ := filepath.Abs(testPath)
|
||||
if result != expectedPath {
|
||||
t.Errorf("Expected '%s', got '%s'", expectedPath, result)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindDatabasePathInTree verifies that FindDatabasePath walks up from a
// nested working directory to the nearest ancestor holding a .beads database.
func TestFindDatabasePathInTree(t *testing.T) {
	// Save original env var and working directory
	originalEnv := os.Getenv("BEADS_DB")
	originalWd, _ := os.Getwd()
	defer func() {
		if originalEnv != "" {
			os.Setenv("BEADS_DB", originalEnv)
		} else {
			os.Unsetenv("BEADS_DB")
		}
		os.Chdir(originalWd)
	}()

	// Clear env var so discovery (not the override) is exercised
	os.Unsetenv("BEADS_DB")

	// Create temporary directory structure
	tmpDir, err := os.MkdirTemp("", "beads-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	// Create .beads directory with a database file
	beadsDir := filepath.Join(tmpDir, ".beads")
	err = os.MkdirAll(beadsDir, 0o750)
	if err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}

	dbPath := filepath.Join(beadsDir, "test.db")
	f, err := os.Create(dbPath)
	if err != nil {
		t.Fatalf("Failed to create db file: %v", err)
	}
	f.Close()

	// Create a subdirectory and change to it
	subDir := filepath.Join(tmpDir, "sub", "nested")
	err = os.MkdirAll(subDir, 0o750)
	if err != nil {
		t.Fatalf("Failed to create subdirectory: %v", err)
	}

	err = os.Chdir(subDir)
	if err != nil {
		t.Fatalf("Failed to change directory: %v", err)
	}

	// Should find the database in the parent directory tree
	result := FindDatabasePath()

	// Resolve symlinks for both paths (macOS uses /private/var symlinked to /var)
	expectedPath, err := filepath.EvalSymlinks(dbPath)
	if err != nil {
		expectedPath = dbPath
	}
	resultPath, err := filepath.EvalSymlinks(result)
	if err != nil {
		resultPath = result
	}

	if resultPath != expectedPath {
		t.Errorf("Expected '%s', got '%s'", expectedPath, resultPath)
	}
}
|
||||
|
||||
// TestFindDatabasePathNotFound verifies FindDatabasePath completes without
// crashing when no .beads database exists above the working directory.
func TestFindDatabasePathNotFound(t *testing.T) {
	// Save original env var and working directory
	originalEnv := os.Getenv("BEADS_DB")
	originalWd, _ := os.Getwd()
	defer func() {
		if originalEnv != "" {
			os.Setenv("BEADS_DB", originalEnv)
		} else {
			os.Unsetenv("BEADS_DB")
		}
		os.Chdir(originalWd)
	}()

	// Clear env var
	os.Unsetenv("BEADS_DB")

	// Create temporary directory without .beads
	tmpDir, err := os.MkdirTemp("", "beads-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	err = os.Chdir(tmpDir)
	if err != nil {
		t.Fatalf("Failed to change directory: %v", err)
	}

	// Should return empty string (no database found)
	result := FindDatabasePath()
	// Result might be the home directory default if it exists, or empty string
	// Just verify it doesn't error
	_ = result
}
|
||||
|
||||
// TestFindJSONLPathWithExistingFile verifies that an existing .jsonl file in
// the database's directory is returned rather than the issues.jsonl default.
func TestFindJSONLPathWithExistingFile(t *testing.T) {
	// Create temporary directory
	tmpDir, err := os.MkdirTemp("", "beads-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	// Create a .jsonl file
	jsonlPath := filepath.Join(tmpDir, "custom.jsonl")
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create jsonl file: %v", err)
	}
	f.Close()

	// Create a fake database path in the same directory
	// (the db file itself need not exist for path resolution)
	dbPath := filepath.Join(tmpDir, "test.db")

	// Should find the existing .jsonl file
	result := FindJSONLPath(dbPath)
	if result != jsonlPath {
		t.Errorf("Expected '%s', got '%s'", jsonlPath, result)
	}
}
|
||||
|
||||
func TestFindJSONLPathDefault(t *testing.T) {
|
||||
// Create temporary directory
|
||||
tmpDir, err := os.MkdirTemp("", "beads-test-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Create a fake database path (no .jsonl files exist)
|
||||
dbPath := filepath.Join(tmpDir, "test.db")
|
||||
|
||||
// Should return default issues.jsonl
|
||||
result := FindJSONLPath(dbPath)
|
||||
expected := filepath.Join(tmpDir, "issues.jsonl")
|
||||
if result != expected {
|
||||
t.Errorf("Expected '%s', got '%s'", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindJSONLPathEmpty(t *testing.T) {
|
||||
// Empty database path should return empty string
|
||||
result := FindJSONLPath("")
|
||||
if result != "" {
|
||||
t.Errorf("Expected empty string for empty db path, got '%s'", result)
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindJSONLPathMultipleFiles verifies that when several .jsonl candidates
// exist beside the database, FindJSONLPath returns one of them (it does not
// pin which one, only that the result is a file we created).
func TestFindJSONLPathMultipleFiles(t *testing.T) {
	// Create temporary directory
	tmpDir, err := os.MkdirTemp("", "beads-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	// Create multiple .jsonl files
	jsonlFiles := []string{"issues.jsonl", "backup.jsonl", "archive.jsonl"}
	for _, filename := range jsonlFiles {
		f, err := os.Create(filepath.Join(tmpDir, filename))
		if err != nil {
			t.Fatalf("Failed to create jsonl file: %v", err)
		}
		f.Close()
	}

	// Create a fake database path
	dbPath := filepath.Join(tmpDir, "test.db")

	// Should return the first .jsonl file found (lexicographically sorted by Glob)
	result := FindJSONLPath(dbPath)
	// Verify it's one of the .jsonl files we created
	found := false
	for _, filename := range jsonlFiles {
		if result == filepath.Join(tmpDir, filename) {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Expected one of the created .jsonl files, got '%s'", result)
	}
}
|
||||
|
||||
// TestFindDatabasePathHomeDefault exercises the home-directory fallback path
// of FindDatabasePath from an empty working directory.
func TestFindDatabasePathHomeDefault(t *testing.T) {
	// This test verifies that if no database is found, it falls back to home directory
	// We can't reliably test this without modifying the home directory, so we'll skip
	// creating the file and just verify the function doesn't crash

	originalEnv := os.Getenv("BEADS_DB")
	originalWd, _ := os.Getwd()
	defer func() {
		if originalEnv != "" {
			os.Setenv("BEADS_DB", originalEnv)
		} else {
			os.Unsetenv("BEADS_DB")
		}
		os.Chdir(originalWd)
	}()

	os.Unsetenv("BEADS_DB")

	// Create an empty temp directory and cd to it
	tmpDir, err := os.MkdirTemp("", "beads-test-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	err = os.Chdir(tmpDir)
	if err != nil {
		t.Fatalf("Failed to change directory: %v", err)
	}

	// Call FindDatabasePath - it might return home dir default or empty string
	result := FindDatabasePath()

	// Any non-empty result must be an absolute path; empty is also acceptable.
	if result != "" && !filepath.IsAbs(result) {
		t.Errorf("Expected absolute path or empty string, got '%s'", result)
	}
}
|
||||
143
internal/beads/fingerprint.go
Normal file
143
internal/beads/fingerprint.go
Normal file
@@ -0,0 +1,143 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ComputeRepoID generates a unique identifier for this git repository.
//
// Preferred source is the canonicalized origin remote URL, so all clones of
// the same remote share one ID. When no origin remote is configured, it
// falls back to hashing the repo's canonical on-disk path, which is unique
// per clone. Returns the first 16 bytes of a SHA-256 hash, hex-encoded.
func ComputeRepoID() (string, error) {
	cmd := exec.Command("git", "config", "--get", "remote.origin.url")
	output, err := cmd.Output()
	if err != nil {
		// No origin remote: identify the repo by its working-tree root path.
		cmd = exec.Command("git", "rev-parse", "--show-toplevel")
		output, err = cmd.Output()
		if err != nil {
			return "", fmt.Errorf("not a git repository")
		}

		repoPath := strings.TrimSpace(string(output))
		absPath, err := filepath.Abs(repoPath)
		if err != nil {
			// Best effort: fall back to the path as reported by git.
			absPath = repoPath
		}

		// Resolve symlinks so equivalent paths hash identically.
		evalPath, err := filepath.EvalSymlinks(absPath)
		if err != nil {
			evalPath = absPath
		}

		// Forward slashes keep the hash stable across OSes.
		normalized := filepath.ToSlash(evalPath)
		hash := sha256.Sum256([]byte(normalized))
		return hex.EncodeToString(hash[:16]), nil
	}

	repoURL := strings.TrimSpace(string(output))
	canonical, err := canonicalizeGitURL(repoURL)
	if err != nil {
		return "", fmt.Errorf("failed to canonicalize URL: %w", err)
	}

	hash := sha256.Sum256([]byte(canonical))
	return hex.EncodeToString(hash[:16]), nil
}
|
||||
|
||||
// canonicalizeGitURL normalizes a git remote URL to a stable "host/path"
// form so that equivalent remotes (trailing slash, .git suffix, mixed-case
// host, default port) produce the same repo ID. Three input shapes are
// handled: scheme URLs (https://, ssh://), scp-style addresses
// ([user@]host:path), and local filesystem paths.
func canonicalizeGitURL(rawURL string) (string, error) {
	rawURL = strings.TrimSpace(rawURL)

	// Scheme form: scheme://[user@]host[:port]/path
	if strings.Contains(rawURL, "://") {
		u, err := url.Parse(rawURL)
		if err != nil {
			return "", fmt.Errorf("invalid URL: %w", err)
		}

		host := strings.ToLower(u.Hostname())
		// Keep only non-default ports; 22/80/443 are dropped so an explicit
		// default port compares equal to the same URL without one.
		if port := u.Port(); port != "" && port != "22" && port != "80" && port != "443" {
			host = host + ":" + port
		}

		path := strings.TrimRight(u.Path, "/")
		path = strings.TrimSuffix(path, ".git")
		path = filepath.ToSlash(path)

		return host + path, nil
	}

	// Detect scp-style URLs: [user@]host:path
	// Must contain ":" before any "/" and not be a Windows path
	colonIdx := strings.Index(rawURL, ":")
	slashIdx := strings.Index(rawURL, "/")
	if colonIdx > 0 && (slashIdx == -1 || colonIdx < slashIdx) {
		// Could be scp-style or Windows path (C:/)
		// Windows paths have colon at position 1 and are followed by backslash or forward slash
		if colonIdx == 1 && len(rawURL) > 2 && (rawURL[2] == '/' || rawURL[2] == '\\') {
			// Windows path, fall through to local path handling
		} else {
			// scp-style: [user@]host:path
			parts := strings.SplitN(rawURL, ":", 2)
			if len(parts) == 2 {
				hostPart := parts[0]
				pathPart := parts[1]

				// Strip any user@ prefix; only the host matters for identity.
				atIdx := strings.LastIndex(hostPart, "@")
				if atIdx >= 0 {
					hostPart = hostPart[atIdx+1:]
				}

				host := strings.ToLower(hostPart)
				path := strings.TrimRight(pathPart, "/")
				path = strings.TrimSuffix(path, ".git")
				path = filepath.ToSlash(path)

				return host + "/" + path, nil
			}
		}
	}

	// Local filesystem path: canonicalize (absolute, symlinks resolved) and
	// normalize separators; each step falls back to its input on error.
	absPath, err := filepath.Abs(rawURL)
	if err != nil {
		absPath = rawURL
	}

	evalPath, err := filepath.EvalSymlinks(absPath)
	if err != nil {
		evalPath = absPath
	}

	return filepath.ToSlash(evalPath), nil
}
|
||||
|
||||
// GetCloneID generates a unique ID for this specific clone of a repository.
// Unlike a repo ID, it mixes in the hostname and the resolved on-disk path of
// the working tree, so two clones of the same repo get different IDs.
func GetCloneID() (string, error) {
	host, err := os.Hostname()
	if err != nil {
		return "", fmt.Errorf("failed to get hostname: %w", err)
	}

	out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
	if err != nil {
		return "", fmt.Errorf("not a git repository: %w", err)
	}

	// Best-effort normalization: absolutize and resolve symlinks, keeping
	// the previous form whenever a step fails.
	root := strings.TrimSpace(string(out))
	if abs, absErr := filepath.Abs(root); absErr == nil {
		root = abs
	}
	if resolved, evalErr := filepath.EvalSymlinks(root); evalErr == nil {
		root = resolved
	}

	// Hash "hostname:path" (slash-normalized) and keep the first 8 bytes.
	sum := sha256.Sum256([]byte(host + ":" + filepath.ToSlash(root)))
	return hex.EncodeToString(sum[:8]), nil
}
|
||||
205
internal/beads/routing_integration_test.go
Normal file
205
internal/beads/routing_integration_test.go
Normal file
@@ -0,0 +1,205 @@
|
||||
package beads_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/beads/internal/routing"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
)
|
||||
|
||||
func TestRoutingIntegration(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
setupGit func(t *testing.T, dir string)
|
||||
expectedRole routing.UserRole
|
||||
expectedTargetRepo string
|
||||
}{
|
||||
{
|
||||
name: "maintainer detected by git config",
|
||||
setupGit: func(t *testing.T, dir string) {
|
||||
runCmd(t, dir, "git", "init")
|
||||
runCmd(t, dir, "git", "config", "user.email", "maintainer@example.com")
|
||||
runCmd(t, dir, "git", "config", "beads.role", "maintainer")
|
||||
},
|
||||
expectedRole: routing.Maintainer,
|
||||
expectedTargetRepo: ".",
|
||||
},
|
||||
{
|
||||
name: "contributor detected by fork remote",
|
||||
setupGit: func(t *testing.T, dir string) {
|
||||
runCmd(t, dir, "git", "init")
|
||||
runCmd(t, dir, "git", "remote", "add", "upstream", "https://github.com/original/repo.git")
|
||||
runCmd(t, dir, "git", "remote", "add", "origin", "https://github.com/forker/repo.git")
|
||||
},
|
||||
expectedRole: routing.Contributor,
|
||||
expectedTargetRepo: "", // Will use default from config
|
||||
},
|
||||
{
|
||||
name: "maintainer with SSH remote",
|
||||
setupGit: func(t *testing.T, dir string) {
|
||||
runCmd(t, dir, "git", "init")
|
||||
runCmd(t, dir, "git", "remote", "add", "origin", "git@github.com:owner/repo.git")
|
||||
},
|
||||
expectedRole: routing.Maintainer, // SSH = maintainer
|
||||
expectedTargetRepo: ".",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create temp directory
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Set up git
|
||||
tt.setupGit(t, tmpDir)
|
||||
|
||||
// Detect user role
|
||||
role, err := routing.DetectUserRole(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("DetectUserRole() error = %v", err)
|
||||
}
|
||||
|
||||
if role != tt.expectedRole {
|
||||
t.Errorf("expected role %v, got %v", tt.expectedRole, role)
|
||||
}
|
||||
|
||||
// Test routing configuration
|
||||
routingCfg := &routing.RoutingConfig{
|
||||
Mode: "auto",
|
||||
DefaultRepo: "~/.beads-planning",
|
||||
MaintainerRepo: ".",
|
||||
ContributorRepo: "~/.beads-planning",
|
||||
ExplicitOverride: "",
|
||||
}
|
||||
|
||||
targetRepo := routing.DetermineTargetRepo(routingCfg, role, tmpDir)
|
||||
|
||||
if tt.expectedTargetRepo != "" && targetRepo != tt.expectedTargetRepo {
|
||||
t.Errorf("expected target repo %q, got %q", tt.expectedTargetRepo, targetRepo)
|
||||
}
|
||||
|
||||
// For contributor, verify it routes to planning repo
|
||||
if role == routing.Contributor && !strings.Contains(targetRepo, "beads-planning") {
|
||||
t.Errorf("contributor should route to planning repo, got %q", targetRepo)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoutingWithExplicitOverride(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Set up as contributor
|
||||
runCmd(t, tmpDir, "git", "init")
|
||||
runCmd(t, tmpDir, "git", "remote", "add", "upstream", "https://github.com/original/repo.git")
|
||||
runCmd(t, tmpDir, "git", "remote", "add", "origin", "https://github.com/forker/repo.git")
|
||||
|
||||
role, err := routing.DetectUserRole(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("DetectUserRole() error = %v", err)
|
||||
}
|
||||
|
||||
// Even though we're a contributor, --repo flag should override
|
||||
routingCfg := &routing.RoutingConfig{
|
||||
Mode: "auto",
|
||||
DefaultRepo: "~/.beads-planning",
|
||||
MaintainerRepo: ".",
|
||||
ContributorRepo: "~/.beads-planning",
|
||||
ExplicitOverride: "/custom/repo/path",
|
||||
}
|
||||
|
||||
targetRepo := routing.DetermineTargetRepo(routingCfg, role, tmpDir)
|
||||
|
||||
if targetRepo != "/custom/repo/path" {
|
||||
t.Errorf("expected explicit override to win, got %q", targetRepo)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMultiRepoEndToEnd(t *testing.T) {
|
||||
|
||||
// Create primary repo
|
||||
primaryDir := t.TempDir()
|
||||
beadsDir := filepath.Join(primaryDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create .beads dir: %v", err)
|
||||
}
|
||||
|
||||
// Initialize database
|
||||
dbPath := filepath.Join(beadsDir, "beads.db")
|
||||
store, err := sqlite.New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create storage: %v", err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
// Set up as maintainer
|
||||
runCmd(t, primaryDir, "git", "init")
|
||||
runCmd(t, primaryDir, "git", "config", "beads.role", "maintainer")
|
||||
|
||||
// Configure multi-repo
|
||||
planningDir := t.TempDir()
|
||||
planningBeadsDir := filepath.Join(planningDir, ".beads")
|
||||
if err := os.MkdirAll(planningBeadsDir, 0755); err != nil {
|
||||
t.Fatalf("failed to create planning .beads dir: %v", err)
|
||||
}
|
||||
|
||||
// Set config for multi-repo
|
||||
reposConfig := map[string][]string{
|
||||
"additional": {planningDir},
|
||||
}
|
||||
configJSON, err := json.Marshal(reposConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to marshal config: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if err := store.SetConfig(ctx, "repos.additional", string(configJSON)); err != nil {
|
||||
t.Fatalf("failed to set repos config: %v", err)
|
||||
}
|
||||
|
||||
// Verify routing works
|
||||
role, err := routing.DetectUserRole(primaryDir)
|
||||
if err != nil {
|
||||
t.Fatalf("DetectUserRole() error = %v", err)
|
||||
}
|
||||
|
||||
if role != routing.Maintainer {
|
||||
t.Errorf("expected maintainer role, got %v", role)
|
||||
}
|
||||
|
||||
routingCfg := &routing.RoutingConfig{
|
||||
Mode: "auto",
|
||||
DefaultRepo: planningDir,
|
||||
MaintainerRepo: ".",
|
||||
ContributorRepo: planningDir,
|
||||
}
|
||||
|
||||
targetRepo := routing.DetermineTargetRepo(routingCfg, role, primaryDir)
|
||||
if targetRepo != "." {
|
||||
t.Errorf("maintainer should route to current repo, got %q", targetRepo)
|
||||
}
|
||||
|
||||
t.Logf("Multi-repo end-to-end test passed")
|
||||
t.Logf(" Primary: %s", primaryDir)
|
||||
t.Logf(" Planning: %s", planningDir)
|
||||
t.Logf(" User role: %v", role)
|
||||
t.Logf(" Target repo: %s", targetRepo)
|
||||
}
|
||||
|
||||
// runCmd runs an external command (typically git) inside dir and fails the
// test immediately if the command cannot be started or exits non-zero.
func runCmd(t *testing.T, dir string, name string, args ...string) {
	t.Helper()
	c := exec.Command(name, args...)
	c.Dir = dir
	if out, err := c.CombinedOutput(); err != nil {
		// Surface the combined output before failing so the cause is visible.
		t.Logf("Command failed: %s %v\nOutput: %s", name, args, out)
		t.Fatalf("failed to run %s: %v", name, err)
	}
}
|
||||
Reference in New Issue
Block a user