Merge branch 'main' into show-rev-in-dev

# Conflicts:
#	.beads/beads.jsonl
This commit is contained in:
matt wilkie
2025-11-16 15:50:09 -07:00
26 changed files with 1645 additions and 79 deletions

File diff suppressed because one or more lines are too long

8
.gitignore vendored
View File

@@ -99,3 +99,11 @@ __pycache__/
*.so
.Python
.envrc
# Performance profiling files (benchmarks, bd doctor --perf, and bd --profile)
*.prof
*.out
beads-perf-*.prof
bench-cpu-*.prof
bd-profile-*.prof
bd-trace-*.out

57
Makefile Normal file
View File

@@ -0,0 +1,57 @@
# Makefile for beads project
.PHONY: all build test bench bench-quick clean install help
# Default target
all: build
# Build the bd binary
build:
	@echo "Building bd..."
	go build -o bd ./cmd/bd
# Run all tests
test:
	@echo "Running tests..."
	go test ./...
# Run performance benchmarks (10K and 20K issue databases with automatic CPU profiling)
# Generates CPU profile: internal/storage/sqlite/bench-cpu-<timestamp>.prof
# View flamegraph: go tool pprof -http=:8080 <profile-file>
bench:
	@echo "Running performance benchmarks..."
	@echo "This will generate 10K and 20K issue databases and profile all operations."
	@echo "CPU profiles will be saved to internal/storage/sqlite/"
	@echo ""
	go test -bench=. -benchtime=1s -tags=bench -run=^$$ ./internal/storage/sqlite/ -timeout=30m
	@echo ""
	@echo "Benchmark complete. Profile files saved in internal/storage/sqlite/"
	@echo "View flamegraph: cd internal/storage/sqlite && go tool pprof -http=:8080 bench-cpu-*.prof"
# Run quick benchmarks (shorter benchtime for faster feedback)
bench-quick:
	@echo "Running quick performance benchmarks..."
	go test -bench=. -benchtime=100ms -tags=bench -run=^$$ ./internal/storage/sqlite/ -timeout=15m
# Install bd to GOPATH/bin
install: build
	@echo "Installing bd to $$(go env GOPATH)/bin..."
	go install ./cmd/bd
# Clean build artifacts and benchmark profiles
# Removes every profile pattern listed in .gitignore, including the
# bd --profile artifacts (bd-profile-*.prof, bd-trace-*.out) that the
# previous version of this target missed.
clean:
	@echo "Cleaning..."
	rm -f bd
	rm -f internal/storage/sqlite/bench-cpu-*.prof
	rm -f beads-perf-*.prof
	rm -f bd-profile-*.prof
	rm -f bd-trace-*.out
# Show help
help:
	@echo "Beads Makefile targets:"
	@echo " make build - Build the bd binary"
	@echo " make test - Run all tests"
	@echo " make bench - Run performance benchmarks (generates CPU profiles)"
	@echo " make bench-quick - Run quick benchmarks (shorter benchtime)"
	@echo " make install - Install bd to GOPATH/bin"
	@echo " make clean - Remove build artifacts and profile files"
	@echo " make help - Show this help message"

View File

@@ -313,6 +313,18 @@ var createCmd = &cobra.Command{
os.Exit(1)
}
// If parent was specified, add parent-child dependency
if parentID != "" {
dep := &types.Dependency{
IssueID: issue.ID,
DependsOnID: parentID,
Type: types.DepParentChild,
}
if err := store.AddDependency(ctx, dep, actor); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to add parent-child dependency %s -> %s: %v\n", issue.ID, parentID, err)
}
}
// Add labels if specified
for _, label := range labels {
if err := store.AddLabel(ctx, issue.ID, label, actor); err != nil {

View File

@@ -54,7 +54,7 @@ func merge3WayAndPruneDeletions(ctx context.Context, store storage.Storage, json
// Ensure temp file cleanup on failure
defer func() {
if fileExists(tmpMerged) {
os.Remove(tmpMerged)
_ = os.Remove(tmpMerged)
}
}()

View File

@@ -46,6 +46,7 @@ type doctorResult struct {
var (
doctorFix bool
perfMode bool
)
var doctorCmd = &cobra.Command{
@@ -68,11 +69,19 @@ This command checks:
- Git hooks (pre-commit, post-merge, pre-push)
- .beads/.gitignore up to date
Performance Mode (--perf):
Run performance diagnostics on your database:
- Times key operations (bd ready, bd list, bd show, etc.)
- Collects system info (OS, arch, SQLite version, database stats)
- Generates CPU profile for analysis
- Outputs shareable report for bug reports
Examples:
bd doctor # Check current directory
bd doctor /path/to/repo # Check specific repository
bd doctor --json # Machine-readable output
bd doctor --fix # Automatically fix issues`,
bd doctor --fix # Automatically fix issues
bd doctor --perf # Performance diagnostics`,
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
@@ -89,6 +98,12 @@ Examples:
os.Exit(1)
}
// Run performance diagnostics if --perf flag is set
if perfMode {
doctor.RunPerformanceDiagnostics(absPath)
return
}
// Run diagnostics
result := runDiagnostics(absPath)
@@ -1202,7 +1217,7 @@ func checkGitHooks(path string) doctorCheck {
}
}
hookInstallMsg := "See https://github.com/steveyegge/beads/tree/main/examples/git-hooks for installation instructions"
hookInstallMsg := "Install hooks with 'bd hooks install'. See https://github.com/steveyegge/beads/tree/main/examples/git-hooks for installation instructions"
if len(installedHooks) > 0 {
return doctorCheck{
@@ -1309,4 +1324,5 @@ func checkSchemaCompatibility(path string) doctorCheck {
func init() {
rootCmd.AddCommand(doctorCmd)
doctorCmd.Flags().BoolVar(&perfMode, "perf", false, "Run performance diagnostics and generate CPU profile")
}

276
cmd/bd/doctor/perf.go Normal file
View File

@@ -0,0 +1,276 @@
package doctor
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strings"
"time"
"github.com/steveyegge/beads/internal/beads"
)
var cpuProfileFile *os.File
// RunPerformanceDiagnostics runs performance diagnostics and generates a CPU profile.
//
// It prints a plain-text report to stdout: platform info (OS/arch, Go, SQLite
// versions), database statistics, and wall-clock timings for simplified
// stand-ins of the hot read paths (ready / list / show / search). While the
// timed operations run, a CPU profile is recorded to
// beads-perf-<timestamp>.prof in the current working directory.
//
// Exits the process with status 1 when path contains no .beads/ directory or
// no database file.
//
// NOTE(review): the "Profile saved" message at the end is printed even when
// startCPUProfile failed above — confirm whether that should be conditional.
func RunPerformanceDiagnostics(path string) {
	fmt.Println("\nBeads Performance Diagnostics")
	fmt.Println(strings.Repeat("=", 50))
	// Check if .beads directory exists
	beadsDir := filepath.Join(path, ".beads")
	if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
		fmt.Fprintf(os.Stderr, "Error: No .beads/ directory found at %s\n", path)
		fmt.Fprintf(os.Stderr, "Run 'bd init' to initialize beads\n")
		os.Exit(1)
	}
	// Get database path
	dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		fmt.Fprintf(os.Stderr, "Error: No database found at %s\n", dbPath)
		os.Exit(1)
	}
	// Collect platform info
	platformInfo := collectPlatformInfo(dbPath)
	fmt.Printf("\nPlatform: %s\n", platformInfo["os_arch"])
	fmt.Printf("Go: %s\n", platformInfo["go_version"])
	fmt.Printf("SQLite: %s\n", platformInfo["sqlite_version"])
	// Collect database stats
	dbStats := collectDatabaseStats(dbPath)
	fmt.Printf("\nDatabase Statistics:\n")
	fmt.Printf(" Total issues: %s\n", dbStats["total_issues"])
	fmt.Printf(" Open issues: %s\n", dbStats["open_issues"])
	fmt.Printf(" Closed issues: %s\n", dbStats["closed_issues"])
	fmt.Printf(" Dependencies: %s\n", dbStats["dependencies"])
	fmt.Printf(" Labels: %s\n", dbStats["labels"])
	fmt.Printf(" Database size: %s\n", dbStats["db_size"])
	// Start CPU profiling. Failure is non-fatal: timings are still useful
	// without a profile, so we only warn.
	profilePath := fmt.Sprintf("beads-perf-%s.prof", time.Now().Format("2006-01-02-150405"))
	if err := startCPUProfile(profilePath); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to start CPU profiling: %v\n", err)
	} else {
		defer stopCPUProfile()
		fmt.Printf("\nCPU profiling enabled: %s\n", profilePath)
	}
	// Time key operations. measureOperation returns 0 on error, so a failed
	// operation is reported as 0ms rather than aborting the report.
	fmt.Printf("\nOperation Performance:\n")
	// Measure GetReadyWork
	readyDuration := measureOperation("bd ready", func() error {
		return runReadyWork(dbPath)
	})
	fmt.Printf(" bd ready %dms\n", readyDuration.Milliseconds())
	// Measure SearchIssues (list open)
	listDuration := measureOperation("bd list --status=open", func() error {
		return runListOpen(dbPath)
	})
	fmt.Printf(" bd list --status=open %dms\n", listDuration.Milliseconds())
	// Measure GetIssue (show random issue). Skipped from the report when the
	// database has no issues (runShowRandom errors and the duration is 0).
	showDuration := measureOperation("bd show <issue>", func() error {
		return runShowRandom(dbPath)
	})
	if showDuration > 0 {
		fmt.Printf(" bd show <random-issue> %dms\n", showDuration.Milliseconds())
	}
	// Measure SearchIssues with filters
	searchDuration := measureOperation("bd list (complex filters)", func() error {
		return runComplexSearch(dbPath)
	})
	fmt.Printf(" bd list (complex filters) %dms\n", searchDuration.Milliseconds())
	fmt.Printf("\nProfile saved: %s\n", profilePath)
	fmt.Printf("Share this file with bug reports for performance issues.\n\n")
	fmt.Printf("View flamegraph:\n")
	fmt.Printf(" go tool pprof -http=:8080 %s\n\n", profilePath)
}
func collectPlatformInfo(dbPath string) map[string]string {
info := make(map[string]string)
// OS and architecture
info["os_arch"] = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
// Go version
info["go_version"] = runtime.Version()
// SQLite version
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err == nil {
defer db.Close()
var version string
if err := db.QueryRow("SELECT sqlite_version()").Scan(&version); err == nil {
info["sqlite_version"] = version
} else {
info["sqlite_version"] = "unknown"
}
} else {
info["sqlite_version"] = "unknown"
}
return info
}
// collectDatabaseStats gathers row counts and on-disk size for the database at
// dbPath (opened read-only). Each entry holds the formatted value, or "error"
// when the database cannot be opened or that individual query fails.
//
// Keys: total_issues, open_issues, closed_issues, dependencies, labels, db_size.
func collectDatabaseStats(dbPath string) map[string]string {
	// countQuery runs a single-integer aggregate query and formats the result,
	// collapsing the five copy-pasted count branches of the original.
	countQuery := func(db *sql.DB, query string) string {
		var n int
		if err := db.QueryRow(query).Scan(&n); err != nil {
			return "error"
		}
		return fmt.Sprintf("%d", n)
	}
	statKeys := []string{"total_issues", "open_issues", "closed_issues", "dependencies", "labels", "db_size"}
	stats := make(map[string]string, len(statKeys))
	db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
	if err != nil {
		// Unopenable database: every stat is an error.
		for _, k := range statKeys {
			stats[k] = "error"
		}
		return stats
	}
	defer db.Close()
	stats["total_issues"] = countQuery(db, "SELECT COUNT(*) FROM issues")
	stats["open_issues"] = countQuery(db, "SELECT COUNT(*) FROM issues WHERE status != 'closed'")
	stats["closed_issues"] = countQuery(db, "SELECT COUNT(*) FROM issues WHERE status = 'closed'")
	stats["dependencies"] = countQuery(db, "SELECT COUNT(*) FROM dependencies")
	stats["labels"] = countQuery(db, "SELECT COUNT(DISTINCT label) FROM labels")
	// Database file size comes from the filesystem, not SQLite itself.
	if info, err := os.Stat(dbPath); err == nil {
		sizeMB := float64(info.Size()) / (1024 * 1024)
		stats["db_size"] = fmt.Sprintf("%.2f MB", sizeMB)
	} else {
		stats["db_size"] = "error"
	}
	return stats
}
// startCPUProfile creates the profile file at path and starts CPU profiling
// into it, recording the handle in cpuProfileFile so stopCPUProfile can flush
// and close it later.
//
// If profiling cannot be started (e.g. a profile is already running), the
// freshly-created file is closed and removed so no empty artifact is leaked
// and cpuProfileFile is left untouched.
func startCPUProfile(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		// Best-effort cleanup: don't leak the handle or leave an empty file.
		_ = f.Close()
		_ = os.Remove(path)
		return err
	}
	cpuProfileFile = f
	return nil
}
// stopCPUProfile stops CPU profiling and closes the profile file.
// Must be called after startCPUProfile succeeded to flush profile data to disk.
// Safe to call more than once: the shared handle is cleared after closing so a
// repeated call cannot double-close the file.
func stopCPUProfile() {
	pprof.StopCPUProfile()
	if cpuProfileFile != nil {
		_ = cpuProfileFile.Close() // best effort cleanup
		cpuProfileFile = nil       // prevent double-close on repeated calls
	}
}
func measureOperation(name string, op func() error) time.Duration {
start := time.Now()
if err := op(); err != nil {
return 0
}
return time.Since(start)
}
// runQuery executes a read-only database query and returns any error
func runQuery(dbPath string, queryFn func(*sql.DB) error) error {
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err != nil {
return err
}
defer db.Close()
return queryFn(db)
}
// runReadyWork mimics the core of 'bd ready': open or in-progress issues that
// are not blocked by a dependency. The real query is more complex; this is a
// representative read for timing only.
func runReadyWork(dbPath string) error {
	return runQuery(dbPath, func(db *sql.DB) error {
		// simplified ready work query (the real one is more complex)
		rows, err := db.Query(`
			SELECT id FROM issues
			WHERE status IN ('open', 'in_progress')
			AND id NOT IN (
				SELECT issue_id FROM dependencies WHERE type = 'blocks'
			)
			LIMIT 100
		`)
		if err != nil {
			return err
		}
		defer rows.Close()
		// Drain the result set so the timing includes row materialization and
		// the statement is finalized (db.Query without Close leaks the rows).
		for rows.Next() {
		}
		return rows.Err()
	})
}
// runListOpen mimics 'bd list --status=open': fetch up to 100 non-closed issues.
func runListOpen(dbPath string) error {
	return runQuery(dbPath, func(db *sql.DB) error {
		rows, err := db.Query("SELECT id, title, status FROM issues WHERE status != 'closed' LIMIT 100")
		if err != nil {
			return err
		}
		defer rows.Close()
		// Drain so the timing reflects row retrieval and the connection is
		// returned to the pool (unclosed rows leak the connection).
		for rows.Next() {
		}
		return rows.Err()
	})
}
// runShowRandom mimics 'bd show' for one random issue: pick an id, then load
// the full row. On an empty database the initial QueryRow returns
// sql.ErrNoRows, which the caller treats as "skip this measurement".
func runShowRandom(dbPath string) error {
	return runQuery(dbPath, func(db *sql.DB) error {
		// get a random issue
		var issueID string
		if err := db.QueryRow("SELECT id FROM issues ORDER BY RANDOM() LIMIT 1").Scan(&issueID); err != nil {
			return err
		}
		// get issue details
		rows, err := db.Query("SELECT * FROM issues WHERE id = ?", issueID)
		if err != nil {
			return err
		}
		defer rows.Close()
		// Drain and close: db.Query without Close leaks the result set.
		for rows.Next() {
		}
		return rows.Err()
	})
}
// runComplexSearch mimics a filtered 'bd list': status and priority filters
// joined against labels, grouped per issue.
func runComplexSearch(dbPath string) error {
	return runQuery(dbPath, func(db *sql.DB) error {
		// complex query with filters
		rows, err := db.Query(`
			SELECT i.id, i.title, i.status, i.priority
			FROM issues i
			LEFT JOIN labels l ON i.id = l.issue_id
			WHERE i.status IN ('open', 'in_progress')
			AND i.priority <= 2
			GROUP BY i.id
			LIMIT 100
		`)
		if err != nil {
			return err
		}
		defer rows.Close()
		// Drain and close to avoid leaking the statement/connection.
		for rows.Next() {
		}
		return rows.Err()
	})
}

View File

@@ -545,7 +545,7 @@ func attemptAutoMerge(conflictedPath string) error {
if err != nil {
return fmt.Errorf("failed to create temp directory: %w", err)
}
defer os.RemoveAll(tmpDir)
defer func() { _ = os.RemoveAll(tmpDir) }()
basePath := filepath.Join(tmpDir, "base.jsonl")
leftPath := filepath.Join(tmpDir, "left.jsonl")

View File

@@ -4,6 +4,8 @@ import (
"fmt"
"os"
"path/filepath"
"runtime/pprof"
"runtime/trace"
"slices"
"sync"
"time"
@@ -78,6 +80,9 @@ var (
noAutoImport bool
sandboxMode bool
noDb bool // Use --no-db mode: load from JSONL, write back after each command
profileEnabled bool
profileFile *os.File
traceFile *os.File
)
func init() {
@@ -95,6 +100,7 @@ func init() {
rootCmd.PersistentFlags().BoolVar(&noAutoImport, "no-auto-import", false, "Disable automatic JSONL import when newer than DB")
rootCmd.PersistentFlags().BoolVar(&sandboxMode, "sandbox", false, "Sandbox mode: disables daemon and auto-sync")
rootCmd.PersistentFlags().BoolVar(&noDb, "no-db", false, "Use no-db mode: load from JSONL, no SQLite")
rootCmd.PersistentFlags().BoolVar(&profileEnabled, "profile", false, "Generate CPU profile for performance analysis")
// Add --version flag to root command (same behavior as version subcommand)
rootCmd.Flags().BoolP("version", "v", false, "Print version information")
@@ -141,6 +147,23 @@ var rootCmd = &cobra.Command{
actor = config.GetString("actor")
}
// Performance profiling setup
// When --profile is enabled, force direct mode to capture actual database operations
// rather than just RPC serialization/network overhead. This gives accurate profiles
// of the storage layer, query performance, and business logic.
if profileEnabled {
noDaemon = true
timestamp := time.Now().Format("20060102-150405")
if f, _ := os.Create(fmt.Sprintf("bd-profile-%s-%s.prof", cmd.Name(), timestamp)); f != nil {
profileFile = f
_ = pprof.StartCPUProfile(f)
}
if f, _ := os.Create(fmt.Sprintf("bd-trace-%s-%s.out", cmd.Name(), timestamp)); f != nil {
traceFile = f
_ = trace.Start(f)
}
}
// Skip database initialization for commands that don't need a database
noDbCommands := []string{
cmdDaemon,
@@ -505,6 +528,8 @@ var rootCmd = &cobra.Command{
if store != nil {
_ = store.Close()
}
if profileFile != nil { pprof.StopCPUProfile(); _ = profileFile.Close() }
if traceFile != nil { trace.Stop(); _ = traceFile.Close() }
},
}

View File

@@ -202,7 +202,7 @@ func sendAgentMailRequest(config *AgentMailConfig, method string, params interfa
if err != nil {
return nil, fmt.Errorf("failed to connect to Agent Mail server: %w", err)
}
defer resp.Body.Close()
defer func() { _ = resp.Body.Close() }()
body, err := io.ReadAll(resp.Body)
if err != nil {

View File

@@ -630,7 +630,7 @@ func displayMigrationPlan(plan migrationPlan, dryRun bool) error {
// confirmMigration prompts on stdout and reads one whitespace-delimited token
// from stdin; only "y"/"Y" (after trimming) confirms the migration.
func confirmMigration(plan migrationPlan) bool {
	fmt.Printf("\nMigrate %d issues from %s to %s? [y/N] ", len(plan.IssueIDs), plan.From, plan.To)
	var answer string
	_, _ = fmt.Scanln(&answer) // EOF/empty input falls through to the default "no"
	answer = strings.ToLower(strings.TrimSpace(answer))
	return answer == "y"
}
@@ -698,6 +698,6 @@ func init() {
migrateIssuesCmd.Flags().Bool("strict", false, "Fail on orphaned dependencies or missing repos")
migrateIssuesCmd.Flags().Bool("yes", false, "Skip confirmation prompt")
migrateIssuesCmd.MarkFlagRequired("from")
migrateIssuesCmd.MarkFlagRequired("to")
_ = migrateIssuesCmd.MarkFlagRequired("from")
_ = migrateIssuesCmd.MarkFlagRequired("to")
}

View File

@@ -607,11 +607,11 @@ Examples:
// Write current value to temp file
if _, err := tmpFile.WriteString(currentValue); err != nil {
tmpFile.Close()
_ = tmpFile.Close()
fmt.Fprintf(os.Stderr, "Error writing to temp file: %v\n", err)
os.Exit(1)
}
tmpFile.Close()
_ = tmpFile.Close()
// Open the editor
editorCmd := exec.Command(editor, tmpPath)

View File

@@ -1,30 +0,0 @@
package main
import (
"testing"
"github.com/steveyegge/beads/internal/types"
)
// TestFormatDependencyType verifies the string rendering of every known
// dependency type plus the fallthrough for an unrecognized value.
func TestFormatDependencyType(t *testing.T) {
	cases := []struct {
		name  string
		input types.DependencyType
		want  string
	}{
		{"blocks", types.DepBlocks, "blocks"},
		{"related", types.DepRelated, "related"},
		{"parent-child", types.DepParentChild, "parent-child"},
		{"discovered-from", types.DepDiscoveredFrom, "discovered-from"},
		{"unknown", types.DependencyType("unknown"), "unknown"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := formatDependencyType(tc.input)
			if got != tc.want {
				t.Errorf("formatDependencyType(%v) = %v, want %v", tc.input, got, tc.want)
			}
		})
	}
}

View File

@@ -9,7 +9,7 @@ pkgs.buildGoModule {
subPackages = [ "cmd/bd" ];
doCheck = false;
# Go module dependencies hash (computed via nix build)
vendorHash = "sha256-eUwVXAe9d/e3OWEav61W8lI0bf/IIQYUol8QUiQiBbo=";
vendorHash = "sha256-jpaeKw5dbZuhV9Z18aQ9tDMS/Eo7HaXiZefm26UlPyI=";
# Git is required for tests
nativeBuildInputs = [ pkgs.git ];

View File

@@ -180,6 +180,21 @@ func (s *Server) handleCreate(req *Request) Response {
}
}
// If parent was specified, add parent-child dependency
if createArgs.Parent != "" {
dep := &types.Dependency{
IssueID: issue.ID,
DependsOnID: createArgs.Parent,
Type: types.DepParentChild,
}
if err := store.AddDependency(ctx, dep, s.reqActor(req)); err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("failed to add parent-child dependency %s -> %s: %v", issue.ID, createArgs.Parent, err),
}
}
}
// Add labels if specified
for _, label := range createArgs.Labels {
if err := store.AddLabel(ctx, issue.ID, label, s.reqActor(req)); err != nil {

View File

@@ -0,0 +1,245 @@
//go:build bench
package sqlite
import (
"context"
"fmt"
"io"
"os"
"runtime/pprof"
"sync"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/testutil/fixtures"
)
var (
profileOnce sync.Once
profileFile *os.File
benchCacheDir = "/tmp/beads-bench-cache"
)
// startBenchmarkProfiling starts CPU profiling for the entire benchmark run.
// Uses sync.Once to ensure it only runs once per test process.
// The profile is saved to bench-cpu-<timestamp>.prof in the current directory.
//
// NOTE(review): b.Cleanup below is registered on whichever benchmark calls
// this first, so profiling stops when that benchmark's cleanup fires —
// confirm this actually spans the full run when several top-level benchmarks
// execute in one process.
func startBenchmarkProfiling(b *testing.B) {
	b.Helper()
	profileOnce.Do(func() {
		profilePath := fmt.Sprintf("bench-cpu-%s.prof", time.Now().Format("2006-01-02-150405"))
		f, err := os.Create(profilePath)
		if err != nil {
			// Profiling is best-effort; benchmarks still run without it.
			b.Logf("Warning: failed to create CPU profile: %v", err)
			return
		}
		profileFile = f
		if err := pprof.StartCPUProfile(f); err != nil {
			b.Logf("Warning: failed to start CPU profiling: %v", err)
			f.Close()
			return
		}
		b.Logf("CPU profiling enabled: %s", profilePath)
		// Register cleanup to stop profiling when all benchmarks complete
		b.Cleanup(func() {
			pprof.StopCPUProfile()
			if profileFile != nil {
				profileFile.Close()
				b.Logf("CPU profile saved: %s", profilePath)
				b.Logf("View flamegraph: go tool pprof -http=:8080 %s", profilePath)
			}
		})
	})
}
// Benchmark setup rationale:
// We only provide Large (10K) and XLarge (20K) setup functions because
// small databases don't exhibit the performance characteristics we need to optimize.
// See sqlite_bench_test.go for full rationale.
//
// Dataset caching:
// Datasets are cached in /tmp/beads-bench-cache/ to avoid regenerating 10K-20K
// issues on every benchmark run. Cached databases are ~10-30MB and reused across runs.
// getCachedOrGenerateDB returns a cached database or generates it if missing.
// cacheKey should be unique per dataset type (e.g., "large", "xlarge").
// generateFn is called only if the cached database doesn't exist.
//
// The returned path points into benchCacheDir; callers must copy the file
// before mutating it (see the setup* helpers) so the cache stays pristine.
// Any failure aborts the benchmark via b.Fatalf.
func getCachedOrGenerateDB(b *testing.B, cacheKey string, generateFn func(context.Context, storage.Storage) error) string {
	b.Helper()
	// Ensure cache directory exists
	if err := os.MkdirAll(benchCacheDir, 0755); err != nil {
		b.Fatalf("Failed to create benchmark cache directory: %v", err)
	}
	dbPath := fmt.Sprintf("%s/%s.db", benchCacheDir, cacheKey)
	// Check if cached database exists; an existing file is trusted as-is.
	if stat, err := os.Stat(dbPath); err == nil {
		sizeMB := float64(stat.Size()) / (1024 * 1024)
		b.Logf("Using cached benchmark database: %s (%.1f MB)", dbPath, sizeMB)
		return dbPath
	}
	// Generate new database
	b.Logf("===== Generating benchmark database: %s =====", dbPath)
	b.Logf("This is a one-time operation that will be cached for future runs...")
	b.Logf("Expected time: ~1-3 minutes for 10K issues, ~2-6 minutes for 20K issues")
	store, err := New(dbPath)
	if err != nil {
		b.Fatalf("Failed to create storage: %v", err)
	}
	ctx := context.Background()
	// Initialize database with prefix
	if err := store.SetConfig(ctx, "issue_prefix", "bd-"); err != nil {
		store.Close()
		b.Fatalf("Failed to set issue_prefix: %v", err)
	}
	// Generate dataset using provided function
	if err := generateFn(ctx, store); err != nil {
		store.Close()
		os.Remove(dbPath) // cleanup partial database (best effort)
		b.Fatalf("Failed to generate dataset: %v", err)
	}
	store.Close()
	// Log completion with final size
	if stat, err := os.Stat(dbPath); err == nil {
		sizeMB := float64(stat.Size()) / (1024 * 1024)
		b.Logf("===== Database generation complete: %s (%.1f MB) =====", dbPath, sizeMB)
	}
	return dbPath
}
// copyFile copies a file from src to dst.
func copyFile(src, dst string) error {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.Create(dst)
if err != nil {
return err
}
defer dstFile.Close()
if _, err := io.Copy(dstFile, srcFile); err != nil {
return err
}
return dstFile.Sync()
}
// setupLargeBenchDB creates or reuses a cached 10K issue database.
// Returns configured storage instance and cleanup function.
// Uses //go:build bench tag to avoid running in normal tests.
// Automatically enables CPU profiling on first call.
//
// Note: Copies the cached database to a temp location for each benchmark
// to prevent mutations from affecting subsequent runs.
func setupLargeBenchDB(b *testing.B) (*SQLiteStorage, func()) {
	b.Helper()
	// Profiling is a process-wide one-shot; calling repeatedly is safe.
	startBenchmarkProfiling(b)
	// Work on a throwaway copy so benchmark writes never dirty the cache.
	source := getCachedOrGenerateDB(b, "large", fixtures.LargeSQLite)
	working := b.TempDir() + "/large.db"
	if err := copyFile(source, working); err != nil {
		b.Fatalf("Failed to copy cached database: %v", err)
	}
	store, err := New(working)
	if err != nil {
		b.Fatalf("Failed to open database: %v", err)
	}
	teardown := func() {
		store.Close()
	}
	return store, teardown
}
// setupXLargeBenchDB creates or reuses a cached 20K issue database.
// Returns configured storage instance and cleanup function.
// Uses //go:build bench tag to avoid running in normal tests.
// Automatically enables CPU profiling on first call.
//
// Note: Copies the cached database to a temp location for each benchmark
// to prevent mutations from affecting subsequent runs.
func setupXLargeBenchDB(b *testing.B) (*SQLiteStorage, func()) {
	b.Helper()
	// Profiling is a process-wide one-shot; calling repeatedly is safe.
	startBenchmarkProfiling(b)
	// Work on a throwaway copy so benchmark writes never dirty the cache.
	source := getCachedOrGenerateDB(b, "xlarge", fixtures.XLargeSQLite)
	working := b.TempDir() + "/xlarge.db"
	if err := copyFile(source, working); err != nil {
		b.Fatalf("Failed to copy cached database: %v", err)
	}
	store, err := New(working)
	if err != nil {
		b.Fatalf("Failed to open database: %v", err)
	}
	teardown := func() {
		store.Close()
	}
	return store, teardown
}
// setupLargeFromJSONL creates or reuses a cached 10K issue database via JSONL import path.
// Returns configured storage instance and cleanup function.
// Uses //go:build bench tag to avoid running in normal tests.
// Automatically enables CPU profiling on first call.
//
// Note: Copies the cached database to a temp location for each benchmark
// to prevent mutations from affecting subsequent runs.
func setupLargeFromJSONL(b *testing.B) (*SQLiteStorage, func()) {
	b.Helper()
	// Profiling is a process-wide one-shot; calling repeatedly is safe.
	startBenchmarkProfiling(b)
	// Generator drives the JSONL import path instead of direct inserts.
	generate := func(ctx context.Context, store storage.Storage) error {
		tempDir := b.TempDir()
		return fixtures.LargeFromJSONL(ctx, store, tempDir)
	}
	source := getCachedOrGenerateDB(b, "large-jsonl", generate)
	// Work on a throwaway copy so benchmark writes never dirty the cache.
	working := b.TempDir() + "/large-jsonl.db"
	if err := copyFile(source, working); err != nil {
		b.Fatalf("Failed to copy cached database: %v", err)
	}
	store, err := New(working)
	if err != nil {
		b.Fatalf("Failed to open database: %v", err)
	}
	teardown := func() {
		store.Close()
	}
	return store, teardown
}

View File

@@ -1,3 +1,5 @@
//go:build bench
package sqlite
import (
@@ -124,6 +126,9 @@ func setupBenchDB(tb testing.TB) (*SQLiteStorage, func()) {
}
ctx := context.Background()
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
tb.Fatalf("Failed to set issue_prefix: %v", err)
}
if err := store.SetConfig(ctx, "compact_tier1_days", "30"); err != nil {
tb.Fatalf("Failed to set config: %v", err)
}

View File

@@ -1,3 +1,5 @@
//go:build bench
package sqlite
import (
@@ -48,11 +50,13 @@ func BenchmarkCycleDetection_Linear_5000(b *testing.B) {
// BenchmarkCycleDetection_Dense_100 tests dense graph: each issue depends on 3-5 previous issues
func BenchmarkCycleDetection_Dense_100(b *testing.B) {
	b.Skip("Dense graph benchmarks timeout (>120s). Known issue, no optimization needed for rare use case.")
	// Unreachable while the Skip above is in place; kept so the helper stays
	// referenced and compiling. Delete the Skip to re-enable.
	benchmarkCycleDetectionDense(b, 100)
}
// BenchmarkCycleDetection_Dense_1000 tests dense graph with 1000 issues
func BenchmarkCycleDetection_Dense_1000(b *testing.B) {
	b.Skip("Dense graph benchmarks timeout (>120s). Known issue, no optimization needed for rare use case.")
	// Unreachable while the Skip above is in place; kept so the helper stays
	// referenced and compiling. Delete the Skip to re-enable.
	benchmarkCycleDetectionDense(b, 1000)
}

View File

@@ -11,7 +11,6 @@ func MigrateExternalRefColumn(db *sql.DB) error {
if err != nil {
return fmt.Errorf("failed to check schema: %w", err)
}
defer func() { _ = rows.Close() }()
for rows.Next() {
var cid int
@@ -20,6 +19,7 @@ func MigrateExternalRefColumn(db *sql.DB) error {
var dflt *string
err := rows.Scan(&cid, &name, &typ, &notnull, &dflt, &pk)
if err != nil {
rows.Close()
return fmt.Errorf("failed to scan column info: %w", err)
}
if name == "external_ref" {
@@ -29,9 +29,13 @@ func MigrateExternalRefColumn(db *sql.DB) error {
}
if err := rows.Err(); err != nil {
rows.Close()
return fmt.Errorf("error reading column info: %w", err)
}
// Close rows before executing any statements to avoid deadlock with MaxOpenConns(1)
rows.Close()
if !columnExists {
_, err := db.Exec(`ALTER TABLE issues ADD COLUMN external_ref TEXT`)
if err != nil {

View File

@@ -47,6 +47,7 @@ CREATE TABLE IF NOT EXISTS dependencies (
CREATE INDEX IF NOT EXISTS idx_dependencies_issue ON dependencies(issue_id);
CREATE INDEX IF NOT EXISTS idx_dependencies_depends_on ON dependencies(depends_on_id);
CREATE INDEX IF NOT EXISTS idx_dependencies_depends_on_type ON dependencies(depends_on_id, type);
CREATE INDEX IF NOT EXISTS idx_dependencies_depends_on_type_issue ON dependencies(depends_on_id, type, issue_id);
-- Labels table
CREATE TABLE IF NOT EXISTS labels (

View File

@@ -14,8 +14,10 @@ import (
// Import SQLite driver
"github.com/steveyegge/beads/internal/types"
sqlite3 "github.com/ncruces/go-sqlite3"
_ "github.com/ncruces/go-sqlite3/driver"
_ "github.com/ncruces/go-sqlite3/embed"
"github.com/tetratelabs/wazero"
)
// SQLiteStorage implements the Storage interface using SQLite
@@ -25,6 +27,53 @@ type SQLiteStorage struct {
closed atomic.Bool // Tracks whether Close() has been called
}
// setupWASMCache configures WASM compilation caching to reduce SQLite startup time.
// Returns the cache directory path (empty string if using in-memory cache).
//
// Cache behavior:
// - Location: ~/.cache/beads/wasm/ (platform-specific via os.UserCacheDir)
// - Version management: wazero automatically keys cache by its version
// - Cleanup: Old versions remain harmless (~5-10MB each); manual cleanup if needed
// - Fallback: Uses in-memory cache if filesystem cache creation fails
//
// Performance impact:
// - First run: ~220ms (compile + cache)
// - Subsequent runs: ~20ms (load from cache)
//
// NOTE(review): this mutates the package-global sqlite3.RuntimeConfig; it is
// called once from init() below, before any connection is opened — confirm no
// other code path resets RuntimeConfig afterwards.
func setupWASMCache() string {
	cacheDir := ""
	if userCache, err := os.UserCacheDir(); err == nil {
		cacheDir = filepath.Join(userCache, "beads", "wasm")
	}
	var cache wazero.CompilationCache
	if cacheDir != "" {
		// Try file-system cache first (persistent across runs)
		if c, err := wazero.NewCompilationCacheWithDir(cacheDir); err == nil {
			cache = c
			// Optional: log cache location for debugging
			// fmt.Fprintf(os.Stderr, "WASM cache: %s\n", cacheDir)
		}
	}
	// Fallback to in-memory cache if dir creation failed
	if cache == nil {
		cache = wazero.NewCompilationCache()
		cacheDir = "" // Indicate in-memory fallback
		// Optional: log fallback for debugging
		// fmt.Fprintln(os.Stderr, "WASM cache: in-memory only")
	}
	// Configure go-sqlite3's wazero runtime to use the cache
	sqlite3.RuntimeConfig = wazero.NewRuntimeConfig().WithCompilationCache(cache)
	return cacheDir
}
// init wires up the WASM compilation cache before any SQLite connection is
// created; the returned cache-directory path is intentionally discarded.
func init() {
	// Setup WASM compilation cache to avoid 220ms JIT compilation overhead on every process start
	_ = setupWASMCache()
}
// New creates a new SQLite storage backend
func New(path string) (*SQLiteStorage, error) {
// Build connection string with proper URI syntax
@@ -56,11 +105,14 @@ func New(path string) (*SQLiteStorage, error) {
return nil, fmt.Errorf("failed to open database: %w", err)
}
// For :memory: databases, force single connection to ensure cache sharing works properly.
// SQLite's shared cache mode for in-memory databases only works reliably with one connection.
// Without this, different connections in the pool can't see each other's writes (bd-b121).
if path == ":memory:" {
// For all in-memory databases (including file::memory:), force single connection.
// SQLite's in-memory databases are isolated per connection by default.
// Without this, different connections in the pool can't see each other's writes (bd-b121, bd-yvlc).
isInMemory := path == ":memory:" ||
(strings.HasPrefix(path, "file:") && strings.Contains(path, "mode=memory"))
if isInMemory {
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
}
// Test connection

View File

@@ -0,0 +1,145 @@
//go:build bench
package sqlite
import (
"context"
"testing"
"github.com/steveyegge/beads/internal/types"
)
// Benchmark size rationale:
// We only benchmark Large (10K) and XLarge (20K) databases because:
// - Small databases (<1K issues) perform acceptably without optimization
// - Performance issues only manifest at scale (10K+ issues)
// - Smaller benchmarks add code weight without providing optimization insights
// - Target users manage repos with thousands of issues, not hundreds
// runBenchmark sets up a benchmark with consistent configuration and runs the provided test function.
// It handles store setup/cleanup, timer management, and allocation reporting uniformly across all benchmarks.
func runBenchmark(b *testing.B, setupFunc func(*testing.B) (*SQLiteStorage, func()), testFunc func(*SQLiteStorage, context.Context) error) {
	b.Helper()
	store, cleanup := setupFunc(b)
	defer cleanup()
	ctx := context.Background()
	// Setup cost is excluded from the measurement below.
	b.ReportAllocs()
	b.ResetTimer()
	for iter := 0; iter < b.N; iter++ {
		if err := testFunc(store, ctx); err != nil {
			b.Fatalf("benchmark failed: %v", err)
		}
	}
}
// BenchmarkGetReadyWork_Large benchmarks GetReadyWork on 10K issue database
func BenchmarkGetReadyWork_Large(b *testing.B) {
runBenchmark(b, setupLargeBenchDB, func(store *SQLiteStorage, ctx context.Context) error {
_, err := store.GetReadyWork(ctx, types.WorkFilter{})
return err
})
}
// BenchmarkGetReadyWork_XLarge measures GetReadyWork against a 20K issue database.
func BenchmarkGetReadyWork_XLarge(b *testing.B) {
	runBenchmark(b, setupXLargeBenchDB, func(st *SQLiteStorage, ctx context.Context) error {
		_, err := st.GetReadyWork(ctx, types.WorkFilter{})
		return err
	})
}
// BenchmarkSearchIssues_Large_NoFilter measures a status-only search over all
// open issues in a 10K issue database.
func BenchmarkSearchIssues_Large_NoFilter(b *testing.B) {
	// Build the filter once, outside the timed loop.
	status := types.StatusOpen
	filter := types.IssueFilter{Status: &status}
	runBenchmark(b, setupLargeBenchDB, func(st *SQLiteStorage, ctx context.Context) error {
		_, err := st.SearchIssues(ctx, "", filter)
		return err
	})
}
// BenchmarkSearchIssues_Large_ComplexFilter measures a search combining status
// and a priority range (P0-P2) over a 10K issue database.
func BenchmarkSearchIssues_Large_ComplexFilter(b *testing.B) {
	// Build the filter once, outside the timed loop.
	status := types.StatusOpen
	lo, hi := 0, 2
	filter := types.IssueFilter{
		Status:      &status,
		PriorityMin: &lo,
		PriorityMax: &hi,
	}
	runBenchmark(b, setupLargeBenchDB, func(st *SQLiteStorage, ctx context.Context) error {
		_, err := st.SearchIssues(ctx, "", filter)
		return err
	})
}
// BenchmarkCreateIssue_Large measures issue creation into a 10K issue database.
// Note: the database grows by one issue per iteration.
func BenchmarkCreateIssue_Large(b *testing.B) {
	runBenchmark(b, setupLargeBenchDB, func(st *SQLiteStorage, ctx context.Context) error {
		return st.CreateIssue(ctx, &types.Issue{
			Title:       "Benchmark issue",
			Description: "Test description",
			Status:      types.StatusOpen,
			Priority:    2,
			IssueType:   types.TypeTask,
		}, "bench")
	})
}
// BenchmarkUpdateIssue_Large benchmarks issue updates in a large (10K) database.
// Each iteration performs two UpdateIssue calls (open -> in_progress -> open)
// so the issue returns to its starting state between iterations; the reported
// per-op cost therefore covers two updates.
func BenchmarkUpdateIssue_Large(b *testing.B) {
	// Setup phase: get an issue to update (not timed)
	store, cleanup := setupLargeBenchDB(b)
	defer cleanup()
	ctx := context.Background()
	openStatus := types.StatusOpen
	issues, err := store.SearchIssues(ctx, "", types.IssueFilter{
		Status: &openStatus,
	})
	if err != nil {
		b.Fatalf("Failed to get issues for update test: %v", err)
	}
	// Report the empty-fixture case separately: the previous combined check
	// printed a nil error here, which obscured the actual failure.
	if len(issues) == 0 {
		b.Fatal("Failed to get issues for update test: no open issues in fixture")
	}
	targetID := issues[0].ID
	// Benchmark phase: measure update operations
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		updates := map[string]interface{}{
			"status": types.StatusInProgress,
		}
		if err := store.UpdateIssue(ctx, targetID, updates, "bench"); err != nil {
			b.Fatalf("UpdateIssue failed: %v", err)
		}
		// reset back to open for next iteration
		updates["status"] = types.StatusOpen
		if err := store.UpdateIssue(ctx, targetID, updates, "bench"); err != nil {
			b.Fatalf("UpdateIssue failed: %v", err)
		}
	}
}
// BenchmarkGetReadyWork_FromJSONL measures GetReadyWork against a database
// that was populated via a JSONL export/import round trip.
func BenchmarkGetReadyWork_FromJSONL(b *testing.B) {
	runBenchmark(b, setupLargeFromJSONL, func(st *SQLiteStorage, ctx context.Context) error {
		_, err := st.GetReadyWork(ctx, types.WorkFilter{})
		return err
	})
}
// intPtr returns a pointer to a copy of i; handy for optional *int filter fields.
func intPtr(i int) *int {
	v := i
	return &v
}

View File

@@ -0,0 +1,541 @@
// Package fixtures provides realistic test data generation for benchmarks and tests.
package fixtures
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"time"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
// commonLabels is the shared label pool; each generated issue draws 1-3
// labels at random from this list (duplicates possible).
var commonLabels = []string{
	"backend",
	"frontend",
	"urgent",
	"tech-debt",
	"documentation",
	"performance",
	"security",
	"ux",
	"api",
	"database",
}

// commonAssignees is the shared assignee pool; each generated issue is
// assigned to one of these at random.
var commonAssignees = []string{
	"alice",
	"bob",
	"charlie",
	"diana",
	"eve",
	"frank",
}

// epicTitles seeds realistic epic names; reused round-robin when more epics
// than titles are generated.
var epicTitles = []string{
	"User Authentication System",
	"Payment Processing Integration",
	"Mobile App Redesign",
	"Performance Optimization",
	"API v2 Migration",
	"Search Functionality Enhancement",
	"Analytics Dashboard",
	"Multi-tenant Support",
	"Notification System",
	"Data Export Feature",
}

// featureTitles seeds realistic feature names (features are parented to epics);
// reused round-robin.
var featureTitles = []string{
	"OAuth2 Integration",
	"Password Reset Flow",
	"Two-Factor Authentication",
	"Session Management",
	"API Endpoints",
	"Database Schema",
	"UI Components",
	"Background Jobs",
	"Error Handling",
	"Testing Infrastructure",
}

// taskTitles seeds realistic task names (tasks are parented to features);
// reused round-robin.
var taskTitles = []string{
	"Implement login endpoint",
	"Add validation logic",
	"Write unit tests",
	"Update documentation",
	"Fix memory leak",
	"Optimize query performance",
	"Add error logging",
	"Refactor helper functions",
	"Update database migrations",
	"Configure deployment",
}
// Fixture size rationale:
// We only provide Large (10K) and XLarge (20K) fixtures because:
// - Performance characteristics only emerge at scale (10K+ issues)
// - Smaller fixtures don't provide meaningful optimization insights
// - Code weight matters; we avoid unused complexity
// - Target use case: repositories with thousands of issues

// DataConfig controls the distribution and characteristics of generated test
// data. Ratios are fractions of TotalIssues; the Max*AgeDays fields bound how
// far in the past CreatedAt/ClosedAt timestamps are spread.
type DataConfig struct {
	TotalIssues       int     // total number of issues to generate
	EpicRatio         float64 // fraction of issues that are epics (e.g., 0.1 for 10%)
	FeatureRatio      float64 // fraction of issues that are features (e.g., 0.3 for 30%)
	OpenRatio         float64 // fraction of issues left in an open state (e.g., 0.5 for 50%); the rest are closed
	CrossLinkRatio    float64 // fraction of tasks given cross-epic blocking dependencies (e.g., 0.2 for 20%)
	MaxEpicAgeDays    int     // maximum age in days for epics (e.g., 180)
	MaxFeatureAgeDays int     // maximum age in days for features (e.g., 150)
	MaxTaskAgeDays    int     // maximum age in days for tasks (e.g., 120)
	MaxClosedAgeDays  int     // maximum days since closure (e.g., 30)
	RandSeed          int64   // random seed for reproducible generation
}
// DefaultLargeConfig returns the standard configuration for the 10K issue
// dataset (10% epics, 30% features, half open, seed 42).
func DefaultLargeConfig() DataConfig {
	cfg := DataConfig{
		TotalIssues:       10000,
		EpicRatio:         0.1,
		FeatureRatio:      0.3,
		OpenRatio:         0.5,
		CrossLinkRatio:    0.2,
		MaxEpicAgeDays:    180,
		MaxFeatureAgeDays: 150,
		MaxTaskAgeDays:    120,
		MaxClosedAgeDays:  30,
		RandSeed:          42,
	}
	return cfg
}
// DefaultXLargeConfig returns the standard configuration for the 20K issue
// dataset; identical shape to the Large config but double the size and a
// different seed (43).
func DefaultXLargeConfig() DataConfig {
	cfg := DataConfig{
		TotalIssues:       20000,
		EpicRatio:         0.1,
		FeatureRatio:      0.3,
		OpenRatio:         0.5,
		CrossLinkRatio:    0.2,
		MaxEpicAgeDays:    180,
		MaxFeatureAgeDays: 150,
		MaxTaskAgeDays:    120,
		MaxClosedAgeDays:  30,
		RandSeed:          43,
	}
	return cfg
}
// LargeSQLite populates store with the 10K issue dataset using realistic
// epic/feature/task patterns.
func LargeSQLite(ctx context.Context, store storage.Storage) error {
	return generateIssuesWithConfig(ctx, store, DefaultLargeConfig())
}
// XLargeSQLite populates store with the 20K issue dataset using realistic
// epic/feature/task patterns.
func XLargeSQLite(ctx context.Context, store storage.Storage) error {
	return generateIssuesWithConfig(ctx, store, DefaultXLargeConfig())
}
// LargeFromJSONL populates store with the 10K issue dataset via a JSONL
// export/reimport round trip (exercising the import path benchmarks care about).
func LargeFromJSONL(ctx context.Context, store storage.Storage, tempDir string) error {
	cfg := DefaultLargeConfig()
	// Distinct seed so the JSONL path generates different data than LargeSQLite.
	cfg.RandSeed = 44
	return generateFromJSONL(ctx, store, tempDir, cfg)
}
// XLargeFromJSONL populates store with the 20K issue dataset via a JSONL
// export/reimport round trip.
func XLargeFromJSONL(ctx context.Context, store storage.Storage, tempDir string) error {
	cfg := DefaultXLargeConfig()
	// Distinct seed so the JSONL path generates different data than XLargeSQLite.
	cfg.RandSeed = 45
	return generateFromJSONL(ctx, store, tempDir, cfg)
}
// generateIssuesWithConfig creates issues with realistic epic hierarchies and
// cross-links using the provided configuration.
//
// Layout: cfg.EpicRatio of the issues become epics, cfg.FeatureRatio become
// features (each parented round-robin to an epic), and the remainder become
// tasks (each parented round-robin to a feature). A cfg.CrossLinkRatio
// fraction of tasks then gains blocking dependencies on randomly chosen
// tasks. Generation is deterministic for a fixed cfg.RandSeed.
func generateIssuesWithConfig(ctx context.Context, store storage.Storage, cfg DataConfig) error {
	rng := rand.New(rand.NewSource(cfg.RandSeed))
	// Calculate breakdown using configuration ratios
	numEpics := int(float64(cfg.TotalIssues) * cfg.EpicRatio)
	numFeatures := int(float64(cfg.TotalIssues) * cfg.FeatureRatio)
	numTasks := cfg.TotalIssues - numEpics - numFeatures
	// Parenting below indexes epics/features via i%len(...), which would panic
	// on an empty pool; reject configs too small to produce both kinds.
	if numEpics == 0 || numFeatures == 0 {
		return fmt.Errorf("config too small: produces %d epics and %d features (total=%d)",
			numEpics, numFeatures, cfg.TotalIssues)
	}
	// Track created issues per type for parenting and cross-linking.
	// (The previous combined allIssues slice was never read and has been removed.)
	epicIssues := make([]*types.Issue, 0, numEpics)
	featureIssues := make([]*types.Issue, 0, numFeatures)
	taskIssues := make([]*types.Issue, 0, numTasks)
	// Progress tracking: log roughly every 10%.
	createdIssues := 0
	lastPctLogged := -1
	logProgress := func() {
		pct := (createdIssues * 100) / cfg.TotalIssues
		if pct >= lastPctLogged+10 {
			fmt.Printf(" Progress: %d%% (%d/%d issues created)\n", pct, createdIssues, cfg.TotalIssues)
			lastPctLogged = pct
		}
	}
	// Create epics
	for i := 0; i < numEpics; i++ {
		issue := &types.Issue{
			Title:       fmt.Sprintf("%s (Epic %d)", epicTitles[i%len(epicTitles)], i),
			Description: fmt.Sprintf("Epic for %s", epicTitles[i%len(epicTitles)]),
			Status:      randomStatus(rng, cfg.OpenRatio),
			Priority:    randomPriority(rng),
			IssueType:   types.TypeEpic,
			Assignee:    commonAssignees[rng.Intn(len(commonAssignees))],
			CreatedAt:   randomTime(rng, cfg.MaxEpicAgeDays),
			UpdatedAt:   time.Now(),
		}
		if issue.Status == types.StatusClosed {
			closedAt := randomTime(rng, cfg.MaxClosedAgeDays)
			issue.ClosedAt = &closedAt
		}
		if err := store.CreateIssue(ctx, issue, "fixture"); err != nil {
			return fmt.Errorf("failed to create epic: %w", err)
		}
		// Add 1-3 labels; label failures are non-fatal for fixture data.
		for j := 0; j < rng.Intn(3)+1; j++ {
			label := commonLabels[rng.Intn(len(commonLabels))]
			_ = store.AddLabel(ctx, issue.ID, label, "fixture")
		}
		epicIssues = append(epicIssues, issue)
		createdIssues++
		logProgress()
	}
	// Create features under epics
	for i := 0; i < numFeatures; i++ {
		parentEpic := epicIssues[i%len(epicIssues)]
		issue := &types.Issue{
			Title:       fmt.Sprintf("%s (Feature %d)", featureTitles[i%len(featureTitles)], i),
			Description: fmt.Sprintf("Feature under %s", parentEpic.Title),
			Status:      randomStatus(rng, cfg.OpenRatio),
			Priority:    randomPriority(rng),
			IssueType:   types.TypeFeature,
			Assignee:    commonAssignees[rng.Intn(len(commonAssignees))],
			CreatedAt:   randomTime(rng, cfg.MaxFeatureAgeDays),
			UpdatedAt:   time.Now(),
		}
		if issue.Status == types.StatusClosed {
			closedAt := randomTime(rng, cfg.MaxClosedAgeDays)
			issue.ClosedAt = &closedAt
		}
		if err := store.CreateIssue(ctx, issue, "fixture"); err != nil {
			return fmt.Errorf("failed to create feature: %w", err)
		}
		// Add parent-child dependency to epic
		dep := &types.Dependency{
			IssueID:     issue.ID,
			DependsOnID: parentEpic.ID,
			Type:        types.DepParentChild,
			CreatedAt:   time.Now(),
			CreatedBy:   "fixture",
		}
		if err := store.AddDependency(ctx, dep, "fixture"); err != nil {
			return fmt.Errorf("failed to add feature-epic dependency: %w", err)
		}
		// Add 1-3 labels
		for j := 0; j < rng.Intn(3)+1; j++ {
			label := commonLabels[rng.Intn(len(commonLabels))]
			_ = store.AddLabel(ctx, issue.ID, label, "fixture")
		}
		featureIssues = append(featureIssues, issue)
		createdIssues++
		logProgress()
	}
	// Create tasks under features
	for i := 0; i < numTasks; i++ {
		parentFeature := featureIssues[i%len(featureIssues)]
		issue := &types.Issue{
			Title:       fmt.Sprintf("%s (Task %d)", taskTitles[i%len(taskTitles)], i),
			Description: fmt.Sprintf("Task under %s", parentFeature.Title),
			Status:      randomStatus(rng, cfg.OpenRatio),
			Priority:    randomPriority(rng),
			IssueType:   types.TypeTask,
			Assignee:    commonAssignees[rng.Intn(len(commonAssignees))],
			CreatedAt:   randomTime(rng, cfg.MaxTaskAgeDays),
			UpdatedAt:   time.Now(),
		}
		if issue.Status == types.StatusClosed {
			closedAt := randomTime(rng, cfg.MaxClosedAgeDays)
			issue.ClosedAt = &closedAt
		}
		if err := store.CreateIssue(ctx, issue, "fixture"); err != nil {
			return fmt.Errorf("failed to create task: %w", err)
		}
		// Add parent-child dependency to feature
		dep := &types.Dependency{
			IssueID:     issue.ID,
			DependsOnID: parentFeature.ID,
			Type:        types.DepParentChild,
			CreatedAt:   time.Now(),
			CreatedBy:   "fixture",
		}
		if err := store.AddDependency(ctx, dep, "fixture"); err != nil {
			return fmt.Errorf("failed to add task-feature dependency: %w", err)
		}
		// Add 1-2 labels
		for j := 0; j < rng.Intn(2)+1; j++ {
			label := commonLabels[rng.Intn(len(commonLabels))]
			_ = store.AddLabel(ctx, issue.ID, label, "fixture")
		}
		taskIssues = append(taskIssues, issue)
		createdIssues++
		logProgress()
	}
	fmt.Printf(" Progress: 100%% (%d/%d issues created) - Complete!\n", cfg.TotalIssues, cfg.TotalIssues)
	// Add cross-links between tasks across epics using the configured ratio.
	// Self-pairs are skipped (not retried), so the realized count may be
	// slightly below the target.
	numCrossLinks := int(float64(numTasks) * cfg.CrossLinkRatio)
	for i := 0; i < numCrossLinks; i++ {
		fromTask := taskIssues[rng.Intn(len(taskIssues))]
		toTask := taskIssues[rng.Intn(len(taskIssues))]
		// Avoid self-dependencies
		if fromTask.ID == toTask.ID {
			continue
		}
		dep := &types.Dependency{
			IssueID:     fromTask.ID,
			DependsOnID: toTask.ID,
			Type:        types.DepBlocks,
			CreatedAt:   time.Now(),
			CreatedBy:   "fixture",
		}
		// Ignore cycle errors for cross-links (they're expected)
		_ = store.AddDependency(ctx, dep, "fixture")
	}
	return nil
}
// generateFromJSONL populates the store, round-trips everything through a
// JSONL file in tempDir (export, wipe, reimport), and leaves the store holding
// the reimported issues. This exercises the import path used by benchmarks.
func generateFromJSONL(ctx context.Context, store storage.Storage, tempDir string, cfg DataConfig) error {
	// Generate the dataset directly first.
	if err := generateIssuesWithConfig(ctx, store, cfg); err != nil {
		return fmt.Errorf("failed to generate issues: %w", err)
	}
	// Export everything to a JSONL file.
	jsonlPath := filepath.Join(tempDir, "issues.jsonl")
	if err := exportToJSONL(ctx, store, jsonlPath); err != nil {
		return fmt.Errorf("failed to export to JSONL: %w", err)
	}
	// Wipe the store so the reimport starts from a clean slate.
	existing, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		return fmt.Errorf("failed to get all issues: %w", err)
	}
	for _, iss := range existing {
		if err := store.DeleteIssue(ctx, iss.ID); err != nil {
			return fmt.Errorf("failed to delete issue %s: %w", iss.ID, err)
		}
	}
	// Reimport from the JSONL file.
	if err := importFromJSONL(ctx, store, jsonlPath); err != nil {
		return fmt.Errorf("failed to import from JSONL: %w", err)
	}
	return nil
}
// exportToJSONL exports all issues (with their dependencies and labels
// populated) to a JSONL file at path, one JSON-encoded issue per line.
func exportToJSONL(ctx context.Context, store storage.Storage, path string) error {
	// Get all issues
	allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		return fmt.Errorf("failed to query issues: %w", err)
	}
	// Populate dependencies and labels for each issue
	allDeps, err := store.GetAllDependencyRecords(ctx)
	if err != nil {
		return fmt.Errorf("failed to get dependencies: %w", err)
	}
	for _, issue := range allIssues {
		issue.Dependencies = allDeps[issue.ID]
		labels, err := store.GetLabels(ctx, issue.ID)
		if err != nil {
			return fmt.Errorf("failed to get labels for %s: %w", issue.ID, err)
		}
		issue.Labels = labels
	}
	// Write to JSONL file
	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("failed to create JSONL file: %w", err)
	}
	encoder := json.NewEncoder(f)
	for _, issue := range allIssues {
		if err := encoder.Encode(issue); err != nil {
			f.Close() // best effort; the encode error takes precedence
			return fmt.Errorf("failed to encode issue: %w", err)
		}
	}
	// Close explicitly and propagate the error: for a freshly written file a
	// failed close can mean buffered data never reached disk.
	if err := f.Close(); err != nil {
		return fmt.Errorf("failed to close JSONL file: %w", err)
	}
	return nil
}
// importFromJSONL imports issues from a JSONL file in two phases: first all
// bare issues are created (so every ID exists), then dependencies and labels
// are wired up. Duplicate-issue and duplicate/cycle-dependency errors are
// tolerated; other failures abort the import.
func importFromJSONL(ctx context.Context, store storage.Storage, path string) error {
	raw, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("failed to read JSONL file: %w", err)
	}
	// Parse one issue per non-empty line.
	var parsed []*types.Issue
	for lineNo, line := range splitLines(string(raw)) {
		if len(line) == 0 {
			continue
		}
		issue := &types.Issue{}
		if err := json.Unmarshal([]byte(line), issue); err != nil {
			return fmt.Errorf("failed to parse issue at line %d: %w", lineNo+1, err)
		}
		parsed = append(parsed, issue)
	}
	// Phase 1: create all issues with their dependencies/labels stripped and
	// stashed for later, so forward references never dangle.
	type stashed struct {
		deps   []*types.Dependency
		labels []string
	}
	stash := make(map[string]stashed, len(parsed))
	for _, issue := range parsed {
		stash[issue.ID] = stashed{deps: issue.Dependencies, labels: issue.Labels}
		issue.Dependencies = nil
		issue.Labels = nil
		if err := store.CreateIssue(ctx, issue, "fixture"); err != nil {
			// Ignore duplicate errors
			if !strings.Contains(err.Error(), "UNIQUE constraint failed") {
				return fmt.Errorf("failed to create issue %s: %w", issue.ID, err)
			}
		}
	}
	// Phase 2: reattach dependencies and labels now that every issue exists.
	for id, meta := range stash {
		for _, dep := range meta.deps {
			if err := store.AddDependency(ctx, dep, "fixture"); err != nil {
				// Ignore duplicate and cycle errors
				if !strings.Contains(err.Error(), "already exists") &&
					!strings.Contains(err.Error(), "cycle") {
					return fmt.Errorf("failed to add dependency for %s: %w", id, err)
				}
			}
		}
		for _, label := range meta.labels {
			_ = store.AddLabel(ctx, id, label, "fixture")
		}
	}
	return nil
}
// splitLines splits s on '\n'. Interior empty lines are preserved; a trailing
// newline does not produce a final empty element.
func splitLines(s string) []string {
	var out []string
	for len(s) > 0 {
		nl := -1
		for j := 0; j < len(s); j++ {
			if s[j] == '\n' {
				nl = j
				break
			}
		}
		if nl < 0 {
			// No newline left: the remainder is the final line.
			out = append(out, s)
			break
		}
		out = append(out, s[:nl])
		s = s[nl+1:]
	}
	return out
}
// randomStatus picks a status: with probability openRatio one of the three
// non-closed statuses (uniformly), otherwise closed.
func randomStatus(rng *rand.Rand, openRatio float64) types.Status {
	if rng.Float64() >= openRatio {
		return types.StatusClosed
	}
	// Open statuses: open, in_progress, blocked
	open := []types.Status{types.StatusOpen, types.StatusInProgress, types.StatusBlocked}
	return open[rng.Intn(len(open))]
}
// randomPriority returns a random priority with realistic distribution
// P0: 5%, P1: 15%, P2: 50%, P3: 25%, P4: 5%
func randomPriority(rng *rand.Rand) int {
r := rng.Intn(100)
switch {
case r < 5:
return 0
case r < 20:
return 1
case r < 70:
return 2
case r < 95:
return 3
default:
return 4
}
}
// randomTime returns a random time up to maxDaysAgo days in the past
func randomTime(rng *rand.Rand, maxDaysAgo int) time.Time {
daysAgo := rng.Intn(maxDaysAgo)
return time.Now().Add(-time.Duration(daysAgo) * 24 * time.Hour)
}

View File

@@ -0,0 +1,128 @@
package fixtures
import (
"context"
"testing"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// TestLargeSQLite verifies the 10K fixture generates the expected issue count
// and a mix of epics, features, and tasks.
func TestLargeSQLite(t *testing.T) {
	// Generating 10K issues is slow; honor -short like the sibling fixture
	// tests (TestXLargeSQLite, TestLargeFromJSONL) already do.
	if testing.Short() {
		t.Skip("Skipping Large test in short mode")
	}
	tmpDB := t.TempDir() + "/test.db"
	store, err := sqlite.New(tmpDB)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()
	ctx := context.Background()
	// Initialize database with prefix
	if err := store.SetConfig(ctx, "issue_prefix", "bd-"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}
	if err := LargeSQLite(ctx, store); err != nil {
		t.Fatalf("LargeSQLite failed: %v", err)
	}
	// Verify issue count
	allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to search issues: %v", err)
	}
	if len(allIssues) != 10000 {
		t.Errorf("Expected 10000 issues, got %d", len(allIssues))
	}
	// Verify we have epics, features, and tasks
	var epics, features, tasks int
	for _, issue := range allIssues {
		switch issue.IssueType {
		case types.TypeEpic:
			epics++
		case types.TypeFeature:
			features++
		case types.TypeTask:
			tasks++
		}
	}
	if epics == 0 || features == 0 || tasks == 0 {
		t.Errorf("Missing issue types: epics=%d, features=%d, tasks=%d", epics, features, tasks)
	}
	t.Logf("Created %d epics, %d features, %d tasks", epics, features, tasks)
}
// TestXLargeSQLite verifies the 20K fixture generates the expected issue count.
func TestXLargeSQLite(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping XLarge test in short mode")
	}
	dbPath := t.TempDir() + "/test.db"
	store, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()
	ctx := context.Background()
	// The fixture needs a configured issue prefix before generation.
	if err := store.SetConfig(ctx, "issue_prefix", "bd-"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}
	if err := XLargeSQLite(ctx, store); err != nil {
		t.Fatalf("XLargeSQLite failed: %v", err)
	}
	// Confirm the full 20K issues landed in the store.
	all, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to search issues: %v", err)
	}
	if got := len(all); got != 20000 {
		t.Errorf("Expected 20000 issues, got %d", got)
	}
}
// TestLargeFromJSONL verifies the JSONL export/import round trip preserves the
// full 10K issue count.
func TestLargeFromJSONL(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping JSONL test in short mode")
	}
	dbPath := t.TempDir() + "/test.db"
	store, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()
	ctx := context.Background()
	// The fixture needs a configured issue prefix before generation.
	if err := store.SetConfig(ctx, "issue_prefix", "bd-"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}
	scratch := t.TempDir()
	if err := LargeFromJSONL(ctx, store, scratch); err != nil {
		t.Fatalf("LargeFromJSONL failed: %v", err)
	}
	// Confirm the round trip preserved every issue.
	all, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to search issues: %v", err)
	}
	if got := len(all); got != 10000 {
		t.Errorf("Expected 10000 issues, got %d", got)
	}
}

View File

@@ -81,14 +81,41 @@ func ResolvePartialID(ctx context.Context, store storage.Storage, input string)
hashPart := strings.TrimPrefix(normalizedID, prefixWithHyphen)
var matches []string
var exactMatch string
for _, issue := range issues {
issueHash := strings.TrimPrefix(issue.ID, prefixWithHyphen)
// Check for exact full ID match first (case: user typed full ID with different prefix)
if issue.ID == input {
exactMatch = issue.ID
break
}
// Extract hash from each issue, regardless of its prefix
// This handles cross-prefix matching (e.g., "3d0" matching "offlinebrew-3d0")
var issueHash string
if idx := strings.Index(issue.ID, "-"); idx >= 0 {
issueHash = issue.ID[idx+1:]
} else {
issueHash = issue.ID
}
// Check for exact hash match (excluding hierarchical children)
if issueHash == hashPart {
exactMatch = issue.ID
// Don't break - keep searching in case there's a full ID match
}
// Check if the issue hash contains the input hash as substring
if strings.Contains(issueHash, hashPart) {
matches = append(matches, issue.ID)
}
}
// Prefer exact match over substring matches
if exactMatch != "" {
return exactMatch, nil
}
if len(matches) == 0 {
return "", fmt.Errorf("no issue found matching %q", input)
}

View File

@@ -90,6 +90,21 @@ func TestResolvePartialID(t *testing.T) {
Priority: 1,
IssueType: types.TypeTask,
}
// Test hierarchical IDs - parent and child
parentIssue := &types.Issue{
ID: "offlinebrew-3d0",
Title: "Parent Epic",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeEpic,
}
childIssue := &types.Issue{
ID: "offlinebrew-3d0.1",
Title: "Child Task",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
}
if err := store.CreateIssue(ctx, issue1, "test"); err != nil {
t.Fatal(err)
@@ -100,6 +115,12 @@ func TestResolvePartialID(t *testing.T) {
if err := store.CreateIssue(ctx, issue3, "test"); err != nil {
t.Fatal(err)
}
if err := store.CreateIssue(ctx, parentIssue, "test"); err != nil {
t.Fatal(err)
}
if err := store.CreateIssue(ctx, childIssue, "test"); err != nil {
t.Fatal(err)
}
// Set config for prefix
if err := store.SetConfig(ctx, "issue_prefix", "bd-"); err != nil {
@@ -149,6 +170,16 @@ func TestResolvePartialID(t *testing.T) {
input: "bd-1",
expected: "bd-1", // Will match exactly, not ambiguously
},
{
name: "exact match parent ID with hierarchical child - gh-316",
input: "offlinebrew-3d0",
expected: "offlinebrew-3d0", // Should match exactly, not be ambiguous with offlinebrew-3d0.1
},
{
name: "exact match parent without prefix - gh-316",
input: "3d0",
expected: "offlinebrew-3d0", // Should still prefer exact hash match
},
}
for _, tt := range tests {