Context propagation with graceful cancellation (bd-rtp, bd-yb8, bd-2o2)

Complete implementation of signal-aware context propagation for graceful
cancellation across all commands and storage operations.

Key changes:

1. Signal-aware contexts (bd-rtp):
   - Added rootCtx/rootCancel in main.go using signal.NotifyContext()
   - Set up in PersistentPreRun, cancelled in PersistentPostRun
   - Daemon uses same pattern in runDaemonLoop()
   - Handles SIGINT/SIGTERM for graceful shutdown

2. Context propagation (bd-yb8):
   - All commands now use rootCtx instead of context.Background()
   - sqlite.New() receives context for cancellable operations
   - Database operations respect context cancellation
   - Storage layer propagates context through all queries

3. Cancellation tests (bd-2o2):
   - Added import_cancellation_test.go with comprehensive tests
   - Added export cancellation test in export_test.go
   - Tests verify database integrity after cancellation
   - All cancellation tests passing

Fixes applied during review:
   - Fixed rootCtx lifecycle (removed premature defer from PersistentPreRun)
   - Fixed test context contamination (reset rootCtx in test cleanup)
   - Fixed export tests missing context setup

Impact:
   - Pressing Ctrl+C during import/export now cancels gracefully
   - No database corruption or hanging transactions
   - Clean shutdown of all operations

Tested:
   - go build ./cmd/bd ✓
   - go test ./cmd/bd -run TestImportCancellation ✓
   - go test ./cmd/bd -run TestExportCommand ✓
   - Manual Ctrl+C testing verified

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-11-20 21:57:23 -05:00
parent 91c684cdbe
commit 57253f93a3
72 changed files with 387 additions and 232 deletions

View File

@@ -8,6 +8,8 @@
package beads
import (
"context"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/types"
)
@@ -16,8 +18,8 @@ import (
type Storage = beads.Storage
// NewSQLiteStorage creates a new SQLite storage instance at the given path
func NewSQLiteStorage(dbPath string) (Storage, error) {
return beads.NewSQLiteStorage(dbPath)
func NewSQLiteStorage(ctx context.Context, dbPath string) (Storage, error) {
return beads.NewSQLiteStorage(ctx, dbPath)
}
// FindDatabasePath finds the beads database in the current directory tree

View File

@@ -76,7 +76,7 @@ func autoImportIfNewer() {
currentHash := hex.EncodeToString(hasher.Sum(nil))
// Get last import hash from DB metadata
ctx := context.Background()
ctx := rootCtx
lastHash, err := store.GetMetadata(ctx, "last_import_hash")
if err != nil {
// Metadata error - treat as first import rather than skipping (bd-663)
@@ -500,7 +500,7 @@ func flushToJSONLWithState(state flushState) {
}
storeMutex.Unlock()
ctx := context.Background()
ctx := rootCtx
// Validate JSONL integrity BEFORE checking isDirty (bd-c6cf)
// This detects if JSONL and export_hashes are out of sync (e.g., after git operations)

View File

@@ -13,7 +13,7 @@ import (
func TestCheckAndAutoImport_NoAutoImportFlag(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(tmpDB)
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -33,7 +33,7 @@ func TestCheckAndAutoImport_NoAutoImportFlag(t *testing.T) {
func TestCheckAndAutoImport_DatabaseHasIssues(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(tmpDB)
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -71,7 +71,7 @@ func TestCheckAndAutoImport_EmptyDatabaseNoGit(t *testing.T) {
ctx := context.Background()
tmpDir := t.TempDir()
tmpDB := filepath.Join(tmpDir, "test.db")
store, err := sqlite.New(tmpDB)
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -55,7 +54,7 @@ SAFETY:
}
}
ctx := context.Background()
ctx := rootCtx
// Build filter for closed issues
statusClosed := types.StatusClosed

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -63,7 +62,7 @@ Examples:
fmt.Fprintf(os.Stderr, "Error getting comments: %v\n", err)
os.Exit(1)
}
ctx := context.Background()
ctx := rootCtx
fullID, err := utils.ResolvePartialID(ctx, store, issueID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", issueID, err)
@@ -183,7 +182,7 @@ Examples:
fmt.Fprintf(os.Stderr, "Error adding comment: %v\n", err)
os.Exit(1)
}
ctx := context.Background()
ctx := rootCtx
fullID, err := utils.ResolvePartialID(ctx, store, issueID)
if err != nil {

View File

@@ -62,7 +62,7 @@ Examples:
bd compact --stats # Show statistics
`,
Run: func(_ *cobra.Command, _ []string) {
ctx := context.Background()
ctx := rootCtx
// Handle compact stats first
if compactStats {

View File

@@ -20,7 +20,7 @@ func TestCompactDryRun(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -148,7 +148,7 @@ func TestCompactStats(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -225,7 +225,7 @@ func TestRunCompactStats(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -357,7 +357,7 @@ func TestCompactStatsJSON(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -402,7 +402,7 @@ func TestRunCompactSingleDryRun(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -467,7 +467,7 @@ func TestRunCompactAllDryRun(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"fmt"
"os"
"sort"
@@ -46,7 +45,7 @@ var configSetCmd = &cobra.Command{
key := args[0]
value := args[1]
ctx := context.Background()
ctx := rootCtx
// Special handling for sync.branch to apply validation
if strings.TrimSpace(key) == syncbranch.ConfigKey {
@@ -85,7 +84,7 @@ var configGetCmd = &cobra.Command{
key := args[0]
ctx := context.Background()
ctx := rootCtx
var value string
var err error
@@ -126,7 +125,7 @@ var configListCmd = &cobra.Command{
os.Exit(1)
}
ctx := context.Background()
ctx := rootCtx
config, err := store.GetAllConfig(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error listing config: %v\n", err)
@@ -170,7 +169,7 @@ var configUnsetCmd = &cobra.Command{
key := args[0]
ctx := context.Background()
ctx := rootCtx
if err := store.DeleteConfig(ctx, key); err != nil {
fmt.Fprintf(os.Stderr, "Error deleting config: %v\n", err)
os.Exit(1)

View File

@@ -156,7 +156,7 @@ func setupTestDB(t *testing.T) (*sqlite.SQLiteStorage, func()) {
}
testDB := filepath.Join(tmpDir, "test.db")
store, err := sqlite.New(testDB)
store, err := sqlite.New(context.Background(), testDB)
if err != nil {
os.RemoveAll(tmpDir)
t.Fatalf("Failed to create test database: %v", err)

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -168,7 +167,7 @@ var createCmd = &cobra.Command{
// In daemon mode, the parent will be sent to the RPC handler
// In direct mode, we generate the child ID here
if parentID != "" && daemonClient == nil {
ctx := context.Background()
ctx := rootCtx
childID, err := store.GetNextChildID(ctx, parentID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -186,7 +185,7 @@ var createCmd = &cobra.Command{
}
// Validate prefix matches database prefix
ctx := context.Background()
ctx := rootCtx
// Get database prefix from config
var dbPrefix string
@@ -263,7 +262,7 @@ var createCmd = &cobra.Command{
ExternalRef: externalRefPtr,
}
ctx := context.Background()
ctx := rootCtx
// Check if any dependencies are discovered-from type
// If so, inherit source_repo from the parent issue

View File

@@ -4,9 +4,11 @@ import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/spf13/cobra"
@@ -196,6 +198,10 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush bool, logPath, p
logF, log := setupDaemonLogger(logPath)
defer func() { _ = logF.Close() }()
// Set up signal-aware context for graceful shutdown
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer cancel()
// Top-level panic recovery to ensure clean shutdown and diagnostics
defer func() {
if r := recover(); r != nil {
@@ -257,7 +263,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush bool, logPath, p
log.log("Daemon started (interval: %v, auto-commit: %v, auto-push: %v)", interval, autoCommit, autoPush)
if global {
runGlobalDaemon(log)
runGlobalDaemon(ctx, log)
return
}
@@ -314,7 +320,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush bool, logPath, p
log.log("Warning: could not remove daemon-error file: %v", err)
}
store, err := sqlite.New(daemonDBPath)
store, err := sqlite.New(ctx, daemonDBPath)
if err != nil {
log.log("Error: cannot open database: %v", err)
return // Use return instead of os.Exit to allow defers to run
@@ -334,8 +340,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush bool, logPath, p
}
// Hydrate from multi-repo if configured
hydrateCtx := context.Background()
if results, err := store.HydrateFromMultiRepo(hydrateCtx); err != nil {
if results, err := store.HydrateFromMultiRepo(ctx); err != nil {
log.log("Error: multi-repo hydration failed: %v", err)
return // Use return instead of os.Exit to allow defers to run
} else if results != nil {
@@ -346,7 +351,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush bool, logPath, p
}
// Validate database fingerprint
if err := validateDatabaseFingerprint(store, &log); err != nil {
if err := validateDatabaseFingerprint(ctx, store, &log); err != nil {
if os.Getenv("BEADS_IGNORE_REPO_MISMATCH") != "1" {
log.log("Error: %v", err)
return // Use return instead of os.Exit to allow defers to run
@@ -394,10 +399,10 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush bool, logPath, p
// Get actual workspace root (parent of .beads)
workspacePath := filepath.Dir(beadsDir)
socketPath := filepath.Join(beadsDir, "bd.sock")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
serverCtx, serverCancel := context.WithCancel(ctx)
defer serverCancel()
server, serverErrChan, err := startRPCServer(ctx, socketPath, store, workspacePath, daemonDBPath, log)
server, serverErrChan, err := startRPCServer(serverCtx, socketPath, store, workspacePath, daemonDBPath, log)
if err != nil {
return
}

View File

@@ -41,7 +41,7 @@ func startRPCServer(ctx context.Context, socketPath string, store storage.Storag
}
// runGlobalDaemon runs the global routing daemon
func runGlobalDaemon(log daemonLogger) {
func runGlobalDaemon(ctx context.Context, log daemonLogger) {
globalDir, err := getGlobalBeadsDir()
if err != nil {
log.log("Error: cannot get global beads directory: %v", err)
@@ -49,10 +49,10 @@ func runGlobalDaemon(log daemonLogger) {
}
socketPath := filepath.Join(globalDir, "bd.sock")
ctx, cancel := context.WithCancel(context.Background())
serverCtx, cancel := context.WithCancel(ctx)
defer cancel()
server, _, err := startRPCServer(ctx, socketPath, nil, globalDir, "", log)
server, _, err := startRPCServer(serverCtx, socketPath, nil, globalDir, "", log)
if err != nil {
return
}

View File

@@ -200,8 +200,7 @@ func importToJSONLWithStore(ctx context.Context, store storage.Storage, jsonlPat
}
// validateDatabaseFingerprint checks that the database belongs to this repository
func validateDatabaseFingerprint(store storage.Storage, log *daemonLogger) error {
ctx := context.Background()
func validateDatabaseFingerprint(ctx context.Context, store storage.Storage, log *daemonLogger) error {
// Get stored repo ID
storedRepoID, err := store.GetMetadata(ctx, "repo_id")

View File

@@ -34,7 +34,7 @@ func TestSyncBranchCommitAndPush_NotConfigured(t *testing.T) {
}
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -105,7 +105,7 @@ func TestSyncBranchCommitAndPush_Success(t *testing.T) {
}
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -216,7 +216,7 @@ func TestSyncBranchCommitAndPush_NoChanges(t *testing.T) {
}
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -301,7 +301,7 @@ func TestSyncBranchCommitAndPush_WorktreeHealthCheck(t *testing.T) {
}
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -405,7 +405,7 @@ func TestSyncBranchPull_NotConfigured(t *testing.T) {
}
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -464,7 +464,7 @@ func TestSyncBranchPull_Success(t *testing.T) {
}
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
store1, err := sqlite.New(clone1DBPath)
store1, err := sqlite.New(context.Background(), clone1DBPath)
if err != nil {
t.Fatalf("Failed to create store1: %v", err)
}
@@ -531,7 +531,7 @@ func TestSyncBranchPull_Success(t *testing.T) {
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
store2, err := sqlite.New(clone2DBPath)
store2, err := sqlite.New(context.Background(), clone2DBPath)
if err != nil {
t.Fatalf("Failed to create store2: %v", err)
}
@@ -613,7 +613,7 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
os.MkdirAll(clone1BeadsDir, 0755)
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
store1, _ := sqlite.New(clone1DBPath)
store1, _ := sqlite.New(context.Background(), clone1DBPath)
defer store1.Close()
ctx := context.Background()
@@ -664,7 +664,7 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
store2, _ := sqlite.New(clone2DBPath)
store2, _ := sqlite.New(context.Background(), clone2DBPath)
defer store2.Close()
store2.SetConfig(ctx, "issue_prefix", "test")
@@ -763,7 +763,7 @@ func TestSyncBranchConfigChange(t *testing.T) {
}
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -903,7 +903,7 @@ func TestSyncBranchMultipleConcurrentClones(t *testing.T) {
beadsDir := filepath.Join(cloneDir, ".beads")
os.MkdirAll(beadsDir, 0755)
dbPath := filepath.Join(beadsDir, "test.db")
store, _ := sqlite.New(dbPath)
store, _ := sqlite.New(context.Background(), dbPath)
ctx := context.Background()
store.SetConfig(ctx, "issue_prefix", "test")
@@ -1046,7 +1046,7 @@ func TestSyncBranchPerformance(t *testing.T) {
os.MkdirAll(beadsDir, 0755)
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -1137,7 +1137,7 @@ func TestSyncBranchNetworkFailure(t *testing.T) {
os.MkdirAll(beadsDir, 0755)
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}

View File

@@ -18,7 +18,7 @@ func TestExportToJSONLWithStore(t *testing.T) {
jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")
// Create storage
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create store: %v", err)
}
@@ -82,7 +82,7 @@ func TestExportToJSONLWithStore_EmptyDatabase(t *testing.T) {
jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")
// Create storage (empty)
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create store: %v", err)
}
@@ -122,7 +122,7 @@ func TestImportToJSONLWithStore(t *testing.T) {
jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")
// Create storage first to initialize database
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create store: %v", err)
}
@@ -178,7 +178,7 @@ func TestExportImportRoundTrip(t *testing.T) {
jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")
// Create storage and add issues
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create store: %v", err)
}
@@ -240,7 +240,7 @@ func TestExportImportRoundTrip(t *testing.T) {
// Create new database
dbPath2 := filepath.Join(tmpDir, ".beads", "beads2.db")
store2, err := sqlite.New(dbPath2)
store2, err := sqlite.New(context.Background(), dbPath2)
if err != nil {
t.Fatalf("failed to create store2: %v", err)
}

View File

@@ -495,7 +495,7 @@ func TestDaemonServerStartFailureSocketExists(t *testing.T) {
socketPath := filepath.Join(tmpDir, "test.sock")
testDBPath := filepath.Join(tmpDir, "test.db")
testStore1, err := sqlite.New(testDBPath)
testStore1, err := sqlite.New(context.Background(), testDBPath)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}
@@ -526,7 +526,7 @@ func TestDaemonServerStartFailureSocketExists(t *testing.T) {
t.Fatal("Socket should exist for first server")
}
testStore2, err := sqlite.New(filepath.Join(tmpDir, "test2.db"))
testStore2, err := sqlite.New(context.Background(), filepath.Join(tmpDir, "test2.db"))
if err != nil {
t.Fatalf("Failed to create second test database: %v", err)
}

View File

@@ -79,7 +79,7 @@ Force: Delete and orphan dependents
os.Exit(1)
}
}
ctx := context.Background()
ctx := rootCtx
// Get the issue to be deleted
issue, err := store.GetIssue(ctx, issueID)
if err != nil {
@@ -332,7 +332,7 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
os.Exit(1)
}
}
ctx := context.Background()
ctx := rootCtx
// Type assert to SQLite storage
d, ok := store.(*sqlite.SQLiteStorage)
if !ok {

View File

@@ -28,7 +28,7 @@ func TestMultiWorkspaceDeletionSync(t *testing.T) {
ctx := context.Background()
// Create stores for both clones
storeA, err := sqlite.New(cloneADB)
storeA, err := sqlite.New(context.Background(), cloneADB)
if err != nil {
t.Fatalf("Failed to create store A: %v", err)
}
@@ -38,7 +38,7 @@ func TestMultiWorkspaceDeletionSync(t *testing.T) {
t.Fatalf("Failed to set issue_prefix for store A: %v", err)
}
storeB, err := sqlite.New(cloneBDB)
storeB, err := sqlite.New(context.Background(), cloneBDB)
if err != nil {
t.Fatalf("Failed to create store B: %v", err)
}
@@ -182,7 +182,7 @@ func TestDeletionWithLocalModification(t *testing.T) {
ctx := context.Background()
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -413,7 +413,7 @@ func TestMultiRepoDeletionTracking(t *testing.T) {
dbPath := filepath.Join(primaryBeadsDir, "beads.db")
ctx := context.Background()
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}

View File

@@ -2,7 +2,6 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -28,7 +27,7 @@ var depAddCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
depType, _ := cmd.Flags().GetString("type")
ctx := context.Background()
ctx := rootCtx
// Resolve partial IDs first
var fromID, toID string
@@ -155,7 +154,7 @@ var depRemoveCmd = &cobra.Command{
Short: "Remove a dependency",
Args: cobra.ExactArgs(2),
Run: func(cmd *cobra.Command, args []string) {
ctx := context.Background()
ctx := rootCtx
// Resolve partial IDs first
var fromID, toID string
@@ -252,7 +251,7 @@ var depTreeCmd = &cobra.Command{
Short: "Show dependency tree",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
ctx := context.Background()
ctx := rootCtx
// Resolve partial ID first
var fullID string
@@ -279,7 +278,7 @@ var depTreeCmd = &cobra.Command{
// If daemon is running but doesn't support this command, use direct storage
if daemonClient != nil && store == nil {
var err error
store, err = sqlite.New(dbPath)
store, err = sqlite.New(rootCtx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -365,7 +364,7 @@ var depCyclesCmd = &cobra.Command{
// If daemon is running but doesn't support this command, use direct storage
if daemonClient != nil && store == nil {
var err error
store, err = sqlite.New(dbPath)
store, err = sqlite.New(rootCtx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -373,7 +372,7 @@ var depCyclesCmd = &cobra.Command{
defer func() { _ = store.Close() }()
}
ctx := context.Background()
ctx := rootCtx
cycles, err := store.DetectCycles(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -38,7 +37,7 @@ Example:
clean, _ := cmd.Flags().GetBool("clean")
yes, _ := cmd.Flags().GetBool("yes")
ctx := context.Background()
ctx := rootCtx
// Get all issues
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})

View File

@@ -65,7 +65,7 @@ func ensureStoreActive() error {
}
}
sqlStore, err := sqlite.New(dbPath)
sqlStore, err := sqlite.New(rootCtx, dbPath)
if err != nil {
return fmt.Errorf("failed to open database: %w", err)
}

View File

@@ -1,6 +1,5 @@
package main
import (
"context"
"fmt"
"os"
"regexp"
@@ -32,7 +31,7 @@ Example:
autoMerge, _ := cmd.Flags().GetBool("auto-merge")
dryRun, _ := cmd.Flags().GetBool("dry-run")
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
ctx := rootCtx
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)

View File

@@ -1,6 +1,5 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -38,7 +37,7 @@ var epicStatusCmd = &cobra.Command{
os.Exit(1)
}
} else {
ctx := context.Background()
ctx := rootCtx
epics, err = store.GetEpicsEligibleForClosure(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting epic status: %v\n", err)
@@ -120,7 +119,7 @@ var closeEligibleEpicsCmd = &cobra.Command{
os.Exit(1)
}
} else {
ctx := context.Background()
ctx := rootCtx
epics, err := store.GetEpicsEligibleForClosure(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting eligible epics: %v\n", err)
@@ -175,7 +174,7 @@ var closeEligibleEpicsCmd = &cobra.Command{
continue
}
} else {
ctx := context.Background()
ctx := rootCtx
err := store.CloseIssue(ctx, epicStatus.Epic.ID, "All children completed", "system")
if err != nil {
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", epicStatus.Epic.ID, err)

View File

@@ -20,7 +20,7 @@ func TestEpicCommand(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -152,7 +152,7 @@ func TestEpicEligibleForClose(t *testing.T) {
t.Fatal(err)
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
@@ -148,7 +147,7 @@ Output to stdout by default, or use -o flag for file output.`,
fmt.Fprintf(os.Stderr, "Error: no database path found\n")
os.Exit(1)
}
store, err = sqlite.New(dbPath)
store, err = sqlite.New(rootCtx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -164,7 +163,7 @@ Output to stdout by default, or use -o flag for file output.`,
}
// Get all issues
ctx := context.Background()
ctx := rootCtx
issues, err := store.SearchIssues(ctx, "", filter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)

View File

@@ -24,7 +24,7 @@ func TestExportIntegrityAfterJSONLTruncation(t *testing.T) {
t.Fatalf("failed to create .beads directory: %v", err)
}
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create database: %v", err)
}
@@ -163,7 +163,7 @@ func TestExportIntegrityAfterJSONLDeletion(t *testing.T) {
t.Fatalf("failed to create .beads directory: %v", err)
}
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create database: %v", err)
}
@@ -260,7 +260,7 @@ func TestMultipleExportsStayConsistent(t *testing.T) {
t.Fatalf("failed to create .beads directory: %v", err)
}
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create database: %v", err)
}

View File

@@ -28,7 +28,7 @@ func TestExportUpdatesDatabaseMtime(t *testing.T) {
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
// Create and populate database
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -122,7 +122,7 @@ func TestDaemonExportScenario(t *testing.T) {
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
// Create and populate database
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
@@ -195,7 +195,7 @@ func TestMultipleExportCycles(t *testing.T) {
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
// Create and populate database
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}

View File

@@ -3,7 +3,6 @@ package main
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"os"
@@ -51,7 +50,7 @@ NOTE: Import requires direct database access and does not work with daemon mode.
daemonClient = nil
var err error
store, err = sqlite.New(dbPath)
store, err = sqlite.New(rootCtx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -89,7 +88,7 @@ NOTE: Import requires direct database access and does not work with daemon mode.
}
// Phase 1: Read and parse all JSONL
ctx := context.Background()
ctx := rootCtx
scanner := bufio.NewScanner(in)
var allIssues []*types.Issue
@@ -175,7 +174,7 @@ NOTE: Import requires direct database access and does not work with daemon mode.
// Check if database needs initialization (prefix not set)
// Detect prefix from the imported issues
initCtx := context.Background()
initCtx := rootCtx
configuredPrefix, err2 := store.GetConfig(initCtx, "issue_prefix")
if err2 != nil || strings.TrimSpace(configuredPrefix) == "" {
// Database exists but not initialized - detect prefix from issues

View File

@@ -0,0 +1,140 @@
package main
import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/steveyegge/beads/internal/types"
)
// TestImportCancellation verifies that cancelling the context during an
// import leaves the database in a usable, uncorrupted state. The import may
// legitimately complete before the cancellation is observed; both outcomes
// are acceptable — what must never happen is corruption or a hang.
func TestImportCancellation(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "bd-test-import-cancel-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	testDB := filepath.Join(tmpDir, "test.db")
	store := newTestStore(t, testDB)
	defer store.Close()

	// Create a large number of issues so the import takes long enough for
	// the cancellation to have a chance of landing mid-operation.
	issues := make([]*types.Issue, 0, 1000)
	for i := 0; i < 1000; i++ {
		issues = append(issues, &types.Issue{
			ID:          fmt.Sprintf("test-%d", i),
			Title:       "Test Issue",
			Description: "Test description for cancellation",
			Priority:    0,
			IssueType:   types.TypeBug,
			Status:      types.StatusOpen,
		})
	}

	// Create a cancellable context; defer cancel so the context is released
	// on every exit path, including early t.Fatalf failures.
	cancelCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Start import in a goroutine so the test can cancel it concurrently.
	errChan := make(chan error, 1)
	go func() {
		opts := ImportOptions{
			DryRun:     false,
			SkipUpdate: false,
			Strict:     false,
		}
		_, err := importIssuesCore(cancelCtx, testDB, store, issues, opts)
		errChan <- err
	}()

	// Cancel immediately to exercise the cancellation path.
	cancel()

	// Wait for the import to finish.
	err = <-errChan

	// Use errors.Is rather than ==: cancellation errors are typically
	// wrapped (fmt.Errorf("...: %w", ctx.Err())) on their way up, and a
	// direct comparison would misclassify them as unexpected errors.
	if err != nil && !errors.Is(err, context.Canceled) {
		t.Logf("Import returned error: %v", err)
	}

	// Verify database integrity — queries must still work after cancellation.
	ctx := context.Background()
	importedIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Database corrupted after cancellation: %v", err)
	}

	// The import may have been interrupted partway, so anywhere from 0 to
	// 1000 issues is valid; more than 1000 would indicate duplication.
	if len(importedIssues) > 1000 {
		t.Errorf("Expected <= 1000 issues after cancellation, got %d", len(importedIssues))
	}

	// Verify writes still succeed (database is not left locked or corrupted).
	newIssue := &types.Issue{
		Title:       "Post-cancellation issue",
		Description: "Created after cancellation to verify DB integrity",
		Priority:    0,
		IssueType:   types.TypeBug,
		Status:      types.StatusOpen,
	}
	if err := store.CreateIssue(ctx, newIssue, "test-user"); err != nil {
		t.Fatalf("Failed to create issue after cancellation: %v", err)
	}
}
// TestImportWithTimeout verifies that an import under an (effectively
// already-expired) deadline either fails with context.DeadlineExceeded or
// completes first, and that the database stays intact either way.
func TestImportWithTimeout(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "bd-test-import-timeout-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	testDB := filepath.Join(tmpDir, "test.db")
	store := newTestStore(t, testDB)
	defer store.Close()

	// A small set of issues is enough: the deadline below fires essentially
	// immediately, so volume is not needed to trigger the timeout path.
	issues := make([]*types.Issue, 0, 10)
	for i := 0; i < 10; i++ {
		issues = append(issues, &types.Issue{
			ID:          fmt.Sprintf("timeout-test-%d", i),
			Title:       "Test Issue",
			Description: "Test description",
			Priority:    0,
			IssueType:   types.TypeBug,
			Status:      types.StatusOpen,
		})
	}

	// Use an explicit 1ns duration instead of the bare constant 1 — same
	// value, but self-documenting. If the import somehow completes within
	// the deadline, that is also acceptable behavior.
	timeoutCtx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()

	opts := ImportOptions{
		DryRun:     false,
		SkipUpdate: false,
		Strict:     false,
	}
	_, err = importIssuesCore(timeoutCtx, testDB, store, issues, opts)

	// Use errors.Is rather than ==: deadline errors are usually wrapped as
	// they propagate, so a direct comparison would miss them.
	if err != nil && !errors.Is(err, context.DeadlineExceeded) {
		t.Logf("Import with timeout returned: %v (expected DeadlineExceeded or success)", err)
	}

	// Verify database integrity — queries must still work after the timeout.
	ctx := context.Background()
	importedIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Database corrupted after timeout: %v", err)
	}

	// Some, all, or none of the issues may have landed before the deadline.
	t.Logf("Imported %d issues before timeout", len(importedIssues))
}

View File

@@ -46,7 +46,7 @@ func profileImportOperation(t *testing.T, numIssues int) {
// Initialize storage
ctx := context.Background()
var store storage.Storage
store, err = sqlite.New(dbPath)
store, err = sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create storage: %v", err)
}
@@ -272,7 +272,7 @@ func TestImportWithExistingData(t *testing.T) {
dbPath := filepath.Join(tmpDir, "test.db")
ctx := context.Background()
var store storage.Storage
store, err = sqlite.New(dbPath)
store, err = sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create storage: %v", err)
}

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -88,7 +87,7 @@ Examples:
// Get issue count from direct store
if store != nil {
ctx := context.Background()
ctx := rootCtx
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)
@@ -118,7 +117,7 @@ Examples:
}
if store != nil {
ctx := context.Background()
ctx := rootCtx
configMap, err := store.GetAllConfig(ctx)
if err == nil && len(configMap) > 0 {
info["config"] = configMap
@@ -131,7 +130,7 @@ Examples:
// Add schema information if requested
if schemaFlag && store != nil {
ctx := context.Background()
ctx := rootCtx
// Get schema version
schemaVersion, err := store.GetMetadata(ctx, "bd_version")

View File

@@ -2,7 +2,6 @@ package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
@@ -196,14 +195,14 @@ With --no-db: creates .beads/ directory and issues.jsonl file instead of SQLite
os.Exit(1)
}
store, err := sqlite.New(initDBPath)
ctx := rootCtx
store, err := sqlite.New(ctx, initDBPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to create database: %v\n", err)
os.Exit(1)
}
// Set the issue prefix in config
ctx := context.Background()
if err := store.SetConfig(ctx, "issue_prefix", prefix); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to set issue prefix: %v\n", err)
_ = store.Close()

View File

@@ -27,7 +27,7 @@ func TestJSONLIntegrityValidation(t *testing.T) {
}
// Create database
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create database: %v", err)
}
@@ -174,7 +174,7 @@ func TestImportClearsExportHashes(t *testing.T) {
}
// Create database
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create database: %v", err)
}

View File

@@ -20,7 +20,7 @@ var labelCmd = &cobra.Command{
// Helper function to process label operations for multiple issues
func processBatchLabelOperation(issueIDs []string, label string, operation string, jsonOut bool,
daemonFunc func(string, string) error, storeFunc func(context.Context, string, string, string) error) {
ctx := context.Background()
ctx := rootCtx
results := []map[string]interface{}{}
for _, issueID := range issueIDs {
var err error
@@ -71,7 +71,7 @@ var labelAddCmd = &cobra.Command{
// Use global jsonOutput set by PersistentPreRun
issueIDs, label := parseLabelArgs(args)
// Resolve partial IDs
ctx := context.Background()
ctx := rootCtx
resolvedIDs := make([]string, 0, len(issueIDs))
for _, id := range issueIDs {
var fullID string
@@ -116,7 +116,7 @@ var labelRemoveCmd = &cobra.Command{
// Use global jsonOutput set by PersistentPreRun
issueIDs, label := parseLabelArgs(args)
// Resolve partial IDs
ctx := context.Background()
ctx := rootCtx
resolvedIDs := make([]string, 0, len(issueIDs))
for _, id := range issueIDs {
var fullID string
@@ -158,7 +158,7 @@ var labelListCmd = &cobra.Command{
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
ctx := rootCtx
// Resolve partial ID first
var issueID string
if daemonClient != nil {
@@ -228,7 +228,7 @@ var labelListAllCmd = &cobra.Command{
Short: "List all unique labels in the database",
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
ctx := rootCtx
var issues []*types.Issue
var err error
// Use daemon if available

View File

@@ -195,7 +195,7 @@ var listCmd = &cobra.Command{
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)
ctx := context.Background()
ctx := rootCtx
if daemonClient == nil {
if err := ensureDatabaseFresh(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)

View File

@@ -4,7 +4,6 @@ package main
import (
"bufio"
"context"
"fmt"
"os"
"path/filepath"
@@ -319,7 +318,7 @@ func createIssuesFromMarkdown(_ *cobra.Command, filepath string) {
os.Exit(1)
}
ctx := context.Background()
ctx := rootCtx
createdIssues := []*types.Issue{}
failedIssues := []string{}

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"database/sql"
"fmt"
"os"
@@ -265,7 +264,7 @@ This command:
// Clean up WAL files before opening to avoid "disk I/O error"
cleanupWALFiles(currentDB.path)
store, err := sqlite.New(currentDB.path)
store, err := sqlite.New(rootCtx, currentDB.path)
if err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -278,7 +277,7 @@ This command:
os.Exit(1)
}
ctx := context.Background()
ctx := rootCtx
// Detect and set issue_prefix if missing (fixes GH #201)
prefix, err := store.GetConfig(ctx, "issue_prefix")
@@ -377,7 +376,7 @@ This command:
fmt.Println("\n→ Migrating to hash-based IDs...")
}
store, err := sqlite.New(targetPath)
store, err := sqlite.New(rootCtx, targetPath)
if err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -390,7 +389,7 @@ This command:
os.Exit(1)
}
ctx := context.Background()
ctx := rootCtx
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
_ = store.Close()
@@ -605,7 +604,7 @@ func handleUpdateRepoID(dryRun bool, autoYes bool) {
}
// Open database
store, err := sqlite.New(foundDB)
store, err := sqlite.New(rootCtx, foundDB)
if err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -620,7 +619,7 @@ func handleUpdateRepoID(dryRun bool, autoYes bool) {
defer func() { _ = store.Close() }()
// Get old repo ID
ctx := context.Background()
ctx := rootCtx
oldRepoID, err := store.GetMetadata(ctx, "repo_id")
if err != nil && err.Error() != "metadata key not found: repo_id" {
if jsonOutput {
@@ -796,7 +795,7 @@ func handleInspect() {
}
// Open database in read-only mode for inspection
store, err := sqlite.New(targetPath)
store, err := sqlite.New(rootCtx, targetPath)
if err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -810,7 +809,7 @@ func handleInspect() {
}
defer func() { _ = store.Close() }()
ctx := context.Background()
ctx := rootCtx
// Get current schema version
schemaVersion, err := store.GetMetadata(ctx, "bd_version")
@@ -965,7 +964,7 @@ func handleToSeparateBranch(branch string, dryRun bool) {
}
// Open database
store, err := sqlite.New(targetPath)
store, err := sqlite.New(rootCtx, targetPath)
if err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -980,7 +979,7 @@ func handleToSeparateBranch(branch string, dryRun bool) {
defer func() { _ = store.Close() }()
// Get current sync.branch config
ctx := context.Background()
ctx := rootCtx
current, _ := store.GetConfig(ctx, "sync.branch")
// Dry-run mode

View File

@@ -36,7 +36,7 @@ Use --dry-run to preview changes before applying.`,
Run: func(cmd *cobra.Command, _ []string) {
dryRun, _ := cmd.Flags().GetBool("dry-run")
ctx := context.Background()
ctx := rootCtx
// Find database
dbPath := beads.FindDatabasePath()
@@ -73,7 +73,7 @@ Use --dry-run to preview changes before applying.`,
}
// Open database
store, err := sqlite.New(dbPath)
store, err := sqlite.New(rootCtx, dbPath)
if err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{

View File

@@ -16,7 +16,7 @@ func TestMigrateHashIDs(t *testing.T) {
dbPath := filepath.Join(tmpDir, "test.db")
// Create test database with sequential IDs
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
@@ -67,7 +67,7 @@ func TestMigrateHashIDs(t *testing.T) {
store.Close()
// Test dry run
store, err = sqlite.New(dbPath)
store, err = sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to reopen database: %v", err)
}
@@ -104,7 +104,7 @@ func TestMigrateHashIDs(t *testing.T) {
store.Close()
// Test actual migration
store, err = sqlite.New(dbPath)
store, err = sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to reopen database: %v", err)
}
@@ -172,7 +172,7 @@ func TestMigrateHashIDsWithParentChild(t *testing.T) {
dbPath := filepath.Join(tmpDir, "test.db")
// Create test database
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}

View File

@@ -35,7 +35,7 @@ Examples:
# Move issues with label filter
bd migrate-issues --from . --to ~/feature-work --label frontend --label urgent`,
Run: func(cmd *cobra.Command, args []string) {
ctx := context.Background()
ctx := rootCtx
// Parse flags
from, _ := cmd.Flags().GetString("from")

View File

@@ -30,7 +30,7 @@ func TestMigrateCommand(t *testing.T) {
t.Run("single old database", func(t *testing.T) {
// Create old database
oldDBPath := filepath.Join(beadsDir, "vc.db")
store, err := sqlite.New(oldDBPath)
store, err := sqlite.New(context.Background(), oldDBPath)
if err != nil {
t.Fatalf("Failed to create old database: %v", err)
}
@@ -82,7 +82,7 @@ func TestMigrateCommand(t *testing.T) {
}
// Update version
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to open database: %v", err)
}
@@ -143,7 +143,7 @@ func TestMigrateRespectsConfigJSON(t *testing.T) {
// Create old database with custom name
oldDBPath := filepath.Join(beadsDir, "beady.db")
store, err := sqlite.New(oldDBPath)
store, err := sqlite.New(context.Background(), oldDBPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}

View File

@@ -2,7 +2,6 @@ package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
@@ -66,7 +65,7 @@ func initializeNoDbMode() error {
return fmt.Errorf("failed to detect prefix: %w", err)
}
ctx := context.Background()
ctx := rootCtx
if err := memStore.SetConfig(ctx, "issue_prefix", prefix); err != nil {
return fmt.Errorf("failed to set prefix: %w", err)
}

View File

@@ -1,6 +1,5 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -97,7 +96,7 @@ var readyCmd = &cobra.Command{
return
}
// Direct mode
ctx := context.Background()
ctx := rootCtx
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)
@@ -158,16 +157,16 @@ var blockedCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun (respects config.yaml + env vars)
// If daemon is running but doesn't support this command, use direct storage
ctx := rootCtx
if daemonClient != nil && store == nil {
var err error
store, err = sqlite.New(dbPath)
store, err = sqlite.New(ctx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
}
defer func() { _ = store.Close() }()
}
ctx := context.Background()
blocked, err := store.GetBlockedIssues(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -238,7 +237,7 @@ var statsCmd = &cobra.Command{
return
}
// Direct mode
ctx := context.Background()
ctx := rootCtx
stats, err := store.GetStatistics(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)

View File

@@ -77,7 +77,7 @@ func testFreshCloneAutoImport(t *testing.T) {
os.Remove(dbPath)
// Run bd init with auto-import disabled to test checkGitForIssues
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
@@ -181,7 +181,7 @@ func testDatabaseRemovalScenario(t *testing.T) {
// Initialize database and import
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
@@ -261,7 +261,7 @@ func testLegacyFilenameSupport(t *testing.T) {
// Initialize and import
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
@@ -377,7 +377,7 @@ func testInitSafetyCheck(t *testing.T) {
// Create empty database (simulating failed import)
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}

View File

@@ -43,7 +43,7 @@ Example:
dryRun, _ := cmd.Flags().GetBool("dry-run")
repair, _ := cmd.Flags().GetBool("repair")
ctx := context.Background()
ctx := rootCtx
// rename-prefix requires direct mode (not supported by daemon)
if daemonClient != nil {

View File

@@ -12,7 +12,7 @@ import (
func TestRepairMultiplePrefixes(t *testing.T) {
// Create a temporary database
dbPath := t.TempDir() + "/test.db"
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("failed to create store: %v", err)
}

View File

@@ -42,7 +42,7 @@ func TestRenamePrefixCommand(t *testing.T) {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "test.db")
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}
@@ -170,7 +170,7 @@ func TestRenamePrefixInDB(t *testing.T) {
tmpDir := t.TempDir()
dbPath := filepath.Join(tmpDir, "test.db")
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}

View File

@@ -1,6 +1,5 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -19,7 +18,7 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
Run: func(cmd *cobra.Command, args []string) {
reason, _ := cmd.Flags().GetString("reason")
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
ctx := rootCtx
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {

View File

@@ -2,7 +2,6 @@
package main
import (
"context"
"fmt"
"os"
@@ -26,7 +25,7 @@ Interactive mode with --interactive prompts for each orphan.`,
// If daemon is running but doesn't support this command, use direct storage
if daemonClient != nil && store == nil {
var err error
store, err = sqlite.New(dbPath)
store, err = sqlite.New(rootCtx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -34,7 +33,7 @@ Interactive mode with --interactive prompts for each orphan.`,
defer func() { _ = store.Close() }()
}
ctx := context.Background()
ctx := rootCtx
// Get all dependency records
allDeps, err := store.GetAllDependencyRecords(ctx)

View File

@@ -17,7 +17,7 @@ func TestRepairDeps_NoOrphans(t *testing.T) {
t.Fatal(err)
}
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -82,7 +82,7 @@ func TestRepairDeps_FindOrphans(t *testing.T) {
t.Fatal(err)
}
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -198,7 +198,7 @@ func TestRepairDeps_FixOrphans(t *testing.T) {
t.Fatal(err)
}
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}
@@ -312,7 +312,7 @@ func TestRepairDeps_MultipleTypes(t *testing.T) {
t.Fatal(err)
}
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatal(err)
}

View File

@@ -33,7 +33,7 @@ var repoAddCmd = &cobra.Command{
return err
}
ctx := context.Background()
ctx := rootCtx
path := args[0]
var alias string
if len(args) > 1 {
@@ -82,7 +82,7 @@ var repoRemoveCmd = &cobra.Command{
return err
}
ctx := context.Background()
ctx := rootCtx
key := args[0]
// Get existing repos
@@ -125,7 +125,7 @@ var repoListCmd = &cobra.Command{
return err
}
ctx := context.Background()
ctx := rootCtx
repos, err := getRepoConfig(ctx, store)
if err != nil {
return fmt.Errorf("failed to load config: %w", err)
@@ -160,7 +160,7 @@ var repoSyncCmd = &cobra.Command{
return err
}
ctx := context.Background()
ctx := rootCtx
// Import from all repos
jsonlPath := findJSONLPath()

View File

@@ -2,7 +2,6 @@ package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
@@ -30,7 +29,7 @@ This is read-only and does not modify the database or git state.`,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
issueID := args[0]
ctx := context.Background()
ctx := rootCtx
// Check if we're in a git repository
if !isGitRepo() {

View File

@@ -1,7 +1,6 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -22,7 +21,7 @@ var showCmd = &cobra.Command{
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
jsonOutput, _ := cmd.Flags().GetBool("json")
ctx := context.Background()
ctx := rootCtx
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)
@@ -400,7 +399,7 @@ var updateCmd = &cobra.Command{
return
}
ctx := context.Background()
ctx := rootCtx
// Resolve partial IDs first
var resolvedIDs []string
@@ -532,7 +531,7 @@ Examples:
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
id := args[0]
ctx := context.Background()
ctx := rootCtx
// Resolve partial ID if in direct mode
if daemonClient == nil {
@@ -724,7 +723,7 @@ var closeCmd = &cobra.Command{
}
jsonOutput, _ := cmd.Flags().GetBool("json")
ctx := context.Background()
ctx := rootCtx
// Resolve partial IDs first
var resolvedIDs []string

View File

@@ -1,6 +1,5 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -61,7 +60,7 @@ This helps identify:
return
}
// Direct mode
ctx := context.Background()
ctx := rootCtx
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)

View File

@@ -2,7 +2,6 @@ package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
@@ -79,7 +78,7 @@ Examples:
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)
ctx := context.Background()
ctx := rootCtx
if daemonClient == nil {
if err := ensureDatabaseFresh(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -101,7 +100,7 @@ Examples:
}
} else {
// Direct mode
ctx := context.Background()
ctx := rootCtx
stats, err = store.GetStatistics(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -274,7 +273,7 @@ func getAssignedStatus(assignee string) *StatusSummary {
return nil
}
ctx := context.Background()
ctx := rootCtx
// Filter by assignee
assigneePtr := assignee

View File

@@ -28,7 +28,7 @@ func TestStatusCommand(t *testing.T) {
}
// Initialize the database
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
@@ -189,7 +189,7 @@ func TestGetAssignedStatus(t *testing.T) {
}
// Initialize the database
testStore, err := sqlite.New(dbPath)
testStore, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}

View File

@@ -33,7 +33,7 @@ Use --import-only to just import from JSONL (useful after git pull).
Use --status to show diff between sync branch and main branch.
Use --merge to merge the sync branch back to main branch.`,
Run: func(cmd *cobra.Command, _ []string) {
ctx := context.Background()
ctx := rootCtx
message, _ := cmd.Flags().GetString("message")
dryRun, _ := cmd.Flags().GetBool("dry-run")

View File

@@ -15,7 +15,7 @@ import (
func setupTestStore(t *testing.T, dbPath string) *sqlite.SQLiteStorage {
t.Helper()
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}

View File

@@ -80,7 +80,7 @@ func newTestStore(t *testing.T, dbPath string) *sqlite.SQLiteStorage {
t.Fatalf("Failed to create database directory: %v", err)
}
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}
@@ -107,7 +107,7 @@ func newTestStoreWithPrefix(t *testing.T, dbPath string, prefix string) *sqlite.
t.Fatalf("Failed to create database directory: %v", err)
}
store, err := sqlite.New(dbPath)
store, err := sqlite.New(context.Background(), dbPath)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}
@@ -127,7 +127,7 @@ func newTestStoreWithPrefix(t *testing.T, dbPath string, prefix string) *sqlite.
// Used in tests where the database was already created by the code under test.
func openExistingTestDB(t *testing.T, dbPath string) (*sqlite.SQLiteStorage, error) {
t.Helper()
return sqlite.New(dbPath)
return sqlite.New(context.Background(), dbPath)
}
// runCommandInDir runs a command in the specified directory

View File

@@ -33,7 +33,7 @@ Example:
fixAll, _ := cmd.Flags().GetBool("fix-all")
checksFlag, _ := cmd.Flags().GetString("checks")
jsonOut, _ := cmd.Flags().GetBool("json")
ctx := context.Background()
ctx := rootCtx
// Check database freshness before reading (bd-2q6d, bd-c4rq)
// Skip check when using daemon (daemon auto-imports on staleness)

View File

@@ -114,8 +114,8 @@ type Storage = storage.Storage
// NewSQLiteStorage opens a bd SQLite database for programmatic access.
// Most extensions should use this to query ready work and update issue status.
func NewSQLiteStorage(dbPath string) (Storage, error) {
return sqlite.New(dbPath)
func NewSQLiteStorage(ctx context.Context, dbPath string) (Storage, error) {
return sqlite.New(ctx, dbPath)
}
// FindDatabasePath discovers the bd database path using bd's standard search order:
@@ -364,9 +364,9 @@ func FindAllDatabases() []DatabaseInfo {
dbPath := matches[0]
// Don't fail if we can't open/query the database - it might be locked
// or corrupted, but we still want to detect and warn about it
store, err := sqlite.New(dbPath)
ctx := context.Background()
store, err := sqlite.New(ctx, dbPath)
if err == nil {
ctx := context.Background()
if issues, err := store.SearchIssues(ctx, "", types.IssueFilter{}); err == nil {
issueCount = len(issues)
}

View File

@@ -153,7 +153,7 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss
}
// getOrCreateStore returns an existing storage or creates a new one
func getOrCreateStore(_ context.Context, dbPath string, store storage.Storage) (*sqlite.SQLiteStorage, bool, error) {
func getOrCreateStore(ctx context.Context, dbPath string, store storage.Storage) (*sqlite.SQLiteStorage, bool, error) {
if store != nil {
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if !ok {
@@ -166,7 +166,7 @@ func getOrCreateStore(_ context.Context, dbPath string, store storage.Storage) (
if dbPath == "" {
return nil, false, fmt.Errorf("database path not set")
}
sqliteStore, err := sqlite.New(dbPath)
sqliteStore, err := sqlite.New(ctx, dbPath)
if err != nil {
return nil, false, fmt.Errorf("failed to open database: %w", err)
}

View File

@@ -11,14 +11,14 @@ import (
// This prevents "database not initialized" errors in tests
func newTestStore(t *testing.T, dbPath string) *sqlite.SQLiteStorage {
t.Helper()
store, err := sqlite.New(dbPath)
ctx := context.Background()
store, err := sqlite.New(ctx, dbPath)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}
// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
ctx := context.Background()
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
_ = store.Close()
t.Fatalf("Failed to set issue_prefix: %v", err)

View File

@@ -13,14 +13,13 @@ func TestAdaptiveIDLength_E2E(t *testing.T) {
t.Skip("skipping slow E2E test in short mode")
}
// Create in-memory database
db, err := New(":memory:")
ctx := context.Background()
db, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
ctx := context.Background()
// Initialize with prefix
if err := db.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
@@ -121,14 +120,13 @@ func formatTitle(format string, i int) string {
func TestAdaptiveIDLength_CustomConfig(t *testing.T) {
// Create in-memory database
db, err := New(":memory:")
ctx := context.Background()
db, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
ctx := context.Background()
// Initialize with custom config
if err := db.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)

View File

@@ -90,7 +90,10 @@ func getCachedOrGenerateDB(b *testing.B, cacheKey string, generateFn func(contex
b.Logf("This is a one-time operation that will be cached for future runs...")
b.Logf("Expected time: ~1-3 minutes for 10K issues, ~2-6 minutes for 20K issues")
store, err := New(dbPath)
ctx := context.Background()
store, err := New(ctx, dbPath)
if err != nil {
b.Fatalf("Failed to create storage: %v", err)
}
@@ -165,7 +168,9 @@ func setupLargeBenchDB(b *testing.B) (*SQLiteStorage, func()) {
}
// Open the temporary copy
store, err := New(tmpPath)
ctx := context.Background()
store, err := New(ctx, tmpPath)
if err != nil {
b.Fatalf("Failed to open database: %v", err)
}
@@ -198,7 +203,9 @@ func setupXLargeBenchDB(b *testing.B) (*SQLiteStorage, func()) {
}
// Open the temporary copy
store, err := New(tmpPath)
ctx := context.Background()
store, err := New(ctx, tmpPath)
if err != nil {
b.Fatalf("Failed to open database: %v", err)
}
@@ -234,7 +241,9 @@ func setupLargeFromJSONL(b *testing.B) (*SQLiteStorage, func()) {
}
// Open the temporary copy
store, err := New(tmpPath)
ctx := context.Background()
store, err := New(ctx, tmpPath)
if err != nil {
b.Fatalf("Failed to open database: %v", err)
}

View File

@@ -120,7 +120,9 @@ func generateID(b testing.TB, prefix string, n int) string{
func setupBenchDB(tb testing.TB) (*SQLiteStorage, func()) {
tb.Helper()
tmpDB := tb.TempDir() + "/test.db"
store, err := New(tmpDB)
ctx := context.Background()
store, err := New(ctx, tmpDB)
if err != nil {
tb.Fatalf("Failed to create storage: %v", err)
}

View File

@@ -9,13 +9,15 @@ import (
)
func TestHashIDGeneration(t *testing.T) {
store, err := New(":memory:")
ctx := context.Background()
store, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("Failed to create storage: %v", err)
}
defer func() { _ = store.Close() }()
ctx := context.Background()
ctx = context.Background()
// Set up database with prefix
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
@@ -73,13 +75,15 @@ func TestHashIDDeterministic(t *testing.T) {
}
func TestHashIDCollisionHandling(t *testing.T) {
store, err := New(":memory:")
ctx := context.Background()
store, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("Failed to create storage: %v", err)
}
defer func() { _ = store.Close() }()
ctx := context.Background()
ctx = context.Background()
// Set up database with prefix
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
@@ -132,13 +136,15 @@ func TestHashIDCollisionHandling(t *testing.T) {
}
func TestHashIDBatchCreation(t *testing.T) {
store, err := New(":memory:")
ctx := context.Background()
store, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("Failed to create storage: %v", err)
}
defer func() { _ = store.Close() }()
ctx := context.Background()
ctx = context.Background()
// Set up database with prefix
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {

View File

@@ -17,13 +17,15 @@ func TestPrefixValidation(t *testing.T) {
defer os.RemoveAll(tmpDir)
dbPath := filepath.Join(tmpDir, "test.db")
store, err := New(dbPath)
ctx := context.Background()
store, err := New(ctx, dbPath)
if err != nil {
t.Fatalf("failed to create storage: %v", err)
}
defer store.Close()
ctx := context.Background()
ctx = context.Background()
// Set prefix to "test"
if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
@@ -93,13 +95,15 @@ func TestPrefixValidationBatch(t *testing.T) {
defer os.RemoveAll(tmpDir)
dbPath := filepath.Join(tmpDir, "test.db")
store, err := New(dbPath)
ctx := context.Background()
store, err := New(ctx, dbPath)
if err != nil {
t.Fatalf("failed to create storage: %v", err)
}
defer store.Close()
ctx := context.Background()
ctx = context.Background()
// Set prefix to "batch"
if err := store.SetConfig(ctx, "issue_prefix", "batch"); err != nil {

View File

@@ -76,7 +76,7 @@ func init() {
}
// New creates a new SQLite storage backend
func New(path string) (*SQLiteStorage, error) {
func New(ctx context.Context, path string) (*SQLiteStorage, error) {
// Build connection string with proper URI syntax
// For :memory: databases, use shared cache so multiple connections see the same data
var connStr string
@@ -180,7 +180,6 @@ func New(path string) (*SQLiteStorage, error) {
// Hydrate from multi-repo config if configured (bd-307)
// Skip for in-memory databases (used in tests)
if path != ":memory:" {
ctx := context.Background()
_, err := storage.HydrateFromMultiRepo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to hydrate from multi-repo: %w", err)

View File

@@ -22,14 +22,16 @@ func setupTestDB(t *testing.T) (*SQLiteStorage, func()) {
}
dbPath := filepath.Join(tmpDir, "test.db")
store, err := New(dbPath)
ctx := context.Background()
store, err := New(ctx, dbPath)
if err != nil {
os.RemoveAll(tmpDir)
t.Fatalf("failed to create storage: %v", err)
}
// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
ctx := context.Background()
ctx = context.Background()
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
store.Close()
os.RemoveAll(tmpDir)
@@ -1234,7 +1236,9 @@ func TestPath(t *testing.T) {
// Test with relative path
relPath := filepath.Join(tmpDir, "test.db")
store, err := New(relPath)
ctx := context.Background()
store, err := New(ctx, relPath)
if err != nil {
t.Fatalf("failed to create storage: %v", err)
}
@@ -1266,13 +1270,14 @@ func TestMultipleStorageDistinctPaths(t *testing.T) {
}
defer os.RemoveAll(tmpDir2)
store1, err := New(filepath.Join(tmpDir1, "db1.db"))
ctx := context.Background()
store1, err := New(ctx, filepath.Join(tmpDir1, "db1.db"))
if err != nil {
t.Fatalf("failed to create storage 1: %v", err)
}
defer store1.Close()
store2, err := New(filepath.Join(tmpDir2, "db2.db"))
store2, err := New(ctx, filepath.Join(tmpDir2, "db2.db"))
if err != nil {
t.Fatalf("failed to create storage 2: %v", err)
}
@@ -1296,7 +1301,9 @@ func TestInMemoryDatabase(t *testing.T) {
ctx := context.Background()
// Test that :memory: database works
store, err := New(":memory:")
ctx = context.Background()
store, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("failed to create in-memory storage: %v", err)
}
@@ -1345,7 +1352,9 @@ func TestInMemorySharedCache(t *testing.T) {
ctx := context.Background()
// Create first connection
store1, err := New(":memory:")
ctx = context.Background()
store1, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("failed to create first in-memory storage: %v", err)
}
@@ -1372,7 +1381,9 @@ func TestInMemorySharedCache(t *testing.T) {
// Create second connection - Note: this creates a SEPARATE database
// Shared cache only works within a single sql.DB connection pool
store2, err := New(":memory:")
ctx = context.Background()
store2, err := New(ctx, ":memory:")
if err != nil {
t.Fatalf("failed to create second in-memory storage: %v", err)
}

View File

@@ -26,7 +26,8 @@ func newTestStore(t *testing.T, dbPath string) *SQLiteStorage {
dbPath = t.TempDir() + "/test.db"
}
store, err := New(dbPath)
ctx := context.Background()
store, err := New(ctx, dbPath)
if err != nil {
t.Fatalf("Failed to create test database: %v", err)
}
@@ -38,7 +39,6 @@ func newTestStore(t *testing.T, dbPath string) *SQLiteStorage {
})
// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
ctx := context.Background()
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
_ = store.Close()
t.Fatalf("Failed to set issue_prefix: %v", err)

View File

@@ -211,7 +211,9 @@ func TestUnderlyingDB_AfterClose(t *testing.T) {
defer os.RemoveAll(tmpDir)
dbPath := filepath.Join(tmpDir, "test.db")
store, err := New(dbPath)
ctx := context.Background()
store, err := New(ctx, dbPath)
if err != nil {
t.Fatalf("Failed to create storage: %v", err)
}