Add UnderlyingConn(ctx) for safer scoped DB access

- Add UnderlyingConn method to Storage interface
- Implement in SQLiteStorage for scoped connection access
- Useful for migrations and DDL operations
- Add comprehensive tests for basic access, DDL, context cancellation, and concurrent connections
- Closes bd-66, bd-22, bd-24, bd-38, bd-39, bd-56

Amp-Thread-ID: https://ampcode.com/threads/T-e47963af-4ace-4914-a0ae-4737f77be6ff
Co-authored-by: Amp <amp@ampcode.com>
Steve Yegge
2025-10-22 22:05:58 -07:00
parent fb64a33b99
commit 3fae41cb35
4 changed files with 243 additions and 9 deletions

View File

@@ -12,9 +12,9 @@
{"id":"bd-2","title":"Add validation/warning for malformed issue IDs","description":"getNextID silently ignores non-numeric ID suffixes (e.g., bd-foo). CAST returns NULL for invalid strings. Consider detecting and warning about malformed IDs in database. Location: internal/storage/sqlite/sqlite.go:79-82","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.821871-07:00","closed_at":"2025-10-14T02:51:52.198988-07:00"}
{"id":"bd-20","title":"Investigate stress test database pollution (vc-248)","description":"Investigation of stress tests polluting production database with 1,600+ test issues on Oct 21 at 20:24-20:25. Root cause analysis completed. Tests now verified to work correctly with proper isolation.","notes":"Bug confirmed! Tests DO pollute production DB. 1,000 test issues created at 20:46:01-20:46:02 during TestStressNoUniqueConstraintViolations. Root cause: test goroutines connect to production daemon at .beads/bd.sock instead of test daemon.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.822112-07:00","closed_at":"2025-10-22T01:05:59.461242-07:00"}
{"id":"bd-21","title":"Test auto-export timing","description":"","status":"closed","priority":4,"issue_type":"task","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.822366-07:00","closed_at":"2025-10-20T22:00:31.964329-07:00"}
{"id":"bd-22","title":"Consider implementing pre-commit hooks for Storage interface changes","description":"The documentation (INTERFACE_CHANGES.md) suggests adding pre-commit hooks that automatically check for Storage interface changes and verify all mocks are updated. This would prevent similar issues in the future where interface changes break mock implementations.\n\nDiscovered during execution of vc-228 (dogfooding run #14/15).","design":"Implement a pre-commit hook that:\n1. Detects changes to internal/storage/storage.go\n2. Runs scripts/find-storage-mocks.sh to find all mock implementations\n3. Attempts to compile all test files with mocks\n4. Blocks commit if compilation fails\n\nTools: husky, pre-commit framework, or simple .git/hooks/pre-commit script","acceptance_criteria":"- Pre-commit hook installed and documented\n- Hook detects Storage interface changes\n- Hook validates all mocks compile\n- Hook can be bypassed with --no-verify if needed\n- Documentation updated with installation instructions","status":"open","priority":2,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.822598-07:00"}
{"id":"bd-22","title":"Consider implementing pre-commit hooks for Storage interface changes","description":"The documentation (INTERFACE_CHANGES.md) suggests adding pre-commit hooks that automatically check for Storage interface changes and verify all mocks are updated. This would prevent similar issues in the future where interface changes break mock implementations.\n\nDiscovered during execution of vc-228 (dogfooding run #14/15).","design":"Implement a pre-commit hook that:\n1. Detects changes to internal/storage/storage.go\n2. Runs scripts/find-storage-mocks.sh to find all mock implementations\n3. Attempts to compile all test files with mocks\n4. Blocks commit if compilation fails\n\nTools: husky, pre-commit framework, or simple .git/hooks/pre-commit script","acceptance_criteria":"- Pre-commit hook installed and documented\n- Hook detects Storage interface changes\n- Hook validates all mocks compile\n- Hook can be bypassed with --no-verify if needed\n- Documentation updated with installation instructions","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:57:59.266619-07:00","closed_at":"2025-10-22T21:57:59.266619-07:00"}
{"id":"bd-23","title":"Implement bd quickstart command","description":"Add bd quickstart command to show context-aware repo information: recent issues, database location, configured prefix, example queries. Helps AI agents understand current project state. Companion to bd onboard.","notes":"After review, we already have good context tools: bd stats, bd list, bd ready, and the AGENTS.md onboarding section. Adding bd quickstart would be redundant and doesn't add enough value to justify maintenance cost. Closing as won't implement.","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:30:29.491988-07:00","closed_at":"2025-10-22T21:30:29.491988-07:00"}
{"id":"bd-24","title":"Add customizable time threshold for compact command","description":"Currently compact uses fixed 30-day and 90-day tiers. Add support for custom time thresholds like '--older-than 60h' or '--older-than 2.5d' to allow more flexible compaction policies.\n\nExamples:\n bd compact --all --older-than 60h\n bd compact --all --older-than 2.5d\n bd compact --all --tier 1 --age 48h\n\nThis would allow users to set their own compaction schedules based on their workflow needs.","status":"open","priority":2,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.823101-07:00"}
{"id":"bd-24","title":"Add customizable time threshold for compact command","description":"Currently compact uses fixed 30-day and 90-day tiers. Add support for custom time thresholds like '--older-than 60h' or '--older-than 2.5d' to allow more flexible compaction policies.\n\nExamples:\n bd compact --all --older-than 60h\n bd compact --all --older-than 2.5d\n bd compact --all --tier 1 --age 48h\n\nThis would allow users to set their own compaction schedules based on their workflow needs.","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:58:51.119025-07:00","closed_at":"2025-10-22T21:58:51.119025-07:00"}
{"id":"bd-25","title":"Add --id flag to bd list for filtering by specific issue IDs","description":"","design":"Add --id flag accepting comma-separated IDs. Usage: bd list --id wy-11,wy-12. Combines with other filters. From filter-flag-design.md.","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:31:01.770796-07:00","closed_at":"2025-10-22T21:31:01.770796-07:00"}
{"id":"bd-26","title":"Make merge command idempotent for safe retry after partial failures","description":"The merge command currently performs 3 operations without an outer transaction:\n1. Migrate dependencies from source → target\n2. Update text references across all issues\n3. Close source issues\n\nIf merge fails mid-operation (network issue, daemon crash, etc.), a retry will fail or produce incorrect results because some operations already succeeded.\n\n**Goal:** Make merge idempotent so retrying after partial failure is safe and completes the remaining work.\n\n**Idempotency checks needed:**\n- Skip dependency migration if target already has the dependency\n- Skip text reference updates if already updated\n- Skip closing source issues if already closed\n- Report which operations were skipped vs performed\n\n**Example output:**\n```\n✓ Merged 2 issue(s) into bd-78\n - Dependencies: 3 migrated, 2 already existed\n - Text references: 5 updated, 0 already correct\n - Source issues: 1 closed, 1 already closed\n```\n\n**Related:** bd-23 originally requested transaction support, but idempotency is a better solution for this use case since individual operations are already atomic.","design":"Current merge code already has some idempotency:\n- Dependency migration checks `alreadyExists` before adding (line ~145-151 in merge.go)\n- Text reference updates are naturally idempotent (replacing bd-X with bd-Y twice has same result)\n\nMissing idempotency:\n- CloseIssue fails if source already closed\n- Error messages don't distinguish \"already done\" from \"real failure\"\n\nImplementation:\n1. Check source issue status before closing - skip if already closed\n2. Track which operations succeeded/skipped\n3. Return detailed results for user visibility\n4. Consider adding --dry-run output showing what would be done vs skipped","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.823596-07:00","closed_at":"2025-10-22T12:01:51.907044-07:00"}
{"id":"bd-27","title":"bd sync crashes with nil pointer when daemon is running","description":"The 'bd sync' command crashes with a nil pointer dereference when the daemon is running.\n\n**Reproduction:**\n```bash\n# With daemon running\n./bd sync\n```\n\n**Error:**\n```\npanic: runtime error: invalid memory address or nil pointer dereference\n[signal SIGSEGV: segmentation violation code=0x2 addr=0x120 pc=0x1012314ac]\n\ngoroutine 1 [running]:\nmain.exportToJSONL({0x1014ec2e0, 0x101a49900}, {0x14000028db0, 0x30})\n /Users/stevey/src/fred/beads/cmd/bd/sync.go:245 +0x4c\n```\n\n**Root cause:**\nThe sync command's `exportToJSONL` function directly accesses `store.SearchIssues()` at line 245, but when daemon mode is active, the global `store` variable is nil. The sync command should either:\n1. Use daemon RPC when daemon is running, or\n2. Force direct mode for sync operations\n\n**Workaround:**\nUse `--no-daemon` flag: `bd sync --no-daemon`","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.823864-07:00","closed_at":"2025-10-22T00:09:12.615536-07:00"}
@@ -29,8 +29,8 @@
{"id":"bd-35","title":"Optimize auto-flush to use incremental updates","description":"Every flush exports ALL issues and ALL dependencies, even if only one issue changed. For large projects (1000+ issues), this could be expensive. Current approach guarantees consistency, which is fine for MVP, but future optimization could track which issues changed and use incremental updates. Located in cmd/bd/main.go:255-276.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.826369-07:00","closed_at":"2025-10-14T02:51:52.200141-07:00"}
{"id":"bd-36","title":"Make auto-flush debounce duration configurable","description":"flushDebounce is hardcoded to 5 seconds. Make it configurable via environment variable BEADS_FLUSH_DEBOUNCE (e.g., '500ms', '10s'). Current 5-second value is reasonable for interactive use, but CI/automated scenarios might want faster flush. Add getDebounceDuration() helper function. Located in cmd/bd/main.go:31.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.826605-07:00","closed_at":"2025-10-18T09:47:43.22126-07:00"}
{"id":"bd-37","title":"Document label best practices and use cases","description":"Create documentation covering:\n- When to use labels vs structured fields\n- Common label sets (coding agents, open source, product dev, SRE)\n- Naming conventions (kebab-case, specificity, present tense)\n- Anti-patterns (too many labels, overlapping, personal labels)\n- Label lifecycle management\n\nContent from LABELS.md analysis document.","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.826875-07:00","closed_at":"2025-10-19T23:11:46.125417-07:00"}
{"id":"bd-38","title":"Add rule-based compaction (e.g., compact children of closed epics)","description":"Support semantic compaction rules beyond just time-based, such as:\n- Compact all children of closed epics\n- Compact by priority level (e.g., all P3/P4 closed issues)\n- Compact by label (e.g., all issues labeled 'archive')\n- Compact by type (e.g., all closed chores)\n\nThis would allow smarter database size management based on semantic meaning rather than just age.","status":"open","priority":3,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.827137-07:00"}
{"id":"bd-39","title":"Add compact --dry-run that shows size savings estimates","description":"When running 'bd compact --dry-run', show estimated database size reduction in KB/MB and percentage, similar to what 'du -h' would show.\n\nExample output:\n Tier 1 candidates: 15 issues\n Current size: 2.4 MB\n After compaction: ~1.7 MB (70% reduction, 0.7 MB saved)\n \nThis helps users understand impact before compacting.","status":"open","priority":3,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.827372-07:00"}
{"id":"bd-38","title":"Add rule-based compaction (e.g., compact children of closed epics)","description":"Support semantic compaction rules beyond just time-based, such as:\n- Compact all children of closed epics\n- Compact by priority level (e.g., all P3/P4 closed issues)\n- Compact by label (e.g., all issues labeled 'archive')\n- Compact by type (e.g., all closed chores)\n\nThis would allow smarter database size management based on semantic meaning rather than just age.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:59:19.989241-07:00","closed_at":"2025-10-22T21:59:19.989241-07:00"}
{"id":"bd-39","title":"Add compact --dry-run that shows size savings estimates","description":"When running 'bd compact --dry-run', show estimated database size reduction in KB/MB and percentage, similar to what 'du -h' would show.\n\nExample output:\n Tier 1 candidates: 15 issues\n Current size: 2.4 MB\n After compaction: ~1.7 MB (70% reduction, 0.7 MB saved)\n \nThis helps users understand impact before compacting.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:59:19.990804-07:00","closed_at":"2025-10-22T21:59:19.990804-07:00"}
{"id":"bd-4","title":"Remove unused issueMap in scoreCollisions","description":"scoreCollisions() creates issueMap and populates it (lines 135-138) but never uses it. Either remove it or add a TODO comment explaining future use. Located in collision.go:135-138. Cosmetic cleanup.","status":"closed","priority":4,"issue_type":"chore","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.8277-07:00","closed_at":"2025-10-19T19:27:34.230312-07:00"}
{"id":"bd-40","title":"Add EXPLAIN QUERY PLAN tests for ready work query","description":"Verify that the hierarchical blocking query uses proper indexes and doesn't do full table scans.\n\n**Queries to analyze:**\n1. The recursive CTE (both base case and recursive case)\n2. The final SELECT with NOT EXISTS\n3. Impact of various filters (status, priority, assignee)\n\n**Implementation:**\nAdd test function that:\n- Runs EXPLAIN QUERY PLAN on GetReadyWork query\n- Parses output to verify no SCAN TABLE operations\n- Documents expected query plan in comments\n- Fails if query plan degrades\n\n**Benefits:**\n- Catch performance regressions in tests\n- Document expected query behavior\n- Ensure indexes are being used\n\nRelated to: bd-87 (composite index on depends_on_id, type)","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.827974-07:00","closed_at":"2025-10-18T12:47:44.284846-07:00"}
{"id":"bd-41","title":"Add performance benchmarks document","description":"Document actual performance metrics with hyperfine tests","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.828212-07:00","closed_at":"2025-10-18T10:09:23.532938-07:00"}
@@ -49,18 +49,18 @@
{"id":"bd-53","title":"Make merge command idempotent for safe retry after partial failures","description":"The merge command currently performs 3 operations without an outer transaction:\n1. Migrate dependencies from source → target\n2. Update text references across all issues\n3. Close source issues\n\nIf merge fails mid-operation (network issue, daemon crash, etc.), a retry will fail or produce incorrect results because some operations already succeeded.\n\n**Goal:** Make merge idempotent so retrying after partial failure is safe and completes the remaining work.\n\n**Idempotency checks needed:**\n- Skip dependency migration if target already has the dependency\n- Skip text reference updates if already updated\n- Skip closing source issues if already closed\n- Report which operations were skipped vs performed\n\n**Example output:**\n```\n✓ Merged 2 issue(s) into bd-102\n - Dependencies: 3 migrated, 2 already existed\n - Text references: 5 updated, 0 already correct\n - Source issues: 1 closed, 1 already closed\n```\n\n**Related:** bd-23 originally requested transaction support, but idempotency is a better solution for this use case since individual operations are already atomic.","design":"Current merge code already has some idempotency:\n- Dependency migration checks `alreadyExists` before adding (line ~145-151 in merge.go)\n- Text reference updates are naturally idempotent (replacing bd-X with bd-Y twice has same result)\n\nMissing idempotency:\n- CloseIssue fails if source already closed\n- Error messages don't distinguish \"already done\" from \"real failure\"\n\nImplementation:\n1. Check source issue status before closing - skip if already closed\n2. Track which operations succeeded/skipped\n3. Return detailed results for user visibility\n4. Consider adding --dry-run output showing what would be done vs skipped","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-22T00:47:43.165434-07:00","updated_at":"2025-10-22T21:23:20.832811-07:00","closed_at":"2025-10-22T11:56:36.526276-07:00"}
{"id":"bd-54","title":"Global daemon should warn/reject --auto-commit and --auto-push","description":"When user runs 'bd daemon --global --auto-commit', it's unclear which repo the daemon will commit to (especially after fixing bd-101 where global daemon won't open a DB).\n\nOptions:\n1. Warn and ignore the flags in global mode\n2. Error out with clear message\n\nLine 87-91 already checks autoPush, but should skip check entirely for global mode. Add user-friendly messaging about flag incompatibility.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-22T00:47:43.165645-07:00","updated_at":"2025-10-22T21:23:20.833031-07:00","closed_at":"2025-10-17T23:04:30.223432-07:00"}
{"id":"bd-55","title":"Add cross-repo issue references (future enhancement)","description":"Support referencing issues across different beads repositories. Useful for tracking dependencies between separate projects.\n\nProposed syntax:\n- Local reference: bd-102 (current behavior)\n- Cross-repo by path: ~/src/other-project#bd-456\n- Cross-repo by workspace name: @project2:bd-789\n\nUse cases:\n1. Frontend project depends on backend API issue\n2. Shared library changes blocking multiple projects\n3. System administrator tracking work across machines\n4. Monorepo with separate beads databases per component\n\nImplementation challenges:\n- Storage layer needs to query external databases\n- Dependency resolution across repos\n- What if external repo not available?\n- How to handle in JSONL export/import?\n- Security: should repos be able to read others?\n\nDesign questions to resolve first:\n1. Read-only references vs full cross-repo dependencies?\n2. How to handle repo renames/moves?\n3. Absolute paths vs workspace names vs git remotes?\n4. Should bd-77 auto-discover related repos?\n\nRecommendation: \n- Gather user feedback first\n- Start with read-only references\n- Implement as plugin/extension?\n\nContext: This is mentioned in bd-77 as approach #2. Much more complex than daemon multi-repo approach. Only implement if there's strong user demand.\n\nPriority: Backlog (4) - wait for user feedback before designing","status":"closed","priority":4,"issue_type":"feature","created_at":"2025-10-22T00:47:43.165857-07:00","updated_at":"2025-10-22T21:23:20.833247-07:00","closed_at":"2025-10-20T22:00:31.966891-07:00"}
{"id":"bd-56","title":"Add transaction support for atomic merges","description":"Wrap all merge operations in SQLite transaction for atomicity. Implement rollback on failure.","status":"open","priority":3,"issue_type":"task","created_at":"2025-10-22T11:56:36.505548-07:00","updated_at":"2025-10-22T21:23:20.833487-07:00"}
{"id":"bd-56","title":"Add transaction support for atomic merges","description":"Wrap all merge operations in SQLite transaction for atomicity. Implement rollback on failure.","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-22T11:56:36.505548-07:00","updated_at":"2025-10-22T21:59:19.999074-07:00","closed_at":"2025-10-22T21:59:19.999074-07:00"}
{"id":"bd-57","title":"Make beads reusable as a Go library for external projects like vc","description":"Currently beads is only usable as a CLI tool. We want to use beads as a library in other Go projects like ~/src/vc so they can programmatically manage issues without shelling out to the bd CLI.\n\nGoals:\n- Export public API from internal packages\n- Document Go package usage\n- Provide examples of programmatic usage\n- Ensure vc can import and use beads storage layer directly\n\nUse case: The vc project needs issue tracking and wants to use beads as an embedded library rather than as a separate CLI tool.","notes":"UnderlyingDB() method implemented and tested. Core functionality complete. Still needs documentation updates (bd-65) and lifecycle safety enhancements (bd-64).","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-22T12:27:30.35968-07:00","updated_at":"2025-10-22T21:23:20.833709-07:00","closed_at":"2025-10-22T19:46:09.362533-07:00"}
{"id":"bd-58","title":"Beads Library Integration","description":"Migrate from custom SQLite implementation to using Beads as a library dependency. This eliminates ~3000 lines of duplicated code, reduces schema drift risk, and automatically inherits new Beads features.\n\n**Key Benefits:**\n- Remove 3000+ lines of duplicated SQLite code\n- Eliminate schema drift between bd and vc CLIs\n- Inherit Beads improvements automatically\n- Stronger type safety with Beads error types\n- Faster development velocity for new features\n- Clean separation: Beads stays general-purpose, VC extends via wrapper\n\n**Architecture Principle:**\nBeads remains 100% standalone with NO VC dependencies. VC imports Beads (VC → Beads dependency) and wraps it with VC-specific storage methods. Both share the same database but maintain separate table namespaces.\n\n**Current Pain Points:**\n1. Code Duplication: Issue CRUD, dependency graphs, labels, status transitions all reimplemented\n2. Schema Drift Risk: VC schema manually defined, could diverge from Beads\n3. Lost Features: Can't leverage Beads query optimizer or advanced features without process spawning\n4. Atomic Operations: Hand-rolled 100+ line transaction management\n5. Maintenance Burden: Every Beads feature must be manually replicated\n\n**Concrete Example:**\nWhen Beads adds a new field (e.g., estimated_hours):\n- Current: 4-6 hours of manual work (update types, 6+ SQL queries, migration, testing)\n- With Library: 5 minutes (go get -u github.com/steveyegge/beads)\n\n**Phased Approach:**\n1. Phase 1: Add Beads dependency (non-breaking, feature flag)\n2. Phase 2: Implement VCStorage wrapper (embeds beads.Storage)\n3. Phase 3: Migration script for existing databases\n4. Phase 4: Gradual cutover, deprecate SQLite code\n\n**Related Analysis:**\nSee BEADS_INTEGRATION_ANALYSIS.md for detailed current state analysis and BEADS_LIBRARY_INTEGRATION_EPIC.md for full design document (both to be archived after issue creation).\n\n**Estimated Effort:** 3-4 sprints\n**Priority:** P2 (Medium-High - architectural improvement, high ROI)","notes":"Phase 1 (bd-59) complete! Beads can now be used as a Go library. VC can import github.com/steveyegge/beads and use beads.Storage directly instead of spawning CLI processes. No custom tables needed - VC uses pure Beads primitives.","status":"closed","priority":2,"issue_type":"epic","created_at":"2025-10-22T14:04:08.692803-07:00","updated_at":"2025-10-22T21:23:20.833951-07:00","closed_at":"2025-10-22T14:40:10.225406-07:00"}
{"id":"bd-59","title":"Phase 1: Add Beads Dependency (Non-Breaking)","description":"Introduce Beads library alongside existing SQLite code without breaking production.\n\n**Goal:** Add Beads as optional dependency with feature flag, establish compatibility baseline.\n\n**Key Tasks:**\n1. Add github.com/steveyegge/beads to go.mod\n2. Create compatibility test suite comparing VC SQLite vs Beads schema\n3. Identify schema differences and document migration requirements\n4. Create internal/storage/beads/adapter.go implementing Storage interface\n5. Add feature flag: VC_USE_BEADS_LIBRARY=true (disabled by default)\n\n**Acceptance Criteria:**\n- Beads library imported successfully\n- Compatibility tests pass identifying all schema differences\n- Both implementations coexist without conflicts\n- No production impact (feature flag disabled by default)\n- Documentation of schema differences and migration needs\n\n**Technical Details:**\n- Use feature flag to allow A/B testing\n- Compatibility tests must cover: issues, dependencies, labels, status transitions, ID generation\n- Adapter must implement full Storage interface\n- Zero changes to existing production code paths\n\n**Blockers:** None - can start immediately\n\n**Estimated Effort:** 1 sprint","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-22T14:04:20.24179-07:00","updated_at":"2025-10-22T21:23:20.834186-07:00","closed_at":"2025-10-22T14:36:08.066041-07:00","dependencies":[{"issue_id":"bd-59","depends_on_id":"bd-58","type":"parent-child","created_at":"2025-10-22T14:04:20.242734-07:00","created_by":"daemon"}]}
{"id":"bd-6","title":"Daemon storage cache doesn't detect external database modifications","description":"When bd commands bypass the daemon and directly modify the database (e.g., `bd import` with direct file access, or deleting/recreating bd.db), the daemon's cached storage connection becomes stale and serves outdated data.\n\n**Reproduction**:\n1. Start daemon: `bd daemon`\n2. Run bd stats → shows N issues\n3. Delete database: `rm .beads/bd.db` \n4. Reinit and import: `bd init \u0026\u0026 bd import -i .beads/issues.jsonl`\n5. Run bd stats → shows 0 issues (wrong!)\n6. Direct query: `sqlite3 .beads/bd.db 'SELECT COUNT(*) FROM issues'` → shows correct count\n7. Restart daemon: `bd daemon --stop` then retry stats → now shows correct count\n\n**Root cause**: \n- server.go:1410-1414 retrieves cached storage without checking if DB file changed\n- Cache only evicts based on TTL (30min) or LRU, never on external modifications\n- Direct file operations bypass daemon, leaving cache stale\n\n**Impact**:\n- Users see incorrect/stale data after external DB operations\n- Confusing behavior with no clear indication cache is stale\n- Requires daemon restart to fix\n\n**Proposed fixes**:\n1. Check mtime on cache hit, invalidate if file changed\n2. Add cache eviction API (bd cache --clear)\n3. Use file locking to prevent external modifications while daemon running\n4. SQLite WAL mode change notifications","design":"**Better approach: Check DB file mtime on cache lookup**\n\nToo many commands bypass the daemon (import, init, renumber, compact, delete, dep tree, export, stale). Notifying from each would be error-prone and easy to forget when adding new commands.\n\n**Implementation:**\n\n1. Add `dbMtime time.Time` field to `StorageCacheEntry`\n2. In `getStorageForRequest()` on cache hit:\n - Stat the DB file to get current mtime\n - If mtime changed since cached, evict entry and reopen\n - Otherwise return cached connection\n3. Store mtime when initially caching\n\n**Code location:**\n- `internal/rpc/server.go:1410-1414` (cache hit path)\n- `internal/rpc/server.go:49-52` (StorageCacheEntry struct)\n\n**Benefits:**\n- Simple, centralized check\n- Works for all commands that bypass daemon\n- Works for external tools modifying DB\n- No need to update every command\n- Minimal performance overhead (one stat() call on cache hit)\n\n**Trade-offs:**\n- Small overhead on every cache hit (negligible - stat is fast)\n- mtime granularity may miss rapid changes (unlikely in practice)","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-22T21:23:20.83442-07:00","closed_at":"2025-10-21T21:51:22.331957-07:00"}
{"id":"bd-60","title":"Phase 2: Implement VCStorage Wrapper","description":"Create VCStorage wrapper that embeds beads.Storage and adds VC-specific operations.\n\n**Goal:** Build clean abstraction layer where VC extends Beads without modifying Beads library.\n\n**Architecture:**\n- VCStorage embeds beads.Storage (delegates core operations)\n- VCStorage adds VC-specific methods (executor instances, events)\n- Same database, separate table namespaces (Beads tables + VC tables)\n- Zero changes to Beads library code\n\n**Key Tasks:**\n1. Create VCStorage struct that embeds beads.Storage\n2. Implement VC-specific methods: CreateExecutorInstance(), GetStaleExecutors(), LogEvent(), UpdateExecutionState()\n3. Create VC table schemas (executor_instances, issue_execution_state, agent_events)\n4. Verify type compatibility between VC types.Issue and Beads Issue\n5. Create MockVCStorage for testing\n6. Write unit tests for VC-specific methods\n7. Write integration tests (end-to-end with Beads)\n8. Benchmark performance vs current SQLite\n9. Verify NO changes needed to Beads library\n\n**Acceptance Criteria:**\n- VCStorage successfully wraps Beads storage (embedding works)\n- VC-specific tables created and accessible via foreign keys to Beads tables\n- VC-specific methods work (executor instances, events)\n- Core operations delegate to Beads correctly\n- Tests pass with \u003e90% coverage\n- Performance benchmark shows no regression\n- Beads library remains unmodified and standalone\n\n**Technical Details:**\n- Use beadsStore.DB() to get underlying database connection\n- Create VC tables with FOREIGN KEY references to Beads issues table\n- Schema separation: Beads owns (issues, dependencies, labels), VC owns (executor_instances, agent_events)\n- Testing: Embed MockBeadsStorage in MockVCStorage\n\n**Dependencies:**\n- Blocked by Phase 1 (need Beads library imported)\n\n**Estimated Effort:** 1.5 sprints","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-22T14:04:36.674165-07:00","updated_at":"2025-10-22T21:23:20.834658-07:00","dependencies":[{"issue_id":"bd-60","depends_on_id":"bd-58","type":"parent-child","created_at":"2025-10-22T14:04:36.674919-07:00","created_by":"daemon"},{"issue_id":"bd-60","depends_on_id":"bd-59","type":"blocks","created_at":"2025-10-22T14:04:36.679667-07:00","created_by":"daemon"}]}
{"id":"bd-61","title":"Phase 3: Migration Path \u0026 Database Schema Alignment","description":"Enable existing .beads/vc.db files to work with Beads library through automated migration.\n\n**Goal:** Provide safe, tested migration path from SQLite implementation to Beads library.\n\n**Key Tasks:**\n1. Run compatibility tests against production databases\n2. Identify schema differences (columns, indexes, constraints)\n3. Document required migrations\n4. Create migration CLI command: 'vc migrate --from sqlite --to beads'\n5. Add dry-run mode for preview\n6. Add backup/restore capability\n7. Implement rollback mechanism\n8. Add auto-detection of schema version on startup\n9. Add auto-migrate with user prompt\n\n**Acceptance Criteria:**\n- Existing databases migrate successfully\n- Data integrity preserved (zero data loss verified via checksums)\n- Rollback works if migration fails\n- Migration tested on real production VC databases\n- Dry-run mode shows exactly what will change\n- Backup created before migration\n- Feature flag: VC_FORCE_SQLITE=true provides escape hatch\n\n**Technical Details:**\n- Compare current SQLite schema with Beads schema\n- Handle version detection (read schema_version or detect from structure)\n- Migration should be idempotent (safe to run multiple times)\n- Backup strategy: Copy .beads/vc.db to .beads/vc.db.backup-\u003ctimestamp\u003e\n- Verify foreign key integrity after migration\n\n**Safety Measures:**\n- Require executor shutdown before migration (check for running executors)\n- Atomic migration (BEGIN IMMEDIATE transaction)\n- Comprehensive pre/post migration validation\n- Clear error messages with recovery instructions\n\n**Dependencies:**\n- Blocked by Phase 2 (need VCStorage implementation)\n\n**Estimated Effort:** 0.5 sprint","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-22T14:04:51.320435-07:00","updated_at":"2025-10-22T21:23:20.834901-07:00","dependencies":[{"issue_id":"bd-61","depends_on_id":"bd-58","type":"parent-child","created_at":"2025-10-22T14:04:51.321526-07:00","created_by":"daemon"},{"issue_id":"bd-61","depends_on_id":"bd-60","type":"blocks","created_at":"2025-10-22T14:04:51.321935-07:00","created_by":"daemon"}]}
{"id":"bd-62","title":"Phase 4: Gradual Cutover \u0026 Production Rollout","description":"Replace SQLite implementation with Beads library in production and remove legacy code.\n\n**Goal:** Complete transition to Beads library, deprecate and remove custom SQLite implementation.\n\n**Key Tasks:**\n1. Run VC executor with Beads library in CI\n2. Dogfood: Use Beads library for VC's own development\n3. Monitor for regressions and performance issues\n4. Flip feature flag: VC_USE_BEADS_LIBRARY=true by default\n5. Monitor production logs for errors\n6. Collect user feedback\n7. Add deprecation notice to CLAUDE.md\n8. Provide migration guide for users\n9. Remove legacy code: internal/storage/sqlite/sqlite.go (~1500 lines)\n10. Remove migration framework: internal/storage/migrations/\n11. Remove manual transaction management code\n12. Update all documentation\n\n**Acceptance Criteria:**\n- Beads library enabled by default in production\n- Zero production incidents related to migration\n- Performance meets or exceeds SQLite implementation\n- All tests passing with Beads library\n- Legacy SQLite code removed\n- Documentation updated\n- Celebration documented 🎉\n\n**Rollout Strategy:**\n1. Week 1: Enable for CI/testing environments\n2. Week 2: Dogfood on VC development\n3. Week 3: Enable for 50% of production (canary)\n4. Week 4: Enable for 100% of production\n5. Week 5: Remove legacy code\n\n**Monitoring:**\n- Track error rates before/after cutover\n- Monitor database query performance\n- Track issue creation/update latency\n- Monitor executor claim performance\n\n**Rollback Plan:**\n- Keep VC_FORCE_SQLITE=true escape hatch for 2 weeks post-cutover\n- Keep legacy code for 1 sprint after cutover\n- Document rollback procedure\n\n**Success Metrics:**\n- Zero data loss\n- No performance regression (\u003c 5% latency increase acceptable)\n- Reduced maintenance burden (code LOC reduction)\n- Positive developer feedback\n\n**Dependencies:**\n- Blocked by Phase 3 (need migration tooling)\n\n**Estimated Effort:** 1 sprint","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-22T14:05:07.755107-07:00","updated_at":"2025-10-22T21:23:20.835139-07:00","dependencies":[{"issue_id":"bd-62","depends_on_id":"bd-58","type":"parent-child","created_at":"2025-10-22T14:05:07.756023-07:00","created_by":"daemon"},{"issue_id":"bd-62","depends_on_id":"bd-61","type":"blocks","created_at":"2025-10-22T14:05:07.75651-07:00","created_by":"daemon"}]}
{"id":"bd-60","title":"Phase 2: Implement VCStorage Wrapper","description":"Create VCStorage wrapper that embeds beads.Storage and adds VC-specific operations.\n\n**Goal:** Build clean abstraction layer where VC extends Beads without modifying Beads library.\n\n**Architecture:**\n- VCStorage embeds beads.Storage (delegates core operations)\n- VCStorage adds VC-specific methods (executor instances, events)\n- Same database, separate table namespaces (Beads tables + VC tables)\n- Zero changes to Beads library code\n\n**Key Tasks:**\n1. Create VCStorage struct that embeds beads.Storage\n2. Implement VC-specific methods: CreateExecutorInstance(), GetStaleExecutors(), LogEvent(), UpdateExecutionState()\n3. Create VC table schemas (executor_instances, issue_execution_state, agent_events)\n4. Verify type compatibility between VC types.Issue and Beads Issue\n5. Create MockVCStorage for testing\n6. Write unit tests for VC-specific methods\n7. Write integration tests (end-to-end with Beads)\n8. Benchmark performance vs current SQLite\n9. Verify NO changes needed to Beads library\n\n**Acceptance Criteria:**\n- VCStorage successfully wraps Beads storage (embedding works)\n- VC-specific tables created and accessible via foreign keys to Beads tables\n- VC-specific methods work (executor instances, events)\n- Core operations delegate to Beads correctly\n- Tests pass with \u003e90% coverage\n- Performance benchmark shows no regression\n- Beads library remains unmodified and standalone\n\n**Technical Details:**\n- Use beadsStore.DB() to get underlying database connection\n- Create VC tables with FOREIGN KEY references to Beads issues table\n- Schema separation: Beads owns (issues, dependencies, labels), VC owns (executor_instances, agent_events)\n- Testing: Embed MockBeadsStorage in MockVCStorage\n\n**Dependencies:**\n- Blocked by Phase 1 (need Beads library imported)\n\n**Estimated Effort:** 1.5 sprints","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-22T14:04:36.674165-07:00","updated_at":"2025-10-22T21:37:48.747033-07:00","closed_at":"2025-10-22T21:37:48.747033-07:00","dependencies":[{"issue_id":"bd-60","depends_on_id":"bd-58","type":"parent-child","created_at":"2025-10-22T14:04:36.674919-07:00","created_by":"daemon"},{"issue_id":"bd-60","depends_on_id":"bd-59","type":"blocks","created_at":"2025-10-22T14:04:36.679667-07:00","created_by":"daemon"}]}
{"id":"bd-61","title":"Phase 3: Migration Path \u0026 Database Schema Alignment","description":"Enable existing .beads/vc.db files to work with Beads library through automated migration.\n\n**Goal:** Provide safe, tested migration path from SQLite implementation to Beads library.\n\n**Key Tasks:**\n1. Run compatibility tests against production databases\n2. Identify schema differences (columns, indexes, constraints)\n3. Document required migrations\n4. Create migration CLI command: 'vc migrate --from sqlite --to beads'\n5. Add dry-run mode for preview\n6. Add backup/restore capability\n7. Implement rollback mechanism\n8. Add auto-detection of schema version on startup\n9. Add auto-migrate with user prompt\n\n**Acceptance Criteria:**\n- Existing databases migrate successfully\n- Data integrity preserved (zero data loss verified via checksums)\n- Rollback works if migration fails\n- Migration tested on real production VC databases\n- Dry-run mode shows exactly what will change\n- Backup created before migration\n- Feature flag: VC_FORCE_SQLITE=true provides escape hatch\n\n**Technical Details:**\n- Compare current SQLite schema with Beads schema\n- Handle version detection (read schema_version or detect from structure)\n- Migration should be idempotent (safe to run multiple times)\n- Backup strategy: Copy .beads/vc.db to .beads/vc.db.backup-\u003ctimestamp\u003e\n- Verify foreign key integrity after migration\n\n**Safety Measures:**\n- Require executor shutdown before migration (check for running executors)\n- Atomic migration (BEGIN IMMEDIATE transaction)\n- Comprehensive pre/post migration validation\n- Clear error messages with recovery instructions\n\n**Dependencies:**\n- Blocked by Phase 2 (need VCStorage implementation)\n\n**Estimated Effort:** 0.5 sprint","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-22T14:04:51.320435-07:00","updated_at":"2025-10-22T21:37:48.748273-07:00","closed_at":"2025-10-22T21:37:48.748273-07:00","dependencies":[{"issue_id":"bd-61","depends_on_id":"bd-58","type":"parent-child","created_at":"2025-10-22T14:04:51.321526-07:00","created_by":"daemon"},{"issue_id":"bd-61","depends_on_id":"bd-60","type":"blocks","created_at":"2025-10-22T14:04:51.321935-07:00","created_by":"daemon"}]}
{"id":"bd-62","title":"Phase 4: Gradual Cutover \u0026 Production Rollout","description":"Replace SQLite implementation with Beads library in production and remove legacy code.\n\n**Goal:** Complete transition to Beads library, deprecate and remove custom SQLite implementation.\n\n**Key Tasks:**\n1. Run VC executor with Beads library in CI\n2. Dogfood: Use Beads library for VC's own development\n3. Monitor for regressions and performance issues\n4. Flip feature flag: VC_USE_BEADS_LIBRARY=true by default\n5. Monitor production logs for errors\n6. Collect user feedback\n7. Add deprecation notice to CLAUDE.md\n8. Provide migration guide for users\n9. Remove legacy code: internal/storage/sqlite/sqlite.go (~1500 lines)\n10. Remove migration framework: internal/storage/migrations/\n11. Remove manual transaction management code\n12. Update all documentation\n\n**Acceptance Criteria:**\n- Beads library enabled by default in production\n- Zero production incidents related to migration\n- Performance meets or exceeds SQLite implementation\n- All tests passing with Beads library\n- Legacy SQLite code removed\n- Documentation updated\n- Celebration documented 🎉\n\n**Rollout Strategy:**\n1. Week 1: Enable for CI/testing environments\n2. Week 2: Dogfood on VC development\n3. Week 3: Enable for 50% of production (canary)\n4. Week 4: Enable for 100% of production\n5. Week 5: Remove legacy code\n\n**Monitoring:**\n- Track error rates before/after cutover\n- Monitor database query performance\n- Track issue creation/update latency\n- Monitor executor claim performance\n\n**Rollback Plan:**\n- Keep VC_FORCE_SQLITE=true escape hatch for 2 weeks post-cutover\n- Keep legacy code for 1 sprint after cutover\n- Document rollback procedure\n\n**Success Metrics:**\n- Zero data loss\n- No performance regression (\u003c 5% latency increase acceptable)\n- Reduced maintenance burden (code LOC reduction)\n- Positive developer feedback\n\n**Dependencies:**\n- Blocked by Phase 3 (need migration tooling)\n\n**Estimated Effort:** 1 sprint","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-22T14:05:07.755107-07:00","updated_at":"2025-10-22T21:37:48.748919-07:00","closed_at":"2025-10-22T21:37:48.748919-07:00","dependencies":[{"issue_id":"bd-62","depends_on_id":"bd-58","type":"parent-child","created_at":"2025-10-22T14:05:07.756023-07:00","created_by":"daemon"},{"issue_id":"bd-62","depends_on_id":"bd-61","type":"blocks","created_at":"2025-10-22T14:05:07.75651-07:00","created_by":"daemon"}]}
{"id":"bd-63","title":"Example library-created issue","description":"This issue was created programmatically using Beads as a library","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-22T14:34:44.081801-07:00","updated_at":"2025-10-22T21:23:20.835412-07:00","closed_at":"2025-10-22T14:34:44.084241-07:00","labels":["library-usage"],"dependencies":[{"issue_id":"bd-63","depends_on_id":"bd-1","type":"discovered-from","created_at":"2025-10-22T14:34:44.082772-07:00","created_by":"library-example"}],"comments":[{"id":7,"issue_id":"bd-63","author":"library-example","text":"This is a programmatic comment","created_at":"2025-10-22T21:34:44Z"}]}
{"id":"bd-64","title":"Add lifecycle safety docs and tests for UnderlyingDB() method","description":"The new UnderlyingDB() method exposes the raw *sql.DB connection for extensions like VC to create their own tables. While database/sql is concurrency-safe, there are lifecycle and misuse risks that need documentation and testing.\n\n**What needs to be done:**\n\n1. **Enhanced documentation** - Expand UnderlyingDB() comments to warn:\n - Callers MUST NOT call Close() on returned DB\n - Do NOT change pool/driver settings (SetMaxOpenConns, SetConnMaxIdleTime)\n - Do NOT modify SQLite PRAGMAs (WAL mode, journal, etc.)\n - Expect errors after Storage.Close() - use contexts\n - Keep write transactions short to avoid blocking core storage\n\n2. **Add lifecycle tracking** - Implement closed flag:\n - Add atomic.Bool closed field to SQLiteStorage\n - Set flag in Close(), clear in New()\n - Optional: Add IsClosed() bool method\n\n3. **Add safety tests** (run with -race):\n - TestUnderlyingDB_ConcurrentAccess - N goroutines using UnderlyingDB() during normal storage ops\n - TestUnderlyingDB_AfterClose - Verify operations fail cleanly after storage closed\n - TestUnderlyingDB_CreateExtensionTables - Create VC table with FK to issues, verify FK enforcement\n - TestUnderlyingDB_LongTxDoesNotCorrupt - Ensure long read tx doesn't block writes indefinitely\n\n**Why this matters:**\nVC will use this to create tables in the same database. Need to ensure production-ready safety without over-engineering.\n\n**Estimated effort:** S+S+S = M total (1-3h)","design":"Oracle recommends \"simple path\": enhanced docs + minimal guardrails + focused tests. See oracle output for detailed rationale on concurrency safety, lifecycle risks, and when to consider advanced path (wrapping interface).","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-22T17:07:56.812983-07:00","updated_at":"2025-10-22T21:23:20.835623-07:00","closed_at":"2025-10-22T20:10:52.636372-07:00"}
{"id":"bd-65","title":"Update EXTENDING.md with UnderlyingDB() usage and best practices","description":"EXTENDING.md currently shows how to use direct sql.Open() to access the database, but doesn't mention the new UnderlyingDB() method that's the recommended way for extensions.\n\n**Update needed:**\n1. Add section showing UnderlyingDB() usage:\n ```go\n store, err := beads.NewSQLiteStorage(dbPath)\n db := store.UnderlyingDB()\n // Create extension tables using db\n ```\n\n2. Document when to use UnderlyingDB() vs direct sql.Open():\n - Use UnderlyingDB() when you want to share the storage connection\n - Use sql.Open() when you need independent connection management\n\n3. Add safety warnings (cross-reference from UnderlyingDB() docs):\n - Don't close the DB\n - Don't modify pool settings\n - Keep transactions short\n\n4. Update the VC example to show UnderlyingDB() pattern\n\n5. Explain beads.Storage.UnderlyingDB() in the API section","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-22T17:07:56.820056-07:00","updated_at":"2025-10-22T21:23:20.835881-07:00","closed_at":"2025-10-22T19:41:19.895847-07:00","dependencies":[{"issue_id":"bd-65","depends_on_id":"bd-57","type":"discovered-from","created_at":"2025-10-22T17:07:56.822413-07:00","created_by":"daemon"}]}
{"id":"bd-66","title":"Consider adding UnderlyingConn(ctx) for safer scoped DB access","description":"Currently UnderlyingDB() returns *sql.DB which is correct for most uses, but for extension migrations/DDL, a scoped connection might be safer.\n\n**Proposal:** Add optional UnderlyingConn(ctx) (*sql.Conn, error) method that:\n- Returns a scoped connection via s.db.Conn(ctx)\n- Encourages lifetime-bounded usage\n- Reduces temptation to tune global pool settings\n- Better for one-time DDL operations like CREATE TABLE\n\n**Implementation:**\n```go\n// UnderlyingConn returns a single connection from the pool for scoped use\n// Useful for migrations and DDL. Close the connection when done.\nfunc (s *SQLiteStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {\n return s.db.Conn(ctx)\n}\n```\n\n**Benefits:**\n- Safer for migrations (explicit scope)\n- Complements UnderlyingDB() for different use cases\n- Low implementation cost\n\n**Trade-off:** Adds another method to maintain, but Oracle considers this balanced compromise between safety and flexibility.\n\n**Decision:** This is optional - evaluate based on VC's actual usage patterns.","status":"open","priority":3,"issue_type":"feature","created_at":"2025-10-22T17:07:56.832638-07:00","updated_at":"2025-10-22T21:23:20.836107-07:00","dependencies":[{"issue_id":"bd-66","depends_on_id":"bd-57","type":"related","created_at":"2025-10-22T17:07:56.835844-07:00","created_by":"daemon"}]}
{"id":"bd-66","title":"Consider adding UnderlyingConn(ctx) for safer scoped DB access","description":"Currently UnderlyingDB() returns *sql.DB which is correct for most uses, but for extension migrations/DDL, a scoped connection might be safer.\n\n**Proposal:** Add optional UnderlyingConn(ctx) (*sql.Conn, error) method that:\n- Returns a scoped connection via s.db.Conn(ctx)\n- Encourages lifetime-bounded usage\n- Reduces temptation to tune global pool settings\n- Better for one-time DDL operations like CREATE TABLE\n\n**Implementation:**\n```go\n// UnderlyingConn returns a single connection from the pool for scoped use\n// Useful for migrations and DDL. Close the connection when done.\nfunc (s *SQLiteStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {\n return s.db.Conn(ctx)\n}\n```\n\n**Benefits:**\n- Safer for migrations (explicit scope)\n- Complements UnderlyingDB() for different use cases\n- Low implementation cost\n\n**Trade-off:** Adds another method to maintain, but Oracle considers this balanced compromise between safety and flexibility.\n\n**Decision:** This is optional - evaluate based on VC's actual usage patterns.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-22T17:07:56.832638-07:00","updated_at":"2025-10-22T22:02:18.479512-07:00","closed_at":"2025-10-22T22:02:18.479512-07:00","dependencies":[{"issue_id":"bd-66","depends_on_id":"bd-57","type":"related","created_at":"2025-10-22T17:07:56.835844-07:00","created_by":"daemon"}]}
{"id":"bd-67","title":"MCP close tool method signature error - takes 1 positional argument but 2 were given","description":"The close approval routing fix in beads-mcp v0.11.0 works correctly and successfully routes update(status=\"closed\") calls to close() tool. However, the close() tool has a Python method signature bug that prevents execution.\n\nImpact: All MCP-based close operations are broken. Workaround: Use bd CLI directly.\n\nError: BdDaemonClient.close() takes 1 positional argument but 2 were given\n\nRoot cause: BdDaemonClient.close() only accepts self, but MCP tool passes issue_id and reason.\n\nAdditional issue: CLI close has FOREIGN KEY constraint error when recording reason parameter.\n\nSee GitHub issue #107 for full details.","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-10-22T17:25:34.67056-07:00","updated_at":"2025-10-22T21:23:20.83634-07:00","closed_at":"2025-10-22T17:36:55.463445-07:00"}
{"id":"bd-68","title":"Test close issue","description":"","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-22T17:27:56.89475-07:00","updated_at":"2025-10-22T21:23:20.836566-07:00","closed_at":"2025-10-22T17:28:00.795511-07:00"}
{"id":"bd-69","title":"Fix pre-existing MCP test failures - show/update return arrays not dicts","description":"9 tests fail in beads-mcp because bd CLI commands return arrays but MCP client expects dicts:\n\nFailing tests:\n- test_create_and_show_issue: show returns array, expects dict\n- test_update_issue: update returns array, expects dict \n- test_add_dependency: show returns array, expects dict\n- test_invalid_issue_id: show returns empty dict instead of error\n- test_dependency_types: show returns array, expects dict\n- test_show_issue_tool: show returns array, expects dict\n- test_update_issue_tool: update returns array, expects dict\n- test_update_partial_fields: update returns array, expects dict\n- test_client_lazy_initialization: BdClient import issue\n\nRoot cause: bd CLI commands like 'bd show' and 'bd update' output JSON arrays, but BdCliClient.show() and BdCliClient.update() expect single dict objects.\n\nExample:\n```bash\nbd show test-1 --json\n[{\"id\":\"test-1\",...}] # Array, not dict\n```\n\nFix needed: Update bd_client.py to handle array responses and extract first element, or change CLI to return single object for single-ID operations.","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-10-22T17:43:23.29302-07:00","updated_at":"2025-10-22T21:23:20.836775-07:00","closed_at":"2025-10-22T20:05:49.3826-07:00"}

View File

@@ -1963,3 +1963,40 @@ func (s *SQLiteStorage) IsClosed() bool {
func (s *SQLiteStorage) UnderlyingDB() *sql.DB {
	return s.db
}

// UnderlyingConn returns a single connection from the pool for scoped use.
//
// This provides a connection with explicit lifetime boundaries, useful for:
//   - One-time DDL operations (CREATE TABLE, ALTER TABLE)
//   - Migration scripts that need transaction control
//   - Operations that benefit from connection-level state
//
// IMPORTANT: The caller MUST close the connection when done:
//
//	conn, err := storage.UnderlyingConn(ctx)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//
// For general queries and transactions, prefer UnderlyingDB() which manages
// the connection pool automatically.
//
// EXAMPLE (extension table migration):
//
//	conn, err := storage.UnderlyingConn(ctx)
//	if err != nil {
//		return err
//	}
//	defer conn.Close()
//
//	_, err = conn.ExecContext(ctx, `
//		CREATE TABLE IF NOT EXISTS vc_executions (
//			id INTEGER PRIMARY KEY AUTOINCREMENT,
//			issue_id TEXT NOT NULL,
//			FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
//		)
//	`)
func (s *SQLiteStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {
	return s.db.Conn(ctx)
}

View File

@@ -287,3 +287,194 @@ func TestUnderlyingDB_LongTxDoesNotDeadlock(t *testing.T) {
t.Error("CreateIssue deadlocked or timed out")
}
}
// TestUnderlyingConn_BasicAccess tests that UnderlyingConn returns a usable connection
func TestUnderlyingConn_BasicAccess(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-conn-test-*")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// Get a scoped connection
	conn, err := store.UnderlyingConn(ctx)
	if err != nil {
		t.Fatalf("UnderlyingConn() failed: %v", err)
	}
	defer conn.Close()

	// Verify we can query it
	var count int
	err = conn.QueryRowContext(ctx, "SELECT COUNT(*) FROM issues").Scan(&count)
	if err != nil {
		t.Fatalf("Failed to query via UnderlyingConn: %v", err)
	}
	if count != 0 {
		t.Errorf("Expected 0 issues, got %d", count)
	}
}

// TestUnderlyingConn_DDLOperations tests using UnderlyingConn for DDL
func TestUnderlyingConn_DDLOperations(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-conn-ddl-test-*")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// Create a test issue first for FK reference
	issue := &types.Issue{
		Title:       "Test issue",
		Description: "For extension testing",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}

	// Get a scoped connection for DDL
	conn, err := store.UnderlyingConn(ctx)
	if err != nil {
		t.Fatalf("UnderlyingConn() failed: %v", err)
	}
	defer conn.Close()

	// Create extension table using the scoped connection
	schema := `
	CREATE TABLE IF NOT EXISTS vc_migrations (
		id INTEGER PRIMARY KEY AUTOINCREMENT,
		issue_id TEXT NOT NULL,
		version TEXT NOT NULL,
		applied_at DATETIME DEFAULT CURRENT_TIMESTAMP,
		FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
	);
	CREATE INDEX IF NOT EXISTS idx_vc_migrations_issue ON vc_migrations(issue_id);
	`
	if _, err := conn.ExecContext(ctx, schema); err != nil {
		t.Fatalf("Failed to create extension table: %v", err)
	}

	// Insert using the same connection
	result, err := conn.ExecContext(ctx, `
		INSERT INTO vc_migrations (issue_id, version)
		VALUES (?, ?)
	`, issue.ID, "v1.0.0")
	if err != nil {
		t.Fatalf("Failed to insert into extension table: %v", err)
	}
	id, _ := result.LastInsertId()
	if id == 0 {
		t.Error("Expected non-zero insert ID")
	}

	// Verify the data persists after connection close
	conn.Close()

	// Use UnderlyingDB to verify
	db := store.UnderlyingDB()
	var version string
	err = db.QueryRowContext(ctx, `
		SELECT version FROM vc_migrations WHERE issue_id = ?
	`, issue.ID).Scan(&version)
	if err != nil {
		t.Fatalf("Failed to query after connection close: %v", err)
	}
	if version != "v1.0.0" {
		t.Errorf("Expected version 'v1.0.0', got %q", version)
	}
}

// TestUnderlyingConn_ContextCancellation tests that context cancellation works
func TestUnderlyingConn_ContextCancellation(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-conn-ctx-test-*")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	// Create a context that's already canceled
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// Try to get connection with canceled context
	conn, err := store.UnderlyingConn(ctx)
	if err == nil {
		conn.Close()
		t.Error("Expected error with canceled context, got nil")
	}
}

// TestUnderlyingConn_MultipleConnections tests multiple connections don't interfere
func TestUnderlyingConn_MultipleConnections(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-multi-conn-test-*")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// Get multiple connections
	conn1, err := store.UnderlyingConn(ctx)
	if err != nil {
		t.Fatalf("Failed to get conn1: %v", err)
	}
	defer conn1.Close()

	conn2, err := store.UnderlyingConn(ctx)
	if err != nil {
		t.Fatalf("Failed to get conn2: %v", err)
	}
	defer conn2.Close()

	// Both should be able to query independently
	var count1, count2 int
	if err := conn1.QueryRowContext(ctx, "SELECT COUNT(*) FROM issues").Scan(&count1); err != nil {
		t.Errorf("conn1 query failed: %v", err)
	}
	if err := conn2.QueryRowContext(ctx, "SELECT COUNT(*) FROM issues").Scan(&count2); err != nil {
		t.Errorf("conn2 query failed: %v", err)
	}
	if count1 != count2 {
		t.Errorf("Connections see different data: %d vs %d", count1, count2)
	}
}
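
One case the doc comment lists but the tests don't exercise is "operations that benefit from connection-level state". A hedged sketch of why pinning a single connection matters for that case (not part of this commit; the PRAGMA choice and helper name are illustrative):

```go
// Sketch: per-connection SQLite state is only predictable on a pinned
// *sql.Conn. Executing a PRAGMA through UnderlyingDB() may run on any
// pooled connection, while a scoped connection guarantees the caller's
// later statements observe it. Illustrative only.
func withDeferredForeignKeys(ctx context.Context, store *SQLiteStorage, fn func(*sql.Conn) error) error {
	conn, err := store.UnderlyingConn(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	// defer_foreign_keys is per-connection (and resets at transaction end)
	if _, err := conn.ExecContext(ctx, "PRAGMA defer_foreign_keys = ON"); err != nil {
		return err
	}
	return fn(conn) // caller's statements run on the same pinned connection
}
```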

View File

@@ -79,6 +79,12 @@ type Storage interface {
	// in the same database. Extensions should use foreign keys to reference core tables.
	// WARNING: Direct database access bypasses the storage layer. Use with caution.
	UnderlyingDB() *sql.DB

	// UnderlyingConn returns a single connection from the pool for scoped use.
	// Useful for migrations and DDL operations that benefit from explicit connection lifetime.
	// The caller MUST close the connection when done to return it to the pool.
	// For general queries, prefer UnderlyingDB() which manages the pool automatically.
	UnderlyingConn(ctx context.Context) (*sql.Conn, error)
}

// Config holds database configuration
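
Since every Storage implementation must now supply UnderlyingConn — the mock-drift problem bd-22 tracks — a minimal sketch of bringing a test mock up to date (mockStorage and its fields are illustrative, not code from this repo):

```go
// Sketch: a test mock satisfying the widened interface. Embedding the
// Storage interface keeps the mock compiling as the interface grows; only
// methods a given test exercises need real bodies (calling an unimplemented
// embedded method panics, which surfaces gaps immediately).
type mockStorage struct {
	Storage          // embedded interface: unimplemented methods panic if called
	db      *sql.DB  // in-memory SQLite handle owned by the test
}

func (m *mockStorage) UnderlyingDB() *sql.DB { return m.db }

func (m *mockStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {
	return m.db.Conn(ctx)
}
```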