From 0acd9d0a5db2914913d5923fc274f4b47af677d1 Mon Sep 17 00:00:00 2001 From: Steve Yegge Date: Mon, 24 Nov 2025 12:25:35 -0800 Subject: [PATCH] bd sync: 2025-11-24 12:25:34 --- .beads/beads.jsonl | 12 +- internal/storage/memory/memory.go | 11 + internal/storage/sqlite/transaction.go | 758 +++++++++++++++++ internal/storage/sqlite/transaction_test.go | 857 ++++++++++++++++++++ internal/storage/storage.go | 75 ++ 5 files changed, 1707 insertions(+), 6 deletions(-) create mode 100644 internal/storage/sqlite/transaction.go create mode 100644 internal/storage/sqlite/transaction_test.go diff --git a/.beads/beads.jsonl b/.beads/beads.jsonl index 9cae59e2..67871cea 100644 --- a/.beads/beads.jsonl +++ b/.beads/beads.jsonl @@ -120,7 +120,7 @@ {"id":"bd-3gc","content_hash":"83684e0e79d016a9569ccd04adda44cc072a662ee09556c156bf54e0603c5b6a","title":"Audit remaining cmd/bd files for error handling consistency","description":"Extend ERROR_HANDLING_AUDIT.md to cover: daemon_sync.go, update.go, list.go, show.go, close.go, reopen.go, dep.go, label.go, comments.go, delete.go, compact.go, config.go, validate.go and other high-usage command files","status":"open","priority":3,"issue_type":"task","created_at":"2025-11-24T00:28:55.890991-08:00","updated_at":"2025-11-24T00:28:55.890991-08:00","source_repo":".","dependencies":[{"issue_id":"bd-3gc","depends_on_id":"bd-1qwo","type":"blocks","created_at":"2025-11-24T00:28:55.891827-08:00","created_by":"daemon"}]} {"id":"bd-3sz0","content_hash":"50d2315d5561c2b89acaa7c6ad84e7b2b0f5155f796bbbeeb862818c4b331423","title":"Auto-repair stale merge driver configs with invalid placeholders","description":"Old bd versions (\u003c0.24.0) installed merge driver with invalid placeholders %L %R instead of %A %B. Add detection to bd doctor --fix: check if git config merge.beads.driver contains %L or %R, auto-repair to 'bd merge %A %O %A %B'. 
One-time migration for users who initialized with old versions.","status":"open","priority":2,"issue_type":"feature","created_at":"2025-11-21T23:16:10.762808-08:00","updated_at":"2025-11-21T23:16:28.892655-08:00","source_repo":".","dependencies":[{"issue_id":"bd-3sz0","depends_on_id":"bd-tbz3","type":"parent-child","created_at":"2025-11-21T23:16:10.763612-08:00","created_by":"daemon"}]} {"id":"bd-3tfh","content_hash":"d8a889d96a2a236db3d6c60d239878ffee607e6c91b2d6fc6dd85bfca938da03","title":"Benchmark Helper Functions","description":"Extend existing benchmark helpers in internal/storage/sqlite/bench_helpers_test.go (or create if organizing separately).\n\nExisting helper (in compact_bench_test.go):\n- setupBenchDB(tb) - Creates temp SQLite database with basic config\n * Used by compact and cycle benchmarks\n * Returns (*SQLiteStorage, cleanup func())\n\nNew helpers to add:\n- setupLargeBenchDB(b *testing.B) storage.Storage\n * Creates 10K issue database using LargeSQLite fixture\n * Returns configured storage instance\n \n- setupXLargeBenchDB(b *testing.B) storage.Storage\n * Creates 20K issue database using XLargeSQLite fixture\n * Returns configured storage instance\n\nImplementation options:\n1. Add to existing compact_bench_test.go (co-located with setupBenchDB)\n2. 
Create new bench_helpers_test.go for organization\n\nBoth approaches:\n- Build tag: //go:build bench\n- Uses fixture generator from internal/testutil/fixtures\n- Follows existing setupBenchDB() pattern\n- Handles database cleanup\n\nThese helpers reduce duplication across new benchmark functions and provide consistent large-scale database setup.","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-11-13T22:22:55.694834-08:00","updated_at":"2025-11-13T23:13:41.244758-08:00","closed_at":"2025-11-13T23:13:41.244758-08:00","source_repo":".","dependencies":[{"issue_id":"bd-3tfh","depends_on_id":"bd-m62x","type":"blocks","created_at":"2025-11-13T22:24:02.632994-08:00","created_by":"daemon"}]} -{"id":"bd-3tlb","content_hash":"33dbe8301ea3777b4d115db69301ac6a5e16b3af688779ea243352203037e82e","title":"Add CreateIssues batch operation to Transaction","description":"Add bulk issue creation to Transaction interface for efficient batch operations.\n\n## Tasks\n1. Add to Transaction interface:\n - CreateIssues(ctx, issues, actor) error\n\n2. 
Implement on sqliteTxStorage:\n - Reuse existing bulk insert helpers with conn\n - Atomic ID generation for batch\n - Bulk event recording\n - Bulk dirty marking\n\n## Acceptance Criteria\n- [ ] CreateIssues creates all issues atomically\n- [ ] Performance comparable to non-transactional CreateIssues\n- [ ] Test: batch create 100 issues in transaction","status":"open","priority":2,"issue_type":"task","created_at":"2025-11-24T11:37:13.829049-08:00","updated_at":"2025-11-24T11:37:13.829049-08:00","source_repo":".","dependencies":[{"issue_id":"bd-3tlb","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:37:13.83039-08:00","created_by":"daemon"},{"issue_id":"bd-3tlb","depends_on_id":"bd-5xxq","type":"blocks","created_at":"2025-11-24T11:37:13.83128-08:00","created_by":"daemon"}]} +{"id":"bd-3tlb","content_hash":"33dbe8301ea3777b4d115db69301ac6a5e16b3af688779ea243352203037e82e","title":"Add CreateIssues batch operation to Transaction","description":"Add bulk issue creation to Transaction interface for efficient batch operations.\n\n## Tasks\n1. Add to Transaction interface:\n - CreateIssues(ctx, issues, actor) error\n\n2. 
Implement on sqliteTxStorage:\n - Reuse existing bulk insert helpers with conn\n - Atomic ID generation for batch\n - Bulk event recording\n - Bulk dirty marking\n\n## Acceptance Criteria\n- [ ] CreateIssues creates all issues atomically\n- [ ] Performance comparable to non-transactional CreateIssues\n- [ ] Test: batch create 100 issues in transaction","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-24T11:37:13.829049-08:00","updated_at":"2025-11-24T12:20:31.719956-08:00","closed_at":"2025-11-24T12:20:31.719956-08:00","source_repo":".","dependencies":[{"issue_id":"bd-3tlb","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:37:13.83039-08:00","created_by":"daemon"},{"issue_id":"bd-3tlb","depends_on_id":"bd-5xxq","type":"blocks","created_at":"2025-11-24T11:37:13.83128-08:00","created_by":"daemon"}]} {"id":"bd-40a0","content_hash":"75611f4fb108e11cb4b98ded732fe94dd41ed700d8058b419e6fc796cf152391","title":"bd doctor should check for multiple DBs, multiple JSONLs, daemon health","description":"","design":"\nCurrently bd doctor only checks:\n- .beads/ directory exists\n- Database version vs CLI version \n- ID format (hash vs sequential)\n- CLI version vs latest GitHub release\n\nIt should ALSO check for operational issues that cause silent failures:\n\n1. **Multiple database files** (*.db excluding backups and vc.db)\n - Warn if multiple *.db files found (ambiguous which to use)\n - Suggest running 'bd migrate' or manually removing old DBs\n\n2. **Multiple JSONL files** \n - Check for both issues.jsonl and beads.jsonl\n - Warn about ambiguity, suggest standardizing on one\n\n3. **Daemon health** (integrate bd daemons health)\n - Check if daemon running for this workspace\n - Detect version mismatches between daemon and CLI\n - Detect zombie daemons (running but unresponsive)\n - Detect stale daemon.pid files\n\n4. 
**Database-JSONL sync issues**\n - Check if JSONL is newer than last import\n - Warn if they're out of sync\n\n5. **Permissions issues**\n - Check if .beads/ directory is writable\n - Check if database file is readable/writable\n\nImplementation approach:\n- Add new check functions to doctor.go\n- Reuse logic from bd daemons health\n- Keep checks fast (\u003c 1 second total)\n- Output actionable fixes for each issue\n","status":"closed","priority":1,"issue_type":"feature","created_at":"2025-10-31T21:16:47.042913-07:00","updated_at":"2025-10-31T21:21:27.093525-07:00","closed_at":"2025-10-31T21:21:27.093525-07:00","source_repo":"."} {"id":"bd-4462","content_hash":"a3f7ca75994ca4efb8b5b6ae47ecf5b8544ad33510e4c6f72663efd8c2737f74","title":"Test basic bd commands in WASM (init, create, list)","description":"Compile and verify basic bd functionality works in WASM:\n- Test bd init --quiet\n- Test bd create with simple issue\n- Test bd list --json output\n- Verify SQLite database creation and queries work\n- Document any runtime issues or workarounds needed","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-02T21:58:07.291771-08:00","updated_at":"2025-11-02T23:07:10.273212-08:00","closed_at":"2025-11-02T23:07:10.273212-08:00","source_repo":".","dependencies":[{"issue_id":"bd-4462","depends_on_id":"bd-44d0","type":"parent-child","created_at":"2025-11-02T22:23:49.448668-08:00","created_by":"stevey"},{"issue_id":"bd-4462","depends_on_id":"bd-b4b0","type":"blocks","created_at":"2025-11-02T22:23:55.596771-08:00","created_by":"stevey"}]} {"id":"bd-44d0","content_hash":"a20f23c823907e546f852c1bbb0c09166100b2569d4a1192f0a7288ee5d918e8","title":"WASM port of bd for Claude Code Web sandboxes","description":"Enable beads to work in Claude Code Web sandboxes by compiling bd to WebAssembly.\n\n## Problem\nClaude Code Web sandboxes cannot install bd CLI due to network restrictions:\n- GitHub releases return 403\n- go install fails with DNS errors\n- Binary cannot 
be downloaded\n\n## Solution\nCompile bd Go codebase to WASM, publish to npm as drop-in replacement.\n\n## Technical Approach\n- Use GOOS=js GOARCH=wasm to compile bd\n- modernc.org/sqlite already supports js/wasm target\n- Publish to npm as bd-wasm package\n- Full feature parity with bd CLI\n\n## Success Criteria\n- bd-wasm installs via npm in web sandbox\n- All core bd commands work identically\n- JSONL output matches native bd\n- Performance within 2x of native","notes":"WASM port abandoned - Claude Code Web has full VMs not browser restrictions. Better: npm + native binary","status":"closed","priority":0,"issue_type":"epic","created_at":"2025-11-02T18:32:27.660794-08:00","updated_at":"2025-11-02T23:36:38.679515-08:00","closed_at":"2025-11-02T23:36:38.679515-08:00","source_repo":"."} @@ -168,13 +168,13 @@ {"id":"bd-5f26","content_hash":"5131931d43040061d669159635de863740ed90e7c946c4b646c36c53bc274d6f","title":"Refactor daemon.go into internal/daemonrunner","description":"Extract daemon runtime from daemon.go (1,565 lines) into internal/daemonrunner with focused modules: config.go, daemon.go, process.go, rpc_server.go, sync.go, git.go. 
Keep cobra command thin.","design":"New structure:\n- internal/daemonrunner/config.go: Config struct\n- internal/daemonrunner/daemon.go: Daemon struct + Start/Stop\n- internal/daemonrunner/process.go: PID/lock/socket handling\n- internal/daemonrunner/rpc_server.go: RPC lifecycle\n- internal/daemonrunner/sync.go: Export/import/commit/push logic\n- internal/daemonrunner/git.go: Git operations interface\n- cmd/bd/daemon.go: Thin cobra command","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-11-01T11:41:14.821017-07:00","updated_at":"2025-11-01T21:44:44.507747-07:00","closed_at":"2025-11-01T21:44:44.507747-07:00","source_repo":"."} {"id":"bd-5f483051","content_hash":"c14449fb07074c3ff76a653cc632f5795e2e0fb8f381c5f1a1f0dd831fe4f13f","title":"Implement bd resolve-conflicts (git merge conflicts in JSONL)","description":"Automatically detect and resolve git merge conflicts in .beads/issues.jsonl file.\n\nFeatures:\n- Detect conflict markers in JSONL\n- Parse conflicting issues from HEAD and BASE\n- Provide mechanical resolution (remap duplicate IDs)\n- Support AI-assisted resolution (requires internal/ai package)\n\nSee repair_commands.md lines 125-353 for design.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-28T19:37:55.722827-07:00","updated_at":"2025-11-06T19:36:13.970903-08:00","closed_at":"2025-11-06T19:26:45.397628-08:00","source_repo":"."} {"id":"bd-5ibn","content_hash":"ad2cfd0d4c3889f1aede7b4c86f42b84474a4689563d7c850ef20010dec886ca","title":"Latency test 1","description":"","notes":"Resetting stale in_progress status from old executor run (yesterday)","status":"closed","priority":3,"issue_type":"task","created_at":"2025-11-20T12:16:30.703754-05:00","updated_at":"2025-11-24T11:31:04.102074-08:00","closed_at":"2025-11-23T23:35:05.255017-08:00","source_repo":"."} -{"id":"bd-5itp","content_hash":"03f9c9be28e09662276a74a4c399b0fa6d047d1db9c85d57e1ea0f4f9541a922","title":"Add label operations to Transaction 
interface","description":"Extend Transaction interface with label management operations.\n\n## Tasks\n1. Add to Transaction interface:\n - AddLabel(ctx, issueID, label, actor) error\n - RemoveLabel(ctx, issueID, label, actor) error\n - GetLabels(ctx, issueID) ([]string, error)\n\n2. Implement on sqliteTxStorage:\n - Reuse label logic with conn parameter\n - Ensure events recorded within transaction\n - Mark issues dirty within transaction\n\n3. Create addLabelWithConn/removeLabelWithConn helpers\n\n## Acceptance Criteria\n- [ ] AddLabel works within transaction\n- [ ] RemoveLabel works within transaction \n- [ ] GetLabels reads labels within transaction scope\n- [ ] Events recorded atomically\n- [ ] Test: create issue + add multiple labels atomically","status":"open","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:57.49503-08:00","updated_at":"2025-11-24T11:36:57.49503-08:00","source_repo":".","dependencies":[{"issue_id":"bd-5itp","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:57.496383-08:00","created_by":"daemon"},{"issue_id":"bd-5itp","depends_on_id":"bd-5xxq","type":"blocks","created_at":"2025-11-24T11:36:57.497218-08:00","created_by":"daemon"}]} +{"id":"bd-5itp","content_hash":"708f7859705daf09cb86665f10ff4e22e64515974d0b17e9f7218d9e9b788e5b","title":"Add label operations to Transaction interface","description":"Extend Transaction interface with label management operations.\n\n## Tasks\n1. Add to Transaction interface:\n - AddLabel(ctx, issueID, label, actor) error\n - RemoveLabel(ctx, issueID, label, actor) error\n - GetLabels(ctx, issueID) ([]string, error)\n\n2. Implement on sqliteTxStorage:\n - Reuse label logic with conn parameter\n - Ensure events recorded within transaction\n - Mark issues dirty within transaction\n\n3. 
Create addLabelWithConn/removeLabelWithConn helpers\n\n## Acceptance Criteria\n- [ ] AddLabel works within transaction\n- [ ] RemoveLabel works within transaction \n- [ ] GetLabels reads labels within transaction scope\n- [ ] Events recorded atomically\n- [ ] Test: create issue + add multiple labels atomically","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:57.49503-08:00","updated_at":"2025-11-24T12:23:13.57105-08:00","closed_at":"2025-11-24T12:23:13.57105-08:00","source_repo":".","dependencies":[{"issue_id":"bd-5itp","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:57.496383-08:00","created_by":"daemon"},{"issue_id":"bd-5itp","depends_on_id":"bd-5xxq","type":"blocks","created_at":"2025-11-24T11:36:57.497218-08:00","created_by":"daemon"}]} {"id":"bd-5iv","content_hash":"229ad9764bd3eb8b09441adefce960aede63fd1b5466d52cc74f112f5bb610ac","title":"Test Epic","description":"## Overview\n\n[Describe the high-level goal and scope of this epic]\n\n## Success Criteria\n\n- [ ] Criteria 1\n- [ ] Criteria 2\n- [ ] Criteria 3\n\n## Background\n\n[Provide context and motivation]\n\n## Scope\n\n**In Scope:**\n- Item 1\n- Item 2\n\n**Out of Scope:**\n- Item 1\n- Item 2\n","design":"## Architecture\n\n[Describe the overall architecture and approach]\n\n## Components\n\n- Component 1: [description]\n- Component 2: [description]\n\n## Dependencies\n\n[List external dependencies or constraints]\n","acceptance_criteria":"- [ ] All child issues are completed\n- [ ] Integration tests pass\n- [ ] Documentation is updated\n- [ ] Code review completed\n","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-11-03T20:15:03.864229-08:00","updated_at":"2025-11-05T00:25:06.538749-08:00","closed_at":"2025-11-05T00:25:06.538749-08:00","source_repo":".","labels":["epic"]} {"id":"bd-5ki8","content_hash":"d89e5e528819934bcb7ee162fa7e32c27298db5816ecf51bcc8ede1809f1d5b9","title":"Add integration tests for adapter 
library","description":"Test suite for beads_mail_adapter.py covering all scenarios.\n\nAcceptance Criteria:\n- Test enabled mode (server available)\n- Test disabled mode (server unavailable)\n- Test graceful degradation (server dies mid-operation)\n- Test reservation conflicts\n- Test message sending/receiving\n- Mock HTTP server for testing\n- 90%+ code coverage\n\nFile: lib/test_beads_mail_adapter.py","notes":"Test suite completed with 29 comprehensive tests covering:\n- Enabled mode (server available): 10 tests\n- Disabled mode (server unavailable): 2 tests \n- Graceful degradation: 4 tests\n- Reservation conflicts: 2 tests\n- Configuration: 5 tests\n- Health check scenarios: 3 tests\n- HTTP error handling: 3 tests\n\n**Performance**: All tests run in 10ms (fast!)\n\n**Coverage highlights**:\n✅ Server health checks (ok, degraded, error, timeout)\n✅ All API operations (reserve, release, notify, check_inbox, get_reservations)\n✅ HTTP errors (404, 409 conflict, 500, 503)\n✅ Network errors (timeout, connection refused)\n✅ Malformed responses (bad JSON, empty body, plain text errors)\n✅ Environment variable configuration\n✅ Graceful degradation when server dies mid-operation\n✅ Conflict handling with both JSON and plain text errors\n✅ Dict wrapper responses ({\"messages\": [...]} and {\"reservations\": [...]})\n✅ Custom TTL for reservations\n✅ Default agent name fallback\n\nNo external dependencies, no slow integration tests, just fast unit tests with mocks.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-07T22:43:21.294596-08:00","updated_at":"2025-11-08T01:32:39.906342-08:00","closed_at":"2025-11-08T01:32:39.906342-08:00","source_repo":".","dependencies":[{"issue_id":"bd-5ki8","depends_on_id":"bd-m9th","type":"blocks","created_at":"2025-11-07T22:43:21.296024-08:00","created_by":"daemon"}]} {"id":"bd-5ots","content_hash":"ba3efab3e7a2b9bb2bd2dba3aace56cfbdd1b67efd1cfc4758d9c79919f632af","title":"SearchIssues N+1 query causes context 
timeout with GetLabels","description":"scanIssues() calls GetLabels in a loop for every issue, causing N+1 queries and context deadline exceeded errors when used with short timeouts or in-memory databases. This is especially problematic since SearchIssues already supports label filtering via SQL WHERE clauses.","acceptance_criteria":"- Optimize scanIssues to batch-load labels for all issues in one query\n- Or make label loading optional/lazy\n- Add test that calls SearchIssues repeatedly with label filters and short context timeouts","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-11-05T19:12:02.245879-08:00","updated_at":"2025-11-05T19:22:11.668682-08:00","closed_at":"2025-11-05T19:22:11.668682-08:00","source_repo":"."} {"id":"bd-5qim","content_hash":"5117e87c5a56b5b8254402d982e85bea1478c1961f05654a50cf2df11e7ad6bf","title":"Optimize GetReadyWork performance - 752ms on 10K database (target: \u003c50ms)","description":"","notes":"# Performance Analysis (10K Issue Database)\n\nAnalyzed using CPU profiles from benchmark suite on Apple M2 Pro.\n\n## Operation Performance\n\n| Operation | Time | Allocations | Memory |\n|----------------------------------|---------|-------------|--------|\n| bd ready (GetReadyWork) | ~752ms | 167,466 | 16MB |\n| bd list (SearchIssues no filter) | ~11.6ms | 89,214 | 5.8MB |\n| bd list (SearchIssues filtered) | ~9.2ms | 62,365 | 3.5MB |\n| bd create (CreateIssue) | ~2.6ms | 146 | 8.6KB |\n| bd update (UpdateIssue) | ~0.32ms | 364 | 15KB |\n| bd close (UpdateIssue) | ~0.32ms | 364 | 15KB |\n\n**Target: \u003c50ms for all operations on 10K database**\n\n**Current issue: GetReadyWork is 15x over target (752ms vs 50ms)**\n\n## Root Cause\n\nGetReadyWork (internal/storage/sqlite/ready.go:90-128) uses recursive CTE to propagate blocking:\n- 65x slower than SearchIssues\n- Recalculates entire blocked issue tree on every call\n- Algorithm:\n 1. Find directly blocked issues via 'blocks' dependencies\n 2. 
Recursively propagate blockage to descendants (max depth: 50)\n 3. Exclude all blocked issues from results\n\n## CPU Profile Analysis\n\n- Database syscalls (pthread_cond_signal, syscall6): ~75%\n- SQLite engine overhead: inherent to recursive CTE\n- Application code (query construction): \u003c1%\n\n**Bottleneck is the recursive CTE query execution, not application code.**\n\n## Optimization Recommendations\n\n### High Impact (Likely to achieve \u003c50ms target)\n\n1. **Cache blocked issue calculation**\n - Add `blocked_issues` table updated on dependency changes\n - Trade write complexity for read speed (ready called \u003e\u003e dependency changes)\n - Eliminates recursive CTE on every read\n\n2. **Add/verify database indexes**\n ```sql\n CREATE INDEX IF NOT EXISTS idx_dependencies_blocked \n ON dependencies(issue_id, type, depends_on_id);\n CREATE INDEX IF NOT EXISTS idx_issues_status \n ON issues(status);\n ```\n\n### Medium Impact\n\n3. **Reduce allocations** (167K allocations for GetReadyWork)\n - Profile `scanIssues()` for object pooling opportunities\n - Reuse slice capacity for repeated calls\n\n### Low Impact (Not recommended)\n- Query optimization for CRUD operations (already \u003c3ms)\n- Connection pooling tuning (not showing in profiles)\n\n## Verification\n\nRun benchmarks to validate optimization:\n```bash\nmake bench-quick\ngo tool pprof -http=:8080 internal/storage/sqlite/bench-cpu-*.prof\n```\n\nProfile files automatically generated in `internal/storage/sqlite/`.","status":"open","priority":0,"issue_type":"bug","created_at":"2025-11-14T09:02:46.507526-08:00","updated_at":"2025-11-14T09:03:44.073236-08:00","source_repo":"."} {"id":"bd-5xt","content_hash":"1a44abd45874666cea2170bb3559934f72acf3b5171d0b587cc49eef386e110d","title":"Log errors from timer-triggered flushes instead of 
discarding","description":"","status":"closed","priority":3,"issue_type":"task","created_at":"2025-11-20T21:22:06.694953-05:00","updated_at":"2025-11-20T21:35:53.117434-05:00","closed_at":"2025-11-20T21:35:53.117434-05:00","source_repo":".","comments":[{"id":5,"issue_id":"bd-5xt","author":"stevey","text":"In FlushManager.run() at flush_manager.go:197-200, timer-triggered flushes silently discard errors:\n\n```go\ncase \u003c-fm.timerFiredCh:\n if isDirty {\n _ = fm.performFlush(needsFullExport) // ← Error discarded!\n```\n\nUsers won't know if auto-flush is failing. Should at minimum log errors.\n\nConsider:\n- Log error with debug.Logf() or fmt.Fprintf(os.Stderr)\n- Optionally expose flush status via a Status() method\n- Track last flush error for diagnostics\n\nRelated: Code review finding #4 from bd-52 race condition fix review.","created_at":"2025-11-21T15:01:44Z"}]} -{"id":"bd-5xxq","content_hash":"7ab3c739cd4867e6ddda50ed509ee3298cd400f3f5f8ce5d14b6d500f72c45a7","title":"Implement sqliteTxStorage for core issue operations","description":"Implement the Transaction interface for SQLite with core issue CRUD operations.\n\n## Tasks\n1. Create sqliteTxStorage struct in internal/storage/sqlite/:\n ```go\n type sqliteTxStorage struct {\n conn *sql.Conn\n storage *SQLiteStorage\n }\n ```\n\n2. Implement RunInTransaction on SQLiteStorage:\n - Acquire dedicated connection via db.Conn(ctx)\n - Use beginImmediateWithRetry for write lock\n - Create sqliteTxStorage wrapper\n - Execute callback with proper commit/rollback\n\n3. Implement issue operations on sqliteTxStorage:\n - CreateIssue (reuse existing helpers with conn)\n - UpdateIssue\n - CloseIssue \n - DeleteIssue\n - GetIssue\n\n4. 
Ensure proper error handling and rollback on panic\n\n## Acceptance Criteria\n- [ ] sqliteTxStorage implements Transaction interface (issue methods)\n- [ ] RunInTransaction properly manages connection lifecycle\n- [ ] BEGIN IMMEDIATE used with retry logic\n- [ ] Rollback occurs on error or panic\n- [ ] Unit tests for commit/rollback scenarios","status":"open","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:39.338416-08:00","updated_at":"2025-11-24T11:36:39.338416-08:00","source_repo":".","dependencies":[{"issue_id":"bd-5xxq","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:39.340022-08:00","created_by":"daemon"},{"issue_id":"bd-5xxq","depends_on_id":"bd-wbx6","type":"blocks","created_at":"2025-11-24T11:36:39.340989-08:00","created_by":"daemon"}]} +{"id":"bd-5xxq","content_hash":"a1e117a1f844cc4463c3b515ef7c6e887552a13d10dda4235590bb1a87dba577","title":"Implement sqliteTxStorage for core issue operations","description":"Implement the Transaction interface for SQLite with core issue CRUD operations.\n\n## Tasks\n1. Create sqliteTxStorage struct in internal/storage/sqlite/:\n ```go\n type sqliteTxStorage struct {\n conn *sql.Conn\n storage *SQLiteStorage\n }\n ```\n\n2. Implement RunInTransaction on SQLiteStorage:\n - Acquire dedicated connection via db.Conn(ctx)\n - Use beginImmediateWithRetry for write lock\n - Create sqliteTxStorage wrapper\n - Execute callback with proper commit/rollback\n\n3. Implement issue operations on sqliteTxStorage:\n - CreateIssue (reuse existing helpers with conn)\n - UpdateIssue\n - CloseIssue \n - DeleteIssue\n - GetIssue\n\n4. 
Ensure proper error handling and rollback on panic\n\n## Acceptance Criteria\n- [ ] sqliteTxStorage implements Transaction interface (issue methods)\n- [ ] RunInTransaction properly manages connection lifecycle\n- [ ] BEGIN IMMEDIATE used with retry logic\n- [ ] Rollback occurs on error or panic\n- [ ] Unit tests for commit/rollback scenarios","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:39.338416-08:00","updated_at":"2025-11-24T12:20:13.371529-08:00","closed_at":"2025-11-24T12:20:13.371529-08:00","source_repo":".","dependencies":[{"issue_id":"bd-5xxq","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:39.340022-08:00","created_by":"daemon"},{"issue_id":"bd-5xxq","depends_on_id":"bd-wbx6","type":"blocks","created_at":"2025-11-24T11:36:39.340989-08:00","created_by":"daemon"}]} {"id":"bd-6049","content_hash":"16c54bc547f4ab180aee39efbb197709a47a39047f5bc2dd59e6e6b57ca8bc87","title":"bd doctor --json flag not working","description":"The --json flag on bd doctor command doesn't produce JSON output. It continues to show human-readable output instead. The flag is registered locally on doctorCmd but the code uses the global jsonOutput variable set by PersistentPreRun. Need to investigate why the flag isn't being honored.","status":"closed","priority":2,"issue_type":"bug","created_at":"2025-11-02T17:08:18.170428-08:00","updated_at":"2025-11-02T18:41:01.376783-08:00","closed_at":"2025-11-02T18:41:01.376786-08:00","source_repo":".","comments":[{"id":15,"issue_id":"bd-6049","author":"stevey","text":"Fixed by removing the local --json flag definition in doctor.go that was shadowing the persistent --json flag from main.go. 
The doctor command now correctly uses the global jsonOutput variable.","created_at":"2025-11-22T07:13:03Z"}]} {"id":"bd-6214875c","content_hash":"d4d20e71bbf5c08f1fe1ed07f67b7554167aa165d4972ea51b5cacc1b256c4c1","title":"Split internal/rpc/server.go into focused modules","description":"The file `internal/rpc/server.go` is 2,273 lines with 50+ methods, making it difficult to navigate and prone to merge conflicts. Split into 8 focused files with clear responsibilities.\n\nCurrent structure: Single 2,273-line file with:\n- Connection handling\n- Request routing\n- All 40+ RPC method implementations\n- Storage caching\n- Health checks \u0026 metrics\n- Cleanup loops\n\nTarget structure:\n```\ninternal/rpc/\n├── server.go # Core server, connection handling (~300 lines)\n├── methods_issue.go # Issue operations (~400 lines)\n├── methods_deps.go # Dependency operations (~200 lines)\n├── methods_labels.go # Label operations (~150 lines)\n├── methods_ready.go # Ready work queries (~150 lines)\n├── methods_compact.go # Compaction operations (~200 lines)\n├── methods_comments.go # Comment operations (~150 lines)\n├── storage_cache.go # Storage caching logic (~300 lines)\n└── health.go # Health \u0026 metrics (~200 lines)\n```\n\nMigration strategy:\n1. Create new files with appropriate methods\n2. Keep `server.go` as main file with core server logic\n3. Test incrementally after each file split\n4. 
Final verification with full test suite","acceptance_criteria":"- All 50 methods split into appropriate files\n- Each file \u003c500 LOC\n- All methods remain on `*Server` receiver (no behavior change)\n- All tests pass: `go test ./internal/rpc/...`\n- Verify daemon works: start daemon, run operations, check health\n- Update internal documentation if needed\n- No change to public API","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-28T14:21:37.51524-07:00","updated_at":"2025-10-30T17:12:58.2179-07:00","closed_at":"2025-10-28T14:11:04.399811-07:00","source_repo":"."} {"id":"bd-6221bdcd","content_hash":"6749091ed73f5ec7b55af226b2ae8c9aa134759951435e08e65a363c674ea0c9","title":"Optimize cmd/bd test suite performance (currently 30+ minutes)","description":"CLI test suite is extremely slow (~30+ minutes for full run). Tests are poorly designed and need performance optimization before expanding coverage.\n\nCurrent coverage: 24.8% (improved from 20.2%)\n\n**Problem**: Tests take far too long to run, making development iteration painful.\n\n**Priority**: Fix test performance FIRST, then consider increasing coverage.\n\n**Investigation needed**:\n- Profile test execution to identify bottlenecks\n- Look for redundant git operations, database initialization, or daemon operations\n- Identify opportunities for test parallelization\n- Consider mocking or using in-memory databases where appropriate\n- Review test design patterns\n\n**Related**: bd-ktng mentions 13 CLI tests with redundant git init calls (31s total)\n\n**Goal**: Get full test suite under 1-2 minutes before adding more 
tests.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T14:06:27.951656-07:00","updated_at":"2025-11-08T22:42:08.862178-08:00","closed_at":"2025-11-08T22:41:05.766749-08:00","source_repo":".","dependencies":[{"issue_id":"bd-6221bdcd","depends_on_id":"bd-4d7fca8a","type":"blocks","created_at":"2025-10-29T19:52:05.532391-07:00","created_by":"import-remap"}]} @@ -291,7 +291,7 @@ {"id":"bd-9rw1","content_hash":"17ad82d17e34ca2bfab2fa7240517520e3c42953a780282664f50cf038c97688","title":"Support P-prefix priority format (P0-P4) in create and update commands","description":"","status":"closed","priority":1,"issue_type":"feature","created_at":"2025-11-05T13:56:04.796826-08:00","updated_at":"2025-11-05T13:56:08.157061-08:00","closed_at":"2025-11-05T13:56:08.157061-08:00","source_repo":"."} {"id":"bd-9v7l","content_hash":"10b1c2ca4d67587bdf220cf7ae04253eb01edca8a59756431bc3d453cbb85008","title":"bd status \"Recent Activity\" is misleading - should use git history","description":"## Problem\n\n`bd status` shows \"Recent Activity (last 7 days)\" but the numbers are wrong. It only looks at database timestamps, not git history. Says \"141 issues closed in last 7 days\" when thousands have actually come and go.\n\n## Issues\n\n1. Only queries database timestamps, not git history\n2. 7 days is too long a window\n3. 
Numbers don't reflect actual activity in JSONL git history\n\n## Proposed Fix\n\nEither:\n- Query git history of `.beads/beads.jsonl` to get actual activity (last 24-48 hours)\n- Remove \"Recent Activity\" section entirely if not useful\n- Make time window configurable and default to 24h\n\n## Example Output (Current)\n```\nRecent Activity (last 7 days):\nIssues Created: 174\nIssues Closed: 141\nIssues Updated: 37\n```\nThis is misleading when thousands of issues have actually cycled through.","status":"closed","priority":2,"issue_type":"bug","created_at":"2025-11-05T01:03:00.234813-08:00","updated_at":"2025-11-06T18:47:42.682987-08:00","closed_at":"2025-11-06T18:47:42.682987-08:00","source_repo":"."} {"id":"bd-a03d5e36","content_hash":"f63ec5a25a14c9b01ca8b97ea14d0b00c42e8d6fe3b39f6e261411134a024de8","title":"Improve integration test coverage for stateful features","description":"","design":"## Context\n\nbd-70419816 revealed a critical gap: the export deduplication feature had unit tests but no integration tests simulating real-world git operations. This led to silent data loss in production.\n\n## Root Cause\n- Unit tests only tested functions in isolation\n- No integration tests for git operations (pull, reset, checkout) modifying JSONL\n- No tests validating export_hashes and JSONL stay in sync\n- Missing tests for stateful distributed system interactions (DB + JSONL + git)\n\n## Completed (bd-70419816)\n✓ TestJSONLIntegrityValidation - unit tests for validation logic\n✓ TestImportClearsExportHashes - tests import clears hashes\n✓ TestExportIntegrityAfterJSONLTruncation - simulates git reset (would have caught bd-70419816)\n✓ TestExportIntegrityAfterJSONLDeletion - tests recovery from file deletion\n✓ TestMultipleExportsStayConsistent - tests repeated exports\n\n## Still Needed (High Priority)\n1. Multi-repo sync test - two clones staying in sync after push/pull\n2. Auto-flush integration test - JSONL integrity preserved during auto-flush\n3. 
Daemon auto-sync integration test - complex state management\n4. Import after corruption test - recovery from partial data loss\n\n## Medium Priority\n- Partial export failure handling (disk full, network interruption)\n- Concurrent export/import race conditions\n- Large dataset performance tests (1000+ issues)\n- Export hash migration tests (version upgrades)\n\n## Testing Principles\n1. Test real-world scenarios: git ops, user errors, system failures, concurrent ops\n2. Integration tests for stateful systems (DB + files + git)\n3. Regression test for every bug fix\n4. Test invariants: JSONL count == DB count, hash consistency, etc.\n\n## Key Lesson\nStateful distributed systems need integration tests, not just unit tests.","acceptance_criteria":"- [ ] Multi-repo sync test implemented\n- [ ] Auto-flush integration test implemented \n- [ ] Daemon auto-sync integration test implemented\n- [ ] Testing guidelines added to CONTRIBUTING.md\n- [ ] CI runs integration tests\n- [ ] All critical workflows have integration test coverage","status":"closed","priority":2,"issue_type":"epic","created_at":"2025-10-29T21:53:15.397137-07:00","updated_at":"2025-11-08T01:58:15.281757-08:00","closed_at":"2025-11-08T00:36:59.02371-08:00","source_repo":"."} -{"id":"bd-a0oi","content_hash":"23c846ffd92c61f89fb319dc4380d4e0f8595464185032940056cc9b8461960b","title":"Integration tests for transaction API","description":"Comprehensive integration tests for the transaction API.\n\n## Test Scenarios\n\n### Happy Path\n1. TestAtomicIssueCreation - create multiple issues, verify all exist\n2. TestAtomicPlanApproval - simulate VC workflow (issues + deps + labels)\n3. TestReadYourWrites - create then query in same transaction\n\n### Rollback Scenarios\n4. TestRollbackOnError - verify no partial state on error return\n5. TestRollbackOnPanic - verify cleanup on panic\n6. TestRollbackOnContextCancel - verify cleanup on ctx.Done()\n\n### Concurrency\n7. 
TestConcurrentTransactions - multiple goroutines with transactions\n8. TestSQLiteBusyRetry - simulate contention, verify retry works\n\n### Edge Cases\n9. TestEmptyTransaction - commit with no operations\n10. TestNestedOperationFailure - first op succeeds, second fails\n\n## Acceptance Criteria\n- [ ] All happy path tests pass\n- [ ] All rollback tests verify no partial state\n- [ ] Concurrent tests pass with -race flag\n- [ ] Tests documented with clear assertions","status":"open","priority":1,"issue_type":"task","created_at":"2025-11-24T11:37:32.281002-08:00","updated_at":"2025-11-24T11:37:32.281002-08:00","source_repo":".","dependencies":[{"issue_id":"bd-a0oi","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:37:32.284897-08:00","created_by":"daemon"},{"issue_id":"bd-a0oi","depends_on_id":"bd-v257","type":"blocks","created_at":"2025-11-24T11:37:32.28618-08:00","created_by":"daemon"},{"issue_id":"bd-a0oi","depends_on_id":"bd-5itp","type":"blocks","created_at":"2025-11-24T11:37:32.288275-08:00","created_by":"daemon"}]} +{"id":"bd-a0oi","content_hash":"c95e7fc5762dfe640a0f97449f160d7415440d1bee6ce23168d9cc8c5d58b490","title":"Integration tests for transaction API","description":"Comprehensive integration tests for the transaction API.\n\n## Test Scenarios\n\n### Happy Path\n1. TestAtomicIssueCreation - create multiple issues, verify all exist\n2. TestAtomicPlanApproval - simulate VC workflow (issues + deps + labels)\n3. TestReadYourWrites - create then query in same transaction\n\n### Rollback Scenarios\n4. TestRollbackOnError - verify no partial state on error return\n5. TestRollbackOnPanic - verify cleanup on panic\n6. TestRollbackOnContextCancel - verify cleanup on ctx.Done()\n\n### Concurrency\n7. TestConcurrentTransactions - multiple goroutines with transactions\n8. TestSQLiteBusyRetry - simulate contention, verify retry works\n\n### Edge Cases\n9. TestEmptyTransaction - commit with no operations\n10. 
TestNestedOperationFailure - first op succeeds, second fails\n\n## Acceptance Criteria\n- [ ] All happy path tests pass\n- [ ] All rollback tests verify no partial state\n- [ ] Concurrent tests pass with -race flag\n- [ ] Tests documented with clear assertions","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-24T11:37:32.281002-08:00","updated_at":"2025-11-24T12:25:19.202104-08:00","closed_at":"2025-11-24T12:25:19.202104-08:00","source_repo":".","dependencies":[{"issue_id":"bd-a0oi","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:37:32.284897-08:00","created_by":"daemon"},{"issue_id":"bd-a0oi","depends_on_id":"bd-v257","type":"blocks","created_at":"2025-11-24T11:37:32.28618-08:00","created_by":"daemon"},{"issue_id":"bd-a0oi","depends_on_id":"bd-5itp","type":"blocks","created_at":"2025-11-24T11:37:32.288275-08:00","created_by":"daemon"}]} {"id":"bd-a101","content_hash":"9c8ac3184d936a5483d307ea72e34fa6308e99416b27c930c1b7b05660173f47","title":"Support separate branch for beads commits","description":"Allow beads to commit to a separate branch (e.g., beads-metadata) using git worktrees to support protected main branch workflows.\n\nSolves GitHub Issue #205 - Users need to protect main branch while maintaining beads workflow.\n\nKey advantages:\n- Works on any git platform\n- Main branch stays protected \n- No disruption to user's working directory\n- Backward compatible (opt-in via config)\n- Minimal disk overhead (sparse checkout)\n\nTotal estimate: 17-24 days (4-6 weeks with parallel work)","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-11-02T15:21:20.098247-08:00","updated_at":"2025-11-04T12:36:53.772727-08:00","closed_at":"2025-11-04T12:36:53.772727-08:00","external_ref":"GH-205","source_repo":"."} {"id":"bd-a1691807","content_hash":"52a3da17d0db9e7998b77b4962c00eeb866ca1eb3581d362863b68788b162582","title":"Integration test: mutation to export latency","description":"Measure time from bd 
create to JSONL update. Verify \u003c500ms latency. Test with multiple rapid mutations to verify batching.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T20:49:49.105247-07:00","updated_at":"2025-10-31T12:00:43.198883-07:00","closed_at":"2025-10-31T12:00:43.198883-07:00","source_repo":"."} {"id":"bd-a40f374f","content_hash":"a9385e9f00bc41e5e2258fdfccd9f2cbd5a702764b5f1d036274e6026f8c3e38","title":"bd validate - Comprehensive health check","description":"Run all validation checks in one command.\n\nChecks:\n- Duplicates\n- Orphaned dependencies\n- Test pollution\n- Git conflicts\n\nSupports --fix-all for auto-repair.\n\nDepends on bd-cbed9619.1, bd-0dcea000, bd-31aab707, bd-9826b69a.\n\nFiles: cmd/bd/validate.go (new)","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-20T12:40:11.708313-05:00","updated_at":"2025-11-24T11:31:04.108166-08:00","closed_at":"2025-11-24T11:31:04.10817-08:00","source_repo":"."} @@ -619,13 +619,13 @@ {"id":"bd-urob","content_hash":"fc0e79260f5f6860fa8884859c4b33b18f9cc2dad361c1c1abb9bdeb412479b5","title":"bd-hv01: Refactor snapshot management into dedicated module","description":"Problem: Snapshot logic is scattered across deletion_tracking.go. 
Would benefit from abstraction with SnapshotManager type.\n\nBenefits: cleaner separation of concerns, easier to test in isolation, better encapsulation, could add observability/metrics.\n\nSuggested improvements: add magic constants, track merge statistics, better error messages.\n\nFiles: cmd/bd/deletion_tracking.go (refactor into new snapshot_manager.go)","status":"closed","priority":3,"issue_type":"chore","created_at":"2025-11-06T18:16:27.943666-08:00","updated_at":"2025-11-08T02:24:24.686744-08:00","closed_at":"2025-11-08T02:19:14.152412-08:00","source_repo":".","dependencies":[{"issue_id":"bd-urob","depends_on_id":"bd-rbxi","type":"parent-child","created_at":"2025-11-06T18:19:15.192447-08:00","created_by":"daemon"}]} {"id":"bd-ut5","content_hash":"d9b4ee9c7748c28dc2cd436911b1bee39e68e82df99b718ebe57a246f72a6bcb","title":"Test label update feature","description":"","status":"open","priority":3,"issue_type":"task","created_at":"2025-11-21T22:07:25.488849-05:00","updated_at":"2025-11-21T22:07:25.488849-05:00","source_repo":".","labels":["test-direct"]} {"id":"bd-v0y","content_hash":"72d91011884bc9e20ac1c18b67f66489dcf49f3fec6da4d9b4dbbe1832d8b72f","title":"Critical: bd sync overwrites git-pulled JSONL instead of importing it","description":"## Problem\n\nWhen a user runs `git pull` (which updates .beads/beads.jsonl) followed by `bd sync`, the sync command fails to detect that the JSONL changed and exports the local database, **overwriting the pulled JSONL** instead of importing it first.\n\nThis causes cleaned-up issues (removed via `bd cleanup` in another repo) to be resurrected and pushed back to the remote.\n\n## Root Cause\n\n`hasJSONLChanged()` in cmd/bd/integrity.go incorrectly returns FALSE when it should return TRUE after git pull updates the JSONL file.\n\nThe function has two code paths:\n1. **Fast-path (line 128-129)**: If mtime unchanged, return false\n2. 
**Slow-path (line 135-151)**: Compute hash and compare\n\nSuspected issue: The mtime-based fast-path may incorrectly return false if:\n- Git preserves mtime when checking out files, OR\n- The mtime check logic has a race condition, OR\n- The stored last_import_mtime is stale/incorrect\n\n## Reproduction\n\n**Setup:**\n1. Repo A: Has 686 issues (including 630 closed)\n2. Run `bd cleanup -f` in Repo A (removes 630 closed issues → 56 issues remain)\n3. Push to remote\n4. Repo B: Still has 686 issues in database\n\n**Trigger:**\n1. In Repo B: `git pull` (JSONL now has 56 issues)\n2. In Repo B: `bd sync`\n\n**Expected behavior:**\n- hasJSONLChanged() returns TRUE\n- Auto-imports 56-issue JSONL\n- Database updated to 56 issues\n- Exports (no-op, DB and JSONL match)\n- Pushes\n\n**Actual behavior:**\n- hasJSONLChanged() returns FALSE ❌\n- Skips auto-import\n- Exports 686 issues from DB to JSONL\n- Commits and pushes the 686-issue JSONL\n- **Undoes the cleanup from Repo A**\n\n## Evidence\n\nFrom actual session (2025-11-23):\n- Commit 8bc8611 in cino/beads shows: `+640, -14 lines` after bd sync\n- No \"Auto-import complete\" message in output\n- Database had 686 issues, JSONL was pulled with 56 issues\n- Sync exported DB → JSONL instead of importing JSONL → DB\n\n## Impact\n\n**Critical:** This breaks multi-device sync and causes:\n- Lost work (cleanup operations undone)\n- Data resurrection (deleted issues come back)\n- Confusing merge conflicts\n- User trust issues (\"Why does sync keep bringing back deleted issues?\")\n\n## Proposed Solutions\n\n### Option 1: Remove mtime fast-path (safest)\nAlways compute and compare hashes. Eliminates false negatives from mtime issues.\n\n**Pros:** Guaranteed correctness\n**Cons:** Slightly slower (hash computation on every sync)\n\n### Option 2: Fix mtime comparison logic\nInvestigate why mtime check fails and fix it properly.\n\n**Areas to check:**\n- Does git preserve mtime on checkout? 
(may vary by git version/config)\n- Is last_import_mtime updated correctly after all operations?\n- Race condition between git pull and mtime check?\n\n### Option 3: Add hash-based validation\nKeep mtime fast-path but add hash validation as backup:\n- If mtime unchanged, still spot-check hash occasionally (e.g., 10% of the time)\n- Log warnings when mtime and hash disagree\n- This would catch the bug while maintaining performance\n\n### Option 4: Add `--force-import` flag to bd sync\nShort-term workaround: Allow users to force import even if hasJSONLChanged() returns false.\n\n## Workaround\n\nUntil fixed, after `git pull`:\n```bash\nbd import --force # Force import the pulled JSONL\nbd sync # Then sync normally\n```\n\nOr manually run bd cleanup in affected repos.\n\n## Files\n\n- cmd/bd/integrity.go:101-152 (hasJSONLChanged function)\n- cmd/bd/sync.go:130-143 (auto-import logic)\n- cmd/bd/autoflush.go:698 (export updates last_import_hash)\n\n## Related Issues\n\n- bd-77gm: Import reports misleading counts\n- bd-khnb: Content-based staleness detection (original implementation of hash checking)\n","notes":"## Fix Implemented (Option 1)\n\n**Root Cause Confirmed:**\nGit does NOT update mtime when checking out files. Testing confirmed that after `git checkout HEAD~1`, the file mtime remains unchanged even though content differs.\n\n**Solution:**\nRemoved the mtime-based fast-path in `hasJSONLChanged()`. Now always computes content hash for comparison.\n\n**Changes Made:**\n1. **cmd/bd/integrity.go:107-116** - Removed mtime fast-path, always compute hash\n2. **cmd/bd/sync.go:752** - Removed mtime storage after import\n3. **cmd/bd/import.go:340** - Removed mtime storage after import\n4. **cmd/bd/daemon_sync.go:280-301** - Removed mtime storage and updated comments\n5. **cmd/bd/daemon_sync_test.go:479,607,628** - Removed mtime assertions from tests\n\n**Performance Impact:**\nMinimal. 
Hash computation takes ~10-50ms even for large databases, which is acceptable for sync operations.\n\n**Testing:**\n- All existing tests pass\n- Test \"mtime changed but content same - git operation scenario\" verifies the fix\n- Full test suite passes (cmd/bd and all internal packages)\n\n**Verification:**\nThe fix ensures that after `git pull` updates the JSONL:\n1. `hasJSONLChanged()` returns TRUE (content hash differs)\n2. Auto-import runs and updates database\n3. No data loss or resurrection of deleted issues","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-11-23T22:24:53.69901-08:00","updated_at":"2025-11-23T23:52:29.997682-08:00","closed_at":"2025-11-23T22:50:12.611126-08:00","source_repo":"."} -{"id":"bd-v257","content_hash":"d6331c1b33762c0323f90f3c0ec1cc65fd503e0729990c5c947470f0b5a6c936","title":"Add dependency operations to Transaction interface","description":"Extend Transaction interface with dependency management operations.\n\n## Tasks\n1. Add to Transaction interface:\n - AddDependency(ctx, dep, actor) error\n - RemoveDependency(ctx, issueID, dependsOnID, actor) error\n\n2. Implement on sqliteTxStorage:\n - Reuse cycle detection logic with conn\n - Ensure proper event recording within transaction\n - Handle blocked cache invalidation\n\n3. 
Refactor addDependencyWithConn helper if needed\n\n## Acceptance Criteria\n- [ ] AddDependency works within transaction\n- [ ] RemoveDependency works within transaction\n- [ ] Cycle detection operates within transaction scope\n- [ ] Events recorded atomically with dependency changes\n- [ ] Test: create issue + add dependency atomically","status":"open","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:47.935232-08:00","updated_at":"2025-11-24T11:36:47.935232-08:00","source_repo":".","dependencies":[{"issue_id":"bd-v257","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:47.936811-08:00","created_by":"daemon"},{"issue_id":"bd-v257","depends_on_id":"bd-5xxq","type":"blocks","created_at":"2025-11-24T11:36:47.937562-08:00","created_by":"daemon"}]} +{"id":"bd-v257","content_hash":"0c1a2f12b23e756971a115d9f9595c59cc1994edce7017a445656788aa20532a","title":"Add dependency operations to Transaction interface","description":"Extend Transaction interface with dependency management operations.\n\n## Tasks\n1. Add to Transaction interface:\n - AddDependency(ctx, dep, actor) error\n - RemoveDependency(ctx, issueID, dependsOnID, actor) error\n\n2. Implement on sqliteTxStorage:\n - Reuse cycle detection logic with conn\n - Ensure proper event recording within transaction\n - Handle blocked cache invalidation\n\n3. 
Refactor addDependencyWithConn helper if needed\n\n## Acceptance Criteria\n- [ ] AddDependency works within transaction\n- [ ] RemoveDependency works within transaction\n- [ ] Cycle detection operates within transaction scope\n- [ ] Events recorded atomically with dependency changes\n- [ ] Test: create issue + add dependency atomically","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:47.935232-08:00","updated_at":"2025-11-24T12:23:13.535364-08:00","closed_at":"2025-11-24T12:23:13.535364-08:00","source_repo":".","dependencies":[{"issue_id":"bd-v257","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:47.936811-08:00","created_by":"daemon"},{"issue_id":"bd-v257","depends_on_id":"bd-5xxq","type":"blocks","created_at":"2025-11-24T11:36:47.937562-08:00","created_by":"daemon"}]} {"id":"bd-vavh","content_hash":"c4683032c24f356aa799a87390c2f95a280bb6ce1cd94d26bb5d4b0d8ea16829","title":"Fix row iterator resource leak in recursive dependency queries","description":"Critical resource leak in findAllDependentsRecursive() where rows.Close() is called AFTER early return on error, never executing.\n\nLocation: internal/storage/sqlite/sqlite.go:1131-1136\n\nProblem: \n- rows.Close() placed after return statement\n- On scan error, iterator never closed\n- Can exhaust SQLite connections under moderate load\n\nFix: Move defer rows.Close() to execute on all code paths\n\nImpact: Connection exhaustion during dependency traversal","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-11-16T14:50:55.881698-08:00","updated_at":"2025-11-16T15:03:55.009607-08:00","closed_at":"2025-11-16T15:03:55.009607-08:00","source_repo":"."} {"id":"bd-vcg5","content_hash":"82933ce7e0add2ee5b5830b343785c3585151453c5c06243af2b1f2b934e72b2","title":"Daemon crash recovery: panic handler + socket cleanup","description":"Improve daemon cleanup on unexpected exit:\n1. Add top-level recover() in runDaemonLoop to capture panics\n2. 
Write daemon-error file with stack trace on panic\n3. Prefer return over os.Exit where possible (so defers run)\n4. In stopDaemon forced-kill path, also remove stale socket if present\n\nThis ensures better diagnostics and cleaner state after crashes.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-07T16:42:12.733219-08:00","updated_at":"2025-11-07T22:07:17.347728-08:00","closed_at":"2025-11-07T21:17:15.94117-08:00","source_repo":".","dependencies":[{"issue_id":"bd-vcg5","depends_on_id":"bd-ndyz","type":"discovered-from","created_at":"2025-11-07T16:42:12.733889-08:00","created_by":"daemon"}]} {"id":"bd-vfe","content_hash":"c58e7567c9e5d4b789a593e7b5ca40ab109e9dc7b98275262aae09bd1b65650f","title":"Apply wrapDBError to config.go","description":"","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-24T00:53:12.771275-08:00","updated_at":"2025-11-24T00:54:15.676618-08:00","closed_at":"2025-11-24T00:54:15.676618-08:00","source_repo":"."} {"id":"bd-vxdr","content_hash":"d188358987c7a7d444f9144a4a6cc5164eccd35b16325edba51dad104ab2a7f2","title":"Investigate database pollution - issue count anomalies","description":"Multiple repos showing inflated issue counts suggesting cross-repo pollution:\n- ~/src/dave/beads: 895 issues (675 open) - clearly polluted\n- ~/src/stevey/src/beads: 280 issues (expected ~209-220) - possibly polluted\n\nNeed to investigate:\n1. Source of pollution (multi-repo sync issues?)\n2. How many duplicate/foreign issues exist\n3. Whether recent sync operations caused cross-contamination\n4. 
How to clean up and prevent future pollution","notes":"Investigation findings:\n\n**Root cause identified:**\n- NOT cross-repo contamination\n- NOT automated test leakage (tests properly use t.TempDir())\n- Manual testing during template feature development (Nov 2-4)\n- Commit ba325a2: \"test issues were accidentally committed during template feature development\"\n\n**Database growth timeline:**\n- Nov 3: 19 issues (baseline)\n- Nov 2-5: +244 issues (massive development spike)\n- Nov 6-7: +40 issues (continued growth)\n- Current: 291 issues → 270 after cleanup\n\n**Test pollution breakdown:**\n- 21 issues matching \"Test \" prefix pattern\n- Most created Nov 2-5 during feature development\n- Pollution from manual `./bd create \"Test issue\"` commands in production workspace\n- All automated tests properly isolated with t.TempDir()\n\n**Cleanup completed:**\n- Ran scripts/cleanup-test-pollution.sh successfully\n- Removed 21 test issues\n- Database reduced from 291 → 270 issues (7.2% cleanup)\n- JSONL synced to git\n\n**Prevention strategy:**\n- Filed follow-up issue for prevention mechanisms\n- Script can be deleted once prevention is in place\n- Tests are already properly isolated - no code changes needed there","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-11-06T22:34:40.137483-08:00","updated_at":"2025-11-07T16:07:28.274136-08:00","closed_at":"2025-11-07T16:04:02.199807-08:00","source_repo":"."} {"id":"bd-w8h","content_hash":"502d4b23c0fdd843d186445dfd0480a15aa451f1b39c640f0e1f3cb67503ed3c","title":"Apply wrapDBError to remaining storage files","description":"","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-24T00:53:15.498428-08:00","updated_at":"2025-11-24T00:56:47.434071-08:00","closed_at":"2025-11-24T00:56:47.434071-08:00","source_repo":"."} -{"id":"bd-wbx6","content_hash":"1dcecf0667bb4518c9f9d9ee5089c88bca631329d597ad0a9e260c8c59b1ad1a","title":"Define Transaction interface and add RunInTransaction to 
Storage","description":"Define the core Transaction interface and add RunInTransaction method to Storage interface.\n\n## Tasks\n1. Add Transaction interface to internal/storage/storage.go with core methods:\n - CreateIssue, CreateIssues, UpdateIssue, CloseIssue, DeleteIssue\n - GetIssue (for read-your-writes within transaction)\n \n2. Add RunInTransaction method signature to Storage interface\n\n3. Document transaction semantics in interface comments:\n - Commit on nil return\n - Rollback on error return or panic\n - IMMEDIATE mode for SQLite\n\n## Acceptance Criteria\n- [ ] Transaction interface defined with issue CRUD methods\n- [ ] RunInTransaction added to Storage interface\n- [ ] Clear documentation of transaction behavior\n- [ ] Compiles without implementation (interface only)","status":"open","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:28.821381-08:00","updated_at":"2025-11-24T11:36:28.821381-08:00","source_repo":".","dependencies":[{"issue_id":"bd-wbx6","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:28.822888-08:00","created_by":"daemon"}]} +{"id":"bd-wbx6","content_hash":"b701c367a3287a6689312aa50e602a6d2b38b7a210c27fbe42da795b65c649cd","title":"Define Transaction interface and add RunInTransaction to Storage","description":"Define the core Transaction interface and add RunInTransaction method to Storage interface.\n\n## Tasks\n1. Add Transaction interface to internal/storage/storage.go with core methods:\n - CreateIssue, CreateIssues, UpdateIssue, CloseIssue, DeleteIssue\n - GetIssue (for read-your-writes within transaction)\n \n2. Add RunInTransaction method signature to Storage interface\n\n3. 
Document transaction semantics in interface comments:\n - Commit on nil return\n - Rollback on error return or panic\n - IMMEDIATE mode for SQLite\n\n## Acceptance Criteria\n- [ ] Transaction interface defined with issue CRUD methods\n- [ ] RunInTransaction added to Storage interface\n- [ ] Clear documentation of transaction behavior\n- [ ] Compiles without implementation (interface only)","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-24T11:36:28.821381-08:00","updated_at":"2025-11-24T12:15:34.472285-08:00","closed_at":"2025-11-24T12:15:34.472285-08:00","source_repo":".","dependencies":[{"issue_id":"bd-wbx6","depends_on_id":"bd-6pul","type":"parent-child","created_at":"2025-11-24T11:36:28.822888-08:00","created_by":"daemon"}]} {"id":"bd-wcl","content_hash":"c08d62ce3627a49126c63f6a630a08c1666e5b1b8d9148ae0c72d7d06611b2a9","title":"Document CLI + hooks as recommended approach over MCP","description":"Update documentation to position CLI + bd prime hooks as the primary recommended approach over MCP server, explaining why minimizing context matters even with large context windows (compute cost, energy, environment, latency).","design":"## Goals\n\nPosition CLI + `bd prime` hooks as the **primary recommended approach** for AI agent integration, with MCP server as a legacy/fallback option.\n\nExplore **hybrid mode** - if certain commands benefit from MCP (UX/DX advantages like no approval prompts), minimize MCP surface area to only those commands.\n\nThis requires production validation first - only update docs after CLI mode is proven reliable.\n\n## Why Minimize Context (Even With Large Windows)\n\n**Context window size ≠ free resource**\n\nLarge context windows (100k+, 200k+) don't mean we should fill them wastefully. 
Every token in context has real costs:\n\n### Compute Cost\n- **Processing overhead**: Larger context = more GPU/CPU cycles per request\n- **Memory usage**: 10.5k tokens consume significant RAM/VRAM\n- **Scaling impact**: Multiplied across all users, all sessions, all requests\n\n### Energy \u0026 Environment\n- **Electricity**: More compute = more power consumption\n- **Carbon footprint**: Data centers running on grid power (not all renewable)\n- **Sustainability**: Unnecessary token usage contributes to AI's environmental impact\n- **Responsibility**: Efficient tools are better for the planet\n\n### User Experience\n- **Latency**: Larger context = slower processing (noticeable at 10k+ tokens)\n- **Cost**: Many AI services charge per token (input + output)\n- **Rate limits**: Context counts against API quotas\n\n### Engineering Excellence\n- **Efficiency**: Good engineering minimizes resource usage\n- **Scalability**: Efficient tools scale better\n- **Best practices**: Optimize for the common case\n\n**The comparison:**\n\n| Approach | Standing Context | Efficiency | User Cost | Environmental Impact |\n|----------|-----------------|------------|-----------|---------------------|\n| **CLI + hooks** | ~1-2k tokens | 80-90% reduction | Lower | Sustainable ✓ |\n| **MCP minimal** | ~2-4k tokens | 60-80% reduction | Medium | Better ✓ |\n| **MCP full** | ~10.5k tokens | Baseline | Higher | Wasteful ✗ |\n\n**Functional equivalence:**\n- CLI via Bash tool works just as well as MCP native calls\n- Same features, same reliability\n- No downside except initial learning curve\n\n## Hybrid Mode: Minimal MCP Surface Area\n\n**Philosophy:** MCP server doesn't have to expose everything.\n\nIf certain commands have legitimate UX/DX benefits from MCP (e.g., no approval prompts, cleaner syntax), we can expose ONLY those commands via MCP while using CLI for everything else.\n\n### Potential MCP-Only Candidates (TBD)\n\nCommands that might benefit from MCP native calls:\n- `ready` - 
frequently checked, no side effects, approval prompt annoying\n- `show` - read-only, frequently used, approval slows workflow\n- `list` - read-only, no risk, approval adds friction\n\nCommands that work fine via CLI:\n- `create` - complex parameters, benefits from explicit confirmation\n- `update` - state changes, good to see command explicitly\n- `close` - state changes, explicit is better\n- `dep` - relationships, good to see what's being linked\n- `sync` - git operations, definitely want visibility\n\n### Token Budget\n\n**Full MCP** (current): ~10.5k tokens\n- All ~20+ bd commands exposed\n- All parameter schemas\n- All descriptions and examples\n\n**Minimal MCP** (proposed): ~2-4k tokens\n- 3-5 high-frequency read commands only\n- Simplified schemas\n- Minimal descriptions\n- Everything else via CLI\n\n**Pure CLI**: ~1-2k tokens (only on SessionStart/PreCompact)\n- No MCP tools loaded\n- All commands via Bash\n\n### Investigation Required\n\nBefore implementing hybrid mode, validate:\n\n1. **Do MCP calls actually skip approval prompts?**\n - Test with Claude Code approval settings\n - Compare MCP tool calls vs Bash tool calls\n - Measure UX difference in real usage\n\n2. **What's the actual token breakdown per command?**\n - Measure individual command schemas\n - Calculate token savings for minimal vs full\n\n3. **Is approval prompt the only benefit?**\n - Are there other UX advantages to MCP?\n - Does native syntax actually improve experience?\n - User testing with both approaches\n\n4. **Can we dynamically load MCP tools?**\n - Only load MCP when certain commands needed?\n - Hot-swap between CLI and MCP?\n - Probably not - MCP loads at startup\n\n### Hybrid Mode Documentation (If Validated)\n\n```markdown\n## Choosing Your Integration Approach\n\nBeads supports three AI agent integration approaches:\n\n### CLI + Hooks (Recommended - Most Efficient)\n\n**Setup:** `bd setup claude`\n\nUses Claude Code hooks to inject workflow context via `bd prime` command. 
Agent uses bd via Bash tool.\n\n**Tokens:** ~1-2k (on SessionStart/PreCompact only)\n\n**Pros:**\n- Maximum efficiency (80-90% reduction vs full MCP)\n- Lowest compute/energy usage\n- Same functionality as MCP\n\n**Cons:**\n- Bash tool calls may require approval prompts\n- Slightly more verbose in conversation\n\n### Minimal MCP + Hooks (Balanced)\n\n**Setup:** Install minimal MCP server (read-only commands) + `bd setup claude`\n\nExposes only high-frequency read commands via MCP (ready, show, list). Everything else via CLI.\n\n**Tokens:** ~2-4k MCP + ~1-2k hooks\n\n**Pros:**\n- 60-80% reduction vs full MCP\n- No approval prompts for common queries\n- Cleaner syntax for frequent operations\n- Still efficient\n\n**Cons:**\n- Requires MCP server (additional setup)\n- Mixed interface (some MCP, some CLI)\n\n### Full MCP + Hooks (Legacy)\n\n**Setup:** Install full MCP server + `bd setup claude`\n\n**Tokens:** ~10.5k MCP + hooks\n\n**Pros:**\n- All commands as native function calls\n- Consistent interface\n\n**Cons:**\n- Highest token usage (worst for compute/energy/cost)\n- Slowest processing\n- Less sustainable\n\n### Recommendation\n\n1. **Start with CLI + hooks** - most efficient, works great\n2. **Try minimal MCP** if approval prompts become annoying\n3. 
**Avoid full MCP** - wasteful with no significant benefit\n```\n\n## Production Validation Checklist\n\nBefore making these documentation changes, validate CLI approach works reliably:\n\n### Phase 1: Pure CLI Validation\n- [ ] `bd prime` implemented and tested\n- [ ] Hooks installed and working in Claude Code\n- [ ] Real-world usage by at least 2-3 developers for 1+ weeks\n- [ ] No significant usability issues reported\n- [ ] Agent successfully uses bd via Bash tool\n- [ ] Document which commands (if any) have approval prompt issues\n\n### Phase 2: Hybrid Mode Investigation (Optional)\n- [ ] Test if MCP calls skip approval prompts vs Bash calls\n- [ ] Measure token cost per MCP command\n- [ ] Identify minimal set of commands worth exposing via MCP\n- [ ] Build minimal MCP server variant\n- [ ] Validate token savings (should be 60-80% vs full MCP)\n- [ ] User testing shows actual UX improvement\n\n### Phase 3: Documentation Update\n- [ ] Update based on validation results\n- [ ] Include measured token counts (not estimates)\n- [ ] Provide clear migration paths\n- [ ] Update `bd doctor` recommendations\n\n## Migration Guide (Optional)\n\nFor users currently using MCP:\n\n```markdown\n### Migrating from Full MCP to CLI + Hooks\n\nAlready using full MCP server? You can switch to the more efficient CLI approach:\n\n1. Install hooks: `bd setup claude`\n2. Test it works (hooks inject context, agent uses Bash tool)\n3. Remove MCP server from `~/.claude/settings.json`\n4. Restart Claude Code\n\nYou'll get the same functionality with 80-90% less token usage.\n\n### Migrating to Minimal MCP (If Available)\n\nIf you find approval prompts annoying for certain commands:\n\n1. Replace full MCP with minimal MCP in `~/.claude/settings.json`\n2. Restart Claude Code\n3. Verify high-frequency commands (ready, show, list) work via MCP\n4. 
Everything else automatically uses CLI\n\nYou'll get 60-80% token reduction vs full MCP while keeping the UX benefits.\n```\n\n## Files to Update\n\n- `README.md` - Add recommendation in AI Integration section\n- `AGENTS.md` - Add \"Choosing Your Integration Approach\" section early\n- `QUICKSTART.md` - Update AI integration section\n- `docs/` - Any other AI integration docs if they exist\n- `mcp-server/` - Create minimal variant if hybrid validated\n\n## Future: Update `bd init`\n\nOnce validated, update `bd init` to:\n- Default to recommending `bd setup claude` (hooks only)\n- Mention minimal MCP as option for UX improvement\n- Detect existing full MCP and suggest migration\n- Provide token usage estimates for each approach\n\n## MCP Server Architecture Note\n\n**Key insight:** MCP server doesn't have to expose all bd functionality.\n\nCurrent design exposes ~20+ commands (all bd subcommands). This is over-engineered.\n\n**Better design:**\n- **Minimal MCP**: 3-5 read-only commands (~2-4k tokens)\n- **CLI**: Everything else via Bash tool\n- **Hooks**: Context injection via `bd prime`\n\nThis achieves best of both worlds:\n- Low token usage (efficient)\n- No approval prompts for common queries (UX)\n- Explicit visibility for state changes (safety)\n\nIf validation shows NO meaningful benefit to MCP (even minimal), skip hybrid mode entirely and recommend pure CLI.","acceptance_criteria":"- Documentation explains CLI + hooks as recommended approach\n- Explains why context size matters (compute/energy/cost/latency)\n- Token comparison table shows 80-90% reduction\n- Migration guide for existing MCP users\n- Only deployed AFTER production validation\n- Clear that both approaches are supported","status":"open","priority":2,"issue_type":"task","created_at":"2025-11-12T00:15:25.923025-08:00","updated_at":"2025-11-12T00:18:16.786857-08:00","source_repo":"."} {"id":"bd-we4p","content_hash":"0e3248d31a6524c7a665960682cf2b159449fa31f5427771796dce8639059faf","title":"Cache 
getMultiRepoJSONLPaths() result during sync to avoid redundant calls","description":"From bd-xo6b code review: getMultiRepoJSONLPaths() is called 3x per sync cycle.\n\n**Current behavior:**\ndaemon_sync.go calls getMultiRepoJSONLPaths() three times per sync:\n- Line 505: Snapshot capture before pull\n- Line 575: Merge/prune after pull\n- Line 613: Base snapshot update after import\n\n**Cost per call:**\n- Config lookup (likely cached, but still overhead)\n- Path construction: O(N) where N = number of repos\n- String allocations: (N + 1) × filepath.Join() calls\n\n**Total per sync:** 3N path constructions + 3 config lookups + 3 slice allocations\n\n**Impact:**\n- For N=3 repos: Negligible (\u003c 1ms)\n- For N=10 repos: Still minimal\n- For N=100+ repos: Wasteful\n\n**Solution:**\nCall once at sync start, reuse result:\n\n```go\nfunc createSyncFunc(...) func() {\n return func() {\n // ... existing setup ...\n \n // Call once at start\n multiRepoPaths := getMultiRepoJSONLPaths()\n \n // Snapshot capture\n if multiRepoPaths != nil {\n for _, path := range multiRepoPaths {\n if err := captureLeftSnapshot(path); err != nil { ... }\n }\n }\n \n // ... later ...\n \n // Merge/prune - reuse same paths\n if multiRepoPaths != nil {\n for _, path := range multiRepoPaths { ... }\n }\n \n // ... later ...\n \n // Base snapshot update - reuse same paths\n if multiRepoPaths != nil {\n for _, path := range multiRepoPaths { ... }\n }\n }\n}\n```\n\n**Files:**\n- cmd/bd/daemon_sync.go:449-636 (createSyncFunc)\n\n**Note:** This is a performance optimization, not a correctness fix. 
Low priority unless multi-repo usage scales significantly.","status":"closed","priority":2,"issue_type":"chore","created_at":"2025-11-06T19:31:32.128674-08:00","updated_at":"2025-11-06T19:40:50.871176-08:00","closed_at":"2025-11-06T19:40:50.871176-08:00","source_repo":".","dependencies":[{"issue_id":"bd-we4p","depends_on_id":"bd-xo6b","type":"discovered-from","created_at":"2025-11-06T19:32:12.39754-08:00","created_by":"daemon"}]} {"id":"bd-wfmw","content_hash":"21706f1701be9fb51fa9e17d1dded6343bc2585e4bdb608239a20c5853d00220","title":"Integration Layer Implementation","description":"Build the adapter layer that makes Agent Mail optional and non-intrusive.","notes":"Progress: bd-m9th (Python adapter library) completed with full test coverage. Next: bd-fzbg (update python-agent example with Agent Mail integration).","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-11-07T22:42:09.356429-08:00","updated_at":"2025-11-08T00:20:30.888756-08:00","closed_at":"2025-11-08T00:20:30.888756-08:00","source_repo":".","dependencies":[{"issue_id":"bd-wfmw","depends_on_id":"bd-spmx","type":"blocks","created_at":"2025-11-07T22:42:09.357488-08:00","created_by":"daemon"}]} diff --git a/internal/storage/memory/memory.go b/internal/storage/memory/memory.go index 37c7adec..4cb6936a 100644 --- a/internal/storage/memory/memory.go +++ b/internal/storage/memory/memory.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/steveyegge/beads/internal/storage" "github.com/steveyegge/beads/internal/types" ) @@ -1096,6 +1097,16 @@ func (m *MemoryStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) { return nil, fmt.Errorf("UnderlyingConn not available in memory storage") } +// RunInTransaction executes a function within a transaction context. +// For MemoryStorage, this provides basic atomicity via mutex locking. +// If the function returns an error, changes are NOT automatically rolled back +// since MemoryStorage doesn't support true transaction rollback. 
+// +// Note: For full rollback support, callers should use SQLite storage. +func (m *MemoryStorage) RunInTransaction(ctx context.Context, fn func(tx storage.Transaction) error) error { + return fmt.Errorf("RunInTransaction not supported in --no-db mode: use SQLite storage for transaction support") +} + // REMOVED (bd-c7af): SyncAllCounters - no longer needed with hash IDs // MarkIssueDirty marks an issue as dirty for export diff --git a/internal/storage/sqlite/transaction.go b/internal/storage/sqlite/transaction.go new file mode 100644 index 00000000..1e4e0558 --- /dev/null +++ b/internal/storage/sqlite/transaction.go @@ -0,0 +1,758 @@ +// Package sqlite implements the storage interface using SQLite. +package sqlite + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/steveyegge/beads/internal/storage" + "github.com/steveyegge/beads/internal/types" +) + +// Verify sqliteTxStorage implements storage.Transaction at compile time +var _ storage.Transaction = (*sqliteTxStorage)(nil) + +// sqliteTxStorage implements the storage.Transaction interface for SQLite. +// It wraps a dedicated database connection with an active transaction. +type sqliteTxStorage struct { + conn *sql.Conn // Dedicated connection for the transaction + parent *SQLiteStorage // Parent storage for accessing shared state +} + +// RunInTransaction executes a function within a database transaction. +// +// The transaction uses BEGIN IMMEDIATE to acquire a write lock early, +// preventing deadlocks when multiple goroutines compete for the same lock. +// +// Transaction lifecycle: +// 1. Acquire dedicated connection from pool +// 2. Begin IMMEDIATE transaction with retry on SQLITE_BUSY +// 3. Execute user function with Transaction interface +// 4. On success: COMMIT +// 5. On error or panic: ROLLBACK +// +// Panic safety: If the callback panics, the transaction is rolled back +// and the panic is re-raised to the caller. 
+func (s *SQLiteStorage) RunInTransaction(ctx context.Context, fn func(tx storage.Transaction) error) error { + // Acquire a dedicated connection for the transaction. + // This ensures all operations in the transaction use the same connection. + conn, err := s.db.Conn(ctx) + if err != nil { + return fmt.Errorf("failed to acquire connection for transaction: %w", err) + } + defer func() { _ = conn.Close() }() + + // Start IMMEDIATE transaction to acquire write lock early. + // Use retry logic with exponential backoff to handle SQLITE_BUSY (bd-ola6) + if err := beginImmediateWithRetry(ctx, conn, 5, 10*time.Millisecond); err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + + // Track commit state for cleanup + committed := false + defer func() { + if !committed { + // Use background context to ensure rollback completes even if ctx is cancelled + _, _ = conn.ExecContext(context.Background(), "ROLLBACK") + } + }() + + // Handle panics: rollback and re-raise + defer func() { + if r := recover(); r != nil { + // Rollback will happen via the committed=false check above + panic(r) // Re-raise the panic + } + }() + + // Create transaction wrapper + txStorage := &sqliteTxStorage{ + conn: conn, + parent: s, + } + + // Execute user function + if err := fn(txStorage); err != nil { + return err // Rollback happens in defer + } + + // Commit the transaction + if _, err := conn.ExecContext(ctx, "COMMIT"); err != nil { + return fmt.Errorf("failed to commit transaction: %w", err) + } + committed = true + return nil +} + +// CreateIssue creates a new issue within the transaction. 
+func (t *sqliteTxStorage) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error { + // Validate issue before creating + if err := issue.Validate(); err != nil { + return fmt.Errorf("validation failed: %w", err) + } + + // Set timestamps + now := time.Now() + issue.CreatedAt = now + issue.UpdatedAt = now + + // Compute content hash (bd-95) + if issue.ContentHash == "" { + issue.ContentHash = issue.ComputeContentHash() + } + + // Get prefix from config (needed for both ID generation and validation) + var prefix string + err := t.conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix) + if err == sql.ErrNoRows || prefix == "" { + // CRITICAL: Reject operation if issue_prefix config is missing (bd-166) + return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix ' first)") + } else if err != nil { + return fmt.Errorf("failed to get config: %w", err) + } + + // Generate or validate ID + if issue.ID == "" { + // Generate hash-based ID with adaptive length based on database size (bd-ea2a13) + generatedID, err := GenerateIssueID(ctx, t.conn, prefix, issue, actor) + if err != nil { + return wrapDBError("generate issue ID", err) + } + issue.ID = generatedID + } else { + // Validate that explicitly provided ID matches the configured prefix (bd-177) + if err := ValidateIssueIDPrefix(issue.ID, prefix); err != nil { + return wrapDBError("validate issue ID prefix", err) + } + + // For hierarchical IDs (bd-a3f8e9.1), ensure parent exists + if strings.Contains(issue.ID, ".") { + // Try to resurrect entire parent chain if any parents are missing + resurrected, err := t.parent.tryResurrectParentChainWithConn(ctx, t.conn, issue.ID) + if err != nil { + return fmt.Errorf("failed to resurrect parent chain for %s: %w", issue.ID, err) + } + if !resurrected { + // Parent(s) not found in JSONL history - cannot proceed + lastDot := strings.LastIndex(issue.ID, ".") + parentID := 
issue.ID[:lastDot] + return fmt.Errorf("parent issue %s does not exist and could not be resurrected from JSONL history", parentID) + } + } + } + + // Insert issue + if err := insertIssue(ctx, t.conn, issue); err != nil { + return wrapDBError("insert issue", err) + } + + // Record creation event + if err := recordCreatedEvent(ctx, t.conn, issue, actor); err != nil { + return wrapDBError("record creation event", err) + } + + // Mark issue as dirty for incremental export + if err := markDirty(ctx, t.conn, issue.ID); err != nil { + return wrapDBError("mark issue dirty", err) + } + + return nil +} + +// CreateIssues creates multiple issues within the transaction. +func (t *sqliteTxStorage) CreateIssues(ctx context.Context, issues []*types.Issue, actor string) error { + if len(issues) == 0 { + return nil + } + + // Validate and prepare all issues first + now := time.Now() + for _, issue := range issues { + if err := issue.Validate(); err != nil { + return fmt.Errorf("validation failed for issue: %w", err) + } + issue.CreatedAt = now + issue.UpdatedAt = now + if issue.ContentHash == "" { + issue.ContentHash = issue.ComputeContentHash() + } + } + + // Get prefix from config + var prefix string + err := t.conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix) + if err == sql.ErrNoRows || prefix == "" { + return fmt.Errorf("database not initialized: issue_prefix config is missing") + } else if err != nil { + return fmt.Errorf("failed to get config: %w", err) + } + + // Generate IDs for issues that don't have them + for _, issue := range issues { + if issue.ID == "" { + generatedID, err := GenerateIssueID(ctx, t.conn, prefix, issue, actor) + if err != nil { + return wrapDBError("generate issue ID", err) + } + issue.ID = generatedID + } else { + if err := ValidateIssueIDPrefix(issue.ID, prefix); err != nil { + return wrapDBError("validate issue ID prefix", err) + } + } + } + + // Check for duplicate IDs within the batch + seenIDs := 
make(map[string]bool) + for _, issue := range issues { + if seenIDs[issue.ID] { + return fmt.Errorf("duplicate issue ID within batch: %s", issue.ID) + } + seenIDs[issue.ID] = true + } + + // Insert all issues + if err := insertIssues(ctx, t.conn, issues); err != nil { + return wrapDBError("insert issues", err) + } + + // Record creation events + if err := recordCreatedEvents(ctx, t.conn, issues, actor); err != nil { + return wrapDBError("record creation events", err) + } + + // Mark all issues as dirty + if err := markDirtyBatch(ctx, t.conn, issues); err != nil { + return wrapDBError("mark issues dirty", err) + } + + return nil +} + +// GetIssue retrieves an issue within the transaction. +// This enables read-your-writes semantics within the transaction. +func (t *sqliteTxStorage) GetIssue(ctx context.Context, id string) (*types.Issue, error) { + var issue types.Issue + var closedAt sql.NullTime + var estimatedMinutes sql.NullInt64 + var assignee sql.NullString + var externalRef sql.NullString + var compactedAt sql.NullTime + var originalSize sql.NullInt64 + var sourceRepo sql.NullString + var contentHash sql.NullString + var compactedAtCommit sql.NullString + + err := t.conn.QueryRowContext(ctx, ` + SELECT id, content_hash, title, description, design, acceptance_criteria, notes, + status, priority, issue_type, assignee, estimated_minutes, + created_at, updated_at, closed_at, external_ref, + compaction_level, compacted_at, compacted_at_commit, original_size, source_repo + FROM issues + WHERE id = ? 
+ `, id).Scan( + &issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design, + &issue.AcceptanceCriteria, &issue.Notes, &issue.Status, + &issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes, + &issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, + &issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, + ) + + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to get issue: %w", err) + } + + if contentHash.Valid { + issue.ContentHash = contentHash.String + } + if closedAt.Valid { + issue.ClosedAt = &closedAt.Time + } + if estimatedMinutes.Valid { + mins := int(estimatedMinutes.Int64) + issue.EstimatedMinutes = &mins + } + if assignee.Valid { + issue.Assignee = assignee.String + } + if externalRef.Valid { + issue.ExternalRef = &externalRef.String + } + if compactedAt.Valid { + issue.CompactedAt = &compactedAt.Time + } + if compactedAtCommit.Valid { + issue.CompactedAtCommit = &compactedAtCommit.String + } + if originalSize.Valid { + issue.OriginalSize = int(originalSize.Int64) + } + if sourceRepo.Valid { + issue.SourceRepo = sourceRepo.String + } + + // Fetch labels for this issue using the transaction connection + labels, err := t.getLabels(ctx, issue.ID) + if err != nil { + return nil, fmt.Errorf("failed to get labels: %w", err) + } + issue.Labels = labels + + return &issue, nil +} + +// getLabels retrieves labels using the transaction's connection +func (t *sqliteTxStorage) getLabels(ctx context.Context, issueID string) ([]string, error) { + rows, err := t.conn.QueryContext(ctx, ` + SELECT label FROM labels WHERE issue_id = ? 
ORDER BY label + `, issueID) + if err != nil { + return nil, fmt.Errorf("failed to get labels: %w", err) + } + defer func() { _ = rows.Close() }() + + var labels []string + for rows.Next() { + var label string + if err := rows.Scan(&label); err != nil { + return nil, err + } + labels = append(labels, label) + } + + return labels, nil +} + +// UpdateIssue updates an issue within the transaction. +func (t *sqliteTxStorage) UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error { + // Get old issue for event + oldIssue, err := t.GetIssue(ctx, id) + if err != nil { + return wrapDBError("get issue for update", err) + } + if oldIssue == nil { + return fmt.Errorf("issue %s not found", id) + } + + // Build update query with validated field names + setClauses := []string{"updated_at = ?"} + args := []interface{}{time.Now()} + + for key, value := range updates { + // Prevent SQL injection by validating field names + if !allowedUpdateFields[key] { + return fmt.Errorf("invalid field for update: %s", key) + } + + // Validate field values + if err := validateFieldUpdate(key, value); err != nil { + return wrapDBError("validate field update", err) + } + + setClauses = append(setClauses, fmt.Sprintf("%s = ?", key)) + args = append(args, value) + } + + // Auto-manage closed_at when status changes + setClauses, args = manageClosedAt(oldIssue, updates, setClauses, args) + + // Recompute content_hash if any content fields changed (bd-95) + contentChanged := false + contentFields := []string{"title", "description", "design", "acceptance_criteria", "notes", "status", "priority", "issue_type", "assignee", "external_ref"} + for _, field := range contentFields { + if _, exists := updates[field]; exists { + contentChanged = true + break + } + } + if contentChanged { + updatedIssue := *oldIssue + applyUpdatesToIssue(&updatedIssue, updates) + newHash := updatedIssue.ComputeContentHash() + setClauses = append(setClauses, "content_hash = ?") + args = 
append(args, newHash) + } + + args = append(args, id) + + // Update issue + query := fmt.Sprintf("UPDATE issues SET %s WHERE id = ?", strings.Join(setClauses, ", ")) // #nosec G201 - safe SQL with controlled column names + _, err = t.conn.ExecContext(ctx, query, args...) + if err != nil { + return fmt.Errorf("failed to update issue: %w", err) + } + + // Record event + oldData, err := json.Marshal(oldIssue) + if err != nil { + oldData = []byte(fmt.Sprintf(`{"id":"%s"}`, id)) + } + newData, err := json.Marshal(updates) + if err != nil { + newData = []byte(`{}`) + } + + eventType := determineEventType(oldIssue, updates) + + _, err = t.conn.ExecContext(ctx, ` + INSERT INTO events (issue_id, event_type, actor, old_value, new_value) + VALUES (?, ?, ?, ?, ?) + `, id, eventType, actor, string(oldData), string(newData)) + if err != nil { + return fmt.Errorf("failed to record event: %w", err) + } + + // Mark issue as dirty + if err := markDirty(ctx, t.conn, id); err != nil { + return fmt.Errorf("failed to mark issue dirty: %w", err) + } + + return nil +} + +// applyUpdatesToIssue applies update map to issue for content hash recomputation +func applyUpdatesToIssue(issue *types.Issue, updates map[string]interface{}) { + for key, value := range updates { + switch key { + case "title": + issue.Title = value.(string) + case "description": + issue.Description = value.(string) + case "design": + issue.Design = value.(string) + case "acceptance_criteria": + issue.AcceptanceCriteria = value.(string) + case "notes": + issue.Notes = value.(string) + case "status": + if s, ok := value.(types.Status); ok { + issue.Status = s + } else { + issue.Status = types.Status(value.(string)) + } + case "priority": + issue.Priority = value.(int) + case "issue_type": + if t, ok := value.(types.IssueType); ok { + issue.IssueType = t + } else { + issue.IssueType = types.IssueType(value.(string)) + } + case "assignee": + if value == nil { + issue.Assignee = "" + } else { + issue.Assignee = 
value.(string) + } + case "external_ref": + if value == nil { + issue.ExternalRef = nil + } else { + switch v := value.(type) { + case string: + issue.ExternalRef = &v + case *string: + issue.ExternalRef = v + } + } + } + } +} + +// CloseIssue closes an issue within the transaction. +func (t *sqliteTxStorage) CloseIssue(ctx context.Context, id string, reason string, actor string) error { + now := time.Now() + + result, err := t.conn.ExecContext(ctx, ` + UPDATE issues SET status = ?, closed_at = ?, updated_at = ? + WHERE id = ? + `, types.StatusClosed, now, now, id) + if err != nil { + return fmt.Errorf("failed to close issue: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + if rows == 0 { + return fmt.Errorf("issue not found: %s", id) + } + + _, err = t.conn.ExecContext(ctx, ` + INSERT INTO events (issue_id, event_type, actor, comment) + VALUES (?, ?, ?, ?) + `, id, types.EventClosed, actor, reason) + if err != nil { + return fmt.Errorf("failed to record event: %w", err) + } + + // Mark issue as dirty + if err := markDirty(ctx, t.conn, id); err != nil { + return fmt.Errorf("failed to mark issue dirty: %w", err) + } + + return nil +} + +// DeleteIssue deletes an issue within the transaction. +func (t *sqliteTxStorage) DeleteIssue(ctx context.Context, id string) error { + // Delete dependencies (both directions) + _, err := t.conn.ExecContext(ctx, `DELETE FROM dependencies WHERE issue_id = ? 
OR depends_on_id = ?`, id, id) + if err != nil { + return fmt.Errorf("failed to delete dependencies: %w", err) + } + + // Delete events + _, err = t.conn.ExecContext(ctx, `DELETE FROM events WHERE issue_id = ?`, id) + if err != nil { + return fmt.Errorf("failed to delete events: %w", err) + } + + // Delete from dirty_issues + _, err = t.conn.ExecContext(ctx, `DELETE FROM dirty_issues WHERE issue_id = ?`, id) + if err != nil { + return fmt.Errorf("failed to delete dirty marker: %w", err) + } + + // Delete the issue itself + result, err := t.conn.ExecContext(ctx, `DELETE FROM issues WHERE id = ?`, id) + if err != nil { + return fmt.Errorf("failed to delete issue: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to check rows affected: %w", err) + } + if rowsAffected == 0 { + return fmt.Errorf("issue not found: %s", id) + } + + return nil +} + +// AddDependency adds a dependency between issues within the transaction. +func (t *sqliteTxStorage) AddDependency(ctx context.Context, dep *types.Dependency, actor string) error { + // Validate dependency type + if !dep.Type.IsValid() { + return fmt.Errorf("invalid dependency type: %s (must be blocks, related, parent-child, or discovered-from)", dep.Type) + } + + // Validate that both issues exist + issueExists, err := t.GetIssue(ctx, dep.IssueID) + if err != nil { + return fmt.Errorf("failed to check issue %s: %w", dep.IssueID, err) + } + if issueExists == nil { + return fmt.Errorf("issue %s not found", dep.IssueID) + } + + dependsOnExists, err := t.GetIssue(ctx, dep.DependsOnID) + if err != nil { + return fmt.Errorf("failed to check dependency %s: %w", dep.DependsOnID, err) + } + if dependsOnExists == nil { + return fmt.Errorf("dependency target %s not found", dep.DependsOnID) + } + + // Prevent self-dependency + if dep.IssueID == dep.DependsOnID { + return fmt.Errorf("issue cannot depend on itself") + } + + // Validate parent-child dependency direction + if dep.Type 
== types.DepParentChild { + if issueExists.IssueType == types.TypeEpic && dependsOnExists.IssueType != types.TypeEpic { + return fmt.Errorf("invalid parent-child dependency: parent (%s) cannot depend on child (%s). Use: bd dep add %s %s --type parent-child", + dep.IssueID, dep.DependsOnID, dep.DependsOnID, dep.IssueID) + } + } + + if dep.CreatedAt.IsZero() { + dep.CreatedAt = time.Now() + } + if dep.CreatedBy == "" { + dep.CreatedBy = actor + } + + // Cycle detection + var cycleExists bool + err = t.conn.QueryRowContext(ctx, ` + WITH RECURSIVE paths AS ( + SELECT + issue_id, + depends_on_id, + 1 as depth + FROM dependencies + WHERE issue_id = ? + + UNION ALL + + SELECT + d.issue_id, + d.depends_on_id, + p.depth + 1 + FROM dependencies d + JOIN paths p ON d.issue_id = p.depends_on_id + WHERE p.depth < 100 + ) + SELECT EXISTS( + SELECT 1 FROM paths + WHERE depends_on_id = ? + ) + `, dep.DependsOnID, dep.IssueID).Scan(&cycleExists) + + if err != nil { + return fmt.Errorf("failed to check for cycles: %w", err) + } + + if cycleExists { + return fmt.Errorf("cannot add dependency: would create a cycle (%s → %s → ... → %s)", + dep.IssueID, dep.DependsOnID, dep.IssueID) + } + + // Insert dependency + _, err = t.conn.ExecContext(ctx, ` + INSERT INTO dependencies (issue_id, depends_on_id, type, created_at, created_by) + VALUES (?, ?, ?, ?, ?) + `, dep.IssueID, dep.DependsOnID, dep.Type, dep.CreatedAt, dep.CreatedBy) + if err != nil { + return fmt.Errorf("failed to add dependency: %w", err) + } + + // Record event + _, err = t.conn.ExecContext(ctx, ` + INSERT INTO events (issue_id, event_type, actor, comment) + VALUES (?, ?, ?, ?) 
+ `, dep.IssueID, types.EventDependencyAdded, actor, + fmt.Sprintf("Added dependency: %s %s %s", dep.IssueID, dep.Type, dep.DependsOnID)) + if err != nil { + return fmt.Errorf("failed to record event: %w", err) + } + + // Mark both issues as dirty + if err := markDirty(ctx, t.conn, dep.IssueID); err != nil { + return fmt.Errorf("failed to mark issue dirty: %w", err) + } + if err := markDirty(ctx, t.conn, dep.DependsOnID); err != nil { + return fmt.Errorf("failed to mark depends-on issue dirty: %w", err) + } + + return nil +} + +// RemoveDependency removes a dependency within the transaction. +func (t *sqliteTxStorage) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error { + result, err := t.conn.ExecContext(ctx, ` + DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ? + `, issueID, dependsOnID) + if err != nil { + return fmt.Errorf("failed to remove dependency: %w", err) + } + + // Check if dependency existed + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to check rows affected: %w", err) + } + if rowsAffected == 0 { + return fmt.Errorf("dependency from %s to %s does not exist", issueID, dependsOnID) + } + + _, err = t.conn.ExecContext(ctx, ` + INSERT INTO events (issue_id, event_type, actor, comment) + VALUES (?, ?, ?, ?) + `, issueID, types.EventDependencyRemoved, actor, + fmt.Sprintf("Removed dependency on %s", dependsOnID)) + if err != nil { + return fmt.Errorf("failed to record event: %w", err) + } + + // Mark both issues as dirty + if err := markDirty(ctx, t.conn, issueID); err != nil { + return fmt.Errorf("failed to mark issue dirty: %w", err) + } + if err := markDirty(ctx, t.conn, dependsOnID); err != nil { + return fmt.Errorf("failed to mark depends-on issue dirty: %w", err) + } + + return nil +} + +// AddLabel adds a label to an issue within the transaction. 
+func (t *sqliteTxStorage) AddLabel(ctx context.Context, issueID, label, actor string) error { + result, err := t.conn.ExecContext(ctx, ` + INSERT OR IGNORE INTO labels (issue_id, label) VALUES (?, ?) + `, issueID, label) + if err != nil { + return fmt.Errorf("failed to add label: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to check rows affected: %w", err) + } + if rows == 0 { + // Label already existed, no change made + return nil + } + + // Record event + _, err = t.conn.ExecContext(ctx, ` + INSERT INTO events (issue_id, event_type, actor, comment) + VALUES (?, ?, ?, ?) + `, issueID, types.EventLabelAdded, actor, fmt.Sprintf("Added label: %s", label)) + if err != nil { + return fmt.Errorf("failed to record event: %w", err) + } + + // Mark issue as dirty + if err := markDirty(ctx, t.conn, issueID); err != nil { + return fmt.Errorf("failed to mark issue dirty: %w", err) + } + + return nil +} + +// RemoveLabel removes a label from an issue within the transaction. +func (t *sqliteTxStorage) RemoveLabel(ctx context.Context, issueID, label, actor string) error { + result, err := t.conn.ExecContext(ctx, ` + DELETE FROM labels WHERE issue_id = ? AND label = ? + `, issueID, label) + if err != nil { + return fmt.Errorf("failed to remove label: %w", err) + } + + rows, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to check rows affected: %w", err) + } + if rows == 0 { + // Label didn't exist, no change made + return nil + } + + // Record event + _, err = t.conn.ExecContext(ctx, ` + INSERT INTO events (issue_id, event_type, actor, comment) + VALUES (?, ?, ?, ?) 
+ `, issueID, types.EventLabelRemoved, actor, fmt.Sprintf("Removed label: %s", label)) + if err != nil { + return fmt.Errorf("failed to record event: %w", err) + } + + // Mark issue as dirty + if err := markDirty(ctx, t.conn, issueID); err != nil { + return fmt.Errorf("failed to mark issue dirty: %w", err) + } + + return nil +} diff --git a/internal/storage/sqlite/transaction_test.go b/internal/storage/sqlite/transaction_test.go new file mode 100644 index 00000000..ae689c76 --- /dev/null +++ b/internal/storage/sqlite/transaction_test.go @@ -0,0 +1,857 @@ +package sqlite + +import ( + "context" + "testing" + + "github.com/steveyegge/beads/internal/storage" + "github.com/steveyegge/beads/internal/types" +) + +// TestRunInTransactionBasic verifies the RunInTransaction method exists and +// can be called. +func TestRunInTransactionBasic(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Test that we can call RunInTransaction + callCount := 0 + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + callCount++ + return nil + }) + + if err != nil { + t.Errorf("RunInTransaction returned error: %v", err) + } + + if callCount != 1 { + t.Errorf("expected callback to be called once, got %d", callCount) + } +} + +// TestRunInTransactionRollbackOnError verifies that returning an error +// from the callback does not cause a panic and the error is propagated. 
+func TestRunInTransactionRollbackOnError(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + expectedErr := "intentional test error" + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + return &testError{msg: expectedErr} + }) + + if err == nil { + t.Error("expected error to be returned, got nil") + } + + if err.Error() != expectedErr { + t.Errorf("expected error %q, got %q", expectedErr, err.Error()) + } +} + +// TestRunInTransactionPanicRecovery verifies that panics in the callback +// are recovered and re-raised after rollback. +func TestRunInTransactionPanicRecovery(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + defer func() { + if r := recover(); r == nil { + t.Error("expected panic to be re-raised, but no panic occurred") + } else if r != "test panic" { + t.Errorf("unexpected panic value: %v", r) + } + }() + + _ = store.RunInTransaction(ctx, func(tx storage.Transaction) error { + panic("test panic") + }) + + t.Error("should not reach here - panic should have been re-raised") +} + +// TestTransactionCreateIssue tests creating an issue within a transaction. 
+func TestTransactionCreateIssue(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + var createdID string + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + issue := &types.Issue{ + Title: "Test Issue", + Status: types.StatusOpen, + Priority: 2, + IssueType: types.TypeTask, + } + if err := tx.CreateIssue(ctx, issue, "test-actor"); err != nil { + return err + } + createdID = issue.ID + return nil + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + if createdID == "" { + t.Error("expected issue ID to be set after creation") + } + + // Verify issue exists after commit + issue, err := store.GetIssue(ctx, createdID) + if err != nil { + t.Fatalf("GetIssue failed: %v", err) + } + if issue == nil { + t.Error("expected issue to exist after transaction commit") + } + if issue.Title != "Test Issue" { + t.Errorf("expected title 'Test Issue', got %q", issue.Title) + } +} + +// TestTransactionRollbackOnCreateError tests that issues are not created +// when transaction rolls back due to error. 
+func TestTransactionRollbackOnCreateError(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + var createdID string + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + issue := &types.Issue{ + Title: "Test Issue", + Status: types.StatusOpen, + Priority: 2, + IssueType: types.TypeTask, + } + if err := tx.CreateIssue(ctx, issue, "test-actor"); err != nil { + return err + } + createdID = issue.ID + + // Return error to trigger rollback + return &testError{msg: "intentional rollback"} + }) + + if err == nil { + t.Error("expected error from transaction") + } + + // Verify issue does NOT exist after rollback + if createdID != "" { + issue, err := store.GetIssue(ctx, createdID) + if err != nil { + t.Fatalf("GetIssue failed: %v", err) + } + if issue != nil { + t.Error("expected issue to NOT exist after transaction rollback") + } + } +} + +// TestTransactionMultipleIssues tests creating multiple issues atomically. +func TestTransactionMultipleIssues(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + var ids []string + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + for i := 0; i < 3; i++ { + issue := &types.Issue{ + Title: "Test Issue", + Status: types.StatusOpen, + Priority: 2, + IssueType: types.TypeTask, + } + if err := tx.CreateIssue(ctx, issue, "test-actor"); err != nil { + return err + } + ids = append(ids, issue.ID) + } + return nil + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify all issues exist + for _, id := range ids { + issue, err := store.GetIssue(ctx, id) + if err != nil { + t.Fatalf("GetIssue failed for %s: %v", id, err) + } + if issue == nil { + t.Errorf("expected issue %s to exist", id) + } + } +} + +// TestTransactionUpdateIssue tests updating an issue within a transaction. 
+func TestTransactionUpdateIssue(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create issue first + issue := &types.Issue{ + Title: "Original Title", + Status: types.StatusOpen, + Priority: 2, + IssueType: types.TypeTask, + } + if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + + // Update in transaction + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + return tx.UpdateIssue(ctx, issue.ID, map[string]interface{}{ + "title": "Updated Title", + }, "test-actor") + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify update + updated, err := store.GetIssue(ctx, issue.ID) + if err != nil { + t.Fatalf("GetIssue failed: %v", err) + } + if updated.Title != "Updated Title" { + t.Errorf("expected title 'Updated Title', got %q", updated.Title) + } +} + +// TestTransactionCloseIssue tests closing an issue within a transaction. +func TestTransactionCloseIssue(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create issue first + issue := &types.Issue{ + Title: "Test Issue", + Status: types.StatusOpen, + Priority: 2, + IssueType: types.TypeTask, + } + if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + + // Close in transaction + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + return tx.CloseIssue(ctx, issue.ID, "Done", "test-actor") + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify closed + closed, err := store.GetIssue(ctx, issue.ID) + if err != nil { + t.Fatalf("GetIssue failed: %v", err) + } + if closed.Status != types.StatusClosed { + t.Errorf("expected status 'closed', got %q", closed.Status) + } +} + +// TestTransactionDeleteIssue tests deleting an issue within a transaction. 
+func TestTransactionDeleteIssue(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create issue first + issue := &types.Issue{ + Title: "Test Issue", + Status: types.StatusOpen, + Priority: 2, + IssueType: types.TypeTask, + } + if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + + // Delete in transaction + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + return tx.DeleteIssue(ctx, issue.ID) + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify deleted + deleted, err := store.GetIssue(ctx, issue.ID) + if err != nil { + t.Fatalf("GetIssue failed: %v", err) + } + if deleted != nil { + t.Error("expected issue to be deleted") + } +} + +// TestTransactionGetIssue tests read-your-writes within a transaction. +func TestTransactionGetIssue(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + // Create issue + issue := &types.Issue{ + Title: "Test Issue", + Status: types.StatusOpen, + Priority: 2, + IssueType: types.TypeTask, + } + if err := tx.CreateIssue(ctx, issue, "test-actor"); err != nil { + return err + } + + // Read it back within same transaction (read-your-writes) + retrieved, err := tx.GetIssue(ctx, issue.ID) + if err != nil { + return err + } + if retrieved == nil { + t.Error("expected to read issue within transaction") + } + if retrieved.Title != "Test Issue" { + t.Errorf("expected title 'Test Issue', got %q", retrieved.Title) + } + + return nil + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } +} + +// TestTransactionCreateIssues tests batch issue creation within a transaction. 
+func TestTransactionCreateIssues(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + var ids []string + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + issues := []*types.Issue{ + {Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}, + {Title: "Issue 2", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}, + {Title: "Issue 3", Status: types.StatusOpen, Priority: 3, IssueType: types.TypeTask}, + } + if err := tx.CreateIssues(ctx, issues, "test-actor"); err != nil { + return err + } + for _, issue := range issues { + ids = append(ids, issue.ID) + } + return nil + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify all issues exist + for i, id := range ids { + issue, err := store.GetIssue(ctx, id) + if err != nil { + t.Fatalf("GetIssue failed for %s: %v", id, err) + } + if issue == nil { + t.Errorf("expected issue %s to exist", id) + } + expectedTitle := "Issue " + string(rune('1'+i)) + if issue.Title != expectedTitle { + t.Errorf("expected title %q, got %q", expectedTitle, issue.Title) + } + } +} + +// TestTransactionAddDependency tests adding a dependency within a transaction. 
+func TestTransactionAddDependency(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create two issues first + issue1 := &types.Issue{Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask} + issue2 := &types.Issue{Title: "Issue 2", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask} + if err := store.CreateIssue(ctx, issue1, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + if err := store.CreateIssue(ctx, issue2, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + + // Add dependency in transaction + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + dep := &types.Dependency{ + IssueID: issue1.ID, + DependsOnID: issue2.ID, + Type: types.DepBlocks, + } + return tx.AddDependency(ctx, dep, "test-actor") + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify dependency exists + deps, err := store.GetDependencies(ctx, issue1.ID) + if err != nil { + t.Fatalf("GetDependencies failed: %v", err) + } + if len(deps) != 1 { + t.Errorf("expected 1 dependency, got %d", len(deps)) + } + if deps[0].ID != issue2.ID { + t.Errorf("expected dependency on %s, got %s", issue2.ID, deps[0].ID) + } +} + +// TestTransactionRemoveDependency tests removing a dependency within a transaction. 
+func TestTransactionRemoveDependency(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create two issues and add dependency + issue1 := &types.Issue{Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask} + issue2 := &types.Issue{Title: "Issue 2", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask} + if err := store.CreateIssue(ctx, issue1, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + if err := store.CreateIssue(ctx, issue2, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + dep := &types.Dependency{IssueID: issue1.ID, DependsOnID: issue2.ID, Type: types.DepBlocks} + if err := store.AddDependency(ctx, dep, "test-actor"); err != nil { + t.Fatalf("AddDependency failed: %v", err) + } + + // Remove dependency in transaction + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + return tx.RemoveDependency(ctx, issue1.ID, issue2.ID, "test-actor") + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify dependency is gone + deps, err := store.GetDependencies(ctx, issue1.ID) + if err != nil { + t.Fatalf("GetDependencies failed: %v", err) + } + if len(deps) != 0 { + t.Errorf("expected 0 dependencies, got %d", len(deps)) + } +} + +// TestTransactionAddLabel tests adding a label within a transaction. 
+func TestTransactionAddLabel(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create issue first + issue := &types.Issue{Title: "Test Issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask} + if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + + // Add label in transaction + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + return tx.AddLabel(ctx, issue.ID, "test-label", "test-actor") + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify label exists + labels, err := store.GetLabels(ctx, issue.ID) + if err != nil { + t.Fatalf("GetLabels failed: %v", err) + } + if len(labels) != 1 { + t.Errorf("expected 1 label, got %d", len(labels)) + } + if labels[0] != "test-label" { + t.Errorf("expected label 'test-label', got %s", labels[0]) + } +} + +// TestTransactionRemoveLabel tests removing a label within a transaction. 
+func TestTransactionRemoveLabel(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create issue and add label + issue := &types.Issue{Title: "Test Issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask} + if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + if err := store.AddLabel(ctx, issue.ID, "test-label", "test-actor"); err != nil { + t.Fatalf("AddLabel failed: %v", err) + } + + // Remove label in transaction + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + return tx.RemoveLabel(ctx, issue.ID, "test-label", "test-actor") + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify label is gone + labels, err := store.GetLabels(ctx, issue.ID) + if err != nil { + t.Fatalf("GetLabels failed: %v", err) + } + if len(labels) != 0 { + t.Errorf("expected 0 labels, got %d", len(labels)) + } +} + +// TestTransactionAtomicIssueWithDependency tests creating issue + adding dependency atomically. 
+func TestTransactionAtomicIssueWithDependency(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + // Create parent issue first + parent := &types.Issue{Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask} + if err := store.CreateIssue(ctx, parent, "test-actor"); err != nil { + t.Fatalf("CreateIssue failed: %v", err) + } + + var childID string + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + // Create child issue + child := &types.Issue{Title: "Child", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask} + if err := tx.CreateIssue(ctx, child, "test-actor"); err != nil { + return err + } + childID = child.ID + + // Add dependency: child blocks parent (child must be done before parent) + dep := &types.Dependency{ + IssueID: parent.ID, + DependsOnID: child.ID, + Type: types.DepBlocks, + } + return tx.AddDependency(ctx, dep, "test-actor") + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify both issue and dependency exist + child, err := store.GetIssue(ctx, childID) + if err != nil || child == nil { + t.Error("expected child issue to exist") + } + + deps, err := store.GetDependencies(ctx, parent.ID) + if err != nil { + t.Fatalf("GetDependencies failed: %v", err) + } + if len(deps) != 1 || deps[0].ID != childID { + t.Error("expected dependency from parent to child") + } +} + +// TestTransactionAtomicIssueWithLabels tests creating issue + adding labels atomically. 
+func TestTransactionAtomicIssueWithLabels(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + var issueID string + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + // Create issue + issue := &types.Issue{Title: "Test Issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask} + if err := tx.CreateIssue(ctx, issue, "test-actor"); err != nil { + return err + } + issueID = issue.ID + + // Add multiple labels + for _, label := range []string{"label1", "label2", "label3"} { + if err := tx.AddLabel(ctx, issue.ID, label, "test-actor"); err != nil { + return err + } + } + return nil + }) + + if err != nil { + t.Fatalf("RunInTransaction failed: %v", err) + } + + // Verify issue and all labels exist + issue, err := store.GetIssue(ctx, issueID) + if err != nil || issue == nil { + t.Error("expected issue to exist") + } + + labels, err := store.GetLabels(ctx, issueID) + if err != nil { + t.Fatalf("GetLabels failed: %v", err) + } + if len(labels) != 3 { + t.Errorf("expected 3 labels, got %d", len(labels)) + } +} + +// TestTransactionEmpty tests that an empty transaction commits successfully. +func TestTransactionEmpty(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + // Do nothing - empty transaction + return nil + }) + + if err != nil { + t.Errorf("empty transaction should succeed, got error: %v", err) + } +} + +// TestTransactionConcurrent tests multiple concurrent transactions. 
+func TestTransactionConcurrent(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + const numGoroutines = 10 + errors := make(chan error, numGoroutines) + ids := make(chan string, numGoroutines) + + // Launch concurrent transactions + for i := 0; i < numGoroutines; i++ { + go func(index int) { + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + issue := &types.Issue{ + Title: "Concurrent Issue", + Status: types.StatusOpen, + Priority: index % 4, + IssueType: types.TypeTask, + } + if err := tx.CreateIssue(ctx, issue, "test-actor"); err != nil { + return err + } + ids <- issue.ID + return nil + }) + errors <- err + }(i) + } + + // Collect results + var errs []error + var createdIDs []string + for i := 0; i < numGoroutines; i++ { + if err := <-errors; err != nil { + errs = append(errs, err) + } + } + close(ids) + for id := range ids { + createdIDs = append(createdIDs, id) + } + + if len(errs) > 0 { + t.Errorf("some transactions failed: %v", errs) + } + + if len(createdIDs) != numGoroutines { + t.Errorf("expected %d issues created, got %d", numGoroutines, len(createdIDs)) + } + + // Verify all issues exist + for _, id := range createdIDs { + issue, err := store.GetIssue(ctx, id) + if err != nil || issue == nil { + t.Errorf("expected issue %s to exist", id) + } + } +} + +// TestTransactionNestedFailure tests that when first op succeeds but second fails, +// both are rolled back. 
+func TestTransactionNestedFailure(t *testing.T) { + ctx := context.Background() + store, cleanup := setupTestDB(t) + defer cleanup() + + var firstIssueID string + err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + // First operation succeeds + issue1 := &types.Issue{ + Title: "First Issue", + Status: types.StatusOpen, + Priority: 1, + IssueType: types.TypeTask, + } + if err := tx.CreateIssue(ctx, issue1, "test-actor"); err != nil { + return err + } + firstIssueID = issue1.ID + + // Second operation fails + issue2 := &types.Issue{ + Title: "", // Invalid - missing title + Status: types.StatusOpen, + Priority: 2, + } + return tx.CreateIssue(ctx, issue2, "test-actor") + }) + + if err == nil { + t.Error("expected error from invalid second issue") + } + + // Verify first issue was NOT created (rolled back) + if firstIssueID != "" { + issue, err := store.GetIssue(ctx, firstIssueID) + if err != nil { + t.Fatalf("GetIssue failed: %v", err) + } + if issue != nil { + t.Error("expected first issue to be rolled back, but it exists") + } + } +} + +// TestTransactionAtomicPlanApproval simulates a VC plan approval workflow: +// creating multiple issues with dependencies and labels atomically. 
func TestTransactionAtomicPlanApproval(t *testing.T) {
	ctx := context.Background()
	store, cleanup := setupTestDB(t)
	defer cleanup()

	var epicID, task1ID, task2ID string
	err := store.RunInTransaction(ctx, func(tx storage.Transaction) error {
		// Create epic
		epic := &types.Issue{
			Title:     "Epic: Feature Implementation",
			Status:    types.StatusOpen,
			Priority:  1,
			IssueType: types.TypeEpic,
		}
		if err := tx.CreateIssue(ctx, epic, "test-actor"); err != nil {
			return err
		}
		epicID = epic.ID

		// Create task 1
		task1 := &types.Issue{
			Title:     "Task 1: Setup",
			Status:    types.StatusOpen,
			Priority:  2,
			IssueType: types.TypeTask,
		}
		if err := tx.CreateIssue(ctx, task1, "test-actor"); err != nil {
			return err
		}
		task1ID = task1.ID

		// Create task 2
		task2 := &types.Issue{
			Title:     "Task 2: Implementation",
			Status:    types.StatusOpen,
			Priority:  2,
			IssueType: types.TypeTask,
		}
		if err := tx.CreateIssue(ctx, task2, "test-actor"); err != nil {
			return err
		}
		task2ID = task2.ID

		// Add dependencies: task2 depends on task1
		dep := &types.Dependency{
			IssueID:     task2ID,
			DependsOnID: task1ID,
			Type:        types.DepBlocks,
		}
		if err := tx.AddDependency(ctx, dep, "test-actor"); err != nil {
			return err
		}

		// Add labels to all issues
		for _, id := range []string{epicID, task1ID, task2ID} {
			if err := tx.AddLabel(ctx, id, "feature-x", "test-actor"); err != nil {
				return err
			}
		}

		return nil
	})

	if err != nil {
		t.Fatalf("RunInTransaction failed: %v", err)
	}

	// Verify all issues exist
	for _, id := range []string{epicID, task1ID, task2ID} {
		issue, err := store.GetIssue(ctx, id)
		if err != nil || issue == nil {
			t.Errorf("expected issue %s to exist", id)
		}
	}

	// Verify dependency. The || short-circuits, so deps[0] is only read
	// when len(deps) == 1.
	deps, err := store.GetDependencies(ctx, task2ID)
	if err != nil {
		t.Fatalf("GetDependencies failed: %v", err)
	}
	if len(deps) != 1 || deps[0].ID != task1ID {
		t.Error("expected task2 to depend on task1")
	}

	// Verify labels (short-circuit guards labels[0] here as well)
	for _, id := range []string{epicID, task1ID, task2ID} {
		labels, err := store.GetLabels(ctx, id)
		if err != nil {
			t.Fatalf("GetLabels failed: %v", err)
		}
		if len(labels) != 1 || labels[0] != "feature-x" {
			t.Errorf("expected 'feature-x' label on %s", id)
		}
	}
}

// testError is a simple error type for testing rollback paths without
// depending on a concrete storage error value.
type testError struct {
	msg string
}

func (e *testError) Error() string {
	return e.msg
}
diff --git a/internal/storage/storage.go b/internal/storage/storage.go
index 6983af56..ecf0f376 100644
--- a/internal/storage/storage.go
+++ b/internal/storage/storage.go
@@ -8,6 +8,61 @@ import (
 	"github.com/steveyegge/beads/internal/types"
 )
 
+// Transaction provides atomic multi-operation support within a single database transaction.
+//
+// The Transaction interface exposes a subset of Storage methods that execute within
+// a single database transaction. This enables atomic workflows where multiple operations
+// must either all succeed or all fail (e.g., creating issues with dependencies and labels).
+//
+// # Transaction Semantics
+//
+//   - All operations within the transaction share the same database connection
+//   - Changes are not visible to other connections until commit
+//   - If any operation returns an error, the transaction is rolled back
+//   - If the callback function panics, the transaction is rolled back
+//   - On successful return from the callback, the transaction is committed
+//
+// # SQLite Specifics
+//
+//   - Uses BEGIN IMMEDIATE mode to acquire write lock early
+//   - This prevents deadlocks when multiple operations compete for the same lock
+//   - IMMEDIATE mode serializes concurrent transactions properly
+//
+// # Example Usage
+//
+//	err := store.RunInTransaction(ctx, func(tx storage.Transaction) error {
+//		// Create parent issue
+//		if err := tx.CreateIssue(ctx, parentIssue, actor); err != nil {
+//			return err // Triggers rollback
+//		}
+//		// Create child issue
+//		if err := tx.CreateIssue(ctx, childIssue, actor); err != nil {
+//			return err // Triggers rollback
+//		}
+//		// Add dependency between them
+//		if err := tx.AddDependency(ctx, dep, actor); err != nil {
+//			return err // Triggers rollback
+//		}
+//		return nil // Triggers commit
+//	})
+type Transaction interface {
+	// Issue operations
+	CreateIssue(ctx context.Context, issue *types.Issue, actor string) error
+	CreateIssues(ctx context.Context, issues []*types.Issue, actor string) error
+	UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error
+	CloseIssue(ctx context.Context, id string, reason string, actor string) error
+	DeleteIssue(ctx context.Context, id string) error
+	GetIssue(ctx context.Context, id string) (*types.Issue, error) // For read-your-writes within transaction
+
+	// Dependency operations
+	AddDependency(ctx context.Context, dep *types.Dependency, actor string) error
+	RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error
+
+	// Label operations
+	AddLabel(ctx context.Context, issueID, label, actor string) error
+	RemoveLabel(ctx context.Context, issueID, label, actor string) error
+}
+
 // Storage defines the interface for issue storage backends
 type Storage interface {
 	// Issues
@@ -89,6 +144,26 @@ type Storage interface {
 	RenameDependencyPrefix(ctx context.Context, oldPrefix, newPrefix string) error
 	RenameCounterPrefix(ctx context.Context, oldPrefix, newPrefix string) error
 
+	// Transactions
+	//
+	// RunInTransaction executes a function within a database transaction.
+	// The Transaction interface provides atomic multi-operation support.
+	//
+	// Transaction behavior:
+	//   - If fn returns nil, the transaction is committed
+	//   - If fn returns an error, the transaction is rolled back
+	//   - If fn panics, the transaction is rolled back and the panic is re-raised
+	//   - Uses BEGIN IMMEDIATE for SQLite to acquire write lock early
+	//
+	// Example:
+	//	err := store.RunInTransaction(ctx, func(tx storage.Transaction) error {
+	//		if err := tx.CreateIssue(ctx, issue, actor); err != nil {
+	//			return err // Triggers rollback
+	//		}
+	//		return nil // Triggers commit
+	//	})
+	RunInTransaction(ctx context.Context, fn func(tx Transaction) error) error
+
 	// Lifecycle
 	Close() error