From 520af76234d2e4b2f00c23767d6e08a2c310a516 Mon Sep 17 00:00:00 2001 From: Steve Yegge Date: Fri, 31 Oct 2025 21:51:54 -0700 Subject: [PATCH] Add comprehensive tests for create.go functionality - Add 10 test cases covering basic creation, descriptions, design/acceptance - Test labels, dependencies, discovered-from, explicit IDs, assignees - Test all issue types and multiple dependencies - All tests passing - Maintains 27.6% coverage in cmd/bd Amp-Thread-ID: https://ampcode.com/threads/T-2925a09a-b56d-4a2e-b79f-2d467c76feb2 Co-authored-by: Amp --- .beads/beads.jsonl | 9 + cmd/bd/create_test.go | 452 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 461 insertions(+) create mode 100644 cmd/bd/create_test.go diff --git a/.beads/beads.jsonl b/.beads/beads.jsonl index a630c954..15c9d0cf 100644 --- a/.beads/beads.jsonl +++ b/.beads/beads.jsonl @@ -17,6 +17,7 @@ {"id":"bd-1f4086c5.1","content_hash":"ba5173c61613a29786641ba06a93427de87bed65ce39dbc3c3ddd2b6900f827e","title":"Integration test: mutation to export latency","description":"Measure time from bd create to JSONL update. Verify \u003c500ms latency. Test with multiple rapid mutations to verify batching.","notes":"Test added to daemon_test.go as TestMutationToExportLatency().\n\nCurrently skipped with note that it should be enabled once bd-146 (event-driven daemon) is fully implemented and enabled by default.\n\nThe test structure is complete:\n1. Sets up test environment with fast debounce (500ms)\n2. SingleMutationLatency: measures latency from mutation to JSONL update\n3. RapidMutationBatching: verifies multiple mutations batch into single export\n\nOnce event-driven mode is default, remove the t.Skip() line and the test will validate \u003c500ms latency.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T20:49:49.103759-07:00","updated_at":"2025-10-30T17:12:58.195867-07:00","closed_at":"2025-10-29T14:19:19.808139-07:00","dependencies":[{"issue_id":"bd-1f4086c5.1","depends_on_id":"bd-1f4086c5","type":"parent-child","created_at":"2025-10-29T20:49:49.107244-07:00","created_by":"import-remap"}]} {"id":"bd-22e0bde9","content_hash":"4c03fb79e67c0948d0d887b56fcbf71ed3b987e4bfd84628d7b9b2fa047a61fa","title":"Add TestNWayCollision for 5+ clones","description":"## Overview\nAdd comprehensive tests for N-way (5+) collision resolution to verify the solution scales beyond 3 clones.\n\n## Purpose\nWhile TestThreeCloneCollision validates the basic N-way case, we need to verify:\n1. Solution scales to arbitrary N\n2. Performance is acceptable with more clones\n3. Convergence time is bounded\n4. No edge cases in larger collision groups\n\n## Implementation Tasks\n\n### 1. Create TestFiveCloneCollision\nFile: beads_twoclone_test.go (or new beads_nway_test.go)\n\n```go\nfunc TestFiveCloneCollision(t *testing.T) {\n // Test with 5 clones creating same ID with different content\n // Verify all 5 clones converge after sync rounds\n \n t.Run(\"SequentialSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"A\", \"B\", \"C\", \"D\", \"E\")\n })\n \n t.Run(\"ReverseSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"E\", \"D\", \"C\", \"B\", \"A\")\n })\n \n t.Run(\"RandomSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"C\", \"A\", \"E\", \"B\", \"D\")\n })\n}\n```\n\n### 2. Implement generalized testNCloneCollision\nGeneralize the 3-clone test to handle arbitrary N:\n\n```go\nfunc testNCloneCollision(t *testing.T, numClones int, syncOrder ...string) {\n t.Helper()\n \n if len(syncOrder) != numClones {\n t.Fatalf(\"syncOrder length (%d) must match numClones (%d)\", \n len(syncOrder), numClones)\n }\n \n tmpDir := t.TempDir()\n \n // Setup remote and N clones\n remoteDir := setupBareRepo(t, tmpDir)\n cloneDirs := make(map[string]string)\n \n for i := 0; i \u003c numClones; i++ {\n name := string(rune('A' + i))\n cloneDirs[name] = setupClone(t, tmpDir, remoteDir, name)\n }\n \n // Each clone creates issue with same ID but different content\n for name, dir := range cloneDirs {\n createIssue(t, dir, fmt.Sprintf(\"Issue from clone %s\", name))\n }\n \n // Sync in specified order\n for _, name := range syncOrder {\n syncClone(t, cloneDirs[name], name)\n }\n \n // Final pull for convergence\n for name, dir := range cloneDirs {\n finalPull(t, dir, name)\n }\n \n // Verify all clones have all N issues\n expectedTitles := make(map[string]bool)\n for i := 0; i \u003c numClones; i++ {\n name := string(rune('A' + i))\n expectedTitles[fmt.Sprintf(\"Issue from clone %s\", name)] = true\n }\n \n for name, dir := range cloneDirs {\n titles := getTitles(t, dir)\n if !compareTitleSets(titles, expectedTitles) {\n t.Errorf(\"Clone %s missing issues: expected %v, got %v\", \n name, expectedTitles, titles)\n }\n }\n \n t.Log(\"✓ All\", numClones, \"clones converged successfully\")\n}\n```\n\n### 3. Add performance benchmarks\nTest convergence time and memory usage:\n\n```go\nfunc BenchmarkNWayCollision(b *testing.B) {\n for _, n := range []int{3, 5, 10, 20} {\n b.Run(fmt.Sprintf(\"N=%d\", n), func(b *testing.B) {\n for i := 0; i \u003c b.N; i++ {\n // Run N-way collision and measure time\n testNCloneCollisionBench(b, n)\n }\n })\n }\n}\n```\n\n### 4. Add convergence time tests\nVerify bounded convergence:\n\n```go\nfunc TestConvergenceTime(t *testing.T) {\n // Test that convergence happens within expected rounds\n // For N clones, should converge in at most N-1 sync rounds\n \n for n := 3; n \u003c= 10; n++ {\n t.Run(fmt.Sprintf(\"N=%d\", n), func(t *testing.T) {\n rounds := measureConvergenceRounds(t, n)\n maxExpected := n - 1\n if rounds \u003e maxExpected {\n t.Errorf(\"Convergence took %d rounds, expected ≤ %d\", \n rounds, maxExpected)\n }\n })\n }\n}\n```\n\n### 5. Add edge case tests\nTest boundary conditions:\n- All N clones have identical content (dedup works)\n- N-1 clones have same content, 1 differs\n- All N clones have unique content\n- Mix of collisions and non-collisions\n\n## Acceptance Criteria\n- TestFiveCloneCollision passes with all sync orders\n- All 5 clones converge to identical content\n- Performance is acceptable (\u003c 5 seconds for 5 clones)\n- Convergence time is bounded (≤ N-1 rounds)\n- Edge cases handled correctly\n- Benchmarks show scalability to 10+ clones\n\n## Files to Create/Modify\n- beads_twoclone_test.go or beads_nway_test.go\n- Add helper functions for N-clone setup\n\n## Testing Strategy\n\n### Test Matrix\n| N Clones | Sync Orders | Expected Result |\n|----------|-------------|-----------------|\n| 3 | A→B→C | Pass |\n| 3 | C→B→A | Pass |\n| 5 | A→B→C→D→E | Pass |\n| 5 | E→D→C→B→A | Pass |\n| 5 | Random | Pass |\n| 10 | Sequential | Pass |\n\n### Performance Targets\n- 3 clones: \u003c 2 seconds\n- 5 clones: \u003c 5 seconds\n- 10 clones: \u003c 15 seconds\n\n## Dependencies\n- Requires bd-cbed9619.5, bd-cbed9619.4, bd-cbed9619.3, bd-cbed9619.2 to be completed\n- TestThreeCloneCollision must pass first\n\n## Success Metrics\n- All tests pass for N ∈ {3, 5, 10}\n- Convergence time scales linearly (O(N))\n- Memory usage reasonable (\u003c 100MB for 10 clones)\n- No data corruption or loss in any scenario","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-29T23:05:13.974702-07:00","updated_at":"2025-10-31T12:00:43.197709-07:00","closed_at":"2025-10-31T12:00:43.197709-07:00"} {"id":"bd-248bdc3e","content_hash":"8eaeb2dbef1ed6b25fc1bcf3bc5cd1b38a5cf5a487772558ba9fe12a149978f3","title":"Add optional post-merge git hook example for bd sync","description":"Create example git hook that auto-runs bd sync after git pull/merge.\n\nAdd to examples/git-hooks/:\n- post-merge hook that checks if .beads/issues.jsonl changed\n- If changed: run `bd sync` automatically\n- Make it optional/documented (not auto-installed)\n\nBenefits:\n- Zero-friction sync after git pull\n- Complements auto-detection as belt-and-suspenders\n\nNote: post-merge hook already exists for pre-commit/post-merge. Extend it to support sync.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-25T22:47:14.668842-07:00","updated_at":"2025-10-30T17:12:58.218887-07:00"} +{"id":"bd-2530","content_hash":"7056a386ee4802bce2b83a981aaac7858b5911938263d212f8f9d1f60bf2a706","title":"Issue with labels","description":"This is a description","design":"Use MVC pattern","acceptance_criteria":"All tests pass","status":"open","priority":0,"issue_type":"feature","created_at":"2025-10-31T21:40:34.630173-07:00","updated_at":"2025-10-31T21:40:34.630173-07:00","labels":["bug","critical"]} {"id":"bd-2752a7a2","content_hash":"6b2a1aedbdbcb30b98d4a8196801953a1eb22204d63e31954ef9ab6020a7a26b","title":"Create cmd/bd/daemon_watcher.go (~150 LOC)","description":"Implement FileWatcher using fsnotify to watch JSONL file and git refs. Handle platform differences (inotify/FSEvents/ReadDirectoryChangesW). Include edge case handling for file rename, event storm, watcher failure.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T23:05:13.887269-07:00","updated_at":"2025-10-31T18:30:24.131535-07:00","closed_at":"2025-10-31T18:30:24.131535-07:00"} {"id":"bd-27ea","content_hash":"c516a8bb5500e2857ec69afcef2e877853128c2c5eb83fdb5693aee683e7c465","title":"Improve cmd/bd test coverage from 21% to 40% (multi-session effort)","description":"Current coverage: 21.0% of statements in cmd/bd\nTarget: 40%\nThis is a multi-session incremental effort.\n\nFocus areas:\n- Command handler tests (create, update, close, list, etc.)\n- Flag validation and error cases\n- JSON output formatting\n- Edge cases and error handling\n\nTrack progress with 'go test -cover ./cmd/bd'","status":"open","priority":0,"issue_type":"task","created_at":"2025-10-31T19:35:57.558346-07:00","updated_at":"2025-10-31T20:36:49.215975-07:00"} {"id":"bd-29c128e8","content_hash":"b93b210ddad4f38c993d184e2f7c897eb00cb2f9c8183224e27ff54e129bb1f7","title":"Update AGENTS.md with event-driven mode","description":"Document BEADS_DAEMON_MODE env var. Explain opt-in during Phase 1. Add troubleshooting for watcher failures.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-28T16:20:02.433145-07:00","updated_at":"2025-10-30T17:12:58.223058-07:00","closed_at":"2025-10-29T15:53:24.019613-07:00"} @@ -49,6 +50,7 @@ {"id":"bd-5aad5a9c","content_hash":"03aa8900fd0e6f4bd911364bb69d4446f239c6a5e9688f0fc4928f3de1259242","title":"Add TestNWayCollision for 5+ clones","description":"## Overview\nAdd comprehensive tests for N-way (5+) collision resolution to verify the solution scales beyond 3 clones.\n\n## Purpose\nWhile TestThreeCloneCollision validates the basic N-way case, we need to verify:\n1. Solution scales to arbitrary N\n2. Performance is acceptable with more clones\n3. Convergence time is bounded\n4. No edge cases in larger collision groups\n\n## Implementation Tasks\n\n### 1. Create TestFiveCloneCollision\nFile: beads_twoclone_test.go (or new beads_nway_test.go)\n\n```go\nfunc TestFiveCloneCollision(t *testing.T) {\n // Test with 5 clones creating same ID with different content\n // Verify all 5 clones converge after sync rounds\n \n t.Run(\"SequentialSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"A\", \"B\", \"C\", \"D\", \"E\")\n })\n \n t.Run(\"ReverseSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"E\", \"D\", \"C\", \"B\", \"A\")\n })\n \n t.Run(\"RandomSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"C\", \"A\", \"E\", \"B\", \"D\")\n })\n}\n```\n\n### 2. Implement generalized testNCloneCollision\nGeneralize the 3-clone test to handle arbitrary N:\n\n```go\nfunc testNCloneCollision(t *testing.T, numClones int, syncOrder ...string) {\n t.Helper()\n \n if len(syncOrder) != numClones {\n t.Fatalf(\"syncOrder length (%d) must match numClones (%d)\", \n len(syncOrder), numClones)\n }\n \n tmpDir := t.TempDir()\n \n // Setup remote and N clones\n remoteDir := setupBareRepo(t, tmpDir)\n cloneDirs := make(map[string]string)\n \n for i := 0; i \u003c numClones; i++ {\n name := string(rune('A' + i))\n cloneDirs[name] = setupClone(t, tmpDir, remoteDir, name)\n }\n \n // Each clone creates issue with same ID but different content\n for name, dir := range cloneDirs {\n createIssue(t, dir, fmt.Sprintf(\"Issue from clone %s\", name))\n }\n \n // Sync in specified order\n for _, name := range syncOrder {\n syncClone(t, cloneDirs[name], name)\n }\n \n // Final pull for convergence\n for name, dir := range cloneDirs {\n finalPull(t, dir, name)\n }\n \n // Verify all clones have all N issues\n expectedTitles := make(map[string]bool)\n for i := 0; i \u003c numClones; i++ {\n name := string(rune('A' + i))\n expectedTitles[fmt.Sprintf(\"Issue from clone %s\", name)] = true\n }\n \n for name, dir := range cloneDirs {\n titles := getTitles(t, dir)\n if !compareTitleSets(titles, expectedTitles) {\n t.Errorf(\"Clone %s missing issues: expected %v, got %v\", \n name, expectedTitles, titles)\n }\n }\n \n t.Log(\"✓ All\", numClones, \"clones converged successfully\")\n}\n```\n\n### 3. Add performance benchmarks\nTest convergence time and memory usage:\n\n```go\nfunc BenchmarkNWayCollision(b *testing.B) {\n for _, n := range []int{3, 5, 10, 20} {\n b.Run(fmt.Sprintf(\"N=%d\", n), func(b *testing.B) {\n for i := 0; i \u003c b.N; i++ {\n // Run N-way collision and measure time\n testNCloneCollisionBench(b, n)\n }\n })\n }\n}\n```\n\n### 4. Add convergence time tests\nVerify bounded convergence:\n\n```go\nfunc TestConvergenceTime(t *testing.T) {\n // Test that convergence happens within expected rounds\n // For N clones, should converge in at most N-1 sync rounds\n \n for n := 3; n \u003c= 10; n++ {\n t.Run(fmt.Sprintf(\"N=%d\", n), func(t *testing.T) {\n rounds := measureConvergenceRounds(t, n)\n maxExpected := n - 1\n if rounds \u003e maxExpected {\n t.Errorf(\"Convergence took %d rounds, expected ≤ %d\", \n rounds, maxExpected)\n }\n })\n }\n}\n```\n\n### 5. Add edge case tests\nTest boundary conditions:\n- All N clones have identical content (dedup works)\n- N-1 clones have same content, 1 differs\n- All N clones have unique content\n- Mix of collisions and non-collisions\n\n## Acceptance Criteria\n- TestFiveCloneCollision passes with all sync orders\n- All 5 clones converge to identical content\n- Performance is acceptable (\u003c 5 seconds for 5 clones)\n- Convergence time is bounded (≤ N-1 rounds)\n- Edge cases handled correctly\n- Benchmarks show scalability to 10+ clones\n\n## Files to Create/Modify\n- beads_twoclone_test.go or beads_nway_test.go\n- Add helper functions for N-clone setup\n\n## Testing Strategy\n\n### Test Matrix\n| N Clones | Sync Orders | Expected Result |\n|----------|-------------|-----------------|\n| 3 | A→B→C | Pass |\n| 3 | C→B→A | Pass |\n| 5 | A→B→C→D→E | Pass |\n| 5 | E→D→C→B→A | Pass |\n| 5 | Random | Pass |\n| 10 | Sequential | Pass |\n\n### Performance Targets\n- 3 clones: \u003c 2 seconds\n- 5 clones: \u003c 5 seconds\n- 10 clones: \u003c 15 seconds\n\n## Dependencies\n- Requires bd-cbed9619.5, bd-cbed9619.4, bd-cbed9619.3, bd-dcd6f14b to be completed\n- TestThreeCloneCollision must pass first\n\n## Success Metrics\n- All tests pass for N ∈ {3, 5, 10}\n- Convergence time scales linearly (O(N))\n- Memory usage reasonable (\u003c 100MB for 10 clones)\n- No data corruption or loss in any scenario","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-29T19:52:05.462747-07:00","updated_at":"2025-10-31T12:00:43.198413-07:00","closed_at":"2025-10-31T12:00:43.198413-07:00"} {"id":"bd-5b40a0bf","content_hash":"fd8a94f744b6504f677cc8e196c5a8d8b5d13b230200add5a8d9b7a54a537eb9","title":"Batch test 5","description":"","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-29T15:29:02.136118-07:00","updated_at":"2025-10-31T12:00:43.181513-07:00","closed_at":"2025-10-31T12:00:43.181513-07:00"} {"id":"bd-5dae5504","content_hash":"4f2b5a203d6a7b5e38176dd6ef68afb4b7d0e11889718381e28bf006f4e83a16","title":"Export deduplication breaks when JSONL and export_hashes table diverge","description":"## Problem\n\nThe export deduplication feature (timestamp-only skipping) breaks when the JSONL file and export_hashes table get out of sync, causing exports to skip issues that aren't actually in the file.\n\n## Symptoms\n\n- `bd export` reports \"Skipped 128 issue(s) with timestamp-only changes\"\n- JSONL file only has 38 lines but DB has 149 issues\n- export_hashes table has 149 entries\n- Auto-import doesn't trigger (hash matches despite missing data)\n- Two repos on same commit show different issue counts\n\n## Root Cause\n\nshouldSkipExport() in autoflush.go compares current issue hash with stored export_hashes entry. If they match, it skips export assuming the issue is already in the JSONL.\n\nThis assumption fails when:\n1. Git operations (pull, reset, checkout) change JSONL without clearing export_hashes\n2. Manual JSONL edits or corruption\n3. Import operations that modify DB but don't update export_hashes\n4. Partial exports that update export_hashes but don't complete\n\n## Impact\n\n- **Critical data loss risk**: Issues appear to be tracked but aren't persisted to git\n- Breaks multi-repo sync (root cause of today's debugging session)\n- Auto-import fails to detect staleness (hash matches despite missing data)\n- Silent data corruption (no error messages, just missing issues)\n\n## Reproduction\n\n1. Have DB with 149 issues, all in export_hashes table\n2. Truncate JSONL to 38 lines (simulate git reset or corruption)\n3. Run `bd export` - it skips 128 issues\n4. JSONL still has only 38 lines but export thinks it succeeded\n\n## Current Workaround\n\n```bash\nsqlite3 .beads/beads.db \"DELETE FROM export_hashes\"\nbd export -o .beads/beads.jsonl\n```\n\n## Proposed Solutions\n\n**Option 1: Verify JSONL integrity before skipping**\n- Count lines in JSONL, compare with export_hashes count\n- If mismatch, clear export_hashes and force full export\n- Safe but adds I/O overhead\n\n**Option 2: Hash-based JSONL validation**\n- Store hash of entire JSONL file in metadata\n- Before export, check if JSONL hash matches\n- If mismatch, clear export_hashes\n- More efficient, detects any JSONL corruption\n\n**Option 3: Disable timestamp-only deduplication**\n- Remove the feature entirely\n- Always export all issues\n- Simplest and safest, but creates larger git commits\n\n**Option 4: Clear export_hashes on git operations**\n- Add post-merge hook to clear export_hashes\n- Clear on any import operation\n- Defensive approach but may over-clear\n\n## Recommended Fix\n\nCombination of Options 2 + 4:\n1. Store JSONL file hash in metadata after export\n2. Check hash before export, clear export_hashes if mismatch \n3. Clear export_hashes on import operations\n4. Add `bd validate` check for JSONL/export_hashes sync\n\n## Files Involved\n\n- cmd/bd/autoflush.go (shouldSkipExport)\n- cmd/bd/export.go (export with deduplication)\n- internal/storage/sqlite/metadata.go (export_hashes table)","notes":"## Recovery Session (2025-10-29 21:30)\n\n### What Happened\n- Created 14 new hash ID issues (bd-f8b764c9 through bd-f8b764c9.1) \n- bd sync appeared to succeed\n- Canonical repo (~/src/beads): 162 issues in DB + JSONL ✓\n- Secondary repo (fred/beads): Only 145 issues vs 162 in canonical ✗\n- Both repos on same git commit but different issue counts!\n\n### Bug Manifestation During Recovery\n\n1. **Initial state**: fred/beads had 145 issues, 145 lines in JSONL, 145 export_hashes entries\n\n2. **After git reset --hard origin/main**: \n - JSONL: 162 lines (from git)\n - DB: 150 issues (auto-import partially worked)\n - Auto-import failed with UNIQUE constraint error\n\n3. **After manual import --resolve-collisions**:\n - DB: 160 issues\n - JSONL: Still 162 lines\n - export_hashes: 159 entries\n\n4. **After bd export**: \n - **JSONL reduced to 17 lines!** ← The bug in action\n - export_hashes: 159 entries (skipped exporting 142 issues)\n - Silent data loss - no error message\n\n5. **After clearing export_hashes and re-export**:\n - JSONL: 159 lines (missing 3 issues still)\n - DB: 159 issues\n - Still diverged from canonical\n\n### The Bug Loop\nOnce export_hashes and JSONL diverge:\n- Export skips issues already in export_hashes\n- But those issues aren't actually in JSONL\n- This creates corrupt JSONL with missing issues\n- Auto-import can't detect the problem (file hash matches what was exported)\n- Data is lost with no error messages\n\n### Recovery Solution\nCouldn't break the loop with export alone. Had to:\n1. Copy .beads/beads.db from canonical repo\n2. Clear export_hashes\n3. Full re-export\n4. Finally converged to 162 issues\n\n### Key Learnings\n\n1. **The bug is worse than we thought**: It can create corrupt exports (17 lines instead of 162!)\n\n2. **Auto-import can't save you**: Once export is corrupt, auto-import just imports the corrupt data\n\n3. **Silent failure**: No warnings, no errors, just missing issues\n\n4. **Git operations trigger it**: git reset, git pull, etc. change JSONL without clearing export_hashes\n\n5. **Import operations populate export_hashes**: Even manual imports update export_hashes, setting up future export failures\n\n### Immediate Action Required\n\n**DISABLE EXPORT DEDUPLICATION NOW**\n\nThis feature is fundamentally broken and causes data loss. Should be disabled until properly fixed.\n\nQuick fix options:\n- Set environment variable to disable feature\n- Comment out shouldSkipExport check\n- Always clear export_hashes before export\n- Add validation that DB count == JSONL line count before allowing export\n\n### Long-term Fix\n\nNeed Option 2 + 4 from proposed solutions:\n1. Store JSONL file hash after every successful export\n2. Before export, verify JSONL hash matches expected\n3. If mismatch, log WARNING and clear export_hashes\n4. Clear export_hashes on every import operation\n5. Add git post-merge hook to clear export_hashes\n6. Add `bd validate` command to detect divergence\n","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-10-29T23:05:13.959435-07:00","updated_at":"2025-10-30T17:12:58.207148-07:00","closed_at":"2025-10-29T21:57:03.06641-07:00"} +{"id":"bd-5e1f","content_hash":"3e3467773e73eb9dbb8dd8f213be7157c27d72b53c6cc9776616154db96c3864","title":"Issue with desc","description":"This is a description","status":"open","priority":2,"issue_type":"bug","created_at":"2025-10-31T21:41:11.128718-07:00","updated_at":"2025-10-31T21:41:11.128718-07:00"} {"id":"bd-5f483051","content_hash":"d69f64f7f0bdc46a539dfe0b699a8977309c9c8d59f3e9beffbbe4484275a16b","title":"Implement bd resolve-conflicts (git merge conflicts in JSONL)","description":"Automatically detect and resolve git merge conflicts in .beads/issues.jsonl file.\n\nFeatures:\n- Detect conflict markers in JSONL\n- Parse conflicting issues from HEAD and BASE\n- Provide mechanical resolution (remap duplicate IDs)\n- Support AI-assisted resolution (requires internal/ai package)\n\nSee repair_commands.md lines 125-353 for design.","status":"open","priority":1,"issue_type":"task","created_at":"2025-10-28T19:37:55.722827-07:00","updated_at":"2025-10-30T17:12:58.179718-07:00"} {"id":"bd-6214875c","content_hash":"d4d20e71bbf5c08f1fe1ed07f67b7554167aa165d4972ea51b5cacc1b256c4c1","title":"Split internal/rpc/server.go into focused modules","description":"The file `internal/rpc/server.go` is 2,273 lines with 50+ methods, making it difficult to navigate and prone to merge conflicts. Split into 8 focused files with clear responsibilities.\n\nCurrent structure: Single 2,273-line file with:\n- Connection handling\n- Request routing\n- All 40+ RPC method implementations\n- Storage caching\n- Health checks \u0026 metrics\n- Cleanup loops\n\nTarget structure:\n```\ninternal/rpc/\n├── server.go # Core server, connection handling (~300 lines)\n├── methods_issue.go # Issue operations (~400 lines)\n├── methods_deps.go # Dependency operations (~200 lines)\n├── methods_labels.go # Label operations (~150 lines)\n├── methods_ready.go # Ready work queries (~150 lines)\n├── methods_compact.go # Compaction operations (~200 lines)\n├── methods_comments.go # Comment operations (~150 lines)\n├── storage_cache.go # Storage caching logic (~300 lines)\n└── health.go # Health \u0026 metrics (~200 lines)\n```\n\nMigration strategy:\n1. Create new files with appropriate methods\n2. Keep `server.go` as main file with core server logic\n3. Test incrementally after each file split\n4. Final verification with full test suite","acceptance_criteria":"- All 50 methods split into appropriate files\n- Each file \u003c500 LOC\n- All methods remain on `*Server` receiver (no behavior change)\n- All tests pass: `go test ./internal/rpc/...`\n- Verify daemon works: start daemon, run operations, check health\n- Update internal documentation if needed\n- No change to public API","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-28T14:21:37.51524-07:00","updated_at":"2025-10-30T17:12:58.2179-07:00","closed_at":"2025-10-28T14:11:04.399811-07:00"} {"id":"bd-6221bdcd","content_hash":"3bf15bc9e418180e1e91691261817c872330e182dbc1bcb756522faa42416667","title":"Improve cmd/bd test coverage (currently 20.2%)","description":"CLI commands need better test coverage. Focus on:\n- Command argument parsing\n- Error handling paths\n- Edge cases in create, update, close commands\n- Daemon commands\n- Import/export workflows","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-29T14:06:27.951656-07:00","updated_at":"2025-10-30T17:12:58.185819-07:00","dependencies":[{"issue_id":"bd-6221bdcd","depends_on_id":"bd-4d7fca8a","type":"blocks","created_at":"2025-10-29T19:52:05.532391-07:00","created_by":"import-remap"}]} @@ -67,6 +69,7 @@ {"id":"bd-710a4916","content_hash":"3d8be03f83f87067b1aaf295b0b829d20890e47686e0da10ef81d2096f5ca974","title":"CRDT-based architecture for guaranteed convergence (v2.0)","description":"## Vision\nRedesign beads around Conflict-Free Replicated Data Types (CRDTs) to provide mathematical guarantees for N-way collision resolution at arbitrary scale.\n\n## Current Limitations\n- Content-hash based collision resolution fails at 5+ clones\n- Non-deterministic convergence in multi-round scenarios\n- UNIQUE constraint violations during rename operations\n- No formal proof of convergence properties\n\n## CRDT Benefits\n- Provably convergent (Strong Eventual Consistency)\n- Commutative/Associative/Idempotent operations\n- No coordination required between clones\n- Scales to 100+ concurrent workers\n- Well-understood mathematical foundations\n\n## Proposed Architecture\n\n### 1. UUID-Based IDs\nReplace sequential IDs with UUIDs:\n- Current: bd-1c63eb84, bd-9063acda, bd-4d80b7b1\n- CRDT: bd-a1b2c3d4-e5f6-7890-abcd-ef1234567890\n- Human aliases maintained separately: #42 maps to UUID\n\n### 2. Last-Write-Wins (LWW) Elements\nEach field becomes an LWW register:\n- title: (timestamp, clone_id, value)\n- status: (timestamp, clone_id, value)\n- Deterministic conflict resolution via Lamport timestamp + clone_id tiebreaker\n\n### 3. Operation Log\nTrack all operations as CRDT ops:\n- CREATE(uuid, timestamp, clone_id, fields)\n- UPDATE(uuid, field, timestamp, clone_id, value)\n- DELETE(uuid, timestamp, clone_id) - tombstone, not hard delete\n\n### 4. Sync as Merge\nSyncing becomes merging two CRDT states:\n- No merge conflicts possible\n- Deterministic merge function\n- Guaranteed convergence\n\n## Implementation Phases\n\n### Phase 1: Research \u0026 Design (4 weeks)\n- Study existing CRDT implementations (Automerge, Yjs, Loro)\n- Design schema for CRDT-based issue tracking\n- Prototype LWW-based Issue CRDT\n- Benchmark performance vs current system\n\n### Phase 2: Parallel Implementation (6 weeks)\n- Implement CRDT storage layer alongside SQLite\n- Build conversion tools: SQLite ↔ CRDT\n- Maintain backward compatibility with v1.x format\n- Migration path for existing databases\n\n### Phase 3: Testing \u0026 Validation (4 weeks)\n- Formal verification of convergence properties\n- Stress testing with 100+ clone scenario\n- Performance profiling and optimization\n- Documentation and examples\n\n### Phase 4: Migration \u0026 Rollout (4 weeks)\n- Release v2.0-beta with CRDT backend\n- Gradual migration from v1.x\n- Monitoring and bug fixes\n- Final v2.0 release\n\n## Risks \u0026 Mitigations\n\n**Risk 1: Performance overhead**\n- Mitigation: Benchmark early, optimize hot paths\n- CRDTs can be slower than append-only logs\n- May need compaction strategy\n\n**Risk 2: Storage bloat**\n- Mitigation: Implement operation log compaction\n- Tombstone garbage collection for deleted issues\n- Periodic snapshots to reduce log size\n\n**Risk 3: Breaking changes**\n- Mitigation: Maintain v1.x compatibility layer\n- Gradual migration tools\n- Dual-mode operation during transition\n\n**Risk 4: Complexity**\n- Mitigation: Use battle-tested CRDT libraries\n- Comprehensive documentation\n- Clear migration guide\n\n## Success Criteria\n- 100-clone collision test passes without failures\n- Formal proof of convergence properties\n- Performance within 2x of current system\n- Zero manual conflict resolution required\n- Backward compatible with v1.x databases\n\n## Timeline\n18-20 weeks total (4-5 months)\n\n## References\n- Automerge: https://automerge.org\n- Yjs: https://docs.yjs.dev\n- Loro: https://loro.dev\n- CRDT theory: Shapiro et al, A comprehensive study of CRDTs\n- Related issues: bd-e6d71828, bd-7a2b58fc,-1","status":"open","priority":3,"issue_type":"feature","created_at":"2025-10-29T10:23:57.978339-07:00","updated_at":"2025-10-30T17:12:58.182513-07:00"} {"id":"bd-71107098","content_hash":"9feb9a8dc8ae2dc65b11edeff37cf5ce48d8f28e1ced45d64ac0176937610296","title":"Make two-clone workflow actually work (no hacks)","description":"TestTwoCloneCollision proves beads CANNOT handle two independent clones filing issues simultaneously. This is the basic collaborative workflow and it must work cleanly.\n\nTest location: beads_twoclone_test.go\n\nThe test creates two git clones, both file issues with same ID (test-1), --resolve-collisions remaps clone B's to test-2, but after sync:\n- Clone A has test-1=\"Issue from clone A\", test-2=\"Issue from clone B\" \n- Clone B has test-1=\"Issue from clone B\", test-2=\"Issue from clone A\"\n\nThe TITLES are swapped! Both clones have 2 issues but with opposite title assignments.\n\nWe've tried many fixes (per-project daemons, auto-sync, lamport hashing, precommit hooks) but nothing has made the test pass.\n\nGoal: Make the test pass WITHOUT hacks. The two clones should converge to identical state after sync.","acceptance_criteria":"1. TestTwoCloneCollision passes without EXPECTED FAILURE\n2. Both clones converge to identical issue database\n3. No manual conflict resolution required\n4. Git status clean in both clones\n5. bd ready output identical in both clones","notes":"**Major progress achieved!** The two-clone workflow now converges correctly.\n\n**What was fixed:**\n--3d844c58: Implemented content-hash based rename detection\n- bd-64c05d00.1: Fixed test to compare content not timestamps\n- Both clones now converge to identical issue databases\n- test-1 and test-2 have correct titles in both clones\n- No more title swapping!\n\n**Current status (VERIFIED):**\n✅ Acceptance criteria 1: TestTwoCloneCollision passes (confirmed Oct 28)\n✅ Acceptance criteria 2: Both clones converge to identical issue database (content matches)\n✅ Acceptance criteria 3: No manual conflict resolution required (automatic)\n✅ Acceptance criteria 4: Git status clean\n✅ Acceptance criteria 5: bd ready output identical (timestamps are expected difference)\n\n**ALL ACCEPTANCE CRITERIA MET!** This issue is complete and can be closed.","status":"closed","priority":0,"issue_type":"epic","created_at":"2025-10-28T16:34:53.278793-07:00","updated_at":"2025-10-31T19:38:09.206303-07:00","closed_at":"2025-10-28T19:20:04.143242-07:00"} {"id":"bd-763c","content_hash":"6943cea47a7851c7d1eb316f4669fb8ae2b843a68a89b441fdca5e0be0193494","title":"~/src/beads daemon has 'sql: database is closed' errors - zombie daemon","description":"","status":"open","priority":0,"issue_type":"bug","created_at":"2025-10-31T21:08:03.388007-07:00","updated_at":"2025-10-31T21:08:03.388007-07:00","dependencies":[{"issue_id":"bd-763c","depends_on_id":"bd-2752a7a2","type":"discovered-from","created_at":"2025-10-31T21:08:03.388716-07:00","created_by":"stevey"}]} +{"id":"bd-7857","content_hash":"7056a386ee4802bce2b83a981aaac7858b5911938263d212f8f9d1f60bf2a706","title":"Issue with labels","description":"This is a description","design":"Use MVC pattern","acceptance_criteria":"All tests pass","status":"open","priority":0,"issue_type":"feature","created_at":"2025-10-31T21:41:11.16555-07:00","updated_at":"2025-10-31T21:41:11.16555-07:00","labels":["bug","critical"]} {"id":"bd-7a00c94e","content_hash":"b31566a4b2a84db7d24364492e8ac6ebfa1f5fc27fe270fbd58b27e17218c9c4","title":"Rapid 2","description":"","status":"open","priority":3,"issue_type":"task","created_at":"2025-10-29T19:11:57.430725-07:00","updated_at":"2025-10-30T17:12:58.189251-07:00"} {"id":"bd-7a2b58fc","content_hash":"e887227ed9b3f477282569800eb4683b68bf1a5404f007e00ec44b2e570325b5","title":"Implement clone-scoped ID allocation to prevent N-way collisions","description":"## Problem\nCurrent ID allocation uses per-clone atomic counters (issue_counters table) that sync based on local database state. In N-way collision scenarios:\n- Clone B sees {test-1} locally, allocates test-2\n- Clone D sees {test-1, test-2, test-3} locally, allocates test-4\n- When same content gets assigned test-2 and test-4, convergence fails\n\nRoot cause: Each clone independently allocates IDs without global coordination, leading to overlapping assignments for the same content.\n\n## Solution\nAdd clone UUID to ID allocation to make every ID globally unique:\n\n**Current format:** `test-1`, `test-2`, `test-3`\n**New format:** `test-1-a7b3`, `test-2-a7b3`, `test-3-c4d9`\n\nWhere suffix is first 4 chars of clone UUID.\n\n## Implementation\n\n### 1. Add clone_identity table\n```sql\nCREATE TABLE clone_identity (\n clone_uuid TEXT PRIMARY KEY,\n created_at DATETIME DEFAULT CURRENT_TIMESTAMP\n);\n```\n\n### 2. Modify getNextIDForPrefix()\n```go\nfunc (s *SQLiteStorage) getNextIDForPrefix(ctx context.Context, prefix string) (string, error) {\n cloneUUID := s.getOrCreateCloneUUID(ctx)\n shortUUID := cloneUUID[:4]\n \n nextNum := s.getNextCounterForPrefix(ctx, prefix)\n return fmt.Sprintf(\"%s-%d-%s\", prefix, nextNum, shortUUID), nil\n}\n```\n\n### 3. Update ID parsing logic\nAll places that parse IDs (utils.ExtractIssueNumber, etc.) need to handle new format.\n\n### 4. Migration strategy\n- Existing IDs remain unchanged (no suffix)\n- New IDs get clone suffix automatically\n- Display layer can hide suffix in UI: `bd-cb64c226.3-a7b3` → `#42`\n\n## Benefits\n- **Zero collision risk**: Same content in different clones gets different IDs\n- **Maintains readability**: Still sequential numbering within clone\n- **No coordination needed**: Works offline, no central authority\n- **Scales to 100+ clones**: 4-char hex = 65,536 unique clones\n\n## Concerns\n- ID format change may break existing integrations\n- Need migration path for existing databases\n- Display logic needs update to hide/show suffixes appropriately\n\n## Success Criteria\n- 10+ clone collision test passes without failures\n- Existing issues continue to work (backward compatibility)\n- Documentation updated with new ID format\n- Migration guide for v1.x → v2.x\n\n## Timeline\nMedium-term (v1.1-v1.2), 2-3 weeks implementation\n\n## References\n- Related to bd-0dcea000 (immediate fix)\n- See beads_nway_test.go for failing N-way tests","status":"open","priority":2,"issue_type":"feature","created_at":"2025-10-29T20:02:47.952447-07:00","updated_at":"2025-10-31T17:53:09.075064-07:00"} {"id":"bd-7bbc4e6a","content_hash":"3251d757d9ad69cd4b3517862ec1b9b1cc13388ea4c93a2f3b2b54920112854f","title":"Add MCP server functions for repair commands","description":"Expose new repair commands via MCP server for agent access:\n\nFunctions to add:\n- beads_repair_deps()\n- beads_detect_pollution()\n- beads_validate()\n- beads_resolve_conflicts() (when implemented)\n\nUpdate integrations/beads-mcp/src/beads_mcp/server.py\n\nSee repair_commands.md lines 803-884 for design.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-28T19:37:55.72639-07:00","updated_at":"2025-10-30T17:12:58.179948-07:00"} @@ -91,6 +94,7 @@ {"id":"bd-98c4e1fa.1","content_hash":"6440d1ece0a91c8f49adc09aafa7a998b049bcd51f257125ad8bc0b7b03e317b","title":"Update AGENTS.md with event-driven mode","description":"Document BEADS_DAEMON_MODE env var. Explain opt-in during Phase 1. Add troubleshooting for watcher failures.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-29T23:05:13.986452-07:00","updated_at":"2025-10-31T20:36:49.381832-07:00","dependencies":[{"issue_id":"bd-98c4e1fa.1","depends_on_id":"bd-98c4e1fa","type":"parent-child","created_at":"2025-10-29T21:19:36.206187-07:00","created_by":"import-remap"},{"issue_id":"bd-98c4e1fa.1","depends_on_id":"bd-0e1f2b1b","type":"parent-child","created_at":"2025-10-31T19:38:09.131439-07:00","created_by":"stevey"}]} {"id":"bd-9a9530d8","content_hash":"7d6b35b6de9cbd784acb42c9bf67e47e1a73166e0c895afdcc522a9946c1833c","title":"Add TestNWayCollision for 5+ clones","description":"## Overview\nAdd comprehensive tests for N-way (5+) collision resolution to verify the solution scales beyond 3 clones.\n\n## Purpose\nWhile TestThreeCloneCollision validates the basic N-way case, we need to verify:\n1. Solution scales to arbitrary N\n2. Performance is acceptable with more clones\n3. Convergence time is bounded\n4. No edge cases in larger collision groups\n\n## Implementation Tasks\n\n### 1. Create TestFiveCloneCollision\nFile: beads_twoclone_test.go (or new beads_nway_test.go)\n\n```go\nfunc TestFiveCloneCollision(t *testing.T) {\n // Test with 5 clones creating same ID with different content\n // Verify all 5 clones converge after sync rounds\n \n t.Run(\"SequentialSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"A\", \"B\", \"C\", \"D\", \"E\")\n })\n \n t.Run(\"ReverseSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"E\", \"D\", \"C\", \"B\", \"A\")\n })\n \n t.Run(\"RandomSync\", func(t *testing.T) {\n testNCloneCollision(t, 5, \"C\", \"A\", \"E\", \"B\", \"D\")\n })\n}\n```\n\n### 2. Implement generalized testNCloneCollision\nGeneralize the 3-clone test to handle arbitrary N:\n\n```go\nfunc testNCloneCollision(t *testing.T, numClones int, syncOrder ...string) {\n t.Helper()\n \n if len(syncOrder) != numClones {\n t.Fatalf(\"syncOrder length (%d) must match numClones (%d)\", \n len(syncOrder), numClones)\n }\n \n tmpDir := t.TempDir()\n \n // Setup remote and N clones\n remoteDir := setupBareRepo(t, tmpDir)\n cloneDirs := make(map[string]string)\n \n for i := 0; i \u003c numClones; i++ {\n name := string(rune('A' + i))\n cloneDirs[name] = setupClone(t, tmpDir, remoteDir, name)\n }\n \n // Each clone creates issue with same ID but different content\n for name, dir := range cloneDirs {\n createIssue(t, dir, fmt.Sprintf(\"Issue from clone %s\", name))\n }\n \n // Sync in specified order\n for _, name := range syncOrder {\n syncClone(t, cloneDirs[name], name)\n }\n \n // Final pull for convergence\n for name, dir := range cloneDirs {\n finalPull(t, dir, name)\n }\n \n // Verify all clones have all N issues\n expectedTitles := make(map[string]bool)\n for i := 0; i \u003c numClones; i++ {\n name := string(rune('A' + i))\n expectedTitles[fmt.Sprintf(\"Issue from clone %s\", name)] = true\n }\n \n for name, dir := range cloneDirs {\n titles := getTitles(t, dir)\n if !compareTitleSets(titles, expectedTitles) {\n t.Errorf(\"Clone %s missing issues: expected %v, got %v\", \n name, expectedTitles, titles)\n }\n }\n \n t.Log(\"✓ All\", numClones, \"clones converged successfully\")\n}\n```\n\n### 3. Add performance benchmarks\nTest convergence time and memory usage:\n\n```go\nfunc BenchmarkNWayCollision(b *testing.B) {\n for _, n := range []int{3, 5, 10, 20} {\n b.Run(fmt.Sprintf(\"N=%d\", n), func(b *testing.B) {\n for i := 0; i \u003c b.N; i++ {\n // Run N-way collision and measure time\n testNCloneCollisionBench(b, n)\n }\n })\n }\n}\n```\n\n### 4. Add convergence time tests\nVerify bounded convergence:\n\n```go\nfunc TestConvergenceTime(t *testing.T) {\n // Test that convergence happens within expected rounds\n // For N clones, should converge in at most N-1 sync rounds\n \n for n := 3; n \u003c= 10; n++ {\n t.Run(fmt.Sprintf(\"N=%d\", n), func(t *testing.T) {\n rounds := measureConvergenceRounds(t, n)\n maxExpected := n - 1\n if rounds \u003e maxExpected {\n t.Errorf(\"Convergence took %d rounds, expected ≤ %d\", \n rounds, maxExpected)\n }\n })\n }\n}\n```\n\n### 5. Add edge case tests\nTest boundary conditions:\n- All N clones have identical content (dedup works)\n- N-1 clones have same content, 1 differs\n- All N clones have unique content\n- Mix of collisions and non-collisions\n\n## Acceptance Criteria\n- TestFiveCloneCollision passes with all sync orders\n- All 5 clones converge to identical content\n- Performance is acceptable (\u003c 5 seconds for 5 clones)\n- Convergence time is bounded (≤ N-1 rounds)\n- Edge cases handled correctly\n- Benchmarks show scalability to 10+ clones\n\n## Files to Create/Modify\n- beads_twoclone_test.go or beads_nway_test.go\n- Add helper functions for N-clone setup\n\n## Testing Strategy\n\n### Test Matrix\n| N Clones | Sync Orders | Expected Result |\n|----------|-------------|-----------------|\n| 3 | A→B→C | Pass |\n| 3 | C→B→A | Pass |\n| 5 | A→B→C→D→E | Pass |\n| 5 | E→D→C→B→A | Pass |\n| 5 | Random | Pass |\n| 10 | Sequential | Pass |\n\n### Performance Targets\n- 3 clones: \u003c 2 seconds\n- 5 clones: \u003c 5 seconds\n- 10 clones: \u003c 15 seconds\n\n## Dependencies\n- Requires bd-cbed9619.5, bd-cbed9619.4, bd-cbed9619.3, bd-cbed9619.2 to be completed\n- TestThreeCloneCollision must pass first\n\n## Success Metrics\n- All tests pass for N ∈ {3, 5, 10}\n- Convergence time scales linearly (O(N))\n- Memory usage reasonable (\u003c 100MB for 10 clones)\n- No data corruption or loss in any scenario","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-29T19:42:29.849243-07:00","updated_at":"2025-10-30T17:12:58.190232-07:00","closed_at":"2025-10-28T22:06:04.398141-07:00"} {"id":"bd-9ae788be","content_hash":"19599f6bcc268e97438593e08eb6343b551ce765f0d91956591aa811cbb90690","title":"Implement clone-scoped ID allocation to prevent N-way collisions","description":"## Problem\nCurrent ID allocation uses per-clone atomic counters (issue_counters table) that sync based on local database state. In N-way collision scenarios:\n- Clone B sees {test-1} locally, allocates test-2\n- Clone D sees {test-1, test-2, test-3} locally, allocates test-4\n- When same content gets assigned test-2 and test-4, convergence fails\n\nRoot cause: Each clone independently allocates IDs without global coordination, leading to overlapping assignments for the same content.\n\n## Solution\nAdd clone UUID to ID allocation to make every ID globally unique:\n\n**Current format:** `test-1`, `test-2`, `test-3`\n**New format:** `test-1-a7b3`, `test-2-a7b3`, `test-3-c4d9`\n\nWhere suffix is first 4 chars of clone UUID.\n\n## Implementation\n\n### 1. Add clone_identity table\n```sql\nCREATE TABLE clone_identity (\n clone_uuid TEXT PRIMARY KEY,\n created_at DATETIME DEFAULT CURRENT_TIMESTAMP\n);\n```\n\n### 2. Modify getNextIDForPrefix()\n```go\nfunc (s *SQLiteStorage) getNextIDForPrefix(ctx context.Context, prefix string) (string, error) {\n cloneUUID := s.getOrCreateCloneUUID(ctx)\n shortUUID := cloneUUID[:4]\n \n nextNum := s.getNextCounterForPrefix(ctx, prefix)\n return fmt.Sprintf(\"%s-%d-%s\", prefix, nextNum, shortUUID), nil\n}\n```\n\n### 3. Update ID parsing logic\nAll places that parse IDs (utils.ExtractIssueNumber, etc.) need to handle new format.\n\n### 4. Migration strategy\n- Existing IDs remain unchanged (no suffix)\n- New IDs get clone suffix automatically\n- Display layer can hide suffix in UI: `bd-cb64c226.3-a7b3` → `#42`\n\n## Benefits\n- **Zero collision risk**: Same content in different clones gets different IDs\n- **Maintains readability**: Still sequential numbering within clone\n- **No coordination needed**: Works offline, no central authority\n- **Scales to 100+ clones**: 4-char hex = 65,536 unique clones\n\n## Concerns\n- ID format change may break existing integrations\n- Need migration path for existing databases\n- Display logic needs update to hide/show suffixes appropriately\n\n## Success Criteria\n- 10+ clone collision test passes without failures\n- Existing issues continue to work (backward compatibility)\n- Documentation updated with new ID format\n- Migration guide for v1.x → v2.x\n\n## Timeline\nMedium-term (v1.1-v1.2), 2-3 weeks implementation\n\n## References\n- Related to bd-e6d71828 (immediate fix)\n- See beads_nway_test.go for failing N-way tests","status":"open","priority":2,"issue_type":"feature","created_at":"2025-10-29T10:22:52.260524-07:00","updated_at":"2025-10-30T17:12:58.18193-07:00"} +{"id":"bd-9e8d","content_hash":"8a2616a707f5ae932357049b0bc922716f4d729724fb8c38b256c91e292f851b","title":"Test Issue","description":"","status":"open","priority":1,"issue_type":"bug","created_at":"2025-10-31T21:41:11.107393-07:00","updated_at":"2025-10-31T21:41:11.107393-07:00"} {"id":"bd-9f1fce5d","content_hash":"14b0d330680e979e504043d2c560bd2eda204698f5622c3bdc6f91816f861d22","title":"Add internal/ai package for LLM integration","description":"Shared AI client for repair commands.\n\nProviders:\n- Anthropic (Claude)\n- OpenAI (GPT)\n- Ollama (local)\n\nEnv vars:\n- BEADS_AI_PROVIDER\n- BEADS_AI_API_KEY\n- BEADS_AI_MODEL\n\nFiles: internal/ai/client.go (new)","status":"open","priority":1,"issue_type":"task","created_at":"2025-10-28T14:48:29.072473-07:00","updated_at":"2025-10-30T17:12:58.219706-07:00"} {"id":"bd-a03d5e36","content_hash":"b4ee73e439a133a77e5df27e3e457674dbc9968fdbee0dc630175726960bb8cf","title":"Improve integration test coverage for stateful features","description":"","design":"## Context\n\nbd-70419816 revealed a critical gap: the export deduplication feature had unit tests but no integration tests simulating real-world git operations. This led to silent data loss in production.\n\n## Root Cause\n- Unit tests only tested functions in isolation\n- No integration tests for git operations (pull, reset, checkout) modifying JSONL\n- No tests validating export_hashes and JSONL stay in sync\n- Missing tests for stateful distributed system interactions (DB + JSONL + git)\n\n## Completed (bd-70419816)\n✓ TestJSONLIntegrityValidation - unit tests for validation logic\n✓ TestImportClearsExportHashes - tests import clears hashes\n✓ TestExportIntegrityAfterJSONLTruncation - simulates git reset (would have caught bd-70419816)\n✓ TestExportIntegrityAfterJSONLDeletion - tests recovery from file deletion\n✓ TestMultipleExportsStayConsistent - tests repeated exports\n\n## Still Needed (High Priority)\n1. Multi-repo sync test - two clones staying in sync after push/pull\n2. Auto-flush integration test - JSONL integrity preserved during auto-flush\n3. Daemon auto-sync integration test - complex state management\n4. Import after corruption test - recovery from partial data loss\n\n## Medium Priority\n- Partial export failure handling (disk full, network interruption)\n- Concurrent export/import race conditions\n- Large dataset performance tests (1000+ issues)\n- Export hash migration tests (version upgrades)\n\n## Testing Principles\n1. Test real-world scenarios: git ops, user errors, system failures, concurrent ops\n2. Integration tests for stateful systems (DB + files + git)\n3. Regression test for every bug fix\n4. Test invariants: JSONL count == DB count, hash consistency, etc.\n\n## Key Lesson\nStateful distributed systems need integration tests, not just unit tests.","acceptance_criteria":"- [ ] Multi-repo sync test implemented\n- [ ] Auto-flush integration test implemented \n- [ ] Daemon auto-sync integration test implemented\n- [ ] Testing guidelines added to CONTRIBUTING.md\n- [ ] CI runs integration tests\n- [ ] All critical workflows have integration test coverage","status":"open","priority":2,"issue_type":"epic","created_at":"2025-10-29T21:53:15.397137-07:00","updated_at":"2025-10-30T17:12:58.206063-07:00"} {"id":"bd-a1691807","content_hash":"23f0119ee9df98f1bf6d648dba06065c156963064ef1c7308dfb02c8bdd5bc58","title":"Integration test: mutation to export latency","description":"Measure time from bd create to JSONL update. Verify \u003c500ms latency. Test with multiple rapid mutations to verify batching.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T20:49:49.105247-07:00","updated_at":"2025-10-31T12:00:43.198883-07:00","closed_at":"2025-10-31T12:00:43.198883-07:00"} @@ -102,6 +106,7 @@ {"id":"bd-b47c034e","content_hash":"133dfd651d402bb95928091138c77a57b2f3f349587962c744209a534fb800a6","title":"Address gosec security warnings (102 issues)","description":"Security linter warnings: file permissions (0755 should be 0750), G304 file inclusion via variable, G204 subprocess launches. Many are false positives but should be reviewed.","design":"Review each gosec warning. Add exclusions for legitimate cases to .golangci.yml. Fix real security issues (overly permissive file modes).","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-25T13:47:10.719134-07:00","updated_at":"2025-10-30T17:12:58.216521-07:00"} {"id":"bd-b501fcc1","content_hash":"323c3b4f2e53d707ce73e75a357bbb4e320327bea00d0b010c3dd09d1e6555cf","title":"Unit tests for Debouncer","description":"Test debouncer batches multiple triggers into single action. Test timer reset on subsequent triggers. Test cancel during wait. Test thread safety.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T19:42:29.86146-07:00","updated_at":"2025-10-31T17:54:06.880513-07:00","closed_at":"2025-10-31T17:54:06.880513-07:00"} {"id":"bd-b55e2ac2","content_hash":"44122b61b1dcd06407ecf36f57577ea72c5df6dc8cc2a8c1b173b37d16a10267","title":"Fix autoimport tests for content-hash collision scoring","description":"## Overview\nThree autoimport tests are failing after bd-cbed9619.4 because they expect behavior based on the old reference-counting collision resolution, but the system now uses deterministic content-hash scoring.\n\n## Failing Tests\n1. `TestAutoImportMultipleCollisionsRemapped` - expects local versions preserved\n2. `TestAutoImportAllCollisionsRemapped` - expects local versions preserved \n3. `TestAutoImportCollisionRemapMultipleFields` - expects specific collision resolution behavior\n\n## Root Cause\nThese tests were written when ScoreCollisions used reference counting to determine which version to keep. Now it uses content-hash comparison (introduced in commit 2e87329), which produces different but deterministic results.\n\n## Example\nOld behavior: Issue with more references would be kept\nNew behavior: Issue with lexicographically lower content hash is kept\n\n## Solution\nUpdate each test to:\n1. Verify the new content-hash based behavior is correct\n2. Check that the remapped issue (not necessarily local/remote) has the expected content\n3. Ensure dependencies are preserved on the correct remapped issue\n\n## Acceptance Criteria\n- All three autoimport tests pass\n- Tests verify content-hash determinism (same collision always resolves the same way)\n- Tests check dependency preservation on remapped issues\n- Test documentation explains content-hash scoring expectations\n\n## Files to Modify\n- `cmd/bd/autoimport_collision_test.go`\n\n## Testing\nRun: `go test ./cmd/bd -run \"TestAutoImport.*Collision\" -v`","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-28T19:17:28.358028-07:00","updated_at":"2025-10-30T17:12:58.179059-07:00"} +{"id":"bd-b6b2","content_hash":"c7a641bdb4c98b14816e2d85ec09db6def89aa4918ad59f4c1f8f71c6a42c6d4","title":"Feature with design","description":"This is a description","design":"Use MVC pattern","acceptance_criteria":"All tests pass","status":"open","priority":2,"issue_type":"feature","created_at":"2025-10-31T21:40:34.612465-07:00","updated_at":"2025-10-31T21:40:34.612465-07:00"} {"id":"bd-bc2c6191","content_hash":"533e56b8628e24229a4beb52f8683355f6ca699e34a73650bf092003d73c2957","title":"Audit Current Cache Usage","description":"Understand exactly what code depends on the storage cache","acceptance_criteria":"- Document showing all cache dependencies\n- Confirmation that removing cache won't break MCP\n- List of tests that need updating\n\nFiles to examine:\n- internal/rpc/server_cache_storage.go (cache implementation)\n- internal/rpc/client.go (how req.Cwd is set)\n- internal/rpc/server_*.go (all getStorageForRequest calls)\n- integrations/beads-mcp/ (MCP multi-repo logic)\n\nTasks:\n- Document all callers of getStorageForRequest()\n- Verify req.Cwd is only set by RPC client for database discovery\n- Confirm MCP server doesn't rely on multi-repo cache behavior\n- Check if any tests assume multi-repo routing\n- Review environment variables: BEADS_DAEMON_MAX_CACHE_SIZE, BEADS_DAEMON_CACHE_TTL, BEADS_DAEMON_MEMORY_THRESHOLD_MB","status":"open","priority":1,"issue_type":"task","created_at":"2025-10-27T23:02:43.506373-07:00","updated_at":"2025-10-31T20:36:49.334214-07:00"} {"id":"bd-bdaf24d5","content_hash":"6ccdbf2362d22fbbe854fdc666695a7488353799e1a5c49e6095b34178c9bcb4","title":"Final validation test","description":"","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-27T18:27:28.310533-07:00","updated_at":"2025-10-31T12:00:43.184995-07:00","closed_at":"2025-10-31T12:00:43.184995-07:00"} {"id":"bd-c825f867","content_hash":"27cecaa2dc6cdabb2ae77fd65fbf8dca8f4c536bdf140a13b25cdd16376c9845","title":"Add docs/architecture/event_driven.md","description":"Copy event_driven_daemon.md into docs/ folder. Add to documentation index.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-28T16:20:02.431399-07:00","updated_at":"2025-10-30T17:12:58.221939-07:00"} @@ -137,8 +142,11 @@ {"id":"bd-e98221b3","content_hash":"39107dceb86c0f5588342036585cca9cb320d0df2814fe470e688c4172644890","title":"Update AGENTS.md and README.md with \"bd daemons\" documentation","description":"Document the new \"bd daemons\" command and all subcommands in AGENTS.md and README.md. Include examples and troubleshooting guidance.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-26T19:41:11.099254-07:00","updated_at":"2025-10-30T17:12:58.181671-07:00"} {"id":"bd-eb3c","content_hash":"6922e5dc2f24e0fb84ecdb7bea11284f38c3f9e7fed43b90eeef4d6372a96fd5","title":"UX nightmare: multiple ways daemon can fail with misleading messages","description":"","status":"open","priority":0,"issue_type":"epic","created_at":"2025-10-31T21:08:09.090553-07:00","updated_at":"2025-10-31T21:08:09.090553-07:00"} {"id":"bd-eef03e0a","content_hash":"f075e26fe762aa3fc5484f97441c0cc0b296fa49e9c7b1242bda1c5b6c8ec894","title":"Stress test: event storm handling","description":"Simulate 100+ rapid JSONL writes. Verify debouncer batches to single import. Verify no data loss. Test daemon stability.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T20:49:49.138725-07:00","updated_at":"2025-10-31T19:18:50.682925-07:00","closed_at":"2025-10-31T19:18:50.682925-07:00"} +{"id":"bd-ef15","content_hash":"8a2616a707f5ae932357049b0bc922716f4d729724fb8c38b256c91e292f851b","title":"Test Issue","description":"","status":"open","priority":1,"issue_type":"bug","created_at":"2025-10-31T21:40:34.573905-07:00","updated_at":"2025-10-31T21:40:34.573905-07:00"} {"id":"bd-ef72b864","content_hash":"81f5c4fcc229c3ba653d29fc71c9ae3be75ed672296e3e790a88498ee2df3a64","title":"Add MCP server functions for repair commands","description":"Expose new repair commands via MCP server for agent access:\n\nFunctions to add:\n- beads_repair_deps()\n- beads_detect_pollution()\n- beads_validate()\n- beads_resolve_conflicts() (when implemented)\n\nUpdate integrations/beads-mcp/src/beads_mcp/server.py\n\nSee repair_commands.md lines 803-884 for design.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-28T19:38:02.227921-07:00","updated_at":"2025-10-30T17:12:58.180404-07:00","closed_at":"2025-10-29T23:14:44.187562-07:00"} +{"id":"bd-eff3","content_hash":"c7a641bdb4c98b14816e2d85ec09db6def89aa4918ad59f4c1f8f71c6a42c6d4","title":"Feature with design","description":"This is a description","design":"Use MVC pattern","acceptance_criteria":"All tests pass","status":"open","priority":2,"issue_type":"feature","created_at":"2025-10-31T21:41:11.147816-07:00","updated_at":"2025-10-31T21:41:11.147816-07:00"} {"id":"bd-f0d9bcf2","content_hash":"d4c343a7d3ee7985fcde6f9438d9f4a4a98780e4abd75de0d7a7310c31e2cc94","title":"Batch test 1","description":"","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-29T15:29:01.795728-07:00","updated_at":"2025-10-31T12:00:43.184078-07:00","closed_at":"2025-10-31T12:00:43.184078-07:00"} +{"id":"bd-f5d5","content_hash":"3e3467773e73eb9dbb8dd8f213be7157c27d72b53c6cc9776616154db96c3864","title":"Issue with desc","description":"This is a description","status":"open","priority":2,"issue_type":"bug","created_at":"2025-10-31T21:40:34.593981-07:00","updated_at":"2025-10-31T21:40:34.593981-07:00"} {"id":"bd-f8b764c9","content_hash":"9f22cba5111b9e92f2880670226689ac818f5006267bceb36e0502433413f332","title":"Hash-based IDs with aliasing system","description":"Replace sequential auto-increment IDs (bd-1c63eb84, bd-9063acda) with content-hash based IDs (bd-af78e9a2) plus human-friendly aliases (#1, #2).\n\n## Motivation\nCurrent sequential IDs cause collision problems when multiple clones work offline:\n- Non-deterministic convergence in N-way scenarios (bd-cbed9619.1, bd-e6d71828)\n- Complex collision resolution logic (~2,100 LOC)\n- UNIQUE constraint violations during import\n- Requires coordination between workers\n\nHash-based IDs eliminate collisions entirely while aliases preserve human readability.\n\n## Benefits\n- ✅ Collision-free distributed ID generation\n- ✅ Eliminates ~2,100 LOC of collision handling code\n- ✅ Better git merge behavior (different IDs = different JSONL lines)\n- ✅ True offline-first workflows\n- ✅ Simpler auto-import (no remapping needed)\n- ✅ Enables parallel CI/CD workers without coordination\n\n## Design\n- Canonical ID: bd-af78e9a2 (8-char SHA256 prefix of title+desc+timestamp+creator)\n- Alias: #42 (auto-increment per workspace, mutable, display-only)\n- CLI accepts both: bd show bd-af78e9a2 OR bd show #42\n- JSONL stores hash IDs only (aliases reconstructed on import)\n- Alias conflicts resolved via content-hash ordering (deterministic)\n\n## Breaking Change\nThis is a v2.0 feature requiring migration. Provide bd migrate --hash-ids tool.\n\n## Timeline\n~9 weeks (Phase 1: Hash IDs 4w, Phase 2: Aliases 3w, Phase 3: Testing 2w)\n\n## Dependencies\nShould complete after bd-7c5915ae (cleanup validation) and before bd-710a4916 (CRDT).","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-10-29T21:23:49.592315-07:00","updated_at":"2025-10-31T12:32:32.6038-07:00","closed_at":"2025-10-31T12:32:32.6038-07:00"} {"id":"bd-f8b764c9.1","content_hash":"4b348a47b8819c70bed5407a8a785605238738f5e56e344a8140a13de2c5dfd8","title":"Dogfood: Migrate beads repo to hash IDs","description":"Final validation: migrate the beads project itself to hash-based IDs.\n\n## Purpose\nDogfooding the migration on beads' own issue database to:\n1. Validate migration tool works on real data\n2. Discover edge cases\n3. Verify all workflows still work\n4. Build confidence for users\n\n## Pre-Migration Checklist\n- [ ] All bd-f8b764c9 child tasks completed\n- [ ] All tests pass: `go test ./...`\n- [ ] Migration tool tested on test databases\n- [ ] Documentation updated\n- [ ] MCP server updated and published\n- [ ] Clean git status\n\n## Migration Steps\n\n### 1. Create Backup\n```bash\n# Backup database\ncp -r .beads .beads.backup-1761798568\n\n# Backup JSONL\ncp .beads/beads.jsonl .beads/beads.jsonl.backup\n\n# Create git branch for migration\ngit checkout -b hash-id-migration\ngit add .beads.backup-*\ngit commit -m \"Pre-migration backup\"\n```\n\n### 2. Run Migration (Dry Run)\n```bash\nbd migrate --hash-ids --dry-run \u003e migration-plan.txt\ncat migration-plan.txt\n\n# Review:\n# - Number of issues to migrate\n# - Hash collision check (should be zero)\n# - Text reference updates\n# - Dependency updates\n```\n\n### 3. Run Migration (Real)\n```bash\nbd migrate --hash-ids 2\u003e\u00261 | tee migration-log.txt\n\n# Expected output:\n# ✓ Backup created: .beads/beads.db.backup-1234567890\n# ✓ Generated 150 hash IDs\n# ✓ No hash collisions detected\n# ✓ Updated issues table schema\n# ✓ Updated 150 issue IDs\n# ✓ Updated 87 dependencies\n# ✓ Updated 234 text references\n# ✓ Exported to .beads/beads.jsonl\n# ✓ Migration complete!\n```\n\n### 4. Validation\n\n#### Database Integrity\n```bash\n# Check all issues have hash IDs\nbd list | grep -v \"bd-[a-f0-9]\\{8\\}\" \u0026\u0026 echo \"FAIL: Non-hash IDs found\"\n\n# Check all issues have aliases\nsqlite3 .beads/beads.db \"SELECT COUNT(*) FROM issues WHERE alias IS NULL\"\n# Should be 0\n\n# Check no alias duplicates\nsqlite3 .beads/beads.db \"SELECT alias, COUNT(*) FROM issues GROUP BY alias HAVING COUNT(*) \u003e 1\"\n# Should be empty\n```\n\n#### Functionality Tests\n```bash\n# Test show by hash ID\nbd show bd-\n\n# Test show by alias\nbd show #1\n\n# Test create new issue\nbd create \"Test issue after migration\" -p 2\n# Should get hash ID + alias\n\n# Test update\nbd update #1 --priority 1\n\n# Test dependencies\nbd dep tree #1\n\n# Test export\nbd export\ngit diff .beads/beads.jsonl\n# Should show hash IDs\n```\n\n#### Text Reference Validation\n```bash\n# Check that old IDs were updated in descriptions\ngrep -r \"bd-[0-9]\\{1,3\\}[^a-f0-9]\" .beads/beads.jsonl \u0026\u0026 echo \"FAIL: Old ID format found\"\n\n# Verify hash ID references exist\ngrep -o \"bd-[a-f0-9]\\{8\\}\" .beads/beads.jsonl | sort -u | wc -l\n# Should match number of hash IDs\n```\n\n### 5. Commit Migration\n```bash\ngit add .beads/beads.jsonl .beads/beads.db\ngit commit -m \"Migrate to hash-based IDs (v2.0)\n\n- Migrated 150 issues to hash IDs\n- Preserved aliases (#1-#150)\n- Updated 87 dependencies\n- Updated 234 text references\n- Zero hash collisions\n\nMigration log: migration-log.txt\"\n\ngit push origin hash-id-migration\n```\n\n### 6. Create PR\n```bash\ngh pr create --title \"Migrate to hash-based IDs (v2.0)\" --body \"## Summary\nMigrates beads project to hash-based IDs as part of v2.0 release.\n\n## Migration Stats\n- Issues migrated: 150\n- Dependencies updated: 87\n- Text references updated: 234\n- Hash collisions: 0\n- Aliases assigned: 150\n\n## Validation\n- ✅ All tests pass\n- ✅ Database integrity verified\n- ✅ All workflows tested (show, update, create, deps)\n- ✅ Text references updated correctly\n- ✅ Export produces valid JSONL\n\n## Files Changed\n- `.beads/beads.jsonl` - Hash IDs in all entries\n- `.beads/beads.db` - Schema updated with aliases\n\n## Rollback\nIf issues arise:\n\\`\\`\\`bash\nmv .beads.backup-1234567890 .beads\nbd export\n\\`\\`\\`\n\nSee migration-log.txt for full details.\"\n```\n\n### 7. Merge and Cleanup\n```bash\n# After PR approval\ngit checkout main\ngit merge hash-id-migration\ngit push origin main\n\n# Tag release\ngit tag v2.0.0\ngit push origin v2.0.0\n\n# Cleanup\nrm migration-log.txt migration-plan.txt\ngit checkout .beads.backup-* # Keep in git history\n```\n\n## Rollback Procedure\nIf migration fails or has issues:\n\n```bash\n# Restore backup\nmv .beads .beads.failed-migration\nmv .beads.backup-1234567890 .beads\n\n# Regenerate JSONL\nbd export\n\n# Verify restoration\nbd list\ngit diff .beads/beads.jsonl\n\n# Cleanup\ngit checkout hash-id-migration\ngit reset --hard main\n```\n\n## Post-Migration Communication\n\n### GitHub Issue/Discussion\n```markdown\n## Beads v2.0 Released: Hash-Based IDs\n\nWe've migrated beads to hash-based IDs! 🎉\n\n**What changed:**\n- Issues now use hash IDs (bd-af78e9a2) instead of sequential (bd-cb64c226.3)\n- Human-friendly aliases (#42) for easy reference\n- Zero collision risk in distributed workflows\n\n**Action required:**\nIf you have a local clone, you need to migrate:\n\n\\`\\`\\`bash\ngit pull origin main\nbd migrate --hash-ids\ngit push origin main\n\\`\\`\\`\n\nSee MIGRATION.md for details.\n\n**Benefits:**\n- ✅ No more ID collisions\n- ✅ Work offline without coordination\n- ✅ Simpler codebase (-2,100 LOC)\n\nQuestions? Reply here or see docs/HASH_IDS.md\n```\n\n## Success Criteria\n- [ ] Migration completes without errors\n- [ ] All validation checks pass\n- [ ] PR merged to main\n- [ ] v2.0.0 tagged and released\n- [ ] Documentation updated\n- [ ] Community notified\n- [ ] No rollback needed within 1 week\n\n## Files to Create\n- migration-log.txt (transient)\n- migration-plan.txt (transient)\n\n## Timeline\nExecute after all other bd-f8b764c9 tasks complete (estimated: ~8 weeks from start)","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T21:29:28.591526-07:00","updated_at":"2025-10-31T12:32:32.607092-07:00","closed_at":"2025-10-31T12:32:32.607092-07:00","dependencies":[{"issue_id":"bd-f8b764c9.1","depends_on_id":"bd-f8b764c9","type":"parent-child","created_at":"2025-10-29T21:29:28.59248-07:00","created_by":"stevey"},{"issue_id":"bd-f8b764c9.1","depends_on_id":"bd-f8b764c9.4","type":"blocks","created_at":"2025-10-29T21:29:28.593033-07:00","created_by":"stevey"},{"issue_id":"bd-f8b764c9.1","depends_on_id":"bd-f8b764c9.3","type":"blocks","created_at":"2025-10-29T21:29:28.593437-07:00","created_by":"stevey"},{"issue_id":"bd-f8b764c9.1","depends_on_id":"bd-f8b764c9.12","type":"blocks","created_at":"2025-10-29T21:29:28.593876-07:00","created_by":"stevey"},{"issue_id":"bd-f8b764c9.1","depends_on_id":"bd-f8b764c9.2","type":"blocks","created_at":"2025-10-29T21:29:28.594521-07:00","created_by":"stevey"}]} {"id":"bd-f8b764c9.10","content_hash":"0315167c9165d2bf5f9e782f760a6350529126f9d1010d9ac206be9d8b9cc017","title":"Add alias field to database schema","description":"Extend database schema to support human-friendly aliases alongside hash IDs.\n\n## Database Changes\n\n### 1. Add alias column to issues table\n```sql\nALTER TABLE issues ADD COLUMN alias INTEGER UNIQUE;\nCREATE INDEX idx_issues_alias ON issues(alias);\n```\n\n### 2. Add alias counter table\n```sql\nCREATE TABLE alias_counter (\n id INTEGER PRIMARY KEY CHECK (id = 1),\n next_alias INTEGER NOT NULL DEFAULT 1\n);\nINSERT INTO alias_counter (id, next_alias) VALUES (1, 1);\n```\n\n### 3. Add alias conflict tracking (for multi-clone scenarios)\n```sql\nCREATE TABLE alias_history (\n issue_id TEXT NOT NULL,\n alias INTEGER NOT NULL,\n assigned_at TIMESTAMP NOT NULL,\n workspace_id TEXT NOT NULL,\n PRIMARY KEY (issue_id, alias)\n);\n```\n\n## API Changes\n\n### CreateIssue\n- Generate hash ID\n- Assign next available alias\n- Store both in database\n\n### ResolveAliasConflicts (new function)\n- Detect conflicting alias assignments after import\n- Apply resolution strategy (content-hash ordering)\n- Reassign losers to next available aliases\n\n## Migration Path\n```bash\nbd migrate --add-aliases # Adds columns, assigns aliases to existing issues\n```\n\n## Files to Modify\n- internal/storage/sqlite/schema.go\n- internal/storage/sqlite/sqlite.go (CreateIssue, GetIssue)\n- internal/storage/sqlite/aliases.go (new file for alias logic)\n- internal/storage/sqlite/migrations.go\n\n## Testing\n- Test alias auto-assignment on create\n- Test alias uniqueness constraint\n- Test alias lookup performance\n- Test alias conflict resolution","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T21:24:13.968241-07:00","updated_at":"2025-10-31T12:32:32.610663-07:00","closed_at":"2025-10-31T12:32:32.610663-07:00","dependencies":[{"issue_id":"bd-f8b764c9.10","depends_on_id":"bd-f8b764c9","type":"parent-child","created_at":"2025-10-29T21:24:13.96959-07:00","created_by":"stevey"},{"issue_id":"bd-f8b764c9.10","depends_on_id":"bd-f8b764c9.11","type":"blocks","created_at":"2025-10-29T21:29:45.952824-07:00","created_by":"stevey"}]} @@ -165,3 +173,4 @@ {"id":"bd-fb95094c.8","content_hash":"8a8df680150f73fef6ac9cede6a1b2b0033406b35553a8a3795b13a542cd62f1","title":"Remove unreachable utility functions","description":"Several small utility functions are unreachable:\n\nFiles to clean:\n1. `internal/storage/sqlite/hash.go` - `computeIssueContentHash` (line 17)\n - Check if entire file can be deleted if only contains this function\n\n2. `internal/config/config.go` - `FileUsed` (line 151)\n - Delete unused config helper\n\n3. `cmd/bd/git_sync_test.go` - `verifyIssueOpen` (line 300)\n - Delete dead test helper\n\n4. `internal/compact/haiku.go` - `HaikuClient.SummarizeTier2` (line 81)\n - Tier 2 summarization not implemented\n - Options: implement feature OR delete method\n\nImpact: Removes 50-100 LOC depending on decisions","acceptance_criteria":"- Remove unreachable functions\n- If entire files can be deleted (like hash.go), delete them\n- For SummarizeTier2: decide to implement or delete, document decision\n- All tests pass: `go test ./...`\n- Verify no callers exist for each function","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-27T20:30:19.963392-07:00","updated_at":"2025-10-30T17:12:58.206334-07:00","closed_at":"2025-10-28T14:14:55.724226-07:00","labels":["cleanup","dead-code","phase-1"],"dependencies":[{"issue_id":"bd-fb95094c.8","depends_on_id":"bd-fb95094c","type":"parent-child","created_at":"2025-10-27T20:30:19.968126-07:00","created_by":"daemon"}]} {"id":"bd-fb95094c.9","content_hash":"404b82a19dde2fdece7eb6bb3b816db7906e81a03a5a05341ed631af7a2a8e87","title":"Remove unreachable RPC methods","description":"Several RPC server and client methods are unreachable and should be removed:\n\nServer methods (internal/rpc/server.go):\n- `Server.GetLastImportTime` (line 2116)\n- `Server.SetLastImportTime` (line 2123)\n- `Server.findJSONLPath` (line 2255)\n\nClient methods (internal/rpc/client.go):\n- `Client.Import` (line 311) - RPC import not used (daemon uses autoimport)\n\nEvidence:\n```bash\ngo run golang.org/x/tools/cmd/deadcode@latest -test ./...\n```\n\nImpact: Removes ~80 LOC of unused RPC code","acceptance_criteria":"- Remove the 4 unreachable methods (~80 LOC total)\n- Verify no callers: `grep -r \"GetLastImportTime\\|SetLastImportTime\\|findJSONLPath\" .`\n- All tests pass: `go test ./internal/rpc/...`\n- Daemon functionality works: test daemon start/stop/operations","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-27T20:30:19.962209-07:00","updated_at":"2025-10-30T17:12:58.201466-07:00","closed_at":"2025-10-28T16:07:26.103703-07:00","labels":["cleanup","dead-code","phase-1","rpc"],"dependencies":[{"issue_id":"bd-fb95094c.9","depends_on_id":"bd-fb95094c","type":"parent-child","created_at":"2025-10-27T20:30:19.965239-07:00","created_by":"daemon"}]} {"id":"bd-fd8753d9","content_hash":"ae13fc833baa7d586a48ca62648dd4f0ee61fcc96aa1f238fb2639b6657b07da","title":"Document bd edit command and verify MCP exclusion","description":"Follow-up from PR #152:\n1. Add \"bd edit\" to AGENTS.md with \"Humans only\" note\n2. Verify MCP server doesn't expose bd edit command\n3. Consider adding test for command registration","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-26T13:23:47.982295-07:00","updated_at":"2025-10-30T17:12:58.226229-07:00"} +{"id":"bd-ff9e","content_hash":"8a2616a707f5ae932357049b0bc922716f4d729724fb8c38b256c91e292f851b","title":"Test Issue","description":"","status":"open","priority":1,"issue_type":"bug","created_at":"2025-10-31T21:40:38.512682-07:00","updated_at":"2025-10-31T21:40:38.512682-07:00"} diff --git a/cmd/bd/create_test.go b/cmd/bd/create_test.go new file mode 100644 index 00000000..1efc1f8c --- /dev/null +++ b/cmd/bd/create_test.go @@ -0,0 +1,452 @@ +package main + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/steveyegge/beads/internal/types" +) + +func TestCreate_BasicIssue(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + issue := &types.Issue{ + Title: "Test Issue", + Priority: 1, + IssueType: types.TypeBug, + Status: types.StatusOpen, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, issue, "test"); err != nil { + t.Fatalf("failed to create issue: %v", err) + } + + issues, err := s.SearchIssues(ctx, "", types.IssueFilter{}) + if err != nil { + t.Fatalf("failed to search issues: %v", err) + } + + if len(issues) != 1 { + t.Fatalf("expected 1 issue, got %d", len(issues)) + } + + created := issues[0] + if created.Title != "Test Issue" { + t.Errorf("expected title 'Test Issue', got %q", created.Title) + } + if created.Priority != 1 { + t.Errorf("expected priority 1, got %d", created.Priority) + } + if created.IssueType != types.TypeBug { + t.Errorf("expected type bug, got %s", created.IssueType) + } +} + +func TestCreate_WithDescription(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + issue := &types.Issue{ + Title: "Issue with desc", + Description: "This is a description", + Priority: 2, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, issue, "test"); err != nil { + t.Fatalf("failed to create issue: %v", err) + } + + issues, err := s.SearchIssues(ctx, "", types.IssueFilter{}) + if err != nil { + t.Fatalf("failed to search issues: %v", err) + } + + if len(issues) != 1 { + t.Fatalf("expected 1 issue, got %d", len(issues)) + } + + if issues[0].Description != "This is a description" { + t.Errorf("expected description, got %q", issues[0].Description) + } +} + +func TestCreate_WithDesignAndAcceptance(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + issue := &types.Issue{ + Title: "Feature with design", + Design: "Use MVC pattern", + AcceptanceCriteria: "All tests pass", + IssueType: types.TypeFeature, + Priority: 2, + Status: types.StatusOpen, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, issue, "test"); err != nil { + t.Fatalf("failed to create issue: %v", err) + } + + issues, err := s.SearchIssues(ctx, "", types.IssueFilter{}) + if err != nil { + t.Fatalf("failed to search issues: %v", err) + } + + if len(issues) != 1 { + t.Fatalf("expected 1 issue, got %d", len(issues)) + } + + created := issues[0] + if created.Design != "Use MVC pattern" { + t.Errorf("expected design, got %q", created.Design) + } + if created.AcceptanceCriteria != "All tests pass" { + t.Errorf("expected acceptance criteria, got %q", created.AcceptanceCriteria) + } +} + +func TestCreate_WithLabels(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + issue := &types.Issue{ + Title: "Issue with labels", + Priority: 0, + Status: types.StatusOpen, + IssueType: types.TypeBug, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, issue, "test"); err != nil { + t.Fatalf("failed to create issue: %v", err) + } + + // Add labels + if err := s.AddLabel(ctx, issue.ID, "bug", "test"); err != nil { + t.Fatalf("failed to add bug label: %v", err) + } + if err := s.AddLabel(ctx, issue.ID, "critical", "test"); err != nil { + t.Fatalf("failed to add critical label: %v", err) + } + + labels, err := s.GetLabels(ctx, issue.ID) + if err != nil { + t.Fatalf("failed to get labels: %v", err) + } + + if len(labels) != 2 { + t.Errorf("expected 2 labels, got %d", len(labels)) + } + + labelMap := make(map[string]bool) + for _, l := range labels { + labelMap[l] = true + } + + if !labelMap["bug"] || !labelMap["critical"] { + t.Errorf("expected labels 'bug' and 'critical', got %v", labels) + } +} + +func TestCreate_WithDependencies(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + parent := &types.Issue{ + Title: "Parent issue", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, parent, "test"); err != nil { + t.Fatalf("failed to create parent: %v", err) + } + + child := &types.Issue{ + Title: "Child issue", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, child, "test"); err != nil { + t.Fatalf("failed to create child: %v", err) + } + + // Add dependency + dep := &types.Dependency{ + IssueID: child.ID, + DependsOnID: parent.ID, + Type: types.DepBlocks, + CreatedAt: time.Now(), + } + + if err := s.AddDependency(ctx, dep, "test"); err != nil { + t.Fatalf("failed to add dependency: %v", err) + } + + deps, err := s.GetDependencies(ctx, child.ID) + if err != nil { + t.Fatalf("failed to get dependencies: %v", err) + } + + if len(deps) != 1 { + t.Fatalf("expected 1 dependency, got %d", len(deps)) + } + + if deps[0].ID != parent.ID { + t.Errorf("expected dependency on %s, got %s", parent.ID, deps[0].ID) + } +} + +func TestCreate_WithDiscoveredFromDependency(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + parent := &types.Issue{ + Title: "Parent work", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, parent, "test"); err != nil { + t.Fatalf("failed to create parent: %v", err) + } + + discovered := &types.Issue{ + Title: "Found bug", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeBug, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, discovered, "test"); err != nil { + t.Fatalf("failed to create discovered issue: %v", err) + } + + // Add discovered-from dependency + dep := &types.Dependency{ + IssueID: discovered.ID, + DependsOnID: parent.ID, + Type: types.DepDiscoveredFrom, + CreatedAt: time.Now(), + } + + if err := s.AddDependency(ctx, dep, "test"); err != nil { + t.Fatalf("failed to add dependency: %v", err) + } + + deps, err := s.GetDependencies(ctx, discovered.ID) + if err != nil { + t.Fatalf("failed to get dependencies: %v", err) + } + + if len(deps) != 1 { + t.Fatalf("expected 1 dependency, got %d", len(deps)) + } + + if deps[0].ID != parent.ID { + t.Errorf("expected dependency on %s, got %s", parent.ID, deps[0].ID) + } +} + +func TestCreate_WithExplicitID(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + issue := &types.Issue{ + ID: "test-abc123", + Title: "Custom ID issue", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, issue, "test"); err != nil { + t.Fatalf("failed to create issue: %v", err) + } + + issues, err := s.SearchIssues(ctx, "", types.IssueFilter{}) + if err != nil { + t.Fatalf("failed to search issues: %v", err) + } + + if len(issues) != 1 { + t.Fatalf("expected 1 issue, got %d", len(issues)) + } + + if issues[0].ID != "test-abc123" { + t.Errorf("expected ID 'test-abc123', got %q", issues[0].ID) + } +} + +func TestCreate_WithAssignee(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + issue := &types.Issue{ + Title: "Assigned issue", + Assignee: "alice", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, issue, "test"); err != nil { + t.Fatalf("failed to create issue: %v", err) + } + + issues, err := s.SearchIssues(ctx, "", types.IssueFilter{}) + if err != nil { + t.Fatalf("failed to search issues: %v", err) + } + + if len(issues) != 1 { + t.Fatalf("expected 1 issue, got %d", len(issues)) + } + + if issues[0].Assignee != "alice" { + t.Errorf("expected assignee 'alice', got %q", issues[0].Assignee) + } +} + +func TestCreate_AllIssueTypes(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + issueTypes := []types.IssueType{ + types.TypeBug, + types.TypeFeature, + types.TypeTask, + types.TypeEpic, + types.TypeChore, + } + + for _, issueType := range issueTypes { + issue := &types.Issue{ + Title: "Test " + string(issueType), + IssueType: issueType, + Priority: 2, + Status: types.StatusOpen, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, issue, "test"); err != nil { + t.Fatalf("failed to create issue type %s: %v", issueType, err) + } + } + + issues, err := s.SearchIssues(ctx, "", types.IssueFilter{}) + if err != nil { + t.Fatalf("failed to search issues: %v", err) + } + + if len(issues) != 5 { + t.Errorf("expected 5 issues, got %d", len(issues)) + } +} + +func TestCreate_MultipleDependencies(t *testing.T) { + tmpDir := t.TempDir() + testDB := filepath.Join(tmpDir, ".beads", "beads.db") + s := newTestStore(t, testDB) + ctx := context.Background() + + parent1 := &types.Issue{ + Title: "Parent 1", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + parent2 := &types.Issue{ + Title: "Parent 2", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + child := &types.Issue{ + Title: "Child", + Priority: 1, + Status: types.StatusOpen, + IssueType: types.TypeTask, + CreatedAt: time.Now(), + } + + if err := s.CreateIssue(ctx, parent1, "test"); err != nil { + t.Fatalf("failed to create parent1: %v", err) + } + if err := s.CreateIssue(ctx, parent2, "test"); err != nil { + t.Fatalf("failed to create parent2: %v", err) + } + if err := s.CreateIssue(ctx, child, "test"); err != nil { + t.Fatalf("failed to create child: %v", err) + } + + // Add multiple dependencies + dep1 := &types.Dependency{ + IssueID: child.ID, + DependsOnID: parent1.ID, + Type: types.DepBlocks, + CreatedAt: time.Now(), + } + dep2 := &types.Dependency{ + IssueID: child.ID, + DependsOnID: parent2.ID, + Type: types.DepRelated, + CreatedAt: time.Now(), + } + + if err := s.AddDependency(ctx, dep1, "test"); err != nil { + t.Fatalf("failed to add dep1: %v", err) + } + if err := s.AddDependency(ctx, dep2, "test"); err != nil { + t.Fatalf("failed to add dep2: %v", err) + } + + deps, err := s.GetDependencies(ctx, child.ID) + if err != nil { + t.Fatalf("failed to get dependencies: %v", err) + } + + if len(deps) != 2 { + t.Fatalf("expected 2 dependencies, got %d", len(deps)) + } +}