Merge remote changes and add TTL/expiration tests (bd-d6aq)
- Resolved beads.jsonl merge conflict - Created test_reservation_ttl.py with 4 integration tests - Tests short TTL (30s), reservation blocking, auto-release, and renewal - Mock server implements full TTL management with expiration tracking - All tests passing in ~57s (includes 30s wait for expiration) - Updated integration test README with new test documentation
This commit is contained in:
@@ -89,7 +89,7 @@
|
||||
{"id":"bd-502e","content_hash":"0f40053f59ff205d858a9ddf0be845df1d52471cc25a812df78cb3d4667efbdd","title":"Add comprehensive tests for sync branch daemon logic","description":"The daemon sync branch functionality (bd-6545) was implemented but needs proper end-to-end testing.\n\nCurrent implementation:\n- daemon_sync_branch.go has syncBranchCommitAndPush() and syncBranchPull()\n- daemon_sync.go has been updated to use these functions when sync.branch is configured\n- All daemon tests pass, but no specific tests for sync branch behavior\n\nTesting needed:\n- Test that daemon commits to sync branch when sync.branch is configured\n- Test that daemon commits to current branch when sync.branch is NOT configured (backward compatibility)\n- Test that daemon pulls from sync branch and syncs JSONL back to main repo\n- Test worktree creation and health checks during daemon operations\n- Test error handling (missing branch, worktree corruption, etc.)\n\nKey challenge: Tests need to run in the context of the git repo (getGitRoot() uses current working directory), so test setup needs to properly change directory or mock the git root detection.\n\nReference existing daemon tests in daemon_test.go and daemon_autoimport_test.go for patterns.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-02T15:59:13.341491-08:00","updated_at":"2025-11-02T16:39:53.278313-08:00","closed_at":"2025-11-02T16:39:53.278313-08:00","source_repo":".","dependencies":[{"issue_id":"bd-502e","depends_on_id":"bd-6545","type":"parent-child","created_at":"2025-11-02T15:59:13.342331-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-51jl","content_hash":"f926fc6615da9c2d0270449079fa7b4e937f8e817a73c7df179bb6d92ca464e6","title":"Feature P1","description":"","status":"closed","priority":1,"issue_type":"feature","assignee":"bob","created_at":"2025-11-07T19:04:24.852171-08:00","updated_at":"2025-11-07T22:07:17.343481-08:00","closed_at":"2025-11-07T21:55:09.426728-08:00","source_repo":"."}
|
||||
{"id":"bd-5314bddf","content_hash":"1c57b7a18279f8d87c68af9e1b99234ba4017a03c3f3b1cdb65ce4a8b93e12aa","title":"bd detect-pollution - Test pollution detector","description":"Detect test issues that leaked into production DB.\n\nPattern matching for:\n- Titles starting with 'test', 'benchmark', 'sample'\n- Sequential numbering (test-1, test-2)\n- Generic descriptions\n- Created in rapid succession\n\nOptional AI scoring for confidence.\n\nFiles: cmd/bd/detect_pollution.go (new)","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-28T14:48:17.466906-07:00","updated_at":"2025-11-06T19:36:13.970321-08:00","closed_at":"2025-11-06T19:27:11.75884-08:00","source_repo":"."}
|
||||
{"id":"bd-537e","content_hash":"88c95061990fd7be8e008d2571bf3cda613f6dda50218d7166a3c7af7e28469f","title":"Add external_ref change tracking and auditing","description":"Currently we don't track when external_ref is added, removed, or changed. This would be useful for debugging and auditing.\n\nProposed features:\n- Log event when external_ref changes\n- Track in events table with old/new values\n- Add query to find issues where external_ref changed\n- Add metrics: issues with external_ref vs without\n\nUse cases:\n- Debugging import issues\n- Understanding which issues are externally managed\n- Auditing external system linkage\n\nRelated: bd-1022","status":"open","priority":4,"issue_type":"feature","created_at":"2025-11-02T15:32:31.276883-08:00","updated_at":"2025-11-02T15:32:31.276883-08:00","source_repo":"."}
|
||||
{"id":"bd-537e","content_hash":"3d8bd59053d657a3710708f5e70feb9baa9545a87383286ff6fad29437856c44","title":"Add external_ref change tracking and auditing","description":"Currently we don't track when external_ref is added, removed, or changed. This would be useful for debugging and auditing.\n\nProposed features:\n- Log event when external_ref changes\n- Track in events table with old/new values\n- Add query to find issues where external_ref changed\n- Add metrics: issues with external_ref vs without\n\nUse cases:\n- Debugging import issues\n- Understanding which issues are externally managed\n- Auditing external system linkage\n\nRelated: bd-1022","status":"closed","priority":4,"issue_type":"feature","created_at":"2025-11-02T15:32:31.276883-08:00","updated_at":"2025-11-08T02:24:24.68524-08:00","closed_at":"2025-11-08T02:20:01.022406-08:00","source_repo":"."}
|
||||
{"id":"bd-581b80b3","content_hash":"f32f7d8f0b07aaaeb9d07d8a1d000eef8fc79cf864e8aa20ebb899f6e359ebda","title":"bd find-duplicates - AI-powered duplicate detection","description":"Find semantically duplicate issues.\n\nApproaches:\n1. Mechanical: Exact title/description matching\n2. Embeddings: Cosine similarity (cheap, scalable)\n3. AI: LLM-based semantic comparison (expensive, accurate)\n\nUses embeddings by default for \u003e100 issues.\n\nFiles: cmd/bd/find_duplicates.go (new)","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T20:49:49.126801-07:00","updated_at":"2025-11-06T19:36:13.970636-08:00","closed_at":"2025-11-06T19:27:11.778663-08:00","source_repo":"."}
|
||||
{"id":"bd-58c0","content_hash":"112d4123250ac875619a1f239cbf73c859b58d87f2b45a2d649da320dd72ecc5","title":"Fix transaction conflict in TryResurrectParent","description":"Integration test TestImportWithDeletedParent fails with 'database is locked' error when resurrection happens inside CreateIssue.\n\nRoot cause: TryResurrectParent calls conn.Get() and insertIssue() which conflicts with existing transaction in CreateIssue.\n\nError: failed to create tombstone for parent bd-parent: failed to insert issue: sqlite3: database is locked\n\nSolution: Refactor resurrection to accept optional transaction parameter, use existing transaction when available instead of creating new connection.\n\nImpact: Blocks resurrection from working in CreateIssue flow, only works in EnsureIDs (which may not have active transaction).","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-11-04T16:32:20.981027-08:00","updated_at":"2025-11-04T17:00:44.258881-08:00","closed_at":"2025-11-04T17:00:44.258881-08:00","source_repo":".","dependencies":[{"issue_id":"bd-58c0","depends_on_id":"bd-d19a","type":"discovered-from","created_at":"2025-11-04T16:32:20.981969-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-5a90","content_hash":"819c14b3bb55fcd113b4e848e4bfcb0c3475756658575dba8d34922ca8e14077","title":"Test parent issue","description":"","status":"open","priority":3,"issue_type":"task","created_at":"2025-11-02T11:50:35.85367-08:00","updated_at":"2025-11-02T11:50:35.85367-08:00","source_repo":"."}
|
||||
@@ -122,6 +122,7 @@
|
||||
{"id":"bd-6sd1","content_hash":"1db772b8c6d380085b5f9b5978cf9c853723c24b5aa9245b307e473ce894d1d5","title":"Issue to close","description":"","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-11-07T19:00:16.547698-08:00","updated_at":"2025-11-07T19:00:16.570826-08:00","closed_at":"2025-11-07T19:00:16.570826-08:00","source_repo":"."}
|
||||
{"id":"bd-6z7l","content_hash":"96ccdda5d2ef893f70cba842f813665cd3a8ae05cdc5fffef5f8f8a17425f145","title":"Auto-detect scenarios and prompt users","description":"Detect when user is in fork/contributor scenario and prompt with helpful suggestions. Check: git remote relationships, existing .beads config, repo ownership. Suggest appropriate wizard.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-05T18:04:30.070695-08:00","updated_at":"2025-11-05T19:27:33.074733-08:00","closed_at":"2025-11-05T18:57:03.315476-08:00","source_repo":".","dependencies":[{"issue_id":"bd-6z7l","depends_on_id":"bd-8rd","type":"parent-child","created_at":"2025-11-05T18:04:39.205478-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-710a4916","content_hash":"f868eafd3460dccd57e0c50a27ad7fb273547d37dad7eb83efd3678106fad62a","title":"CRDT-based architecture for guaranteed convergence (v2.0)","description":"## Vision\nRedesign beads around Conflict-Free Replicated Data Types (CRDTs) to provide mathematical guarantees for N-way collision resolution at arbitrary scale.\n\n## Current Limitations\n- Content-hash based collision resolution fails at 5+ clones\n- Non-deterministic convergence in multi-round scenarios\n- UNIQUE constraint violations during rename operations\n- No formal proof of convergence properties\n\n## CRDT Benefits\n- Provably convergent (Strong Eventual Consistency)\n- Commutative/Associative/Idempotent operations\n- No coordination required between clones\n- Scales to 100+ concurrent workers\n- Well-understood mathematical foundations\n\n## Proposed Architecture\n\n### 1. UUID-Based IDs\nReplace sequential IDs with UUIDs:\n- Current: bd-1c63eb84, bd-9063acda, bd-4d80b7b1\n- CRDT: bd-a1b2c3d4-e5f6-7890-abcd-ef1234567890\n- Human aliases maintained separately: #42 maps to UUID\n\n### 2. Last-Write-Wins (LWW) Elements\nEach field becomes an LWW register:\n- title: (timestamp, clone_id, value)\n- status: (timestamp, clone_id, value)\n- Deterministic conflict resolution via Lamport timestamp + clone_id tiebreaker\n\n### 3. Operation Log\nTrack all operations as CRDT ops:\n- CREATE(uuid, timestamp, clone_id, fields)\n- UPDATE(uuid, field, timestamp, clone_id, value)\n- DELETE(uuid, timestamp, clone_id) - tombstone, not hard delete\n\n### 4. 
Sync as Merge\nSyncing becomes merging two CRDT states:\n- No merge conflicts possible\n- Deterministic merge function\n- Guaranteed convergence\n\n## Implementation Phases\n\n### Phase 1: Research \u0026 Design (4 weeks)\n- Study existing CRDT implementations (Automerge, Yjs, Loro)\n- Design schema for CRDT-based issue tracking\n- Prototype LWW-based Issue CRDT\n- Benchmark performance vs current system\n\n### Phase 2: Parallel Implementation (6 weeks)\n- Implement CRDT storage layer alongside SQLite\n- Build conversion tools: SQLite ↔ CRDT\n- Maintain backward compatibility with v1.x format\n- Migration path for existing databases\n\n### Phase 3: Testing \u0026 Validation (4 weeks)\n- Formal verification of convergence properties\n- Stress testing with 100+ clone scenario\n- Performance profiling and optimization\n- Documentation and examples\n\n### Phase 4: Migration \u0026 Rollout (4 weeks)\n- Release v2.0-beta with CRDT backend\n- Gradual migration from v1.x\n- Monitoring and bug fixes\n- Final v2.0 release\n\n## Risks \u0026 Mitigations\n\n**Risk 1: Performance overhead**\n- Mitigation: Benchmark early, optimize hot paths\n- CRDTs can be slower than append-only logs\n- May need compaction strategy\n\n**Risk 2: Storage bloat**\n- Mitigation: Implement operation log compaction\n- Tombstone garbage collection for deleted issues\n- Periodic snapshots to reduce log size\n\n**Risk 3: Breaking changes**\n- Mitigation: Maintain v1.x compatibility layer\n- Gradual migration tools\n- Dual-mode operation during transition\n\n**Risk 4: Complexity**\n- Mitigation: Use battle-tested CRDT libraries\n- Comprehensive documentation\n- Clear migration guide\n\n## Success Criteria\n- 100-clone collision test passes without failures\n- Formal proof of convergence properties\n- Performance within 2x of current system\n- Zero manual conflict resolution required\n- Backward compatible with v1.x databases\n\n## Timeline\n18-20 weeks total (4-5 months)\n\n## References\n- Automerge: 
https://automerge.org\n- Yjs: https://docs.yjs.dev\n- Loro: https://loro.dev\n- CRDT theory: Shapiro et al, A comprehensive study of CRDTs\n- Related issues: bd-e6d71828, bd-7a2b58fc,-1","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-29T10:23:57.978339-07:00","updated_at":"2025-11-08T01:58:15.280264-08:00","closed_at":"2025-11-08T00:54:51.171319-08:00","source_repo":"."}
|
||||
{"id":"bd-71ky","content_hash":"83942b83e4bdf8446d1fa2309145e6469d80e3992ab4fdc9eea704fa3920afac","title":"Fix bd --version and bd completion to work without database","description":"","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-11-08T02:06:00.78393-08:00","updated_at":"2025-11-08T02:06:11.452474-08:00","closed_at":"2025-11-08T02:06:11.452474-08:00","source_repo":"."}
|
||||
{"id":"bd-72w","content_hash":"55110afd5c4cd8e94796fe61fada9e32351e76f7fca57ce15e52fe6443e6e117","title":"Q4 Platform Improvements","description":"## Overview\n\n[Describe the high-level goal and scope of this epic]\n\n## Success Criteria\n\n- [ ] Criteria 1\n- [ ] Criteria 2\n- [ ] Criteria 3\n\n## Background\n\n[Provide context and motivation]\n\n## Scope\n\n**In Scope:**\n- Item 1\n- Item 2\n\n**Out of Scope:**\n- Item 1\n- Item 2\n","design":"## Architecture\n\n[Describe the overall architecture and approach]\n\n## Components\n\n- Component 1: [description]\n- Component 2: [description]\n\n## Dependencies\n\n[List external dependencies or constraints]\n","acceptance_criteria":"- [ ] All child issues are completed\n- [ ] Integration tests pass\n- [ ] Documentation is updated\n- [ ] Code review completed\n","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-11-03T19:54:03.794244-08:00","updated_at":"2025-11-05T00:25:06.51152-08:00","closed_at":"2025-11-05T00:25:06.51152-08:00","source_repo":".","labels":["epic"]}
|
||||
{"id":"bd-7315","content_hash":"81137222aba60b33d3bcd7637891cf94547b5c876a1608e3e3370a578ba165f3","title":"Add validation for duplicate external_ref in batch imports","description":"Currently, if a batch import contains multiple issues with the same external_ref, the behavior is undefined. We should detect and handle this case.\n\nCurrent behavior:\n- No validation for duplicate external_ref within a batch\n- Last-write-wins or non-deterministic behavior\n\nProposed solution:\n- Detect duplicate external_ref values in incoming batch\n- Fail with clear error message OR\n- Merge duplicates intelligently (use newest timestamp)\n- Add test case for this scenario\n\nRelated: bd-1022","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-02T15:31:55.85634-08:00","updated_at":"2025-11-02T16:03:50.374552-08:00","closed_at":"2025-11-02T16:03:50.374552-08:00","source_repo":"."}
|
||||
{"id":"bd-7324","content_hash":"639f5eef2922178daae7416831b850bf58ebeb39b8b91e7124387e0b6dfea33c","title":"Add is_tombstone flag to schema","description":"Optionally add is_tombstone boolean field to issues table. Marks resurrected parents that were deleted. Allows distinguishing tombstones from normal deleted issues. Update schema.go and create migration.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-04T12:31:59.745076-08:00","updated_at":"2025-11-05T00:44:27.947578-08:00","closed_at":"2025-11-05T00:44:27.947584-08:00","source_repo":"."}
|
||||
@@ -178,7 +179,7 @@
|
||||
{"id":"bd-9e8d","content_hash":"bde8f3625189415e8f00b06acf530454bec619cabee1332132836d45b26771ed","title":"Test Issue","description":"","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-10-31T21:41:11.107393-07:00","updated_at":"2025-11-01T20:02:28.292279-07:00","closed_at":"2025-11-01T20:02:28.292279-07:00","source_repo":"."}
|
||||
{"id":"bd-9f1fce5d","content_hash":"06b6c591090df9e565a67086b354875c5029fce5b60245bce97af7bd63d26166","title":"Add internal/ai package for LLM integration","description":"Shared AI client for repair commands.\n\nProviders:\n- Anthropic (Claude)\n- OpenAI (GPT)\n- Ollama (local)\n\nEnv vars:\n- BEADS_AI_PROVIDER\n- BEADS_AI_API_KEY\n- BEADS_AI_MODEL\n\nFiles: internal/ai/client.go (new)","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-28T14:48:29.072473-07:00","updated_at":"2025-11-06T19:36:13.972045-08:00","closed_at":"2025-11-06T19:27:19.128093-08:00","source_repo":"."}
|
||||
{"id":"bd-9f20","content_hash":"fd9e463ab1b81e62f5ae1441e8c3a661361031a30e6a95502152bb4d7eecf7b2","title":"DetectCycles SQL query has bug preventing cycle detection","description":"The DetectCycles function's SQL query has a bug in the LIKE filter that prevents it from detecting cycles.\n\nCurrent code (line 571):\n```sql\nAND p.path NOT LIKE '%' || d.depends_on_id || '→%'\n```\n\nThis prevents ANY revisit to nodes, including returning to the start node to complete a cycle.\n\nFix:\n```sql\nAND (d.depends_on_id = p.start_id OR p.path NOT LIKE '%' || d.depends_on_id || '→%')\n```\n\nThis allows revisiting the start node (to detect the cycle) while still preventing intermediate node revisits.\n\nImpact: Currently DetectCycles cannot detect any cycles, but this hasn't been noticed because AddDependency prevents cycles from being created. The function would only matter if cycles were manually inserted into the database.","status":"closed","priority":3,"issue_type":"bug","created_at":"2025-11-01T22:50:32.552763-07:00","updated_at":"2025-11-01T22:52:02.247443-07:00","closed_at":"2025-11-01T22:52:02.247443-07:00","source_repo":"."}
|
||||
{"id":"bd-9f4a","content_hash":"ff058f9bad890bee6a00df24c846f523980473d47c702097164deea7504886a4","title":"Document external_ref in content hash behavior","description":"The content hash includes external_ref, which has implications that should be documented.\n\nCurrent behavior:\n- external_ref is included in content hash calculation (collision.go:158-160)\n- Changing external_ref changes content hash\n- This means: local issue → add external_ref → different hash\n\nImplications:\n- Local issue + external_ref addition = looks like 'new content'\n- May not match by content hash in some scenarios\n- Generally correct behavior, but subtle\n\nAction items:\n- Document in code comments\n- Add to ARCHITECTURE.md or similar\n- Add test demonstrating this behavior\n- Consider if this is desired long-term\n\nRelated: bd-1022\nFiles: internal/storage/sqlite/collision.go:158-160","status":"open","priority":4,"issue_type":"task","created_at":"2025-11-02T15:32:47.715458-08:00","updated_at":"2025-11-02T15:32:47.715458-08:00","source_repo":"."}
|
||||
{"id":"bd-9f4a","content_hash":"f7fc76124f76636ef40af114a47509885fa9e5af9d2fddaf8820a46542086e42","title":"Document external_ref in content hash behavior","description":"The content hash includes external_ref, which has implications that should be documented.\n\nCurrent behavior:\n- external_ref is included in content hash calculation (collision.go:158-160)\n- Changing external_ref changes content hash\n- This means: local issue → add external_ref → different hash\n\nImplications:\n- Local issue + external_ref addition = looks like 'new content'\n- May not match by content hash in some scenarios\n- Generally correct behavior, but subtle\n\nAction items:\n- Document in code comments\n- Add to ARCHITECTURE.md or similar\n- Add test demonstrating this behavior\n- Consider if this is desired long-term\n\nRelated: bd-1022\nFiles: internal/storage/sqlite/collision.go:158-160","status":"closed","priority":4,"issue_type":"task","created_at":"2025-11-02T15:32:47.715458-08:00","updated_at":"2025-11-08T02:24:24.685778-08:00","closed_at":"2025-11-08T02:20:01.004638-08:00","source_repo":"."}
|
||||
{"id":"bd-9li4","content_hash":"7ae7b885e82a2de333584c01f690dbc3ecb924603f18e316f5c91cc44e2256f8","title":"Create Docker image for Agent Mail","description":"Containerize Agent Mail server for easy deployment.\n\nAcceptance Criteria:\n- Dockerfile with Python 3.14\n- Health check endpoint\n- Volume mount for storage\n- Environment variable configuration\n- Multi-arch builds (amd64, arm64)\n\nFile: deployment/agent-mail/Dockerfile","status":"open","priority":3,"issue_type":"task","created_at":"2025-11-07T22:43:43.231964-08:00","updated_at":"2025-11-07T22:43:43.231964-08:00","source_repo":"."}
|
||||
{"id":"bd-9mnw","content_hash":"349a819f81aad88dffd0c5bd232d2d2e6a43710d1207aa200fb0e1555b10af3e","title":"Issue 1","description":"","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-07T19:07:16.329643-08:00","updated_at":"2025-11-08T00:20:30.589211-08:00","closed_at":"2025-11-08T00:20:30.589211-08:00","source_repo":"."}
|
||||
{"id":"bd-9msn","content_hash":"69ef2ebc5a847eb407c37e9039391d8ebc761a4cee3b60537de4f5a12011bec3","title":"Add monitoring and alerting","description":"Observability for production Agent Mail server.\n\nAcceptance Criteria:\n- Health check endpoint (/health)\n- Prometheus metrics export\n- Grafana dashboard\n- Alerts for server downtime\n- Alerts for high error rate\n- Log aggregation config\n\nFile: deployment/agent-mail/monitoring/","status":"open","priority":3,"issue_type":"task","created_at":"2025-11-07T22:43:43.354117-08:00","updated_at":"2025-11-07T22:43:43.354117-08:00","source_repo":".","dependencies":[{"issue_id":"bd-9msn","depends_on_id":"bd-z3s3","type":"blocks","created_at":"2025-11-07T23:04:28.050074-08:00","created_by":"daemon"}]}
|
||||
@@ -236,11 +237,11 @@
|
||||
{"id":"bd-d3f0","content_hash":"d759327f1a1e4817d3e8ec212fd6af2607d0bb5e654201a6fc3640ad0a3b18fd","title":"Add 'bd comment' as alias for 'bd comments add'","description":"The command 'bd comments add' is verbose and unintuitive. Add 'bd comment' as a shorter alias that works the same way.\n\n## Rationale\n- More natural: 'bd comment \u003cissue-id\u003e \u003ctext\u003e' reads better than 'bd comments add \u003cissue-id\u003e \u003ctext\u003e'\n- Matches user expectations: users naturally try 'bd comment' first\n- Follows convention: other commands like 'bd create', 'bd show', 'bd close' are verbs\n\n## Implementation\nCould be implemented as:\n1. A new command that wraps bd comments add\n2. An alias registered in cobra\n3. Keep 'bd comments add' for backwards compatibility\n\n## Examples\n```bash\nbd comment bd-1234 'This is a comment'\nbd comment bd-1234 'Multi-line comment' --body 'Additional details here'\n```","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-11-02T17:13:18.82563-08:00","updated_at":"2025-11-03T22:20:30.223939-08:00","closed_at":"2025-11-03T22:20:30.223939-08:00","source_repo":"."}
|
||||
{"id":"bd-d4ec5a82","content_hash":"e0f9aa493571fdb0b5fd578993165042ad224bb2c00186564337732e6221d529","title":"Add MCP functions for repair commands","description":"Add repair commands to beads-mcp for agent access:\n- beads_resolve_conflicts()\n- beads_find_duplicates()\n- beads_detect_pollution()\n- beads_validate()\n\nFiles: integrations/beads-mcp/src/beads_mcp/server.py","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-28T14:48:29.071495-07:00","updated_at":"2025-11-06T19:36:13.972786-08:00","closed_at":"2025-11-06T19:27:19.170894-08:00","source_repo":"."}
|
||||
{"id":"bd-d68f","content_hash":"1c806ae9c39bb28faaa8730350ce6b20bc25821e33583f537db1567b183bce6d","title":"Add tests for Comments API (AddIssueComment, GetIssueComments)","description":"Comments API currently has 0% coverage. Need tests for:\n- AddIssueComment - adding comments to issues\n- GetIssueComments - retrieving comments\n- Comment ordering and pagination\n- Edge cases (non-existent issues, empty comments)","status":"closed","priority":3,"issue_type":"task","created_at":"2025-11-01T22:40:58.980688-07:00","updated_at":"2025-11-01T22:53:42.124391-07:00","closed_at":"2025-11-01T22:53:42.124391-07:00","source_repo":"."}
|
||||
{"id":"bd-d6aq","content_hash":"c513a7dc97f078c4785f828ec1f35a8f268265d8722c31603d169c9e80c9257b","title":"Test reservation expiration and renewal","description":"Verify TTL-based reservation expiration works correctly.\n\nAcceptance Criteria:\n- Reserve with short TTL (30s)\n- Verify other agents can't claim\n- Wait for expiration\n- Verify reservation auto-released\n- Other agent can now claim\n- Test renewal/heartbeat mechanism\n\nFile: tests/integration/test_reservation_ttl.py","notes":"Implemented comprehensive TTL/expiration test suite in tests/integration/test_reservation_ttl.py\n\nTest Coverage:\n✅ Short TTL reservations (30s) - verifies TTL is properly set\n✅ Reservation blocking - confirms agent2 cannot claim while agent1 holds reservation\n✅ Auto-release after expiration - validates expired reservations are auto-cleaned and become available\n✅ Renewal/heartbeat - tests that re-reserving extends expiration time\n\nAll 4 tests passing in 56.9s total (including 30s+ wait time for expiration tests).\n\nMock server implements full TTL management:\n- Reservation class with expiration tracking\n- Auto-cleanup of expired reservations on each request\n- Renewal support (same agent re-reserving)\n- 409 conflict for cross-agent reservation attempts","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-07T22:43:21.547821-08:00","updated_at":"2025-11-08T02:24:16.790407-08:00","closed_at":"2025-11-08T02:24:16.790407-08:00","source_repo":".","dependencies":[{"issue_id":"bd-d6aq","depends_on_id":"bd-m9th","type":"blocks","created_at":"2025-11-07T22:43:21.548731-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-d6aq","content_hash":"c513a7dc97f078c4785f828ec1f35a8f268265d8722c31603d169c9e80c9257b","title":"Test reservation expiration and renewal","description":"Verify TTL-based reservation expiration works correctly.\n\nAcceptance Criteria:\n- Reserve with short TTL (30s)\n- Verify other agents can't claim\n- Wait for expiration\n- Verify reservation auto-released\n- Other agent can now claim\n- Test renewal/heartbeat mechanism\n\nFile: tests/integration/test_reservation_ttl.py","notes":"Implemented comprehensive TTL/expiration test suite in tests/integration/test_reservation_ttl.py\n\nTest Coverage:\n✅ Short TTL reservations (30s) - verifies TTL is properly set\n✅ Reservation blocking - confirms agent2 cannot claim while agent1 holds reservation\n✅ Auto-release after expiration - validates expired reservations are auto-cleaned and become available\n✅ Renewal/heartbeat - tests that re-reserving extends expiration time\n\nAll 4 tests passing in 56.9s total (including 30s+ wait time for expiration tests).\n\nMock server implements full TTL management:\n- Reservation class with expiration tracking\n- Auto-cleanup of expired reservations on each request\n- Renewal support (same agent re-reserving)\n- 409 conflict for cross-agent reservation attempts","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-07T22:43:21.547821-08:00","updated_at":"2025-11-08T02:24:30.296982-08:00","closed_at":"2025-11-08T02:24:30.296982-08:00","source_repo":".","dependencies":[{"issue_id":"bd-d6aq","depends_on_id":"bd-m9th","type":"blocks","created_at":"2025-11-07T22:43:21.548731-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-d76d","content_hash":"b65da5fe9f89a98f1e6fad6ee32d463126ef72785fec4d6dfa5a4774c6a8a393","title":"Modify EnsureIDs to support parent resurrection","description":"Update internal/storage/sqlite/ids.go:189-202 to call TryResurrectParent before failing on missing parent. Add resurrection mode flag, log resurrected parents for transparency. Maintain backwards compatibility with strict validation mode.","status":"closed","priority":0,"issue_type":"task","created_at":"2025-11-04T12:31:59.659507-08:00","updated_at":"2025-11-05T00:08:42.814463-08:00","closed_at":"2025-11-05T00:08:42.814466-08:00","source_repo":"."}
|
||||
{"id":"bd-d7e88238","content_hash":"ff14f04a04bf89f52bda3d584933df6b09b554cce8665f47f429f1ac52dafb94","title":"Rapid 3","description":"","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-29T19:11:57.459655-07:00","updated_at":"2025-11-07T23:21:44.925275-08:00","closed_at":"2025-11-07T23:18:52.333825-08:00","source_repo":"."}
|
||||
{"id":"bd-d9e0","content_hash":"de4e01414f8863b63cb693a709048b85c3f4417f03e7d7b2528560076be0e1f7","title":"Extract validation functions to validators.go","description":"Move validatePriority, validateStatus, validateIssueType, validateTitle, validateEstimatedMinutes, validateFieldUpdate to validators.go","status":"closed","priority":1,"issue_type":"task","created_at":"2025-11-01T19:28:54.915909-07:00","updated_at":"2025-11-02T12:32:00.159298-08:00","closed_at":"2025-11-02T12:32:00.1593-08:00","source_repo":"."}
|
||||
{"id":"bd-df11","content_hash":"aff6233eae39a337d6a49328284a56d6e553a6d52dc13ac4ab7a05d6d2033ce1","title":"Add import metrics for external_ref matching statistics","description":"Add observability for external_ref matching behavior during imports to help debug and optimize import operations.\n\nMetrics to track:\n- Number of issues matched by external_ref\n- Number of issues matched by ID\n- Number of issues matched by content hash\n- Number of external_ref updates vs creates\n- Average import time with vs without external_ref\n\nOutput format:\n- Add to ImportResult struct\n- Include in import command output\n- Consider structured logging\n\nUse cases:\n- Debugging slow imports\n- Understanding match distribution\n- Optimizing import performance\n\nRelated: bd-1022","status":"open","priority":4,"issue_type":"chore","created_at":"2025-11-02T15:32:46.157899-08:00","updated_at":"2025-11-02T15:32:46.157899-08:00","source_repo":"."}
|
||||
{"id":"bd-df11","content_hash":"9d688c3fe5f4994ab29ed22c8c4ae467f2069c4cbb676a2168303b2ffcba48c4","title":"Add import metrics for external_ref matching statistics","description":"Add observability for external_ref matching behavior during imports to help debug and optimize import operations.\n\nMetrics to track:\n- Number of issues matched by external_ref\n- Number of issues matched by ID\n- Number of issues matched by content hash\n- Number of external_ref updates vs creates\n- Average import time with vs without external_ref\n\nOutput format:\n- Add to ImportResult struct\n- Include in import command output\n- Consider structured logging\n\nUse cases:\n- Debugging slow imports\n- Understanding match distribution\n- Optimizing import performance\n\nRelated: bd-1022","status":"closed","priority":4,"issue_type":"chore","created_at":"2025-11-02T15:32:46.157899-08:00","updated_at":"2025-11-08T02:24:24.686136-08:00","closed_at":"2025-11-08T02:20:01.01371-08:00","source_repo":"."}
|
||||
{"id":"bd-dxdn","content_hash":"5897df0b25f78b5f061302cde79a16aaf71b317eb8f931f461b187de5ff0c2f4","title":"bd ready taking 5 seconds with 132 issues (89 closed)","description":"User reports bd ready is annoyingly slow on M2 Mac - 5 seconds for 132 issues (89 closed). Started noticing after hash-based IDs update. Need to investigate performance regression. Reported in GH #243.","notes":"Initial analysis:\n- bd ready runs in 40ms on my machine with similar DB size\n- Recursive CTE looks properly indexed with idx_dependencies_depends_on_type\n- Need EXPLAIN QUERY PLAN from user's machine to diagnose\n- Possible missing index: (issue_id, type) for reverse lookup in recursive join\n- Could also be SQLite version difference or WAL mode not enabled\n\nNeed user to run:\n1. bd --version (check SQLite version)\n2. EXPLAIN QUERY PLAN on the ready query\n3. Confirm daemon vs direct mode\n4. Check if WAL mode is enabled","status":"in_progress","priority":1,"issue_type":"bug","created_at":"2025-11-07T00:26:30.359512-08:00","updated_at":"2025-11-07T00:29:24.711948-08:00","source_repo":"."}
|
||||
{"id":"bd-e044","content_hash":"8393c18d7f6edfed3d3e360a32a3075a9e0d9caa6f02d704774482aa1d9b0a7f","title":"Add mermaid output format for bd dep tree","description":"Add visual dependency graph output using Mermaid format for better visualization of issue relationships.\n\nExample usage:\n bd dep tree --format mermaid \u003cissue-id\u003e\n bd dep tree --format mermaid bd-42 \u003e graph.md\n\nThis would output Mermaid syntax that can be rendered in GitHub, documentation sites, or Mermaid live editor.\n\nImplementation notes:\n- Add --format flag to dep tree command\n- Support 'text' (default) and 'mermaid' formats\n- Mermaid graph should show issue IDs, titles, and dependency types\n- Consider using flowchart LR or graph TD syntax","status":"closed","priority":1,"issue_type":"feature","created_at":"2025-11-03T18:10:18.978383-08:00","updated_at":"2025-11-03T20:55:06.696363-08:00","closed_at":"2025-11-03T20:55:06.69637-08:00","source_repo":"."}
|
||||
{"id":"bd-e05d","content_hash":"c2f4d60f5bd679d9bf609c35efc9c15e8dd52130fb9b68eacfe47bdda910ecd7","title":"Investigate and optimize test suite performance","description":"Test suite is taking very long to run (\u003e45s for cmd/bd tests, full suite timing unknown but was cancelled).\n\nThis impacts development velocity and CI/CD performance.\n\nInvestigation needed:\n- Profile which tests are slowest\n- Identify bottlenecks (disk I/O, network, excessive setup/teardown?)\n- Consider parallelization opportunities\n- Look for redundant test cases\n- Check if integration tests can be optimized","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-02T15:37:44.529955-08:00","updated_at":"2025-11-02T16:35:38.093133-08:00","closed_at":"2025-11-02T16:35:38.093137-08:00","source_repo":"."}
|
||||
@@ -252,6 +253,7 @@
|
||||
{"id":"bd-e98221b3","content_hash":"4a4f6912d8de8bf0f9ae867be1a25d83c5a6991383e3aa192537747500bebc6a","title":"Update AGENTS.md and README.md with \"bd daemons\" documentation","description":"Document the new \"bd daemons\" command and all subcommands in AGENTS.md and README.md. Include examples and troubleshooting guidance.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-26T19:41:11.099254-07:00","updated_at":"2025-11-06T20:06:49.219318-08:00","closed_at":"2025-11-06T19:51:57.75321-08:00","source_repo":"."}
|
||||
{"id":"bd-eb3c","content_hash":"6c7a46d58e565a27e3a7a5375bb1ad8345094bdef422dce52239ee4b7e559143","title":"UX nightmare: multiple ways daemon can fail with misleading messages","description":"","status":"closed","priority":0,"issue_type":"epic","created_at":"2025-10-31T21:08:09.090553-07:00","updated_at":"2025-11-01T20:27:42.79962-07:00","closed_at":"2025-11-01T20:27:42.79962-07:00","source_repo":"."}
|
||||
{"id":"bd-eimz","content_hash":"c2c2a4b9a7c9fe281e8b47abf088ae0fc5199177e2c3a5dcde62cd890985f796","title":"Add Agent Mail to QUICKSTART.md","description":"Mention Agent Mail as optional advanced feature in quickstart guide.\n\nFile: docs/QUICKSTART.md","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-07T22:42:51.357009-08:00","updated_at":"2025-11-08T01:07:11.598558-08:00","closed_at":"2025-11-08T01:07:11.598558-08:00","source_repo":".","dependencies":[{"issue_id":"bd-eimz","depends_on_id":"bd-xzrv","type":"blocks","created_at":"2025-11-07T23:04:09.841956-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-eiz9","content_hash":"5a5b4d50550bc22547503d8d59874b6ce7e35483c85fe677f538333635a7f35f","title":"Help agents understand version changes with bd info --whats-new","description":"**Problem** (from GH Discussion #239 by @maphew):\nWeekly major versions mean agents need to adapt workflows, but currently there's no efficient way to communicate \"what changed that affects you.\"\n\n**Proposed solutions:**\n\n1. **bd info --whats-new** - Show agent-actionable changes since last version\n ```\n Since v0.20.1:\n • Hash IDs eliminate collisions - remove ID coordination workarounds\n • Event-driven daemon (opt-in) - add BEADS_DAEMON_MODE=events\n • Merge driver auto-configured - conflicts rarer\n ```\n\n2. **Version-aware bd onboard** - Detect version changes and show diff of agent-relevant changes\n\n3. **AGENTS.md top section** - \"🆕 Recent Changes (Last 3 Versions)\" with workflow impacts\n\n**Why agents need this:**\n- Raw CHANGELOG is token-heavy and buried in release details\n- Full bd onboard re-run wasteful if only 2-3 things changed\n- Currently requires user to manually explain updates\n\n**Related:** https://github.com/steveyegge/beads/discussions/239","status":"open","priority":2,"issue_type":"feature","created_at":"2025-11-06T21:03:30.057576-08:00","updated_at":"2025-11-06T21:03:30.057576-08:00","source_repo":"."}
|
||||
{"id":"bd-epvx","content_hash":"37513b74fb23b40be54c502167c12fe287025afd38f2c040061bdb678bac4006","title":"Create Go adapter library (optional)","description":"For agents written in Go, provide native adapter library instead of shelling out to curl.\n\nAcceptance Criteria:\n- agentmail.Client struct\n- HTTP client with timeout/retry logic\n- Same API as Python adapter\n- Example usage in examples/go-agent/\n- Unit tests\n\nFile: pkg/agentmail/client.go\n\nNote: Lower priority - can shell out to curl initially","status":"open","priority":3,"issue_type":"feature","created_at":"2025-11-07T22:42:28.781577-08:00","updated_at":"2025-11-07T22:42:28.781577-08:00","source_repo":".","dependencies":[{"issue_id":"bd-epvx","depends_on_id":"bd-m9th","type":"blocks","created_at":"2025-11-07T23:04:01.47471-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-eqjc","content_hash":"8acc3d91ca9f9bef36d19358cb7f24eac247583a0e6701036aaff92607474c21","title":"bd init creates nested .beads directories","description":"bd init sometimes creates .beads/.beads/ nested directories, which should never happen. This occurs fairly often and can cause confusion about which .beads directory is active. Need to add validation to detect if already inside a .beads directory and either error or use the parent .beads location.","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-11-06T22:21:22.948727-08:00","updated_at":"2025-11-06T22:22:41.04958-08:00","closed_at":"2025-11-06T22:22:41.04958-08:00","source_repo":"."}
|
||||
{"id":"bd-expt","content_hash":"6e14db64fb24882e4cf544ec24eaa994aba970fd0ae31c72dda2d8ea88560753","title":"RPC fast-fail: stat socket before dial, cap timeouts to 200ms","description":"Eliminate 5s delay when daemon socket is missing by:\n1. Add os.Stat(socketPath) check before dialing in TryConnect\n2. Return (nil, nil) immediately if socket doesn't exist\n3. Set default dial timeout to 200ms in TryConnect\n4. Keep TryConnectWithTimeout for explicit health/status checks (1-2s)\n\nThis prevents clients from waiting through full timeout when no daemon is running.","status":"closed","priority":0,"issue_type":"task","created_at":"2025-11-07T16:42:12.688526-08:00","updated_at":"2025-11-07T22:07:17.345918-08:00","closed_at":"2025-11-07T21:04:21.671436-08:00","source_repo":".","dependencies":[{"issue_id":"bd-expt","depends_on_id":"bd-ndyz","type":"discovered-from","created_at":"2025-11-07T16:42:12.689284-08:00","created_by":"daemon"}]}
|
||||
@@ -326,7 +328,7 @@
|
||||
{"id":"bd-r1pf","content_hash":"aa28faaa7538a6f1691a5dbc3f5136c2f446c7a16836ee0b572d8fe3e86adcff","title":"Test label","description":"","status":"closed","priority":3,"issue_type":"task","created_at":"2025-11-06T20:16:20.609492-08:00","updated_at":"2025-11-06T20:16:34.973855-08:00","closed_at":"2025-11-06T20:16:34.973855-08:00","source_repo":".","labels":[""," urgent "," bug "," critical "]}
|
||||
{"id":"bd-r79z","content_hash":"2972e60ecf73426a5349415689e57a2404800ee61468ed6d295ab7d4b2c5c7e0","title":"GH#245: Windows MCP subprocess timeout for git rev-parse","description":"User reports git detection timing out on Windows in MCP server, but CLI works fine.\n\nPath: C:\\Users\\chris\\Documents\\DEV_R\\quarto-cli\nError: Git repository detection timed out after 5s\nWorks fine in CLI: `git rev-parse --show-toplevel` succeeds\n\nHypothesis: subprocess.run() with asyncio.to_thread() may have Windows-specific issues or the MCP runtime environment may not have proper PATH/git access.\n\nPotential fixes:\n1. Add subprocess shell=True on Windows\n2. Increase timeout further for Windows\n3. Add better error logging to capture subprocess stderr\n4. Skip git resolution entirely on timeout and just use provided path","status":"closed","priority":1,"issue_type":"bug","created_at":"2025-11-07T16:31:37.531223-08:00","updated_at":"2025-11-07T19:00:44.358543-08:00","closed_at":"2025-11-07T19:00:44.358543-08:00","source_repo":"."}
|
||||
{"id":"bd-rb75","content_hash":"e91418eb7abda986ddb57feaee1b91867043de8c0883d71c21dc1bf4047f5824","title":"Clean up merge conflict artifacts in .beads directory","description":"After resolving merge conflicts in .beads/beads.jsonl, leftover artifacts remain as untracked files:\n- .beads/beads.base.jsonl\n- .beads/beads.left.jsonl\n\nThese appear to be temporary files created during merge conflict resolution.\n\nOptions to fix:\n1. Add these patterns to .beads/.gitignore automatically\n2. Clean up these files after successful merge resolution\n3. Document that users should delete them manually\n4. Add a check in 'bd sync' or 'bd doctor' to detect and remove stale merge artifacts\n\nPreferred solution: Add *.base.jsonl and *.left.jsonl patterns to .beads/.gitignore during 'bd init', and optionally clean them up automatically after successful import.","status":"closed","priority":2,"issue_type":"bug","created_at":"2025-11-06T19:09:16.114274-08:00","updated_at":"2025-11-06T19:13:44.630402-08:00","closed_at":"2025-11-06T19:13:44.630402-08:00","source_repo":"."}
|
||||
{"id":"bd-rbxi","content_hash":"a84e265e04e332ead9ba8a65e90316fa1b44fe024001bafad4b3de47e22a73bc","title":"bd-hv01: Deletion tracking production readiness","description":"Epic to track all improvements and fixes needed to make the deletion tracking implementation (bd-hv01) production-ready.\n\nThe core 3-way merge algorithm is sound, but there are critical issues around atomicity, error handling, and edge cases that need to be addressed before this can be safely used in production.\n\nCritical path (P1):\n- Non-atomic snapshot operations\n- Brittle JSON string comparison\n- Silent partial deletion failures\n- Race conditions in concurrent scenarios\n\nFollow-up work (P2-P3):\n- Test coverage for edge cases and multi-repo mode\n- Performance optimizations\n- Code refactoring and observability\n\nRelated commit: 708a81c","status":"in_progress","priority":1,"issue_type":"epic","created_at":"2025-11-06T18:18:24.315646-08:00","updated_at":"2025-11-06T18:39:15.739497-08:00","source_repo":"."}
|
||||
{"id":"bd-rbxi","content_hash":"c7e9f5293d411fb741c053d548479c5c387081f859e44e57ed21f855f91cfb26","title":"bd-hv01: Deletion tracking production readiness","description":"Epic to track all improvements and fixes needed to make the deletion tracking implementation (bd-hv01) production-ready.\n\nThe core 3-way merge algorithm is sound, but there are critical issues around atomicity, error handling, and edge cases that need to be addressed before this can be safely used in production.\n\nCritical path (P1):\n- Non-atomic snapshot operations\n- Brittle JSON string comparison\n- Silent partial deletion failures\n- Race conditions in concurrent scenarios\n\nFollow-up work (P2-P3):\n- Test coverage for edge cases and multi-repo mode\n- Performance optimizations\n- Code refactoring and observability\n\nRelated commit: 708a81c","status":"closed","priority":1,"issue_type":"epic","created_at":"2025-11-06T18:18:24.315646-08:00","updated_at":"2025-11-08T02:24:24.686475-08:00","closed_at":"2025-11-08T02:19:19.780741-08:00","source_repo":"."}
|
||||
{"id":"bd-ry1u","content_hash":"b566c54bd06360dbc38709cfb93040c8da04abf0309269bd8b4ee3710a9a4099","title":"Publish official devcontainer configuration","description":"","design":"Problem: GH issue #229 shows git hooks aren't available in devcontainers because bd CLI isn't installed. Solution: Provide official .devcontainer/devcontainer.json that installs Go, builds bd from source, runs bd init --quiet, and installs hooks automatically. Benefits: New contributors start immediately, AI assistants work out-of-box, no manual setup. Implementation: Create .devcontainer/devcontainer.json with Go feature, postCreateCommand to build bd and install hooks, environment variables. Acceptance: Container builds with bd installed, hooks functional, documentation updated, tested with Codespaces and VSCode.","notes":"Devcontainer configuration implemented. Manual testing required in actual devcontainer environment (Codespaces or VSCode Remote Containers). All code changes complete, tests pass, linting clean.","status":"closed","priority":2,"issue_type":"epic","created_at":"2025-11-05T15:02:21.783666-08:00","updated_at":"2025-11-05T17:46:42.70998-08:00","closed_at":"2025-11-05T17:46:42.70998-08:00","source_repo":"."}
|
||||
{"id":"bd-s02","content_hash":"911d456e4dabae028dd615b643c99058ef12e55ea523cb81cc933783c7b13546","title":"Manual task","description":"","status":"open","priority":2,"issue_type":"task","created_at":"2025-11-03T20:15:10.022202-08:00","updated_at":"2025-11-03T20:15:10.022202-08:00","source_repo":"."}
|
||||
{"id":"bd-sc57","content_hash":"c82176a5a4e821ce882030bbe3be411c528bc0fe659e1879c0be72758b55fb03","title":"Production Readiness (Optional)","description":"Enable multi-machine deployments with containerization and monitoring.","status":"closed","priority":3,"issue_type":"epic","created_at":"2025-11-07T22:43:31.527617-08:00","updated_at":"2025-11-08T01:06:12.904671-08:00","closed_at":"2025-11-08T01:06:12.904671-08:00","source_repo":".","dependencies":[{"issue_id":"bd-sc57","depends_on_id":"bd-wfmw","type":"blocks","created_at":"2025-11-07T22:43:31.528743-08:00","created_by":"daemon"},{"issue_id":"bd-sc57","depends_on_id":"bd-pdjb","type":"blocks","created_at":"2025-11-07T22:43:31.529193-08:00","created_by":"daemon"}]}
|
||||
@@ -339,7 +341,7 @@
|
||||
{"id":"bd-u4f5","content_hash":"89c6ae8745a842541c9a2025222c2c2e67e17b4fc33e0e56e58a37f0c5935939","title":"bd import silently succeeds when database matches working tree but not git HEAD","description":"**Critical**: bd import reports '0 created, 0 updated' when database matches working tree JSONL, even when working tree is ahead of git HEAD. This gives false confidence that everything is synced with the source of truth.\n\n## Reproduction\n\n1. Start with database synced to working tree .beads/issues.jsonl (376 issues)\n2. Git HEAD has older version of .beads/issues.jsonl (354 issues)\n3. Run: bd import .beads/issues.jsonl\n4. Output: 'Import complete: 0 created, 0 updated'\n\n## Problem\n\nUser expects 'bd import' after 'git pull' to sync database with committed state, but:\n- Command silently succeeds because DB already matches working tree\n- No warning that working tree has uncommitted changes\n- User falsely believes everything is synced with git\n- Violates 'JSONL in git is source of truth' principle\n\n## Expected Behavior\n\nWhen .beads/issues.jsonl differs from git HEAD, bd import should:\n1. Detect uncommitted changes: git diff --quiet HEAD .beads/issues.jsonl\n2. Warn user: 'Warning: .beads/issues.jsonl has uncommitted changes (376 lines vs 354 in HEAD)'\n3. Clarify status: 'Import complete: 0 created, 0 updated (already synced with working tree)'\n4. Recommend: 'Run git diff .beads/issues.jsonl to review uncommitted work'\n\n## Impact\n\n- Users can't trust 'bd import' status messages\n- Silent data loss risk if user assumes synced and runs git checkout\n- Breaks mental model of 'JSONL in git = source of truth'\n- Critical for VC's landing-the-plane workflow","acceptance_criteria":"1. bd import detects when working tree differs from git HEAD\n2. Warning emitted if JSONL has uncommitted changes \n3. Status message clarifies 'synced with working tree' vs 'synced with git'\n4. Optional flag to suppress warning (e.g., --working-tree mode)\n5. 
Documentation updated to explain import behavior with uncommitted changes\n6. Test case: import with dirty working tree shows warning","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-11-07T23:51:28.536822-08:00","updated_at":"2025-11-07T23:58:34.482313-08:00","closed_at":"2025-11-07T23:58:34.482313-08:00","source_repo":".","labels":["data-integrity"]}
|
||||
{"id":"bd-u8j","content_hash":"962eba2362141bb73be8375626997f1d6ab91b76d691f11f23593a964b036995","title":"Clarify exclusive lock protocol compatibility with multi-repo","description":"The contributor-workflow-analysis.md proposes per-repo file locking (Decision #7) using flock on JSONL files. However, VC (a downstream library consumer) uses an exclusive lock protocol (vc-195, requires Beads v0.17.3+) that allows bd daemon and VC executor to coexist.\n\nNeed to clarify:\n- Does the proposed per-repo file locking work with VC's existing exclusive lock protocol?\n- Do library consumers like VC need to adapt their locking logic?\n- Can multiple repos be locked atomically for cross-repo operations?\n\nContext: contributor-workflow-analysis.md lines 662-681","acceptance_criteria":"- Documentation explicitly states compatibility or incompatibility with existing lock protocols\n- If incompatible, migration path is documented for library consumers\n- If compatible, example showing coexistence is provided","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-03T20:24:08.257493-08:00","updated_at":"2025-11-05T14:15:01.506885-08:00","closed_at":"2025-11-05T14:15:01.506885-08:00","source_repo":"."}
|
||||
{"id":"bd-uiae","content_hash":"5c184901daaa674a0f1224a29ab789019b53da6d5b5b4d6ac943e7d5d4846b3e","title":"Update documentation for beads-merge integration","description":"Document the integrated merge functionality.\n\n**Updates needed**:\n- AGENTS.md: Replace \"use external beads-merge\" with \"bd merge\"\n- README.md: Add git merge driver section\n- TROUBLESHOOTING.md: Update merge conflict resolution\n- ADVANCED.md: Document 3-way merge algorithm\n- Create CREDITS.md or ATTRIBUTION.md for @neongreen\n\n**Highlight**: Deletion sync fix (bd-hv01)","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-05T18:42:20.488998-08:00","updated_at":"2025-11-06T18:19:16.234758-08:00","closed_at":"2025-11-06T15:40:27.830475-08:00","source_repo":".","dependencies":[{"issue_id":"bd-uiae","depends_on_id":"bd-qqvw","type":"parent-child","created_at":"2025-11-05T18:42:28.752447-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-urob","content_hash":"7046a4e101c3ff6030c236c13a7b6bb5d64c54170e8d792768a9f7a8fd5781bf","title":"bd-hv01: Refactor snapshot management into dedicated module","description":"Problem: Snapshot logic is scattered across deletion_tracking.go. Would benefit from abstraction with SnapshotManager type.\n\nBenefits: cleaner separation of concerns, easier to test in isolation, better encapsulation, could add observability/metrics.\n\nSuggested improvements: add magic constants, track merge statistics, better error messages.\n\nFiles: cmd/bd/deletion_tracking.go (refactor into new snapshot_manager.go)","status":"open","priority":3,"issue_type":"chore","created_at":"2025-11-06T18:16:27.943666-08:00","updated_at":"2025-11-06T18:16:27.943666-08:00","source_repo":".","dependencies":[{"issue_id":"bd-urob","depends_on_id":"bd-rbxi","type":"parent-child","created_at":"2025-11-06T18:19:15.192447-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-urob","content_hash":"fc0e79260f5f6860fa8884859c4b33b18f9cc2dad361c1c1abb9bdeb412479b5","title":"bd-hv01: Refactor snapshot management into dedicated module","description":"Problem: Snapshot logic is scattered across deletion_tracking.go. Would benefit from abstraction with SnapshotManager type.\n\nBenefits: cleaner separation of concerns, easier to test in isolation, better encapsulation, could add observability/metrics.\n\nSuggested improvements: add magic constants, track merge statistics, better error messages.\n\nFiles: cmd/bd/deletion_tracking.go (refactor into new snapshot_manager.go)","status":"closed","priority":3,"issue_type":"chore","created_at":"2025-11-06T18:16:27.943666-08:00","updated_at":"2025-11-08T02:24:24.686744-08:00","closed_at":"2025-11-08T02:19:14.152412-08:00","source_repo":".","dependencies":[{"issue_id":"bd-urob","depends_on_id":"bd-rbxi","type":"parent-child","created_at":"2025-11-06T18:19:15.192447-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-vcg5","content_hash":"82933ce7e0add2ee5b5830b343785c3585151453c5c06243af2b1f2b934e72b2","title":"Daemon crash recovery: panic handler + socket cleanup","description":"Improve daemon cleanup on unexpected exit:\n1. Add top-level recover() in runDaemonLoop to capture panics\n2. Write daemon-error file with stack trace on panic\n3. Prefer return over os.Exit where possible (so defers run)\n4. In stopDaemon forced-kill path, also remove stale socket if present\n\nThis ensures better diagnostics and cleaner state after crashes.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-11-07T16:42:12.733219-08:00","updated_at":"2025-11-07T22:07:17.347728-08:00","closed_at":"2025-11-07T21:17:15.94117-08:00","source_repo":".","dependencies":[{"issue_id":"bd-vcg5","depends_on_id":"bd-ndyz","type":"discovered-from","created_at":"2025-11-07T16:42:12.733889-08:00","created_by":"daemon"}]}
|
||||
{"id":"bd-vxdr","content_hash":"d188358987c7a7d444f9144a4a6cc5164eccd35b16325edba51dad104ab2a7f2","title":"Investigate database pollution - issue count anomalies","description":"Multiple repos showing inflated issue counts suggesting cross-repo pollution:\n- ~/src/dave/beads: 895 issues (675 open) - clearly polluted\n- ~/src/stevey/src/beads: 280 issues (expected ~209-220) - possibly polluted\n\nNeed to investigate:\n1. Source of pollution (multi-repo sync issues?)\n2. How many duplicate/foreign issues exist\n3. Whether recent sync operations caused cross-contamination\n4. How to clean up and prevent future pollution","notes":"Investigation findings:\n\n**Root cause identified:**\n- NOT cross-repo contamination\n- NOT automated test leakage (tests properly use t.TempDir())\n- Manual testing during template feature development (Nov 2-4)\n- Commit ba325a2: \"test issues were accidentally committed during template feature development\"\n\n**Database growth timeline:**\n- Nov 3: 19 issues (baseline)\n- Nov 2-5: +244 issues (massive development spike)\n- Nov 6-7: +40 issues (continued growth)\n- Current: 291 issues → 270 after cleanup\n\n**Test pollution breakdown:**\n- 21 issues matching \"Test \" prefix pattern\n- Most created Nov 2-5 during feature development\n- Pollution from manual `./bd create \"Test issue\"` commands in production workspace\n- All automated tests properly isolated with t.TempDir()\n\n**Cleanup completed:**\n- Ran scripts/cleanup-test-pollution.sh successfully\n- Removed 21 test issues\n- Database reduced from 291 → 270 issues (7.2% cleanup)\n- JSONL synced to git\n\n**Prevention strategy:**\n- Filed follow-up issue for prevention mechanisms\n- Script can be deleted once prevention is in place\n- Tests are already properly isolated - no code changes needed 
there","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-11-06T22:34:40.137483-08:00","updated_at":"2025-11-07T16:07:28.274136-08:00","closed_at":"2025-11-07T16:04:02.199807-08:00","source_repo":"."}
|
||||
{"id":"bd-we4p","content_hash":"cd294027dd3105b1f32e306c52b8273a5720b749d1ac2cbd5e2fb36abffc0582","title":"Cache getMultiRepoJSONLPaths() result during sync to avoid redundant calls","description":"From bd-xo6b code review: getMultiRepoJSONLPaths() is called 3x per sync cycle.\n\n**Current behavior:**\ndaemon_sync.go calls getMultiRepoJSONLPaths() three times per sync:\n- Line 505: Snapshot capture before pull\n- Line 575: Merge/prune after pull\n- Line 613: Base snapshot update after import\n\n**Cost per call:**\n- Config lookup (likely cached, but still overhead)\n- Path construction: O(N) where N = number of repos\n- String allocations: (N + 1) × filepath.Join() calls\n\n**Total per sync:** 3N path constructions + 3 config lookups + 3 slice allocations\n\n**Impact:**\n- For N=3 repos: Negligible (\u003c 1ms)\n- For N=10 repos: Still minimal\n- For N=100+ repos: Wasteful\n\n**Solution:**\nCall once at sync start, reuse result:\n\n```go\nfunc createSyncFunc(...) func() {\n return func() {\n // ... existing setup ...\n \n // Call once at start\n multiRepoPaths := getMultiRepoJSONLPaths()\n \n // Snapshot capture\n if multiRepoPaths != nil {\n for _, path := range multiRepoPaths {\n if err := captureLeftSnapshot(path); err != nil { ... }\n }\n }\n \n // ... later ...\n \n // Merge/prune - reuse same paths\n if multiRepoPaths != nil {\n for _, path := range multiRepoPaths { ... }\n }\n \n // ... later ...\n \n // Base snapshot update - reuse same paths\n if multiRepoPaths != nil {\n for _, path := range multiRepoPaths { ... }\n }\n }\n}\n```\n\n**Files:**\n- cmd/bd/daemon_sync.go:449-636 (createSyncFunc)\n\n**Note:** This is a performance optimization, not a correctness fix. 
Low priority unless multi-repo usage scales significantly.","status":"closed","priority":2,"issue_type":"chore","created_at":"2025-11-06T19:31:32.128674-08:00","updated_at":"2025-11-06T19:40:50.871176-08:00","closed_at":"2025-11-06T19:40:50.871176-08:00","source_repo":".","dependencies":[{"issue_id":"bd-we4p","depends_on_id":"bd-xo6b","type":"discovered-from","created_at":"2025-11-06T19:32:12.39754-08:00","created_by":"daemon"}]}
|
||||
|
||||
@@ -1,249 +1,53 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/config"
|
||||
"github.com/steveyegge/beads/internal/merge"
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
)
|
||||
|
||||
// snapshotMetadata contains versioning info for snapshot files
|
||||
type snapshotMetadata struct {
|
||||
Version string `json:"version"` // bd version that created this snapshot
|
||||
Timestamp time.Time `json:"timestamp"` // When snapshot was created
|
||||
CommitSHA string `json:"commit"` // Git commit SHA at snapshot time
|
||||
}
|
||||
|
||||
const (
|
||||
// maxSnapshotAge is the maximum allowed age for a snapshot file (1 hour)
|
||||
maxSnapshotAge = 1 * time.Hour
|
||||
)
|
||||
|
||||
// jsonEquals compares two JSON strings semantically, handling field reordering
|
||||
func jsonEquals(a, b string) bool {
|
||||
var objA, objB map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(a), &objA); err != nil {
|
||||
return false
|
||||
}
|
||||
if err := json.Unmarshal([]byte(b), &objB); err != nil {
|
||||
return false
|
||||
}
|
||||
return reflect.DeepEqual(objA, objB)
|
||||
}
|
||||
|
||||
// getSnapshotPaths returns paths for base and left snapshot files
|
||||
func getSnapshotPaths(jsonlPath string) (basePath, leftPath string) {
|
||||
dir := filepath.Dir(jsonlPath)
|
||||
basePath = filepath.Join(dir, "beads.base.jsonl")
|
||||
leftPath = filepath.Join(dir, "beads.left.jsonl")
|
||||
return
|
||||
}
|
||||
|
||||
// getSnapshotMetadataPaths returns paths for metadata files
|
||||
func getSnapshotMetadataPaths(jsonlPath string) (baseMeta, leftMeta string) {
|
||||
dir := filepath.Dir(jsonlPath)
|
||||
baseMeta = filepath.Join(dir, "beads.base.meta.json")
|
||||
leftMeta = filepath.Join(dir, "beads.left.meta.json")
|
||||
return
|
||||
}
|
||||
|
||||
// getCurrentCommitSHA returns the current git commit SHA, or empty string if not in a git repo
|
||||
func getCurrentCommitSHA() string {
|
||||
cmd := exec.Command("git", "rev-parse", "--short", "HEAD")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(output))
|
||||
}
|
||||
|
||||
// createSnapshotMetadata creates metadata for the current snapshot
|
||||
func createSnapshotMetadata() snapshotMetadata {
|
||||
return snapshotMetadata{
|
||||
Version: getVersion(),
|
||||
Timestamp: time.Now(),
|
||||
CommitSHA: getCurrentCommitSHA(),
|
||||
}
|
||||
}
|
||||
|
||||
// getVersion returns the current bd version
|
||||
func getVersion() string {
|
||||
return Version
|
||||
}
|
||||
|
||||
// writeSnapshotMetadata writes metadata to a file
|
||||
func writeSnapshotMetadata(path string, meta snapshotMetadata) error {
|
||||
data, err := json.Marshal(meta)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal metadata: %w", err)
|
||||
}
|
||||
|
||||
// Use process-specific temp file for atomic write
|
||||
tempPath := fmt.Sprintf("%s.%d.tmp", path, os.Getpid())
|
||||
if err := os.WriteFile(tempPath, data, 0644); err != nil {
|
||||
return fmt.Errorf("failed to write metadata temp file: %w", err)
|
||||
}
|
||||
|
||||
// Atomic rename
|
||||
return os.Rename(tempPath, path)
|
||||
}
|
||||
|
||||
// readSnapshotMetadata reads metadata from a file
|
||||
func readSnapshotMetadata(path string) (*snapshotMetadata, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil // No metadata file exists (backward compatibility)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read metadata: %w", err)
|
||||
}
|
||||
|
||||
var meta snapshotMetadata
|
||||
if err := json.Unmarshal(data, &meta); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse metadata: %w", err)
|
||||
}
|
||||
|
||||
return &meta, nil
|
||||
}
|
||||
|
||||
// validateSnapshotMetadata validates that snapshot metadata is recent and compatible
|
||||
func validateSnapshotMetadata(meta *snapshotMetadata, currentCommit string) error {
|
||||
if meta == nil {
|
||||
// No metadata file - likely old snapshot format, consider it stale
|
||||
return fmt.Errorf("snapshot has no metadata (stale format)")
|
||||
}
|
||||
|
||||
// Check age
|
||||
age := time.Since(meta.Timestamp)
|
||||
if age > maxSnapshotAge {
|
||||
return fmt.Errorf("snapshot is too old (age: %v, max: %v)", age.Round(time.Second), maxSnapshotAge)
|
||||
}
|
||||
|
||||
// Check version compatibility (major.minor must match)
|
||||
currentVersion := getVersion()
|
||||
if !isVersionCompatible(meta.Version, currentVersion) {
|
||||
return fmt.Errorf("snapshot version %s incompatible with current version %s", meta.Version, currentVersion)
|
||||
}
|
||||
|
||||
// Check commit SHA if we're in a git repo
|
||||
if currentCommit != "" && meta.CommitSHA != "" && meta.CommitSHA != currentCommit {
|
||||
return fmt.Errorf("snapshot from different commit (snapshot: %s, current: %s)", meta.CommitSHA, currentCommit)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isVersionCompatible checks if two versions are compatible (major.minor must match)
|
||||
func isVersionCompatible(v1, v2 string) bool {
|
||||
// Extract major.minor from both versions
|
||||
parts1 := strings.Split(v1, ".")
|
||||
parts2 := strings.Split(v2, ".")
|
||||
|
||||
if len(parts1) < 2 || len(parts2) < 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Compare major.minor
|
||||
return parts1[0] == parts2[0] && parts1[1] == parts2[1]
|
||||
}
|
||||
|
||||
// captureLeftSnapshot copies the current JSONL to the left snapshot file
|
||||
// This should be called after export, before git pull
|
||||
// Uses atomic file operations to prevent race conditions
|
||||
func captureLeftSnapshot(jsonlPath string) error {
|
||||
_, leftPath := getSnapshotPaths(jsonlPath)
|
||||
_, leftMetaPath := getSnapshotMetadataPaths(jsonlPath)
|
||||
|
||||
// Use process-specific temp file to prevent concurrent write conflicts
|
||||
tempPath := fmt.Sprintf("%s.%d.tmp", leftPath, os.Getpid())
|
||||
if err := copyFileSnapshot(jsonlPath, tempPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Atomic rename on POSIX systems
|
||||
if err := os.Rename(tempPath, leftPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write metadata
|
||||
meta := createSnapshotMetadata()
|
||||
return writeSnapshotMetadata(leftMetaPath, meta)
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
return sm.CaptureLeft()
|
||||
}
|
||||
|
||||
// updateBaseSnapshot copies the current JSONL to the base snapshot file
|
||||
// This should be called after successful import to track the new baseline
|
||||
// Uses atomic file operations to prevent race conditions
|
||||
func updateBaseSnapshot(jsonlPath string) error {
|
||||
basePath, _ := getSnapshotPaths(jsonlPath)
|
||||
baseMetaPath, _ := getSnapshotMetadataPaths(jsonlPath)
|
||||
|
||||
// Use process-specific temp file to prevent concurrent write conflicts
|
||||
tempPath := fmt.Sprintf("%s.%d.tmp", basePath, os.Getpid())
|
||||
if err := copyFileSnapshot(jsonlPath, tempPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Atomic rename on POSIX systems
|
||||
if err := os.Rename(tempPath, basePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write metadata
|
||||
meta := createSnapshotMetadata()
|
||||
return writeSnapshotMetadata(baseMetaPath, meta)
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
return sm.UpdateBase()
|
||||
}
|
||||
|
||||
// merge3WayAndPruneDeletions performs 3-way merge and prunes accepted deletions from DB
|
||||
// Returns true if merge was performed, false if skipped (no base file)
|
||||
func merge3WayAndPruneDeletions(ctx context.Context, store storage.Storage, jsonlPath string) (bool, error) {
|
||||
basePath, leftPath := getSnapshotPaths(jsonlPath)
|
||||
baseMetaPath, leftMetaPath := getSnapshotMetadataPaths(jsonlPath)
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
basePath, leftPath := sm.getSnapshotPaths()
|
||||
|
||||
// If no base snapshot exists, skip deletion handling (first run or bootstrap)
|
||||
if !fileExists(basePath) {
|
||||
if !sm.BaseExists() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
|
||||
// Validate snapshot metadata
|
||||
currentCommit := getCurrentCommitSHA()
|
||||
|
||||
baseMeta, err := readSnapshotMetadata(baseMetaPath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to read base snapshot metadata: %w", err)
|
||||
}
|
||||
|
||||
if err := validateSnapshotMetadata(baseMeta, currentCommit); err != nil {
|
||||
if err := sm.Validate(); err != nil {
|
||||
// Stale or invalid snapshot - clean up and skip merge
|
||||
fmt.Fprintf(os.Stderr, "Warning: base snapshot invalid (%v), cleaning up\n", err)
|
||||
_ = cleanupSnapshots(jsonlPath)
|
||||
fmt.Fprintf(os.Stderr, "Warning: snapshot validation failed (%v), cleaning up\n", err)
|
||||
_ = sm.Cleanup()
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If left snapshot exists, validate it too
|
||||
if fileExists(leftPath) {
|
||||
leftMeta, err := readSnapshotMetadata(leftMetaPath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to read left snapshot metadata: %w", err)
|
||||
}
|
||||
|
||||
if err := validateSnapshotMetadata(leftMeta, currentCommit); err != nil {
|
||||
// Stale or invalid snapshot - clean up and skip merge
|
||||
fmt.Fprintf(os.Stderr, "Warning: left snapshot invalid (%v), cleaning up\n", err)
|
||||
_ = cleanupSnapshots(jsonlPath)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Run 3-way merge: base (last import) vs left (pre-pull export) vs right (pulled JSONL)
|
||||
tmpMerged := jsonlPath + ".merged"
|
||||
@@ -254,7 +58,7 @@ func merge3WayAndPruneDeletions(ctx context.Context, store storage.Storage, json
|
||||
}
|
||||
}()
|
||||
|
||||
if err = merge.Merge3Way(tmpMerged, basePath, leftPath, jsonlPath, false); err != nil {
|
||||
if err := merge.Merge3Way(tmpMerged, basePath, leftPath, jsonlPath, false); err != nil {
|
||||
// Merge error (including conflicts) is returned as error
|
||||
return false, fmt.Errorf("3-way merge failed: %w", err)
|
||||
}
|
||||
@@ -265,7 +69,7 @@ func merge3WayAndPruneDeletions(ctx context.Context, store storage.Storage, json
|
||||
}
|
||||
|
||||
// Compute accepted deletions (issues in base but not in merged, and unchanged locally)
|
||||
acceptedDeletions, err := computeAcceptedDeletions(basePath, leftPath, jsonlPath)
|
||||
acceptedDeletions, err := sm.ComputeAcceptedDeletions(jsonlPath)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to compute accepted deletions: %w", err)
|
||||
}
|
||||
@@ -283,232 +87,58 @@ func merge3WayAndPruneDeletions(ctx context.Context, store storage.Storage, json
|
||||
return false, fmt.Errorf("deletion failures (DB may be inconsistent): %v", deletionErrors)
|
||||
}
|
||||
|
||||
if len(acceptedDeletions) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "3-way merge: pruned %d deleted issue(s) from database\n", len(acceptedDeletions))
|
||||
// Print stats if deletions were found
|
||||
stats := sm.GetStats()
|
||||
if stats.DeletionsFound > 0 {
|
||||
fmt.Fprintf(os.Stderr, "3-way merge: pruned %d deleted issue(s) from database (base: %d, left: %d, merged: %d)\n",
|
||||
stats.DeletionsFound, stats.BaseCount, stats.LeftCount, stats.MergedCount)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// computeAcceptedDeletions identifies issues that were deleted in the remote
|
||||
// and should be removed from the local database.
|
||||
//
|
||||
// An issue is an "accepted deletion" if:
|
||||
// - It exists in base (last import)
|
||||
// - It does NOT exist in merged (after 3-way merge)
|
||||
// - It is unchanged in left (pre-pull export) compared to base
|
||||
//
|
||||
// This means the issue was deleted remotely and we had no local modifications,
|
||||
// so we should accept the deletion and prune it from our DB.
|
||||
func computeAcceptedDeletions(basePath, leftPath, mergedPath string) ([]string, error) {
|
||||
// Build map of ID -> raw line for base and left
|
||||
baseIndex, err := buildIDToLineMap(basePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read base snapshot: %w", err)
|
||||
}
|
||||
|
||||
leftIndex, err := buildIDToLineMap(leftPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read left snapshot: %w", err)
|
||||
}
|
||||
|
||||
// Build set of IDs in merged result
|
||||
mergedIDs, err := buildIDSet(mergedPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read merged file: %w", err)
|
||||
}
|
||||
|
||||
// Find accepted deletions
|
||||
var deletions []string
|
||||
for id, baseLine := range baseIndex {
|
||||
// Issue in base but not in merged
|
||||
if !mergedIDs[id] {
|
||||
// Check if unchanged locally - try raw equality first, then semantic JSON comparison
|
||||
if leftLine, existsInLeft := leftIndex[id]; existsInLeft && (leftLine == baseLine || jsonEquals(leftLine, baseLine)) {
|
||||
deletions = append(deletions, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return deletions, nil
|
||||
}
|
||||
|
||||
// buildIDToLineMap reads a JSONL file and returns a map of issue ID -> raw JSON line
|
||||
func buildIDToLineMap(path string) (map[string]string, error) {
|
||||
result := make(map[string]string)
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return result, nil // Empty map for missing files
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse just the ID field
|
||||
var issue struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(line), &issue); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse issue ID from line: %w", err)
|
||||
}
|
||||
|
||||
result[issue.ID] = line
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// buildIDSet reads a JSONL file and returns a set of issue IDs
|
||||
func buildIDSet(path string) (map[string]bool, error) {
|
||||
result := make(map[string]bool)
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return result, nil // Empty set for missing files
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse just the ID field
|
||||
var issue struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(line), &issue); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse issue ID from line: %w", err)
|
||||
}
|
||||
|
||||
result[issue.ID] = true
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// fileExists checks if a file exists
|
||||
func fileExists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// copyFileSnapshot copies a file from src to dst (renamed to avoid conflict with migrate_hash_ids.go)
|
||||
func copyFileSnapshot(src, dst string) error {
|
||||
sourceFile, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sourceFile.Close()
|
||||
|
||||
destFile, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer destFile.Close()
|
||||
|
||||
if _, err := io.Copy(destFile, sourceFile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return destFile.Sync()
|
||||
}
|
||||
|
||||
// cleanupSnapshots removes the snapshot files and their metadata
|
||||
// This is useful for cleanup after errors or manual operations
|
||||
// Deprecated: Use SnapshotManager.Cleanup() instead
|
||||
func cleanupSnapshots(jsonlPath string) error {
|
||||
basePath, leftPath := getSnapshotPaths(jsonlPath)
|
||||
baseMetaPath, leftMetaPath := getSnapshotMetadataPaths(jsonlPath)
|
||||
|
||||
_ = os.Remove(basePath)
|
||||
_ = os.Remove(leftPath)
|
||||
_ = os.Remove(baseMetaPath)
|
||||
_ = os.Remove(leftMetaPath)
|
||||
|
||||
return nil
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
return sm.Cleanup()
|
||||
}
|
||||
|
||||
// validateSnapshotConsistency checks if snapshot files are consistent
|
||||
// Returns an error if snapshots are corrupted or missing critical data
|
||||
// Deprecated: Use SnapshotManager.Validate() instead
|
||||
func validateSnapshotConsistency(jsonlPath string) error {
|
||||
basePath, leftPath := getSnapshotPaths(jsonlPath)
|
||||
|
||||
// Base file is optional (might not exist on first run)
|
||||
if fileExists(basePath) {
|
||||
if _, err := buildIDSet(basePath); err != nil {
|
||||
return fmt.Errorf("base snapshot is corrupted: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Left file is optional (might not exist if export hasn't run)
|
||||
if fileExists(leftPath) {
|
||||
if _, err := buildIDSet(leftPath); err != nil {
|
||||
return fmt.Errorf("left snapshot is corrupted: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
return sm.Validate()
|
||||
}
|
||||
|
||||
// getSnapshotStats returns statistics about the snapshot files
|
||||
// Deprecated: Use SnapshotManager.GetStats() instead
|
||||
func getSnapshotStats(jsonlPath string) (baseCount, leftCount int, baseExists, leftExists bool) {
|
||||
basePath, leftPath := getSnapshotPaths(jsonlPath)
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
basePath, leftPath := sm.GetSnapshotPaths()
|
||||
|
||||
if baseIDs, err := buildIDSet(basePath); err == nil {
|
||||
if baseIDs, err := sm.BuildIDSet(basePath); err == nil && len(baseIDs) > 0 {
|
||||
baseExists = true
|
||||
baseCount = len(baseIDs)
|
||||
} else {
|
||||
baseExists = fileExists(basePath)
|
||||
}
|
||||
|
||||
if leftIDs, err := buildIDSet(leftPath); err == nil {
|
||||
if leftIDs, err := sm.BuildIDSet(leftPath); err == nil && len(leftIDs) > 0 {
|
||||
leftExists = true
|
||||
leftCount = len(leftIDs)
|
||||
} else {
|
||||
leftExists = fileExists(leftPath)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// initializeSnapshotsIfNeeded creates initial snapshot files if they don't exist
|
||||
// This is called during init or first sync to bootstrap the deletion tracking
|
||||
// Deprecated: Use SnapshotManager.Initialize() instead
|
||||
func initializeSnapshotsIfNeeded(jsonlPath string) error {
|
||||
basePath, _ := getSnapshotPaths(jsonlPath)
|
||||
baseMetaPath, _ := getSnapshotMetadataPaths(jsonlPath)
|
||||
|
||||
// If JSONL exists but base snapshot doesn't, create initial base
|
||||
if fileExists(jsonlPath) && !fileExists(basePath) {
|
||||
if err := copyFileSnapshot(jsonlPath, basePath); err != nil {
|
||||
return fmt.Errorf("failed to initialize base snapshot: %w", err)
|
||||
}
|
||||
|
||||
// Create metadata
|
||||
meta := createSnapshotMetadata()
|
||||
if err := writeSnapshotMetadata(baseMetaPath, meta); err != nil {
|
||||
return fmt.Errorf("failed to initialize base snapshot metadata: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
return sm.Initialize()
|
||||
}
|
||||
|
||||
// getMultiRepoJSONLPaths returns all JSONL file paths for multi-repo mode
|
||||
|
||||
@@ -252,8 +252,9 @@ func TestDeletionWithLocalModification(t *testing.T) {
|
||||
func TestComputeAcceptedDeletions(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
basePath := filepath.Join(dir, "base.jsonl")
|
||||
leftPath := filepath.Join(dir, "left.jsonl")
|
||||
jsonlPath := filepath.Join(dir, "issues.jsonl")
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
basePath, leftPath := sm.GetSnapshotPaths()
|
||||
mergedPath := filepath.Join(dir, "merged.jsonl")
|
||||
|
||||
// Base has 3 issues
|
||||
@@ -280,7 +281,7 @@ func TestComputeAcceptedDeletions(t *testing.T) {
|
||||
t.Fatalf("Failed to write merged: %v", err)
|
||||
}
|
||||
|
||||
deletions, err := computeAcceptedDeletions(basePath, leftPath, mergedPath)
|
||||
deletions, err := sm.ComputeAcceptedDeletions(mergedPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to compute deletions: %v", err)
|
||||
}
|
||||
@@ -326,7 +327,9 @@ func TestComputeAcceptedDeletions_LocallyModified(t *testing.T) {
|
||||
t.Fatalf("Failed to write merged: %v", err)
|
||||
}
|
||||
|
||||
deletions, err := computeAcceptedDeletions(basePath, leftPath, mergedPath)
|
||||
jsonlPath := filepath.Join(dir, "issues.jsonl")
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
deletions, err := sm.ComputeAcceptedDeletions(mergedPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to compute deletions: %v", err)
|
||||
}
|
||||
@@ -354,7 +357,8 @@ func TestSnapshotManagement(t *testing.T) {
|
||||
t.Fatalf("Failed to initialize snapshots: %v", err)
|
||||
}
|
||||
|
||||
basePath, leftPath := getSnapshotPaths(jsonlPath)
|
||||
sm := NewSnapshotManager(jsonlPath)
|
||||
basePath, leftPath := sm.GetSnapshotPaths()
|
||||
|
||||
// Base should exist, left should not
|
||||
if !fileExists(basePath) {
|
||||
@@ -491,8 +495,10 @@ func TestMultiRepoDeletionTracking(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify snapshot files exist for both repos
|
||||
primaryBasePath, primaryLeftPath := getSnapshotPaths(primaryJSONL)
|
||||
additionalBasePath, additionalLeftPath := getSnapshotPaths(additionalJSONL)
|
||||
primarySM := NewSnapshotManager(primaryJSONL)
|
||||
primaryBasePath, primaryLeftPath := primarySM.GetSnapshotPaths()
|
||||
additionalSM := NewSnapshotManager(additionalJSONL)
|
||||
additionalBasePath, additionalLeftPath := additionalSM.GetSnapshotPaths()
|
||||
|
||||
if !fileExists(primaryBasePath) {
|
||||
t.Errorf("Primary base snapshot not created: %s", primaryBasePath)
|
||||
@@ -762,8 +768,10 @@ func TestMultiRepoSnapshotIsolation(t *testing.T) {
|
||||
}
|
||||
|
||||
// Get snapshot paths for both
|
||||
repo1Base, repo1Left := getSnapshotPaths(repo1JSONL)
|
||||
repo2Base, repo2Left := getSnapshotPaths(repo2JSONL)
|
||||
repo1SM := NewSnapshotManager(repo1JSONL)
|
||||
repo1Base, repo1Left := repo1SM.GetSnapshotPaths()
|
||||
repo2SM := NewSnapshotManager(repo2JSONL)
|
||||
repo2Base, repo2Left := repo2SM.GetSnapshotPaths()
|
||||
|
||||
// Verify isolation: snapshots should be in different directories
|
||||
if filepath.Dir(repo1Base) == filepath.Dir(repo2Base) {
|
||||
@@ -771,11 +779,11 @@ func TestMultiRepoSnapshotIsolation(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify each snapshot contains only its own issue
|
||||
repo1IDs, err := buildIDSet(repo1Base)
|
||||
repo1IDs, err := repo1SM.BuildIDSet(repo1Base)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read repo1 base snapshot: %v", err)
|
||||
}
|
||||
repo2IDs, err := buildIDSet(repo2Base)
|
||||
repo2IDs, err := repo2SM.BuildIDSet(repo2Base)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read repo2 base snapshot: %v", err)
|
||||
}
|
||||
@@ -807,11 +815,11 @@ func TestMultiRepoSnapshotIsolation(t *testing.T) {
|
||||
t.Error("Both left snapshots should exist")
|
||||
}
|
||||
|
||||
repo1LeftIDs, err := buildIDSet(repo1Left)
|
||||
repo1LeftIDs, err := repo1SM.BuildIDSet(repo1Left)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read repo1 left snapshot: %v", err)
|
||||
}
|
||||
repo2LeftIDs, err := buildIDSet(repo2Left)
|
||||
repo2LeftIDs, err := repo2SM.BuildIDSet(repo2Left)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read repo2 left snapshot: %v", err)
|
||||
}
|
||||
|
||||
492
cmd/bd/snapshot_manager.go
Normal file
492
cmd/bd/snapshot_manager.go
Normal file
@@ -0,0 +1,492 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// MagicHeader is written to snapshot files for corruption detection
|
||||
MagicHeader = "# beads snapshot v1\n"
|
||||
|
||||
// maxSnapshotAge is the maximum allowed age for a snapshot file (1 hour)
|
||||
maxSnapshotAge = 1 * time.Hour
|
||||
)
|
||||
|
||||
// snapshotMetadata contains versioning info for snapshot files
|
||||
type snapshotMetadata struct {
|
||||
Version string `json:"version"` // bd version that created this snapshot
|
||||
Timestamp time.Time `json:"timestamp"` // When snapshot was created
|
||||
CommitSHA string `json:"commit"` // Git commit SHA at snapshot time
|
||||
}
|
||||
|
||||
// SnapshotStats contains statistics about snapshot operations
|
||||
type SnapshotStats struct {
|
||||
BaseCount int // Number of issues in base snapshot
|
||||
LeftCount int // Number of issues in left snapshot
|
||||
MergedCount int // Number of issues in merged result
|
||||
DeletionsFound int // Number of deletions detected
|
||||
BaseExists bool // Whether base snapshot exists
|
||||
LeftExists bool // Whether left snapshot exists
|
||||
}
|
||||
|
||||
// SnapshotManager handles snapshot file operations and validation
|
||||
type SnapshotManager struct {
|
||||
jsonlPath string
|
||||
stats SnapshotStats
|
||||
}
|
||||
|
||||
// NewSnapshotManager creates a new snapshot manager for the given JSONL path
|
||||
func NewSnapshotManager(jsonlPath string) *SnapshotManager {
|
||||
return &SnapshotManager{
|
||||
jsonlPath: jsonlPath,
|
||||
stats: SnapshotStats{},
|
||||
}
|
||||
}
|
||||
|
||||
// GetStats returns accumulated statistics about snapshot operations
|
||||
func (sm *SnapshotManager) GetStats() SnapshotStats {
|
||||
return sm.stats
|
||||
}
|
||||
|
||||
// getSnapshotPaths returns paths for base and left snapshot files
|
||||
func (sm *SnapshotManager) getSnapshotPaths() (basePath, leftPath string) {
|
||||
dir := filepath.Dir(sm.jsonlPath)
|
||||
basePath = filepath.Join(dir, "beads.base.jsonl")
|
||||
leftPath = filepath.Join(dir, "beads.left.jsonl")
|
||||
return
|
||||
}
|
||||
|
||||
// getSnapshotMetadataPaths returns paths for metadata files
|
||||
func (sm *SnapshotManager) getSnapshotMetadataPaths() (baseMeta, leftMeta string) {
|
||||
dir := filepath.Dir(sm.jsonlPath)
|
||||
baseMeta = filepath.Join(dir, "beads.base.meta.json")
|
||||
leftMeta = filepath.Join(dir, "beads.left.meta.json")
|
||||
return
|
||||
}
|
||||
|
||||
// CaptureLeft copies the current JSONL to the left snapshot file
|
||||
// This should be called after export, before git pull
|
||||
func (sm *SnapshotManager) CaptureLeft() error {
|
||||
_, leftPath := sm.getSnapshotPaths()
|
||||
_, leftMetaPath := sm.getSnapshotMetadataPaths()
|
||||
|
||||
// Use process-specific temp file to prevent concurrent write conflicts
|
||||
tempPath := fmt.Sprintf("%s.%d.tmp", leftPath, os.Getpid())
|
||||
if err := sm.copyFile(sm.jsonlPath, tempPath); err != nil {
|
||||
return fmt.Errorf("failed to copy to temp file: %w", err)
|
||||
}
|
||||
|
||||
// Atomic rename on POSIX systems
|
||||
if err := os.Rename(tempPath, leftPath); err != nil {
|
||||
return fmt.Errorf("failed to rename snapshot: %w", err)
|
||||
}
|
||||
|
||||
// Write metadata
|
||||
meta := sm.createMetadata()
|
||||
if err := sm.writeMetadata(leftMetaPath, meta); err != nil {
|
||||
return fmt.Errorf("failed to write metadata: %w", err)
|
||||
}
|
||||
|
||||
// Update stats
|
||||
if ids, err := sm.buildIDSet(leftPath); err == nil {
|
||||
sm.stats.LeftExists = true
|
||||
sm.stats.LeftCount = len(ids)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBase copies the current JSONL to the base snapshot file
|
||||
// This should be called after successful import to track the new baseline
|
||||
func (sm *SnapshotManager) UpdateBase() error {
|
||||
basePath, _ := sm.getSnapshotPaths()
|
||||
baseMetaPath, _ := sm.getSnapshotMetadataPaths()
|
||||
|
||||
// Use process-specific temp file to prevent concurrent write conflicts
|
||||
tempPath := fmt.Sprintf("%s.%d.tmp", basePath, os.Getpid())
|
||||
if err := sm.copyFile(sm.jsonlPath, tempPath); err != nil {
|
||||
return fmt.Errorf("failed to copy to temp file: %w", err)
|
||||
}
|
||||
|
||||
// Atomic rename on POSIX systems
|
||||
if err := os.Rename(tempPath, basePath); err != nil {
|
||||
return fmt.Errorf("failed to rename snapshot: %w", err)
|
||||
}
|
||||
|
||||
// Write metadata
|
||||
meta := sm.createMetadata()
|
||||
if err := sm.writeMetadata(baseMetaPath, meta); err != nil {
|
||||
return fmt.Errorf("failed to write metadata: %w", err)
|
||||
}
|
||||
|
||||
// Update stats
|
||||
if ids, err := sm.buildIDSet(basePath); err == nil {
|
||||
sm.stats.BaseExists = true
|
||||
sm.stats.BaseCount = len(ids)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate checks if snapshots exist and are valid
|
||||
func (sm *SnapshotManager) Validate() error {
|
||||
basePath, leftPath := sm.getSnapshotPaths()
|
||||
baseMetaPath, leftMetaPath := sm.getSnapshotMetadataPaths()
|
||||
|
||||
// Check if base snapshot exists
|
||||
if !fileExists(basePath) {
|
||||
return nil // No base snapshot - first run, not an error
|
||||
}
|
||||
|
||||
currentCommit := getCurrentCommitSHA()
|
||||
|
||||
// Validate base snapshot
|
||||
baseMeta, err := sm.readMetadata(baseMetaPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("base snapshot metadata error: %w", err)
|
||||
}
|
||||
|
||||
if err := sm.validateMetadata(baseMeta, currentCommit); err != nil {
|
||||
return fmt.Errorf("base snapshot invalid: %w", err)
|
||||
}
|
||||
|
||||
// Validate left snapshot if it exists
|
||||
if fileExists(leftPath) {
|
||||
leftMeta, err := sm.readMetadata(leftMetaPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("left snapshot metadata error: %w", err)
|
||||
}
|
||||
|
||||
if err := sm.validateMetadata(leftMeta, currentCommit); err != nil {
|
||||
return fmt.Errorf("left snapshot invalid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Check for corruption
|
||||
if _, err := sm.buildIDSet(basePath); err != nil {
|
||||
return fmt.Errorf("base snapshot corrupted: %w", err)
|
||||
}
|
||||
|
||||
if fileExists(leftPath) {
|
||||
if _, err := sm.buildIDSet(leftPath); err != nil {
|
||||
return fmt.Errorf("left snapshot corrupted: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup removes all snapshot files and metadata
|
||||
func (sm *SnapshotManager) Cleanup() error {
|
||||
basePath, leftPath := sm.getSnapshotPaths()
|
||||
baseMetaPath, leftMetaPath := sm.getSnapshotMetadataPaths()
|
||||
|
||||
_ = os.Remove(basePath)
|
||||
_ = os.Remove(leftPath)
|
||||
_ = os.Remove(baseMetaPath)
|
||||
_ = os.Remove(leftMetaPath)
|
||||
|
||||
// Reset stats
|
||||
sm.stats = SnapshotStats{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initialize creates initial snapshot files if they don't exist
|
||||
func (sm *SnapshotManager) Initialize() error {
|
||||
basePath, _ := sm.getSnapshotPaths()
|
||||
baseMetaPath, _ := sm.getSnapshotMetadataPaths()
|
||||
|
||||
// If JSONL exists but base snapshot doesn't, create initial base
|
||||
if fileExists(sm.jsonlPath) && !fileExists(basePath) {
|
||||
if err := sm.copyFile(sm.jsonlPath, basePath); err != nil {
|
||||
return fmt.Errorf("failed to initialize base snapshot: %w", err)
|
||||
}
|
||||
|
||||
// Create metadata
|
||||
meta := sm.createMetadata()
|
||||
if err := sm.writeMetadata(baseMetaPath, meta); err != nil {
|
||||
return fmt.Errorf("failed to initialize base snapshot metadata: %w", err)
|
||||
}
|
||||
|
||||
// Update stats
|
||||
if ids, err := sm.buildIDSet(basePath); err == nil {
|
||||
sm.stats.BaseExists = true
|
||||
sm.stats.BaseCount = len(ids)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ComputeAcceptedDeletions identifies issues that were deleted remotely
|
||||
// An issue is an "accepted deletion" if:
|
||||
// - It exists in base (last import)
|
||||
// - It does NOT exist in merged (after 3-way merge)
|
||||
// - It is unchanged in left (pre-pull export) compared to base
|
||||
func (sm *SnapshotManager) ComputeAcceptedDeletions(mergedPath string) ([]string, error) {
|
||||
basePath, leftPath := sm.getSnapshotPaths()
|
||||
|
||||
// Build map of ID -> raw line for base and left
|
||||
baseIndex, err := sm.buildIDToLineMap(basePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read base snapshot: %w", err)
|
||||
}
|
||||
|
||||
leftIndex, err := sm.buildIDToLineMap(leftPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read left snapshot: %w", err)
|
||||
}
|
||||
|
||||
// Build set of IDs in merged result
|
||||
mergedIDs, err := sm.buildIDSet(mergedPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read merged file: %w", err)
|
||||
}
|
||||
|
||||
sm.stats.MergedCount = len(mergedIDs)
|
||||
|
||||
// Find accepted deletions
|
||||
var deletions []string
|
||||
for id, baseLine := range baseIndex {
|
||||
// Issue in base but not in merged
|
||||
if !mergedIDs[id] {
|
||||
// Check if unchanged locally - try raw equality first, then semantic JSON comparison
|
||||
if leftLine, existsInLeft := leftIndex[id]; existsInLeft && (leftLine == baseLine || sm.jsonEquals(leftLine, baseLine)) {
|
||||
deletions = append(deletions, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sm.stats.DeletionsFound = len(deletions)
|
||||
|
||||
return deletions, nil
|
||||
}
|
||||
|
||||
// BaseExists checks if the base snapshot exists
|
||||
func (sm *SnapshotManager) BaseExists() bool {
|
||||
basePath, _ := sm.getSnapshotPaths()
|
||||
return fileExists(basePath)
|
||||
}
|
||||
|
||||
// GetSnapshotPaths returns the base and left snapshot paths (exposed for testing)
|
||||
func (sm *SnapshotManager) GetSnapshotPaths() (basePath, leftPath string) {
|
||||
return sm.getSnapshotPaths()
|
||||
}
|
||||
|
||||
// BuildIDSet reads a JSONL file and returns a set of issue IDs (exposed for testing)
|
||||
func (sm *SnapshotManager) BuildIDSet(path string) (map[string]bool, error) {
|
||||
return sm.buildIDSet(path)
|
||||
}
|
||||
|
||||
// Private helper methods
|
||||
|
||||
func (sm *SnapshotManager) createMetadata() snapshotMetadata {
|
||||
return snapshotMetadata{
|
||||
Version: getVersion(),
|
||||
Timestamp: time.Now(),
|
||||
CommitSHA: getCurrentCommitSHA(),
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SnapshotManager) writeMetadata(path string, meta snapshotMetadata) error {
|
||||
data, err := json.Marshal(meta)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal metadata: %w", err)
|
||||
}
|
||||
|
||||
// Use process-specific temp file for atomic write
|
||||
tempPath := fmt.Sprintf("%s.%d.tmp", path, os.Getpid())
|
||||
if err := os.WriteFile(tempPath, data, 0644); err != nil {
|
||||
return fmt.Errorf("failed to write metadata temp file: %w", err)
|
||||
}
|
||||
|
||||
// Atomic rename
|
||||
return os.Rename(tempPath, path)
|
||||
}
|
||||
|
||||
func (sm *SnapshotManager) readMetadata(path string) (*snapshotMetadata, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil // No metadata file exists (backward compatibility)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to read metadata: %w", err)
|
||||
}
|
||||
|
||||
var meta snapshotMetadata
|
||||
if err := json.Unmarshal(data, &meta); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse metadata: %w", err)
|
||||
}
|
||||
|
||||
return &meta, nil
|
||||
}
|
||||
|
||||
func (sm *SnapshotManager) validateMetadata(meta *snapshotMetadata, currentCommit string) error {
|
||||
if meta == nil {
|
||||
// No metadata file - likely old snapshot format, consider it stale
|
||||
return fmt.Errorf("snapshot has no metadata (stale format)")
|
||||
}
|
||||
|
||||
// Check age
|
||||
age := time.Since(meta.Timestamp)
|
||||
if age > maxSnapshotAge {
|
||||
return fmt.Errorf("snapshot is too old (age: %v, max: %v)", age.Round(time.Second), maxSnapshotAge)
|
||||
}
|
||||
|
||||
// Check version compatibility (major.minor must match)
|
||||
currentVersion := getVersion()
|
||||
if !isVersionCompatible(meta.Version, currentVersion) {
|
||||
return fmt.Errorf("snapshot version %s incompatible with current version %s", meta.Version, currentVersion)
|
||||
}
|
||||
|
||||
// Check commit SHA if we're in a git repo
|
||||
if currentCommit != "" && meta.CommitSHA != "" && meta.CommitSHA != currentCommit {
|
||||
return fmt.Errorf("snapshot from different commit (snapshot: %s, current: %s)", meta.CommitSHA, currentCommit)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sm *SnapshotManager) buildIDToLineMap(path string) (map[string]string, error) {
|
||||
result := make(map[string]string)
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return result, nil // Empty map for missing files
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse just the ID field
|
||||
var issue struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(line), &issue); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse issue ID from line: %w", err)
|
||||
}
|
||||
|
||||
result[issue.ID] = line
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (sm *SnapshotManager) buildIDSet(path string) (map[string]bool, error) {
|
||||
result := make(map[string]bool)
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return result, nil // Empty set for missing files
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse just the ID field
|
||||
var issue struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(line), &issue); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse issue ID from line: %w", err)
|
||||
}
|
||||
|
||||
result[issue.ID] = true
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (sm *SnapshotManager) jsonEquals(a, b string) bool {
|
||||
var objA, objB map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(a), &objA); err != nil {
|
||||
return false
|
||||
}
|
||||
if err := json.Unmarshal([]byte(b), &objB); err != nil {
|
||||
return false
|
||||
}
|
||||
return reflect.DeepEqual(objA, objB)
|
||||
}
|
||||
|
||||
func (sm *SnapshotManager) copyFile(src, dst string) error {
|
||||
sourceFile, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sourceFile.Close()
|
||||
|
||||
destFile, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer destFile.Close()
|
||||
|
||||
if _, err := io.Copy(destFile, sourceFile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return destFile.Sync()
|
||||
}
|
||||
|
||||
// Package-level helper functions
|
||||
|
||||
func getCurrentCommitSHA() string {
|
||||
cmd := exec.Command("git", "rev-parse", "--short", "HEAD")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(output))
|
||||
}
|
||||
|
||||
// isVersionCompatible reports whether v1 and v2 share the same major.minor
// version. Patch-level differences are compatible; a version with fewer
// than two dot-separated components is never compatible.
func isVersionCompatible(v1, v2 string) bool {
	// Only the first two components matter, so cap the split at three.
	a := strings.SplitN(v1, ".", 3)
	b := strings.SplitN(v2, ".", 3)

	if len(a) < 2 || len(b) < 2 {
		return false
	}

	return a[0] == b[0] && a[1] == b[1]
}
|
||||
|
||||
// fileExists reports whether something exists at path. Note that any Stat
// failure (including permission errors) is treated as "does not exist".
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}
|
||||
@@ -73,6 +73,56 @@ func GenerateHashID(prefix, title, description string, created time.Time, worksp
|
||||
- These fields are mutable and shouldn't affect identity
|
||||
- Changing priority shouldn't change the issue ID
|
||||
|
||||
## Content Hash (Collision Detection)
|
||||
|
||||
Separate from ID generation, bd uses content hashing for collision detection during import. See `internal/storage/sqlite/collision.go:hashIssueContent()`.
|
||||
|
||||
### Content Hash Fields
|
||||
|
||||
The content hash includes ALL semantically meaningful fields:
|
||||
- title, description, status, priority, issue_type
|
||||
- assignee, design, acceptance_criteria, notes
|
||||
- **external_ref** ⚠️ (important: see below)
|
||||
|
||||
### External Ref in Content Hash
|
||||
|
||||
**IMPORTANT:** `external_ref` is included in the content hash. This has subtle implications:
|
||||
|
||||
```
|
||||
Local issue (no external_ref) → content hash A
|
||||
Same issue + external_ref → content hash B (different!)
|
||||
```
|
||||
|
||||
**Why include external_ref?**
|
||||
- Linkage to external systems (Jira, GitHub, Linear) is semantically meaningful
|
||||
- Changing external_ref represents a real content change
|
||||
- Ensures external system changes are tracked properly
|
||||
|
||||
**Implications:**
|
||||
1. **Rename detection** won't match issues before/after adding external_ref
|
||||
2. **Collision detection** treats external_ref changes as updates
|
||||
3. **Idempotent import** requires identical external_ref
|
||||
4. **Import by external_ref** still works (checked before content hash)
|
||||
|
||||
**Example scenario:**
|
||||
```bash
|
||||
# 1. Create local issue
|
||||
bd create "Fix auth bug" -p 1
|
||||
# → ID: bd-a3f2dd, content_hash: abc123
|
||||
|
||||
# 2. Link to Jira
|
||||
bd update bd-a3f2dd --external-ref JIRA-456
|
||||
# → ID: bd-a3f2dd (same), content_hash: def789 (changed!)
|
||||
|
||||
# 3. Re-import from Jira
|
||||
bd import -i jira-export.jsonl
|
||||
# → Matches by external_ref first (JIRA-456)
|
||||
# → Content hash different, triggers update
|
||||
# → Idempotent on subsequent imports
|
||||
```
|
||||
|
||||
**Design rationale:** External system linkage is tracked as substantive content, not just metadata. This ensures proper audit trails and collision resolution.
|
||||
|
||||
**Why 6 chars (with progressive extension)?**
|
||||
- 6 chars (24 bits) = ~16 million possible IDs
|
||||
- Progressive collision handling: extend to 7-8 chars only when needed
|
||||
|
||||
@@ -144,6 +144,19 @@ func compareIssues(existing, incoming *types.Issue) []string {
|
||||
}
|
||||
|
||||
// hashIssueContent creates a deterministic hash of issue content (excluding ID and timestamps)
|
||||
//
|
||||
// IMPORTANT: external_ref is included in the content hash. This means:
|
||||
// - Adding/removing/changing external_ref changes the content hash
|
||||
// - A local issue that gains an external_ref will have different content hash
|
||||
// - This is intentional: external_ref is semantically meaningful content
|
||||
//
|
||||
// Implications:
|
||||
// 1. Rename detection won't match issues before/after adding external_ref
|
||||
// 2. Content-based collision detection treats external_ref changes as updates
|
||||
// 3. Idempotent import only when external_ref is identical
|
||||
//
|
||||
// This design choice ensures external system linkage is tracked as substantive content,
|
||||
// not just metadata. See docs/HASH_ID_DESIGN.md for more on content hash philosophy.
|
||||
func hashIssueContent(issue *types.Issue) string {
|
||||
h := sha256.New()
|
||||
_, _ = fmt.Fprintf(h, "title:%s\n", issue.Title)
|
||||
@@ -155,6 +168,7 @@ func hashIssueContent(issue *types.Issue) string {
|
||||
_, _ = fmt.Fprintf(h, "design:%s\n", issue.Design)
|
||||
_, _ = fmt.Fprintf(h, "acceptance:%s\n", issue.AcceptanceCriteria)
|
||||
_, _ = fmt.Fprintf(h, "notes:%s\n", issue.Notes)
|
||||
// external_ref is included in content hash (see comment above)
|
||||
if issue.ExternalRef != nil {
|
||||
_, _ = fmt.Fprintf(h, "external_ref:%s\n", *issue.ExternalRef)
|
||||
}
|
||||
|
||||
@@ -340,6 +340,17 @@ func TestHashIssueContent(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestHashIssueContentWithExternalRef verifies that external_ref is included in content hash.
|
||||
//
|
||||
// This test demonstrates the behavior documented in bd-9f4a:
|
||||
// - Adding external_ref to an issue changes its content hash
|
||||
// - Different external_ref values produce different content hashes
|
||||
// - This is intentional: external_ref is semantically meaningful content
|
||||
//
|
||||
// Implications:
|
||||
// - Rename detection won't match issues before/after adding external_ref
|
||||
// - Collision detection treats external_ref changes as updates
|
||||
// - Idempotent import only when external_ref is identical
|
||||
func TestHashIssueContentWithExternalRef(t *testing.T) {
|
||||
ref1 := "JIRA-123"
|
||||
ref2 := "JIRA-456"
|
||||
@@ -365,11 +376,17 @@ func TestHashIssueContentWithExternalRef(t *testing.T) {
|
||||
hash2 := hashIssueContent(issueWithRef2)
|
||||
hash3 := hashIssueContent(issueNoRef)
|
||||
|
||||
// Different external_ref values should produce different hashes
|
||||
if hash1 == hash2 {
|
||||
t.Errorf("Expected different external refs to produce different hashes")
|
||||
}
|
||||
|
||||
// Adding external_ref should change the content hash
|
||||
if hash1 == hash3 {
|
||||
t.Errorf("Expected issue with external ref to differ from issue without")
|
||||
}
|
||||
|
||||
if hash2 == hash3 {
|
||||
t.Errorf("Expected issue with external ref to differ from issue without")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,6 +32,22 @@ Agent Mail server failure scenarios test that validates graceful degradation.
|
||||
- 1s HTTP timeouts for quick failure detection
|
||||
- Mock HTTP server avoids real network calls
|
||||
|
||||
### test_reservation_ttl.py
|
||||
|
||||
Reservation TTL and expiration test that validates time-based reservation behavior.
|
||||
|
||||
**What it tests:**
|
||||
- Short TTL reservations (30s)
|
||||
- Reservation blocking verification (agent2 cannot claim while agent1 holds reservation)
|
||||
- Auto-release after expiration (expired reservations become available)
|
||||
- Renewal/heartbeat mechanism (re-reserving extends expiration)
|
||||
|
||||
**Performance:**
|
||||
- Uses `--no-daemon` flag for fast tests
|
||||
- 30s TTL for expiration tests (includes wait time)
|
||||
- Total test time: ~57s (includes 30s+ waiting for expiration)
|
||||
- Mock HTTP server with full TTL management
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- bd installed: `go install github.com/steveyegge/beads/cmd/bd@latest`
|
||||
@@ -54,10 +70,16 @@ python3 tests/integration/test_agent_race.py
|
||||
python3 tests/integration/test_mail_failures.py
|
||||
```
|
||||
|
||||
**Run test_reservation_ttl.py:**
|
||||
```bash
|
||||
python3 tests/integration/test_reservation_ttl.py
|
||||
```
|
||||
|
||||
**Run all integration tests:**
|
||||
```bash
|
||||
python3 tests/integration/test_agent_race.py
|
||||
python3 tests/integration/test_mail_failures.py
|
||||
python3 tests/integration/test_reservation_ttl.py
|
||||
```
|
||||
|
||||
## Expected Results
|
||||
@@ -71,6 +93,11 @@ python3 tests/integration/test_mail_failures.py
|
||||
- Each test validates graceful degradation to Beads-only mode
|
||||
- JSONL remains consistent across all failure scenarios
|
||||
|
||||
### test_reservation_ttl.py
|
||||
- All 4 tests should pass in ~57 seconds
|
||||
- Tests verify TTL-based reservation expiration and renewal
|
||||
- Includes 30s+ wait time to validate actual expiration behavior
|
||||
|
||||
## Adding New Tests
|
||||
|
||||
Integration tests should:
|
||||
|
||||
635
tests/integration/test_reservation_ttl.py
Executable file
635
tests/integration/test_reservation_ttl.py
Executable file
@@ -0,0 +1,635 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Reservation TTL and Expiration Test Suite
|
||||
|
||||
Tests verify time-based reservation behavior:
|
||||
- Short TTL reservations (30s)
|
||||
- Reservation blocking verification
|
||||
- Auto-release after expiration
|
||||
- Renewal/heartbeat mechanisms
|
||||
|
||||
Performance notes:
|
||||
- Uses 30s TTL for expiration tests (fast enough for CI)
|
||||
- Uses mock HTTP server with minimal overhead
|
||||
- Each test ~30-60s (waiting for expiration)
|
||||
"""
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import tempfile
|
||||
import shutil
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
from threading import Thread, Lock
|
||||
from typing import Optional, Dict, Any, List
|
||||
import socket
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
# Add lib directory for beads_mail_adapter
|
||||
lib_path = Path(__file__).parent.parent.parent / "lib"
|
||||
sys.path.insert(0, str(lib_path))
|
||||
|
||||
from beads_mail_adapter import AgentMailAdapter
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.WARNING,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Test configuration
|
||||
TEST_TIMEOUT = 2 # HTTP timeout
|
||||
SHORT_TTL = 30 # Short TTL for expiration tests (30 seconds)
|
||||
|
||||
|
||||
class Reservation:
    """A file reservation held by an agent, expiring after a TTL.

    Attributes:
        file_path: Path the reservation covers.
        agent_name: Name of the agent holding the reservation.
        created_at: Timestamp when the reservation was created.
        expires_at: Timestamp after which the reservation is expired;
            always exactly created_at + ttl seconds.
    """

    def __init__(self, file_path: str, agent_name: str, ttl: int):
        self.file_path = file_path
        self.agent_name = agent_name
        # Capture "now" once so expires_at is exactly created_at + ttl.
        # (Previously two separate datetime.now() calls were used, making
        # the recorded TTL slightly shorter than requested.)
        now = datetime.now()
        self.created_at = now
        self.expires_at = now + timedelta(seconds=ttl)

    def is_expired(self) -> bool:
        """Check if reservation has expired."""
        return datetime.now() >= self.expires_at

    def renew(self, ttl: int) -> None:
        """Renew reservation, extending expiration to now + ttl seconds."""
        self.expires_at = datetime.now() + timedelta(seconds=ttl)

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary (ISO-8601 timestamps) for JSON serialization."""
        return {
            "file_path": self.file_path,
            "agent_name": self.agent_name,
            "expires_at": self.expires_at.isoformat(),
            "created_at": self.created_at.isoformat()
        }
|
||||
|
||||
|
||||
class MockAgentMailServer:
    """Mock Agent Mail server with TTL-based reservation management.

    State:
        reservations: file_path -> Reservation, guarded by `lock`.
        request_count: diagnostic counter of handled requests (read by tests
            only; not lock-protected, so it is approximate under concurrency).

    Expired reservations are removed lazily at the start of every request
    (see _cleanup_expired), which models server-side auto-release.
    """

    def __init__(self, port: int = 0):
        self.port = port  # 0 = let the OS choose a free port in start()
        self.server: Optional[HTTPServer] = None
        self.thread: Optional[Thread] = None
        self.reservations: Dict[str, Reservation] = {}  # file_path -> Reservation
        self.lock = Lock()  # Thread-safe access to reservations
        self.request_count = 0

    def start(self) -> int:
        """Start the mock server. Returns actual port number."""
        handler_class = self._create_handler()

        # Bind directly with the requested port. Port 0 makes the OS assign
        # a free ephemeral port, which we read back from server_address.
        # (The previous approach — probing for a free port with a throwaway
        # socket, closing it, then re-binding HTTPServer — was racy: another
        # process could grab the port between the probe and the bind.)
        self.server = HTTPServer(('127.0.0.1', self.port), handler_class)
        self.port = self.server.server_address[1]
        self.thread = Thread(target=self.server.serve_forever, daemon=True)
        self.thread.start()

        # HTTPServer binds and listens in its constructor, so the socket is
        # already accepting connections; no readiness sleep is needed.
        logger.info(f"Mock Agent Mail server started on port {self.port}")
        return self.port

    def stop(self):
        """Stop the mock server and release its listening socket."""
        if self.server:
            self.server.shutdown()
            self.server.server_close()
            logger.info(f"Mock Agent Mail server stopped")

    def _cleanup_expired(self) -> None:
        """Remove expired reservations (lazy server-side auto-release)."""
        with self.lock:
            expired = [path for path, res in self.reservations.items() if res.is_expired()]
            for path in expired:
                del self.reservations[path]
                logger.debug(f"Auto-released expired reservation: {path}")

    def _create_handler(self):
        """Create request handler class with access to server state."""
        # The handler class is created per-server so its closure can reach
        # this instance's reservations/lock without globals.
        parent = self

        class MockHandler(BaseHTTPRequestHandler):
            def log_message(self, format, *args):
                """Suppress default logging."""
                pass

            def do_GET(self):
                # GET /api/health        -> {"status": "ok"}
                # GET /api/reservations  -> all live (non-expired) reservations
                parent.request_count += 1
                parent._cleanup_expired()  # Clean up expired reservations

                # Health check
                if self.path == "/api/health":
                    response = {"status": "ok"}
                    self.send_response(200)
                    self.send_header('Content-Type', 'application/json')
                    self.end_headers()
                    self.wfile.write(json.dumps(response).encode())

                # Get all reservations
                elif self.path == "/api/reservations":
                    with parent.lock:
                        reservations = [res.to_dict() for res in parent.reservations.values()]

                    self.send_response(200)
                    self.send_header('Content-Type', 'application/json')
                    self.end_headers()
                    self.wfile.write(json.dumps({"reservations": reservations}).encode())

                else:
                    self.send_response(404)
                    self.end_headers()

            def do_POST(self):
                # POST /api/reservations {file_path, agent_name, ttl}
                #   201 on create or renewal (same agent re-reserving),
                #   409 if held by a different agent, 400 on bad input.
                parent.request_count += 1
                parent._cleanup_expired()  # Clean up expired reservations

                # Read request body
                content_length = int(self.headers.get('Content-Length', 0))
                body = self.rfile.read(content_length) if content_length > 0 else b'{}'

                try:
                    data = json.loads(body.decode('utf-8'))
                except json.JSONDecodeError:
                    self.send_response(400)
                    self.send_header('Content-Type', 'application/json')
                    self.end_headers()
                    self.wfile.write(json.dumps({"error": "Invalid JSON"}).encode())
                    return

                # Create/renew reservation
                if self.path == "/api/reservations":
                    file_path = data.get("file_path")
                    agent_name = data.get("agent_name")
                    ttl = data.get("ttl", 3600)

                    if not file_path or not agent_name:
                        self.send_response(400)
                        self.send_header('Content-Type', 'application/json')
                        self.end_headers()
                        self.wfile.write(json.dumps({"error": "Missing file_path or agent_name"}).encode())
                        return

                    with parent.lock:
                        # Check if already reserved by another agent
                        if file_path in parent.reservations:
                            existing = parent.reservations[file_path]
                            if existing.agent_name != agent_name:
                                # Conflict: already reserved by another agent
                                self.send_response(409)
                                self.send_header('Content-Type', 'application/json')
                                self.end_headers()
                                error_msg = f"File already reserved by {existing.agent_name}"
                                self.wfile.write(json.dumps({"error": error_msg}).encode())
                                return
                            else:
                                # Renewal: same agent re-reserving (heartbeat)
                                existing.renew(ttl)
                                logger.debug(f"Renewed reservation: {file_path} by {agent_name}")
                        else:
                            # New reservation
                            parent.reservations[file_path] = Reservation(file_path, agent_name, ttl)
                            logger.debug(f"Created reservation: {file_path} by {agent_name} (TTL={ttl}s)")

                    self.send_response(201)
                    self.send_header('Content-Type', 'application/json')
                    self.end_headers()
                    self.wfile.write(json.dumps({"status": "reserved"}).encode())

                else:
                    self.send_response(404)
                    self.end_headers()

            def do_DELETE(self):
                # DELETE /api/reservations/{agent}/{issue_id} -> 204.
                # Intentionally idempotent: 204 even if nothing was held or
                # the holder differs (only the matching holder's entry is
                # actually removed).
                parent.request_count += 1
                parent._cleanup_expired()  # Clean up expired reservations

                # Release reservation: /api/reservations/{agent}/{issue_id}
                # Extract file_path from URL (last component is issue_id)
                parts = self.path.split('/')
                if len(parts) >= 5 and parts[1] == "api" and parts[2] == "reservations":
                    agent_name = parts[3]
                    issue_id = parts[4]
                    file_path = f".beads/issues/{issue_id}"

                    with parent.lock:
                        if file_path in parent.reservations:
                            res = parent.reservations[file_path]
                            if res.agent_name == agent_name:
                                del parent.reservations[file_path]
                                logger.debug(f"Released reservation: {file_path}")

                    self.send_response(204)
                    self.end_headers()
                else:
                    self.send_response(404)
                    self.end_headers()

        return MockHandler
|
||||
|
||||
|
||||
class TestAgent:
    """Test agent that drives bd commands against a workspace, with optional
    Agent Mail reservation support.

    When `mail_url` is given, an AgentMailAdapter is created and claims go
    through the reservation protocol; otherwise the agent runs in
    Beads-only mode (`self.mail` is None).
    """

    def __init__(self, workspace: str, agent_name: str = "test-agent",
                 mail_url: Optional[str] = None):
        self.workspace = workspace
        self.agent_name = agent_name
        self.mail_url = mail_url
        # Build the adapter only when a mail server URL was supplied.
        self.mail = AgentMailAdapter(
            url=mail_url,
            agent_name=agent_name,
            timeout=TEST_TIMEOUT
        ) if mail_url else None

    def run_bd(self, *args) -> dict:
        """Run a bd command (daemon-less, JSON output) and parse its result.

        Returns the decoded JSON on success, {} when bd produced no output,
        or a dict with an "error" key on failure.
        """
        proc = subprocess.run(
            ["bd", "--no-daemon", *args, "--json"],
            cwd=self.workspace,
            capture_output=True,
            text=True
        )

        if proc.returncode != 0:
            return {"error": proc.stderr}

        if not proc.stdout.strip():
            return {}
        try:
            return json.loads(proc.stdout)
        except json.JSONDecodeError:
            return {"error": "Invalid JSON", "output": proc.stdout}

    def create_issue(self, title: str, priority: int = 1) -> Optional[str]:
        """Create an issue; return its ID, or None on failure."""
        response = self.run_bd("create", title, "-p", str(priority))
        if "error" in response:
            logger.error(f"Failed to create issue: {response['error']}")
            return None
        return response.get("id")

    def claim_issue(self, issue_id: str, ttl: int = 3600) -> bool:
        """Reserve (when Agent Mail is enabled) and claim an issue.

        The reservation is rolled back if the bd status update fails, so a
        failed claim never leaves the issue held.
        """
        use_mail = bool(self.mail and self.mail.enabled)

        # Try to reserve first; a refused reservation means another agent
        # holds the issue.
        if use_mail and not self.mail.reserve_issue(issue_id, ttl=ttl):
            logger.warning(f"Failed to reserve {issue_id}")
            return False

        # Update status
        response = self.run_bd("update", issue_id, "--status", "in_progress")
        if "error" not in response:
            return True

        logger.error(f"Failed to claim {issue_id}: {response['error']}")
        if use_mail:
            self.mail.release_issue(issue_id)
        return False

    def renew_reservation(self, issue_id: str, ttl: int = 3600) -> bool:
        """Renew the reservation (heartbeat).

        Re-reserving as the same agent extends the expiration server-side.
        Trivially succeeds when Agent Mail is disabled.
        """
        if not (self.mail and self.mail.enabled):
            return True
        return self.mail.reserve_issue(issue_id, ttl=ttl)
|
||||
|
||||
|
||||
def test_short_ttl_reservation():
    """Test reservation with short TTL (30s).

    Creates an issue, claims it with a SHORT_TTL reservation against the
    mock server, then verifies the reservation exists, is owned by this
    agent, and carries (approximately) the requested TTL.

    Returns True on success; raises AssertionError on failure. The
    workspace and mock server are always torn down.
    """
    print("\n" + "="*70)
    print("TEST 1: Short TTL Reservation (30s)")
    print("="*70)

    workspace = tempfile.mkdtemp(prefix="bd-test-ttl-")
    server = MockAgentMailServer()

    try:
        # Initialize workspace
        subprocess.run(
            ["bd", "init", "--quiet", "--prefix", "test"],
            cwd=workspace,
            check=True,
            capture_output=True
        )

        # Start server
        port = server.start()
        mail_url = f"http://127.0.0.1:{port}"

        # Create agent
        agent = TestAgent(workspace, "test-agent", mail_url=mail_url)

        # Create and claim issue with short TTL
        issue_id = agent.create_issue("Test short TTL reservation")
        assert issue_id is not None, "Should create issue"

        # (Removed an unused start_time = time.time() that was never read.)
        claimed = agent.claim_issue(issue_id, ttl=SHORT_TTL)
        assert claimed, f"Should claim issue with {SHORT_TTL}s TTL"

        # Verify reservation exists
        reservations = agent.mail.get_reservations()
        assert len(reservations) == 1, f"Should have 1 reservation, got {len(reservations)}"
        assert reservations[0]["agent_name"] == "test-agent", "Reservation should be owned by test-agent"

        # Check TTL info reported by the server (expires_at - created_at
        # should be roughly SHORT_TTL).
        res = reservations[0]
        expires_at = datetime.fromisoformat(res["expires_at"])
        created_at = datetime.fromisoformat(res["created_at"])
        actual_ttl = (expires_at - created_at).total_seconds()

        print(f"✅ PASS: Created reservation with {SHORT_TTL}s TTL")
        print(f"   • Issue: {issue_id}")
        print(f"   • Actual TTL: {actual_ttl:.1f}s")
        print(f"   • Expires at: {expires_at.strftime('%H:%M:%S')}")
        return True

    finally:
        server.stop()
        shutil.rmtree(workspace, ignore_errors=True)
|
||||
|
||||
|
||||
def test_reservation_blocking():
    """Test that reservation blocks other agents from claiming.

    agent1 claims an issue (creating a reservation on the mock server);
    agent2's subsequent claim of the same issue must fail with the
    reservation still held by agent1. Returns True on success; raises
    AssertionError on failure. Workspace and server are always torn down.
    """
    print("\n" + "="*70)
    print("TEST 2: Reservation Blocking Verification")
    print("="*70)

    workspace = tempfile.mkdtemp(prefix="bd-test-block-")
    server = MockAgentMailServer()

    try:
        # Initialize workspace (fresh temp dir, "test" issue prefix)
        subprocess.run(
            ["bd", "init", "--quiet", "--prefix", "test"],
            cwd=workspace,
            check=True,
            capture_output=True
        )

        # Start server
        port = server.start()
        mail_url = f"http://127.0.0.1:{port}"

        # Create two agents sharing the same workspace and mail server
        agent1 = TestAgent(workspace, "agent1", mail_url=mail_url)
        agent2 = TestAgent(workspace, "agent2", mail_url=mail_url)

        # Agent 1 creates and claims issue
        issue_id = agent1.create_issue("Test reservation blocking")
        assert issue_id is not None, "Agent 1 should create issue"

        claimed1 = agent1.claim_issue(issue_id, ttl=SHORT_TTL)
        assert claimed1, "Agent 1 should claim issue"

        # Agent 2 attempts to claim same issue (should fail: the mock
        # server answers 409 for a file held by a different agent)
        claimed2 = agent2.claim_issue(issue_id, ttl=SHORT_TTL)
        assert not claimed2, "Agent 2 should NOT be able to claim (blocked by reservation)"

        # Verify only one reservation exists and it still belongs to agent1
        reservations = agent1.mail.get_reservations()
        assert len(reservations) == 1, f"Should have 1 reservation, got {len(reservations)}"
        assert reservations[0]["agent_name"] == "agent1", "Reservation should be owned by agent1"

        print("✅ PASS: Reservation successfully blocked other agent")
        print(f"   • Agent 1 claimed: {issue_id}")
        print(f"   • Agent 2 blocked by reservation")
        return True

    finally:
        server.stop()
        shutil.rmtree(workspace, ignore_errors=True)
|
||||
|
||||
|
||||
def test_auto_release_after_expiration():
    """Test that reservation auto-releases after TTL expires.

    agent1 claims with SHORT_TTL; agent2 is blocked before expiration,
    then — after sleeping past the TTL — the reservation is gone and
    agent2 can claim. This is the slow test: it sleeps SHORT_TTL + 2s.
    Returns True on success; raises AssertionError on failure.
    """
    print("\n" + "="*70)
    print("TEST 3: Auto-Release After Expiration")
    print("="*70)
    print(f"  (This test waits {SHORT_TTL}s for expiration)")

    workspace = tempfile.mkdtemp(prefix="bd-test-expire-")
    server = MockAgentMailServer()

    try:
        # Initialize workspace
        subprocess.run(
            ["bd", "init", "--quiet", "--prefix", "test"],
            cwd=workspace,
            check=True,
            capture_output=True
        )

        # Start server
        port = server.start()
        mail_url = f"http://127.0.0.1:{port}"

        # Create two agents
        agent1 = TestAgent(workspace, "agent1", mail_url=mail_url)
        agent2 = TestAgent(workspace, "agent2", mail_url=mail_url)

        # Agent 1 creates and claims issue with short TTL
        issue_id = agent1.create_issue("Test auto-release")
        assert issue_id is not None, "Agent 1 should create issue"

        start_time = time.time()  # used only to report elapsed time below
        claimed1 = agent1.claim_issue(issue_id, ttl=SHORT_TTL)
        assert claimed1, "Agent 1 should claim issue"

        # Verify reservation exists
        reservations = agent1.mail.get_reservations()
        assert len(reservations) == 1, "Should have 1 active reservation"

        # Agent 2 attempts to claim (should fail - still reserved)
        claimed2_before = agent2.claim_issue(issue_id, ttl=SHORT_TTL)
        assert not claimed2_before, "Agent 2 should be blocked before expiration"

        print(f"   • Waiting {SHORT_TTL}s for reservation to expire...")

        # Wait for TTL to expire (add 2s buffer for clock skew)
        time.sleep(SHORT_TTL + 2)

        elapsed = time.time() - start_time

        # Verify reservation auto-released. The mock server cleans up
        # expired entries lazily on each request, so this GET both
        # triggers the cleanup and observes its result.
        reservations_after = agent2.mail.get_reservations()  # Triggers cleanup
        assert len(reservations_after) == 0, f"Reservation should have expired, got {len(reservations_after)}"

        # Agent 2 should now be able to claim
        claimed2_after = agent2.claim_issue(issue_id, ttl=SHORT_TTL)
        assert claimed2_after, "Agent 2 should claim issue after expiration"

        # Verify new reservation by agent2
        final_reservations = agent2.mail.get_reservations()
        assert len(final_reservations) == 1, "Should have 1 reservation after agent2 claims"
        assert final_reservations[0]["agent_name"] == "agent2", "Reservation should be owned by agent2"

        print(f"✅ PASS: Reservation auto-released after {elapsed:.1f}s")
        print(f"   • Agent 1 reservation expired")
        print(f"   • Agent 2 successfully claimed after expiration")
        return True

    finally:
        server.stop()
        shutil.rmtree(workspace, ignore_errors=True)
|
||||
|
||||
|
||||
def test_renewal_heartbeat():
    """Test reservation renewal (heartbeat mechanism).

    Claims an issue with SHORT_TTL, waits half the TTL, re-reserves (the
    mock server treats a same-agent POST as a renewal), and verifies the
    server-reported expiration moved forward by roughly SHORT_TTL/2.
    Sleeps SHORT_TTL // 2 seconds. Returns True on success; raises
    AssertionError on failure.
    """
    print("\n" + "="*70)
    print("TEST 4: Renewal/Heartbeat Mechanism")
    print("="*70)
    print(f"  (This test waits {SHORT_TTL // 2}s to test renewal)")

    workspace = tempfile.mkdtemp(prefix="bd-test-renew-")
    server = MockAgentMailServer()

    try:
        # Initialize workspace
        subprocess.run(
            ["bd", "init", "--quiet", "--prefix", "test"],
            cwd=workspace,
            check=True,
            capture_output=True
        )

        # Start server
        port = server.start()
        mail_url = f"http://127.0.0.1:{port}"

        # Create agent
        agent = TestAgent(workspace, "test-agent", mail_url=mail_url)

        # Create and claim issue with short TTL
        issue_id = agent.create_issue("Test renewal/heartbeat")
        assert issue_id is not None, "Should create issue"

        claimed = agent.claim_issue(issue_id, ttl=SHORT_TTL)
        assert claimed, f"Should claim issue with {SHORT_TTL}s TTL"

        # Get initial expiration time as reported by the server
        reservations = agent.mail.get_reservations()
        assert len(reservations) == 1, "Should have 1 reservation"
        initial_expires = datetime.fromisoformat(reservations[0]["expires_at"])

        print(f"   • Initial expiration: {initial_expires.strftime('%H:%M:%S')}")
        print(f"   • Waiting {SHORT_TTL // 2}s before renewal...")

        # Wait halfway through TTL
        time.sleep(SHORT_TTL // 2)

        # Renew reservation (heartbeat)
        renewed = agent.renew_reservation(issue_id, ttl=SHORT_TTL)
        assert renewed, "Should renew reservation"

        # Get new expiration time
        reservations_after = agent.mail.get_reservations()
        assert len(reservations_after) == 1, "Should still have 1 reservation"
        renewed_expires = datetime.fromisoformat(reservations_after[0]["expires_at"])

        # Verify expiration was extended
        extension = (renewed_expires - initial_expires).total_seconds()

        print(f"   • Renewed expiration: {renewed_expires.strftime('%H:%M:%S')}")
        print(f"   • Extension: {extension:.1f}s")

        # Extension should be approximately TTL/2 (since we renewed halfway)
        # Allow 5s tolerance for clock skew and processing time
        expected_extension = SHORT_TTL // 2
        assert abs(extension - expected_extension) < 5, \
            f"Extension should be ~{expected_extension}s, got {extension:.1f}s"

        print(f"✅ PASS: Reservation renewed successfully")
        print(f"   • Heartbeat extended expiration by {extension:.1f}s")
        return True

    finally:
        server.stop()
        shutil.rmtree(workspace, ignore_errors=True)
|
||||
|
||||
|
||||
def main():
    """Run all TTL/expiration tests.

    Verifies the bd binary is installed, runs the four test functions in
    order (each returns True or raises AssertionError), prints a summary,
    and exits 0 when all pass, 1 otherwise.
    """
    print("🧪 Reservation TTL and Expiration Test Suite")
    print(f"Testing time-based reservation behavior (SHORT_TTL={SHORT_TTL}s)")

    # Check if bd is available before running anything
    try:
        subprocess.run(["bd", "--version"], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("❌ ERROR: bd command not found")
        print("   Install: go install github.com/steveyegge/beads/cmd/bd@latest")
        sys.exit(1)

    # Run tests (name shown in failure output, function to invoke)
    tests = [
        ("Short TTL reservation", test_short_ttl_reservation),
        ("Reservation blocking", test_reservation_blocking),
        ("Auto-release after expiration", test_auto_release_after_expiration),
        ("Renewal/heartbeat mechanism", test_renewal_heartbeat),
    ]

    passed = 0
    failed = 0
    start_time = time.time()

    for name, test_func in tests:
        try:
            if test_func():
                passed += 1
        except AssertionError as e:
            # Expected failure path: an assertion inside the test tripped
            print(f"\n❌ FAIL: {name}")
            print(f"   {e}")
            failed += 1
        except Exception as e:
            # Unexpected error (setup, server, bd invocation, ...)
            print(f"\n💥 ERROR in {name}: {e}")
            import traceback
            traceback.print_exc()
            failed += 1

    elapsed = time.time() - start_time

    # Summary
    print("\n" + "="*70)
    print("SUMMARY")
    print("="*70)
    print(f"✅ Passed: {passed}/{len(tests)}")
    print(f"❌ Failed: {failed}/{len(tests)}")
    print(f"⏱️  Total time: {elapsed:.1f}s")

    if failed == 0:
        print("\n🎉 All TTL/expiration tests passed!")
        print("   Reservation expiration and renewal work correctly")
        sys.exit(0)
    else:
        print(f"\n⚠️  {failed} test(s) failed")
        sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
Reference in New Issue
Block a user