fix(daemon): include tombstones in exportToJSONLWithStore for sync propagation (#696)

* fix(daemon): include tombstones in exportToJSONLWithStore for sync propagation

The daemon's exportToJSONLWithStore() function was using an empty
IssueFilter which defaults to IncludeTombstones: false. This caused
deleted issues (tombstones) to be excluded from JSONL exports during
daemon sync cycles.

Bug scenario:
1. User runs `bd delete <issue>` with daemon active
2. Database correctly marks issue as tombstone
3. Main .beads/issues.jsonl correctly shows status:"tombstone"
4. But sync branch worktree JSONL still showed status:"open"
5. Other clones would not see the deletion

The fix adds IncludeTombstones: true to match the behavior of
exportToJSONL() in sync.go, ensuring tombstones propagate to other
clones and prevent resurrection of deleted issues.

Adds regression test TestExportToJSONLWithStore_IncludesTombstones
that verifies tombstones are included in daemon JSONL exports.

* fix: resolve all golangci-lint errors (cherry-pick from fix/linting-errors)

Cherry-picked linting fixes to ensure CI passes.

---------

Co-authored-by: Charles P. Cross <cpdata@users.noreply.github.com>
This commit is contained in:
Charles P. Cross
2025-12-22 17:18:10 -05:00
committed by GitHub
parent ee016bbb25
commit 82cbd98e50
2 changed files with 122 additions and 2 deletions

View File

@@ -36,8 +36,9 @@ func exportToJSONLWithStore(ctx context.Context, store storage.Storage, jsonlPat
}
// Single-repo mode - use existing logic
// Get all issues
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
// Get all issues including tombstones for sync propagation (bd-rp4o fix)
// Tombstones must be exported so they propagate to other clones and prevent resurrection
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true})
if err != nil {
return fmt.Errorf("failed to get issues: %w", err)
}

View File

@@ -709,3 +709,122 @@ func TestUpdateExportMetadataInvalidKeySuffix(t *testing.T) {
t.Errorf("expected unsanitized key %s to NOT be set", unsanitizedKey)
}
}
// TestExportToJSONLWithStore_IncludesTombstones is a regression test guarding
// against the daemon exporting with an empty IssueFilter (which defaults to
// IncludeTombstones: false). If tombstones are dropped from the JSONL export,
// deletions never reach the sync branch and other clones resurrect the issue.
//
// Original failure mode:
// 1. `bd delete <issue>` runs while the daemon is active
// 2. The database marks the issue as a tombstone
// 3. The main .beads/issues.jsonl shows status:"tombstone"
// 4. The sync-branch worktree JSONL still showed status:"open" (the bug)
// 5. Remote clones never observed the deletion
func TestExportToJSONLWithStore_IncludesTombstones(t *testing.T) {
	t.Parallel()

	dir := t.TempDir()
	dbPath := filepath.Join(dir, ".beads", "beads.db")
	jsonlPath := filepath.Join(dir, ".beads", "issues.jsonl")

	// Open a fresh SQLite-backed store under the temp dir.
	store, err := sqlite.New(context.Background(), dbPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// An issue_prefix must exist or the store reports "database not initialized".
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("failed to set issue_prefix: %v", err)
	}

	now := time.Now()

	// Seed one live issue...
	live := &types.Issue{
		ID:        "test-1",
		Title:     "Open Issue",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: now,
		UpdatedAt: now,
	}
	if err := store.CreateIssue(ctx, live, "test"); err != nil {
		t.Fatalf("failed to create open issue: %v", err)
	}

	// ...and one deleted (tombstoned) issue.
	deleted := &types.Issue{
		ID:        "test-2",
		Title:     "Deleted Issue",
		Status:    types.StatusTombstone,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: now,
		UpdatedAt: now,
	}
	if err := store.CreateIssue(ctx, deleted, "test"); err != nil {
		t.Fatalf("failed to create tombstone issue: %v", err)
	}

	// Run the daemon's export path under test.
	if err := exportToJSONLWithStore(ctx, store, jsonlPath); err != nil {
		t.Fatalf("exportToJSONLWithStore failed: %v", err)
	}

	raw, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}

	// JSONL: one JSON object per non-empty line.
	records := splitJSONLLines(raw)
	if len(records) != 2 {
		t.Fatalf("expected 2 issues in JSONL, got %d", len(records))
	}

	// Both issues — including the tombstone — must appear with their status intact.
	seen := make(map[string]bool, 2)
	for _, rec := range records {
		var got types.Issue
		if err := json.Unmarshal(rec, &got); err != nil {
			t.Fatalf("failed to unmarshal issue: %v", err)
		}
		switch {
		case got.ID == "test-1" && got.Status == types.StatusOpen:
			seen["open"] = true
		case got.ID == "test-2" && got.Status == types.StatusTombstone:
			seen["tombstone"] = true
		}
	}

	if !seen["open"] {
		t.Error("expected open issue (test-1) to be in JSONL export")
	}
	if !seen["tombstone"] {
		t.Error("expected tombstone issue (test-2) to be in JSONL export - tombstones must be included for sync propagation")
	}
}
// splitJSONLLines splits JSONL content into individual JSON lines.
// Lines are delimited by '\n'; empty lines are skipped, and a final line
// without a trailing newline is still returned. Each returned line is an
// independent copy of the corresponding bytes.
func splitJSONLLines(data []byte) [][]byte {
	var lines [][]byte
	start := 0
	// Scan one past the end so the final (possibly unterminated) line is flushed.
	for i := 0; i <= len(data); i++ {
		if i < len(data) && data[i] != '\n' {
			continue
		}
		if i > start {
			// Copy the segment so callers don't alias the input buffer.
			line := make([]byte, i-start)
			copy(line, data[start:i])
			lines = append(lines, line)
		}
		start = i + 1
	}
	return lines
}