Fix bd-hv01: Implement deletion tracking for multi-workspace sync

- Add 3-way merge deletion tracking using snapshot files
- Create .beads/beads.base.jsonl and .beads/beads.left.jsonl snapshots
- Integrate into both sync.go and daemon_sync.go
- Add comprehensive test suite in deletion_tracking_test.go
- Update .gitignore to exclude snapshot files

This fixes the resurrection bug where deleted issues come back after
multi-workspace git sync. Uses the beads-merge 3-way merge logic to
detect and apply deletions correctly.
This commit is contained in:
Steve Yegge
2025-11-06 17:52:37 -08:00
parent b201eecd55
commit 708a81c491
5 changed files with 743 additions and 0 deletions

4
.gitignore vendored
View File

@@ -49,6 +49,10 @@ Thumbs.db
# Keep JSONL exports (source of truth for git)
!.beads/*.jsonl
# 3-way merge snapshot files (local-only, for deletion tracking)
.beads/beads.base.jsonl
.beads/beads.left.jsonl
# Ignore nix result
result

View File

@@ -499,6 +499,11 @@ func createSyncFunc(ctx context.Context, store storage.Storage, autoCommit, auto
}
log.log("Exported to JSONL")
// Capture left snapshot (pre-pull state) for 3-way merge
if err := captureLeftSnapshot(jsonlPath); err != nil {
log.log("Warning: failed to capture snapshot for deletion tracking: %v", err)
}
if autoCommit {
// Try sync branch commit first
committed, err := syncBranchCommitAndPush(syncCtx, store, autoPush, log)
@@ -549,6 +554,12 @@ func createSyncFunc(ctx context.Context, store storage.Storage, autoCommit, auto
return
}
// Perform 3-way merge and prune deletions
if err := applyDeletionsFromMerge(syncCtx, store, jsonlPath); err != nil {
log.log("Error during 3-way merge: %v", err)
return
}
if err := importToJSONLWithStore(syncCtx, store, jsonlPath); err != nil {
log.log("Import failed: %v", err)
return
@@ -567,6 +578,11 @@ func createSyncFunc(ctx context.Context, store storage.Storage, autoCommit, auto
return
}
// Update base snapshot after successful import
if err := updateBaseSnapshot(jsonlPath); err != nil {
log.log("Warning: failed to update base snapshot: %v", err)
}
if autoPush && autoCommit {
if err := gitPush(syncCtx); err != nil {
log.log("Push failed: %v", err)

318
cmd/bd/deletion_tracking.go Normal file
View File

@@ -0,0 +1,318 @@
package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"github.com/steveyegge/beads/internal/merge"
"github.com/steveyegge/beads/internal/storage"
)
// getSnapshotPaths returns paths for base and left snapshot files
func getSnapshotPaths(jsonlPath string) (basePath, leftPath string) {
dir := filepath.Dir(jsonlPath)
basePath = filepath.Join(dir, "beads.base.jsonl")
leftPath = filepath.Join(dir, "beads.left.jsonl")
return
}
// captureLeftSnapshot copies the current JSONL to the left snapshot file.
// Call this after export and before git pull, so the left side of the 3-way
// merge reflects the pre-pull local state.
func captureLeftSnapshot(jsonlPath string) error {
	_, left := getSnapshotPaths(jsonlPath)
	return copyFileSnapshot(jsonlPath, left)
}
// updateBaseSnapshot copies the current JSONL to the base snapshot file.
// Call this after a successful import so the base reflects the new baseline
// for the next 3-way merge.
func updateBaseSnapshot(jsonlPath string) error {
	base, _ := getSnapshotPaths(jsonlPath)
	return copyFileSnapshot(jsonlPath, base)
}
// merge3WayAndPruneDeletions performs a 3-way merge and prunes accepted
// deletions from the DB.
//
// Sides of the merge: base (last import), left (pre-pull export), and right
// (the pulled JSONL at jsonlPath). On success the merged result replaces
// jsonlPath and accepted deletions are removed from the database.
//
// Returns true if the merge was performed, false if skipped because no base
// snapshot exists yet (first run or bootstrap).
func merge3WayAndPruneDeletions(ctx context.Context, store storage.Storage, jsonlPath string) (bool, error) {
	basePath, leftPath := getSnapshotPaths(jsonlPath)
	// If no base snapshot exists, skip deletion handling (first run or bootstrap)
	if !fileExists(basePath) {
		return false, nil
	}
	// Run 3-way merge: base (last import) vs left (pre-pull export) vs right (pulled JSONL)
	tmpMerged := jsonlPath + ".merged"
	if err := merge.Merge3Way(tmpMerged, basePath, leftPath, jsonlPath, false); err != nil {
		// Merge error (including conflicts) is returned as error.
		// Remove any partial output so a stale *.merged file isn't left behind.
		_ = os.Remove(tmpMerged)
		return false, fmt.Errorf("3-way merge failed: %w", err)
	}
	// Replace the JSONL with merged result
	if err := os.Rename(tmpMerged, jsonlPath); err != nil {
		_ = os.Remove(tmpMerged)
		return false, fmt.Errorf("failed to replace JSONL with merged result: %w", err)
	}
	// Compute accepted deletions (issues in base but not in merged, and unchanged locally)
	acceptedDeletions, err := computeAcceptedDeletions(basePath, leftPath, jsonlPath)
	if err != nil {
		return false, fmt.Errorf("failed to compute accepted deletions: %w", err)
	}
	// Prune accepted deletions from the database.
	// Use a type assertion to access DeleteIssue (available on the concrete
	// SQLite storage); the check is hoisted out of the loop since the result
	// cannot change between iterations.
	type deleter interface {
		DeleteIssue(context.Context, string) error
	}
	d, canDelete := store.(deleter)
	if !canDelete && len(acceptedDeletions) > 0 {
		return false, fmt.Errorf("storage backend does not support DeleteIssue")
	}
	for _, id := range acceptedDeletions {
		if err := d.DeleteIssue(ctx, id); err != nil {
			// Log warning but continue - issue might already be deleted
			fmt.Fprintf(os.Stderr, "Warning: failed to delete issue %s during merge: %v\n", id, err)
		}
	}
	if len(acceptedDeletions) > 0 {
		fmt.Fprintf(os.Stderr, "3-way merge: pruned %d deleted issue(s) from database\n", len(acceptedDeletions))
	}
	return true, nil
}
// computeAcceptedDeletions identifies issues that were deleted in the remote
// and should be removed from the local database.
//
// An issue counts as an "accepted deletion" when all of the following hold:
//   - it exists in base (the last import),
//   - it is absent from the merged result of the 3-way merge, and
//   - its left (pre-pull export) line is byte-identical to its base line,
//     i.e. there were no local modifications.
//
// Such an issue was deleted remotely while untouched locally, so the deletion
// is accepted and the issue should be pruned from our DB.
func computeAcceptedDeletions(basePath, leftPath, mergedPath string) ([]string, error) {
	// Index base and left by issue ID, keeping the raw line for byte comparison.
	baseIndex, err := buildIDToLineMap(basePath)
	if err != nil {
		return nil, fmt.Errorf("failed to read base snapshot: %w", err)
	}
	leftIndex, err := buildIDToLineMap(leftPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read left snapshot: %w", err)
	}
	mergedIDs, err := buildIDSet(mergedPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read merged file: %w", err)
	}
	var accepted []string
	for id, baseLine := range baseIndex {
		if mergedIDs[id] {
			continue // survived the merge: not a deletion
		}
		// Accept the deletion only if the pre-pull export matches base exactly
		// (no local edits to lose).
		if leftLine, inLeft := leftIndex[id]; inLeft && leftLine == baseLine {
			accepted = append(accepted, id)
		}
	}
	return accepted, nil
}
// buildIDToLineMap reads a JSONL file and returns a map of issue ID -> raw JSON line
func buildIDToLineMap(path string) (map[string]string, error) {
result := make(map[string]string)
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return result, nil // Empty map for missing files
}
return nil, err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
if line == "" {
continue
}
// Parse just the ID field
var issue struct {
ID string `json:"id"`
}
if err := json.Unmarshal([]byte(line), &issue); err != nil {
return nil, fmt.Errorf("failed to parse issue ID from line: %w", err)
}
result[issue.ID] = line
}
if err := scanner.Err(); err != nil {
return nil, err
}
return result, nil
}
// buildIDSet reads a JSONL file and returns a set of issue IDs
func buildIDSet(path string) (map[string]bool, error) {
result := make(map[string]bool)
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return result, nil // Empty set for missing files
}
return nil, err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
if line == "" {
continue
}
// Parse just the ID field
var issue struct {
ID string `json:"id"`
}
if err := json.Unmarshal([]byte(line), &issue); err != nil {
return nil, fmt.Errorf("failed to parse issue ID from line: %w", err)
}
result[issue.ID] = true
}
if err := scanner.Err(); err != nil {
return nil, err
}
return result, nil
}
// fileExists checks if a file exists
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
// copyFileSnapshot copies a file from src to dst (renamed to avoid conflict with migrate_hash_ids.go)
func copyFileSnapshot(src, dst string) error {
sourceFile, err := os.Open(src)
if err != nil {
return err
}
defer sourceFile.Close()
destFile, err := os.Create(dst)
if err != nil {
return err
}
defer destFile.Close()
if _, err := io.Copy(destFile, sourceFile); err != nil {
return err
}
return destFile.Sync()
}
// cleanupSnapshots removes the snapshot files. Useful for cleanup after
// errors or manual operations. Removal failures are deliberately ignored
// (best-effort), so this always returns nil.
func cleanupSnapshots(jsonlPath string) error {
	base, left := getSnapshotPaths(jsonlPath)
	for _, p := range []string{base, left} {
		_ = os.Remove(p)
	}
	return nil
}
// validateSnapshotConsistency checks if snapshot files are consistent.
// Both snapshot files are optional; only files that actually exist are
// validated. Returns an error if an existing snapshot cannot be parsed as
// JSONL.
func validateSnapshotConsistency(jsonlPath string) error {
	basePath, leftPath := getSnapshotPaths(jsonlPath)
	// Validate a snapshot only when present; absence is a normal state
	// (first run, or export not run yet).
	check := func(path, label string) error {
		if !fileExists(path) {
			return nil
		}
		if _, err := buildIDSet(path); err != nil {
			return fmt.Errorf("%s snapshot is corrupted: %w", label, err)
		}
		return nil
	}
	if err := check(basePath, "base"); err != nil {
		return err
	}
	return check(leftPath, "left")
}
// getSnapshotStats returns statistics about the snapshot files.
//
// Bug fix: buildIDSet treats a missing file as an empty set with a nil error,
// so the previous version reported baseExists/leftExists as true even when the
// snapshot file did not exist. Check for existence explicitly before counting.
func getSnapshotStats(jsonlPath string) (baseCount, leftCount int, baseExists, leftExists bool) {
	basePath, leftPath := getSnapshotPaths(jsonlPath)
	if fileExists(basePath) {
		if baseIDs, err := buildIDSet(basePath); err == nil {
			baseExists = true
			baseCount = len(baseIDs)
		}
	}
	if fileExists(leftPath) {
		if leftIDs, err := buildIDSet(leftPath); err == nil {
			leftExists = true
			leftCount = len(leftIDs)
		}
	}
	return
}
// initializeSnapshotsIfNeeded creates the initial base snapshot if it doesn't
// exist yet. This is called during init or first sync to bootstrap the
// deletion tracking.
func initializeSnapshotsIfNeeded(jsonlPath string) error {
	basePath, _ := getSnapshotPaths(jsonlPath)
	// Nothing to do unless the JSONL exists and the base snapshot is missing.
	if !fileExists(jsonlPath) || fileExists(basePath) {
		return nil
	}
	if err := copyFileSnapshot(jsonlPath, basePath); err != nil {
		return fmt.Errorf("failed to initialize base snapshot: %w", err)
	}
	return nil
}
// applyDeletionsFromMerge applies deletions discovered during 3-way merge.
// This is the main entry point for deletion tracking during sync.
func applyDeletionsFromMerge(ctx context.Context, store storage.Storage, jsonlPath string) error {
	merged, err := merge3WayAndPruneDeletions(ctx, store, jsonlPath)
	if err != nil {
		return err
	}
	if merged {
		return nil
	}
	// No merge performed (no base snapshot yet): bootstrap the snapshots so
	// deletion tracking kicks in on the next sync. Failure here is non-fatal.
	if initErr := initializeSnapshotsIfNeeded(jsonlPath); initErr != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to initialize snapshots: %v\n", initErr)
	}
	return nil
}

View File

@@ -0,0 +1,387 @@
package main
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// TestMultiWorkspaceDeletionSync simulates the bd-hv01 bug scenario:
// Clone A deletes an issue, Clone B still has it, and after sync it should stay deleted
//
// NOTE(review): no real git repo is involved — the "remote" is simply Clone
// A's exported JSONL, which Clone B copies over its own JSONL to simulate a
// pull. The test drives the snapshot/merge helpers directly.
func TestMultiWorkspaceDeletionSync(t *testing.T) {
	// Setup two separate workspaces simulating two git clones
	cloneADir := t.TempDir()
	cloneBDir := t.TempDir()
	cloneAJSONL := filepath.Join(cloneADir, "beads.jsonl")
	cloneBJSONL := filepath.Join(cloneBDir, "beads.jsonl")
	cloneADB := filepath.Join(cloneADir, "beads.db")
	cloneBDB := filepath.Join(cloneBDir, "beads.db")
	ctx := context.Background()
	// Create stores for both clones
	storeA, err := sqlite.New(cloneADB)
	if err != nil {
		t.Fatalf("Failed to create store A: %v", err)
	}
	defer storeA.Close()
	if err := storeA.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set issue_prefix for store A: %v", err)
	}
	storeB, err := sqlite.New(cloneBDB)
	if err != nil {
		t.Fatalf("Failed to create store B: %v", err)
	}
	defer storeB.Close()
	if err := storeB.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set issue_prefix for store B: %v", err)
	}
	// Step 1: Both clones start with the same two issues
	issueToDelete := &types.Issue{
		ID:          "bd-delete-me",
		Title:       "Issue to be deleted",
		Description: "This will be deleted in clone A",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   "bug",
	}
	issueToKeep := &types.Issue{
		ID:          "bd-keep-me",
		Title:       "Issue to keep",
		Description: "This should remain",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   "feature",
	}
	// Create in both stores (using "test" as actor)
	if err := storeA.CreateIssue(ctx, issueToDelete, "test"); err != nil {
		t.Fatalf("Failed to create issue in store A: %v", err)
	}
	if err := storeA.CreateIssue(ctx, issueToKeep, "test"); err != nil {
		t.Fatalf("Failed to create issue in store A: %v", err)
	}
	if err := storeB.CreateIssue(ctx, issueToDelete, "test"); err != nil {
		t.Fatalf("Failed to create issue in store B: %v", err)
	}
	if err := storeB.CreateIssue(ctx, issueToKeep, "test"); err != nil {
		t.Fatalf("Failed to create issue in store B: %v", err)
	}
	// Export from both
	if err := exportToJSONLWithStore(ctx, storeA, cloneAJSONL); err != nil {
		t.Fatalf("Failed to export from store A: %v", err)
	}
	if err := exportToJSONLWithStore(ctx, storeB, cloneBJSONL); err != nil {
		t.Fatalf("Failed to export from store B: %v", err)
	}
	// Initialize base snapshots for both (simulating first sync); the base is
	// what each clone will later diff against to detect remote deletions
	if err := initializeSnapshotsIfNeeded(cloneAJSONL); err != nil {
		t.Fatalf("Failed to initialize snapshots for A: %v", err)
	}
	if err := initializeSnapshotsIfNeeded(cloneBJSONL); err != nil {
		t.Fatalf("Failed to initialize snapshots for B: %v", err)
	}
	// Step 2: Clone A deletes the issue
	if err := storeA.DeleteIssue(ctx, "bd-delete-me"); err != nil {
		t.Fatalf("Failed to delete issue in store A: %v", err)
	}
	// Step 3: Clone A exports and captures left snapshot (simulating pre-pull)
	if err := exportToJSONLWithStore(ctx, storeA, cloneAJSONL); err != nil {
		t.Fatalf("Failed to export from store A after deletion: %v", err)
	}
	if err := captureLeftSnapshot(cloneAJSONL); err != nil {
		t.Fatalf("Failed to capture left snapshot for A: %v", err)
	}
	// Simulate git push/pull: Copy Clone A's JSONL to Clone B's "remote" state
	remoteJSONL := cloneAJSONL
	// Step 4: Clone B exports (still has both issues) and captures left snapshot
	if err := exportToJSONLWithStore(ctx, storeB, cloneBJSONL); err != nil {
		t.Fatalf("Failed to export from store B: %v", err)
	}
	if err := captureLeftSnapshot(cloneBJSONL); err != nil {
		t.Fatalf("Failed to capture left snapshot for B: %v", err)
	}
	// Step 5: Simulate Clone B pulling from remote (copy remote JSONL)
	remoteData, err := os.ReadFile(remoteJSONL)
	if err != nil {
		t.Fatalf("Failed to read remote JSONL: %v", err)
	}
	if err := os.WriteFile(cloneBJSONL, remoteData, 0644); err != nil {
		t.Fatalf("Failed to write pulled JSONL to clone B: %v", err)
	}
	// Step 6: Clone B applies 3-way merge and prunes deletions
	// This is the key fix - it should detect that bd-delete-me was deleted remotely
	merged, err := merge3WayAndPruneDeletions(ctx, storeB, cloneBJSONL)
	if err != nil {
		t.Fatalf("Failed to apply deletions from merge: %v", err)
	}
	if !merged {
		t.Error("Expected 3-way merge to run, but it was skipped")
	}
	// Step 7: Verify the deletion was applied to Clone B's database
	deletedIssue, err := storeB.GetIssue(ctx, "bd-delete-me")
	if err == nil && deletedIssue != nil {
		t.Errorf("Issue bd-delete-me should have been deleted from Clone B, but still exists")
	}
	// Verify the kept issue still exists
	keptIssue, err := storeB.GetIssue(ctx, "bd-keep-me")
	if err != nil || keptIssue == nil {
		t.Errorf("Issue bd-keep-me should still exist in Clone B")
	}
	// Verify Clone A still has only one issue
	issuesA, err := storeA.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to search issues in store A: %v", err)
	}
	if len(issuesA) != 1 {
		t.Errorf("Clone A should have 1 issue after deletion, got %d", len(issuesA))
	}
	// Verify Clone B now matches Clone A (both have 1 issue)
	issuesB, err := storeB.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to search issues in store B: %v", err)
	}
	if len(issuesB) != 1 {
		t.Errorf("Clone B should have 1 issue after merge, got %d", len(issuesB))
	}
}
// TestDeletionWithLocalModification tests the conflict scenario:
// Remote deletes an issue, but local has modified it
//
// Expectation: the 3-way merge surfaces a conflict (non-nil error) and the
// locally modified issue is NOT pruned from the database.
func TestDeletionWithLocalModification(t *testing.T) {
	dir := t.TempDir()
	jsonlPath := filepath.Join(dir, "beads.jsonl")
	dbPath := filepath.Join(dir, "beads.db")
	ctx := context.Background()
	store, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer store.Close()
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set issue_prefix: %v", err)
	}
	// Create an issue
	issue := &types.Issue{
		ID:          "bd-conflict",
		Title:       "Original title",
		Description: "Original description",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   "bug",
	}
	if err := store.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}
	// Export and create base snapshot
	if err := exportToJSONLWithStore(ctx, store, jsonlPath); err != nil {
		t.Fatalf("Failed to export: %v", err)
	}
	if err := initializeSnapshotsIfNeeded(jsonlPath); err != nil {
		t.Fatalf("Failed to initialize snapshots: %v", err)
	}
	// Modify the issue locally
	updates := map[string]interface{}{
		"title": "Modified title locally",
	}
	if err := store.UpdateIssue(ctx, "bd-conflict", updates, "test"); err != nil {
		t.Fatalf("Failed to update issue: %v", err)
	}
	// Export modified state and capture left snapshot; left now differs from
	// base, which is what makes the remote deletion a conflict
	if err := exportToJSONLWithStore(ctx, store, jsonlPath); err != nil {
		t.Fatalf("Failed to export after modification: %v", err)
	}
	if err := captureLeftSnapshot(jsonlPath); err != nil {
		t.Fatalf("Failed to capture left snapshot: %v", err)
	}
	// Simulate remote deletion (write empty JSONL)
	if err := os.WriteFile(jsonlPath, []byte(""), 0644); err != nil {
		t.Fatalf("Failed to simulate remote deletion: %v", err)
	}
	// Try to merge - this should detect a conflict (modified locally, deleted remotely)
	_, err = merge3WayAndPruneDeletions(ctx, store, jsonlPath)
	if err == nil {
		t.Error("Expected merge conflict error, but got nil")
	}
	// The issue should still exist in the database (conflict not auto-resolved)
	conflictIssue, err := store.GetIssue(ctx, "bd-conflict")
	if err != nil || conflictIssue == nil {
		t.Error("Issue should still exist after conflict")
	}
}
// TestComputeAcceptedDeletions tests the deletion detection logic: an issue
// present in base and unchanged in left, but absent from the merged result,
// must be reported as an accepted deletion.
func TestComputeAcceptedDeletions(t *testing.T) {
	workDir := t.TempDir()
	basePath := filepath.Join(workDir, "base.jsonl")
	leftPath := filepath.Join(workDir, "left.jsonl")
	mergedPath := filepath.Join(workDir, "merged.jsonl")

	// Base has 3 issues; left is byte-identical to base (no local edits).
	baseContent := "{\"id\":\"bd-1\",\"title\":\"Issue 1\"}\n" +
		"{\"id\":\"bd-2\",\"title\":\"Issue 2\"}\n" +
		"{\"id\":\"bd-3\",\"title\":\"Issue 3\"}\n"
	// Merged has only 2 issues (bd-2 was deleted remotely).
	mergedContent := "{\"id\":\"bd-1\",\"title\":\"Issue 1\"}\n" +
		"{\"id\":\"bd-3\",\"title\":\"Issue 3\"}\n"

	if err := os.WriteFile(basePath, []byte(baseContent), 0644); err != nil {
		t.Fatalf("Failed to write base: %v", err)
	}
	if err := os.WriteFile(leftPath, []byte(baseContent), 0644); err != nil {
		t.Fatalf("Failed to write left: %v", err)
	}
	if err := os.WriteFile(mergedPath, []byte(mergedContent), 0644); err != nil {
		t.Fatalf("Failed to write merged: %v", err)
	}

	deletions, err := computeAcceptedDeletions(basePath, leftPath, mergedPath)
	if err != nil {
		t.Fatalf("Failed to compute deletions: %v", err)
	}
	if len(deletions) != 1 {
		t.Errorf("Expected 1 deletion, got %d", len(deletions))
	}
	if len(deletions) > 0 && deletions[0] != "bd-2" {
		t.Errorf("Expected deletion of bd-2, got %s", deletions[0])
	}
}
// TestComputeAcceptedDeletions_LocallyModified tests that locally modified issues are not deleted:
// a base/left mismatch means local edits exist, so a remote deletion of that
// issue must not be auto-accepted.
func TestComputeAcceptedDeletions_LocallyModified(t *testing.T) {
	workDir := t.TempDir()
	basePath := filepath.Join(workDir, "base.jsonl")
	leftPath := filepath.Join(workDir, "left.jsonl")
	mergedPath := filepath.Join(workDir, "merged.jsonl")

	// Base has 2 issues; left has bd-2 modified locally; merged has only bd-1
	// (bd-2 deleted remotely, but we modified it locally).
	fixtures := []struct {
		label   string
		path    string
		content string
	}{
		{"base", basePath, "{\"id\":\"bd-1\",\"title\":\"Original 1\"}\n{\"id\":\"bd-2\",\"title\":\"Original 2\"}\n"},
		{"left", leftPath, "{\"id\":\"bd-1\",\"title\":\"Original 1\"}\n{\"id\":\"bd-2\",\"title\":\"Modified locally\"}\n"},
		{"merged", mergedPath, "{\"id\":\"bd-1\",\"title\":\"Original 1\"}\n"},
	}
	for _, fx := range fixtures {
		if err := os.WriteFile(fx.path, []byte(fx.content), 0644); err != nil {
			t.Fatalf("Failed to write %s: %v", fx.label, err)
		}
	}

	deletions, err := computeAcceptedDeletions(basePath, leftPath, mergedPath)
	if err != nil {
		t.Fatalf("Failed to compute deletions: %v", err)
	}
	// bd-2 should NOT be in accepted deletions because it was modified locally
	if len(deletions) != 0 {
		t.Errorf("Expected 0 deletions (locally modified), got %d: %v", len(deletions), deletions)
	}
}
// TestSnapshotManagement tests the snapshot file lifecycle:
// initialize -> capture left -> update base, checking existence and issue
// counts at each stage.
func TestSnapshotManagement(t *testing.T) {
	workDir := t.TempDir()
	jsonlPath := filepath.Join(workDir, "beads.jsonl")

	// Write an initial JSONL containing a single issue.
	if err := os.WriteFile(jsonlPath, []byte("{\"id\":\"bd-1\",\"title\":\"Test\"}\n"), 0644); err != nil {
		t.Fatalf("Failed to write JSONL: %v", err)
	}
	// Initialize snapshots
	if err := initializeSnapshotsIfNeeded(jsonlPath); err != nil {
		t.Fatalf("Failed to initialize snapshots: %v", err)
	}
	basePath, leftPath := getSnapshotPaths(jsonlPath)
	// Base should exist, left should not
	if !fileExists(basePath) {
		t.Error("Base snapshot should exist after initialization")
	}
	if fileExists(leftPath) {
		t.Error("Left snapshot should not exist yet")
	}
	// Capture left snapshot
	if err := captureLeftSnapshot(jsonlPath); err != nil {
		t.Fatalf("Failed to capture left snapshot: %v", err)
	}
	if !fileExists(leftPath) {
		t.Error("Left snapshot should exist after capture")
	}
	// Update base snapshot
	if err := updateBaseSnapshot(jsonlPath); err != nil {
		t.Fatalf("Failed to update base snapshot: %v", err)
	}
	// Both should exist now, each holding the single issue.
	baseCount, leftCount, baseExists, leftExists := getSnapshotStats(jsonlPath)
	if !baseExists || !leftExists {
		t.Error("Both snapshots should exist")
	}
	if baseCount != 1 || leftCount != 1 {
		t.Errorf("Expected 1 issue in each snapshot, got base=%d left=%d", baseCount, leftCount)
	}
}

View File

@@ -148,6 +148,11 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Fprintf(os.Stderr, "Error exporting: %v\n", err)
os.Exit(1)
}
// Capture left snapshot (pre-pull state) for 3-way merge
if err := captureLeftSnapshot(jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to capture snapshot for deletion tracking: %v\n", err)
}
}
// Step 2: Check if there are changes to commit
@@ -192,6 +197,14 @@ Use --merge to merge the sync branch back to main branch.`,
}
}
// Step 3.5: Perform 3-way merge and prune deletions
if err := ensureStoreActive(); err == nil && store != nil {
if err := applyDeletionsFromMerge(ctx, store, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Error during 3-way merge: %v\n", err)
os.Exit(1)
}
}
// Step 4: Import updated JSONL after pull
fmt.Println("→ Importing updated JSONL...")
if err := importFromJSONL(ctx, jsonlPath, renameOnImport); err != nil {
@@ -249,6 +262,11 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Println("→ DB and JSONL in sync, skipping re-export")
}
}
// Update base snapshot after successful import
if err := updateBaseSnapshot(jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update base snapshot: %v\n", err)
}
}
}