refactor: remove all deletions.jsonl code (bd-fom)

Complete removal of the legacy deletions.jsonl manifest system.
Tombstones are now the sole deletion mechanism.

Removed:
- internal/deletions/ - entire package
- cmd/bd/deleted.go - deleted command
- cmd/bd/doctor/fix/deletions.go - HydrateDeletionsManifest
- Tests for all removed functionality

Cleaned:
- cmd/bd/sync.go - removed sanitize, auto-compact
- cmd/bd/delete.go - removed dual-writes
- cmd/bd/doctor.go - removed checkDeletionsManifest
- internal/importer/importer.go - removed deletions checks
- internal/syncbranch/worktree.go - removed deletions merge
- cmd/bd/integrity.go - updated validation (warn-only on decrease)

Files removed: 12
Lines removed: ~7500

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-16 14:20:32 -08:00
parent e0528de590
commit 9f76cfda01
32 changed files with 298 additions and 7534 deletions

View File

@@ -1,341 +0,0 @@
// Package deletions handles the deletions manifest for tracking deleted issues.
// The deletions.jsonl file is an append-only log that records when issues are
// deleted, enabling propagation of deletions across repo clones via git sync.
package deletions
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"time"
)
// DeletionRecord represents a single deletion entry in the manifest.
// Timestamps are serialized as RFC3339 and may lose sub-second precision.
// An empty Reason is omitted from the JSON encoding (omitempty).
type DeletionRecord struct {
	ID        string    `json:"id"`               // Issue ID that was deleted
	Timestamp time.Time `json:"ts"`               // When the deletion occurred
	Actor     string    `json:"by"`               // Who performed the deletion
	Reason    string    `json:"reason,omitempty"` // Optional reason for deletion
}
// LoadResult contains the result of loading deletions, including any warnings.
type LoadResult struct {
	Records  map[string]DeletionRecord // Deduplicated records keyed by issue ID (last write wins)
	Skipped  int                       // Count of lines skipped (corrupt JSON or missing/empty ID)
	Warnings []string                  // One human-readable message per skipped line
}
// LoadDeletions reads the deletions manifest and returns a LoadResult.
// Corrupt JSON lines are skipped rather than failing the load.
// Warnings about skipped lines are collected in LoadResult.Warnings.
func LoadDeletions(path string) (*LoadResult, error) {
	out := &LoadResult{
		Records:  make(map[string]DeletionRecord),
		Warnings: []string{},
	}

	file, err := os.Open(path) // #nosec G304 - controlled path from caller
	if err != nil {
		// A missing manifest is not an error: there are simply no deletions yet.
		if os.IsNotExist(err) {
			return out, nil
		}
		return nil, fmt.Errorf("failed to open deletions file: %w", err)
	}
	defer file.Close()

	sc := bufio.NewScanner(file)
	// Allow large lines (up to 1MB) in case of very long reasons
	sc.Buffer(make([]byte, 0, 1024), 1024*1024)

	for lineNum := 1; sc.Scan(); lineNum++ {
		text := sc.Text()
		if text == "" {
			continue
		}

		var rec DeletionRecord
		if unmarshalErr := json.Unmarshal([]byte(text), &rec); unmarshalErr != nil {
			out.Warnings = append(out.Warnings,
				fmt.Sprintf("skipping corrupt line %d in deletions manifest: %v", lineNum, unmarshalErr))
			out.Skipped++
			continue
		}
		// Validate required fields
		if rec.ID == "" {
			out.Warnings = append(out.Warnings,
				fmt.Sprintf("skipping line %d in deletions manifest: missing ID", lineNum))
			out.Skipped++
			continue
		}
		// Last write wins: a later entry for the same ID replaces any earlier one.
		out.Records[rec.ID] = rec
	}
	if err := sc.Err(); err != nil {
		return nil, fmt.Errorf("error reading deletions file: %w", err)
	}
	return out, nil
}
// AppendDeletion appends a single deletion record to the manifest.
// Creates the file if it doesn't exist.
// Returns an error if the record has an empty ID.
func AppendDeletion(path string, record DeletionRecord) error {
	// An ID is the only mandatory field; refuse to write a record without one.
	if record.ID == "" {
		return fmt.Errorf("cannot append deletion record: ID is required")
	}

	// Ensure the parent directory exists before opening the file.
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}

	out, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) // #nosec G302,G304 - controlled path, 0644 needed for git
	if err != nil {
		return fmt.Errorf("failed to open deletions file for append: %w", err)
	}
	defer out.Close()

	line, err := json.Marshal(record)
	if err != nil {
		return fmt.Errorf("failed to marshal deletion record: %w", err)
	}
	line = append(line, '\n')

	if _, err := out.Write(line); err != nil {
		return fmt.Errorf("failed to write deletion record: %w", err)
	}
	// Sync to ensure durability for append-only log
	if err := out.Sync(); err != nil {
		return fmt.Errorf("failed to sync deletions file: %w", err)
	}
	return nil
}
// WriteDeletions atomically writes the entire deletions manifest.
// Used for compaction to deduplicate and prune old entries.
// An empty slice will create an empty file (clearing all deletions).
//
// The write is temp-file + rename, so readers never observe a partial
// manifest. The temp file is synced before the rename (matching the
// durability guarantee of AppendDeletion) and chmod'd to 0644, because
// os.CreateTemp creates files with mode 0600 and rename preserves that
// mode — without the chmod, compaction would silently narrow the
// git-friendly permissions that AppendDeletion establishes.
func WriteDeletions(path string, records []DeletionRecord) error {
	// Ensure directory exists
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create directory: %w", err)
	}
	// Create temp file in same directory for atomic rename
	base := filepath.Base(path)
	tempFile, err := os.CreateTemp(dir, base+".tmp.*")
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	tempPath := tempFile.Name()
	defer func() {
		_ = tempFile.Close()   // no-op after the explicit Close below
		_ = os.Remove(tempPath) // clean up temp file on error; fails harmlessly after rename
	}()
	// Write each record as a JSON line
	for _, record := range records {
		data, err := json.Marshal(record)
		if err != nil {
			return fmt.Errorf("failed to marshal deletion record: %w", err)
		}
		if _, err := tempFile.Write(append(data, '\n')); err != nil {
			return fmt.Errorf("failed to write deletion record: %w", err)
		}
	}
	// Flush to disk before the rename so a crash cannot leave the manifest
	// pointing at unsynced data (consistent with AppendDeletion's Sync).
	if err := tempFile.Sync(); err != nil {
		return fmt.Errorf("failed to sync temp file: %w", err)
	}
	// Close before rename
	if err := tempFile.Close(); err != nil {
		return fmt.Errorf("failed to close temp file: %w", err)
	}
	// os.CreateTemp uses 0600; widen to the git-friendly 0644 used elsewhere.
	if err := os.Chmod(tempPath, 0644); err != nil { // #nosec G302 - 0644 needed for git
		return fmt.Errorf("failed to set permissions on temp file: %w", err)
	}
	// Atomic replace
	if err := os.Rename(tempPath, path); err != nil {
		return fmt.Errorf("failed to replace deletions file: %w", err)
	}
	return nil
}
// DefaultPath returns the default path for the deletions manifest.
// beadsDir is typically .beads/
func DefaultPath(beadsDir string) string {
return filepath.Join(beadsDir, "deletions.jsonl")
}
// IsTombstoneMigrationComplete checks if the tombstone migration has been completed.
// After running `bd migrate-tombstones`, the deletions.jsonl file is archived to
// deletions.jsonl.migrated. This function checks for that marker file.
// When migration is complete, new deletion records should NOT be written to
// deletions.jsonl (bd-ffr9).
func IsTombstoneMigrationComplete(beadsDir string) bool {
migratedPath := filepath.Join(beadsDir, "deletions.jsonl.migrated")
_, err := os.Stat(migratedPath)
return err == nil
}
// Count returns the number of lines in the deletions manifest.
// This is a fast operation that doesn't parse JSON, just counts lines.
// Returns 0 if the file doesn't exist or is empty.
func Count(path string) (int, error) {
f, err := os.Open(path) // #nosec G304 - controlled path from caller
if err != nil {
if os.IsNotExist(err) {
return 0, nil
}
return 0, fmt.Errorf("failed to open deletions file: %w", err)
}
defer f.Close()
count := 0
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
if line != "" {
count++
}
}
if err := scanner.Err(); err != nil {
return 0, fmt.Errorf("error reading deletions file: %w", err)
}
return count, nil
}
// PruneResult contains the result of a prune operation.
type PruneResult struct {
	KeptCount   int      // Records retained (timestamp at or after the cutoff)
	PrunedCount int      // Records dropped as older than the retention cutoff
	PrunedIDs   []string // IDs of the dropped records, in sorted-ID order
}
// PruneDeletions removes deletion records older than the specified retention period.
// Returns PruneResult with counts and IDs of pruned records.
// If the file doesn't exist or is empty, returns zero counts with no error.
func PruneDeletions(path string, retentionDays int) (*PruneResult, error) {
	res := &PruneResult{PrunedIDs: []string{}}

	loaded, err := LoadDeletions(path)
	if err != nil {
		return nil, fmt.Errorf("failed to load deletions: %w", err)
	}
	if len(loaded.Records) == 0 {
		return res, nil
	}

	// Anything strictly before this instant is eligible for pruning.
	cutoff := time.Now().AddDate(0, 0, -retentionDays)

	// Sort records by ID so pruning decisions iterate deterministically (bd-wmo).
	ordered := make([]DeletionRecord, 0, len(loaded.Records))
	for _, rec := range loaded.Records {
		ordered = append(ordered, rec)
	}
	sort.Slice(ordered, func(a, b int) bool {
		return ordered[a].ID < ordered[b].ID
	})

	var survivors []DeletionRecord
	for _, rec := range ordered {
		// Records exactly at the cutoff are kept (After || Equal in the
		// original formulation), i.e. only strictly-older records go.
		if rec.Timestamp.Before(cutoff) {
			res.PrunedCount++
			res.PrunedIDs = append(res.PrunedIDs, rec.ID)
			continue
		}
		survivors = append(survivors, rec)
	}
	res.KeptCount = len(survivors)

	// Skip the rewrite entirely when nothing was pruned.
	if res.PrunedCount > 0 {
		if err := WriteDeletions(path, survivors); err != nil {
			return nil, fmt.Errorf("failed to write pruned deletions: %w", err)
		}
	}
	return res, nil
}
// RemoveResult contains the result of a remove operation.
type RemoveResult struct {
	RemovedCount int      // Requested IDs that were actually present and removed
	RemovedIDs   []string // The removed IDs (requested IDs not in the manifest are ignored)
	KeptCount    int      // Records remaining in the manifest
}
// RemoveDeletions removes specific IDs from the deletions manifest.
// This is used when issues are hydrated from git history to prevent
// perpetual skip warnings during sync.
// If the file doesn't exist or is empty, returns zero counts with no error.
func RemoveDeletions(path string, idsToRemove []string) (*RemoveResult, error) {
	res := &RemoveResult{RemovedIDs: []string{}}

	// Nothing requested means nothing to do.
	if len(idsToRemove) == 0 {
		return res, nil
	}

	loaded, err := LoadDeletions(path)
	if err != nil {
		return nil, fmt.Errorf("failed to load deletions: %w", err)
	}
	if len(loaded.Records) == 0 {
		return res, nil
	}

	// Set of requested IDs for O(1) membership tests.
	drop := make(map[string]bool, len(idsToRemove))
	for _, id := range idsToRemove {
		drop[id] = true
	}

	// Partition records into removed vs. survivors.
	var survivors []DeletionRecord
	for id, rec := range loaded.Records {
		if drop[id] {
			res.RemovedCount++
			res.RemovedIDs = append(res.RemovedIDs, id)
			continue
		}
		survivors = append(survivors, rec)
	}
	res.KeptCount = len(survivors)

	// Rewrite only when something actually changed.
	if res.RemovedCount > 0 {
		// Sort survivors by ID so the rewritten file is deterministic.
		sort.Slice(survivors, func(a, b int) bool {
			return survivors[a].ID < survivors[b].ID
		})
		if err := WriteDeletions(path, survivors); err != nil {
			return nil, fmt.Errorf("failed to write updated deletions: %w", err)
		}
	}
	return res, nil
}

View File

@@ -1,861 +0,0 @@
package deletions
import (
"os"
"path/filepath"
"testing"
"time"
)
// TestLoadDeletions_Empty verifies that a missing manifest loads as an
// empty result with no error, no skips, and no warnings.
func TestLoadDeletions_Empty(t *testing.T) {
	// Non-existent file should return empty result
	result, err := LoadDeletions("/nonexistent/path/deletions.jsonl")
	if err != nil {
		t.Fatalf("expected no error for non-existent file, got: %v", err)
	}
	if result.Skipped != 0 {
		t.Errorf("expected 0 skipped, got %d", result.Skipped)
	}
	if len(result.Records) != 0 {
		t.Errorf("expected empty map, got %d records", len(result.Records))
	}
	if len(result.Warnings) != 0 {
		t.Errorf("expected no warnings, got %d", len(result.Warnings))
	}
}

// TestRoundTrip verifies records appended via AppendDeletion come back
// intact from LoadDeletions, including optional-field handling.
func TestRoundTrip(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Create test records
	now := time.Now().Truncate(time.Millisecond) // Truncate for JSON round-trip
	record1 := DeletionRecord{
		ID:        "bd-123",
		Timestamp: now,
		Actor:     "testuser",
		Reason:    "duplicate",
	}
	record2 := DeletionRecord{
		ID:        "bd-456",
		Timestamp: now.Add(time.Hour),
		Actor:     "testuser",
	}
	// Append records
	if err := AppendDeletion(path, record1); err != nil {
		t.Fatalf("AppendDeletion failed: %v", err)
	}
	if err := AppendDeletion(path, record2); err != nil {
		t.Fatalf("AppendDeletion failed: %v", err)
	}
	// Load and verify
	result, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if result.Skipped != 0 {
		t.Errorf("expected 0 skipped, got %d", result.Skipped)
	}
	if len(result.Records) != 2 {
		t.Fatalf("expected 2 records, got %d", len(result.Records))
	}
	// Verify record1
	r1, ok := result.Records["bd-123"]
	if !ok {
		t.Fatal("record bd-123 not found")
	}
	if r1.Actor != "testuser" {
		t.Errorf("expected actor 'testuser', got '%s'", r1.Actor)
	}
	if r1.Reason != "duplicate" {
		t.Errorf("expected reason 'duplicate', got '%s'", r1.Reason)
	}
	// Verify record2 (no Reason set; omitempty should round-trip as empty)
	r2, ok := result.Records["bd-456"]
	if !ok {
		t.Fatal("record bd-456 not found")
	}
	if r2.Reason != "" {
		t.Errorf("expected empty reason, got '%s'", r2.Reason)
	}
}
// TestLoadDeletions_CorruptLines verifies that invalid JSON lines are
// skipped with warnings while the surrounding valid records still load.
func TestLoadDeletions_CorruptLines(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Write mixed valid and corrupt content.
	// NOTE: lines inside the raw string must stay unindented — they are data.
	content := `{"id":"bd-001","ts":"2024-01-01T00:00:00Z","by":"user1"}
this is not valid json
{"id":"bd-002","ts":"2024-01-02T00:00:00Z","by":"user2"}
{"broken json
{"id":"bd-003","ts":"2024-01-03T00:00:00Z","by":"user3","reason":"test"}
`
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("failed to write test file: %v", err)
	}
	result, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions should not fail on corrupt lines: %v", err)
	}
	if result.Skipped != 2 {
		t.Errorf("expected 2 skipped lines, got %d", result.Skipped)
	}
	if len(result.Records) != 3 {
		t.Errorf("expected 3 valid records, got %d", len(result.Records))
	}
	if len(result.Warnings) != 2 {
		t.Errorf("expected 2 warnings, got %d", len(result.Warnings))
	}
	// Verify valid records were loaded
	for _, id := range []string{"bd-001", "bd-002", "bd-003"} {
		if _, ok := result.Records[id]; !ok {
			t.Errorf("expected record %s to be loaded", id)
		}
	}
}

// TestLoadDeletions_MissingID verifies that records with a missing or
// empty "id" field are skipped as invalid.
func TestLoadDeletions_MissingID(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Write record without ID
	content := `{"id":"bd-001","ts":"2024-01-01T00:00:00Z","by":"user1"}
{"ts":"2024-01-02T00:00:00Z","by":"user2"}
{"id":"","ts":"2024-01-03T00:00:00Z","by":"user3"}
`
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("failed to write test file: %v", err)
	}
	result, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	// Two lines should be skipped: one missing "id" field, one with empty "id"
	if result.Skipped != 2 {
		t.Errorf("expected 2 skipped lines (missing/empty ID), got %d", result.Skipped)
	}
	if len(result.Records) != 1 {
		t.Errorf("expected 1 valid record, got %d", len(result.Records))
	}
}

// TestLoadDeletions_LastWriteWins verifies that duplicate IDs dedupe to
// the most recent entry in file order.
func TestLoadDeletions_LastWriteWins(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Write same ID twice with different data
	content := `{"id":"bd-001","ts":"2024-01-01T00:00:00Z","by":"user1","reason":"first"}
{"id":"bd-001","ts":"2024-01-02T00:00:00Z","by":"user2","reason":"second"}
`
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("failed to write test file: %v", err)
	}
	result, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if result.Skipped != 0 {
		t.Errorf("expected 0 skipped, got %d", result.Skipped)
	}
	if len(result.Records) != 1 {
		t.Errorf("expected 1 record (deduplicated), got %d", len(result.Records))
	}
	r := result.Records["bd-001"]
	if r.Actor != "user2" {
		t.Errorf("expected last write to win (user2), got '%s'", r.Actor)
	}
	if r.Reason != "second" {
		t.Errorf("expected last reason 'second', got '%s'", r.Reason)
	}
}
// TestWriteDeletions_Atomic verifies a full-manifest write round-trips
// through LoadDeletions.
func TestWriteDeletions_Atomic(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now().Truncate(time.Millisecond)
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now, Actor: "user1"},
		{ID: "bd-002", Timestamp: now, Actor: "user2", Reason: "cleanup"},
	}
	if err := WriteDeletions(path, records); err != nil {
		t.Fatalf("WriteDeletions failed: %v", err)
	}
	// Verify by loading
	result, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if result.Skipped != 0 {
		t.Errorf("expected 0 skipped, got %d", result.Skipped)
	}
	if len(result.Records) != 2 {
		t.Errorf("expected 2 records, got %d", len(result.Records))
	}
}

// TestWriteDeletions_Overwrite verifies WriteDeletions replaces the whole
// file (compaction semantics), not appends.
func TestWriteDeletions_Overwrite(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now().Truncate(time.Millisecond)
	// Write initial records
	initial := []DeletionRecord{
		{ID: "bd-001", Timestamp: now, Actor: "user1"},
		{ID: "bd-002", Timestamp: now, Actor: "user2"},
		{ID: "bd-003", Timestamp: now, Actor: "user3"},
	}
	if err := WriteDeletions(path, initial); err != nil {
		t.Fatalf("initial WriteDeletions failed: %v", err)
	}
	// Overwrite with fewer records (simulates compaction pruning)
	compacted := []DeletionRecord{
		{ID: "bd-002", Timestamp: now, Actor: "user2"},
	}
	if err := WriteDeletions(path, compacted); err != nil {
		t.Fatalf("compacted WriteDeletions failed: %v", err)
	}
	// Verify only compacted records remain
	result, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(result.Records) != 1 {
		t.Errorf("expected 1 record after compaction, got %d", len(result.Records))
	}
	if _, ok := result.Records["bd-002"]; !ok {
		t.Error("expected bd-002 to remain after compaction")
	}
}

// TestAppendDeletion_CreatesDirectory verifies AppendDeletion makes
// missing parent directories before writing.
func TestAppendDeletion_CreatesDirectory(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "nested", "dir", "deletions.jsonl")
	record := DeletionRecord{
		ID:        "bd-001",
		Timestamp: time.Now(),
		Actor:     "testuser",
	}
	if err := AppendDeletion(path, record); err != nil {
		t.Fatalf("AppendDeletion should create parent directories: %v", err)
	}
	// Verify file exists
	if _, err := os.Stat(path); err != nil {
		t.Errorf("file should exist after append: %v", err)
	}
}

// TestWriteDeletions_CreatesDirectory verifies WriteDeletions makes
// missing parent directories before writing.
func TestWriteDeletions_CreatesDirectory(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "nested", "dir", "deletions.jsonl")
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: time.Now(), Actor: "testuser"},
	}
	if err := WriteDeletions(path, records); err != nil {
		t.Fatalf("WriteDeletions should create parent directories: %v", err)
	}
	// Verify file exists
	if _, err := os.Stat(path); err != nil {
		t.Errorf("file should exist after write: %v", err)
	}
}
// TestDefaultPath verifies the manifest filename is joined onto beadsDir.
func TestDefaultPath(t *testing.T) {
	beadsDir := filepath.Join("home", "user", "project", ".beads")
	path := DefaultPath(beadsDir)
	expected := filepath.Join(beadsDir, "deletions.jsonl")
	if path != expected {
		t.Errorf("expected %s, got %s", expected, path)
	}
}

// TestIsTombstoneMigrationComplete verifies the check keys solely on the
// presence of the deletions.jsonl.migrated marker file.
func TestIsTombstoneMigrationComplete(t *testing.T) {
	t.Run("no migrated file", func(t *testing.T) {
		tmpDir := t.TempDir()
		if IsTombstoneMigrationComplete(tmpDir) {
			t.Error("expected false when no .migrated file exists")
		}
	})
	t.Run("migrated file exists", func(t *testing.T) {
		tmpDir := t.TempDir()
		migratedPath := filepath.Join(tmpDir, "deletions.jsonl.migrated")
		if err := os.WriteFile(migratedPath, []byte("{}"), 0644); err != nil {
			t.Fatalf("failed to create migrated file: %v", err)
		}
		if !IsTombstoneMigrationComplete(tmpDir) {
			t.Error("expected true when .migrated file exists")
		}
	})
	t.Run("deletions.jsonl exists without migrated", func(t *testing.T) {
		tmpDir := t.TempDir()
		deletionsPath := filepath.Join(tmpDir, "deletions.jsonl")
		if err := os.WriteFile(deletionsPath, []byte("{}"), 0644); err != nil {
			t.Fatalf("failed to create deletions file: %v", err)
		}
		// Should return false because the .migrated marker doesn't exist
		if IsTombstoneMigrationComplete(tmpDir) {
			t.Error("expected false when only deletions.jsonl exists (not migrated)")
		}
	})
}

// TestLoadDeletions_EmptyLines verifies blank lines are ignored without
// being counted as skipped.
func TestLoadDeletions_EmptyLines(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Write content with empty lines
	content := `{"id":"bd-001","ts":"2024-01-01T00:00:00Z","by":"user1"}
{"id":"bd-002","ts":"2024-01-02T00:00:00Z","by":"user2"}
`
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("failed to write test file: %v", err)
	}
	result, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if result.Skipped != 0 {
		t.Errorf("empty lines should not count as skipped, got %d", result.Skipped)
	}
	if len(result.Records) != 2 {
		t.Errorf("expected 2 records, got %d", len(result.Records))
	}
}

// TestAppendDeletion_EmptyID verifies the exact validation error returned
// when appending a record without an ID.
func TestAppendDeletion_EmptyID(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	record := DeletionRecord{
		ID:        "",
		Timestamp: time.Now(),
		Actor:     "testuser",
	}
	err := AppendDeletion(path, record)
	if err == nil {
		t.Fatal("AppendDeletion should fail with empty ID")
	}
	// Pins the exact message; changing the error string breaks this test.
	if err.Error() != "cannot append deletion record: ID is required" {
		t.Errorf("unexpected error message: %v", err)
	}
}
// TestPruneDeletions_Empty verifies pruning a non-existent manifest is a
// successful no-op.
func TestPruneDeletions_Empty(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Prune non-existent file should succeed
	result, err := PruneDeletions(path, 7)
	if err != nil {
		t.Fatalf("PruneDeletions should not fail on non-existent file: %v", err)
	}
	if result.KeptCount != 0 {
		t.Errorf("expected 0 kept, got %d", result.KeptCount)
	}
	if result.PrunedCount != 0 {
		t.Errorf("expected 0 pruned, got %d", result.PrunedCount)
	}
}

// TestPruneDeletions_AllRecent verifies that records inside the retention
// window are untouched and the file is not rewritten away.
func TestPruneDeletions_AllRecent(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now.Add(-1 * time.Hour), Actor: "user1"},
		{ID: "bd-002", Timestamp: now.Add(-2 * time.Hour), Actor: "user2"},
		{ID: "bd-003", Timestamp: now.Add(-3 * time.Hour), Actor: "user3"},
	}
	// Write records
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// Prune with 7 day retention - nothing should be pruned
	result, err := PruneDeletions(path, 7)
	if err != nil {
		t.Fatalf("PruneDeletions failed: %v", err)
	}
	if result.KeptCount != 3 {
		t.Errorf("expected 3 kept, got %d", result.KeptCount)
	}
	if result.PrunedCount != 0 {
		t.Errorf("expected 0 pruned, got %d", result.PrunedCount)
	}
	// Verify file unchanged
	loaded, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != 3 {
		t.Errorf("expected 3 records after prune, got %d", len(loaded.Records))
	}
}

// TestPruneDeletions_SomeOld verifies a mixed manifest keeps recent
// records and prunes only the expired ones, rewriting the file.
func TestPruneDeletions_SomeOld(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	// Two recent, two old
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now.Add(-1 * time.Hour), Actor: "user1"}, // Recent
		{ID: "bd-002", Timestamp: now.AddDate(0, 0, -10), Actor: "user2"},  // 10 days old
		{ID: "bd-003", Timestamp: now.Add(-2 * time.Hour), Actor: "user3"}, // Recent
		{ID: "bd-004", Timestamp: now.AddDate(0, 0, -15), Actor: "user4"},  // 15 days old
	}
	// Write records
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// Prune with 7 day retention
	result, err := PruneDeletions(path, 7)
	if err != nil {
		t.Fatalf("PruneDeletions failed: %v", err)
	}
	if result.KeptCount != 2 {
		t.Errorf("expected 2 kept, got %d", result.KeptCount)
	}
	if result.PrunedCount != 2 {
		t.Errorf("expected 2 pruned, got %d", result.PrunedCount)
	}
	// Verify pruned IDs
	prunedMap := make(map[string]bool)
	for _, id := range result.PrunedIDs {
		prunedMap[id] = true
	}
	if !prunedMap["bd-002"] || !prunedMap["bd-004"] {
		t.Errorf("expected bd-002 and bd-004 to be pruned, got %v", result.PrunedIDs)
	}
	// Verify file was updated
	loaded, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != 2 {
		t.Errorf("expected 2 records after prune, got %d", len(loaded.Records))
	}
	if _, ok := loaded.Records["bd-001"]; !ok {
		t.Error("expected bd-001 to remain")
	}
	if _, ok := loaded.Records["bd-003"]; !ok {
		t.Error("expected bd-003 to remain")
	}
}
// TestPruneDeletions_AllOld verifies pruning can empty the manifest when
// every record is past retention.
func TestPruneDeletions_AllOld(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now.AddDate(0, 0, -30), Actor: "user1"},
		{ID: "bd-002", Timestamp: now.AddDate(0, 0, -60), Actor: "user2"},
	}
	// Write records
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// Prune with 7 day retention - all should be pruned
	result, err := PruneDeletions(path, 7)
	if err != nil {
		t.Fatalf("PruneDeletions failed: %v", err)
	}
	if result.KeptCount != 0 {
		t.Errorf("expected 0 kept, got %d", result.KeptCount)
	}
	if result.PrunedCount != 2 {
		t.Errorf("expected 2 pruned, got %d", result.PrunedCount)
	}
	// Verify file is empty
	loaded, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != 0 {
		t.Errorf("expected 0 records after prune, got %d", len(loaded.Records))
	}
}

// TestPruneDeletions_NearBoundary verifies records just inside the
// retention cutoff survive while ones just outside are pruned.
func TestPruneDeletions_NearBoundary(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	// Record just inside retention should be kept (6 days 23 hours)
	// Record just outside retention should be pruned (7 days 1 hour)
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now.AddDate(0, 0, -6).Add(-23 * time.Hour), Actor: "user1"}, // ~6.96 days (kept)
		{ID: "bd-002", Timestamp: now.AddDate(0, 0, -7).Add(-1 * time.Hour), Actor: "user2"},  // ~7.04 days (pruned)
	}
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	result, err := PruneDeletions(path, 7)
	if err != nil {
		t.Fatalf("PruneDeletions failed: %v", err)
	}
	if result.KeptCount != 1 {
		t.Errorf("expected 1 kept (inside boundary), got %d", result.KeptCount)
	}
	if result.PrunedCount != 1 {
		t.Errorf("expected 1 pruned (outside boundary), got %d", result.PrunedCount)
	}
}

// TestPruneDeletions_ZeroRetention verifies a zero-day retention makes the
// cutoff "now": past records are pruned, future-dated records are kept.
func TestPruneDeletions_ZeroRetention(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now.Add(1 * time.Hour), Actor: "user1"},  // 1 hour in future (kept)
		{ID: "bd-002", Timestamp: now.Add(-1 * time.Hour), Actor: "user2"}, // 1 hour ago (pruned with 0 retention)
	}
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// With 0 retention, cutoff is now - past records should be pruned
	result, err := PruneDeletions(path, 0)
	if err != nil {
		t.Fatalf("PruneDeletions failed: %v", err)
	}
	// Future record should be kept, past record should be pruned
	if result.KeptCount != 1 {
		t.Errorf("expected 1 kept with 0 retention, got %d", result.KeptCount)
	}
	if result.PrunedCount != 1 {
		t.Errorf("expected 1 pruned with 0 retention, got %d", result.PrunedCount)
	}
}
// TestCount_Empty verifies Count treats a missing manifest as zero lines.
func TestCount_Empty(t *testing.T) {
	// Non-existent file should return 0
	count, err := Count("/nonexistent/path/deletions.jsonl")
	if err != nil {
		t.Fatalf("expected no error for non-existent file, got: %v", err)
	}
	if count != 0 {
		t.Errorf("expected 0 count for non-existent file, got %d", count)
	}
}

// TestCount_WithRecords verifies Count matches the number of appended
// records.
func TestCount_WithRecords(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now, Actor: "user1"},
		{ID: "bd-002", Timestamp: now, Actor: "user2"},
		{ID: "bd-003", Timestamp: now, Actor: "user3"},
	}
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	count, err := Count(path)
	if err != nil {
		t.Fatalf("Count failed: %v", err)
	}
	if count != 3 {
		t.Errorf("expected 3, got %d", count)
	}
}

// TestCount_WithEmptyLines verifies blank lines are excluded from the
// count.
func TestCount_WithEmptyLines(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Write content with empty lines
	content := `{"id":"bd-001","ts":"2024-01-01T00:00:00Z","by":"user1"}
{"id":"bd-002","ts":"2024-01-02T00:00:00Z","by":"user2"}
`
	if err := os.WriteFile(path, []byte(content), 0644); err != nil {
		t.Fatalf("failed to write test file: %v", err)
	}
	count, err := Count(path)
	if err != nil {
		t.Fatalf("Count failed: %v", err)
	}
	// Should count only non-empty lines
	if count != 2 {
		t.Errorf("expected 2 (excluding empty lines), got %d", count)
	}
}
// Tests for RemoveDeletions (bd-8v5o)

// TestRemoveDeletions_Empty verifies removal from a missing manifest is a
// successful no-op.
func TestRemoveDeletions_Empty(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	// Remove from non-existent file should succeed
	result, err := RemoveDeletions(path, []string{"bd-001"})
	if err != nil {
		t.Fatalf("RemoveDeletions should not fail on non-existent file: %v", err)
	}
	if result.RemovedCount != 0 {
		t.Errorf("expected 0 removed, got %d", result.RemovedCount)
	}
	if result.KeptCount != 0 {
		t.Errorf("expected 0 kept, got %d", result.KeptCount)
	}
}

// TestRemoveDeletions_EmptyIDList verifies an empty ID list leaves the
// manifest untouched.
func TestRemoveDeletions_EmptyIDList(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now, Actor: "user1"},
		{ID: "bd-002", Timestamp: now, Actor: "user2"},
	}
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// Remove with empty ID list should be a no-op
	result, err := RemoveDeletions(path, []string{})
	if err != nil {
		t.Fatalf("RemoveDeletions failed: %v", err)
	}
	if result.RemovedCount != 0 {
		t.Errorf("expected 0 removed with empty list, got %d", result.RemovedCount)
	}
	// Verify file unchanged
	loaded, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != 2 {
		t.Errorf("expected 2 records unchanged, got %d", len(loaded.Records))
	}
}

// TestRemoveDeletions_SomeMatches verifies only the requested IDs are
// removed and the file is rewritten accordingly.
func TestRemoveDeletions_SomeMatches(t *testing.T) {
	tmpDir := t.TempDir()
	path := filepath.Join(tmpDir, "deletions.jsonl")
	now := time.Now()
	records := []DeletionRecord{
		{ID: "bd-001", Timestamp: now, Actor: "user1"},
		{ID: "bd-002", Timestamp: now, Actor: "user2"},
		{ID: "bd-003", Timestamp: now, Actor: "user3"},
	}
	for _, r := range records {
		if err := AppendDeletion(path, r); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// Remove bd-001 and bd-003
	result, err := RemoveDeletions(path, []string{"bd-001", "bd-003"})
	if err != nil {
		t.Fatalf("RemoveDeletions failed: %v", err)
	}
	if result.RemovedCount != 2 {
		t.Errorf("expected 2 removed, got %d", result.RemovedCount)
	}
	if result.KeptCount != 1 {
		t.Errorf("expected 1 kept, got %d", result.KeptCount)
	}
	// Verify removed IDs
	removedMap := make(map[string]bool)
	for _, id := range result.RemovedIDs {
		removedMap[id] = true
	}
	if !removedMap["bd-001"] || !removedMap["bd-003"] {
		t.Errorf("expected bd-001 and bd-003 to be removed, got %v", result.RemovedIDs)
	}
	// Verify file was updated
	loaded, err := LoadDeletions(path)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != 1 {
		t.Errorf("expected 1 record after removal, got %d", len(loaded.Records))
	}
	if _, ok := loaded.Records["bd-002"]; !ok {
		t.Error("expected bd-002 to remain")
	}
}
func TestRemoveDeletions_AllMatches(t *testing.T) {
tmpDir := t.TempDir()
path := filepath.Join(tmpDir, "deletions.jsonl")
now := time.Now()
records := []DeletionRecord{
{ID: "bd-001", Timestamp: now, Actor: "user1"},
{ID: "bd-002", Timestamp: now, Actor: "user2"},
}
for _, r := range records {
if err := AppendDeletion(path, r); err != nil {
t.Fatalf("AppendDeletion failed: %v", err)
}
}
// Remove all records
result, err := RemoveDeletions(path, []string{"bd-001", "bd-002"})
if err != nil {
t.Fatalf("RemoveDeletions failed: %v", err)
}
if result.RemovedCount != 2 {
t.Errorf("expected 2 removed, got %d", result.RemovedCount)
}
if result.KeptCount != 0 {
t.Errorf("expected 0 kept, got %d", result.KeptCount)
}
// Verify file is empty
loaded, err := LoadDeletions(path)
if err != nil {
t.Fatalf("LoadDeletions failed: %v", err)
}
if len(loaded.Records) != 0 {
t.Errorf("expected 0 records after removal, got %d", len(loaded.Records))
}
}
// TestRemoveDeletions_NoMatches verifies that removing IDs absent from the
// manifest is a no-op: nothing is removed and every record is kept.
func TestRemoveDeletions_NoMatches(t *testing.T) {
	manifest := filepath.Join(t.TempDir(), "deletions.jsonl")
	ts := time.Now()
	seed := []DeletionRecord{
		{ID: "bd-001", Timestamp: ts, Actor: "user1"},
		{ID: "bd-002", Timestamp: ts, Actor: "user2"},
	}
	for _, rec := range seed {
		if err := AppendDeletion(manifest, rec); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// Neither of these IDs exists in the manifest.
	result, err := RemoveDeletions(manifest, []string{"bd-999", "bd-888"})
	if err != nil {
		t.Fatalf("RemoveDeletions failed: %v", err)
	}
	if result.RemovedCount != 0 {
		t.Errorf("expected 0 removed (no matches), got %d", result.RemovedCount)
	}
	if result.KeptCount != 2 {
		t.Errorf("expected 2 kept, got %d", result.KeptCount)
	}
	// The on-disk manifest must be left exactly as seeded.
	loaded, err := LoadDeletions(manifest)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != len(seed) {
		t.Errorf("expected 2 records unchanged, got %d", len(loaded.Records))
	}
}
// TestRemoveDeletions_MixedExistingAndNonExisting verifies that a removal
// request mixing known and unknown IDs removes only the known ones and
// leaves the rest intact.
func TestRemoveDeletions_MixedExistingAndNonExisting(t *testing.T) {
	manifest := filepath.Join(t.TempDir(), "deletions.jsonl")
	ts := time.Now()
	for _, rec := range []DeletionRecord{
		{ID: "bd-001", Timestamp: ts, Actor: "user1"},
		{ID: "bd-002", Timestamp: ts, Actor: "user2"},
		{ID: "bd-003", Timestamp: ts, Actor: "user3"},
	} {
		if err := AppendDeletion(manifest, rec); err != nil {
			t.Fatalf("AppendDeletion failed: %v", err)
		}
	}
	// bd-999 does not exist; bd-001 and bd-003 do.
	result, err := RemoveDeletions(manifest, []string{"bd-001", "bd-999", "bd-003"})
	if err != nil {
		t.Fatalf("RemoveDeletions failed: %v", err)
	}
	if result.RemovedCount != 2 {
		t.Errorf("expected 2 removed, got %d", result.RemovedCount)
	}
	if result.KeptCount != 1 {
		t.Errorf("expected 1 kept, got %d", result.KeptCount)
	}
	// Only bd-002 should survive on disk.
	loaded, err := LoadDeletions(manifest)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != 1 {
		t.Errorf("expected 1 record, got %d", len(loaded.Records))
	}
	if _, ok := loaded.Records["bd-002"]; !ok {
		t.Error("expected bd-002 to remain")
	}
}

View File

@@ -1,18 +1,12 @@
package importer
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
@@ -41,10 +35,8 @@ type Options struct {
RenameOnImport bool // Rename imported issues to match database prefix
SkipPrefixValidation bool // Skip prefix validation (for auto-import)
OrphanHandling OrphanHandling // How to handle missing parent issues (default: allow)
ClearDuplicateExternalRefs bool // Clear duplicate external_ref values instead of erroring
NoGitHistory bool // Skip git history backfill for deletions (prevents spurious deletion during JSONL migrations)
IgnoreDeletions bool // Import issues even if they're in the deletions manifest
ProtectLocalExportIDs map[string]bool // IDs from left snapshot to protect from git-history-backfill (bd-sync-deletion fix)
ClearDuplicateExternalRefs bool // Clear duplicate external_ref values instead of erroring
ProtectLocalExportIDs map[string]bool // IDs from left snapshot to protect from deletion (bd-sync-deletion fix)
}
// Result contains statistics about the import operation
@@ -60,14 +52,6 @@ type Result struct {
ExpectedPrefix string // Database configured prefix
MismatchPrefixes map[string]int // Map of mismatched prefixes to count
SkippedDependencies []string // Dependencies skipped due to FK constraint violations
Purged int // Issues purged from DB (found in deletions manifest)
PurgedIDs []string // IDs that were purged
SkippedDeleted int // Issues skipped because they're in deletions manifest
SkippedDeletedIDs []string // IDs that were skipped due to deletions manifest
ConvertedToTombstone int // Legacy deletions.jsonl entries converted to tombstones (bd-wucl)
ConvertedTombstoneIDs []string // IDs that were converted to tombstones
PreservedLocalExport int // Issues preserved because they were in local export (bd-sync-deletion fix)
PreservedLocalIDs []string // IDs that were preserved from local export
}
// ImportIssues handles the core import logic used by both manual and auto-import.
@@ -122,63 +106,6 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss
opts.OrphanHandling = sqliteStore.GetOrphanHandling(ctx)
}
// Handle deletions manifest and tombstones (bd-dve)
//
// Phase 1 (Dual-Write):
// - Tombstones in JSONL are imported as-is (they're issues with status=tombstone)
// - Legacy deletions.jsonl entries are converted to tombstones
// - Non-tombstone issues in deletions manifest are skipped (backwards compat)
//
// Note: Tombstones from JSONL take precedence over legacy deletions.jsonl
if !opts.IgnoreDeletions && dbPath != "" {
beadsDir := filepath.Dir(dbPath)
deletionsPath := deletions.DefaultPath(beadsDir)
loadResult, err := deletions.LoadDeletions(deletionsPath)
if err == nil && len(loadResult.Records) > 0 {
// Build a map of existing tombstones from JSONL for quick lookup
tombstoneIDs := make(map[string]bool)
for _, issue := range issues {
if issue.IsTombstone() {
tombstoneIDs[issue.ID] = true
}
}
var filteredIssues []*types.Issue
for _, issue := range issues {
// Tombstones are always imported (they represent deletions in the new format)
if issue.IsTombstone() {
filteredIssues = append(filteredIssues, issue)
continue
}
if _, found := loadResult.Records[issue.ID]; found {
// Non-tombstone issue is in deletions manifest - skip it
// (this maintains backward compatibility during transition)
// Note: Individual skip messages removed (bd-wsqt) - caller shows summary
result.SkippedDeleted++
result.SkippedDeletedIDs = append(result.SkippedDeletedIDs, issue.ID)
} else {
filteredIssues = append(filteredIssues, issue)
}
}
// Convert legacy deletions.jsonl entries to tombstones if not already in JSONL
for id, del := range loadResult.Records {
if tombstoneIDs[id] {
// Already have a tombstone for this ID in JSONL, skip
continue
}
// Convert this deletion record to a tombstone (bd-wucl)
tombstone := convertDeletionToTombstone(id, del)
filteredIssues = append(filteredIssues, tombstone)
result.ConvertedToTombstone++
result.ConvertedTombstoneIDs = append(result.ConvertedTombstoneIDs, id)
}
issues = filteredIssues
}
}
// Check and handle prefix mismatches
issues, err = handlePrefixMismatch(ctx, sqliteStore, issues, opts, result)
if err != nil {
@@ -219,15 +146,6 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss
return nil, err
}
// Purge deleted issues from DB based on deletions manifest
// Issues that are in the manifest but not in JSONL should be deleted from DB
if !opts.DryRun {
if err := purgeDeletedIssues(ctx, sqliteStore, dbPath, issues, opts, result); err != nil {
// Non-fatal - just log warning
fmt.Fprintf(os.Stderr, "Warning: failed to purge deleted issues: %v\n", err)
}
}
// Checkpoint WAL to ensure data persistence and reduce WAL file size
if err := sqliteStore.CheckpointWAL(ctx); err != nil {
// Non-fatal - just log warning
@@ -922,367 +840,6 @@ func importComments(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issu
return nil
}
// purgeDeletedIssues converts DB issues to tombstones if they are in the deletions
// manifest but not in the incoming JSONL. This enables deletion propagation across clones.
// Also uses git history fallback for deletions that were pruned from the manifest,
// unless opts.NoGitHistory is set (useful during JSONL filename migrations).
//
// Note (bd-dve): With inline tombstones, most deletions are now handled during import
// via convertDeletionToTombstone. This function primarily handles:
// 1. DB-only issues that need to be tombstoned (not in JSONL at all)
// 2. Git history fallback for pruned deletions
//
// Inputs: dbPath locates both the database and (via its directory) the
// deletions.jsonl manifest; jsonlIssues are the incoming issues and are never
// purged; opts.NoGitHistory and opts.ProtectLocalExportIDs gate the fallback
// paths. The result parameter is mutated in place (Purged, PurgedIDs,
// PreservedLocalExport, PreservedLocalIDs).
//
// Returns an error only when the manifest or the DB issue listing cannot be
// read; per-issue tombstone failures are reported to stderr and skipped.
func purgeDeletedIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, dbPath string, jsonlIssues []*types.Issue, opts Options, result *Result) error {
	// Get deletions manifest path (same directory as database)
	beadsDir := filepath.Dir(dbPath)
	deletionsPath := deletions.DefaultPath(beadsDir)
	// Load deletions manifest (gracefully handles missing/empty file)
	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		return fmt.Errorf("failed to load deletions manifest: %w", err)
	}
	// Log any warnings from loading
	for _, warning := range loadResult.Warnings {
		fmt.Fprintf(os.Stderr, "Warning: %s\n", warning)
	}
	// Build set of IDs in the incoming JSONL for O(1) lookup
	jsonlIDs := make(map[string]bool, len(jsonlIssues))
	for _, issue := range jsonlIssues {
		jsonlIDs[issue.ID] = true
	}
	// Get all DB issues (exclude existing tombstones - they're already deleted)
	dbIssues, err := sqliteStore.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		return fmt.Errorf("failed to get DB issues: %w", err)
	}
	// Collect IDs that need git history check (not in JSONL, not in manifest)
	var needGitCheck []string
	// Find DB issues that:
	// 1. Are NOT in the JSONL (not synced from remote)
	// 2. ARE in the deletions manifest (were deleted elsewhere)
	// 3. Are NOT already tombstones
	for _, dbIssue := range dbIssues {
		if jsonlIDs[dbIssue.ID] {
			// Issue is in JSONL, keep it (tombstone or not)
			continue
		}
		if del, found := loadResult.Records[dbIssue.ID]; found {
			// SAFETY GUARD (bd-k92d): Prevent deletion of open/in_progress issues without explicit warning
			// This protects against data loss from:
			// 1. Repo ID mismatches causing incorrect deletions
			// 2. Race conditions during daemon sync
			// 3. Accidental deletion of active work
			if dbIssue.Status == types.StatusOpen || dbIssue.Status == types.StatusInProgress {
				fmt.Fprintf(os.Stderr, "⚠️ WARNING: Refusing to delete %s with status=%s\n", dbIssue.ID, dbIssue.Status)
				fmt.Fprintf(os.Stderr, " Title: %s\n", dbIssue.Title)
				fmt.Fprintf(os.Stderr, " This issue is in deletions.jsonl but still open/in_progress in your database.\n")
				fmt.Fprintf(os.Stderr, " This may indicate:\n")
				fmt.Fprintf(os.Stderr, " - A repo ID mismatch (check with 'bd migrate --update-repo-id')\n")
				fmt.Fprintf(os.Stderr, " - A sync race condition with unpushed local changes\n")
				fmt.Fprintf(os.Stderr, " - Accidental deletion on another clone\n")
				fmt.Fprintf(os.Stderr, " To force deletion: bd delete %s\n", dbIssue.ID)
				fmt.Fprintf(os.Stderr, " To keep this issue: remove it from .beads/deletions.jsonl\n\n")
				continue
			}
			// Issue is in deletions manifest - convert to tombstone (bd-dve)
			if err := sqliteStore.CreateTombstone(ctx, dbIssue.ID, del.Actor, del.Reason); err != nil {
				fmt.Fprintf(os.Stderr, "Warning: failed to create tombstone for %s: %v\n", dbIssue.ID, err)
				continue
			}
			// Log the tombstone creation with metadata
			fmt.Fprintf(os.Stderr, "Tombstoned %s (deleted %s by %s", dbIssue.ID, del.Timestamp.Format("2006-01-02 15:04:05"), del.Actor)
			if del.Reason != "" {
				fmt.Fprintf(os.Stderr, ", reason: %s", del.Reason)
			}
			fmt.Fprintf(os.Stderr, ")\n")
			result.Purged++
			result.PurgedIDs = append(result.PurgedIDs, dbIssue.ID)
		} else {
			// Not in JSONL and not in deletions manifest
			// This could be:
			// 1. Local work (new issue not yet exported)
			// 2. Deletion was pruned from manifest (check git history)
			// 3. Issue was in local export but lost during pull/merge (bd-sync-deletion fix)
			// Check if this issue was in our local export (left snapshot)
			// If so, it's local work that got lost during merge - preserve it!
			if opts.ProtectLocalExportIDs != nil && opts.ProtectLocalExportIDs[dbIssue.ID] {
				fmt.Fprintf(os.Stderr, "Preserving %s (was in local export, lost during merge)\n", dbIssue.ID)
				result.PreservedLocalExport++
				result.PreservedLocalIDs = append(result.PreservedLocalIDs, dbIssue.ID)
				continue
			}
			needGitCheck = append(needGitCheck, dbIssue.ID)
		}
	}
	// Git history fallback for potential pruned deletions
	// Skip if --no-git-history flag is set (prevents spurious deletions during JSONL migrations)
	if len(needGitCheck) > 0 && !opts.NoGitHistory {
		deletedViaGit := checkGitHistoryForDeletions(beadsDir, needGitCheck)
		// Safety guard (bd-21a): Prevent mass deletion when JSONL appears reset
		// If git-history-backfill would delete a large percentage of issues,
		// this likely indicates the JSONL was reset (git reset, branch switch, etc.)
		// rather than intentional deletions
		totalDBIssues := len(dbIssues)
		deleteCount := len(deletedViaGit)
		if deleteCount > 0 && totalDBIssues > 0 {
			deletePercent := float64(deleteCount) / float64(totalDBIssues) * 100
			// Abort if would delete >50% of issues - this is almost certainly a reset
			if deletePercent > 50 {
				fmt.Fprintf(os.Stderr, "Warning: git-history-backfill would tombstone %d of %d issues (%.1f%%) - aborting\n",
					deleteCount, totalDBIssues, deletePercent)
				fmt.Fprintf(os.Stderr, "This usually means the JSONL was reset (git reset, branch switch, etc.)\n")
				fmt.Fprintf(os.Stderr, "If these are legitimate deletions, add them to deletions.jsonl manually\n")
				// Don't delete anything - abort the backfill
				deleteCount = 0
				deletedViaGit = nil
			} else if deleteCount > 10 {
				// Warn (but proceed) if deleting >10 issues
				fmt.Fprintf(os.Stderr, "Warning: git-history-backfill will tombstone %d issues (%.1f%% of %d total)\n",
					deleteCount, deletePercent, totalDBIssues)
			}
		}
		for _, id := range deletedViaGit {
			// SAFETY GUARD (bd-k92d): Check if this is an open/in_progress issue before deleting
			// Get the issue from database to check its status
			issue, err := sqliteStore.GetIssue(ctx, id)
			if err == nil && issue != nil {
				if issue.Status == types.StatusOpen || issue.Status == types.StatusInProgress {
					fmt.Fprintf(os.Stderr, "⚠️ WARNING: git-history-backfill refusing to delete %s with status=%s\n", id, issue.Status)
					fmt.Fprintf(os.Stderr, " Title: %s\n", issue.Title)
					fmt.Fprintf(os.Stderr, " This issue was found in git history but is still open/in_progress.\n")
					fmt.Fprintf(os.Stderr, " This may indicate:\n")
					fmt.Fprintf(os.Stderr, " - A repo ID mismatch between clones\n")
					fmt.Fprintf(os.Stderr, " - The issue was re-created after being deleted\n")
					fmt.Fprintf(os.Stderr, " - Local uncommitted work that conflicts with remote history\n")
					fmt.Fprintf(os.Stderr, " To force deletion: bd delete %s\n", id)
					fmt.Fprintf(os.Stderr, " To prevent git-history checks: use --no-git-history flag\n\n")
					continue
				}
			}
			// Backfill the deletions manifest (self-healing)
			// bd-ffr9: Skip writing to deletions.jsonl if tombstone migration is complete
			if !deletions.IsTombstoneMigrationComplete(beadsDir) {
				backfillRecord := deletions.DeletionRecord{
					ID:        id,
					Timestamp: time.Now().UTC(),
					Actor:     "git-history-backfill",
					Reason:    "recovered from git history (pruned from manifest)",
				}
				if err := deletions.AppendDeletion(deletionsPath, backfillRecord); err != nil {
					fmt.Fprintf(os.Stderr, "Warning: failed to backfill deletion record for %s: %v\n", id, err)
				}
			}
			// Convert to tombstone (bd-dve)
			if err := sqliteStore.CreateTombstone(ctx, id, "git-history-backfill", "recovered from git history (pruned from manifest)"); err != nil {
				fmt.Fprintf(os.Stderr, "Warning: failed to create tombstone for %s (git-recovered): %v\n", id, err)
				continue
			}
			fmt.Fprintf(os.Stderr, "Tombstoned %s (recovered from git history, pruned from manifest)\n", id)
			result.Purged++
			result.PurgedIDs = append(result.PurgedIDs, id)
		}
	} else if len(needGitCheck) > 0 && opts.NoGitHistory {
		// Log that we skipped git history check due to flag
		fmt.Fprintf(os.Stderr, "Skipped git history check for %d issue(s) (--no-git-history flag set)\n", len(needGitCheck))
	}
	return nil
}
// checkGitHistoryForDeletions checks if IDs were ever in the JSONL history.
// Returns the IDs that were found in git history (meaning they were deleted,
// and the deletion record was pruned from the manifest).
//
// Uses batched git log search for efficiency when checking multiple IDs.
func checkGitHistoryForDeletions(beadsDir string, ids []string) []string {
if len(ids) == 0 {
return nil
}
// Find the actual git repo root using git rev-parse (bd-bhd)
// This handles monorepos and nested projects where .beads isn't at repo root
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, "git", "rev-parse", "--show-toplevel")
cmd.Dir = beadsDir
output, err := cmd.Output()
if err != nil {
// Not in a git repo or git not available - can't do history check
return nil
}
repoRoot := strings.TrimSpace(string(output))
// Compute relative path from repo root to issues.jsonl
// beadsDir is absolute, compute its path relative to repoRoot
absBeadsDir, err := filepath.Abs(beadsDir)
if err != nil {
return nil
}
relBeadsDir, err := filepath.Rel(repoRoot, absBeadsDir)
if err != nil {
return nil
}
// Build JSONL path relative to repo root (bd-6xd: issues.jsonl is canonical)
jsonlPath := filepath.Join(relBeadsDir, "issues.jsonl")
var deleted []string
// For efficiency, batch IDs into a single git command when possible
// We use git log with -S to search for string additions/removals
if len(ids) <= 10 {
// Small batch: check each ID individually for accuracy
for _, id := range ids {
if wasEverInJSONL(repoRoot, jsonlPath, id) {
deleted = append(deleted, id)
}
}
} else {
// Large batch: use grep pattern for efficiency
// This may have some false positives, but is much faster
deleted = batchCheckGitHistory(repoRoot, jsonlPath, ids)
}
return deleted
}
// gitHistoryTimeout bounds every git history search so that very large
// repositories cannot hang the import indefinitely (bd-f0n).
const gitHistoryTimeout = 30 * time.Second

// wasEverInJSONL reports whether the given issue ID appeared in the JSONL
// in any commit (added or removed), using git's pickaxe search.
// The caller is responsible for confirming the ID is NOT currently in the
// JSONL before concluding that the issue was deleted (vs still present).
func wasEverInJSONL(repoRoot, jsonlPath, id string) bool {
	// Equivalent to:
	//   git log --all -S '"id":"bd-xxx"' --oneline -- <jsonlPath>
	// -S uses literal string matching (not regex), so no escaping is needed.
	needle := fmt.Sprintf(`"id":"%s"`, id)
	// Bound the search to avoid hangs on large repos (bd-f0n).
	ctx, cancel := context.WithTimeout(context.Background(), gitHistoryTimeout)
	defer cancel()
	// #nosec G204 - searchPattern is constructed from validated issue IDs
	gitLog := exec.CommandContext(ctx, "git", "log", "--all", "-S", needle, "--oneline", "--", jsonlPath)
	gitLog.Dir = repoRoot
	var captured bytes.Buffer
	gitLog.Stdout = &captured
	gitLog.Stderr = nil // Ignore stderr
	if err := gitLog.Run(); err != nil {
		// Could be a shallow clone, missing repo, or timeout. Be conservative:
		// treat the issue as local work and do not delete it.
		return false
	}
	// Non-empty output means some commit touched this ID - it was once in
	// the JSONL, and (given the caller's precondition) has been deleted.
	return len(bytes.TrimSpace(captured.Bytes())) > 0
}
// batchCheckGitHistory searches git history for many IDs at once using a
// single git log -G (regex) invocation, returning the IDs that were found.
// If the batch search fails it degrades to per-ID lookups.
func batchCheckGitHistory(repoRoot, jsonlPath string, ids []string) []string {
	// Build an alternation of `"id":"..."` patterns. Each ID is escaped so
	// regex metacharacters cannot produce a malformed pattern (bd-bgs).
	alternatives := make([]string, 0, len(ids))
	for _, id := range ids {
		alternatives = append(alternatives, fmt.Sprintf(`"id":"%s"`, regexp.QuoteMeta(id)))
	}
	pattern := strings.Join(alternatives, "|")
	// Bound the search to avoid hangs on large repos (bd-f0n).
	ctx, cancel := context.WithTimeout(context.Background(), gitHistoryTimeout)
	defer cancel()
	// #nosec G204 - searchPattern is constructed from validated issue IDs
	gitLog := exec.CommandContext(ctx, "git", "log", "--all", "-G", pattern, "-p", "--", jsonlPath)
	gitLog.Dir = repoRoot
	var captured bytes.Buffer
	gitLog.Stdout = &captured
	gitLog.Stderr = nil // Ignore stderr
	if err := gitLog.Run(); err != nil {
		// Batch search failed (timeout, shallow clone, etc.). Fall back to
		// individual checks, which carry their own timeout protection.
		var found []string
		for _, id := range ids {
			if wasEverInJSONL(repoRoot, jsonlPath, id) {
				found = append(found, id)
			}
		}
		return found
	}
	history := captured.String()
	if history == "" {
		return nil
	}
	// The -p output includes patch text; scan it to determine exactly which
	// of the requested IDs actually appear in history.
	var found []string
	for _, id := range ids {
		if strings.Contains(history, fmt.Sprintf(`"id":"%s"`, id)) {
			found = append(found, id)
		}
	}
	return found
}
// Helper functions

// convertDeletionToTombstone builds a tombstone Issue from a legacy
// DeletionRecord, used during import to migrate deletions.jsonl entries to
// inline tombstones (bd-dve).
// Priority zero marks "unknown" (bd-9auw), distinguishing it from any
// user-set value; IssueType must pass validation, so TypeTask is the default.
func convertDeletionToTombstone(id string, del deletions.DeletionRecord) *types.Issue {
	when := del.Timestamp
	tombstone := &types.Issue{
		ID:           id,
		Title:        "(deleted)",
		Description:  "",
		Status:       types.StatusTombstone,
		Priority:     0,              // unknown (0 = unset)
		IssueType:    types.TypeTask, // default, must be a valid type
		CreatedAt:    when,
		UpdatedAt:    when,
		DeletedAt:    &when,
		DeletedBy:    del.Actor,
		DeleteReason: del.Reason,
		OriginalType: "", // legacy deletions.jsonl never recorded this
	}
	return tombstone
}
func GetPrefixList(prefixes map[string]int) []string {
var result []string
keys := make([]string, 0, len(prefixes))

View File

@@ -1,728 +0,0 @@
package importer
import (
"context"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// TestAutoImportPurgesBugBd4pv tests that auto-import doesn't incorrectly purge
// issues due to git history backfill finding them in old commits.
// This is a reproduction test for bd-4pv.
//
// Setup: a real git repo is created with 5 issues committed to
// .beads/issues.jsonl, then the same 5 issues are imported into a fresh
// database with NoGitHistory=false. Because every issue is both in the JSONL
// and in git history, none of them should be treated as deleted.
func TestAutoImportPurgesBugBd4pv(t *testing.T) {
	// Create a temp directory for a test git repo
	tmpDir := t.TempDir()
	repoDir := filepath.Join(tmpDir, "test-repo")
	beadsDir := filepath.Join(repoDir, ".beads")
	// Initialize git repo
	if err := os.MkdirAll(repoDir, 0755); err != nil {
		t.Fatalf("failed to create repo dir: %v", err)
	}
	cmd := exec.Command("git", "init")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to init git repo: %v\n%s", err, out)
	}
	// Configure git user for commits (required for git commit to succeed)
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git email: %v", err)
	}
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git name: %v", err)
	}
	// Create .beads directory
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	// Create initial issues.jsonl with 5 issues
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	initialContent := `{"id":"bd-abc1","title":"Issue 1","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-abc2","title":"Issue 2","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-abc3","title":"Issue 3","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-abc4","title":"Issue 4","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-abc5","title":"Issue 5","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
`
	if err := os.WriteFile(jsonlPath, []byte(initialContent), 0644); err != nil {
		t.Fatalf("failed to write initial JSONL: %v", err)
	}
	// Commit the initial state so the issues exist in git history
	cmd = exec.Command("git", "add", ".beads/issues.jsonl")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git add: %v\n%s", err, out)
	}
	cmd = exec.Command("git", "commit", "-m", "Initial issues")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git commit: %v\n%s", err, out)
	}
	// Now simulate what happens during auto-import:
	// 1. Database is empty
	// 2. Auto-import detects issues in git and imports them
	ctx := context.Background()
	dbPath := filepath.Join(beadsDir, "beads.db")
	store, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer store.Close()
	// Set up prefix
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// Parse the JSONL issues (same 5 IDs as committed above)
	now := time.Now()
	issues := []*types.Issue{
		{ID: "bd-abc1", Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-abc2", Title: "Issue 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-abc3", Title: "Issue 3", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-abc4", Title: "Issue 4", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-abc5", Title: "Issue 5", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	// Do the import WITHOUT NoGitHistory (the buggy behavior)
	opts := Options{
		DryRun:               false,
		SkipUpdate:           false,
		SkipPrefixValidation: true,
		NoGitHistory:         false, // Bug: should be true for auto-import
	}
	result, err := ImportIssues(ctx, dbPath, store, issues, opts)
	if err != nil {
		t.Fatalf("import failed: %v", err)
	}
	// Check how many issues are in the database
	allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	// With the bug, some or all issues might be purged
	// because git history finds them in the commit and thinks they were "deleted"
	t.Logf("Import result: created=%d, updated=%d, purged=%d, purgedIDs=%v",
		result.Created, result.Updated, result.Purged, result.PurgedIDs)
	t.Logf("Issues in DB after import: %d", len(allIssues))
	// The correct behavior is 5 issues in DB
	// The bug would result in fewer (potentially 0) due to incorrect purging
	if len(allIssues) != 5 {
		t.Errorf("Expected 5 issues in DB, got %d. This is the bd-4pv bug!", len(allIssues))
		t.Logf("Purged IDs: %v", result.PurgedIDs)
	}
}
// TestGitHistoryBackfillPurgesLocalIssues tests the scenario where git history
// backfill incorrectly purges issues that exist locally but were never in the remote JSONL.
// This is another aspect of the bd-4pv bug.
//
// Setup: a git repo is committed with one shared issue in issues.jsonl and an
// empty deletions.jsonl. The database then gets that shared issue plus 4
// local-only issues. Importing the one-issue JSONL with NoGitHistory=false
// must not purge the local-only issues (they are absent from both the
// deletions manifest and git history).
func TestGitHistoryBackfillPurgesLocalIssues(t *testing.T) {
	// Create a temp directory for a test git repo
	tmpDir := t.TempDir()
	repoDir := filepath.Join(tmpDir, "test-repo")
	beadsDir := filepath.Join(repoDir, ".beads")
	// Initialize git repo
	if err := os.MkdirAll(repoDir, 0755); err != nil {
		t.Fatalf("failed to create repo dir: %v", err)
	}
	cmd := exec.Command("git", "init")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to init git repo: %v\n%s", err, out)
	}
	// Configure git user for commits (required for git commit to succeed)
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git email: %v", err)
	}
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git name: %v", err)
	}
	// Create .beads directory
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	// Create initial issues.jsonl with 1 issue
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	initialContent := `{"id":"bd-shared1","title":"Shared Issue","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
`
	if err := os.WriteFile(jsonlPath, []byte(initialContent), 0644); err != nil {
		t.Fatalf("failed to write initial JSONL: %v", err)
	}
	// Create empty deletions.jsonl (no recorded deletions)
	deletionsPath := deletions.DefaultPath(beadsDir)
	if err := os.WriteFile(deletionsPath, []byte(""), 0644); err != nil {
		t.Fatalf("failed to write deletions: %v", err)
	}
	// Commit the initial state
	cmd = exec.Command("git", "add", ".beads/")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git add: %v\n%s", err, out)
	}
	cmd = exec.Command("git", "commit", "-m", "Initial issues")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git commit: %v\n%s", err, out)
	}
	// Create database with the shared issue AND local issues
	ctx := context.Background()
	dbPath := filepath.Join(beadsDir, "beads.db")
	store, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer store.Close()
	// Set up prefix
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// Create issues in DB: 1 shared (in JSONL) + 4 local-only
	now := time.Now()
	dbIssues := []*types.Issue{
		{ID: "bd-shared1", Title: "Shared Issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local1", Title: "Local Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local2", Title: "Local Issue 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local3", Title: "Local Issue 3", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local4", Title: "Local Issue 4", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	for _, issue := range dbIssues {
		issue.ContentHash = issue.ComputeContentHash()
		if err := store.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("failed to create issue %s: %v", issue.ID, err)
		}
	}
	// Verify DB has 5 issues before the import runs
	allBefore, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	if len(allBefore) != 5 {
		t.Fatalf("Expected 5 issues before import, got %d", len(allBefore))
	}
	// Now import from JSONL (only has 1 issue: bd-shared1)
	// WITHOUT NoGitHistory - this is the bug
	incomingIssues := []*types.Issue{
		{ID: "bd-shared1", Title: "Shared Issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	opts := Options{
		DryRun:               false,
		SkipUpdate:           false,
		SkipPrefixValidation: true,
		NoGitHistory:         false, // Bug: local issues might be purged if they appear in git history
	}
	result, err := ImportIssues(ctx, dbPath, store, incomingIssues, opts)
	if err != nil {
		t.Fatalf("import failed: %v", err)
	}
	t.Logf("Import result: created=%d, updated=%d, unchanged=%d, purged=%d, purgedIDs=%v",
		result.Created, result.Updated, result.Unchanged, result.Purged, result.PurgedIDs)
	// Check how many issues are in the database
	allAfter, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	t.Logf("Issues in DB after import: %d", len(allAfter))
	for _, issue := range allAfter {
		t.Logf("  - %s: %s", issue.ID, issue.Title)
	}
	// Expected: bd-shared1 + bd-local1..4 = 5 issues
	// The local issues should NOT be purged because:
	// 1. They're not in the deletions manifest
	// 2. They were never in git history (they're local-only)
	// 3. NoGitHistory=false but git history check shouldn't find bd-local* in history
	if len(allAfter) != 5 {
		t.Errorf("Expected 5 issues in DB, got %d. Local issues may have been incorrectly purged!", len(allAfter))
	}
	// Should have no purges (bd-local* were never in git history)
	if result.Purged != 0 {
		t.Errorf("Expected 0 purged issues, got %d (IDs: %v)", result.Purged, result.PurgedIDs)
	}
}
// TestNoGitHistoryPreventsIncorrectPurge tests that setting NoGitHistory prevents
// the purge of issues that exist in the DB but not in JSONL during auto-import.
// This is the fix for bd-4pv - auto-import should NOT run git history backfill.
//
// Setup: a git repo whose committed issues.jsonl holds only bd-shared1, while
// the local DB additionally holds four local-only issues (bd-local1..4).
func TestNoGitHistoryPreventsIncorrectPurge(t *testing.T) {
	// Create a temp directory for a test git repo
	tmpDir := t.TempDir()
	repoDir := filepath.Join(tmpDir, "test-repo")
	beadsDir := filepath.Join(repoDir, ".beads")
	// Initialize git repo
	if err := os.MkdirAll(repoDir, 0755); err != nil {
		t.Fatalf("failed to create repo dir: %v", err)
	}
	cmd := exec.Command("git", "init")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to init git repo: %v\n%s", err, out)
	}
	// Configure git user for commits
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git email: %v", err)
	}
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git name: %v", err)
	}
	// Create .beads directory
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	// Create issues.jsonl with 1 issue
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	initialContent := `{"id":"bd-shared1","title":"Shared Issue","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
`
	if err := os.WriteFile(jsonlPath, []byte(initialContent), 0644); err != nil {
		t.Fatalf("failed to write initial JSONL: %v", err)
	}
	// Create empty deletions.jsonl
	deletionsPath := deletions.DefaultPath(beadsDir)
	if err := os.WriteFile(deletionsPath, []byte(""), 0644); err != nil {
		t.Fatalf("failed to write deletions: %v", err)
	}
	// Commit
	cmd = exec.Command("git", "add", ".beads/")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git add: %v\n%s", err, out)
	}
	cmd = exec.Command("git", "commit", "-m", "Initial issues")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git commit: %v\n%s", err, out)
	}
	// Create database with 5 issues (1 shared + 4 local-only)
	ctx := context.Background()
	dbPath := filepath.Join(beadsDir, "beads.db")
	store, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer store.Close()
	// Set up prefix
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// Create all 5 issues in DB
	now := time.Now()
	dbIssues := []*types.Issue{
		{ID: "bd-shared1", Title: "Shared Issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local1", Title: "Local Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local2", Title: "Local Issue 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local3", Title: "Local Issue 3", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-local4", Title: "Local Issue 4", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	for _, issue := range dbIssues {
		// Compute each issue's content hash before writing it to the store.
		issue.ContentHash = issue.ComputeContentHash()
		if err := store.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("failed to create issue %s: %v", issue.ID, err)
		}
	}
	// Import from JSONL (only has 1 issue) WITH NoGitHistory=true (the fix)
	incomingIssues := []*types.Issue{
		{ID: "bd-shared1", Title: "Shared Issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	opts := Options{
		DryRun: false,
		SkipUpdate: false,
		SkipPrefixValidation: true,
		NoGitHistory: true, // Fix: skip git history backfill during auto-import
	}
	result, err := ImportIssues(ctx, dbPath, store, incomingIssues, opts)
	if err != nil {
		t.Fatalf("import failed: %v", err)
	}
	t.Logf("Import result: created=%d, updated=%d, unchanged=%d, purged=%d, purgedIDs=%v",
		result.Created, result.Updated, result.Unchanged, result.Purged, result.PurgedIDs)
	// Check how many issues are in the database
	allAfter, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	t.Logf("Issues in DB after import: %d", len(allAfter))
	for _, issue := range allAfter {
		t.Logf("  - %s: %s", issue.ID, issue.Title)
	}
	// With NoGitHistory=true, the 4 local issues should NOT be purged
	// because we skip git history backfill entirely during auto-import.
	// This is the correct behavior for auto-import - local work should be preserved.
	// Expected: all 5 issues remain
	if len(allAfter) != 5 {
		t.Errorf("Expected 5 issues in DB (local work preserved), got %d", len(allAfter))
	}
	// Should have no purges
	if result.Purged != 0 {
		t.Errorf("Expected 0 purged issues (NoGitHistory prevents purge), got %d (IDs: %v)", result.Purged, result.PurgedIDs)
	}
}
// TestAutoImportWithNoGitHistoryFlag tests the fix for bd-4pv
func TestAutoImportWithNoGitHistoryFlag(t *testing.T) {
	// Lay out a scratch git repository with a .beads directory inside it.
	scratch := t.TempDir()
	repoDir := filepath.Join(scratch, "test-repo")
	beadsDir := filepath.Join(repoDir, ".beads")
	if err := os.MkdirAll(repoDir, 0755); err != nil {
		t.Fatalf("failed to create repo dir: %v", err)
	}
	gitCmd := exec.Command("git", "init")
	gitCmd.Dir = repoDir
	if out, err := gitCmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to init git repo: %v\n%s", err, out)
	}
	// A commit identity must be configured before anything can be committed.
	gitCmd = exec.Command("git", "config", "user.email", "test@test.com")
	gitCmd.Dir = repoDir
	if err := gitCmd.Run(); err != nil {
		t.Fatalf("failed to config git email: %v", err)
	}
	gitCmd = exec.Command("git", "config", "user.name", "Test User")
	gitCmd.Dir = repoDir
	if err := gitCmd.Run(); err != nil {
		t.Fatalf("failed to config git name: %v", err)
	}
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	// Seed issues.jsonl with five issues.
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	seedJSONL := `{"id":"bd-xyz1","title":"Issue 1","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-xyz2","title":"Issue 2","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-xyz3","title":"Issue 3","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-xyz4","title":"Issue 4","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-xyz5","title":"Issue 5","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
`
	if err := os.WriteFile(jsonlPath, []byte(seedJSONL), 0644); err != nil {
		t.Fatalf("failed to write initial JSONL: %v", err)
	}
	// An empty deletions manifest sits alongside the issues file.
	manifestPath := deletions.DefaultPath(beadsDir)
	if err := os.WriteFile(manifestPath, []byte(""), 0644); err != nil {
		t.Fatalf("failed to write deletions: %v", err)
	}
	// Commit the seeded state so it appears in git history.
	gitCmd = exec.Command("git", "add", ".beads/")
	gitCmd.Dir = repoDir
	if out, err := gitCmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git add: %v\n%s", err, out)
	}
	gitCmd = exec.Command("git", "commit", "-m", "Initial issues")
	gitCmd.Dir = repoDir
	if out, err := gitCmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git commit: %v\n%s", err, out)
	}
	ctx := context.Background()
	dbFile := filepath.Join(beadsDir, "beads.db")
	store, err := sqlite.New(ctx, dbFile)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer store.Close()
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// The same five issues, as parsed in-memory values.
	now := time.Now()
	incoming := []*types.Issue{
		{ID: "bd-xyz1", Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-xyz2", Title: "Issue 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-xyz3", Title: "Issue 3", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-xyz4", Title: "Issue 4", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-xyz5", Title: "Issue 5", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	// Import with NoGitHistory=true: the fix under test is that auto-import
	// must not run the git history backfill.
	importOpts := Options{
		DryRun:               false,
		SkipUpdate:           false,
		SkipPrefixValidation: true,
		NoGitHistory:         true, // Fix: skip git history backfill during auto-import
	}
	res, err := ImportIssues(ctx, dbFile, store, incoming, importOpts)
	if err != nil {
		t.Fatalf("import failed: %v", err)
	}
	remaining, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	t.Logf("Import result: created=%d, updated=%d, purged=%d",
		res.Created, res.Updated, res.Purged)
	t.Logf("Issues in DB after import: %d", len(remaining))
	// With the fix, all 5 issues should be in DB
	if len(remaining) != 5 {
		t.Errorf("Expected 5 issues in DB, got %d", len(remaining))
	}
	// Should have no purges
	if res.Purged != 0 {
		t.Errorf("Expected 0 purged issues, got %d", res.Purged)
	}
}
// TestMassDeletionSafetyGuard tests the fix for bd-21a where git-history-backfill
// would incorrectly purge the entire database when a JSONL was reset.
// The safety guard should abort if >50% of issues would be deleted.
//
// Scenario: commit 10 issues, then reset the JSONL down to 2 (80% would be
// purged) and import with git history enabled; the guard must prevent purges.
func TestMassDeletionSafetyGuard(t *testing.T) {
	// Create a temp directory for a test git repo
	tmpDir := t.TempDir()
	repoDir := filepath.Join(tmpDir, "test-repo")
	beadsDir := filepath.Join(repoDir, ".beads")
	// Initialize git repo
	if err := os.MkdirAll(repoDir, 0755); err != nil {
		t.Fatalf("failed to create repo dir: %v", err)
	}
	cmd := exec.Command("git", "init")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to init git repo: %v\n%s", err, out)
	}
	// Configure git user for commits
	cmd = exec.Command("git", "config", "user.email", "test@test.com")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git email: %v", err)
	}
	cmd = exec.Command("git", "config", "user.name", "Test User")
	cmd.Dir = repoDir
	if err := cmd.Run(); err != nil {
		t.Fatalf("failed to config git name: %v", err)
	}
	// Create .beads directory
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	// Create initial issues.jsonl with 10 issues
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	initialContent := `{"id":"bd-mass01","title":"Issue 1","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass02","title":"Issue 2","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass03","title":"Issue 3","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass04","title":"Issue 4","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass05","title":"Issue 5","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass06","title":"Issue 6","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass07","title":"Issue 7","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass08","title":"Issue 8","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass09","title":"Issue 9","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass10","title":"Issue 10","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
`
	if err := os.WriteFile(jsonlPath, []byte(initialContent), 0644); err != nil {
		t.Fatalf("failed to write initial JSONL: %v", err)
	}
	// Also create a deletions.jsonl (empty)
	deletionsPath := deletions.DefaultPath(beadsDir)
	if err := os.WriteFile(deletionsPath, []byte(""), 0644); err != nil {
		t.Fatalf("failed to write deletions: %v", err)
	}
	// Commit the initial state
	cmd = exec.Command("git", "add", ".beads/")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git add: %v\n%s", err, out)
	}
	cmd = exec.Command("git", "commit", "-m", "Initial issues with 10 entries")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git commit: %v\n%s", err, out)
	}
	ctx := context.Background()
	dbPath := filepath.Join(beadsDir, "beads.db")
	store, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer store.Close()
	// Set up prefix
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// First, import all 10 issues to the database
	now := time.Now()
	allIssues := []*types.Issue{
		{ID: "bd-mass01", Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass02", Title: "Issue 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass03", Title: "Issue 3", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass04", Title: "Issue 4", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass05", Title: "Issue 5", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass06", Title: "Issue 6", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass07", Title: "Issue 7", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass08", Title: "Issue 8", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass09", Title: "Issue 9", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass10", Title: "Issue 10", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	// Initial import - NoGitHistory to just populate the DB
	opts := Options{
		DryRun: false,
		SkipUpdate: false,
		SkipPrefixValidation: true,
		NoGitHistory: true,
	}
	_, err = ImportIssues(ctx, dbPath, store, allIssues, opts)
	if err != nil {
		t.Fatalf("initial import failed: %v", err)
	}
	// Verify all 10 issues are in DB
	dbIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	if len(dbIssues) != 10 {
		t.Fatalf("Expected 10 issues after initial import, got %d", len(dbIssues))
	}
	// Now simulate a "reset" scenario:
	// JSONL is reset to only have 2 issues (80% would be deleted)
	resetContent := `{"id":"bd-mass01","title":"Issue 1","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
{"id":"bd-mass02","title":"Issue 2","status":"open","priority":1,"issue_type":"task","created_at":"2025-01-01T00:00:00Z","updated_at":"2025-01-01T00:00:00Z"}
`
	if err := os.WriteFile(jsonlPath, []byte(resetContent), 0644); err != nil {
		t.Fatalf("failed to write reset JSONL: %v", err)
	}
	// Commit the reset state
	cmd = exec.Command("git", "add", ".beads/issues.jsonl")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git add reset: %v\n%s", err, out)
	}
	cmd = exec.Command("git", "commit", "-m", "Reset JSONL to 2 issues")
	cmd.Dir = repoDir
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("failed to git commit reset: %v\n%s", err, out)
	}
	// Now try to import the reset JSONL WITH git history enabled
	// This should trigger the safety guard since 8/10 = 80% > 50%
	resetIssues := []*types.Issue{
		{ID: "bd-mass01", Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
		{ID: "bd-mass02", Title: "Issue 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, CreatedAt: now, UpdatedAt: now},
	}
	opts = Options{
		DryRun: false,
		SkipUpdate: false,
		SkipPrefixValidation: true,
		NoGitHistory: false, // Enable git history - this is the test!
	}
	result, err := ImportIssues(ctx, dbPath, store, resetIssues, opts)
	if err != nil {
		t.Fatalf("import failed: %v", err)
	}
	// The safety guard should have prevented any purges
	// because 8/10 = 80% > 50% threshold
	t.Logf("Import result: created=%d, updated=%d, unchanged=%d, purged=%d",
		result.Created, result.Updated, result.Unchanged, result.Purged)
	// Verify all 10 issues are STILL in DB (safety guard prevented deletion)
	dbIssues, err = store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues after reset import: %v", err)
	}
	t.Logf("Issues in DB after reset import: %d", len(dbIssues))
	if len(dbIssues) != 10 {
		t.Errorf("Expected 10 issues in DB (safety guard should prevent purge), got %d", len(dbIssues))
	}
	if result.Purged != 0 {
		t.Errorf("Expected 0 purged issues (safety guard), got %d (IDs: %v)", result.Purged, result.PurgedIDs)
	}
}

View File

@@ -9,7 +9,6 @@ import (
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
@@ -1075,88 +1074,6 @@ func TestConcurrentExternalRefImports(t *testing.T) {
})
}
func TestCheckGitHistoryForDeletions_EmptyList(t *testing.T) {
	// A nil or zero-length ID list yields nil without any git work.
	if got := checkGitHistoryForDeletions("/tmp/test", nil); got != nil {
		t.Errorf("Expected nil for empty list, got %v", got)
	}
	if got := checkGitHistoryForDeletions("/tmp/test", []string{}); got != nil {
		t.Errorf("Expected nil for empty slice, got %v", got)
	}
}
func TestCheckGitHistoryForDeletions_NonGitDir(t *testing.T) {
	// Outside a git repository the check is conservative and reports nothing.
	dir := t.TempDir()
	if got := checkGitHistoryForDeletions(dir, []string{"bd-test"}); len(got) != 0 {
		t.Errorf("Expected empty result for non-git dir, got %v", got)
	}
}
func TestWasEverInJSONL_NonGitDir(t *testing.T) {
	// Without a git repository the answer is a conservative false.
	dir := t.TempDir()
	if wasEverInJSONL(dir, ".beads/beads.jsonl", "bd-test") {
		t.Error("Expected false for non-git dir")
	}
}
func TestBatchCheckGitHistory_NonGitDir(t *testing.T) {
	// A non-git directory falls back to individual checks and finds nothing.
	dir := t.TempDir()
	ids := []string{"bd-test1", "bd-test2"}
	if got := batchCheckGitHistory(dir, ".beads/beads.jsonl", ids); len(got) != 0 {
		t.Errorf("Expected empty result for non-git dir, got %v", got)
	}
}
func TestConvertDeletionToTombstone(t *testing.T) {
	// Convert a fully-populated deletion record and check every tombstone field.
	when := time.Date(2025, 12, 5, 14, 30, 0, 0, time.UTC)
	record := deletions.DeletionRecord{
		ID:        "bd-test",
		Timestamp: when,
		Actor:     "alice",
		Reason:    "no longer needed",
	}
	got := convertDeletionToTombstone("bd-test", record)
	if got.ID != "bd-test" {
		t.Errorf("Expected ID 'bd-test', got %q", got.ID)
	}
	if got.Status != types.StatusTombstone {
		t.Errorf("Expected status 'tombstone', got %q", got.Status)
	}
	if got.Title != "(deleted)" {
		t.Errorf("Expected title '(deleted)', got %q", got.Title)
	}
	if got.DeletedAt == nil || !got.DeletedAt.Equal(when) {
		t.Errorf("Expected DeletedAt to be %v, got %v", when, got.DeletedAt)
	}
	if got.DeletedBy != "alice" {
		t.Errorf("Expected DeletedBy 'alice', got %q", got.DeletedBy)
	}
	if got.DeleteReason != "no longer needed" {
		t.Errorf("Expected DeleteReason 'no longer needed', got %q", got.DeleteReason)
	}
	if got.OriginalType != "" {
		t.Errorf("Expected empty OriginalType, got %q", got.OriginalType)
	}
	// Priority zero signals "unknown" for reconstructed tombstones (bd-9auw).
	if got.Priority != 0 {
		t.Errorf("Expected Priority 0 (unknown), got %d", got.Priority)
	}
	// IssueType must be valid for validation, so it defaults to task.
	if got.IssueType != types.TypeTask {
		t.Errorf("Expected IssueType 'task', got %q", got.IssueType)
	}
}
func TestImportIssues_TombstoneFromJSONL(t *testing.T) {
ctx := context.Background()
@@ -1225,182 +1142,6 @@ func TestImportIssues_TombstoneFromJSONL(t *testing.T) {
}
}
// TestImportIssues_TombstoneNotFilteredByDeletionsManifest verifies that an
// explicit tombstone arriving via JSONL is imported even when the same ID
// already appears in the deletions manifest.
func TestImportIssues_TombstoneNotFilteredByDeletionsManifest(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()
	// NOTE(review): prefer filepath.Join(tmpDir, "test.db") if filepath is
	// imported in this file — string concatenation assumes "/" separators.
	tmpDB := tmpDir + "/test.db"
	store, err := sqlite.New(context.Background(), tmpDB)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer store.Close()
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Create a deletions manifest entry (older than the tombstone below)
	deletionsPath := deletions.DefaultPath(tmpDir)
	delRecord := deletions.DeletionRecord{
		ID: "test-abc123",
		Timestamp: time.Now().Add(-time.Hour),
		Actor: "alice",
		Reason: "old deletion",
	}
	if err := deletions.AppendDeletion(deletionsPath, delRecord); err != nil {
		t.Fatalf("Failed to write deletion record: %v", err)
	}
	// Create a tombstone in JSONL for the same issue
	deletedAt := time.Now()
	tombstone := &types.Issue{
		ID: "test-abc123",
		Title: "(deleted)",
		Status: types.StatusTombstone,
		Priority: 2,
		IssueType: types.TypeTask,
		CreatedAt: time.Now().Add(-24 * time.Hour),
		UpdatedAt: deletedAt,
		DeletedAt: &deletedAt,
		DeletedBy: "bob",
		DeleteReason: "JSONL tombstone",
	}
	result, err := ImportIssues(ctx, tmpDB, store, []*types.Issue{tombstone}, Options{})
	if err != nil {
		t.Fatalf("Import failed: %v", err)
	}
	// The tombstone should be imported (not filtered by deletions manifest)
	if result.Created != 1 {
		t.Errorf("Expected 1 created (tombstone), got %d", result.Created)
	}
	if result.SkippedDeleted != 0 {
		t.Errorf("Expected 0 skipped deleted (tombstone should not be filtered), got %d", result.SkippedDeleted)
	}
}
// TestImportIssues_LegacyDeletionsConvertedToTombstones tests that entries in
// deletions.jsonl are converted to tombstones during import (bd-hp0m)
//
// One issue ("test-abc") is listed in the manifest and also arrives as a
// regular (non-tombstone) issue; the other ("test-def") is untouched.
func TestImportIssues_LegacyDeletionsConvertedToTombstones(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()
	tmpDB := tmpDir + "/test.db"
	store, err := sqlite.New(context.Background(), tmpDB)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer store.Close()
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	// Create a deletions manifest with one entry
	deletionsPath := deletions.DefaultPath(tmpDir)
	deleteTime := time.Now().Add(-time.Hour)
	del := deletions.DeletionRecord{
		ID: "test-abc",
		Timestamp: deleteTime,
		Actor: "alice",
		Reason: "duplicate of test-xyz",
	}
	if err := deletions.AppendDeletion(deletionsPath, del); err != nil {
		t.Fatalf("Failed to write deletion record: %v", err)
	}
	// Create a regular issue (not in deletions)
	regularIssue := &types.Issue{
		ID: "test-def",
		Title: "Regular issue",
		Status: types.StatusOpen,
		Priority: 2,
		IssueType: types.TypeTask,
		CreatedAt: time.Now().Add(-24 * time.Hour),
		UpdatedAt: time.Now(),
	}
	// Create an issue that's in the deletions manifest (non-tombstone)
	deletedIssue := &types.Issue{
		ID: "test-abc",
		Title: "This will be skipped and converted",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeBug,
		CreatedAt: time.Now().Add(-48 * time.Hour),
		UpdatedAt: time.Now().Add(-2 * time.Hour),
	}
	// Import both issues
	result, err := ImportIssues(ctx, tmpDB, store, []*types.Issue{regularIssue, deletedIssue}, Options{})
	if err != nil {
		t.Fatalf("Import failed: %v", err)
	}
	// Regular issue should be created
	// The deleted issue is skipped (in deletions manifest), but a tombstone is created from deletions.jsonl
	// So we expect: 1 regular + 1 tombstone = 2 created
	if result.Created != 2 {
		t.Errorf("Expected 2 created (1 regular + 1 tombstone from deletions.jsonl), got %d", result.Created)
	}
	if result.SkippedDeleted != 1 {
		t.Errorf("Expected 1 skipped deleted (issue in deletions.jsonl), got %d", result.SkippedDeleted)
	}
	// Verify ConvertedToTombstone counter (bd-wucl)
	if result.ConvertedToTombstone != 1 {
		t.Errorf("Expected 1 converted to tombstone, got %d", result.ConvertedToTombstone)
	}
	if len(result.ConvertedTombstoneIDs) != 1 || result.ConvertedTombstoneIDs[0] != "test-abc" {
		t.Errorf("Expected ConvertedTombstoneIDs [test-abc], got %v", result.ConvertedTombstoneIDs)
	}
	// Verify regular issue was imported (default filter hides tombstones)
	issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to search issues: %v", err)
	}
	foundRegular := false
	for _, i := range issues {
		if i.ID == "test-def" {
			foundRegular = true
		}
	}
	if !foundRegular {
		t.Error("Regular issue not found after import")
	}
	// Verify tombstone was created from deletions.jsonl; tombstones are only
	// visible with IncludeTombstones set.
	allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true})
	if err != nil {
		t.Fatalf("Failed to search all issues: %v", err)
	}
	var tombstone *types.Issue
	for _, i := range allIssues {
		if i.ID == "test-abc" {
			tombstone = i
			break
		}
	}
	// test-abc should be a tombstone (was in JSONL and deletions)
	if tombstone == nil {
		t.Fatal("Expected tombstone for test-abc not found")
	}
	if tombstone.Status != types.StatusTombstone {
		t.Errorf("Expected test-abc to be tombstone, got status %q", tombstone.Status)
	}
	// The tombstone carries the manifest's actor and reason, not the issue's.
	if tombstone.DeletedBy != "alice" {
		t.Errorf("Expected DeletedBy 'alice', got %q", tombstone.DeletedBy)
	}
	if tombstone.DeleteReason != "duplicate of test-xyz" {
		t.Errorf("Expected DeleteReason 'duplicate of test-xyz', got %q", tombstone.DeleteReason)
	}
}
// TestImportOrphanSkip_CountMismatch verifies that orphaned issues are properly
// skipped during import and tracked in the result count (bd-ckej).
//

View File

@@ -1,354 +0,0 @@
package importer
import (
"context"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// TestPurgeDeletedIssues tests that issues in the deletions manifest are converted to tombstones during import
//
// Three issues are seeded: one present in JSONL, one closed and listed in the
// manifest (expected to become a tombstone), and one local-only (untouched).
func TestPurgeDeletedIssues(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()
	// Create database
	dbPath := filepath.Join(tmpDir, "beads.db")
	store, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer store.Close()
	// Initialize prefix
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// Create some issues in the database
	issue1 := &types.Issue{
		ID: "test-abc",
		Title: "Issue 1",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	// issue2 is CLOSED so it can be safely deleted (bd-k92d: safety guard prevents deleting open/in_progress)
	closedTime := time.Now().UTC()
	issue2 := &types.Issue{
		ID: "test-def",
		Title: "Issue 2",
		Status: types.StatusClosed,
		Priority: 1,
		IssueType: types.TypeTask,
		ClosedAt: &closedTime,
	}
	issue3 := &types.Issue{
		ID: "test-ghi",
		Title: "Issue 3 (local work)",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	for _, iss := range []*types.Issue{issue1, issue2, issue3} {
		if err := store.CreateIssue(ctx, iss, "test"); err != nil {
			t.Fatalf("failed to create issue %s: %v", iss.ID, err)
		}
	}
	// Create a deletions manifest with issue2 deleted
	deletionsPath := deletions.DefaultPath(tmpDir)
	delRecord := deletions.DeletionRecord{
		ID: "test-def",
		Timestamp: time.Now().UTC(),
		Actor: "test-user",
		Reason: "test deletion",
	}
	if err := deletions.AppendDeletion(deletionsPath, delRecord); err != nil {
		t.Fatalf("failed to create deletions manifest: %v", err)
	}
	// Simulate import with only issue1 in the JSONL (issue2 was deleted, issue3 is local work)
	jsonlIssues := []*types.Issue{issue1}
	result := &Result{
		IDMapping: make(map[string]string),
		MismatchPrefixes: make(map[string]int),
	}
	// Call purgeDeletedIssues
	if err := purgeDeletedIssues(ctx, store, dbPath, jsonlIssues, Options{}, result); err != nil {
		t.Fatalf("purgeDeletedIssues failed: %v", err)
	}
	// Verify issue2 was tombstoned (bd-dve: now converts to tombstone instead of hard-delete)
	if result.Purged != 1 {
		t.Errorf("expected 1 purged issue, got %d", result.Purged)
	}
	if len(result.PurgedIDs) != 1 || result.PurgedIDs[0] != "test-def" {
		t.Errorf("expected PurgedIDs to contain 'test-def', got %v", result.PurgedIDs)
	}
	// Verify issue2 is now a tombstone (not hard-deleted)
	// GetIssue returns nil for tombstones by default, so use IncludeTombstones filter
	issues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true})
	if err != nil {
		t.Fatalf("SearchIssues failed: %v", err)
	}
	var iss2 *types.Issue
	for _, iss := range issues {
		if iss.ID == "test-def" {
			iss2 = iss
			break
		}
	}
	if iss2 == nil {
		t.Errorf("expected issue2 to exist as tombstone, but it was hard-deleted")
	} else if iss2.Status != types.StatusTombstone {
		t.Errorf("expected issue2 to be a tombstone, got status %q", iss2.Status)
	}
	// Verify issue1 still exists (in JSONL)
	iss1, err := store.GetIssue(ctx, "test-abc")
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if iss1 == nil {
		t.Errorf("expected issue1 to still exist")
	}
	// Verify issue3 still exists (local work, not in deletions manifest)
	iss3, err := store.GetIssue(ctx, "test-ghi")
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if iss3 == nil {
		t.Errorf("expected issue3 (local work) to still exist")
	}
}
// TestPurgeDeletedIssues_NoDeletionsManifest tests that import works without a deletions manifest
func TestPurgeDeletedIssues_NoDeletionsManifest(t *testing.T) {
	ctx := context.Background()
	dir := t.TempDir()
	// Open a fresh database inside the temp directory.
	dbFile := filepath.Join(dir, "beads.db")
	store, err := sqlite.New(ctx, dbFile)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer store.Close()
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// Seed a single open issue.
	seed := &types.Issue{
		ID:        "test-abc",
		Title:     "Issue 1",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	if err := store.CreateIssue(ctx, seed, "test"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// No deletions manifest is written, so the purge must be a no-op.
	inJSONL := []*types.Issue{seed}
	res := &Result{
		IDMapping:        make(map[string]string),
		MismatchPrefixes: make(map[string]int),
	}
	if err := purgeDeletedIssues(ctx, store, dbFile, inJSONL, Options{}, res); err != nil {
		t.Fatalf("purgeDeletedIssues failed: %v", err)
	}
	if res.Purged != 0 {
		t.Errorf("expected 0 purged issues, got %d", res.Purged)
	}
	// The seeded issue must survive untouched.
	got, err := store.GetIssue(ctx, "test-abc")
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if got == nil {
		t.Errorf("expected issue to still exist")
	}
}
// TestPurgeDeletedIssues_ProtectLocalExportIDs verifies that issues listed in
// Options.ProtectLocalExportIDs survive a purge even when they are absent from
// the imported JSONL (regression coverage for the bd-sync-deletion fix).
func TestPurgeDeletedIssues_ProtectLocalExportIDs(t *testing.T) {
	ctx := context.Background()
	dir := t.TempDir()

	// Open a fresh store backed by a temp database.
	dbFile := filepath.Join(dir, "beads.db")
	st, err := sqlite.New(ctx, dbFile)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer st.Close()

	if cfgErr := st.SetConfig(ctx, "issue_prefix", "test"); cfgErr != nil {
		t.Fatalf("failed to set prefix: %v", cfgErr)
	}

	// Three issues with distinct fates:
	//   test-abc: present in the JSONL (must survive)
	//   test-def: absent from the JSONL but protected (must survive - the fix)
	//   test-ghi: absent and unprotected (git-history check is skipped here)
	newIssue := func(id, title string) *types.Issue {
		return &types.Issue{
			ID:        id,
			Title:     title,
			Status:    types.StatusOpen,
			Priority:  1,
			IssueType: types.TypeTask,
		}
	}
	inJSONL := newIssue("test-abc", "Issue 1 (in JSONL)")
	protected := newIssue("test-def", "Issue 2 (protected local export)")
	unprotected := newIssue("test-ghi", "Issue 3 (unprotected)")
	for _, iss := range []*types.Issue{inJSONL, protected, unprotected} {
		if createErr := st.CreateIssue(ctx, iss, "test"); createErr != nil {
			t.Fatalf("failed to create issue %s: %v", iss.ID, createErr)
		}
	}

	// The merged JSONL only carries test-abc; test-def was in our local
	// export but lost during the merge (the scenario the fix addresses).
	res := &Result{
		IDMapping:        make(map[string]string),
		MismatchPrefixes: make(map[string]int),
	}
	opts := Options{
		ProtectLocalExportIDs: map[string]bool{
			"test-def": true, // Protect issue2
		},
		NoGitHistory: true, // Skip git history check for this test
	}
	if err := purgeDeletedIssues(ctx, st, dbFile, []*types.Issue{inJSONL}, opts, res); err != nil {
		t.Fatalf("purgeDeletedIssues failed: %v", err)
	}

	// Exactly one issue should have been preserved via protection.
	if res.PreservedLocalExport != 1 {
		t.Errorf("expected 1 preserved issue, got %d", res.PreservedLocalExport)
	}
	if len(res.PreservedLocalIDs) != 1 || res.PreservedLocalIDs[0] != "test-def" {
		t.Errorf("expected PreservedLocalIDs to contain 'test-def', got %v", res.PreservedLocalIDs)
	}

	// All three issues must still be retrievable after the purge.
	checks := []struct {
		id  string
		msg string
	}{
		{"test-abc", "expected issue1 to still exist"},
		{"test-def", "expected issue2 (protected local export) to still exist - THIS IS THE FIX"},
		{"test-ghi", "expected issue3 to still exist (git history check skipped)"},
	}
	for _, c := range checks {
		got, err := st.GetIssue(ctx, c.id)
		if err != nil {
			t.Fatalf("GetIssue failed: %v", err)
		}
		if got == nil {
			t.Error(c.msg)
		}
	}
}
// TestPurgeDeletedIssues_EmptyDeletionsManifest verifies that a zero-byte
// deletions manifest is tolerated: the purge succeeds and removes nothing.
func TestPurgeDeletedIssues_EmptyDeletionsManifest(t *testing.T) {
	ctx := context.Background()
	dir := t.TempDir()

	// Open a fresh store backed by a temp database.
	dbFile := filepath.Join(dir, "beads.db")
	st, err := sqlite.New(ctx, dbFile)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer st.Close()

	if cfgErr := st.SetConfig(ctx, "issue_prefix", "test"); cfgErr != nil {
		t.Fatalf("failed to set prefix: %v", cfgErr)
	}

	// Seed a single open task.
	iss := &types.Issue{
		ID:        "test-abc",
		Title:     "Issue 1",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	if createErr := st.CreateIssue(ctx, iss, "test"); createErr != nil {
		t.Fatalf("failed to create issue: %v", createErr)
	}

	// Write a zero-byte deletions manifest at the conventional location.
	if writeErr := os.WriteFile(deletions.DefaultPath(dir), []byte{}, 0644); writeErr != nil {
		t.Fatalf("failed to create empty deletions manifest: %v", writeErr)
	}

	res := &Result{
		IDMapping:        make(map[string]string),
		MismatchPrefixes: make(map[string]int),
	}
	if err := purgeDeletedIssues(ctx, st, dbFile, []*types.Issue{iss}, Options{}, res); err != nil {
		t.Fatalf("purgeDeletedIssues failed: %v", err)
	}

	// An empty manifest purges nothing.
	if res.Purged != 0 {
		t.Errorf("expected 0 purged issues, got %d", res.Purged)
	}
}

View File

@@ -122,9 +122,9 @@ func CommitToSyncBranch(ctx context.Context, repoRoot, syncBranch, jsonlPath str
return nil, fmt.Errorf("failed to sync JSONL to worktree: %w", err)
}
// Also sync other beads files (deletions.jsonl, metadata.json)
// Also sync other beads files (metadata.json)
beadsDir := filepath.Dir(jsonlPath)
for _, filename := range []string{"deletions.jsonl", "metadata.json"} {
for _, filename := range []string{"metadata.json"} {
srcPath := filepath.Join(beadsDir, filename)
if _, err := os.Stat(srcPath); err == nil {
relPath, err := filepath.Rel(repoRoot, srcPath)
@@ -326,12 +326,6 @@ func PullFromSyncBranch(ctx context.Context, repoRoot, syncBranch, jsonlPath str
return nil, fmt.Errorf("content merge failed: %w", err)
}
// Also merge deletions.jsonl if it exists
beadsRelDir := filepath.Dir(jsonlRelPath)
deletionsRelPath := filepath.Join(beadsRelDir, "deletions.jsonl")
mergedDeletions, deletionsErr := performDeletionsMerge(ctx, worktreePath, syncBranch, remote, deletionsRelPath)
// deletionsErr is non-fatal - file might not exist
// Reset worktree to remote's history (adopt their commit graph)
resetCmd := exec.CommandContext(ctx, "git", "-C", worktreePath, "reset", "--hard",
fmt.Sprintf("%s/%s", remote, syncBranch))
@@ -348,15 +342,6 @@ func PullFromSyncBranch(ctx context.Context, repoRoot, syncBranch, jsonlPath str
return nil, fmt.Errorf("failed to write merged JSONL: %w", err)
}
// Write merged deletions if we have them
if deletionsErr == nil && len(mergedDeletions) > 0 {
deletionsPath := filepath.Join(worktreePath, deletionsRelPath)
if err := os.WriteFile(deletionsPath, mergedDeletions, 0600); err != nil {
// Non-fatal - deletions are supplementary
_ = err
}
}
// Check if merge produced any changes from remote
hasChanges, err := hasChangesInWorktree(ctx, worktreePath, worktreeJSONLPath)
if err != nil {
@@ -679,62 +664,6 @@ func extractJSONLFromCommit(ctx context.Context, worktreePath, commit, filePath
return output, nil
}
// performDeletionsMerge merges deletions.jsonl from local and remote.
// Deletions are merged by union - we keep all deletion records from both
// sides, so an issue deleted on either side stays deleted. Lines are
// deduplicated verbatim and blank lines dropped; local records come first,
// followed by any remote-only records.
func performDeletionsMerge(ctx context.Context, worktreePath, branch, remote, deletionsRelPath string) ([]byte, error) {
	// Pull the deletions file content from each side's head commit.
	localDeletions, localErr := extractJSONLFromCommit(ctx, worktreePath, "HEAD", deletionsRelPath)
	remoteRef := fmt.Sprintf("%s/%s", remote, branch)
	remoteDeletions, remoteErr := extractJSONLFromCommit(ctx, worktreePath, remoteRef, deletionsRelPath)

	switch {
	case localErr != nil && remoteErr != nil:
		// Neither side has a deletions file - nothing to merge.
		return nil, fmt.Errorf("no deletions files to merge")
	case localErr != nil:
		// Only the remote side has deletions.
		return remoteDeletions, nil
	case remoteErr != nil:
		// Only the local side has deletions.
		return localDeletions, nil
	}

	// Both sides exist: take the union of non-empty lines, deduplicated.
	// Each line in deletions.jsonl is a JSON object with an "id" field.
	seen := make(map[string]bool)
	var merged []byte
	for _, source := range [][]byte{localDeletions, remoteDeletions} {
		for _, line := range strings.Split(string(source), "\n") {
			line = strings.TrimSpace(line)
			if line == "" || seen[line] {
				continue
			}
			seen[line] = true
			merged = append(merged, []byte(line+"\n")...)
		}
	}
	return merged, nil
}
// copyJSONLToMainRepo copies JSONL and related files from worktree to main repo.
func copyJSONLToMainRepo(worktreePath, jsonlRelPath, jsonlPath string) error {
worktreeJSONLPath := filepath.Join(worktreePath, jsonlRelPath)
@@ -755,10 +684,10 @@ func copyJSONLToMainRepo(worktreePath, jsonlRelPath, jsonlPath string) error {
return fmt.Errorf("failed to write main JSONL: %w", err)
}
// Also sync other beads files back (deletions.jsonl, metadata.json)
// Also sync other beads files back (metadata.json)
beadsDir := filepath.Dir(jsonlPath)
worktreeBeadsDir := filepath.Dir(worktreeJSONLPath)
for _, filename := range []string{"deletions.jsonl", "metadata.json"} {
for _, filename := range []string{"metadata.json"} {
worktreeSrcPath := filepath.Join(worktreeBeadsDir, filename)
if fileData, err := os.ReadFile(worktreeSrcPath); err == nil {
dstPath := filepath.Join(beadsDir, filename)

View File

@@ -313,96 +313,6 @@ func TestPerformContentMerge(t *testing.T) {
})
}
// TestPerformDeletionsMerge tests the deletions merge function
//
// Each subtest builds a real git repository (via setupTestRepo), simulates a
// divergent local head and remote-tracking ref by committing and moving refs,
// then checks that performDeletionsMerge returns the union of the deletion
// records. Skipped with -short because it shells out to git.
func TestPerformDeletionsMerge(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test in short mode")
	}
	ctx := context.Background()
	t.Run("merges deletions from both sides", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		runGit(t, repoDir, "checkout", "-b", "test-branch")
		// Base: no deletions
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "base commit")
		baseCommit := strings.TrimSpace(getGitOutput(t, repoDir, "rev-parse", "HEAD"))
		// Local: delete issue-A
		writeFile(t, filepath.Join(repoDir, ".beads", "deletions.jsonl"), `{"id":"issue-A","deleted_at":"2024-01-01T00:00:00Z"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "local deletion")
		localHead := strings.TrimSpace(getGitOutput(t, repoDir, "rev-parse", "HEAD"))
		// Remote: delete issue-B (a second commit diverging from the same base)
		runGit(t, repoDir, "checkout", baseCommit)
		writeFile(t, filepath.Join(repoDir, ".beads", "deletions.jsonl"), `{"id":"issue-B","deleted_at":"2024-01-02T00:00:00Z"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "remote deletion")
		// Point the fake remote-tracking ref at the divergent head so the
		// merge sees it as origin/test-branch.
		runGit(t, repoDir, "update-ref", "refs/remotes/origin/test-branch", "HEAD")
		// Go back to local
		runGit(t, repoDir, "checkout", "-B", "test-branch", localHead)
		// Perform merge
		merged, err := performDeletionsMerge(ctx, repoDir, "test-branch", "origin", ".beads/deletions.jsonl")
		if err != nil {
			t.Fatalf("performDeletionsMerge() error = %v", err)
		}
		// Both deletions should be present (union semantics)
		mergedStr := string(merged)
		if !strings.Contains(mergedStr, "issue-A") {
			t.Error("merged deletions missing issue-A")
		}
		if !strings.Contains(mergedStr, "issue-B") {
			t.Error("merged deletions missing issue-B")
		}
	})
	t.Run("handles only local deletions", func(t *testing.T) {
		repoDir := setupTestRepo(t)
		defer os.RemoveAll(repoDir)
		runGit(t, repoDir, "checkout", "-b", "test-branch")
		// Base: no deletions
		writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "base commit")
		baseCommit := strings.TrimSpace(getGitOutput(t, repoDir, "rev-parse", "HEAD"))
		// Local: has deletions
		writeFile(t, filepath.Join(repoDir, ".beads", "deletions.jsonl"), `{"id":"issue-A"}`)
		runGit(t, repoDir, "add", ".")
		runGit(t, repoDir, "commit", "-m", "local deletion")
		localHead := strings.TrimSpace(getGitOutput(t, repoDir, "rev-parse", "HEAD"))
		// Remote: no deletions file (its ref stays at the base commit)
		runGit(t, repoDir, "checkout", baseCommit)
		runGit(t, repoDir, "update-ref", "refs/remotes/origin/test-branch", "HEAD")
		// Go back to local
		runGit(t, repoDir, "checkout", "-B", "test-branch", localHead)
		// Perform merge
		merged, err := performDeletionsMerge(ctx, repoDir, "test-branch", "origin", ".beads/deletions.jsonl")
		if err != nil {
			t.Fatalf("performDeletionsMerge() error = %v", err)
		}
		// Local deletions should be present
		if !strings.Contains(string(merged), "issue-A") {
			t.Error("merged deletions missing issue-A")
		}
	})
}
// Helper functions
func setupTestRepo(t *testing.T) string {