refactor: remove all deletions.jsonl code (bd-fom)

Complete removal of the legacy deletions.jsonl manifest system.
Tombstones are now the sole deletion mechanism.

Removed:
- internal/deletions/ - entire package
- cmd/bd/deleted.go - deleted command
- cmd/bd/doctor/fix/deletions.go - HydrateDeletionsManifest
- Tests for all removed functionality

Cleaned:
- cmd/bd/sync.go - removed sanitize, auto-compact
- cmd/bd/delete.go - removed dual-writes
- cmd/bd/doctor.go - removed checkDeletionsManifest
- internal/importer/importer.go - removed deletions checks
- internal/syncbranch/worktree.go - removed deletions merge
- cmd/bd/integrity.go - updated validation (warn-only on decrease)

Files removed: 12
Lines removed: ~7500

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-16 14:20:32 -08:00
parent e0528de590
commit 9f76cfda01
32 changed files with 298 additions and 7534 deletions

View File

@@ -192,7 +192,6 @@ func autoImportIfNewer() {
SkipUpdate: false,
Strict: false,
SkipPrefixValidation: true, // Auto-import is lenient about prefixes
NoGitHistory: true, // Skip git history backfill during auto-import (bd-4pv)
}
result, err := importIssuesCore(ctx, dbPath, store, allIssues, opts)

View File

@@ -323,7 +323,6 @@ func importFromGit(ctx context.Context, dbFilePath string, store storage.Storage
DryRun: false,
SkipUpdate: false,
SkipPrefixValidation: true, // Auto-import is lenient about prefixes
NoGitHistory: true, // Skip git history backfill during auto-import (bd-4pv)
}
_, err = importIssuesCore(ctx, dbFilePath, store, issues, opts)

View File

@@ -11,8 +11,6 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/compact"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
@@ -31,8 +29,7 @@ var (
compactAuto bool
compactSummary string
compactActor string
compactLimit int
compactRetention int
compactLimit int
)
var compactCmd = &cobra.Command{
@@ -52,11 +49,6 @@ Tiers:
- Tier 1: Semantic compression (30 days closed, 70% reduction)
- Tier 2: Ultra compression (90 days closed, 95% reduction)
Deletions Pruning:
All modes also prune old deletion records from deletions.jsonl to prevent
unbounded growth. Default retention is 3 days (configurable via --retention
or deletions_retention_days in metadata.json).
Tombstone Pruning:
All modes also prune expired tombstones from issues.jsonl. Tombstones are
soft-delete markers that prevent resurrection of deleted issues. After the
@@ -75,9 +67,6 @@ Examples:
# Statistics
bd compact --stats # Show statistics
# Override retention period
bd compact --auto --all --retention=14 # Keep 14 days of deletions
`,
Run: func(_ *cobra.Command, _ []string) {
// Compact modifies data unless --stats or --analyze or --dry-run
@@ -309,9 +298,6 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
float64(savingBytes)/float64(originalSize)*100)
fmt.Printf(" Time: %v\n", elapsed)
// Prune old deletion records
pruneDeletionsManifest()
// Prune expired tombstones (bd-okh)
if tombstonePruneResult, err := pruneExpiredTombstones(0); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to prune expired tombstones: %v\n", err)
@@ -444,9 +430,6 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
fmt.Printf(" Saved: %d bytes (%.1f%%)\n", totalSaved, float64(totalSaved)/float64(totalOriginal)*100)
}
// Prune old deletion records
pruneDeletionsManifest()
// Prune expired tombstones (bd-okh)
if tombstonePruneResult, err := pruneExpiredTombstones(0); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to prune expired tombstones: %v\n", err)
@@ -890,9 +873,6 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
elapsed := time.Since(start)
// Prune old deletion records (do this before JSON output so we can include results)
pruneResult, retentionDays := pruneDeletionsManifest()
// Prune expired tombstones from issues.jsonl (bd-okh)
tombstonePruneResult, tombstoneErr := pruneExpiredTombstones(0)
if tombstoneErr != nil && !jsonOutput {
@@ -910,13 +890,6 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
"reduction_pct": reductionPct,
"elapsed_ms": elapsed.Milliseconds(),
}
// Include pruning results if any deletions were pruned (bd-v29)
if pruneResult != nil && pruneResult.PrunedCount > 0 {
output["deletions_pruned"] = map[string]interface{}{
"count": pruneResult.PrunedCount,
"retention_days": retentionDays,
}
}
// Include tombstone pruning results (bd-okh)
if tombstonePruneResult != nil && tombstonePruneResult.PrunedCount > 0 {
output["tombstones_pruned"] = map[string]interface{}{
@@ -932,11 +905,6 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
fmt.Printf(" %d → %d bytes (saved %d, %.1f%%)\n", originalSize, compactedSize, savingBytes, reductionPct)
fmt.Printf(" Time: %v\n", elapsed)
// Report pruning results for human-readable output
if pruneResult != nil && pruneResult.PrunedCount > 0 {
fmt.Printf("\nDeletions pruned: %d records older than %d days removed\n", pruneResult.PrunedCount, retentionDays)
}
// Report tombstone pruning results (bd-okh)
if tombstonePruneResult != nil && tombstonePruneResult.PrunedCount > 0 {
fmt.Printf("\nTombstones pruned: %d expired tombstones (older than %d days) removed\n",
@@ -947,40 +915,6 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
markDirtyAndScheduleFlush()
}
// pruneDeletionsManifest removes deletion records older than the configured
// retention window from the deletions manifest.
// Returns the prune result and the retention period (in days) that was
// applied, so callers can include both in their output.
// The manifest location is derived from the global dbPath.
func pruneDeletionsManifest() (*deletions.PruneResult, int) {
	dir := filepath.Dir(dbPath)

	// Resolve the retention window: an explicit --retention flag wins,
	// otherwise fall back to the config file, then the built-in default.
	days := compactRetention
	if days <= 0 {
		cfg, err := configfile.Load(dir)
		switch {
		case err != nil:
			if !jsonOutput {
				fmt.Fprintf(os.Stderr, "Warning: could not load config for retention settings: %v\n", err)
			}
			days = configfile.DefaultDeletionsRetentionDays
		case cfg != nil:
			days = cfg.GetDeletionsRetentionDays()
		default:
			days = configfile.DefaultDeletionsRetentionDays
		}
	}

	res, err := deletions.PruneDeletions(deletions.DefaultPath(dir), days)
	if err != nil {
		if !jsonOutput {
			fmt.Fprintf(os.Stderr, "Warning: failed to prune deletions: %v\n", err)
		}
		return nil, days
	}
	return res, days
}
// TombstonePruneResult contains the results of tombstone pruning
type TombstonePruneResult struct {
PrunedCount int
@@ -1159,8 +1093,5 @@ func init() {
compactCmd.Flags().StringVar(&compactActor, "actor", "agent", "Actor name for audit trail")
compactCmd.Flags().IntVar(&compactLimit, "limit", 0, "Limit number of candidates (0 = no limit)")
// Deletions pruning flag
compactCmd.Flags().IntVar(&compactRetention, "retention", 0, "Deletion retention days (0 = use config default)")
rootCmd.AddCommand(compactCmd)
}

View File

@@ -197,7 +197,6 @@ func importToJSONLWithStore(ctx context.Context, store storage.Storage, jsonlPat
SkipUpdate: false,
Strict: false,
SkipPrefixValidation: true, // Skip prefix validation for auto-import
NoGitHistory: true, // Skip git history backfill during auto-import (bd-4pv)
}
_, err = importIssuesCore(ctx, "", store, issues, opts)

View File

@@ -7,14 +7,11 @@ import (
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
@@ -241,13 +238,7 @@ Force: Delete and orphan dependents
return
}
// Actually delete
// 0. Record deletion in manifest FIRST (before any DB changes)
// This ensures deletion propagates via git sync even if DB operations fail
deleteActor := getActorWithGit()
if err := recordDeletion(issueID, deleteActor, "manual delete"); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to record deletion: %v\n", err)
os.Exit(1)
}
// 1. Update text references in connected issues (all text fields)
updatedIssueCount := 0
for id, connIssue := range connectedIssues {
@@ -508,13 +499,6 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
}
}
}
// Record deletions in manifest FIRST (before any DB changes)
// This ensures deletion propagates via git sync even if DB operations fail
deleteActor := getActorWithGit()
if err := recordDeletions(issueIDs, deleteActor, reason); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to record deletions: %v\n", err)
os.Exit(1)
}
// Actually delete
result, err := d.DeleteIssues(ctx, issueIDs, cascade, force, false)
if err != nil {
@@ -674,60 +658,6 @@ func getActorWithGit() string {
return "unknown"
}
// getDeletionsPath returns the path to the deletions manifest file.
// Uses the same directory as the database.
func getDeletionsPath() string {
// Get the .beads directory from dbPath
beadsDir := filepath.Dir(dbPath)
return deletions.DefaultPath(beadsDir)
}
// recordDeletion appends a deletion record to the deletions manifest.
// This MUST be called BEFORE deleting from the database to ensure
// deletion records are never lost.
// After tombstone migration (bd-ffr9), this is a no-op since inline tombstones
// are used instead of deletions.jsonl.
func recordDeletion(id, deleteActor, reason string) error {
// bd-ffr9: Skip writing to deletions.jsonl if tombstone migration is complete
beadsDir := filepath.Dir(dbPath)
if deletions.IsTombstoneMigrationComplete(beadsDir) {
return nil
}
record := deletions.DeletionRecord{
ID: id,
Timestamp: time.Now().UTC(),
Actor: deleteActor,
Reason: reason,
}
return deletions.AppendDeletion(getDeletionsPath(), record)
}
// recordDeletions appends multiple deletion records to the deletions manifest.
// This MUST be called BEFORE deleting from the database to ensure
// deletion records are never lost.
// After tombstone migration (bd-ffr9), this is a no-op since inline tombstones
// are used instead of deletions.jsonl.
func recordDeletions(ids []string, deleteActor, reason string) error {
// bd-ffr9: Skip writing to deletions.jsonl if tombstone migration is complete
beadsDir := filepath.Dir(dbPath)
if deletions.IsTombstoneMigrationComplete(beadsDir) {
return nil
}
path := getDeletionsPath()
for _, id := range ids {
record := deletions.DeletionRecord{
ID: id,
Timestamp: time.Now().UTC(),
Actor: deleteActor,
Reason: reason,
}
if err := deletions.AppendDeletion(path, record); err != nil {
return fmt.Errorf("failed to record deletion for %s: %w", id, err)
}
}
return nil
}
func init() {
deleteCmd.Flags().BoolP("force", "f", false, "Actually delete (without this flag, shows preview)")
deleteCmd.Flags().String("from-file", "", "Read issue IDs from file (one per line)")

View File

@@ -1,256 +0,0 @@
package main
import (
"context"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// TestRecordDeletion verifies that recordDeletion appends a single entry to
// the deletions manifest with the expected ID, actor, reason, and a recent
// timestamp.
func TestRecordDeletion(t *testing.T) {
	tmpDir := t.TempDir()
	// Set up dbPath so getDeletionsPath() works; the manifest is resolved
	// relative to the database's directory.
	oldDbPath := dbPath
	dbPath = filepath.Join(tmpDir, "beads.db")
	defer func() { dbPath = oldDbPath }()
	// Create the .beads directory
	if err := os.MkdirAll(tmpDir, 0750); err != nil {
		t.Fatalf("failed to create directory: %v", err)
	}
	// Test recordDeletion
	err := recordDeletion("test-abc", "test-user", "test reason")
	if err != nil {
		t.Fatalf("recordDeletion failed: %v", err)
	}
	// Verify the deletion was recorded by reading the manifest back.
	deletionsPath := getDeletionsPath()
	result, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(result.Records) != 1 {
		t.Fatalf("expected 1 deletion record, got %d", len(result.Records))
	}
	del, found := result.Records["test-abc"]
	if !found {
		t.Fatalf("deletion record for 'test-abc' not found")
	}
	if del.Actor != "test-user" {
		t.Errorf("expected actor 'test-user', got '%s'", del.Actor)
	}
	if del.Reason != "test reason" {
		t.Errorf("expected reason 'test reason', got '%s'", del.Reason)
	}
	// Timestamp should be recent (within last minute)
	if time.Since(del.Timestamp) > time.Minute {
		t.Errorf("timestamp seems too old: %v", del.Timestamp)
	}
}
// TestRecordDeletion_SkipsAfterMigration tests that recordDeletion is a no-op
// after tombstone migration (bd-ffr9): when the migration marker file exists,
// no deletions.jsonl is written because inline tombstones carry the deletion.
func TestRecordDeletion_SkipsAfterMigration(t *testing.T) {
	tmpDir := t.TempDir()
	// Set up dbPath so getDeletionsPath() works
	oldDbPath := dbPath
	dbPath = filepath.Join(tmpDir, "beads.db")
	defer func() { dbPath = oldDbPath }()
	// Create the .beads directory
	if err := os.MkdirAll(tmpDir, 0750); err != nil {
		t.Fatalf("failed to create directory: %v", err)
	}
	// Create the .migrated marker file to indicate tombstone migration is
	// complete; recordDeletion consults this before writing anything.
	migratedPath := filepath.Join(tmpDir, "deletions.jsonl.migrated")
	if err := os.WriteFile(migratedPath, []byte("{}"), 0644); err != nil {
		t.Fatalf("failed to create migrated marker: %v", err)
	}
	// Test recordDeletion - should be a no-op
	err := recordDeletion("test-abc", "test-user", "test reason")
	if err != nil {
		t.Fatalf("recordDeletion failed: %v", err)
	}
	// Verify deletions.jsonl was NOT created
	deletionsPath := getDeletionsPath()
	if _, err := os.Stat(deletionsPath); !os.IsNotExist(err) {
		t.Error("deletions.jsonl should not be created after tombstone migration")
	}
}
// TestRecordDeletions verifies that recordDeletions writes one manifest entry
// per ID, all sharing the same actor and reason.
func TestRecordDeletions(t *testing.T) {
	tmpDir := t.TempDir()
	// Set up dbPath so getDeletionsPath() works
	oldDbPath := dbPath
	dbPath = filepath.Join(tmpDir, "beads.db")
	defer func() { dbPath = oldDbPath }()
	// Create the .beads directory
	if err := os.MkdirAll(tmpDir, 0750); err != nil {
		t.Fatalf("failed to create directory: %v", err)
	}
	// Test recordDeletions with multiple IDs
	ids := []string{"test-abc", "test-def", "test-ghi"}
	err := recordDeletions(ids, "batch-user", "batch cleanup")
	if err != nil {
		t.Fatalf("recordDeletions failed: %v", err)
	}
	// Verify the deletions were recorded — one record per ID.
	deletionsPath := getDeletionsPath()
	result, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(result.Records) != 3 {
		t.Fatalf("expected 3 deletion records, got %d", len(result.Records))
	}
	for _, id := range ids {
		del, found := result.Records[id]
		if !found {
			t.Errorf("deletion record for '%s' not found", id)
			continue
		}
		if del.Actor != "batch-user" {
			t.Errorf("expected actor 'batch-user' for %s, got '%s'", id, del.Actor)
		}
		if del.Reason != "batch cleanup" {
			t.Errorf("expected reason 'batch cleanup' for %s, got '%s'", id, del.Reason)
		}
	}
}
// TestGetActorWithGit exercises how getActorWithGit sources the actor name:
// an explicitly set value is returned verbatim, while "unknown" or empty
// values fall back to git config (whose exact value we cannot assert here,
// so we only require a non-empty result).
func TestGetActorWithGit(t *testing.T) {
	// Restore the package-level actor after the test.
	prev := actor
	defer func() { actor = prev }()

	// Case 1: an explicitly set actor (flag/env) is returned unchanged.
	actor = "flag-user"
	if got := getActorWithGit(); got != "flag-user" {
		t.Errorf("expected 'flag-user' when actor is set, got '%s'", got)
	}

	// Case 2: "unknown" triggers the git-config fallback.
	actor = "unknown"
	if got := getActorWithGit(); got == "" {
		t.Errorf("expected non-empty result when actor is 'unknown'")
	}

	// Case 3: an empty actor also falls back to git config.
	actor = ""
	if got := getActorWithGit(); got == "" {
		t.Errorf("expected non-empty result when actor is empty")
	}
}
// TestDeleteRecordingOrderOfOperations verifies the manifest-first contract:
// the deletion record is written BEFORE the issue is removed from the
// database, so a failed DB delete can never lose the audit record.
func TestDeleteRecordingOrderOfOperations(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()
	// Set up dbPath (global) so recordDeletion/getDeletionsPath resolve
	// the manifest next to the test database.
	oldDbPath := dbPath
	dbPath = filepath.Join(tmpDir, "beads.db")
	defer func() { dbPath = oldDbPath }()
	// Create database
	testStore, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer testStore.Close()
	// Initialize prefix
	if err := testStore.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}
	// Create an issue
	issue := &types.Issue{
		ID:        "test-delete-order",
		Title:     "Test Order of Operations",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	if err := testStore.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Record deletion (simulating what the delete command does first)
	if err := recordDeletion(issue.ID, "test-user", "order test"); err != nil {
		t.Fatalf("recordDeletion failed: %v", err)
	}
	// Verify record was created BEFORE any DB changes
	deletionsPath := getDeletionsPath()
	result, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if _, found := result.Records[issue.ID]; !found {
		t.Error("deletion record should exist before DB deletion")
	}
	// Now verify the issue still exists in DB (we only recorded, didn't delete)
	existing, err := testStore.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if existing == nil {
		t.Error("issue should still exist in DB (we only recorded the deletion)")
	}
	// Now delete from DB
	if err := testStore.DeleteIssue(ctx, issue.ID); err != nil {
		t.Fatalf("DeleteIssue failed: %v", err)
	}
	// Verify both: deletion record exists AND issue is gone from DB
	result, err = deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if _, found := result.Records[issue.ID]; !found {
		t.Error("deletion record should still exist after DB deletion")
	}
	existing, err = testStore.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if existing != nil {
		t.Error("issue should be gone from DB after deletion")
	}
}

View File

@@ -1,194 +0,0 @@
package main
import (
"fmt"
"os"
"sort"
"strings"
"time"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/deletions"
)
// Flags for the `bd deleted` command.
var (
	deletedSince string // --since: time window, e.g. "7d", "30d", "2w"
	deletedAll   bool   // --all: ignore the time window and show everything
)

// deletedCmd implements `bd deleted`: an audit view over the deletions
// manifest showing what was deleted, when, by whom, and (optionally) why.
var deletedCmd = &cobra.Command{
	Use:   "deleted [issue-id]",
	Short: "Show deleted issues from the deletions manifest",
	Long: `Show issues that have been deleted and are tracked in the deletions manifest.
This command provides an audit trail of deleted issues, showing:
- Which issues were deleted
- When they were deleted
- Who deleted them
- Optional reason for deletion
Examples:
bd deleted # Show recent deletions (last 7 days)
bd deleted --since=30d # Show deletions in last 30 days
bd deleted --all # Show all tracked deletions
bd deleted bd-xxx # Show deletion details for specific issue`,
	Args: cobra.MaximumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		// Locate the repository's .beads directory; the manifest lives there.
		beadsDir := findBeadsDir()
		if beadsDir == "" {
			fmt.Fprintf(os.Stderr, "Error: not in a beads repository (no .beads directory found)\n")
			os.Exit(1)
		}
		deletionsPath := deletions.DefaultPath(beadsDir)
		result, err := deletions.LoadDeletions(deletionsPath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error loading deletions: %v\n", err)
			os.Exit(1)
		}
		// Print any warnings (e.g. malformed lines skipped on load)
		for _, w := range result.Warnings {
			fmt.Fprintf(os.Stderr, "Warning: %s\n", w)
		}
		// If looking for a specific issue, show just that record and stop.
		if len(args) == 1 {
			issueID := args[0]
			displaySingleDeletion(result.Records, issueID)
			return
		}
		// Filter by time range; --all skips the cutoff entirely.
		var cutoff time.Time
		if !deletedAll {
			duration, err := parseDuration(deletedSince)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error: invalid --since value '%s': %v\n", deletedSince, err)
				os.Exit(1)
			}
			cutoff = time.Now().Add(-duration)
		}
		// Collect and sort records
		var records []deletions.DeletionRecord
		for _, r := range result.Records {
			if deletedAll || r.Timestamp.After(cutoff) {
				records = append(records, r)
			}
		}
		// Sort by timestamp descending (most recent first)
		sort.Slice(records, func(i, j int) bool {
			return records[i].Timestamp.After(records[j].Timestamp)
		})
		if jsonOutput {
			outputJSON(records)
			return
		}
		displayDeletions(records, deletedSince, deletedAll)
	},
}
// displaySingleDeletion prints the manifest entry for a single issue ID, or a
// not-found notice when no record exists (never deleted, or already pruned).
// Honors the global jsonOutput flag.
func displaySingleDeletion(records map[string]deletions.DeletionRecord, issueID string) {
	rec, ok := records[issueID]

	if jsonOutput {
		// JSON shape differs by outcome: found carries the record,
		// not-found echoes the requested ID.
		payload := map[string]interface{}{"found": ok}
		if ok {
			payload["record"] = rec
		} else {
			payload["id"] = issueID
		}
		outputJSON(payload)
		return
	}

	if !ok {
		fmt.Printf("Issue %s not found in deletions manifest\n", issueID)
		fmt.Println("(This could mean the issue was never deleted, or the deletion record was pruned)")
		return
	}

	cyan := color.New(color.FgCyan).SprintFunc()
	fmt.Printf("\n%s Deletion record for %s:\n\n", cyan("🗑️"), issueID)
	fmt.Printf(" ID: %s\n", rec.ID)
	fmt.Printf(" Deleted: %s\n", rec.Timestamp.Local().Format("2006-01-02 15:04:05"))
	fmt.Printf(" By: %s\n", rec.Actor)
	if rec.Reason != "" {
		fmt.Printf(" Reason: %s\n", rec.Reason)
	}
	fmt.Println()
}
// displayDeletions renders a human-readable listing of deletion records
// (already sorted by the caller). The header and empty-state message vary by
// whether a time window ("since") or the full manifest ("all") was requested.
func displayDeletions(records []deletions.DeletionRecord, since string, all bool) {
	if len(records) == 0 {
		green := color.New(color.FgGreen).SprintFunc()
		if all {
			fmt.Printf("\n%s No deletions tracked in manifest\n\n", green("✨"))
		} else {
			fmt.Printf("\n%s No deletions in the last %s\n\n", green("✨"), since)
		}
		return
	}

	cyan := color.New(color.FgCyan).SprintFunc()
	if all {
		fmt.Printf("\n%s All tracked deletions (%d total):\n\n", cyan("🗑️"), len(records))
	} else {
		fmt.Printf("\n%s Deletions in the last %s (%d total):\n\n", cyan("🗑️"), since, len(records))
	}

	// One line per record: ID, local timestamp, actor, optional reason.
	for _, rec := range records {
		when := rec.Timestamp.Local().Format("2006-01-02 15:04")
		note := ""
		if rec.Reason != "" {
			note = " " + rec.Reason
		}
		fmt.Printf(" %-12s %s %-12s%s\n", rec.ID, when, rec.Actor, note)
	}
	fmt.Println()
}
// parseDuration parses a duration string like "7d", "30d", "2w"
func parseDuration(s string) (time.Duration, error) {
s = strings.TrimSpace(strings.ToLower(s))
if s == "" {
return 7 * 24 * time.Hour, nil // default 7 days
}
// Check for special suffixes
if strings.HasSuffix(s, "d") {
days := s[:len(s)-1]
var d int
if _, err := fmt.Sscanf(days, "%d", &d); err != nil {
return 0, fmt.Errorf("invalid days format: %s", s)
}
return time.Duration(d) * 24 * time.Hour, nil
}
if strings.HasSuffix(s, "w") {
weeks := s[:len(s)-1]
var w int
if _, err := fmt.Sscanf(weeks, "%d", &w); err != nil {
return 0, fmt.Errorf("invalid weeks format: %s", s)
}
return time.Duration(w) * 7 * 24 * time.Hour, nil
}
// Try standard Go duration
return time.ParseDuration(s)
}
// init registers the `deleted` command and its flags with the root command.
func init() {
	deletedCmd.Flags().StringVar(&deletedSince, "since", "7d", "Show deletions within this time range (e.g., 7d, 30d, 2w)")
	deletedCmd.Flags().BoolVar(&deletedAll, "all", false, "Show all tracked deletions")
	deletedCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output JSON format")
	rootCmd.AddCommand(deletedCmd)
}

View File

@@ -1,636 +0,0 @@
//go:build integration
// +build integration
package main
import (
"bytes"
"context"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/importer"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// importJSONLFile parses a JSONL file and imports its issues via
// importer.ImportIssues. A missing file is treated as an empty import
// rather than an error.
func importJSONLFile(ctx context.Context, store *sqlite.SQLiteStorage, dbPath, jsonlPath string, opts importer.Options) (*importer.Result, error) {
	raw, err := os.ReadFile(jsonlPath)
	switch {
	case os.IsNotExist(err):
		// No file means nothing to import — run an empty import.
		return importer.ImportIssues(ctx, dbPath, store, nil, opts)
	case err != nil:
		return nil, err
	}

	// Decode one issue per JSON document until the stream is exhausted.
	var parsed []*types.Issue
	dec := json.NewDecoder(bytes.NewReader(raw))
	for dec.More() {
		issue := new(types.Issue)
		if err := dec.Decode(issue); err != nil {
			return nil, err
		}
		parsed = append(parsed, issue)
	}
	return importer.ImportIssues(ctx, dbPath, store, parsed, opts)
}
// TestDeletionPropagation_AcrossClones verifies that when an issue is deleted
// in one clone, the deletion propagates to other clones via the deletions
// manifest (deletions.jsonl) synced through git.
func TestDeletionPropagation_AcrossClones(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	ctx := context.Background()
	tempDir := t.TempDir()
	// Create "remote" repository (bare; acts as the shared origin)
	remoteDir := filepath.Join(tempDir, "remote")
	if err := os.MkdirAll(remoteDir, 0750); err != nil {
		t.Fatalf("Failed to create remote dir: %v", err)
	}
	runGitCmd(t, remoteDir, "init", "--bare")
	// Create clone1 (will create and delete issue)
	clone1Dir := filepath.Join(tempDir, "clone1")
	runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
	configureGit(t, clone1Dir)
	// Create clone2 (will receive deletion via sync)
	clone2Dir := filepath.Join(tempDir, "clone2")
	runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
	configureGit(t, clone2Dir)
	// Initialize beads in clone1
	clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
	if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	clone1DBPath := filepath.Join(clone1BeadsDir, "beads.db")
	clone1Store := newTestStore(t, clone1DBPath)
	defer clone1Store.Close()
	// Create an issue in clone1
	issue := &types.Issue{
		Title:     "Issue to be deleted",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := clone1Store.CreateIssue(ctx, issue, "test-user"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}
	issueID := issue.ID
	t.Logf("Created issue: %s", issueID)
	// Export to JSONL
	clone1JSONLPath := filepath.Join(clone1BeadsDir, "beads.jsonl")
	if err := exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath); err != nil {
		t.Fatalf("Failed to export: %v", err)
	}
	// Commit and push from clone1
	runGitCmd(t, clone1Dir, "add", ".beads")
	runGitCmd(t, clone1Dir, "commit", "-m", "Add issue")
	runGitCmd(t, clone1Dir, "push", "origin", "master")
	// Clone2 pulls the issue
	runGitCmd(t, clone2Dir, "pull")
	// Initialize beads in clone2
	clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
	clone2DBPath := filepath.Join(clone2BeadsDir, "beads.db")
	clone2Store := newTestStore(t, clone2DBPath)
	defer clone2Store.Close()
	// Import to clone2
	clone2JSONLPath := filepath.Join(clone2BeadsDir, "beads.jsonl")
	result, err := importJSONLFile(ctx, clone2Store, clone2DBPath, clone2JSONLPath, importer.Options{})
	if err != nil {
		t.Fatalf("Failed to import to clone2: %v", err)
	}
	t.Logf("Clone2 import: created=%d, updated=%d", result.Created, result.Updated)
	// Verify clone2 has the issue
	clone2Issue, err := clone2Store.GetIssue(ctx, issueID)
	if err != nil {
		t.Fatalf("Failed to get issue from clone2: %v", err)
	}
	if clone2Issue == nil {
		t.Fatal("Clone2 should have the issue after import")
	}
	t.Log("✓ Both clones have the issue")
	// Clone1 deletes the issue
	if err := clone1Store.DeleteIssue(ctx, issueID); err != nil {
		t.Fatalf("Failed to delete issue from clone1: %v", err)
	}
	// Record deletion in manifest so it propagates to other clones via git
	clone1DeletionsPath := filepath.Join(clone1BeadsDir, "deletions.jsonl")
	delRecord := deletions.DeletionRecord{
		ID:        issueID,
		Timestamp: time.Now().UTC(),
		Actor:     "test-user",
		Reason:    "test deletion",
	}
	if err := deletions.AppendDeletion(clone1DeletionsPath, delRecord); err != nil {
		t.Fatalf("Failed to record deletion: %v", err)
	}
	// Re-export JSONL (issue is now gone)
	if err := exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath); err != nil {
		t.Fatalf("Failed to export after deletion: %v", err)
	}
	// Commit and push deletion
	runGitCmd(t, clone1Dir, "add", ".beads")
	runGitCmd(t, clone1Dir, "commit", "-m", "Delete issue")
	runGitCmd(t, clone1Dir, "push", "origin", "master")
	t.Log("✓ Clone1 deleted issue and pushed")
	// Clone2 pulls the deletion
	runGitCmd(t, clone2Dir, "pull")
	// Verify deletions.jsonl was synced to clone2
	clone2DeletionsPath := filepath.Join(clone2BeadsDir, "deletions.jsonl")
	if _, err := os.Stat(clone2DeletionsPath); err != nil {
		t.Fatalf("deletions.jsonl should be synced to clone2: %v", err)
	}
	// Import to clone2 (should purge the deleted issue)
	result, err = importJSONLFile(ctx, clone2Store, clone2DBPath, clone2JSONLPath, importer.Options{})
	if err != nil {
		t.Fatalf("Failed to import after deletion sync: %v", err)
	}
	t.Logf("Clone2 import after sync: purged=%d, purgedIDs=%v", result.Purged, result.PurgedIDs)
	// Verify clone2 no longer has the issue
	clone2Issue, err = clone2Store.GetIssue(ctx, issueID)
	if err != nil {
		t.Fatalf("Failed to check issue in clone2: %v", err)
	}
	if clone2Issue != nil {
		t.Errorf("Clone2 should NOT have the issue after sync (deletion should propagate)")
	} else {
		t.Log("✓ Deletion propagated to clone2")
	}
	// Verify purge count
	if result.Purged != 1 {
		t.Errorf("Expected 1 purged issue, got %d", result.Purged)
	}
}
// TestDeletionPropagation_SimultaneousDeletions verifies that when both clones
// delete the same issue independently, the deletions merge idempotently: the
// manifest ends up with a record for the issue and neither side resurrects it.
//
// Fix: the delete/append/export calls for both clones previously discarded
// their errors, so a broken setup could masquerade as a passing test; they now
// fail fast.
func TestDeletionPropagation_SimultaneousDeletions(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	ctx := context.Background()
	tempDir := t.TempDir()
	// Create "remote" repository (bare; acts as the shared origin)
	remoteDir := filepath.Join(tempDir, "remote")
	if err := os.MkdirAll(remoteDir, 0750); err != nil {
		t.Fatalf("Failed to create remote dir: %v", err)
	}
	runGitCmd(t, remoteDir, "init", "--bare")
	// Create clone1
	clone1Dir := filepath.Join(tempDir, "clone1")
	runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
	configureGit(t, clone1Dir)
	// Create clone2
	clone2Dir := filepath.Join(tempDir, "clone2")
	runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
	configureGit(t, clone2Dir)
	// Initialize beads in clone1
	clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
	if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	clone1DBPath := filepath.Join(clone1BeadsDir, "beads.db")
	clone1Store := newTestStore(t, clone1DBPath)
	defer clone1Store.Close()
	// Create an issue in clone1
	issue := &types.Issue{
		Title:     "Issue deleted by both",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := clone1Store.CreateIssue(ctx, issue, "test-user"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}
	issueID := issue.ID
	// Export and push
	clone1JSONLPath := filepath.Join(clone1BeadsDir, "beads.jsonl")
	if err := exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath); err != nil {
		t.Fatalf("Failed to export: %v", err)
	}
	runGitCmd(t, clone1Dir, "add", ".beads")
	runGitCmd(t, clone1Dir, "commit", "-m", "Add issue")
	runGitCmd(t, clone1Dir, "push", "origin", "master")
	// Clone2 pulls and imports
	runGitCmd(t, clone2Dir, "pull")
	clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
	clone2DBPath := filepath.Join(clone2BeadsDir, "beads.db")
	clone2Store := newTestStore(t, clone2DBPath)
	defer clone2Store.Close()
	clone2JSONLPath := filepath.Join(clone2BeadsDir, "beads.jsonl")
	if _, err := importJSONLFile(ctx, clone2Store, clone2DBPath, clone2JSONLPath, importer.Options{}); err != nil {
		t.Fatalf("Failed to import to clone2: %v", err)
	}
	// Both clones delete the issue "simultaneously" (each before seeing the
	// other's deletion).
	// Clone1 deletes
	if err := clone1Store.DeleteIssue(ctx, issueID); err != nil {
		t.Fatalf("clone1 DeleteIssue failed: %v", err)
	}
	clone1DeletionsPath := filepath.Join(clone1BeadsDir, "deletions.jsonl")
	if err := deletions.AppendDeletion(clone1DeletionsPath, deletions.DeletionRecord{
		ID:        issueID,
		Timestamp: time.Now().UTC(),
		Actor:     "user1",
		Reason:    "deleted by clone1",
	}); err != nil {
		t.Fatalf("clone1 AppendDeletion failed: %v", err)
	}
	if err := exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath); err != nil {
		t.Fatalf("clone1 export failed: %v", err)
	}
	// Clone2 deletes (before pulling clone1's deletion)
	if err := clone2Store.DeleteIssue(ctx, issueID); err != nil {
		t.Fatalf("clone2 DeleteIssue failed: %v", err)
	}
	clone2DeletionsPath := filepath.Join(clone2BeadsDir, "deletions.jsonl")
	if err := deletions.AppendDeletion(clone2DeletionsPath, deletions.DeletionRecord{
		ID:        issueID,
		Timestamp: time.Now().UTC(),
		Actor:     "user2",
		Reason:    "deleted by clone2",
	}); err != nil {
		t.Fatalf("clone2 AppendDeletion failed: %v", err)
	}
	if err := exportIssuesToJSONL(ctx, clone2Store, clone2JSONLPath); err != nil {
		t.Fatalf("clone2 export failed: %v", err)
	}
	t.Log("✓ Both clones deleted the issue locally")
	// Clone1 commits and pushes first
	runGitCmd(t, clone1Dir, "add", ".beads")
	runGitCmd(t, clone1Dir, "commit", "-m", "Delete issue (clone1)")
	runGitCmd(t, clone1Dir, "push", "origin", "master")
	// Clone2 commits, pulls (may have conflict), and pushes
	runGitCmd(t, clone2Dir, "add", ".beads")
	runGitCmd(t, clone2Dir, "commit", "-m", "Delete issue (clone2)")
	// Pull with rebase to handle the concurrent deletion.
	// The deletions.jsonl conflict is handled by accepting both (append-only).
	runGitCmdAllowError(t, clone2Dir, "pull", "--rebase")
	// If there's a conflict in deletions.jsonl, resolve by concatenating
	resolveDeletionsConflict(t, clone2Dir)
	runGitCmdAllowError(t, clone2Dir, "rebase", "--continue")
	runGitCmdAllowError(t, clone2Dir, "push", "origin", "master")
	// Verify deletions.jsonl contains both deletion records (deduplicated by
	// ID on load, so either clone's entry satisfies the check).
	finalDeletionsPath := filepath.Join(clone2BeadsDir, "deletions.jsonl")
	result, err := deletions.LoadDeletions(finalDeletionsPath)
	if err != nil {
		t.Fatalf("Failed to load deletions: %v", err)
	}
	// Should have the deletion record (may be from either clone, deduplication keeps one)
	if _, found := result.Records[issueID]; !found {
		t.Error("Expected deletion record to exist after simultaneous deletions")
	}
	t.Log("✓ Simultaneous deletions handled correctly (idempotent)")
}
// TestDeletionPropagation_LocalWorkPreserved verifies that local unpushed work
// is NOT deleted when deletions are synced.
//
// Scenario: clone1 and clone2 share one issue via a bare "remote" repo.
// clone2 additionally creates a local-only issue. After clone1 deletes the
// shared issue and clone2 pulls + imports, the shared issue must be gone from
// clone2's DB while the local-only issue survives (it is absent from the
// deletions manifest, so the import must not purge it).
func TestDeletionPropagation_LocalWorkPreserved(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	ctx := context.Background()
	tempDir := t.TempDir()
	// Create "remote" repository
	remoteDir := filepath.Join(tempDir, "remote")
	if err := os.MkdirAll(remoteDir, 0750); err != nil {
		t.Fatalf("Failed to create remote dir: %v", err)
	}
	runGitCmd(t, remoteDir, "init", "--bare")
	// Create clone1
	clone1Dir := filepath.Join(tempDir, "clone1")
	runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
	configureGit(t, clone1Dir)
	// Create clone2
	clone2Dir := filepath.Join(tempDir, "clone2")
	runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
	configureGit(t, clone2Dir)
	// Initialize beads in clone1
	clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
	if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	clone1DBPath := filepath.Join(clone1BeadsDir, "beads.db")
	clone1Store := newTestStore(t, clone1DBPath)
	defer clone1Store.Close()
	// Create shared issue in clone1
	sharedIssue := &types.Issue{
		Title:     "Shared issue",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := clone1Store.CreateIssue(ctx, sharedIssue, "test-user"); err != nil {
		t.Fatalf("Failed to create shared issue: %v", err)
	}
	sharedID := sharedIssue.ID
	// Export and push
	clone1JSONLPath := filepath.Join(clone1BeadsDir, "beads.jsonl")
	if err := exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath); err != nil {
		t.Fatalf("Failed to export: %v", err)
	}
	runGitCmd(t, clone1Dir, "add", ".beads")
	runGitCmd(t, clone1Dir, "commit", "-m", "Add shared issue")
	runGitCmd(t, clone1Dir, "push", "origin", "master")
	// Clone2 pulls and imports the shared issue
	runGitCmd(t, clone2Dir, "pull")
	clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
	clone2DBPath := filepath.Join(clone2BeadsDir, "beads.db")
	clone2Store := newTestStore(t, clone2DBPath)
	defer clone2Store.Close()
	clone2JSONLPath := filepath.Join(clone2BeadsDir, "beads.jsonl")
	if _, err := importJSONLFile(ctx, clone2Store, clone2DBPath, clone2JSONLPath, importer.Options{}); err != nil {
		t.Fatalf("Failed to import to clone2: %v", err)
	}
	// Clone2 creates LOCAL work (not synced)
	localIssue := &types.Issue{
		Title:     "Local work in clone2",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := clone2Store.CreateIssue(ctx, localIssue, "clone2-user"); err != nil {
		t.Fatalf("Failed to create local issue: %v", err)
	}
	localID := localIssue.ID
	t.Logf("Clone2 created local issue: %s", localID)
	// Clone1 deletes the shared issue and records it in the deletions
	// manifest. Errors from these setup steps are deliberately ignored;
	// any failure surfaces via the post-pull assertions below.
	clone1Store.DeleteIssue(ctx, sharedID)
	clone1DeletionsPath := filepath.Join(clone1BeadsDir, "deletions.jsonl")
	deletions.AppendDeletion(clone1DeletionsPath, deletions.DeletionRecord{
		ID:        sharedID,
		Timestamp: time.Now().UTC(),
		Actor:     "clone1-user",
		Reason:    "cleanup",
	})
	exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath)
	runGitCmd(t, clone1Dir, "add", ".beads")
	runGitCmd(t, clone1Dir, "commit", "-m", "Delete shared issue")
	runGitCmd(t, clone1Dir, "push", "origin", "master")
	// Clone2 pulls and imports (should delete shared, preserve local)
	runGitCmd(t, clone2Dir, "pull")
	result, err := importJSONLFile(ctx, clone2Store, clone2DBPath, clone2JSONLPath, importer.Options{})
	if err != nil {
		t.Fatalf("Failed to import after pull: %v", err)
	}
	t.Logf("Clone2 import: purged=%d, purgedIDs=%v", result.Purged, result.PurgedIDs)
	// Verify shared issue is gone
	sharedCheck, _ := clone2Store.GetIssue(ctx, sharedID)
	if sharedCheck != nil {
		t.Error("Shared issue should be deleted")
	}
	// Verify local issue is preserved
	localCheck, _ := clone2Store.GetIssue(ctx, localID)
	if localCheck == nil {
		t.Error("Local work should be preserved (not in deletions manifest)")
	}
	t.Log("✓ Local work preserved while synced deletions propagated")
}
// TestDeletionPropagation_CorruptLineRecovery verifies that corrupt lines
// in deletions.jsonl are skipped gracefully during import.
//
// The manifest can pick up garbage from bad merges or partial writes; the
// loader must skip unparseable lines (surfacing them as warnings) while
// still honoring every valid record.
func TestDeletionPropagation_CorruptLineRecovery(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	ctx := context.Background()
	tempDir := t.TempDir()
	// Setup single clone for this test
	beadsDir := filepath.Join(tempDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0750); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	dbPath := filepath.Join(beadsDir, "beads.db")
	store := newTestStore(t, dbPath)
	defer store.Close()
	// Create two issues
	issue1 := &types.Issue{
		Title:     "Issue 1 (to be deleted)",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	issue2 := &types.Issue{
		Title:     "Issue 2 (to keep)",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	store.CreateIssue(ctx, issue1, "test-user")
	store.CreateIssue(ctx, issue2, "test-user")
	// Create deletions.jsonl with corrupt lines + valid deletion for issue1.
	// Three garbage lines surround one well-formed record; the counts below
	// (Skipped == 3, Records == 1, Warnings == 3) mirror this layout.
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	now := time.Now().UTC().Format(time.RFC3339)
	corruptContent := `this is not valid json
{"broken
{"id":"` + issue1.ID + `","ts":"` + now + `","by":"test-user","reason":"valid deletion"}
more garbage {{{
`
	if err := os.WriteFile(deletionsPath, []byte(corruptContent), 0644); err != nil {
		t.Fatalf("Failed to write corrupt deletions: %v", err)
	}
	// Load deletions - should skip corrupt lines but parse valid one
	result, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions should not fail on corrupt lines: %v", err)
	}
	if result.Skipped != 3 {
		t.Errorf("Expected 3 skipped lines, got %d", result.Skipped)
	}
	if len(result.Records) != 1 {
		t.Errorf("Expected 1 valid record, got %d", len(result.Records))
	}
	if _, found := result.Records[issue1.ID]; !found {
		t.Error("Valid deletion record should be parsed")
	}
	if len(result.Warnings) != 3 {
		t.Errorf("Expected 3 warnings, got %d", len(result.Warnings))
	}
	t.Logf("Warnings: %v", result.Warnings)
	t.Log("✓ Corrupt deletions.jsonl lines handled gracefully")
}
// TestDeletionPropagation_EmptyManifest verifies that import works with
// empty or missing deletions manifest.
//
// Covers the two benign "no deletions" states: the manifest file is absent
// entirely, or it exists with zero bytes. Neither may purge any issues.
func TestDeletionPropagation_EmptyManifest(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	ctx := context.Background()
	tempDir := t.TempDir()
	beadsDir := filepath.Join(tempDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0750); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	dbPath := filepath.Join(beadsDir, "beads.db")
	store := newTestStore(t, dbPath)
	defer store.Close()
	// Create an issue
	issue := &types.Issue{
		Title:     "Test issue",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	store.CreateIssue(ctx, issue, "test-user")
	// Export to JSONL
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	if err := exportIssuesToJSONL(ctx, store, jsonlPath); err != nil {
		t.Fatalf("Failed to export: %v", err)
	}
	// Test 1: No deletions.jsonl exists
	result, err := importJSONLFile(ctx, store, dbPath, jsonlPath, importer.Options{})
	if err != nil {
		t.Fatalf("Import should succeed without deletions.jsonl: %v", err)
	}
	if result.Purged != 0 {
		t.Errorf("Expected 0 purged with no deletions manifest, got %d", result.Purged)
	}
	t.Log("✓ Import works without deletions.jsonl")
	// Test 2: Empty deletions.jsonl
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	if err := os.WriteFile(deletionsPath, []byte{}, 0644); err != nil {
		t.Fatalf("Failed to create empty deletions.jsonl: %v", err)
	}
	result, err = importJSONLFile(ctx, store, dbPath, jsonlPath, importer.Options{})
	if err != nil {
		t.Fatalf("Import should succeed with empty deletions.jsonl: %v", err)
	}
	if result.Purged != 0 {
		t.Errorf("Expected 0 purged with empty deletions manifest, got %d", result.Purged)
	}
	t.Log("✓ Import works with empty deletions.jsonl")
	// Verify issue still exists (imports above must not have purged it)
	check, _ := store.GetIssue(ctx, issue.ID)
	if check == nil {
		t.Error("Issue should still exist")
	}
}
// resolveDeletionsConflict strips git merge-conflict markers out of
// .beads/deletions.jsonl, keeping every JSON record from both sides.
// Because the manifest is append-only, "keep all lines" is the correct
// resolution for concurrent deletions.
func resolveDeletionsConflict(t *testing.T, dir string) {
	t.Helper()
	manifest := filepath.Join(dir, ".beads", "deletions.jsonl")
	raw, err := os.ReadFile(manifest)
	if err != nil {
		return // nothing to resolve
	}
	text := string(raw)
	if !strings.Contains(text, "<<<<<<<") {
		return // no conflict markers present
	}
	// Drop marker lines and anything that is not a JSON record.
	var kept []string
	for _, line := range strings.Split(text, "\n") {
		marker := strings.HasPrefix(line, "<<<<<<<") ||
			strings.HasPrefix(line, "=======") ||
			strings.HasPrefix(line, ">>>>>>>")
		if marker {
			continue
		}
		if strings.TrimSpace(line) == "" || !strings.HasPrefix(line, "{") {
			continue
		}
		kept = append(kept, line)
	}
	// Best-effort write; the subsequent git add is also best-effort.
	_ = os.WriteFile(manifest, []byte(strings.Join(kept, "\n")+"\n"), 0644)
	runGitCmdAllowError(t, dir, "add", manifest)
}
// runGitCmdAllowError runs a git command in dir, deliberately discarding any
// failure. Used for steps that may legitimately fail in conflict scenarios
// (e.g. "rebase --continue" when there is nothing to continue).
func runGitCmdAllowError(t *testing.T, dir string, args ...string) {
	t.Helper()
	// runCommandInDir's result is intentionally discarded; callers only want
	// best-effort execution. (NOTE(review): presumably it returns an error or
	// command handle — confirm against its definition elsewhere in the file.)
	cmd := runCommandInDir(dir, "git", args...)
	_ = cmd // ignore error
}

View File

@@ -383,8 +383,6 @@ func applyFixList(path string, fixes []doctorCheck) {
err = fix.DatabaseConfig(path)
case "JSONL Config":
err = fix.LegacyJSONLConfig(path)
case "Deletions Manifest":
err = fix.MigrateTombstones(path)
case "Untracked Files":
err = fix.UntrackedJSONL(path)
case "Sync Branch Health":
@@ -924,12 +922,7 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, syncBranchHealthCheck)
// Don't fail overall check for sync branch health, just warn
// Check 18: Deletions manifest (legacy, now replaced by tombstones)
deletionsCheck := checkDeletionsManifest(path)
result.Checks = append(result.Checks, deletionsCheck)
// Don't fail overall check for missing deletions manifest, just warn
// Check 19: Tombstones health (bd-s3v)
// Check 18: Tombstones health (bd-s3v)
tombstonesCheck := checkTombstones(path)
result.Checks = append(result.Checks, tombstonesCheck)
// Don't fail overall check for tombstone issues, just warn
@@ -2756,101 +2749,6 @@ func checkSyncBranchHealth(path string) doctorCheck {
}
}
// checkDeletionsManifest reports the state of the legacy deletions.jsonl
// manifest for the workspace rooted at path.
//
// Outcomes:
//   - OK when there is no .beads dir, no git repo, no manifest, an empty
//     manifest, or the manifest was already migrated to inline tombstones.
//   - Warning (with a suggested fix) only when a non-empty legacy manifest
//     still exists and should be migrated via 'bd migrate-tombstones'.
func checkDeletionsManifest(path string) doctorCheck {
	beadsDir := filepath.Join(path, ".beads")
	// Skip if .beads doesn't exist
	if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
		return doctorCheck{
			Name:    "Deletions Manifest",
			Status:  statusOK,
			Message: "N/A (no .beads directory)",
		}
	}
	// Check if we're in a git repository using worktree-aware detection
	_, err := git.GetGitDir()
	if err != nil {
		return doctorCheck{
			Name:    "Deletions Manifest",
			Status:  statusOK,
			Message: "N/A (not a git repository)",
		}
	}
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	// Check if deletions.jsonl exists
	info, err := os.Stat(deletionsPath)
	if err == nil {
		// File exists - count entries (empty file is valid, means no deletions)
		if info.Size() == 0 {
			return doctorCheck{
				Name:    "Deletions Manifest",
				Status:  statusOK,
				Message: "Empty (no legacy deletions)",
			}
		}
		file, err := os.Open(deletionsPath) // #nosec G304 - controlled path
		if err == nil {
			defer file.Close()
			// Count non-empty lines; each is one deletion record.
			count := 0
			scanner := bufio.NewScanner(file)
			for scanner.Scan() {
				if len(scanner.Bytes()) > 0 {
					count++
				}
			}
			// bd-s3v: Suggest migration to inline tombstones
			if count > 0 {
				return doctorCheck{
					Name:    "Deletions Manifest",
					Status:  statusWarning,
					Message: fmt.Sprintf("Legacy format (%d entries)", count),
					Detail:  "deletions.jsonl is deprecated in favor of inline tombstones",
					Fix:     "Run 'bd migrate-tombstones' to convert to inline tombstones",
				}
			}
			return doctorCheck{
				Name:    "Deletions Manifest",
				Status:  statusOK,
				Message: "Empty (no legacy deletions)",
			}
		}
		// NOTE: if the manifest exists but cannot be opened, execution falls
		// through to the checks below rather than reporting an error.
	}
	// bd-s3v: deletions.jsonl doesn't exist - this is the expected state with tombstones
	// Check for .migrated file to confirm migration happened
	migratedPath := filepath.Join(beadsDir, "deletions.jsonl.migrated")
	if _, err := os.Stat(migratedPath); err == nil {
		return doctorCheck{
			Name:    "Deletions Manifest",
			Status:  statusOK,
			Message: "Migrated to tombstones",
		}
	}
	// No deletions.jsonl and no .migrated file - check if JSONL exists
	// (issues.jsonl is canonical; beads.jsonl is the legacy name).
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
		jsonlPath = filepath.Join(beadsDir, "beads.jsonl")
		if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
			return doctorCheck{
				Name:    "Deletions Manifest",
				Status:  statusOK,
				Message: "N/A (no JSONL file)",
			}
		}
	}
	// JSONL exists but no deletions tracking - this is fine for new repos using tombstones
	return doctorCheck{
		Name:    "Deletions Manifest",
		Status:  statusOK,
		Message: "Using inline tombstones",
	}
}
// checkTombstones checks the health of tombstone records (bd-s3v)
// Reports: total tombstones, expiring soon (within 7 days), already expired
func checkTombstones(path string) doctorCheck {

View File

@@ -1,248 +0,0 @@
package fix
import (
"bufio"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/steveyegge/beads/internal/deletions"
)
// HydrateDeletionsManifest populates deletions.jsonl from git history.
// It finds all issue IDs that were ever in the JSONL but are no longer present,
// and adds them to the deletions manifest.
// Note (bd-ffr9): After tombstone migration, this is a no-op since inline tombstones
// are used instead of deletions.jsonl.
//
// path is the workspace root (the directory containing .beads/). Returns an
// error if the workspace is invalid, the JSONL cannot be located/read, or a
// manifest write fails; a clean run with no missing IDs still writes an empty
// manifest so the doctor check stops re-warning.
func HydrateDeletionsManifest(path string) error {
	if err := validateBeadsWorkspace(path); err != nil {
		return err
	}
	beadsDir := filepath.Join(path, ".beads")
	// bd-ffr9: Skip hydrating deletions.jsonl if tombstone migration is complete
	if deletions.IsTombstoneMigrationComplete(beadsDir) {
		fmt.Println(" Tombstone migration complete - skipping deletions.jsonl hydration")
		return nil
	}
	// bd-6xd: issues.jsonl is the canonical filename
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	// Also check for legacy beads.jsonl
	if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
		legacyPath := filepath.Join(beadsDir, "beads.jsonl")
		if _, err := os.Stat(legacyPath); err == nil {
			jsonlPath = legacyPath
		} else {
			return fmt.Errorf("no JSONL file found in .beads/")
		}
	}
	// Load existing deletions manifest to avoid duplicates
	deletionsPath := deletions.DefaultPath(beadsDir)
	existingDeletions, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		return fmt.Errorf("failed to load existing deletions: %w", err)
	}
	// Get current IDs from JSONL
	currentIDs, err := getCurrentJSONLIDs(jsonlPath)
	if err != nil {
		return fmt.Errorf("failed to read current JSONL: %w", err)
	}
	// Get historical IDs from git
	historicalIDs, err := getHistoricalJSONLIDs(path, jsonlPath)
	if err != nil {
		return fmt.Errorf("failed to get historical IDs from git: %w", err)
	}
	// Find deleted IDs (in history but not in current, and not already in manifest)
	var deletedIDs []string
	for id := range historicalIDs {
		if !currentIDs[id] {
			// Skip if already in deletions manifest
			if _, exists := existingDeletions.Records[id]; exists {
				continue
			}
			deletedIDs = append(deletedIDs, id)
		}
	}
	if len(deletedIDs) == 0 {
		// Create empty deletions manifest to signal hydration is complete
		// This prevents the check from re-warning after --fix runs
		if err := deletions.WriteDeletions(deletionsPath, nil); err != nil {
			return fmt.Errorf("failed to create empty deletions manifest: %w", err)
		}
		fmt.Println(" No deleted issues found in git history (created empty manifest)")
		return nil
	}
	// Add to deletions manifest; all hydrated records share one timestamp
	// and a synthetic actor so they are distinguishable from user deletions.
	now := time.Now()
	for _, id := range deletedIDs {
		record := deletions.DeletionRecord{
			ID:        id,
			Timestamp: now,
			Actor:     "bd-doctor-hydrate",
			Reason:    "Hydrated from git history",
		}
		if err := deletions.AppendDeletion(deletionsPath, record); err != nil {
			return fmt.Errorf("failed to append deletion record for %s: %w", id, err)
		}
	}
	fmt.Printf(" Added %d deletion records to manifest\n", len(deletedIDs))
	return nil
}
// getCurrentJSONLIDs reads the current JSONL file and returns a set of IDs.
func getCurrentJSONLIDs(jsonlPath string) (map[string]bool, error) {
ids := make(map[string]bool)
file, err := os.Open(jsonlPath) // #nosec G304 - path validated by caller
if err != nil {
if os.IsNotExist(err) {
return ids, nil
}
return nil, err
}
defer func() {
_ = file.Close()
}()
scanner := bufio.NewScanner(file)
scanner.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
for scanner.Scan() {
line := scanner.Bytes()
if len(line) == 0 {
continue
}
var issue struct {
ID string `json:"id"`
Status string `json:"status"`
}
if err := json.Unmarshal(line, &issue); err != nil {
continue
}
// Include ALL issues including tombstones (bd-552 fix)
// Tombstones represent migrated deletions that ARE accounted for.
// By including them in currentIDs, they won't appear "missing" when
// compared to historicalIDs, preventing erroneous re-addition to
// deletions.jsonl. The previous bd-in7q fix had backwards logic.
if issue.ID != "" {
ids[issue.ID] = true
}
}
return ids, scanner.Err()
}
// getHistoricalJSONLIDs returns every issue ID that ever appeared in the
// JSONL file, as recorded by git history. The path is made relative to the
// repo root when possible; on failure the absolute path is used as-is.
func getHistoricalJSONLIDs(repoPath, jsonlPath string) (map[string]bool, error) {
	rel := jsonlPath
	if r, err := filepath.Rel(repoPath, jsonlPath); err == nil {
		rel = r
	}
	// Delegate to the commit-by-commit walker: memory-efficient and able to
	// parse real JSON instead of regex-matching diff output.
	return getHistoricalIDsViaDiff(repoPath, rel)
}
// looksLikeIssueID reports whether id matches the beads issue-ID shape:
// a non-empty alphanumeric/underscore prefix, a dash, then a non-empty
// suffix of lowercase base36 characters optionally containing dots for
// child issues (e.g. bd-abc123, myproject-42, bd-1.2).
func looksLikeIssueID(id string) bool {
	// Split on the first dash; both halves must be non-empty.
	prefix, suffix, ok := strings.Cut(id, "-")
	if !ok || prefix == "" || suffix == "" {
		return false
	}
	for _, r := range prefix {
		switch {
		case r >= 'a' && r <= 'z',
			r >= 'A' && r <= 'Z',
			r >= '0' && r <= '9',
			r == '_':
			// valid prefix character
		default:
			return false
		}
	}
	for _, r := range suffix {
		switch {
		case r >= 'a' && r <= 'z',
			r >= '0' && r <= '9',
			r == '.':
			// valid suffix character (base36 hash, number, or child dot)
		default:
			return false
		}
	}
	return true
}
// getHistoricalIDsViaDiff walks git history commit-by-commit and collects
// every issue ID that ever appeared in the tracked file at relPath.
// Compared with `git log -p`, reading each blob individually keeps memory
// bounded and lets us parse real JSON instead of regex-matching diff text.
func getHistoricalIDsViaDiff(repoPath, relPath string) (map[string]bool, error) {
	seen := make(map[string]bool)
	// All commits, across every ref, that touched the file.
	logCmd := exec.Command("git", "log", "--all", "--format=%H", "--", relPath)
	logCmd.Dir = repoPath
	logOut, err := logCmd.Output()
	if err != nil {
		return seen, fmt.Errorf("git log failed: %w", err)
	}
	for _, hash := range strings.Split(strings.TrimSpace(string(logOut)), "\n") {
		if hash == "" {
			continue
		}
		// Read the file's content as of this commit; it may not exist there.
		showCmd := exec.Command("git", "show", hash+":"+relPath) // #nosec G204 - args are from git log output
		showCmd.Dir = repoPath
		blob, err := showCmd.Output()
		if err != nil {
			continue
		}
		sc := bufio.NewScanner(strings.NewReader(string(blob)))
		for sc.Scan() {
			text := sc.Text()
			// Cheap pre-filter before paying for a JSON parse.
			if !strings.Contains(text, `"id"`) {
				continue
			}
			var entry struct {
				ID string `json:"id"`
			}
			if json.Unmarshal([]byte(text), &entry) != nil || entry.ID == "" {
				continue
			}
			// Guard against unrelated JSON that happens to carry an "id".
			if looksLikeIssueID(entry.ID) {
				seen[entry.ID] = true
			}
		}
	}
	return seen, nil
}

View File

@@ -1,156 +0,0 @@
package fix
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"github.com/steveyegge/beads/internal/types"
)
// TestGetCurrentJSONLIDs_IncludesTombstones verifies that tombstones ARE included
// in the current ID set. This is critical for bd-552 fix: tombstones represent
// migrated deletions that are accounted for. By including them, they won't appear
// "missing" when compared to historicalIDs, preventing erroneous re-addition to
// deletions.jsonl.
func TestGetCurrentJSONLIDs_IncludesTombstones(t *testing.T) {
	// Setup: Create temp file with mix of normal issues and tombstones
	tmpDir := t.TempDir()
	jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
	// Create a JSONL file with both normal issues and tombstones
	// (two of each, interleaved).
	issues := []*types.Issue{
		{
			ID:     "bd-abc",
			Title:  "Normal issue",
			Status: types.StatusOpen,
		},
		{
			ID:        "bd-def",
			Title:     "(deleted)",
			Status:    types.StatusTombstone,
			DeletedBy: "test-user",
		},
		{
			ID:     "bd-ghi",
			Title:  "Another normal issue",
			Status: types.StatusOpen,
		},
		{
			ID:        "bd-jkl",
			Title:     "(deleted)",
			Status:    types.StatusTombstone,
			DeletedBy: "test-user",
		},
	}
	file, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create test JSONL file: %v", err)
	}
	encoder := json.NewEncoder(file)
	for _, issue := range issues {
		if err := encoder.Encode(issue); err != nil {
			_ = file.Close()
			t.Fatalf("Failed to write issue to JSONL: %v", err)
		}
	}
	_ = file.Close()
	// Call getCurrentJSONLIDs
	ids, err := getCurrentJSONLIDs(jsonlPath)
	if err != nil {
		t.Fatalf("getCurrentJSONLIDs failed: %v", err)
	}
	// Verify: Should contain ALL IDs including tombstones (bd-552 fix)
	expectedIDs := map[string]bool{
		"bd-abc": true,
		"bd-def": true, // tombstone - must be included
		"bd-ghi": true,
		"bd-jkl": true, // tombstone - must be included
	}
	if len(ids) != len(expectedIDs) {
		t.Errorf("Expected %d IDs, got %d. IDs: %v", len(expectedIDs), len(ids), ids)
	}
	for expectedID := range expectedIDs {
		if !ids[expectedID] {
			t.Errorf("Expected ID %s to be present", expectedID)
		}
	}
	// Verify tombstones ARE included (this is the bd-552 fix)
	if !ids["bd-def"] {
		t.Error("Tombstone bd-def MUST be included in current IDs (bd-552 fix)")
	}
	if !ids["bd-jkl"] {
		t.Error("Tombstone bd-jkl MUST be included in current IDs (bd-552 fix)")
	}
}
// TestGetCurrentJSONLIDs_HandlesEmptyFile verifies that a zero-byte JSONL
// file yields an empty ID set and no error.
func TestGetCurrentJSONLIDs_HandlesEmptyFile(t *testing.T) {
	tmpDir := t.TempDir()
	jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
	// Create empty file. Close the handle immediately: the original left it
	// open, which leaks a descriptor and can break TempDir cleanup on Windows.
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create empty file: %v", err)
	}
	_ = f.Close()
	ids, err := getCurrentJSONLIDs(jsonlPath)
	if err != nil {
		t.Fatalf("getCurrentJSONLIDs failed: %v", err)
	}
	if len(ids) != 0 {
		t.Errorf("Expected 0 IDs from empty file, got %d", len(ids))
	}
}
// TestGetCurrentJSONLIDs_HandlesMissingFile verifies that a path which does
// not exist is treated as "no issues": an empty set and a nil error.
func TestGetCurrentJSONLIDs_HandlesMissingFile(t *testing.T) {
	missing := filepath.Join(t.TempDir(), "nonexistent.jsonl")
	ids, err := getCurrentJSONLIDs(missing)
	if err != nil {
		t.Fatalf("getCurrentJSONLIDs should handle missing file gracefully: %v", err)
	}
	if len(ids) != 0 {
		t.Errorf("Expected 0 IDs from missing file, got %d", len(ids))
	}
}
// TestGetCurrentJSONLIDs_SkipsInvalidJSON verifies that a malformed line in
// the middle of the file is skipped without aborting the scan: both valid
// records around it must still be parsed.
func TestGetCurrentJSONLIDs_SkipsInvalidJSON(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "issues.jsonl")
	// Two well-formed records sandwich one garbage line.
	content := `{"id":"bd-valid","status":"open"}
invalid json line
{"id":"bd-another","status":"open"}
`
	if err := os.WriteFile(path, []byte(content), 0600); err != nil {
		t.Fatalf("Failed to write test file: %v", err)
	}
	ids, err := getCurrentJSONLIDs(path)
	if err != nil {
		t.Fatalf("getCurrentJSONLIDs failed: %v", err)
	}
	if len(ids) != 2 {
		t.Errorf("Expected 2 valid IDs, got %d. IDs: %v", len(ids), ids)
	}
	if !ids["bd-valid"] || !ids["bd-another"] {
		t.Error("Expected to parse both valid issues despite invalid line in between")
	}
}
// Note: Full integration test for HydrateDeletionsManifest would require git repo setup.
// The unit tests above verify the core fix (bd-552: including tombstones in getCurrentJSONLIDs
// so they aren't erroneously re-added to deletions.jsonl).
// Integration tests are handled in migrate_tombstones_test.go with full sync cycle.

View File

@@ -37,17 +37,6 @@ func DatabaseVersion(path string) error {
return fmt.Errorf("failed to initialize database: %w", err)
}
// bd-8v5o: Clean up deletions manifest for hydrated issues
// After init, remove any issues from deletions.jsonl that exist in JSONL
// This prevents perpetual "Skipping bd-xxx (in deletions manifest)" warnings
jsonlPath := findJSONLPath(beadsDir)
if jsonlPath != "" {
if err := cleanupDeletionsManifest(beadsDir, jsonlPath); err != nil {
// Non-fatal - just log warning
fmt.Printf(" Warning: failed to clean up deletions manifest: %v\n", err)
}
}
return nil
}

View File

@@ -8,10 +8,59 @@ import (
"path/filepath"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/types"
)
// legacyDeletionRecord represents a single deletion entry from the legacy deletions.jsonl manifest.
// This is inlined here for migration purposes only - new code uses inline tombstones.
type legacyDeletionRecord struct {
ID string `json:"id"` // Issue ID that was deleted
Timestamp time.Time `json:"ts"` // When the deletion occurred
Actor string `json:"by"` // Who performed the deletion
Reason string `json:"reason,omitempty"` // Optional reason for deletion
}
// loadLegacyDeletions reads the legacy deletions.jsonl manifest.
// Returns a map of deletion records keyed by issue ID.
// This is inlined here for migration purposes only.
func loadLegacyDeletions(path string) (map[string]legacyDeletionRecord, error) {
records := make(map[string]legacyDeletionRecord)
f, err := os.Open(path) // #nosec G304 - controlled path from caller
if err != nil {
if os.IsNotExist(err) {
return records, nil
}
return nil, fmt.Errorf("failed to open deletions file: %w", err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
scanner.Buffer(make([]byte, 0, 1024), 1024*1024)
for scanner.Scan() {
line := scanner.Text()
if line == "" {
continue
}
var record legacyDeletionRecord
if err := json.Unmarshal([]byte(line), &record); err != nil {
continue // Skip corrupt lines
}
if record.ID == "" {
continue // Skip records without ID
}
records[record.ID] = record
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error reading deletions file: %w", err)
}
return records, nil
}
// MigrateTombstones converts legacy deletions.jsonl entries to inline tombstones.
// This is called by bd doctor --fix when legacy deletions are detected.
func MigrateTombstones(path string) error {
@@ -30,12 +79,12 @@ func MigrateTombstones(path string) error {
}
// Load deletions
loadResult, err := deletions.LoadDeletions(deletionsPath)
records, err := loadLegacyDeletions(deletionsPath)
if err != nil {
return fmt.Errorf("failed to load deletions: %w", err)
}
if len(loadResult.Records) == 0 {
if len(records) == 0 {
fmt.Println(" deletions.jsonl is empty - nothing to migrate")
return nil
}
@@ -60,9 +109,9 @@ func MigrateTombstones(path string) error {
}
// Convert deletions to tombstones
var toMigrate []deletions.DeletionRecord
var toMigrate []legacyDeletionRecord
var skipped int
for _, record := range loadResult.Records {
for _, record := range records {
if existingTombstones[record.ID] {
skipped++
continue
@@ -81,7 +130,7 @@ func MigrateTombstones(path string) error {
defer file.Close()
for _, record := range toMigrate {
tombstone := convertDeletionToTombstone(record)
tombstone := convertLegacyDeletionToTombstone(record)
data, err := json.Marshal(tombstone)
if err != nil {
return fmt.Errorf("failed to marshal tombstone for %s: %w", record.ID, err)
@@ -106,8 +155,8 @@ func MigrateTombstones(path string) error {
return nil
}
// convertDeletionToTombstone converts a DeletionRecord to a tombstone Issue.
func convertDeletionToTombstone(record deletions.DeletionRecord) *types.Issue {
// convertLegacyDeletionToTombstone converts a legacy DeletionRecord to a tombstone Issue.
func convertLegacyDeletionToTombstone(record legacyDeletionRecord) *types.Issue {
now := time.Now()
deletedAt := record.Timestamp
if deletedAt.IsZero() {

View File

@@ -1,185 +0,0 @@
package fix
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/types"
)
// TestMigrateTombstones exercises the happy path of the legacy migration:
// one live issue in issues.jsonl plus one record in deletions.jsonl. After
// MigrateTombstones runs, the manifest must be renamed to *.migrated and the
// deleted ID must appear in issues.jsonl as an inline tombstone.
func TestMigrateTombstones(t *testing.T) {
	// Setup: create temp .beads directory
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	// Create an issue in issues.jsonl
	issue := &types.Issue{
		ID:        "test-abc",
		Title:     "Test Issue",
		Status:    types.StatusOpen,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	// Marshal error deliberately ignored: the struct is fully under test
	// control and always serializable.
	issueData, _ := json.Marshal(issue)
	if err := os.WriteFile(jsonlPath, append(issueData, '\n'), 0644); err != nil {
		t.Fatalf("failed to write issues.jsonl: %v", err)
	}
	// Create deletions.jsonl with one entry
	record := deletions.DeletionRecord{
		ID:        "test-deleted",
		Timestamp: time.Now().Add(-time.Hour),
		Actor:     "testuser",
		Reason:    "test deletion",
	}
	if err := deletions.AppendDeletion(deletionsPath, record); err != nil {
		t.Fatalf("failed to create deletions.jsonl: %v", err)
	}
	// Run migration
	err := MigrateTombstones(tmpDir)
	if err != nil {
		t.Fatalf("MigrateTombstones failed: %v", err)
	}
	// Verify deletions.jsonl was archived
	if _, err := os.Stat(deletionsPath); !os.IsNotExist(err) {
		t.Error("deletions.jsonl should have been archived")
	}
	if _, err := os.Stat(deletionsPath + ".migrated"); os.IsNotExist(err) {
		t.Error("deletions.jsonl.migrated should exist")
	}
	// Verify tombstone was added to issues.jsonl
	data, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read issues.jsonl: %v", err)
	}
	// Should have 2 lines now (original issue + tombstone)
	lines := 0
	var foundTombstone bool
	for _, line := range splitLines(data) {
		if len(line) == 0 {
			continue
		}
		lines++
		var iss struct {
			ID     string `json:"id"`
			Status string `json:"status"`
		}
		if err := json.Unmarshal(line, &iss); err == nil {
			if iss.ID == "test-deleted" && iss.Status == string(types.StatusTombstone) {
				foundTombstone = true
			}
		}
	}
	if lines != 2 {
		t.Errorf("expected 2 lines in issues.jsonl, got %d", lines)
	}
	if !foundTombstone {
		t.Error("tombstone for test-deleted not found in issues.jsonl")
	}
}
// TestMigrateTombstones_SkipsExisting verifies idempotence: when an ID in
// deletions.jsonl already has an inline tombstone in issues.jsonl, migration
// must not append a duplicate tombstone line.
func TestMigrateTombstones_SkipsExisting(t *testing.T) {
	// Setup: create temp .beads directory
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	// Create issues.jsonl with an existing tombstone
	tombstone := &types.Issue{
		ID:        "test-already-tombstone",
		Title:     "[Deleted]",
		Status:    types.StatusTombstone,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	tombstoneData, _ := json.Marshal(tombstone)
	if err := os.WriteFile(jsonlPath, append(tombstoneData, '\n'), 0644); err != nil {
		t.Fatalf("failed to write issues.jsonl: %v", err)
	}
	// Create deletions.jsonl with the same ID
	record := deletions.DeletionRecord{
		ID:        "test-already-tombstone",
		Timestamp: time.Now().Add(-time.Hour),
		Actor:     "testuser",
		Reason:    "test deletion",
	}
	if err := deletions.AppendDeletion(deletionsPath, record); err != nil {
		t.Fatalf("failed to create deletions.jsonl: %v", err)
	}
	// Run migration
	err := MigrateTombstones(tmpDir)
	if err != nil {
		t.Fatalf("MigrateTombstones failed: %v", err)
	}
	// Verify issues.jsonl still has only 1 line (no duplicate tombstone)
	data, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read issues.jsonl: %v", err)
	}
	lines := 0
	for _, line := range splitLines(data) {
		if len(line) > 0 {
			lines++
		}
	}
	if lines != 1 {
		t.Errorf("expected 1 line in issues.jsonl (existing tombstone), got %d", lines)
	}
}
func TestMigrateTombstones_NoDeletionsFile(t *testing.T) {
	// Migration must be a silent no-op when deletions.jsonl does not exist.
	tmpDir := t.TempDir()
	if err := os.MkdirAll(filepath.Join(tmpDir, ".beads"), 0755); err != nil {
		t.Fatalf("failed to create .beads dir: %v", err)
	}
	if err := MigrateTombstones(tmpDir); err != nil {
		t.Fatalf("MigrateTombstones failed: %v", err)
	}
}
// splitLines splits data on '\n' without the trailing empty segment that a
// newline-terminated buffer would otherwise produce. Returned slices alias
// the input; interior empty lines are preserved as zero-length slices.
func splitLines(data []byte) [][]byte {
	var lines [][]byte
	for len(data) > 0 {
		nl := -1
		for j, c := range data {
			if c == '\n' {
				nl = j
				break
			}
		}
		if nl < 0 {
			// No newline left: the remainder is the final line.
			lines = append(lines, data)
			break
		}
		lines = append(lines, data[:nl])
		data = data[nl+1:]
	}
	return lines
}

View File

@@ -1,14 +1,10 @@
package fix
import (
"bufio"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/steveyegge/beads/internal/deletions"
)
// DBJSONLSync fixes database-JSONL sync issues by running bd sync --import-only
@@ -31,13 +27,10 @@ func DBJSONLSync(path string) error {
}
hasJSONL := false
actualJSONLPath := ""
if _, err := os.Stat(jsonlPath); err == nil {
hasJSONL = true
actualJSONLPath = jsonlPath
} else if _, err := os.Stat(beadsJSONLPath); err == nil {
hasJSONL = true
actualJSONLPath = beadsJSONLPath
}
if !hasDB || !hasJSONL {
@@ -61,107 +54,5 @@ func DBJSONLSync(path string) error {
return fmt.Errorf("failed to sync database with JSONL: %w", err)
}
// bd-8v5o: Clean up deletions manifest for hydrated issues
// After sync, remove any issues from deletions.jsonl that exist in JSONL
// This prevents perpetual "Skipping bd-xxx (in deletions manifest)" warnings
if err := cleanupDeletionsManifest(beadsDir, actualJSONLPath); err != nil {
// Non-fatal - just log warning
fmt.Printf(" Warning: failed to clean up deletions manifest: %v\n", err)
}
return nil
}
// cleanupDeletionsManifest removes issues from deletions.jsonl that exist in JSONL.
// This is needed because when issues are hydrated from git history (e.g., via bd init
// or bd sync --import-only), they may still be in the deletions manifest from a
// previous deletion. This causes perpetual skip warnings during sync.
func cleanupDeletionsManifest(beadsDir, jsonlPath string) error {
	deletionsPath := deletions.DefaultPath(beadsDir)

	// No manifest on disk means there is nothing to reconcile.
	if _, err := os.Stat(deletionsPath); os.IsNotExist(err) {
		return nil
	}

	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		return fmt.Errorf("failed to load deletions manifest: %w", err)
	}
	if len(loadResult.Records) == 0 {
		return nil
	}

	// Live (non-tombstone) issue IDs currently present in the JSONL.
	liveIDs, err := getNonTombstoneJSONLIDs(jsonlPath)
	if err != nil {
		return fmt.Errorf("failed to read JSONL: %w", err)
	}

	// Manifest entries that are also live in JSONL are contradictions
	// (hydrated issues) and must be dropped from the manifest.
	var conflicting []string
	for id := range loadResult.Records {
		if liveIDs[id] {
			conflicting = append(conflicting, id)
		}
	}
	if len(conflicting) == 0 {
		return nil
	}

	result, err := deletions.RemoveDeletions(deletionsPath, conflicting)
	if err != nil {
		return fmt.Errorf("failed to remove deletions: %w", err)
	}
	if result.RemovedCount > 0 {
		fmt.Printf(" Removed %d issue(s) from deletions manifest (now hydrated in JSONL)\n", result.RemovedCount)
	}
	return nil
}
// getNonTombstoneJSONLIDs reads the JSONL file and returns a set of IDs
// that are not tombstones (status != "tombstone").
func getNonTombstoneJSONLIDs(jsonlPath string) (map[string]bool, error) {
ids := make(map[string]bool)
file, err := os.Open(jsonlPath) // #nosec G304 - path validated by caller
if err != nil {
if os.IsNotExist(err) {
return ids, nil
}
return nil, err
}
defer func() {
_ = file.Close()
}()
scanner := bufio.NewScanner(file)
scanner.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
for scanner.Scan() {
line := scanner.Bytes()
if len(line) == 0 {
continue
}
var issue struct {
ID string `json:"id"`
Status string `json:"status"`
}
if err := json.Unmarshal(line, &issue); err != nil {
continue
}
// Only include non-tombstone issues
if issue.ID != "" && issue.Status != "tombstone" {
ids[issue.ID] = true
}
}
return ids, scanner.Err()
}

View File

@@ -96,8 +96,6 @@ NOTE: Import requires direct database access and does not work with daemon mode.
clearDuplicateExternalRefs, _ := cmd.Flags().GetBool("clear-duplicate-external-refs")
orphanHandling, _ := cmd.Flags().GetString("orphan-handling")
force, _ := cmd.Flags().GetBool("force")
noGitHistory, _ := cmd.Flags().GetBool("no-git-history")
ignoreDeletions, _ := cmd.Flags().GetBool("ignore-deletions")
protectLeftSnapshot, _ := cmd.Flags().GetBool("protect-left-snapshot")
// Check if stdin is being used interactively (not piped)
@@ -257,8 +255,6 @@ NOTE: Import requires direct database access and does not work with daemon mode.
RenameOnImport: renameOnImport,
ClearDuplicateExternalRefs: clearDuplicateExternalRefs,
OrphanHandling: orphanHandling,
NoGitHistory: noGitHistory,
IgnoreDeletions: ignoreDeletions,
}
// If --protect-left-snapshot is set, read the left snapshot and build ID set
@@ -423,18 +419,8 @@ NOTE: Import requires direct database access and does not work with daemon mode.
if len(result.IDMapping) > 0 {
fmt.Fprintf(os.Stderr, ", %d issues remapped", len(result.IDMapping))
}
if result.SkippedDeleted > 0 {
fmt.Fprintf(os.Stderr, ", %d skipped (deleted)", result.SkippedDeleted)
}
fmt.Fprintf(os.Stderr, "\n")
// Print skipped deleted issues summary if any (bd-4zy)
if result.SkippedDeleted > 0 {
fmt.Fprintf(os.Stderr, "\n⚠ Skipped %d issue(s) found in deletions manifest\n", result.SkippedDeleted)
fmt.Fprintf(os.Stderr, " These issues were previously deleted and will not be resurrected.\n")
fmt.Fprintf(os.Stderr, " Use --ignore-deletions to force import anyway.\n")
}
// Print skipped dependencies summary if any
if len(result.SkippedDependencies) > 0 {
fmt.Fprintf(os.Stderr, "\n⚠ Warning: Skipped %d dependencies due to missing references:\n", len(result.SkippedDependencies))
@@ -790,8 +776,6 @@ func init() {
importCmd.Flags().Bool("clear-duplicate-external-refs", false, "Clear duplicate external_ref values (keeps first occurrence)")
importCmd.Flags().String("orphan-handling", "", "How to handle missing parent issues: strict/resurrect/skip/allow (default: use config or 'allow')")
importCmd.Flags().Bool("force", false, "Force metadata update even when database is already in sync with JSONL")
importCmd.Flags().Bool("no-git-history", false, "Skip git history backfill for deletions (use during JSONL filename migrations)")
importCmd.Flags().Bool("ignore-deletions", false, "Import issues even if they're in the deletions manifest")
importCmd.Flags().Bool("protect-left-snapshot", false, "Protect issues in left snapshot from git-history-backfill (bd-sync-deletion fix)")
importCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output import statistics in JSON format")
rootCmd.AddCommand(importCmd)

View File

@@ -165,8 +165,6 @@ type ImportOptions struct {
SkipPrefixValidation bool // Skip prefix validation (for auto-import)
ClearDuplicateExternalRefs bool // Clear duplicate external_ref values instead of erroring
OrphanHandling string // Orphan handling mode: strict/resurrect/skip/allow (empty = use config)
NoGitHistory bool // Skip git history backfill for deletions (prevents spurious deletion during JSONL migrations)
IgnoreDeletions bool // Import issues even if they're in the deletions manifest
ProtectLocalExportIDs map[string]bool // IDs from left snapshot to protect from git-history-backfill (bd-sync-deletion fix)
}
@@ -183,12 +181,6 @@ type ImportResult struct {
ExpectedPrefix string // Database configured prefix
MismatchPrefixes map[string]int // Map of mismatched prefixes to count
SkippedDependencies []string // Dependencies skipped due to FK constraint violations
Purged int // Issues purged from DB (found in deletions manifest)
PurgedIDs []string // IDs that were purged
SkippedDeleted int // Issues skipped because they're in deletions manifest
SkippedDeletedIDs []string // IDs that were skipped due to deletions manifest
PreservedLocalExport int // Issues preserved because they were in local export (bd-sync-deletion fix)
PreservedLocalIDs []string // IDs that were preserved from local export
}
// importIssuesCore handles the core import logic used by both manual and auto-import.
@@ -228,8 +220,6 @@ func importIssuesCore(ctx context.Context, dbPath string, store storage.Storage,
SkipPrefixValidation: opts.SkipPrefixValidation,
ClearDuplicateExternalRefs: opts.ClearDuplicateExternalRefs,
OrphanHandling: importer.OrphanHandling(orphanHandling),
NoGitHistory: opts.NoGitHistory,
IgnoreDeletions: opts.IgnoreDeletions,
ProtectLocalExportIDs: opts.ProtectLocalExportIDs,
}
@@ -252,12 +242,6 @@ func importIssuesCore(ctx context.Context, dbPath string, store storage.Storage,
ExpectedPrefix: result.ExpectedPrefix,
MismatchPrefixes: result.MismatchPrefixes,
SkippedDependencies: result.SkippedDependencies,
Purged: result.Purged,
PurgedIDs: result.PurgedIDs,
SkippedDeleted: result.SkippedDeleted,
SkippedDeletedIDs: result.SkippedDeletedIDs,
PreservedLocalExport: result.PreservedLocalExport,
PreservedLocalIDs: result.PreservedLocalIDs,
}, nil
}

View File

@@ -13,7 +13,6 @@ import (
"sort"
"strings"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
@@ -298,74 +297,41 @@ func checkOrphanedDeps(ctx context.Context, store storage.Storage) ([]string, er
// validatePostImport checks that import didn't cause data loss.
// Returns error if issue count decreased unexpectedly (data loss) or nil if OK.
// A decrease is legitimate if it matches deletions recorded in deletions.jsonl.
//
// Parameters:
// - before: issue count in DB before import
// - after: issue count in DB after import
// - jsonlPath: path to issues.jsonl (used to locate deletions.jsonl)
func validatePostImport(before, after int, jsonlPath string) error {
return validatePostImportWithExpectedDeletions(before, after, 0, jsonlPath)
// - jsonlPath: path to issues.jsonl (unused, kept for API compatibility)
func validatePostImport(before, after int, _ string) error {
return validatePostImportWithExpectedDeletions(before, after, 0, "")
}
// validatePostImportWithExpectedDeletions checks that import didn't cause data loss,
// accounting for expected deletions that were already sanitized from the JSONL.
// accounting for expected deletions (e.g., tombstones).
// Returns error if issue count decreased unexpectedly (data loss) or nil if OK.
//
// Parameters:
// - before: issue count in DB before import
// - after: issue count in DB after import
// - expectedDeletions: number of issues known to have been deleted (from sanitize step)
// - jsonlPath: path to issues.jsonl (used to locate deletions.jsonl)
func validatePostImportWithExpectedDeletions(before, after, expectedDeletions int, jsonlPath string) error {
// - expectedDeletions: number of issues known to have been deleted
// - jsonlPath: unused, kept for API compatibility
func validatePostImportWithExpectedDeletions(before, after, expectedDeletions int, _ string) error {
if after < before {
// Count decrease - check if this matches legitimate deletions
decrease := before - after
// First, account for expected deletions from the sanitize step (bd-tt0 fix)
// These were already removed from JSONL and will be purged from DB by import
// Account for expected deletions (tombstones converted to actual deletions)
if expectedDeletions > 0 && decrease <= expectedDeletions {
// Decrease is fully accounted for by expected deletions
fmt.Fprintf(os.Stderr, "Import complete: %d → %d issues (-%d, expected from sanitize)\n",
fmt.Fprintf(os.Stderr, "Import complete: %d → %d issues (-%d, expected deletions)\n",
before, after, decrease)
return nil
}
// If decrease exceeds expected deletions, check deletions manifest for additional legitimacy
unexplainedDecrease := decrease - expectedDeletions
// Load deletions manifest to check for legitimate deletions
beadsDir := filepath.Dir(jsonlPath)
deletionsPath := deletions.DefaultPath(beadsDir)
loadResult, err := deletions.LoadDeletions(deletionsPath)
if err != nil {
// If we can't load deletions, assume the worst
return fmt.Errorf("import reduced issue count: %d → %d (data loss detected! failed to verify deletions: %v)", before, after, err)
}
// If there are deletions recorded, the decrease is likely legitimate
// We can't perfectly match because we don't know exactly which issues
// were deleted in this sync cycle vs previously. But if there are ANY
// deletions recorded and the decrease is reasonable, allow it.
numDeletions := len(loadResult.Records)
if numDeletions > 0 && unexplainedDecrease <= numDeletions {
// Legitimate deletion - decrease is accounted for by deletions manifest
if expectedDeletions > 0 {
fmt.Fprintf(os.Stderr, "Import complete: %d → %d issues (-%d, %d from sanitize + %d from deletions manifest)\n",
before, after, decrease, expectedDeletions, unexplainedDecrease)
} else {
fmt.Fprintf(os.Stderr, "Import complete: %d → %d issues (-%d, accounted for by %d deletion(s))\n",
before, after, decrease, numDeletions)
}
return nil
}
// Decrease exceeds recorded deletions - potential data loss
if numDeletions > 0 || expectedDeletions > 0 {
return fmt.Errorf("import reduced issue count: %d → %d (-%d exceeds %d expected + %d recorded deletion(s) - potential data loss!)",
before, after, decrease, expectedDeletions, numDeletions)
}
return fmt.Errorf("import reduced issue count: %d → %d (data loss detected! no deletions recorded)", before, after)
// Unexpected decrease - warn but don't fail
// With tombstones as the deletion mechanism, decreases are unusual
// but can happen during cleanup or migration
fmt.Fprintf(os.Stderr, "Warning: import reduced issue count: %d → %d (-%d)\n",
before, after, decrease)
return nil
}
if after == before {
fmt.Fprintf(os.Stderr, "Import complete: no changes\n")

View File

@@ -164,62 +164,17 @@ func TestValidatePreExportSuite(t *testing.T) {
}
func TestValidatePostImport(t *testing.T) {
t.Run("issue count decreased with no deletions fails", func(t *testing.T) {
// Note: With tombstones as the deletion mechanism, validatePostImport
// no longer fails on decreases - it only warns. The deletions.jsonl
// validation has been removed.
t.Run("issue count decreased warns but succeeds", func(t *testing.T) {
tmpDir := t.TempDir()
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
// No deletions.jsonl file exists
err := validatePostImport(10, 5, jsonlPath)
if err == nil {
t.Error("Expected error for decreased issue count with no deletions, got nil")
}
if err != nil && !strings.Contains(err.Error(), "no deletions recorded") {
t.Errorf("Expected 'no deletions recorded' error, got: %v", err)
}
})
t.Run("issue count decreased within deletion count succeeds", func(t *testing.T) {
tmpDir := t.TempDir()
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
deletionsPath := filepath.Join(tmpDir, "deletions.jsonl")
// Create deletions file with 5 deletions
deletionsContent := `{"id":"del-1","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-2","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-3","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-4","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-5","ts":"2024-01-01T00:00:00Z","by":"test"}
`
if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0600); err != nil {
t.Fatalf("Failed to write deletions file: %v", err)
}
// Decrease of 5 matches the 5 recorded deletions
// With tombstone-based deletions, decreases are allowed (just warn)
err := validatePostImport(10, 5, jsonlPath)
if err != nil {
t.Errorf("Expected no error when decrease matches deletions, got: %v", err)
}
})
t.Run("issue count decreased exceeding deletion count fails", func(t *testing.T) {
tmpDir := t.TempDir()
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
deletionsPath := filepath.Join(tmpDir, "deletions.jsonl")
// Create deletions file with only 2 deletions
deletionsContent := `{"id":"del-1","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-2","ts":"2024-01-01T00:00:00Z","by":"test"}
`
if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0600); err != nil {
t.Fatalf("Failed to write deletions file: %v", err)
}
// Decrease of 5 exceeds the 2 recorded deletions
err := validatePostImport(10, 5, jsonlPath)
if err == nil {
t.Error("Expected error when decrease exceeds deletions, got nil")
}
if err != nil && !strings.Contains(err.Error(), "exceeds") {
t.Errorf("Expected 'exceeds' error, got: %v", err)
t.Errorf("Expected no error (just warning) for decreased count, got: %v", err)
}
})
@@ -243,79 +198,25 @@ func TestValidatePostImport(t *testing.T) {
}
func TestValidatePostImportWithExpectedDeletions(t *testing.T) {
// Note: With tombstones as the deletion mechanism, validatePostImportWithExpectedDeletions
// no longer fails on decreases - it only warns. The deletions.jsonl validation has been removed.
t.Run("decrease fully accounted for by expected deletions succeeds", func(t *testing.T) {
tmpDir := t.TempDir()
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
// No deletions.jsonl needed - expected deletions from sanitize step
err := validatePostImportWithExpectedDeletions(26, 25, 1, jsonlPath)
if err != nil {
t.Errorf("Expected no error when decrease matches expected deletions, got: %v", err)
}
})
t.Run("decrease exceeds expected deletions but within manifest succeeds", func(t *testing.T) {
t.Run("decrease exceeds expected deletions warns but succeeds", func(t *testing.T) {
tmpDir := t.TempDir()
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
deletionsPath := filepath.Join(tmpDir, "deletions.jsonl")
// Create deletions file with 3 deletions
deletionsContent := `{"id":"del-1","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-2","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-3","ts":"2024-01-01T00:00:00Z","by":"test"}
`
if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0600); err != nil {
t.Fatalf("Failed to write deletions file: %v", err)
}
// Decrease of 5, expected 2, remaining 3 covered by manifest
// Decrease of 5, expected 2 - used to fail, now warns
err := validatePostImportWithExpectedDeletions(20, 15, 2, jsonlPath)
if err != nil {
t.Errorf("Expected no error when decrease covered by expected + manifest, got: %v", err)
}
})
t.Run("decrease exceeds expected and manifest fails", func(t *testing.T) {
tmpDir := t.TempDir()
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
deletionsPath := filepath.Join(tmpDir, "deletions.jsonl")
// Create deletions file with only 1 deletion
deletionsContent := `{"id":"del-1","ts":"2024-01-01T00:00:00Z","by":"test"}
`
if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0600); err != nil {
t.Fatalf("Failed to write deletions file: %v", err)
}
// Decrease of 10, expected 2, remaining 8 exceeds 1 in manifest
err := validatePostImportWithExpectedDeletions(20, 10, 2, jsonlPath)
if err == nil {
t.Error("Expected error when decrease exceeds expected + manifest, got nil")
}
if err != nil && !strings.Contains(err.Error(), "exceeds") {
t.Errorf("Expected 'exceeds' error, got: %v", err)
}
})
t.Run("zero expected deletions falls back to manifest check", func(t *testing.T) {
tmpDir := t.TempDir()
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
deletionsPath := filepath.Join(tmpDir, "deletions.jsonl")
// Create deletions file with 5 deletions
deletionsContent := `{"id":"del-1","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-2","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-3","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-4","ts":"2024-01-01T00:00:00Z","by":"test"}
{"id":"del-5","ts":"2024-01-01T00:00:00Z","by":"test"}
`
if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0600); err != nil {
t.Fatalf("Failed to write deletions file: %v", err)
}
// Same as validatePostImport: decrease of 5 covered by 5 in manifest
err := validatePostImportWithExpectedDeletions(10, 5, 0, jsonlPath)
if err != nil {
t.Errorf("Expected no error when decrease matches manifest with zero expected, got: %v", err)
t.Errorf("Expected no error (just warning) for decreased count, got: %v", err)
}
})

View File

@@ -1,17 +1,73 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/types"
)
// legacyDeletionRecordCmd represents a single deletion entry from the legacy deletions.jsonl manifest.
// This is inlined here for migration purposes only - new code uses inline tombstones.
type legacyDeletionRecordCmd struct {
ID string `json:"id"` // Issue ID that was deleted
Timestamp time.Time `json:"ts"` // When the deletion occurred
Actor string `json:"by"` // Who performed the deletion
Reason string `json:"reason,omitempty"` // Optional reason for deletion
}
// loadLegacyDeletionsCmd reads the legacy deletions.jsonl manifest.
// Returns a map of deletion records keyed by issue ID and any warnings.
// This is inlined here for migration purposes only.
func loadLegacyDeletionsCmd(path string) (map[string]legacyDeletionRecordCmd, []string, error) {
records := make(map[string]legacyDeletionRecordCmd)
var warnings []string
f, err := os.Open(path) // #nosec G304 - controlled path from caller
if err != nil {
if os.IsNotExist(err) {
return records, nil, nil
}
return nil, nil, fmt.Errorf("failed to open deletions file: %w", err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
scanner.Buffer(make([]byte, 0, 1024), 1024*1024)
lineNum := 0
for scanner.Scan() {
lineNum++
line := scanner.Text()
if line == "" {
continue
}
var record legacyDeletionRecordCmd
if err := json.Unmarshal([]byte(line), &record); err != nil {
warnings = append(warnings, fmt.Sprintf("line %d: invalid JSON", lineNum))
continue
}
if record.ID == "" {
warnings = append(warnings, fmt.Sprintf("line %d: missing ID", lineNum))
continue
}
records[record.ID] = record
}
if err := scanner.Err(); err != nil {
return nil, nil, fmt.Errorf("error reading deletions file: %w", err)
}
return records, warnings, nil
}
var migrateTombstonesCmd = &cobra.Command{
Use: "migrate-tombstones",
Short: "Convert deletions.jsonl entries to inline tombstones",
@@ -59,11 +115,11 @@ Examples:
}
// Check paths
deletionsPath := deletions.DefaultPath(beadsDir)
deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
issuesPath := filepath.Join(beadsDir, "issues.jsonl")
// Load existing deletions
loadResult, err := deletions.LoadDeletions(deletionsPath)
records, warnings, err := loadLegacyDeletionsCmd(deletionsPath)
if err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -76,13 +132,13 @@ Examples:
os.Exit(1)
}
if len(loadResult.Records) == 0 {
if len(records) == 0 {
if jsonOutput {
outputJSON(map[string]interface{}{
"status": "noop",
"message": "No deletions to migrate",
"status": "noop",
"message": "No deletions to migrate",
"migrated": 0,
"skipped": 0,
"skipped": 0,
})
} else {
fmt.Println("No deletions.jsonl entries to migrate")
@@ -91,7 +147,7 @@ Examples:
}
// Print warnings from loading
for _, warning := range loadResult.Warnings {
for _, warning := range warnings {
if !jsonOutput {
color.Yellow("Warning: %s\n", warning)
}
@@ -132,9 +188,9 @@ Examples:
}
// Determine which deletions need migration
var toMigrate []deletions.DeletionRecord
var toMigrate []legacyDeletionRecordCmd
var skippedIDs []string
for id, record := range loadResult.Records {
for id, record := range records {
if existingTombstones[id] {
skippedIDs = append(skippedIDs, id)
if verbose && !jsonOutput {
@@ -148,10 +204,10 @@ Examples:
if len(toMigrate) == 0 {
if jsonOutput {
outputJSON(map[string]interface{}{
"status": "noop",
"message": "All deletions already migrated to tombstones",
"status": "noop",
"message": "All deletions already migrated to tombstones",
"migrated": 0,
"skipped": len(skippedIDs),
"skipped": len(skippedIDs),
})
} else {
fmt.Printf("All %d deletion(s) already have tombstones in issues.jsonl\n", len(skippedIDs))
@@ -163,10 +219,10 @@ Examples:
if dryRun {
if jsonOutput {
outputJSON(map[string]interface{}{
"dry_run": true,
"dry_run": true,
"would_migrate": len(toMigrate),
"skipped": len(skippedIDs),
"total": len(loadResult.Records),
"skipped": len(skippedIDs),
"total": len(records),
})
} else {
fmt.Println("Dry run mode - no changes will be made")
@@ -208,12 +264,12 @@ Examples:
encoder := json.NewEncoder(file)
var migratedIDs []string
for _, record := range toMigrate {
tombstone := convertDeletionRecordToTombstone(record)
tombstone := convertLegacyDeletionRecordToTombstone(record)
if err := encoder.Encode(tombstone); err != nil {
if jsonOutput {
outputJSON(map[string]interface{}{
"error": "write_tombstone_failed",
"message": err.Error(),
"error": "write_tombstone_failed",
"message": err.Error(),
"issue_id": record.ID,
})
} else {
@@ -241,11 +297,11 @@ Examples:
// Success output
if jsonOutput {
outputJSON(map[string]interface{}{
"status": "success",
"migrated": len(migratedIDs),
"skipped": len(skippedIDs),
"total": len(loadResult.Records),
"archive": archivePath,
"status": "success",
"migrated": len(migratedIDs),
"skipped": len(skippedIDs),
"total": len(records),
"archive": archivePath,
"migrated_ids": migratedIDs,
})
} else {
@@ -262,10 +318,8 @@ Examples:
},
}
// convertDeletionRecordToTombstone creates a tombstone issue from a deletion record.
// This is similar to the importer's convertDeletionToTombstone but operates on
// deletions.DeletionRecord directly.
func convertDeletionRecordToTombstone(del deletions.DeletionRecord) *types.Issue {
// convertLegacyDeletionRecordToTombstone creates a tombstone issue from a legacy deletion record.
func convertLegacyDeletionRecordToTombstone(del legacyDeletionRecordCmd) *types.Issue {
deletedAt := del.Timestamp
return &types.Issue{
ID: del.ID,

View File

@@ -1,292 +0,0 @@
package main
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/types"
)
func TestMigrateTombstones_NoDeletions(t *testing.T) {
	// Verifies that a .beads directory with no deletions.jsonl yields an
	// empty record set (nothing to migrate).
	// Setup: create temp .beads directory with no deletions.jsonl
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Create empty issues.jsonl
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")
	if err := os.WriteFile(issuesPath, []byte{}, 0600); err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}
	// Run in temp dir, restoring the original working directory afterwards.
	// Fix: the original discarded errors from os.Getwd/os.Chdir, which could
	// silently leave the test (and subsequent tests) in the wrong directory.
	oldWd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Failed to get working dir: %v", err)
	}
	defer func() {
		if err := os.Chdir(oldWd); err != nil {
			t.Errorf("Failed to restore working dir: %v", err)
		}
	}()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir to temp dir: %v", err)
	}
	// The command should report no deletions to migrate
	deletionsPath := deletions.DefaultPath(beadsDir)
	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loadResult.Records) != 0 {
		t.Errorf("Expected 0 deletions, got %d", len(loadResult.Records))
	}
}
func TestMigrateTombstones_WithDeletions(t *testing.T) {
	// Two legacy deletion records should convert into two well-formed
	// tombstones (status, DeletedAt, DeletedBy all populated).
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}

	// Write the legacy deletions.jsonl manifest.
	deletionsPath := deletions.DefaultPath(beadsDir)
	deleteTime := time.Now().Add(-24 * time.Hour)
	for _, rec := range []deletions.DeletionRecord{
		{ID: "test-abc", Timestamp: deleteTime, Actor: "alice", Reason: "duplicate"},
		{ID: "test-def", Timestamp: deleteTime.Add(-1 * time.Hour), Actor: "bob", Reason: "obsolete"},
	} {
		if err := deletions.AppendDeletion(deletionsPath, rec); err != nil {
			t.Fatalf("Failed to write deletion: %v", err)
		}
	}

	// An empty issues.jsonl means nothing is already tombstoned.
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")
	if err := os.WriteFile(issuesPath, []byte{}, 0600); err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}

	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if got := len(loadResult.Records); got != 2 {
		t.Fatalf("Expected 2 deletions, got %d", got)
	}

	// Convert each record and check the tombstone invariants.
	for _, record := range loadResult.Records {
		ts := convertDeletionRecordToTombstone(record)
		if ts.Status != types.StatusTombstone {
			t.Errorf("Expected status tombstone, got %s", ts.Status)
		}
		if ts.DeletedAt == nil {
			t.Error("Expected DeletedAt to be set")
		}
		if ts.DeletedBy == "" {
			t.Error("Expected DeletedBy to be set")
		}
	}
}
func TestMigrateTombstones_SkipsExistingTombstones(t *testing.T) {
	// Verifies that deletion records whose IDs already have tombstones in
	// issues.jsonl are counted as skipped rather than migrated again.
	// Setup: create temp .beads directory
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Create deletions.jsonl with some entries
	deletionsPath := deletions.DefaultPath(beadsDir)
	deleteTime := time.Now().Add(-24 * time.Hour)
	records := []deletions.DeletionRecord{
		{ID: "test-abc", Timestamp: deleteTime, Actor: "alice", Reason: "duplicate"},
		{ID: "test-def", Timestamp: deleteTime.Add(-1 * time.Hour), Actor: "bob", Reason: "obsolete"},
	}
	for _, record := range records {
		if err := deletions.AppendDeletion(deletionsPath, record); err != nil {
			t.Fatalf("Failed to write deletion: %v", err)
		}
	}
	// Create issues.jsonl with an existing tombstone for test-abc
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")
	existingTombstone := types.Issue{
		ID:        "test-abc",
		Title:     "(deleted)",
		Status:    types.StatusTombstone,
		DeletedBy: "alice",
	}
	file, err := os.Create(issuesPath)
	if err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}
	encoder := json.NewEncoder(file)
	if err := encoder.Encode(existingTombstone); err != nil {
		file.Close()
		t.Fatalf("Failed to write existing tombstone: %v", err)
	}
	file.Close()
	// Load existing tombstones.
	// Fix: the original ignored the os.Open error (`file, _ = os.Open(...)`),
	// which would nil-pointer-panic in json.NewDecoder below if the open failed.
	existingTombstones := make(map[string]bool)
	file, err = os.Open(issuesPath)
	if err != nil {
		t.Fatalf("Failed to open issues.jsonl: %v", err)
	}
	decoder := json.NewDecoder(file)
	for {
		var issue types.Issue
		if err := decoder.Decode(&issue); err != nil {
			break // io.EOF ends the stream; malformed trailing data also stops here
		}
		if issue.IsTombstone() {
			existingTombstones[issue.ID] = true
		}
	}
	file.Close()
	// Load deletions
	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	// Count what should be migrated vs skipped
	var toMigrate, skipped int
	for id := range loadResult.Records {
		if existingTombstones[id] {
			skipped++
		} else {
			toMigrate++
		}
	}
	if toMigrate != 1 {
		t.Errorf("Expected 1 to migrate, got %d", toMigrate)
	}
	if skipped != 1 {
		t.Errorf("Expected 1 skipped, got %d", skipped)
	}
}
// TestConvertDeletionRecordToTombstone checks, field by field, that a
// DeletionRecord converts into a well-formed tombstone issue: identity,
// status, deletion metadata, and defaulted priority/type fields.
func TestConvertDeletionRecordToTombstone(t *testing.T) {
	when := time.Now().Add(-24 * time.Hour)
	rec := deletions.DeletionRecord{
		ID:        "test-xyz",
		Timestamp: when,
		Actor:     "alice",
		Reason:    "test reason",
	}
	ts := convertDeletionRecordToTombstone(rec)
	// Identity and tombstone marker fields.
	if ts.ID != "test-xyz" {
		t.Errorf("Expected ID test-xyz, got %s", ts.ID)
	}
	if ts.Status != types.StatusTombstone {
		t.Errorf("Expected status tombstone, got %s", ts.Status)
	}
	if ts.Title != "(deleted)" {
		t.Errorf("Expected title '(deleted)', got %s", ts.Title)
	}
	// Deletion metadata carried over from the record.
	if ts.DeletedBy != "alice" {
		t.Errorf("Expected DeletedBy 'alice', got %s", ts.DeletedBy)
	}
	if ts.DeleteReason != "test reason" {
		t.Errorf("Expected DeleteReason 'test reason', got %s", ts.DeleteReason)
	}
	switch {
	case ts.DeletedAt == nil:
		t.Error("Expected DeletedAt to be set")
	case !ts.DeletedAt.Equal(when):
		t.Errorf("Expected DeletedAt %v, got %v", when, *ts.DeletedAt)
	}
	// Fields with no source data fall back to defaults.
	if ts.Priority != 0 {
		t.Errorf("Expected priority 0 (unknown), got %d", ts.Priority)
	}
	if ts.IssueType != types.TypeTask {
		t.Errorf("Expected type task, got %s", ts.IssueType)
	}
	if ts.OriginalType != "" {
		t.Errorf("Expected empty OriginalType, got %s", ts.OriginalType)
	}
}
// TestMigrateTombstones_TombstonesAreValid verifies that migrated tombstones
// have the tombstone status set, so they won't be re-added to deletions manifest (bd-in7q fix)
func TestMigrateTombstones_TombstonesAreValid(t *testing.T) {
	root := t.TempDir()
	beadsDir := filepath.Join(root, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Seed deletions.jsonl with a single record to migrate.
	deletionsPath := deletions.DefaultPath(beadsDir)
	when := time.Now().Add(-24 * time.Hour)
	seed := []deletions.DeletionRecord{
		{ID: "test-abc", Timestamp: when, Actor: "alice", Reason: "duplicate"},
	}
	for _, rec := range seed {
		if err := deletions.AppendDeletion(deletionsPath, rec); err != nil {
			t.Fatalf("Failed to write deletion: %v", err)
		}
	}
	// Start from an empty issues.jsonl.
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")
	if err := os.WriteFile(issuesPath, []byte{}, 0600); err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}
	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	// Convert each record, checking the invariant as we go (this simulates
	// what migrate-tombstones does).
	var converted []*types.Issue
	for _, rec := range loadResult.Records {
		ts := convertDeletionRecordToTombstone(rec)
		// CRITICAL: Tombstones must have status "tombstone"
		// so they won't be re-added to deletions manifest on next sync (bd-in7q)
		if ts.Status != types.StatusTombstone {
			t.Errorf("Converted tombstone must have status 'tombstone', got %s", ts.Status)
		}
		converted = append(converted, ts)
	}
	if len(converted) != 1 {
		t.Fatalf("Expected 1 tombstone, got %d", len(converted))
	}
	got := converted[0]
	// These fields are critical for the doctor fix to work correctly.
	if got.ID != "test-abc" {
		t.Errorf("Expected ID test-abc, got %s", got.ID)
	}
	if got.Status != types.StatusTombstone {
		t.Errorf("Expected status tombstone, got %s", got.Status)
	}
	if got.DeletedBy != "alice" {
		t.Errorf("Expected DeletedBy 'alice', got %s", got.DeletedBy)
	}
}

View File

@@ -10,15 +10,12 @@ import (
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/git"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/syncbranch"
@@ -609,29 +606,6 @@ Use --merge to merge the sync branch back to main branch.`,
}
}
// Step 3.6: Sanitize JSONL - remove any resurrected zombies
// Git's 3-way merge may re-add deleted issues to JSONL.
// We must remove them before import to prevent resurrection.
sanitizeResult, err := sanitizeJSONLWithDeletions(jsonlPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to sanitize JSONL: %v\n", err)
// Non-fatal - continue with import
} else {
// bd-3ee1 fix: Log protected issues (local work that would have been incorrectly removed)
if sanitizeResult.ProtectedCount > 0 {
fmt.Printf("→ Protected %d locally exported issue(s) from incorrect sanitization (bd-3ee1)\n", sanitizeResult.ProtectedCount)
for _, id := range sanitizeResult.ProtectedIDs {
fmt.Printf(" - %s (in left snapshot)\n", id)
}
}
if sanitizeResult.RemovedCount > 0 {
fmt.Printf("→ Sanitized JSONL: removed %d deleted issue(s) that were resurrected by git merge\n", sanitizeResult.RemovedCount)
for _, id := range sanitizeResult.RemovedIDs {
fmt.Printf(" - %s\n", id)
}
}
}
// Step 4: Import updated JSONL after pull
// Enable --protect-left-snapshot to prevent git-history-backfill from
// tombstoning issues that were in our local export but got lost during merge (bd-sync-deletion fix)
@@ -648,12 +622,7 @@ Use --merge to merge the sync branch back to main branch.`,
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to count issues after import: %v\n", err)
} else {
// Account for expected deletions from sanitize step (bd-tt0 fix)
expectedDeletions := 0
if sanitizeResult != nil {
expectedDeletions = sanitizeResult.RemovedCount
}
if err := validatePostImportWithExpectedDeletions(beforeCount, afterCount, expectedDeletions, jsonlPath); err != nil {
if err := validatePostImportWithExpectedDeletions(beforeCount, afterCount, 0, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Post-import validation failed: %v\n", err)
os.Exit(1)
}
@@ -765,12 +734,6 @@ Use --merge to merge the sync branch back to main branch.`,
fmt.Fprintf(os.Stderr, "Warning: failed to clean up snapshots: %v\n", err)
}
// Auto-compact deletions manifest if enabled and threshold exceeded
if err := maybeAutoCompactDeletions(ctx, jsonlPath); err != nil {
// Non-fatal - just log warning
fmt.Fprintf(os.Stderr, "Warning: auto-compact deletions failed: %v\n", err)
}
// When using sync.branch, restore .beads/ from current branch to keep
// working directory clean. The actual beads data lives on the sync branch,
// and the main branch's .beads/ should match what's committed there.
@@ -1688,234 +1651,6 @@ func importFromJSONL(ctx context.Context, jsonlPath string, renameOnImport bool,
return nil
}
// Default configuration values for auto-compact
const (
	// defaultAutoCompact: auto-compaction of the deletions manifest is
	// opt-in; it stays off unless enabled via the "deletions.auto_compact"
	// config key.
	defaultAutoCompact = false
	// defaultAutoCompactThreshold: entry count above which the manifest is
	// pruned when auto-compact is enabled.
	defaultAutoCompactThreshold = 1000
)
// maybeAutoCompactDeletions checks if auto-compact is enabled and threshold exceeded,
// and if so, prunes the deletions manifest.
//
// Configuration is read from the global store: "deletions.auto_compact"
// (enabled only for "true"/"1"/"yes"), "deletions.auto_compact_threshold"
// (entry count, default 1000), and "deletions.retention_days" (default taken
// from configfile). Missing or unreadable config is treated as "not
// configured" and skipped silently; only manifest I/O errors are returned.
func maybeAutoCompactDeletions(ctx context.Context, jsonlPath string) error {
	// Ensure store is initialized for config access
	if err := ensureStoreActive(); err != nil {
		return nil // Can't access config, skip silently
	}
	// Check if auto-compact is enabled (disabled by default)
	autoCompactStr, err := store.GetConfig(ctx, "deletions.auto_compact")
	if err != nil || autoCompactStr == "" {
		return nil // Not configured, skip
	}
	autoCompact := autoCompactStr == "true" || autoCompactStr == "1" || autoCompactStr == "yes"
	if !autoCompact {
		return nil // Disabled, skip
	}
	// Get threshold (default 1000); invalid or non-positive values are ignored
	threshold := defaultAutoCompactThreshold
	if thresholdStr, err := store.GetConfig(ctx, "deletions.auto_compact_threshold"); err == nil && thresholdStr != "" {
		if parsed, err := strconv.Atoi(thresholdStr); err == nil && parsed > 0 {
			threshold = parsed
		}
	}
	// Get deletions path (deletions.jsonl lives beside the issues JSONL)
	beadsDir := filepath.Dir(jsonlPath)
	deletionsPath := deletions.DefaultPath(beadsDir)
	// Count current deletions
	count, err := deletions.Count(deletionsPath)
	if err != nil {
		return fmt.Errorf("failed to count deletions: %w", err)
	}
	// Check if threshold exceeded
	if count <= threshold {
		return nil // Below threshold, skip
	}
	// Get retention days (default 7); invalid values are ignored
	retentionDays := configfile.DefaultDeletionsRetentionDays
	if retentionStr, err := store.GetConfig(ctx, "deletions.retention_days"); err == nil && retentionStr != "" {
		if parsed, err := strconv.Atoi(retentionStr); err == nil && parsed > 0 {
			retentionDays = parsed
		}
	}
	// Prune deletions older than the retention window
	fmt.Printf("→ Auto-compacting deletions manifest (%d entries > %d threshold)...\n", count, threshold)
	result, err := deletions.PruneDeletions(deletionsPath, retentionDays)
	if err != nil {
		return fmt.Errorf("failed to prune deletions: %w", err)
	}
	if result.PrunedCount > 0 {
		fmt.Printf(" Pruned %d entries older than %d days, kept %d entries\n",
			result.PrunedCount, retentionDays, result.KeptCount)
	} else {
		fmt.Printf(" No entries older than %d days to prune\n", retentionDays)
	}
	return nil
}
// SanitizeResult contains statistics about the JSONL sanitization operation.
// Both ID slices are always non-nil so callers can range over them directly.
type SanitizeResult struct {
	RemovedCount   int      // Number of issues removed from JSONL
	RemovedIDs     []string // IDs that were removed
	ProtectedCount int      // Number of issues protected from removal (bd-3ee1)
	ProtectedIDs   []string // IDs that were protected
}
// sanitizeJSONLWithDeletions removes non-tombstone issues from the JSONL file
// if they are in the deletions manifest. This prevents zombie resurrection when
// git's 3-way merge re-adds deleted issues to the JSONL during pull.
//
// IMPORTANT (bd-kzxd fix): Tombstones are NOT removed. Tombstones are the proper
// representation of deletions in the JSONL format. Removing them would cause
// the importer to re-create tombstones from deletions.jsonl, leading to
// UNIQUE constraint errors when the tombstone already exists in the database.
//
// IMPORTANT (bd-3ee1 fix): Issues that were in the left snapshot (local export
// before pull) are protected from removal. This prevents newly created issues
// from being incorrectly removed when they happen to have an ID that matches
// an entry in the deletions manifest (possible with hash-based IDs if content
// is similar to a previously deleted issue).
//
// This should be called after git pull but before import.
//
// Returns a SanitizeResult describing removed and protected IDs. The JSONL
// file is rewritten (atomically, via temp file + rename) only when at least
// one issue was actually removed.
func sanitizeJSONLWithDeletions(jsonlPath string) (*SanitizeResult, error) {
	result := &SanitizeResult{
		RemovedIDs:   []string{},
		ProtectedIDs: []string{},
	}
	// Get deletions manifest path (sibling of the issues JSONL)
	beadsDir := filepath.Dir(jsonlPath)
	deletionsPath := deletions.DefaultPath(beadsDir)
	// Load deletions manifest
	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		return nil, fmt.Errorf("failed to load deletions manifest: %w", err)
	}
	// If no deletions, nothing to sanitize
	if len(loadResult.Records) == 0 {
		return result, nil
	}
	// bd-3ee1 fix: Load left snapshot to protect locally exported issues
	// Issues in the left snapshot were exported before pull and represent
	// local work that should not be removed by sanitize.
	// Best-effort: a missing/unreadable snapshot simply means no protection.
	sm := NewSnapshotManager(jsonlPath)
	_, leftPath := sm.getSnapshotPaths()
	protectedIDs := make(map[string]bool)
	if leftIDs, err := sm.buildIDSet(leftPath); err == nil && len(leftIDs) > 0 {
		protectedIDs = leftIDs
	}
	// Read current JSONL
	f, err := os.Open(jsonlPath) // #nosec G304 - controlled path
	if err != nil {
		if os.IsNotExist(err) {
			return result, nil // No JSONL file yet
		}
		return nil, fmt.Errorf("failed to open JSONL: %w", err)
	}
	var keptLines [][]byte
	scanner := bufio.NewScanner(f)
	// Allow large lines (up to 10MB for issues with large descriptions)
	scanner.Buffer(make([]byte, 0, 64*1024), 10*1024*1024)
	for scanner.Scan() {
		line := scanner.Bytes()
		if len(bytes.TrimSpace(line)) == 0 {
			continue
		}
		// Extract ID and status to check for tombstones; scanner.Bytes() is
		// reused between iterations, so kept lines must be copied below.
		var issue struct {
			ID     string `json:"id"`
			Status string `json:"status"`
		}
		if err := json.Unmarshal(line, &issue); err != nil {
			// Keep malformed lines (let import handle them)
			keptLines = append(keptLines, append([]byte{}, line...))
			continue
		}
		// Check if this ID is in deletions manifest
		if _, deleted := loadResult.Records[issue.ID]; deleted {
			// bd-kzxd fix: Keep tombstones! They are the proper representation of deletions.
			// Only remove non-tombstone issues that were resurrected by git merge.
			if issue.Status == string(types.StatusTombstone) {
				// Keep the tombstone - it's the authoritative deletion record
				keptLines = append(keptLines, append([]byte{}, line...))
			} else if protectedIDs[issue.ID] {
				// bd-3ee1 fix: Issue was in left snapshot (local export before pull)
				// This is local work, not a resurrected zombie - protect it!
				keptLines = append(keptLines, append([]byte{}, line...))
				result.ProtectedCount++
				result.ProtectedIDs = append(result.ProtectedIDs, issue.ID)
			} else {
				// Remove non-tombstone issue that was resurrected
				result.RemovedCount++
				result.RemovedIDs = append(result.RemovedIDs, issue.ID)
			}
		} else {
			keptLines = append(keptLines, append([]byte{}, line...))
		}
	}
	if err := scanner.Err(); err != nil {
		_ = f.Close()
		return nil, fmt.Errorf("failed to read JSONL: %w", err)
	}
	_ = f.Close()
	// If nothing was removed, we're done (protected-only runs need no rewrite)
	if result.RemovedCount == 0 {
		return result, nil
	}
	// Write sanitized JSONL atomically
	dir := filepath.Dir(jsonlPath)
	base := filepath.Base(jsonlPath)
	tempFile, err := os.CreateTemp(dir, base+".sanitize.*")
	if err != nil {
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}
	tempPath := tempFile.Name()
	defer func() {
		_ = tempFile.Close()
		_ = os.Remove(tempPath) // Clean up on error; harmless no-op after successful rename
	}()
	for _, line := range keptLines {
		if _, err := tempFile.Write(line); err != nil {
			return nil, fmt.Errorf("failed to write line: %w", err)
		}
		if _, err := tempFile.Write([]byte("\n")); err != nil {
			return nil, fmt.Errorf("failed to write newline: %w", err)
		}
	}
	if err := tempFile.Close(); err != nil {
		return nil, fmt.Errorf("failed to close temp file: %w", err)
	}
	// Atomic replace
	if err := os.Rename(tempPath, jsonlPath); err != nil {
		return nil, fmt.Errorf("failed to replace JSONL: %w", err)
	}
	return result, nil
}
// resolveNoGitHistoryForFromMain returns the resolved noGitHistory value for sync operations.
// When syncing from main (--from-main), noGitHistory is forced to true to prevent creating
// incorrect deletion records for locally-created beads that don't exist on main.

View File

@@ -577,657 +577,6 @@ func TestZFCSkipsExportAfterImport(t *testing.T) {
t.Logf("✓ ZFC fix verified: DB synced from 100 to 10 issues, JSONL unchanged")
}
// TestMaybeAutoCompactDeletions_Disabled verifies that auto-compact is a
// silent no-op when the feature has not been enabled in config.
func TestMaybeAutoCompactDeletions_Disabled(t *testing.T) {
	ctx := context.Background()
	root := t.TempDir()
	beadsDir := filepath.Join(root, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	dbPath := filepath.Join(beadsDir, "beads.db")
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	st, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer st.Close()
	// Swap our store into the globals used by maybeAutoCompactDeletions,
	// restoring the previous values on exit.
	prevStore, prevActive := store, storeActive
	defer func() {
		store = prevStore
		storeActive = prevActive
	}()
	store = st
	storeActive = true
	if err := os.WriteFile(jsonlPath, []byte{}, 0644); err != nil {
		t.Fatalf("failed to create JSONL: %v", err)
	}
	// Auto-compact is disabled by default, so should return nil
	if err := maybeAutoCompactDeletions(ctx, jsonlPath); err != nil {
		t.Errorf("expected no error when auto-compact disabled, got: %v", err)
	}
}
// TestMaybeAutoCompactDeletions_Enabled exercises the full auto-compact path:
// with the feature enabled, a low threshold (5), and a one-day retention
// window, old entries are pruned and recent entries are kept.
func TestMaybeAutoCompactDeletions_Enabled(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()
	// Create test database
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	testDBPath := filepath.Join(beadsDir, "beads.db")
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	// Create store
	testStore, err := sqlite.New(ctx, testDBPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer testStore.Close()
	// Enable auto-compact with low threshold
	if err := testStore.SetConfig(ctx, "deletions.auto_compact", "true"); err != nil {
		t.Fatalf("failed to set auto_compact config: %v", err)
	}
	if err := testStore.SetConfig(ctx, "deletions.auto_compact_threshold", "5"); err != nil {
		t.Fatalf("failed to set threshold config: %v", err)
	}
	if err := testStore.SetConfig(ctx, "deletions.retention_days", "1"); err != nil {
		t.Fatalf("failed to set retention config: %v", err)
	}
	// Set global store for maybeAutoCompactDeletions
	// Save and restore original values
	originalStore := store
	originalStoreActive := storeActive
	defer func() {
		store = originalStore
		storeActive = originalStoreActive
	}()
	store = testStore
	storeActive = true
	// Create empty JSONL file
	if err := os.WriteFile(jsonlPath, []byte{}, 0644); err != nil {
		t.Fatalf("failed to create JSONL: %v", err)
	}
	// Create deletions file with entries (some old, some recent)
	now := time.Now()
	deletionsContent := ""
	// Add 10 old entries (10 days old > 1 day retention, so will be pruned)
	for i := 0; i < 10; i++ {
		oldTime := now.AddDate(0, 0, -10).Format(time.RFC3339)
		deletionsContent += fmt.Sprintf(`{"id":"bd-old-%d","ts":"%s","by":"user"}`, i, oldTime) + "\n"
	}
	// Add 3 recent entries (1 hour old < 1 day retention, so will be kept)
	for i := 0; i < 3; i++ {
		recentTime := now.Add(-1 * time.Hour).Format(time.RFC3339)
		deletionsContent += fmt.Sprintf(`{"id":"bd-recent-%d","ts":"%s","by":"user"}`, i, recentTime) + "\n"
	}
	if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0644); err != nil {
		t.Fatalf("failed to create deletions file: %v", err)
	}
	// Verify initial count (13 entries > threshold of 5, so compaction triggers)
	initialCount := strings.Count(deletionsContent, "\n")
	if initialCount != 13 {
		t.Fatalf("expected 13 initial entries, got %d", initialCount)
	}
	// Run auto-compact
	err = maybeAutoCompactDeletions(ctx, jsonlPath)
	if err != nil {
		t.Errorf("auto-compact failed: %v", err)
	}
	// Read deletions file and count remaining entries
	afterContent, err := os.ReadFile(deletionsPath)
	if err != nil {
		t.Fatalf("failed to read deletions file: %v", err)
	}
	afterLines := strings.Split(strings.TrimSpace(string(afterContent)), "\n")
	afterCount := 0
	for _, line := range afterLines {
		if line != "" {
			afterCount++
		}
	}
	// Should have pruned old entries, kept recent ones
	if afterCount != 3 {
		t.Errorf("expected 3 entries after prune (recent ones), got %d", afterCount)
	}
}
// TestMaybeAutoCompactDeletions_BelowThreshold verifies that auto-compact
// leaves the deletions manifest byte-for-byte unchanged when the entry count
// does not exceed the configured threshold.
func TestMaybeAutoCompactDeletions_BelowThreshold(t *testing.T) {
	ctx := context.Background()
	root := t.TempDir()
	beadsDir := filepath.Join(root, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	dbPath := filepath.Join(beadsDir, "beads.db")
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	st, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("failed to create store: %v", err)
	}
	defer st.Close()
	// Enable auto-compact, but with a threshold far above the entry count.
	if err := st.SetConfig(ctx, "deletions.auto_compact", "true"); err != nil {
		t.Fatalf("failed to set auto_compact config: %v", err)
	}
	if err := st.SetConfig(ctx, "deletions.auto_compact_threshold", "100"); err != nil {
		t.Fatalf("failed to set threshold config: %v", err)
	}
	// Swap our store into the globals used by maybeAutoCompactDeletions,
	// restoring the previous values on exit.
	prevStore, prevActive := store, storeActive
	defer func() {
		store = prevStore
		storeActive = prevActive
	}()
	store = st
	storeActive = true
	if err := os.WriteFile(jsonlPath, []byte{}, 0644); err != nil {
		t.Fatalf("failed to create JSONL: %v", err)
	}
	// Only 5 entries: well below the threshold of 100.
	now := time.Now()
	var b strings.Builder
	for i := 0; i < 5; i++ {
		ts := now.Add(-1 * time.Hour).Format(time.RFC3339)
		fmt.Fprintf(&b, `{"id":"bd-%d","ts":"%s","by":"user"}`+"\n", i, ts)
	}
	deletionsContent := b.String()
	if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0644); err != nil {
		t.Fatalf("failed to create deletions file: %v", err)
	}
	// Run auto-compact - should skip because below threshold
	if err := maybeAutoCompactDeletions(ctx, jsonlPath); err != nil {
		t.Errorf("auto-compact failed: %v", err)
	}
	// The manifest must be unchanged.
	afterContent, err := os.ReadFile(deletionsPath)
	if err != nil {
		t.Fatalf("failed to read deletions file: %v", err)
	}
	if string(afterContent) != deletionsContent {
		t.Error("deletions file should not be modified when below threshold")
	}
}
// TestSanitizeJSONLWithDeletions_NoDeletions verifies that sanitize is a
// no-op (and leaves the JSONL untouched) when no deletions.jsonl exists.
func TestSanitizeJSONLWithDeletions_NoDeletions(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile/ReadFile,
	// which could mask setup failures and produce confusing results.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	jsonlContent := `{"id":"bd-1","title":"Issue 1"}
{"id":"bd-2","title":"Issue 2"}
{"id":"bd-3","title":"Issue 3"}
`
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	// No deletions.jsonl file - should return without changes
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result.RemovedCount != 0 {
		t.Errorf("expected 0 removed, got %d", result.RemovedCount)
	}
	// Verify JSONL unchanged
	afterContent, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	if string(afterContent) != jsonlContent {
		t.Error("JSONL should not be modified when no deletions")
	}
}
// TestSanitizeJSONLWithDeletions_EmptyDeletions verifies that an existing but
// empty deletions.jsonl results in no removals.
func TestSanitizeJSONLWithDeletions_EmptyDeletions(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile, which could
	// mask setup failures.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	jsonlContent := `{"id":"bd-1","title":"Issue 1"}
{"id":"bd-2","title":"Issue 2"}
`
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	if err := os.WriteFile(deletionsPath, []byte(""), 0644); err != nil {
		t.Fatalf("failed to write deletions file: %v", err)
	}
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result.RemovedCount != 0 {
		t.Errorf("expected 0 removed, got %d", result.RemovedCount)
	}
}
// TestSanitizeJSONLWithDeletions_RemovesDeletedIssues verifies the core case:
// non-tombstone issues whose IDs appear in the deletions manifest are removed
// from the JSONL, while unrelated issues are preserved.
func TestSanitizeJSONLWithDeletions_RemovesDeletedIssues(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile/ReadFile and
	// countIssuesInJSONL, which could mask setup or verification failures.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	// JSONL with 4 issues
	jsonlContent := `{"id":"bd-1","title":"Issue 1"}
{"id":"bd-2","title":"Issue 2"}
{"id":"bd-3","title":"Issue 3"}
{"id":"bd-4","title":"Issue 4"}
`
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	// Deletions manifest marks bd-2 and bd-4 as deleted
	now := time.Now().Format(time.RFC3339)
	deletionsContent := fmt.Sprintf(`{"id":"bd-2","ts":"%s","by":"user","reason":"cleanup"}
{"id":"bd-4","ts":"%s","by":"user","reason":"duplicate"}
`, now, now)
	if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0644); err != nil {
		t.Fatalf("failed to write deletions file: %v", err)
	}
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result.RemovedCount != 2 {
		t.Errorf("expected 2 removed, got %d", result.RemovedCount)
	}
	if len(result.RemovedIDs) != 2 {
		t.Errorf("expected 2 RemovedIDs, got %d", len(result.RemovedIDs))
	}
	// Verify correct IDs were removed
	removedMap := make(map[string]bool)
	for _, id := range result.RemovedIDs {
		removedMap[id] = true
	}
	if !removedMap["bd-2"] || !removedMap["bd-4"] {
		t.Errorf("expected bd-2 and bd-4 to be removed, got %v", result.RemovedIDs)
	}
	// Verify JSONL now only has bd-1 and bd-3
	afterContent, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	afterCount, err := countIssuesInJSONL(jsonlPath)
	if err != nil {
		t.Fatalf("failed to count issues: %v", err)
	}
	if afterCount != 2 {
		t.Errorf("expected 2 issues in JSONL after sanitize, got %d", afterCount)
	}
	if !strings.Contains(string(afterContent), `"id":"bd-1"`) {
		t.Error("JSONL should still contain bd-1")
	}
	if !strings.Contains(string(afterContent), `"id":"bd-3"`) {
		t.Error("JSONL should still contain bd-3")
	}
	if strings.Contains(string(afterContent), `"id":"bd-2"`) {
		t.Error("JSONL should NOT contain deleted bd-2")
	}
	if strings.Contains(string(afterContent), `"id":"bd-4"`) {
		t.Error("JSONL should NOT contain deleted bd-4")
	}
}
// TestSanitizeJSONLWithDeletions_NoMatchingDeletions verifies that nothing is
// removed when the deletions manifest references only IDs absent from the JSONL.
func TestSanitizeJSONLWithDeletions_NoMatchingDeletions(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile/ReadFile,
	// which could mask setup failures.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	// JSONL with issues
	jsonlContent := `{"id":"bd-1","title":"Issue 1"}
{"id":"bd-2","title":"Issue 2"}
`
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	// Deletions for different IDs
	now := time.Now().Format(time.RFC3339)
	deletionsContent := fmt.Sprintf(`{"id":"bd-99","ts":"%s","by":"user"}
{"id":"bd-100","ts":"%s","by":"user"}
`, now, now)
	if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0644); err != nil {
		t.Fatalf("failed to write deletions file: %v", err)
	}
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result.RemovedCount != 0 {
		t.Errorf("expected 0 removed (no matching IDs), got %d", result.RemovedCount)
	}
	// Verify JSONL unchanged
	afterContent, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	if string(afterContent) != jsonlContent {
		t.Error("JSONL should not be modified when no matching deletions")
	}
}
// TestSanitizeJSONLWithDeletions_PreservesMalformedLines verifies that lines
// that fail to parse as JSON are kept (the importer decides their fate) while
// deleted issues are still removed.
func TestSanitizeJSONLWithDeletions_PreservesMalformedLines(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile/ReadFile,
	// which could mask setup failures.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	// JSONL with a malformed line
	jsonlContent := `{"id":"bd-1","title":"Issue 1"}
this is not valid json
{"id":"bd-2","title":"Issue 2"}
`
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	// Delete bd-2
	now := time.Now().Format(time.RFC3339)
	if err := os.WriteFile(deletionsPath, []byte(fmt.Sprintf(`{"id":"bd-2","ts":"%s","by":"user"}`, now)+"\n"), 0644); err != nil {
		t.Fatalf("failed to write deletions file: %v", err)
	}
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if result.RemovedCount != 1 {
		t.Errorf("expected 1 removed, got %d", result.RemovedCount)
	}
	// Verify malformed line is preserved (let import handle it)
	afterContent, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	if !strings.Contains(string(afterContent), "this is not valid json") {
		t.Error("malformed line should be preserved")
	}
	if !strings.Contains(string(afterContent), `"id":"bd-1"`) {
		t.Error("bd-1 should be preserved")
	}
	if strings.Contains(string(afterContent), `"id":"bd-2"`) {
		t.Error("bd-2 should be removed")
	}
}
// TestSanitizeJSONLWithDeletions_NonexistentJSONL verifies that sanitize
// treats a missing issues JSONL as "nothing to do" rather than an error.
func TestSanitizeJSONLWithDeletions_NonexistentJSONL(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile, which could
	// mask setup failures.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "nonexistent.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	// Create deletions file
	now := time.Now().Format(time.RFC3339)
	if err := os.WriteFile(deletionsPath, []byte(fmt.Sprintf(`{"id":"bd-1","ts":"%s","by":"user"}`, now)+"\n"), 0644); err != nil {
		t.Fatalf("failed to write deletions file: %v", err)
	}
	// Should handle missing JSONL gracefully
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error for missing JSONL: %v", err)
	}
	if result.RemovedCount != 0 {
		t.Errorf("expected 0 removed for missing file, got %d", result.RemovedCount)
	}
}
// TestSanitizeJSONLWithDeletions_PreservesTombstones tests the bd-kzxd fix:
// Tombstones should NOT be removed by sanitize, even if their ID is in deletions.jsonl.
// Tombstones ARE the proper representation of deletions. Removing them would cause
// the importer to re-create tombstones from deletions.jsonl, leading to UNIQUE
// constraint errors when the tombstone already exists in the database.
func TestSanitizeJSONLWithDeletions_PreservesTombstones(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile/ReadFile and
	// countIssuesInJSONL, which could mask setup or verification failures.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "beads.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	now := time.Now().Format(time.RFC3339)
	// JSONL with:
	// - bd-1: regular issue (should be kept)
	// - bd-2: tombstone (should be kept even though it's in deletions.jsonl)
	// - bd-3: regular issue that's in deletions.jsonl (should be removed)
	jsonlContent := fmt.Sprintf(`{"id":"bd-1","title":"Issue 1","status":"open"}
{"id":"bd-2","title":"(deleted)","status":"tombstone","deleted_at":"%s","deleted_by":"user"}
{"id":"bd-3","title":"Issue 3","status":"open"}
`, now)
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	// Deletions manifest marks bd-2 and bd-3 as deleted
	deletionsContent := fmt.Sprintf(`{"id":"bd-2","ts":"%s","by":"user","reason":"cleanup"}
{"id":"bd-3","ts":"%s","by":"user","reason":"duplicate"}
`, now, now)
	if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0644); err != nil {
		t.Fatalf("failed to write deletions file: %v", err)
	}
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Only bd-3 should be removed (non-tombstone issue in deletions)
	// bd-2 should be kept (it's a tombstone)
	if result.RemovedCount != 1 {
		t.Errorf("expected 1 removed (only non-tombstone), got %d", result.RemovedCount)
	}
	if len(result.RemovedIDs) != 1 || result.RemovedIDs[0] != "bd-3" {
		t.Errorf("expected only bd-3 to be removed, got %v", result.RemovedIDs)
	}
	// Verify JSONL content
	afterContent, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	afterStr := string(afterContent)
	// bd-1 should still be present (not in deletions)
	if !strings.Contains(afterStr, `"id":"bd-1"`) {
		t.Error("JSONL should still contain bd-1")
	}
	// bd-2 should still be present (tombstone - preserved!)
	if !strings.Contains(afterStr, `"id":"bd-2"`) {
		t.Error("JSONL should still contain bd-2 (tombstone should be preserved)")
	}
	if !strings.Contains(afterStr, `"status":"tombstone"`) {
		t.Error("JSONL should contain tombstone status")
	}
	// bd-3 should be removed (non-tombstone in deletions)
	if strings.Contains(afterStr, `"id":"bd-3"`) {
		t.Error("JSONL should NOT contain bd-3 (non-tombstone in deletions)")
	}
	// Verify we have exactly 2 issues left (bd-1 and bd-2)
	afterCount, err := countIssuesInJSONL(jsonlPath)
	if err != nil {
		t.Fatalf("failed to count issues: %v", err)
	}
	if afterCount != 2 {
		t.Errorf("expected 2 issues in JSONL after sanitize, got %d", afterCount)
	}
}
// TestSanitizeJSONLWithDeletions_ProtectsLeftSnapshot tests the bd-3ee1 fix:
// Issues that are in the left snapshot (local export before pull) should NOT be
// removed by sanitize, even if they have an ID that matches an entry in the
// deletions manifest. This prevents newly created issues from being incorrectly
// removed when they happen to have an ID that matches a previously deleted issue
// (possible with hash-based IDs if content is similar).
func TestSanitizeJSONLWithDeletions_ProtectsLeftSnapshot(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fix: the original ignored errors from MkdirAll/WriteFile/ReadFile and
	// countIssuesInJSONL, which could mask setup or verification failures.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("failed to create beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	leftSnapshotPath := filepath.Join(beadsDir, "beads.left.jsonl")
	now := time.Now().Format(time.RFC3339)
	// JSONL with:
	// - bd-1: regular issue (should be kept - not in deletions)
	// - bd-2: regular issue in deletions AND in left snapshot (should be PROTECTED)
	// - bd-3: regular issue in deletions but NOT in left snapshot (should be removed)
	jsonlContent := `{"id":"bd-1","title":"Issue 1","status":"open"}
{"id":"bd-2","title":"Issue 2","status":"open"}
{"id":"bd-3","title":"Issue 3","status":"open"}
`
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	// Left snapshot contains bd-1 and bd-2 (local work before pull)
	// bd-2 is the issue we're testing protection for
	leftSnapshotContent := `{"id":"bd-1","title":"Issue 1","status":"open"}
{"id":"bd-2","title":"Issue 2","status":"open"}
`
	if err := os.WriteFile(leftSnapshotPath, []byte(leftSnapshotContent), 0644); err != nil {
		t.Fatalf("failed to write left snapshot: %v", err)
	}
	// Deletions manifest marks bd-2 and bd-3 as deleted
	// bd-2 is in deletions but should be protected (it's in left snapshot)
	// bd-3 is in deletions and should be removed (it's NOT in left snapshot)
	deletionsContent := fmt.Sprintf(`{"id":"bd-2","ts":"%s","by":"user","reason":"old deletion with same ID as new issue"}
{"id":"bd-3","ts":"%s","by":"user","reason":"legitimate deletion"}
`, now, now)
	if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0644); err != nil {
		t.Fatalf("failed to write deletions file: %v", err)
	}
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// bd-3 should be removed (in deletions, not in left snapshot)
	if result.RemovedCount != 1 {
		t.Errorf("expected 1 removed, got %d", result.RemovedCount)
	}
	if len(result.RemovedIDs) != 1 || result.RemovedIDs[0] != "bd-3" {
		t.Errorf("expected only bd-3 to be removed, got %v", result.RemovedIDs)
	}
	// bd-2 should be protected (in left snapshot)
	if result.ProtectedCount != 1 {
		t.Errorf("expected 1 protected, got %d", result.ProtectedCount)
	}
	if len(result.ProtectedIDs) != 1 || result.ProtectedIDs[0] != "bd-2" {
		t.Errorf("expected bd-2 to be protected, got %v", result.ProtectedIDs)
	}
	// Verify JSONL content
	afterContent, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	afterStr := string(afterContent)
	// bd-1 should still be present (not in deletions)
	if !strings.Contains(afterStr, `"id":"bd-1"`) {
		t.Error("JSONL should still contain bd-1")
	}
	// bd-2 should still be present (protected by left snapshot - bd-3ee1 fix!)
	if !strings.Contains(afterStr, `"id":"bd-2"`) {
		t.Error("JSONL should still contain bd-2 (protected by left snapshot)")
	}
	// bd-3 should be removed (in deletions, not protected)
	if strings.Contains(afterStr, `"id":"bd-3"`) {
		t.Error("JSONL should NOT contain bd-3 (in deletions and not in left snapshot)")
	}
	// Verify we have exactly 2 issues left (bd-1 and bd-2)
	afterCount, err := countIssuesInJSONL(jsonlPath)
	if err != nil {
		t.Fatalf("failed to count issues: %v", err)
	}
	if afterCount != 2 {
		t.Errorf("expected 2 issues in JSONL after sanitize, got %d", afterCount)
	}
}
// TestSanitizeJSONLWithDeletions_NoLeftSnapshot tests that sanitize still works
// correctly when there's no left snapshot (e.g., first sync or snapshot cleanup).
func TestSanitizeJSONLWithDeletions_NoLeftSnapshot(t *testing.T) {
	t.Parallel()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	// Fail fast on setup errors so a fixture problem doesn't surface as a
	// confusing downstream assertion failure.
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("creating beads dir: %v", err)
	}
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	deletionsPath := filepath.Join(beadsDir, "deletions.jsonl")
	// NOTE: No left snapshot file created
	now := time.Now().Format(time.RFC3339)
	// JSONL with issues
	jsonlContent := `{"id":"bd-1","title":"Issue 1","status":"open"}
{"id":"bd-2","title":"Issue 2","status":"open"}
`
	if err := os.WriteFile(jsonlPath, []byte(jsonlContent), 0644); err != nil {
		t.Fatalf("writing issues.jsonl: %v", err)
	}
	// Deletions manifest marks bd-2 as deleted
	deletionsContent := fmt.Sprintf(`{"id":"bd-2","ts":"%s","by":"user","reason":"deleted"}
`, now)
	if err := os.WriteFile(deletionsPath, []byte(deletionsContent), 0644); err != nil {
		t.Fatalf("writing deletions.jsonl: %v", err)
	}
	result, err := sanitizeJSONLWithDeletions(jsonlPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Without left snapshot, bd-2 should be removed (no protection available)
	if result.RemovedCount != 1 {
		t.Errorf("expected 1 removed, got %d", result.RemovedCount)
	}
	if result.ProtectedCount != 0 {
		t.Errorf("expected 0 protected (no left snapshot), got %d", result.ProtectedCount)
	}
	// Verify JSONL content
	afterContent, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("reading issues.jsonl: %v", err)
	}
	afterStr := string(afterContent)
	if !strings.Contains(afterStr, `"id":"bd-1"`) {
		t.Error("JSONL should still contain bd-1")
	}
	if strings.Contains(afterStr, `"id":"bd-2"`) {
		t.Error("JSONL should NOT contain bd-2 (no left snapshot protection)")
	}
}
// TestHashBasedStalenessDetection_bd_f2f tests the bd-f2f fix:
// When JSONL content differs from stored hash (e.g., remote changed status),
// hasJSONLChanged should detect the mismatch even if counts are equal.