feat(tombstones): add migrate-tombstones command and compact pruning

- Add bd migrate-tombstones command (bd-8f9) to convert legacy
  deletions.jsonl entries to inline tombstones in issues.jsonl
  - Supports --dry-run to preview changes
  - Supports --verbose for detailed progress
  - Archives deletions.jsonl with .migrated suffix after migration

- Update bd compact to prune expired tombstones (bd-okh)
  - All compact modes now prune tombstones older than 30-day TTL
  - Reports count of pruned tombstones in output

- Add resurrection merge test (bd-bob)
  - Tests scenario where base is tombstone but both left/right resurrect

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-07 21:34:35 +11:00
parent 24917f27c2
commit 08d8353619
5 changed files with 974 additions and 0 deletions

View File

@@ -14,6 +14,7 @@ import (
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
var (
@@ -56,6 +57,11 @@ Deletions Pruning:
unbounded growth. Default retention is 3 days (configurable via --retention
or deletions_retention_days in metadata.json).
Tombstone Pruning:
All modes also prune expired tombstones from issues.jsonl. Tombstones are
soft-delete markers that prevent resurrection of deleted issues. After the
TTL expires (default 30 days), tombstones are removed to save space.
Examples:
# Agent-driven workflow (recommended)
bd compact --analyze --json # Get candidates with full content
@@ -306,6 +312,14 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
// Prune old deletion records
pruneDeletionsManifest()
// Prune expired tombstones (bd-okh)
if tombstonePruneResult, err := pruneExpiredTombstones(); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to prune expired tombstones: %v\n", err)
} else if tombstonePruneResult != nil && tombstonePruneResult.PrunedCount > 0 {
fmt.Printf("\nTombstones pruned: %d expired (older than %d days)\n",
tombstonePruneResult.PrunedCount, tombstonePruneResult.TTLDays)
}
// Schedule auto-flush to export changes
markDirtyAndScheduleFlush()
}
@@ -433,6 +447,14 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
// Prune old deletion records
pruneDeletionsManifest()
// Prune expired tombstones (bd-okh)
if tombstonePruneResult, err := pruneExpiredTombstones(); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to prune expired tombstones: %v\n", err)
} else if tombstonePruneResult != nil && tombstonePruneResult.PrunedCount > 0 {
fmt.Printf("\nTombstones pruned: %d expired (older than %d days)\n",
tombstonePruneResult.PrunedCount, tombstonePruneResult.TTLDays)
}
// Schedule auto-flush to export changes
if successCount > 0 {
markDirtyAndScheduleFlush()
@@ -871,6 +893,12 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
// Prune old deletion records (do this before JSON output so we can include results)
pruneResult, retentionDays := pruneDeletionsManifest()
// Prune expired tombstones from issues.jsonl (bd-okh)
tombstonePruneResult, tombstoneErr := pruneExpiredTombstones()
if tombstoneErr != nil && !jsonOutput {
fmt.Fprintf(os.Stderr, "Warning: failed to prune expired tombstones: %v\n", tombstoneErr)
}
if jsonOutput {
output := map[string]interface{}{
"success": true,
@@ -889,6 +917,13 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
"retention_days": retentionDays,
}
}
// Include tombstone pruning results (bd-okh)
if tombstonePruneResult != nil && tombstonePruneResult.PrunedCount > 0 {
output["tombstones_pruned"] = map[string]interface{}{
"count": tombstonePruneResult.PrunedCount,
"ttl_days": tombstonePruneResult.TTLDays,
}
}
outputJSON(output)
return
}
@@ -902,6 +937,12 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
fmt.Printf("\nDeletions pruned: %d records older than %d days removed\n", pruneResult.PrunedCount, retentionDays)
}
// Report tombstone pruning results (bd-okh)
if tombstonePruneResult != nil && tombstonePruneResult.PrunedCount > 0 {
fmt.Printf("\nTombstones pruned: %d expired tombstones (older than %d days) removed\n",
tombstonePruneResult.PrunedCount, tombstonePruneResult.TTLDays)
}
// Schedule auto-flush to export changes
markDirtyAndScheduleFlush()
}
@@ -940,6 +981,101 @@ func pruneDeletionsManifest() (*deletions.PruneResult, int) {
return result, retentionDays
}
// TombstonePruneResult contains the results of tombstone pruning
type TombstonePruneResult struct {
	PrunedCount int      // number of expired tombstones removed from issues.jsonl
	PrunedIDs   []string // issue IDs of the removed tombstones
	TTLDays     int      // TTL (in days) used to decide expiry, reported for display
}
// pruneExpiredTombstones reads issues.jsonl, removes expired tombstones,
// and atomically writes back the pruned file. Returns the prune result
// (never nil on success): count and IDs of pruned tombstones plus the TTL
// in days that was applied.
//
// Fix: the previous version decoded the file with a single json.Decoder and
// used `continue` to skip decode errors. json.Decoder cannot resync after a
// syntax error, so one corrupt line made the loop spin forever on the same
// error. The file is now split into lines and each line is unmarshaled
// independently, which skips corrupt lines safely.
// NOTE(review): corrupt lines are still dropped when the file is rewritten
// (same intent as the original) — confirm that is acceptable.
func pruneExpiredTombstones() (*TombstonePruneResult, error) {
	beadsDir := filepath.Dir(dbPath)
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")

	// Nothing to prune if issues.jsonl does not exist yet.
	if _, err := os.Stat(issuesPath); os.IsNotExist(err) {
		return &TombstonePruneResult{}, nil
	}

	// Read all issues. The file is line-delimited JSON (JSONL).
	// nolint:gosec // G304: issuesPath is controlled from beadsDir
	data, err := os.ReadFile(issuesPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read issues.jsonl: %w", err)
	}

	var allIssues []*types.Issue
	for start := 0; start < len(data); {
		end := start
		for end < len(data) && data[end] != '\n' {
			end++
		}
		line := data[start:end]
		start = end + 1
		if len(line) == 0 {
			continue // blank line
		}
		var issue types.Issue
		// Skip corrupt lines; json.Unmarshal tolerates surrounding JSON
		// whitespace (including a trailing \r from CRLF files).
		if err := json.Unmarshal(line, &issue); err != nil {
			continue
		}
		allIssues = append(allIssues, &issue)
	}

	// Determine TTL
	ttl := types.DefaultTombstoneTTL
	ttlDays := int(ttl.Hours() / 24)

	// Filter out expired tombstones
	var kept []*types.Issue
	var prunedIDs []string
	for _, issue := range allIssues {
		if issue.IsExpired(ttl) {
			prunedIDs = append(prunedIDs, issue.ID)
		} else {
			kept = append(kept, issue)
		}
	}

	// Nothing expired: leave the file untouched.
	if len(prunedIDs) == 0 {
		return &TombstonePruneResult{TTLDays: ttlDays}, nil
	}

	// Write back the pruned file atomically: write a temp file in the same
	// directory, then rename over the original.
	dir := filepath.Dir(issuesPath)
	base := filepath.Base(issuesPath)
	tempFile, err := os.CreateTemp(dir, base+".prune.*")
	if err != nil {
		return nil, fmt.Errorf("failed to create temp file: %w", err)
	}
	tempPath := tempFile.Name()
	encoder := json.NewEncoder(tempFile)
	for _, issue := range kept {
		if err := encoder.Encode(issue); err != nil {
			tempFile.Close()
			os.Remove(tempPath)
			return nil, fmt.Errorf("failed to write issue %s: %w", issue.ID, err)
		}
	}
	if err := tempFile.Close(); err != nil {
		os.Remove(tempPath)
		return nil, fmt.Errorf("failed to close temp file: %w", err)
	}
	// Atomically replace
	if err := os.Rename(tempPath, issuesPath); err != nil {
		os.Remove(tempPath)
		return nil, fmt.Errorf("failed to replace issues.jsonl: %w", err)
	}
	return &TombstonePruneResult{
		PrunedCount: len(prunedIDs),
		PrunedIDs:   prunedIDs,
		TTLDays:     ttlDays,
	}, nil
}
func init() {
compactCmd.Flags().BoolVar(&compactDryRun, "dry-run", false, "Preview without compacting")
compactCmd.Flags().IntVar(&compactTier, "tier", 1, "Compaction tier (1 or 2)")

View File

@@ -2,7 +2,9 @@ package main
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
"time"
@@ -384,3 +386,178 @@ func TestCompactInitCommand(t *testing.T) {
t.Error("compact command should have --json flag")
}
}
// TestPruneExpiredTombstones verifies that pruneExpiredTombstones removes
// tombstones older than the 30-day TTL while keeping live issues and fresh
// tombstones, and that issues.jsonl is rewritten to contain only survivors.
func TestPruneExpiredTombstones(t *testing.T) {
	// Setup: create a temp .beads directory with issues.jsonl
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}
	// Create issues.jsonl with mix of live issues, fresh tombstones, and expired tombstones
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")
	now := time.Now()
	freshTombstoneTime := now.Add(-10 * 24 * time.Hour)   // 10 days ago - NOT expired
	expiredTombstoneTime := now.Add(-60 * 24 * time.Hour) // 60 days ago - expired (> 30 day TTL)
	issues := []*types.Issue{
		{
			ID:        "test-live",
			Title:     "Live issue",
			Status:    types.StatusOpen,
			Priority:  2,
			IssueType: types.TypeTask,
			CreatedAt: now.Add(-5 * 24 * time.Hour),
			UpdatedAt: now,
		},
		{
			ID:           "test-fresh-tombstone",
			Title:        "(deleted)",
			Status:       types.StatusTombstone,
			Priority:     0,
			IssueType:    types.TypeTask,
			CreatedAt:    now.Add(-20 * 24 * time.Hour),
			UpdatedAt:    freshTombstoneTime,
			DeletedAt:    &freshTombstoneTime,
			DeletedBy:    "alice",
			DeleteReason: "duplicate",
		},
		{
			ID:           "test-expired-tombstone",
			Title:        "(deleted)",
			Status:       types.StatusTombstone,
			Priority:     0,
			IssueType:    types.TypeTask,
			CreatedAt:    now.Add(-90 * 24 * time.Hour),
			UpdatedAt:    expiredTombstoneTime,
			DeletedAt:    &expiredTombstoneTime,
			DeletedBy:    "bob",
			DeleteReason: "obsolete",
		},
	}
	// Write issues to JSONL (one JSON object per line, as the pruner expects)
	file, err := os.Create(issuesPath)
	if err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}
	encoder := json.NewEncoder(file)
	for _, issue := range issues {
		if err := encoder.Encode(issue); err != nil {
			file.Close()
			t.Fatalf("Failed to write issue: %v", err)
		}
	}
	file.Close()
	// Save original dbPath and restore after test; pruneExpiredTombstones
	// locates issues.jsonl relative to this package-level variable.
	originalDBPath := dbPath
	defer func() { dbPath = originalDBPath }()
	dbPath = filepath.Join(beadsDir, "beads.db")
	// Run pruning
	result, err := pruneExpiredTombstones()
	if err != nil {
		t.Fatalf("pruneExpiredTombstones failed: %v", err)
	}
	// Verify results: exactly the 60-day-old tombstone should be pruned
	if result.PrunedCount != 1 {
		t.Errorf("Expected 1 pruned tombstone, got %d", result.PrunedCount)
	}
	if len(result.PrunedIDs) != 1 || result.PrunedIDs[0] != "test-expired-tombstone" {
		t.Errorf("Expected PrunedIDs [test-expired-tombstone], got %v", result.PrunedIDs)
	}
	if result.TTLDays != 30 {
		t.Errorf("Expected TTLDays 30, got %d", result.TTLDays)
	}
	// Verify the file was updated correctly by re-reading it
	file, err = os.Open(issuesPath)
	if err != nil {
		t.Fatalf("Failed to reopen issues.jsonl: %v", err)
	}
	defer file.Close()
	var remaining []*types.Issue
	decoder := json.NewDecoder(file)
	for {
		var issue types.Issue
		if err := decoder.Decode(&issue); err != nil {
			// json.Decoder reports end-of-input as io.EOF
			if err.Error() == "EOF" {
				break
			}
			t.Fatalf("Failed to decode issue: %v", err)
		}
		remaining = append(remaining, &issue)
	}
	if len(remaining) != 2 {
		t.Fatalf("Expected 2 remaining issues, got %d", len(remaining))
	}
	// Verify live issue and fresh tombstone remain
	ids := make(map[string]bool)
	for _, issue := range remaining {
		ids[issue.ID] = true
	}
	if !ids["test-live"] {
		t.Error("Live issue should remain")
	}
	if !ids["test-fresh-tombstone"] {
		t.Error("Fresh tombstone should remain")
	}
	if ids["test-expired-tombstone"] {
		t.Error("Expired tombstone should have been pruned")
	}
}
// TestPruneExpiredTombstones_NoTombstones checks that pruning a file
// containing only live issues reports zero pruned tombstones.
func TestPruneExpiredTombstones_NoTombstones(t *testing.T) {
	workDir := t.TempDir()
	beads := filepath.Join(workDir, ".beads")
	if err := os.MkdirAll(beads, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}

	jsonlPath := filepath.Join(beads, "issues.jsonl")
	now := time.Now()
	live := &types.Issue{
		ID:        "test-live",
		Title:     "Live issue",
		Status:    types.StatusOpen,
		Priority:  2,
		IssueType: types.TypeTask,
		CreatedAt: now,
		UpdatedAt: now,
	}

	// Serialize the single live issue as one JSONL line.
	f, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}
	if err := json.NewEncoder(f).Encode(live); err != nil {
		f.Close()
		t.Fatalf("Failed to write issue: %v", err)
	}
	f.Close()

	// Point the global dbPath at the sandbox and restore it afterwards.
	savedPath := dbPath
	t.Cleanup(func() { dbPath = savedPath })
	dbPath = filepath.Join(beads, "beads.db")

	// Run pruning - should return zero pruned
	res, err := pruneExpiredTombstones()
	if err != nil {
		t.Fatalf("pruneExpiredTombstones failed: %v", err)
	}
	if res.PrunedCount != 0 {
		t.Errorf("Expected 0 pruned tombstones, got %d", res.PrunedCount)
	}
}

View File

@@ -0,0 +1,291 @@
package main
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/types"
)
// migrateTombstonesCmd implements `bd migrate-tombstones` (bd-8f9): a one-shot
// migration that converts legacy deletions.jsonl records into inline tombstone
// entries in issues.jsonl, then archives the legacy manifest.
var migrateTombstonesCmd = &cobra.Command{
	Use:   "migrate-tombstones",
	Short: "Convert deletions.jsonl entries to inline tombstones",
	Long: `Migrate legacy deletions.jsonl entries to inline tombstones in issues.jsonl.
This command converts existing deletion records from the legacy deletions.jsonl
manifest to inline tombstone entries in issues.jsonl. This is part of the
transition from separate deletion tracking to unified tombstone-based deletion.
The migration:
1. Reads existing deletions from deletions.jsonl
2. Checks issues.jsonl for already-existing tombstones
3. Creates tombstone entries for unmigrated deletions
4. Appends new tombstones to issues.jsonl
5. Archives deletions.jsonl with .migrated suffix
Use --dry-run to preview changes without modifying files.
Examples:
bd migrate-tombstones # Migrate deletions to tombstones
bd migrate-tombstones --dry-run # Preview what would be migrated
bd migrate-tombstones --verbose # Show detailed progress`,
	Run: func(cmd *cobra.Command, _ []string) {
		dryRun, _ := cmd.Flags().GetBool("dry-run")
		verbose, _ := cmd.Flags().GetBool("verbose")
		// Block writes in readonly mode; dry-run never writes, so it is allowed.
		if !dryRun {
			CheckReadonly("migrate-tombstones")
		}
		// Find .beads directory
		beadsDir := findBeadsDir()
		if beadsDir == "" {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"error":   "no_beads_directory",
					"message": "No .beads directory found. Run 'bd init' first.",
				})
			} else {
				fmt.Fprintf(os.Stderr, "Error: no .beads directory found\n")
				fmt.Fprintf(os.Stderr, "Hint: run 'bd init' to initialize bd\n")
			}
			os.Exit(1)
		}
		// Check paths
		deletionsPath := deletions.DefaultPath(beadsDir)
		issuesPath := filepath.Join(beadsDir, "issues.jsonl")
		// Load existing deletions
		loadResult, err := deletions.LoadDeletions(deletionsPath)
		if err != nil {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"error":   "load_deletions_failed",
					"message": err.Error(),
				})
			} else {
				fmt.Fprintf(os.Stderr, "Error loading deletions.jsonl: %v\n", err)
			}
			os.Exit(1)
		}
		// Nothing to migrate at all: report and stop early.
		if len(loadResult.Records) == 0 {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"status":   "noop",
					"message":  "No deletions to migrate",
					"migrated": 0,
					"skipped":  0,
				})
			} else {
				fmt.Println("No deletions.jsonl entries to migrate")
			}
			return
		}
		// Print warnings from loading (suppressed in JSON mode)
		for _, warning := range loadResult.Warnings {
			if !jsonOutput {
				color.Yellow("Warning: %s\n", warning)
			}
		}
		// Load existing issues.jsonl to find existing tombstones, so we never
		// append a duplicate tombstone for an already-migrated deletion.
		existingTombstones := make(map[string]bool)
		if _, err := os.Stat(issuesPath); err == nil {
			// nolint:gosec // G304: issuesPath is controlled from beadsDir
			file, err := os.Open(issuesPath)
			if err != nil {
				if jsonOutput {
					outputJSON(map[string]interface{}{
						"error":   "load_issues_failed",
						"message": err.Error(),
					})
				} else {
					fmt.Fprintf(os.Stderr, "Error opening issues.jsonl: %v\n", err)
				}
				os.Exit(1)
			}
			decoder := json.NewDecoder(file)
			for {
				var issue types.Issue
				if err := decoder.Decode(&issue); err != nil {
					// json.Decoder reports end-of-input as io.EOF
					if err.Error() == "EOF" {
						break
					}
					// Skip corrupt lines, continue reading
					continue
				}
				if issue.IsTombstone() {
					existingTombstones[issue.ID] = true
				}
			}
			file.Close()
		}
		// Determine which deletions need migration
		var toMigrate []deletions.DeletionRecord
		var skippedIDs []string
		for id, record := range loadResult.Records {
			if existingTombstones[id] {
				skippedIDs = append(skippedIDs, id)
				if verbose && !jsonOutput {
					fmt.Printf("  Skipping %s (tombstone already exists)\n", id)
				}
			} else {
				toMigrate = append(toMigrate, record)
			}
		}
		// Everything already migrated: report and stop.
		if len(toMigrate) == 0 {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"status":   "noop",
					"message":  "All deletions already migrated to tombstones",
					"migrated": 0,
					"skipped":  len(skippedIDs),
				})
			} else {
				fmt.Printf("All %d deletion(s) already have tombstones in issues.jsonl\n", len(skippedIDs))
			}
			return
		}
		// Dry run - just report what would happen
		if dryRun {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"dry_run":       true,
					"would_migrate": len(toMigrate),
					"skipped":       len(skippedIDs),
					"total":         len(loadResult.Records),
				})
			} else {
				fmt.Println("Dry run mode - no changes will be made")
				fmt.Printf("\nWould migrate %d deletion(s) to tombstones:\n", len(toMigrate))
				for _, record := range toMigrate {
					fmt.Printf("  - %s (deleted %s by %s)\n",
						record.ID,
						record.Timestamp.Format("2006-01-02"),
						record.Actor)
				}
				if len(skippedIDs) > 0 {
					fmt.Printf("\nWould skip %d already-migrated deletion(s)\n", len(skippedIDs))
				}
			}
			return
		}
		// Perform migration - append tombstones to issues.jsonl
		if verbose && !jsonOutput {
			fmt.Printf("Creating %d tombstone(s)...\n", len(toMigrate))
		}
		// Open issues.jsonl for appending
		// nolint:gosec // G304: issuesPath is controlled from beadsDir
		file, err := os.OpenFile(issuesPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"error":   "open_issues_failed",
					"message": err.Error(),
				})
			} else {
				fmt.Fprintf(os.Stderr, "Error opening issues.jsonl for append: %v\n", err)
			}
			os.Exit(1)
		}
		defer file.Close()
		encoder := json.NewEncoder(file)
		var migratedIDs []string
		for _, record := range toMigrate {
			tombstone := convertDeletionRecordToTombstone(record)
			if err := encoder.Encode(tombstone); err != nil {
				if jsonOutput {
					outputJSON(map[string]interface{}{
						"error":    "write_tombstone_failed",
						"message":  err.Error(),
						"issue_id": record.ID,
					})
				} else {
					fmt.Fprintf(os.Stderr, "Error writing tombstone for %s: %v\n", record.ID, err)
				}
				os.Exit(1)
			}
			migratedIDs = append(migratedIDs, record.ID)
			if verbose && !jsonOutput {
				fmt.Printf("  ✓ Created tombstone for %s\n", record.ID)
			}
		}
		// Archive deletions.jsonl so it is not migrated again
		archivePath := deletionsPath + ".migrated"
		if err := os.Rename(deletionsPath, archivePath); err != nil {
			// Warn but don't fail - tombstones were already created
			if !jsonOutput {
				color.Yellow("Warning: could not archive deletions.jsonl: %v\n", err)
			}
		} else if verbose && !jsonOutput {
			fmt.Printf("  ✓ Archived deletions.jsonl to %s\n", filepath.Base(archivePath))
		}
		// Success output
		if jsonOutput {
			outputJSON(map[string]interface{}{
				"status":       "success",
				"migrated":     len(migratedIDs),
				"skipped":      len(skippedIDs),
				"total":        len(loadResult.Records),
				"archive":      archivePath,
				"migrated_ids": migratedIDs,
			})
		} else {
			color.Green("\n✓ Migration complete\n\n")
			fmt.Printf("  Migrated: %d tombstone(s)\n", len(migratedIDs))
			if len(skippedIDs) > 0 {
				fmt.Printf("  Skipped: %d (already had tombstones)\n", len(skippedIDs))
			}
			fmt.Printf("  Archived: %s\n", filepath.Base(archivePath))
			fmt.Println("\nNext steps:")
			fmt.Println("  1. Run 'bd sync' to propagate tombstones to remote")
			fmt.Println("  2. Other clones will receive tombstones on next sync")
		}
	},
}
// convertDeletionRecordToTombstone creates a tombstone issue from a deletion
// record. It mirrors the importer's convertDeletionToTombstone but accepts a
// deletions.DeletionRecord directly.
func convertDeletionRecordToTombstone(del deletions.DeletionRecord) *types.Issue {
	when := del.Timestamp
	tombstone := types.Issue{
		ID:           del.ID,
		Title:        "(deleted)",
		Description:  "",
		Status:       types.StatusTombstone,
		Priority:     0,              // unknown priority (0 = unset)
		IssueType:    types.TypeTask, // default type (must be valid)
		CreatedAt:    when,
		UpdatedAt:    when,
		DeletedAt:    &when,
		DeletedBy:    del.Actor,
		DeleteReason: del.Reason,
		OriginalType: "", // not recorded in legacy deletions.jsonl
	}
	return &tombstone
}
// init registers the migrate-tombstones command and its flags on the root
// command at program startup.
func init() {
	migrateTombstonesCmd.Flags().Bool("dry-run", false, "Preview changes without modifying files")
	migrateTombstonesCmd.Flags().Bool("verbose", false, "Show detailed progress")
	migrateTombstonesCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output in JSON format")
	rootCmd.AddCommand(migrateTombstonesCmd)
}

View File

@@ -0,0 +1,226 @@
package main
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/deletions"
"github.com/steveyegge/beads/internal/types"
)
// TestMigrateTombstones_NoDeletions verifies that LoadDeletions reports zero
// records when no deletions.jsonl exists in the .beads directory.
//
// Fix: the original ignored the errors from os.Getwd and os.Chdir; a failed
// chdir would silently run this test (and later tests) in the wrong working
// directory. Both errors are now checked and the restore is verified.
func TestMigrateTombstones_NoDeletions(t *testing.T) {
	// Setup: create temp .beads directory with no deletions.jsonl
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}

	// Create empty issues.jsonl
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")
	if err := os.WriteFile(issuesPath, []byte{}, 0600); err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}

	// Run in temp dir; restore the original working directory afterwards.
	oldWd, err := os.Getwd()
	if err != nil {
		t.Fatalf("Failed to get working directory: %v", err)
	}
	defer func() {
		if err := os.Chdir(oldWd); err != nil {
			t.Errorf("Failed to restore working directory: %v", err)
		}
	}()
	if err := os.Chdir(tmpDir); err != nil {
		t.Fatalf("Failed to chdir to temp dir: %v", err)
	}

	// The command should report no deletions to migrate
	deletionsPath := deletions.DefaultPath(beadsDir)
	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loadResult.Records) != 0 {
		t.Errorf("Expected 0 deletions, got %d", len(loadResult.Records))
	}
}
// TestMigrateTombstones_WithDeletions verifies that deletion records written
// to deletions.jsonl load back correctly and convert into well-formed
// tombstone issues.
func TestMigrateTombstones_WithDeletions(t *testing.T) {
	workDir := t.TempDir()
	beads := filepath.Join(workDir, ".beads")
	if err := os.MkdirAll(beads, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}

	// Append two deletion records to deletions.jsonl.
	manifestPath := deletions.DefaultPath(beads)
	when := time.Now().Add(-24 * time.Hour)
	entries := []deletions.DeletionRecord{
		{ID: "test-abc", Timestamp: when, Actor: "alice", Reason: "duplicate"},
		{ID: "test-def", Timestamp: when.Add(-1 * time.Hour), Actor: "bob", Reason: "obsolete"},
	}
	for _, entry := range entries {
		if err := deletions.AppendDeletion(manifestPath, entry); err != nil {
			t.Fatalf("Failed to write deletion: %v", err)
		}
	}

	// An empty issues.jsonl stands in for a repo with no tombstones yet.
	if err := os.WriteFile(filepath.Join(beads, "issues.jsonl"), []byte{}, 0600); err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}

	// Round-trip the manifest and check the record count.
	loaded, err := deletions.LoadDeletions(manifestPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}
	if len(loaded.Records) != 2 {
		t.Fatalf("Expected 2 deletions, got %d", len(loaded.Records))
	}

	// Simulate migration and verify each resulting tombstone's fields.
	for _, rec := range loaded.Records {
		ts := convertDeletionRecordToTombstone(rec)
		if ts.Status != types.StatusTombstone {
			t.Errorf("Expected status tombstone, got %s", ts.Status)
		}
		if ts.DeletedAt == nil {
			t.Error("Expected DeletedAt to be set")
		}
		if ts.DeletedBy == "" {
			t.Error("Expected DeletedBy to be set")
		}
	}
}
// TestMigrateTombstones_SkipsExistingTombstones verifies the skip logic: a
// deletion whose ID already has a tombstone in issues.jsonl is skipped, while
// the rest are selected for migration.
//
// Fix: the original discarded the error from os.Open when re-reading
// issues.jsonl (`file, _ = os.Open(...)`); a failed open would make the
// decode loop see zero tombstones and silently miscount. The error is now
// checked.
func TestMigrateTombstones_SkipsExistingTombstones(t *testing.T) {
	// Setup: create temp .beads directory
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads dir: %v", err)
	}

	// Create deletions.jsonl with some entries
	deletionsPath := deletions.DefaultPath(beadsDir)
	deleteTime := time.Now().Add(-24 * time.Hour)
	records := []deletions.DeletionRecord{
		{ID: "test-abc", Timestamp: deleteTime, Actor: "alice", Reason: "duplicate"},
		{ID: "test-def", Timestamp: deleteTime.Add(-1 * time.Hour), Actor: "bob", Reason: "obsolete"},
	}
	for _, record := range records {
		if err := deletions.AppendDeletion(deletionsPath, record); err != nil {
			t.Fatalf("Failed to write deletion: %v", err)
		}
	}

	// Create issues.jsonl with an existing tombstone for test-abc
	issuesPath := filepath.Join(beadsDir, "issues.jsonl")
	existingTombstone := types.Issue{
		ID:        "test-abc",
		Title:     "(deleted)",
		Status:    types.StatusTombstone,
		DeletedBy: "alice",
	}
	file, err := os.Create(issuesPath)
	if err != nil {
		t.Fatalf("Failed to create issues.jsonl: %v", err)
	}
	encoder := json.NewEncoder(file)
	if err := encoder.Encode(existingTombstone); err != nil {
		file.Close()
		t.Fatalf("Failed to write existing tombstone: %v", err)
	}
	file.Close()

	// Load existing tombstones back from issues.jsonl
	existingTombstones := make(map[string]bool)
	file, err = os.Open(issuesPath)
	if err != nil {
		t.Fatalf("Failed to reopen issues.jsonl: %v", err)
	}
	decoder := json.NewDecoder(file)
	for {
		var issue types.Issue
		if err := decoder.Decode(&issue); err != nil {
			break
		}
		if issue.IsTombstone() {
			existingTombstones[issue.ID] = true
		}
	}
	file.Close()

	// Load deletions
	loadResult, err := deletions.LoadDeletions(deletionsPath)
	if err != nil {
		t.Fatalf("LoadDeletions failed: %v", err)
	}

	// Count what should be migrated vs skipped
	var toMigrate, skipped int
	for id := range loadResult.Records {
		if existingTombstones[id] {
			skipped++
		} else {
			toMigrate++
		}
	}
	if toMigrate != 1 {
		t.Errorf("Expected 1 to migrate, got %d", toMigrate)
	}
	if skipped != 1 {
		t.Errorf("Expected 1 skipped, got %d", skipped)
	}
}
// TestConvertDeletionRecordToTombstone verifies the field-by-field mapping
// from a deletion record to its tombstone issue.
func TestConvertDeletionRecordToTombstone(t *testing.T) {
	when := time.Now().Add(-24 * time.Hour)
	rec := deletions.DeletionRecord{
		ID:        "test-xyz",
		Timestamp: when,
		Actor:     "alice",
		Reason:    "test reason",
	}

	got := convertDeletionRecordToTombstone(rec)

	if got.ID != "test-xyz" {
		t.Errorf("Expected ID test-xyz, got %s", got.ID)
	}
	if got.Status != types.StatusTombstone {
		t.Errorf("Expected status tombstone, got %s", got.Status)
	}
	if got.Title != "(deleted)" {
		t.Errorf("Expected title '(deleted)', got %s", got.Title)
	}
	if got.DeletedBy != "alice" {
		t.Errorf("Expected DeletedBy 'alice', got %s", got.DeletedBy)
	}
	if got.DeleteReason != "test reason" {
		t.Errorf("Expected DeleteReason 'test reason', got %s", got.DeleteReason)
	}
	switch {
	case got.DeletedAt == nil:
		t.Error("Expected DeletedAt to be set")
	case !got.DeletedAt.Equal(when):
		t.Errorf("Expected DeletedAt %v, got %v", when, *got.DeletedAt)
	}
	if got.Priority != 0 {
		t.Errorf("Expected priority 0 (unknown), got %d", got.Priority)
	}
	if got.IssueType != types.TypeTask {
		t.Errorf("Expected type task, got %s", got.IssueType)
	}
	if got.OriginalType != "" {
		t.Errorf("Expected empty OriginalType, got %s", got.OriginalType)
	}
}