fix(rename-prefix): sync JSONL before and after prefix rename (#893)
- Pull from sync-branch before rename if configured
- Import all issues from JSONL before rename to prevent data loss
- Export directly to JSONL after rename (don't rely on flushManager)
- Apply same pattern to --repair mode
- Add newSilentLogger() for production use (not test-only)
- Add comprehensive tests for JSONL update scenarios

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
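The hunks below call newSilentLogger() at the sync-branch pull site, but its definition is not part of this excerpt. A minimal sketch of what a no-op logger for that purpose might look like, assuming a Printf-style logging interface (the real interface in this codebase may differ):

```go
// Hypothetical sketch only - the actual logger interface consumed by
// syncBranchPull is not shown in this diff.
type silentLogger struct{}

// Printf discards all output, keeping pull progress quiet in production runs.
func (silentLogger) Printf(format string, args ...any) {}

func newSilentLogger() silentLogger { return silentLogger{} }
```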
@@ -1,6 +1,7 @@
package main

import (
	"bufio"
	"cmp"
	"context"
	"database/sql"
@@ -15,6 +16,7 @@ import (
	"github.com/spf13/cobra"
	"github.com/steveyegge/beads/internal/storage"
	"github.com/steveyegge/beads/internal/storage/sqlite"
	"github.com/steveyegge/beads/internal/syncbranch"
	"github.com/steveyegge/beads/internal/types"
	"github.com/steveyegge/beads/internal/ui"
	"github.com/steveyegge/beads/internal/utils"
@@ -82,6 +84,51 @@ NOTE: This is a rare operation. Most users never need this command.`,
			os.Exit(1)
		}

		// Get JSONL path for sync operations
		jsonlPath := findJSONLPath()

		// If sync-branch is configured, pull the latest remote issues first,
		// so the database has every remote issue before renaming.
		if !dryRun && syncbranch.IsConfigured() {
			silentLog := newSilentLogger()
			pulled, err := syncBranchPull(ctx, store, silentLog)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Warning: failed to pull sync-branch: %v\n", err)
				fmt.Fprintf(os.Stderr, "Continuing anyway; issues from remote may be missing.\n")
			} else if pulled {
				fmt.Printf("Pulled latest issues from sync-branch\n")
			}
		}

		// Force an import from JSONL so the DB has all issues before the rename.
		// This prevents data loss if the JSONL has issues from other workspaces.
		if !dryRun && jsonlPath != "" {
			if _, err := os.Stat(jsonlPath); err == nil {
				// JSONL exists - force import to sync all issues to the DB
				issues, err := parseJSONLFile(jsonlPath)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Error: failed to read JSONL before rename: %v\n", err)
					os.Exit(1)
				}
				if len(issues) > 0 {
					opts := ImportOptions{
						DryRun:               false,
						SkipUpdate:           false,
						Strict:               false,
						SkipPrefixValidation: true, // allow any prefix during rename
					}
					result, err := importIssuesCore(ctx, dbPath, store, issues, opts)
					if err != nil {
						fmt.Fprintf(os.Stderr, "Error: failed to sync JSONL before rename: %v\n", err)
						os.Exit(1)
					}
					if result.Created > 0 || result.Updated > 0 {
						fmt.Printf("Synced %d issues from JSONL before rename\n", result.Created+result.Updated)
					}
				}
			}
		}

		oldPrefix, err := store.GetConfig(ctx, "issue_prefix")
		if err != nil || oldPrefix == "" {
			fmt.Fprintf(os.Stderr, "Error: failed to get current prefix: %v\n", err)
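Taken together with the export hunks further down, the ordering this block enforces is pull, then import, then rename, then export. Schematically:

```go
// Ordering enforced around the rename (schematic, not literal code):
//  1. pull sync-branch      -> local JSONL contains every remote issue
//  2. import JSONL into DB  -> DB becomes a superset of the JSONL
//  3. rename prefix in DB   -> all IDs rewritten in one place
//  4. export DB to JSONL    -> file rewritten with new IDs, nothing dropped
```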
@@ -163,7 +210,34 @@ NOTE: This is a rare operation. Most users never need this command.`,
			os.Exit(1)
		}

		// Force a full export to JSONL with the new IDs (the IDs changed, so an
		// incremental export won't work). This is safe because we imported all
		// JSONL issues before the rename.
		if jsonlPath != "" {
			// Clear metadata hashes so the integrity check doesn't fail
			_ = store.SetMetadata(ctx, "jsonl_content_hash", "")
			_ = store.SetMetadata(ctx, "export_hashes", "")
			_ = store.SetJSONLFileHash(ctx, "")

			// Get all renamed issues from the DB and export directly
			renamedIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
			if err != nil {
				fmt.Fprintf(os.Stderr, "Warning: failed to get issues for export: %v\n", err)
			} else {
				// Attach dependencies to each issue
				for _, issue := range renamedIssues {
					deps, _ := store.GetDependencyRecords(ctx, issue.ID)
					issue.Dependencies = deps
				}
				// Write directly to JSONL
				if _, err := writeJSONLAtomic(jsonlPath, renamedIssues); err != nil {
					fmt.Fprintf(os.Stderr, "Warning: failed to export: %v\n", err)
					fmt.Fprintf(os.Stderr, "Run 'bd export --force' to update JSONL\n")
				} else {
					fmt.Printf("Updated %s with new IDs\n", jsonlPath)
				}
			}
		}
		// Also schedule for the flush manager if available
		markDirtyAndScheduleFullExport()

		fmt.Printf("%s Successfully renamed prefix from %s to %s\n", ui.RenderPass("✓"), ui.RenderAccent(oldPrefix), ui.RenderAccent(newPrefix))
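writeJSONLAtomic's body isn't part of this diff; its call sites above suggest it returns a count and an error. A plausible sketch of the standard write-temp-then-rename pattern such a helper usually implements, with a hypothetical name and signature (assumes "encoding/json", "os", and "path/filepath" are imported):

```go
// Hypothetical sketch of an atomic JSONL write; the real writeJSONLAtomic
// may differ in signature and details.
func writeJSONLAtomicSketch(path string, issues []*types.Issue) (int, error) {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".issues-*.jsonl")
	if err != nil {
		return 0, err
	}
	defer os.Remove(tmp.Name()) // harmless no-op once the rename succeeds

	enc := json.NewEncoder(tmp) // Encode appends '\n', giving one issue per line
	written := 0
	for _, issue := range issues {
		if err := enc.Encode(issue); err != nil {
			tmp.Close()
			return written, err
		}
		written++
	}
	if err := tmp.Close(); err != nil {
		return written, err
	}
	// Rename is atomic on POSIX filesystems, so readers never observe a
	// half-written file - they see either the old JSONL or the new one.
	return written, os.Rename(tmp.Name(), path)
}
```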
@@ -364,7 +438,35 @@ func repairPrefixes(ctx context.Context, st storage.Storage, actorName string, t
		return fmt.Errorf("failed to update config: %w", err)
	}

	// Force a full export to JSONL with the new IDs (the IDs changed, so an
	// incremental export won't work). This is safe because the caller imported
	// all JSONL issues before the repair.
	jsonlPath := findJSONLPath()
	if jsonlPath != "" {
		// Clear metadata hashes so the integrity check doesn't fail
		_ = st.SetMetadata(ctx, "jsonl_content_hash", "")
		_ = st.SetMetadata(ctx, "export_hashes", "")
		_ = st.SetJSONLFileHash(ctx, "")

		// Get all renamed issues from the DB and export directly
		renamedIssues, err := st.SearchIssues(ctx, "", types.IssueFilter{})
		if err != nil {
			fmt.Fprintf(os.Stderr, "Warning: failed to get issues for export: %v\n", err)
		} else {
			// Attach dependencies to each issue
			for _, issue := range renamedIssues {
				deps, _ := st.GetDependencyRecords(ctx, issue.ID)
				issue.Dependencies = deps
			}
			// Write directly to JSONL
			if _, err := writeJSONLAtomic(jsonlPath, renamedIssues); err != nil {
				fmt.Fprintf(os.Stderr, "Warning: failed to export: %v\n", err)
				fmt.Fprintf(os.Stderr, "Run 'bd export --force' to update JSONL\n")
			} else {
				fmt.Printf("Updated %s with new IDs\n", jsonlPath)
			}
		}
	}
	// Also schedule for the flush manager if available
	markDirtyAndScheduleFullExport()

	fmt.Printf("\n%s Successfully consolidated %d prefixes into %s\n",
@@ -468,6 +570,42 @@ func generateRepairHashID(ctx context.Context, conn *sql.Conn, prefix string, is
	return newID, nil
}

// parseJSONLFile reads and parses a JSONL file into a slice of issues.
func parseJSONLFile(jsonlPath string) ([]*types.Issue, error) {
	f, err := os.Open(jsonlPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open JSONL file: %w", err)
	}
	defer f.Close()

	var issues []*types.Issue
	scanner := bufio.NewScanner(f)
	// Increase the buffer to handle large JSON lines
	scanner.Buffer(make([]byte, 0, 1024), 2*1024*1024) // 2MB max line size

	lineNum := 0
	for scanner.Scan() {
		lineNum++
		line := scanner.Text()
		if line == "" {
			continue
		}

		var issue types.Issue
		if err := json.Unmarshal([]byte(line), &issue); err != nil {
			return nil, fmt.Errorf("parse error at line %d: %w", lineNum, err)
		}
		issue.SetDefaults()
		issues = append(issues, &issue)
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("scanner error: %w", err)
	}

	return issues, nil
}
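On the Buffer call above: bufio.Scanner's default token limit is 64 KiB (bufio.MaxScanTokenSize), so without it a single long issue line would abort the scan with bufio.ErrTooLong. A self-contained demonstration of that failure mode (needs "bufio", "fmt", and "strings"):

```go
// Demonstrates the default 64 KiB bufio.Scanner cap that parseJSONLFile
// raises to 2 MB.
func demoScannerLimit() {
	line := strings.Repeat("x", 100*1024) // 100 KiB, over the default cap
	s := bufio.NewScanner(strings.NewReader(line))
	if !s.Scan() && s.Err() == bufio.ErrTooLong {
		fmt.Println("default scanner buffer is too small:", s.Err())
	}
}
```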
func init() {
	renamePrefixCmd.Flags().Bool("dry-run", false, "Preview changes without applying them")
	renamePrefixCmd.Flags().Bool("repair", false, "Repair database with multiple prefixes by consolidating them")
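For context, flags registered this way are typically read inside the command's Run function with cobra's standard accessors; a sketch (the command's Use string and Run body are outside this diff, so both are assumptions here):

```go
// Hypothetical sketch of reading the flags registered above; the actual
// renamePrefixCmd definition is not shown in this diff.
var renamePrefixSketch = &cobra.Command{
	Use: "rename-prefix", // assumed subcommand name
	Run: func(cmd *cobra.Command, args []string) {
		dryRun, _ := cmd.Flags().GetBool("dry-run") // preview changes only
		repair, _ := cmd.Flags().GetBool("repair")  // consolidate multiple prefixes
		fmt.Printf("dry-run=%v repair=%v\n", dryRun, repair)
	},
}
```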