This commit is contained in:
Steve Yegge
2025-11-23 20:38:42 -08:00
17 changed files with 1697 additions and 144 deletions

File diff suppressed because one or more lines are too long

View File

@@ -13,11 +13,11 @@ import (
var cleanCmd = &cobra.Command{ var cleanCmd = &cobra.Command{
Use: "clean", Use: "clean",
Short: "Clean up temporary beads artifacts", Short: "Clean up temporary git merge artifacts from .beads directory",
Long: `Delete temporary beads artifacts to clean up after git operations. Long: `Delete temporary git merge artifacts from the .beads directory.
This removes temporary files created during git merges and conflicts from the This command removes temporary files created during git merges and conflicts.
.beads directory. It does NOT delete issues from the database - use 'bd cleanup' for that.
Files removed: Files removed:
- 3-way merge snapshots (beads.base.jsonl, beads.left.jsonl, beads.right.jsonl) - 3-way merge snapshots (beads.base.jsonl, beads.left.jsonl, beads.right.jsonl)
@@ -36,7 +36,10 @@ Clean up temporary files:
bd clean bd clean
Preview what would be deleted: Preview what would be deleted:
bd clean --dry-run`, bd clean --dry-run
SEE ALSO:
bd cleanup Delete closed issues from database`,
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
dryRun, _ := cmd.Flags().GetBool("dry-run") dryRun, _ := cmd.Flags().GetBool("dry-run")

View File

@@ -13,8 +13,11 @@ import (
var cleanupCmd = &cobra.Command{ var cleanupCmd = &cobra.Command{
Use: "cleanup", Use: "cleanup",
Short: "Delete all closed issues (optionally filtered by age)", Short: "Delete closed issues from database to free up space",
Long: `Delete all closed issues to clean up the database. Long: `Delete closed issues from the database to reduce database size.
This command permanently removes closed issues from beads.db and beads.jsonl.
It does NOT remove temporary files - use 'bd clean' for that.
By default, deletes ALL closed issues. Use --older-than to only delete By default, deletes ALL closed issues. Use --older-than to only delete
issues closed before a certain date. issues closed before a certain date.
@@ -34,7 +37,10 @@ SAFETY:
- Requires --force flag to actually delete (unless --dry-run) - Requires --force flag to actually delete (unless --dry-run)
- Supports --cascade to delete dependents - Supports --cascade to delete dependents
- Shows preview of what will be deleted - Shows preview of what will be deleted
- Use --json for programmatic output`, - Use --json for programmatic output
SEE ALSO:
bd clean Remove temporary git merge artifacts`,
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
force, _ := cmd.Flags().GetBool("force") force, _ := cmd.Flags().GetBool("force")
dryRun, _ := cmd.Flags().GetBool("dry-run") dryRun, _ := cmd.Flags().GetBool("dry-run")

View File

@@ -14,18 +14,28 @@ import (
var detectPollutionCmd = &cobra.Command{ var detectPollutionCmd = &cobra.Command{
Use: "detect-pollution", Use: "detect-pollution",
Short: "Detect test issues that leaked into production database", Short: "Detect and optionally clean test issues from database",
Long: `Detect test issues using pattern matching: Long: `Detect test issues that leaked into production database using pattern matching.
- Titles starting with 'test', 'benchmark', 'sample', 'tmp', 'temp'
- Sequential numbering (test-1, test-2, ...)
- Generic descriptions or no description
- Created in rapid succession
Example: This command finds issues that appear to be test data based on:
- Titles starting with 'test', 'benchmark', 'sample', 'tmp', 'temp'
- Sequential numbering patterns (test-1, test-2, ...)
- Generic or missing descriptions
- Created in rapid succession (potential script/automation artifacts)
USE CASES:
- Cleaning up after testing in a production database
- Identifying accidental test data from CI/automation
- Database hygiene after development experiments
- Quality checks before database backups
EXAMPLES:
bd detect-pollution # Show potential test issues bd detect-pollution # Show potential test issues
bd detect-pollution --clean # Delete test issues (with confirmation) bd detect-pollution --clean # Delete test issues (with confirmation)
bd detect-pollution --clean --yes # Delete without confirmation bd detect-pollution --clean --yes # Delete without confirmation
bd detect-pollution --json # Output in JSON format`, bd detect-pollution --json # Output in JSON format
NOTE: Review detected issues carefully before using --clean. False positives are possible.`,
Run: func(cmd *cobra.Command, _ []string) { Run: func(cmd *cobra.Command, _ []string) {
// Check daemon mode - not supported yet (uses direct storage access) // Check daemon mode - not supported yet (uses direct storage access)
if daemonClient != nil { if daemonClient != nil {

View File

@@ -14,6 +14,8 @@ import (
"github.com/steveyegge/beads/internal/debug" "github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage/sqlite" "github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/util"
"github.com/steveyegge/beads/internal/validation"
) )
// countIssuesInJSONL counts the number of issues in a JSONL file // countIssuesInJSONL counts the number of issues in a JSONL file
@@ -114,13 +116,30 @@ var exportCmd = &cobra.Command{
Long: `Export all issues to JSON Lines format (one JSON object per line). Long: `Export all issues to JSON Lines format (one JSON object per line).
Issues are sorted by ID for consistent diffs. Issues are sorted by ID for consistent diffs.
Output to stdout by default, or use -o flag for file output.`, Output to stdout by default, or use -o flag for file output.
Examples:
bd export --status open -o open-issues.jsonl
bd export --type bug --priority-max 1
bd export --created-after 2025-01-01 --assignee alice`,
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
format, _ := cmd.Flags().GetString("format") format, _ := cmd.Flags().GetString("format")
output, _ := cmd.Flags().GetString("output") output, _ := cmd.Flags().GetString("output")
statusFilter, _ := cmd.Flags().GetString("status") statusFilter, _ := cmd.Flags().GetString("status")
force, _ := cmd.Flags().GetBool("force") force, _ := cmd.Flags().GetBool("force")
// Additional filter flags
assignee, _ := cmd.Flags().GetString("assignee")
issueType, _ := cmd.Flags().GetString("type")
labels, _ := cmd.Flags().GetStringSlice("label")
labelsAny, _ := cmd.Flags().GetStringSlice("label-any")
priorityMinStr, _ := cmd.Flags().GetString("priority-min")
priorityMaxStr, _ := cmd.Flags().GetString("priority-max")
createdAfter, _ := cmd.Flags().GetString("created-after")
createdBefore, _ := cmd.Flags().GetString("created-before")
updatedAfter, _ := cmd.Flags().GetString("updated-after")
updatedBefore, _ := cmd.Flags().GetString("updated-before")
debug.Logf("Debug: export flags - output=%q, force=%v\n", output, force) debug.Logf("Debug: export flags - output=%q, force=%v\n", output, force)
if format != "jsonl" { if format != "jsonl" {
@@ -155,12 +174,81 @@ Output to stdout by default, or use -o flag for file output.`,
defer func() { _ = store.Close() }() defer func() { _ = store.Close() }()
} }
// Normalize labels: trim, dedupe, remove empty
labels = util.NormalizeLabels(labels)
labelsAny = util.NormalizeLabels(labelsAny)
// Build filter // Build filter
filter := types.IssueFilter{} filter := types.IssueFilter{}
if statusFilter != "" { if statusFilter != "" {
status := types.Status(statusFilter) status := types.Status(statusFilter)
filter.Status = &status filter.Status = &status
} }
if assignee != "" {
filter.Assignee = &assignee
}
if issueType != "" {
t := types.IssueType(issueType)
filter.IssueType = &t
}
if len(labels) > 0 {
filter.Labels = labels
}
if len(labelsAny) > 0 {
filter.LabelsAny = labelsAny
}
// Priority ranges
if cmd.Flags().Changed("priority-min") {
priorityMin, err := validation.ValidatePriority(priorityMinStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing --priority-min: %v\n", err)
os.Exit(1)
}
filter.PriorityMin = &priorityMin
}
if cmd.Flags().Changed("priority-max") {
priorityMax, err := validation.ValidatePriority(priorityMaxStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing --priority-max: %v\n", err)
os.Exit(1)
}
filter.PriorityMax = &priorityMax
}
// Date ranges
if createdAfter != "" {
t, err := parseTimeFlag(createdAfter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing --created-after: %v\n", err)
os.Exit(1)
}
filter.CreatedAfter = &t
}
if createdBefore != "" {
t, err := parseTimeFlag(createdBefore)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing --created-before: %v\n", err)
os.Exit(1)
}
filter.CreatedBefore = &t
}
if updatedAfter != "" {
t, err := parseTimeFlag(updatedAfter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing --updated-after: %v\n", err)
os.Exit(1)
}
filter.UpdatedAfter = &t
}
if updatedBefore != "" {
t, err := parseTimeFlag(updatedBefore)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing --updated-before: %v\n", err)
os.Exit(1)
}
filter.UpdatedBefore = &t
}
// Get all issues // Get all issues
ctx := rootCtx ctx := rootCtx
@@ -421,5 +509,22 @@ func init() {
exportCmd.Flags().StringP("status", "s", "", "Filter by status") exportCmd.Flags().StringP("status", "s", "", "Filter by status")
exportCmd.Flags().Bool("force", false, "Force export even if database is empty") exportCmd.Flags().Bool("force", false, "Force export even if database is empty")
exportCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output export statistics in JSON format") exportCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output export statistics in JSON format")
// Filter flags
exportCmd.Flags().StringP("assignee", "a", "", "Filter by assignee")
exportCmd.Flags().StringP("type", "t", "", "Filter by type (bug, feature, task, epic, chore)")
exportCmd.Flags().StringSliceP("label", "l", []string{}, "Filter by labels (AND: must have ALL)")
exportCmd.Flags().StringSlice("label-any", []string{}, "Filter by labels (OR: must have AT LEAST ONE)")
// Priority filters
exportCmd.Flags().String("priority-min", "", "Filter by minimum priority (inclusive, 0-4 or P0-P4)")
exportCmd.Flags().String("priority-max", "", "Filter by maximum priority (inclusive, 0-4 or P0-P4)")
// Date filters
exportCmd.Flags().String("created-after", "", "Filter issues created after date (YYYY-MM-DD or RFC3339)")
exportCmd.Flags().String("created-before", "", "Filter issues created before date (YYYY-MM-DD or RFC3339)")
exportCmd.Flags().String("updated-after", "", "Filter issues updated after date (YYYY-MM-DD or RFC3339)")
exportCmd.Flags().String("updated-before", "", "Filter issues updated before date (YYYY-MM-DD or RFC3339)")
rootCmd.AddCommand(exportCmd) rootCmd.AddCommand(exportCmd)
} }

View File

@@ -353,7 +353,6 @@ With --no-db: creates .beads/ directory and issues.jsonl file instead of SQLite
green := color.New(color.FgGreen).SprintFunc() green := color.New(color.FgGreen).SprintFunc()
cyan := color.New(color.FgCyan).SprintFunc() cyan := color.New(color.FgCyan).SprintFunc()
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s bd initialized successfully!\n\n", green("✓")) fmt.Printf("\n%s bd initialized successfully!\n\n", green("✓"))
fmt.Printf(" Database: %s\n", cyan(initDBPath)) fmt.Printf(" Database: %s\n", cyan(initDBPath))
@@ -372,6 +371,7 @@ With --no-db: creates .beads/ directory and issues.jsonl file instead of SQLite
} }
} }
if hasIssues { if hasIssues {
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("%s Setup incomplete. Some issues were detected:\n", yellow("⚠")) fmt.Printf("%s Setup incomplete. Some issues were detected:\n", yellow("⚠"))
// Show just the warnings/errors, not all checks // Show just the warnings/errors, not all checks
for _, check := range doctorResult.Checks { for _, check := range doctorResult.Checks {

View File

@@ -99,6 +99,8 @@ var (
profileEnabled bool profileEnabled bool
profileFile *os.File profileFile *os.File
traceFile *os.File traceFile *os.File
verboseFlag bool // Enable verbose/debug output
quietFlag bool // Suppress non-essential output
) )
func init() { func init() {
@@ -118,9 +120,11 @@ func init() {
rootCmd.PersistentFlags().BoolVar(&allowStale, "allow-stale", false, "Allow operations on potentially stale data (skip staleness check)") rootCmd.PersistentFlags().BoolVar(&allowStale, "allow-stale", false, "Allow operations on potentially stale data (skip staleness check)")
rootCmd.PersistentFlags().BoolVar(&noDb, "no-db", false, "Use no-db mode: load from JSONL, no SQLite") rootCmd.PersistentFlags().BoolVar(&noDb, "no-db", false, "Use no-db mode: load from JSONL, no SQLite")
rootCmd.PersistentFlags().BoolVar(&profileEnabled, "profile", false, "Generate CPU profile for performance analysis") rootCmd.PersistentFlags().BoolVar(&profileEnabled, "profile", false, "Generate CPU profile for performance analysis")
rootCmd.PersistentFlags().BoolVarP(&verboseFlag, "verbose", "v", false, "Enable verbose/debug output")
rootCmd.PersistentFlags().BoolVarP(&quietFlag, "quiet", "q", false, "Suppress non-essential output (errors only)")
// Add --version flag to root command (same behavior as version subcommand) // Add --version flag to root command (same behavior as version subcommand)
rootCmd.Flags().BoolP("version", "v", false, "Print version information") rootCmd.Flags().BoolP("version", "V", false, "Print version information")
} }
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
@@ -140,6 +144,10 @@ var rootCmd = &cobra.Command{
// Set up signal-aware context for graceful cancellation // Set up signal-aware context for graceful cancellation
rootCtx, rootCancel = signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) rootCtx, rootCancel = signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
// Apply verbosity flags early (before any output)
debug.SetVerbose(verboseFlag)
debug.SetQuiet(quietFlag)
// Apply viper configuration if flags weren't explicitly set // Apply viper configuration if flags weren't explicitly set
// Priority: flags > viper (config file + env vars) > defaults // Priority: flags > viper (config file + env vars) > defaults
// Do this BEFORE early-return so init/version/help respect config // Do this BEFORE early-return so init/version/help respect config

View File

@@ -22,17 +22,30 @@ import (
var migrateHashIDsCmd = &cobra.Command{ var migrateHashIDsCmd = &cobra.Command{
Use: "migrate-hash-ids", Use: "migrate-hash-ids",
Short: "Migrate sequential IDs to hash-based IDs", Short: "Migrate sequential IDs to hash-based IDs (legacy)",
Long: `Migrate database from sequential IDs (bd-1, bd-2) to hash-based IDs (bd-a3f8e9a2). Long: `Migrate database from sequential IDs (bd-1, bd-2) to hash-based IDs (bd-a3f8e9a2).
This command: *** LEGACY COMMAND ***
This is a one-time migration command. Most users do not need this.
Only use if migrating from an older beads version that used sequential IDs.
What this does:
- Generates hash IDs for all top-level issues - Generates hash IDs for all top-level issues
- Assigns hierarchical child IDs (bd-a3f8e9a2.1) for epic children - Assigns hierarchical child IDs (bd-a3f8e9a2.1) for epic children
- Updates all references (dependencies, comments, external refs) - Updates all references (dependencies, comments, external refs)
- Creates mapping file for reference - Creates mapping file for reference
- Validates all relationships are intact - Validates all relationships are intact
- Automatically creates database backup before migration
Use --dry-run to preview changes before applying.`, USE CASES:
- Upgrading from beads v1.x to v2.x (sequential → hash IDs)
- One-time migration only - do not run on already-migrated databases
EXAMPLES:
bd migrate-hash-ids --dry-run # Preview changes
bd migrate-hash-ids # Perform migration (creates backup)
WARNING: Backup your database before running this command, even though it creates one automatically.`,
Run: func(cmd *cobra.Command, _ []string) { Run: func(cmd *cobra.Command, _ []string) {
dryRun, _ := cmd.Flags().GetBool("dry-run") dryRun, _ := cmd.Flags().GetBool("dry-run")

View File

@@ -18,10 +18,16 @@ import (
var renamePrefixCmd = &cobra.Command{ var renamePrefixCmd = &cobra.Command{
Use: "rename-prefix <new-prefix>", Use: "rename-prefix <new-prefix>",
Short: "Rename the issue prefix for all issues", Short: "Rename the issue prefix for all issues in the database",
Long: `Rename the issue prefix for all issues in the database. Long: `Rename the issue prefix for all issues in the database.
This will update all issue IDs and all text references across all fields. This will update all issue IDs and all text references across all fields.
USE CASES:
- Shortening long prefixes (e.g., 'knowledge-work-' → 'kw-')
- Rebranding project naming conventions
- Consolidating multiple prefixes after database corruption
- Migrating to team naming standards
Prefix validation rules: Prefix validation rules:
- Max length: 8 characters - Max length: 8 characters
- Allowed characters: lowercase letters, numbers, hyphens - Allowed characters: lowercase letters, numbers, hyphens
@@ -34,9 +40,12 @@ If issues have multiple prefixes (corrupted database), use --repair to consolida
The --repair flag will rename all issues with incorrect prefixes to the new prefix, The --repair flag will rename all issues with incorrect prefixes to the new prefix,
preserving issues that already have the correct prefix. preserving issues that already have the correct prefix.
Example: EXAMPLES:
bd rename-prefix kw- # Rename from 'knowledge-work-' to 'kw-' bd rename-prefix kw- # Rename from 'knowledge-work-' to 'kw-'
bd rename-prefix mtg- --repair # Consolidate multiple prefixes into 'mtg-'`, bd rename-prefix mtg- --repair # Consolidate multiple prefixes into 'mtg-'
bd rename-prefix team- --dry-run # Preview changes without applying
NOTE: This is a rare operation. Most users never need this command.`,
Args: cobra.ExactArgs(1), Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
newPrefix := args[0] newPrefix := args[0]

View File

@@ -170,6 +170,12 @@ Configuration keys use dot-notation namespaces to organize settings:
- `min_hash_length` - Minimum hash ID length (default: 4) - `min_hash_length` - Minimum hash ID length (default: 4)
- `max_hash_length` - Maximum hash ID length (default: 8) - `max_hash_length` - Maximum hash ID length (default: 8)
- `import.orphan_handling` - How to handle hierarchical issues with missing parents during import (default: `allow`) - `import.orphan_handling` - How to handle hierarchical issues with missing parents during import (default: `allow`)
- `export.error_policy` - Error handling strategy for exports (default: `strict`)
- `export.retry_attempts` - Number of retry attempts for transient errors (default: 3)
- `export.retry_backoff_ms` - Initial backoff in milliseconds for retries (default: 100)
- `export.skip_encoding_errors` - Skip issues that fail JSON encoding (default: false)
- `export.write_manifest` - Write .manifest.json with export metadata (default: false)
- `auto_export.error_policy` - Override error policy for auto-exports (default: `best-effort`)
### Integration Namespaces ### Integration Namespaces
@@ -200,6 +206,74 @@ bd config set min_hash_length "5"
See [docs/ADAPTIVE_IDS.md](docs/ADAPTIVE_IDS.md) for detailed documentation. See [docs/ADAPTIVE_IDS.md](docs/ADAPTIVE_IDS.md) for detailed documentation.
### Example: Export Error Handling
Controls how export operations handle errors when fetching issue data (labels, comments, dependencies).
```bash
# Strict: Fail fast on any error (default for user-initiated exports)
bd config set export.error_policy "strict"
# Best-effort: Skip failed operations with warnings (good for auto-export)
bd config set export.error_policy "best-effort"
# Partial: Retry transient failures, skip persistent ones with manifest
bd config set export.error_policy "partial"
bd config set export.write_manifest "true"
# Required-core: Fail on core data (issues/deps), skip enrichments (labels/comments)
bd config set export.error_policy "required-core"
# Customize retry behavior
bd config set export.retry_attempts "5"
bd config set export.retry_backoff_ms "200"
# Skip individual issues that fail JSON encoding
bd config set export.skip_encoding_errors "true"
# Auto-export uses different policy (background operation)
bd config set auto_export.error_policy "best-effort"
```
**Policy details:**
- **`strict`** (default) - Fail immediately on any error. Ensures complete exports but may block on transient issues like database locks. Best for critical exports and migrations.
- **`best-effort`** - Skip failed batches with warnings. Continues export even if labels or comments fail to load. Best for auto-exports and background sync where availability matters more than completeness.
- **`partial`** - Retry transient failures (3x by default), then skip with manifest file. Creates `.manifest.json` alongside JSONL documenting what succeeded/failed. Best for large databases with occasional corruption.
- **`required-core`** - Fail on core data (issues, dependencies), skip enrichments (labels, comments) with warnings. Best when metadata is secondary to issue tracking.
**When to use each mode:**
- Use `strict` (default) for production backups and critical exports
- Use `best-effort` for auto-exports (default via `auto_export.error_policy`)
- Use `partial` when you need visibility into export completeness
- Use `required-core` when labels/comments are optional
**Context-specific behavior:**
User-initiated exports (`bd sync`, manual export commands) use `export.error_policy` (default: `strict`).
Auto-exports (daemon background sync) use `auto_export.error_policy` (default: `best-effort`), falling back to `export.error_policy` if not set.
**Example: Different policies for different contexts:**
```bash
# Critical project: strict everywhere
bd config set export.error_policy "strict"
# Development project: strict user exports, permissive auto-exports
bd config set export.error_policy "strict"
bd config set auto_export.error_policy "best-effort"
# Large database with occasional corruption
bd config set export.error_policy "partial"
bd config set export.write_manifest "true"
bd config set export.retry_attempts "5"
```
### Example: Import Orphan Handling ### Example: Import Orphan Handling
Controls how imports handle hierarchical child issues when their parent is missing from the database: Controls how imports handle hierarchical child issues when their parent is missing from the database:

View File

@@ -5,20 +5,54 @@ import (
"os" "os"
) )
var enabled = os.Getenv("BD_DEBUG") != "" var (
enabled = os.Getenv("BD_DEBUG") != ""
verboseMode = false
quietMode = false
)
func Enabled() bool { func Enabled() bool {
return enabled return enabled || verboseMode
}
// SetVerbose enables verbose/debug output
func SetVerbose(verbose bool) {
verboseMode = verbose
}
// SetQuiet enables quiet mode (suppress non-essential output)
func SetQuiet(quiet bool) {
quietMode = quiet
}
// IsQuiet returns true if quiet mode is enabled
func IsQuiet() bool {
return quietMode
} }
func Logf(format string, args ...interface{}) { func Logf(format string, args ...interface{}) {
if enabled { if enabled || verboseMode {
fmt.Fprintf(os.Stderr, format, args...) fmt.Fprintf(os.Stderr, format, args...)
} }
} }
func Printf(format string, args ...interface{}) { func Printf(format string, args ...interface{}) {
if enabled { if enabled || verboseMode {
fmt.Printf(format, args...) fmt.Printf(format, args...)
} }
} }
// PrintNormal prints output unless quiet mode is enabled
// Use this for normal informational output that should be suppressed in quiet mode
func PrintNormal(format string, args ...interface{}) {
if !quietMode {
fmt.Printf(format, args...)
}
}
// PrintlnNormal prints a line unless quiet mode is enabled
func PrintlnNormal(args ...interface{}) {
if !quietMode {
fmt.Println(args...)
}
}

117
internal/export/config.go Normal file
View File

@@ -0,0 +1,117 @@
package export
import (
"context"
"fmt"
"strconv"
"github.com/steveyegge/beads/internal/storage"
)
// ConfigStore defines the minimal storage interface needed for config.
// storage.Storage satisfies it (see SetPolicy below); declaring the narrow
// interface here keeps LoadConfig testable with lightweight fakes.
type ConfigStore interface {
	// GetConfig returns the stored value for key; an empty value or an
	// error is treated by callers as "unset".
	GetConfig(ctx context.Context, key string) (string, error)
	// SetConfig persists value under key.
	SetConfig(ctx context.Context, key, value string) error
}
// LoadConfig reads export configuration from storage.
//
// Policy resolution order:
//   - auto-exports: auto_export.error_policy if set and valid, else
//     export.error_policy, else DefaultAutoExportPolicy (best-effort)
//   - user exports: export.error_policy if set and valid, else
//     DefaultErrorPolicy (strict)
//
// Invalid or unreadable values for any key silently keep the defaults, so a
// corrupted config row never blocks an export. The returned error is
// currently always nil; it is kept for signature stability.
func LoadConfig(ctx context.Context, store ConfigStore, isAutoExport bool) (*Config, error) {
	cfg := &Config{
		Policy:             DefaultErrorPolicy,
		RetryAttempts:      DefaultRetryAttempts,
		RetryBackoffMS:     DefaultRetryBackoffMS,
		SkipEncodingErrors: DefaultSkipEncodingErrors,
		WriteManifest:      DefaultWriteManifest,
		IsAutoExport:       isAutoExport,
	}

	// Resolve the error policy. Track whether an explicit setting was found
	// so an auto-export policy explicitly set to "strict" is not clobbered
	// by the general-key fallback (the previous sentinel comparison against
	// DefaultErrorPolicy had exactly that bug).
	policySet := false
	if isAutoExport {
		// Background exports default to best-effort (DefaultAutoExportPolicy),
		// matching the documented default for auto_export.error_policy.
		cfg.Policy = DefaultAutoExportPolicy
		if val, err := store.GetConfig(ctx, ConfigKeyAutoExportPolicy); err == nil && val != "" {
			if policy := ErrorPolicy(val); policy.IsValid() {
				cfg.Policy = policy
				policySet = true
			}
		}
	}
	if !policySet {
		// General export policy: primary key for user exports, fallback for
		// auto-exports without an auto_export.error_policy override.
		if val, err := store.GetConfig(ctx, ConfigKeyErrorPolicy); err == nil && val != "" {
			if policy := ErrorPolicy(val); policy.IsValid() {
				cfg.Policy = policy
			}
		}
	}

	// Retry attempts: non-negative integers only.
	if val, err := store.GetConfig(ctx, ConfigKeyRetryAttempts); err == nil && val != "" {
		if attempts, err := strconv.Atoi(val); err == nil && attempts >= 0 {
			cfg.RetryAttempts = attempts
		}
	}

	// Initial retry backoff: strictly positive milliseconds.
	if val, err := store.GetConfig(ctx, ConfigKeyRetryBackoffMS); err == nil && val != "" {
		if backoff, err := strconv.Atoi(val); err == nil && backoff > 0 {
			cfg.RetryBackoffMS = backoff
		}
	}

	// Skip issues that fail JSON encoding?
	if val, err := store.GetConfig(ctx, ConfigKeySkipEncodingErrors); err == nil && val != "" {
		if skip, err := strconv.ParseBool(val); err == nil {
			cfg.SkipEncodingErrors = skip
		}
	}

	// Write a .manifest.json alongside exports?
	if val, err := store.GetConfig(ctx, ConfigKeyWriteManifest); err == nil && val != "" {
		if write, err := strconv.ParseBool(val); err == nil {
			cfg.WriteManifest = write
		}
	}

	return cfg, nil
}
// SetPolicy persists the export error policy, rejecting unknown values.
// When autoExport is true the auto-export-specific key is written instead
// of the general export key.
func SetPolicy(ctx context.Context, store storage.Storage, policy ErrorPolicy, autoExport bool) error {
	if !policy.IsValid() {
		return fmt.Errorf("invalid error policy: %s (valid: strict, best-effort, partial, required-core)", policy)
	}
	var key string
	if autoExport {
		key = ConfigKeyAutoExportPolicy
	} else {
		key = ConfigKeyErrorPolicy
	}
	return store.SetConfig(ctx, key, string(policy))
}
// SetRetryAttempts stores how many times export operations retry transient
// errors. Negative values are rejected.
func SetRetryAttempts(ctx context.Context, store storage.Storage, attempts int) error {
	if attempts >= 0 {
		return store.SetConfig(ctx, ConfigKeyRetryAttempts, strconv.Itoa(attempts))
	}
	return fmt.Errorf("retry attempts must be non-negative")
}
// SetRetryBackoff stores the initial retry backoff in milliseconds.
// Only strictly positive values are accepted.
func SetRetryBackoff(ctx context.Context, store storage.Storage, backoffMS int) error {
	if backoffMS > 0 {
		return store.SetConfig(ctx, ConfigKeyRetryBackoffMS, strconv.Itoa(backoffMS))
	}
	return fmt.Errorf("retry backoff must be positive")
}
// SetSkipEncodingErrors records whether issues that fail JSON encoding
// should be skipped during export instead of aborting it.
func SetSkipEncodingErrors(ctx context.Context, store storage.Storage, skip bool) error {
	value := strconv.FormatBool(skip)
	return store.SetConfig(ctx, ConfigKeySkipEncodingErrors, value)
}
// SetWriteManifest records whether exports should emit a .manifest.json
// file documenting export completeness.
func SetWriteManifest(ctx context.Context, store storage.Storage, write bool) error {
	value := strconv.FormatBool(write)
	return store.SetConfig(ctx, ConfigKeyWriteManifest, value)
}

View File

@@ -0,0 +1,96 @@
package export
import (
"context"
"fmt"
"os"
)
// DataType represents a type of data being fetched during export.
// Core data is mandatory under the required-core policy; labels and
// comments are treated as optional enrichments (see FetchWithPolicy).
type DataType string

const (
	DataTypeCore     DataType = "core"     // Issues and dependencies
	DataTypeLabels   DataType = "labels"   // Issue labels
	DataTypeComments DataType = "comments" // Issue comments
)
// FetchResult holds the result of a data fetch operation.
// FetchWithPolicy produces one of three shapes:
//   - Success true: the fetch completed (possibly after retries)
//   - Err non-nil: the active policy requires failing the export
//   - Success false with Warnings: the failure was skipped per policy
type FetchResult struct {
	Success  bool     // true when the data was fetched successfully
	Err      error    // fatal error; the export should abort
	Warnings []string // warnings for failures skipped under lenient policies
}
// FetchWithPolicy executes a fetch operation with the configured error policy.
//
// Every recognized policy first attempts the fetch via RetryWithBackoff. On
// failure the policy decides the outcome: strict always fails, required-core
// fails only for core data (issues/dependencies), and every other combination
// skips the data with a warning written to stderr and recorded in the result.
func FetchWithPolicy(ctx context.Context, cfg *Config, dataType DataType, desc string, fn func() error) FetchResult {
	var result FetchResult

	// Reject unrecognized policies up front; failing fast is the safest option.
	switch cfg.Policy {
	case PolicyStrict, PolicyBestEffort, PolicyPartial, PolicyRequiredCore:
		// Recognized; handled below.
	default:
		result.Err = fmt.Errorf("unknown error policy: %s", cfg.Policy)
		return result
	}

	// All policies attempt the fetch the same way.
	err := RetryWithBackoff(ctx, cfg.RetryAttempts, cfg.RetryBackoffMS, desc, fn)
	if err == nil {
		result.Success = true
		return result
	}

	// Hard failure: strict always, required-core when the data is core.
	if cfg.Policy == PolicyStrict || (cfg.Policy == PolicyRequiredCore && dataType == DataTypeCore) {
		result.Err = err
		return result
	}

	// Soft failure: skip the data with a policy-specific warning.
	var warning string
	switch cfg.Policy {
	case PolicyBestEffort:
		warning = fmt.Sprintf("Warning: %s failed, skipping: %v", desc, err)
	case PolicyPartial:
		warning = fmt.Sprintf("Warning: %s failed after retries, skipping: %v", desc, err)
	case PolicyRequiredCore:
		warning = fmt.Sprintf("Warning: %s (enrichment) failed, skipping: %v", desc, err)
	}
	fmt.Fprintf(os.Stderr, "%s\n", warning)
	result.Warnings = append(result.Warnings, warning)
	result.Success = false // data is missing
	return result
}

View File

@@ -0,0 +1,65 @@
package export
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
)
// WriteManifest writes an export manifest alongside the JSONL file.
//
// The manifest path is derived by replacing the .jsonl suffix with
// .manifest.json. The write is atomic: data is written to a temp file in
// the same directory, which is then renamed over the destination, so
// readers never observe a partially written manifest.
func WriteManifest(jsonlPath string, manifest *Manifest) error {
	// Derive manifest path from JSONL path.
	manifestPath := strings.TrimSuffix(jsonlPath, ".jsonl") + ".manifest.json"

	data, err := json.MarshalIndent(manifest, "", "  ")
	if err != nil {
		return fmt.Errorf("failed to marshal manifest: %w", err)
	}

	// Create the temp file in the destination directory so the final
	// os.Rename stays on one filesystem (rename is not atomic across mounts).
	dir := filepath.Dir(manifestPath)
	base := filepath.Base(manifestPath)
	tempFile, err := os.CreateTemp(dir, base+".tmp.*")
	if err != nil {
		return fmt.Errorf("failed to create temp manifest file: %w", err)
	}
	tempPath := tempFile.Name()
	// Best-effort cleanup on any early return; after a successful rename
	// tempPath no longer exists and both calls are harmless no-ops.
	defer func() {
		_ = tempFile.Close()
		_ = os.Remove(tempPath)
	}()

	if _, err := tempFile.Write(data); err != nil {
		return fmt.Errorf("failed to write manifest: %w", err)
	}

	// Restrict permissions (0600: rw-------) BEFORE the file becomes visible
	// at its final path. The previous chmod-after-rename left a window where
	// the published file carried temp-file permissions.
	if err := tempFile.Chmod(0600); err != nil {
		// Non-fatal: os.CreateTemp already creates the file as 0600.
		fmt.Fprintf(os.Stderr, "Warning: failed to set manifest permissions: %v\n", err)
	}

	// Close before rename and check the error: some filesystems surface
	// deferred write failures only at Close.
	if err := tempFile.Close(); err != nil {
		return fmt.Errorf("failed to write manifest: %w", err)
	}

	// Atomic replace.
	if err := os.Rename(tempPath, manifestPath); err != nil {
		return fmt.Errorf("failed to replace manifest file: %w", err)
	}
	return nil
}
// NewManifest creates a manifest for an export run under the given policy,
// stamped with the current time.
func NewManifest(policy ErrorPolicy) *Manifest {
	m := &Manifest{
		ErrorPolicy: string(policy),
		ExportedAt:  time.Now(),
	}
	m.Complete = true // assume complete until a failure is recorded
	return m
}

127
internal/export/policy.go Normal file
View File

@@ -0,0 +1,127 @@
package export
import (
"context"
"fmt"
"time"
)
// ErrorPolicy defines how export operations handle errors.
type ErrorPolicy string

const (
	// PolicyStrict fails fast on any error (default for user-initiated exports).
	PolicyStrict ErrorPolicy = "strict"
	// PolicyBestEffort skips failed operations with warnings (good for auto-export).
	PolicyBestEffort ErrorPolicy = "best-effort"
	// PolicyPartial retries transient failures, skips persistent ones with manifest.
	PolicyPartial ErrorPolicy = "partial"
	// PolicyRequiredCore fails on core data (issues/deps), skips enrichments (labels/comments).
	PolicyRequiredCore ErrorPolicy = "required-core"
)

// Config keys for export error handling, as looked up in the beads
// configuration store when building an export Config.
const (
	ConfigKeyErrorPolicy        = "export.error_policy"
	ConfigKeyRetryAttempts      = "export.retry_attempts"
	ConfigKeyRetryBackoffMS     = "export.retry_backoff_ms"
	ConfigKeySkipEncodingErrors = "export.skip_encoding_errors"
	ConfigKeyWriteManifest      = "export.write_manifest"
	ConfigKeyAutoExportPolicy   = "auto_export.error_policy"
)

// Default values used when the corresponding config keys are unset.
const (
	DefaultErrorPolicy        = PolicyStrict
	DefaultRetryAttempts      = 3
	DefaultRetryBackoffMS     = 100
	DefaultSkipEncodingErrors = false
	DefaultWriteManifest      = false
	DefaultAutoExportPolicy   = PolicyBestEffort
)
// Config holds export error handling configuration.
type Config struct {
	Policy             ErrorPolicy // how fetch failures are handled (strict/best-effort/partial/required-core)
	RetryAttempts      int         // max attempts per operation; values < 1 are treated as 1 by RetryWithBackoff
	RetryBackoffMS     int         // initial backoff between attempts in ms, doubled after each retry
	SkipEncodingErrors bool        // when true, issues that fail to JSON-encode are skipped with a warning
	WriteManifest      bool        // when true, a .manifest.json is written alongside the JSONL export
	IsAutoExport       bool        // If true, may use different policy
}

// Manifest tracks export completeness and failures. It is serialized to
// JSON next to the exported JSONL file (see WriteManifest).
type Manifest struct {
	ExportedCount int           `json:"exported_count"`
	FailedIssues  []FailedIssue `json:"failed_issues,omitempty"`
	PartialData   []string      `json:"partial_data,omitempty"` // e.g., ["labels", "comments"]
	Warnings      []string      `json:"warnings,omitempty"`
	Complete      bool          `json:"complete"` // false when any issue or enrichment data is missing
	ExportedAt    time.Time     `json:"exported_at"`
	ErrorPolicy   string        `json:"error_policy"`
}

// FailedIssue tracks a single issue that failed to export.
type FailedIssue struct {
	IssueID     string   `json:"issue_id"`
	Reason      string   `json:"reason"`
	MissingData []string `json:"missing_data,omitempty"` // e.g., ["labels", "comments"]
}
// RetryWithBackoff wraps a function with retry logic
func RetryWithBackoff(ctx context.Context, attempts int, initialBackoffMS int, desc string, fn func() error) error {
if attempts < 1 {
attempts = 1
}
var lastErr error
backoff := time.Duration(initialBackoffMS) * time.Millisecond
for attempt := 1; attempt <= attempts; attempt++ {
err := fn()
if err == nil {
return nil
}
lastErr = err
// Don't retry on context cancellation
if ctx.Err() != nil {
return ctx.Err()
}
// Don't wait after last attempt
if attempt == attempts {
break
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(backoff):
backoff *= 2 // Exponential backoff
}
}
if attempts > 1 {
return fmt.Errorf("%s failed after %d attempts: %w", desc, attempts, lastErr)
}
return lastErr
}
// IsValid reports whether p is one of the recognized export error policies.
func (p ErrorPolicy) IsValid() bool {
	for _, known := range []ErrorPolicy{PolicyStrict, PolicyBestEffort, PolicyPartial, PolicyRequiredCore} {
		if p == known {
			return true
		}
	}
	return false
}
// String implements fmt.Stringer, returning the policy's raw string value.
func (p ErrorPolicy) String() string {
	return string(p)
}

View File

@@ -0,0 +1,176 @@
package export
import (
"context"
"errors"
"testing"
"time"
)
// TestRetryWithBackoff exercises the retry helper: immediate success,
// success after transient failures, exhaustion of attempts, and
// context-cancellation short-circuiting.
func TestRetryWithBackoff(t *testing.T) {
	ctx := context.Background()

	t.Run("succeeds first try", func(t *testing.T) {
		attempts := 0
		err := RetryWithBackoff(ctx, 3, 100, "test", func() error {
			attempts++
			return nil
		})
		if err != nil {
			t.Errorf("expected no error, got %v", err)
		}
		if attempts != 1 {
			t.Errorf("expected 1 attempt, got %d", attempts)
		}
	})

	t.Run("succeeds after retries", func(t *testing.T) {
		attempts := 0
		err := RetryWithBackoff(ctx, 3, 10, "test", func() error {
			attempts++
			if attempts < 3 {
				return errors.New("transient error")
			}
			return nil
		})
		if err != nil {
			t.Errorf("expected no error, got %v", err)
		}
		if attempts != 3 {
			t.Errorf("expected 3 attempts, got %d", attempts)
		}
	})

	t.Run("fails after max retries", func(t *testing.T) {
		attempts := 0
		err := RetryWithBackoff(ctx, 3, 10, "test", func() error {
			attempts++
			return errors.New("persistent error")
		})
		if err == nil {
			t.Error("expected error, got nil")
		}
		if attempts != 3 {
			t.Errorf("expected 3 attempts, got %d", attempts)
		}
	})

	t.Run("respects context cancellation", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
		defer cancel()
		attempts := 0
		err := RetryWithBackoff(ctx, 10, 100, "test", func() error {
			attempts++
			return errors.New("error")
		})
		// Use errors.Is rather than == so the check remains correct if the
		// implementation ever wraps the context error.
		if !errors.Is(err, context.DeadlineExceeded) {
			t.Errorf("expected DeadlineExceeded, got %v", err)
		}
		// Should stop before reaching max retries due to timeout
		if attempts >= 10 {
			t.Errorf("expected fewer than 10 attempts due to timeout, got %d", attempts)
		}
	})
}
// TestErrorPolicy verifies IsValid for every recognized policy value plus
// unknown and empty strings.
func TestErrorPolicy(t *testing.T) {
	cases := []struct {
		name   string
		policy ErrorPolicy
		valid  bool
	}{
		{"strict", PolicyStrict, true},
		{"best-effort", PolicyBestEffort, true},
		{"partial", PolicyPartial, true},
		{"required-core", PolicyRequiredCore, true},
		{"invalid", ErrorPolicy("invalid"), false},
		{"empty", ErrorPolicy(""), false},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.policy.IsValid(); got != tc.valid {
				t.Errorf("IsValid() = %v, want %v", got, tc.valid)
			}
		})
	}
}
func TestFetchWithPolicy(t *testing.T) {
ctx := context.Background()
t.Run("strict policy fails fast", func(t *testing.T) {
cfg := &Config{
Policy: PolicyStrict,
RetryAttempts: 1,
RetryBackoffMS: 10,
}
result := FetchWithPolicy(ctx, cfg, DataTypeCore, "test", func() error {
return errors.New("test error")
})
if result.Err == nil {
t.Error("expected error, got nil")
}
if result.Success {
t.Error("expected Success=false")
}
})
t.Run("best-effort policy skips errors", func(t *testing.T) {
cfg := &Config{
Policy: PolicyBestEffort,
RetryAttempts: 1,
RetryBackoffMS: 10,
}
result := FetchWithPolicy(ctx, cfg, DataTypeLabels, "test", func() error {
return errors.New("test error")
})
if result.Err != nil {
t.Errorf("expected no error in best-effort, got %v", result.Err)
}
if result.Success {
t.Error("expected Success=false")
}
if len(result.Warnings) == 0 {
t.Error("expected warnings")
}
})
t.Run("required-core fails on core data", func(t *testing.T) {
cfg := &Config{
Policy: PolicyRequiredCore,
RetryAttempts: 1,
RetryBackoffMS: 10,
}
result := FetchWithPolicy(ctx, cfg, DataTypeCore, "test", func() error {
return errors.New("test error")
})
if result.Err == nil {
t.Error("expected error for core data, got nil")
}
if result.Success {
t.Error("expected Success=false")
}
})
t.Run("required-core skips enrichment errors", func(t *testing.T) {
cfg := &Config{
Policy: PolicyRequiredCore,
RetryAttempts: 1,
RetryBackoffMS: 10,
}
result := FetchWithPolicy(ctx, cfg, DataTypeLabels, "test", func() error {
return errors.New("test error")
})
if result.Err != nil {
t.Errorf("expected no error for enrichment, got %v", result.Err)
}
if result.Success {
t.Error("expected Success=false")
}
if len(result.Warnings) == 0 {
t.Error("expected warnings")
}
})
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/steveyegge/beads/internal/autoimport" "github.com/steveyegge/beads/internal/autoimport"
"github.com/steveyegge/beads/internal/debug" "github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/export"
"github.com/steveyegge/beads/internal/importer" "github.com/steveyegge/beads/internal/importer"
"github.com/steveyegge/beads/internal/storage" "github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/sqlite" "github.com/steveyegge/beads/internal/storage/sqlite"
@@ -30,10 +31,24 @@ func (s *Server) handleExport(req *Request) Response {
} }
store := s.storage store := s.storage
ctx := s.reqCtx(req) ctx := s.reqCtx(req)
// Get all issues // Load export configuration (user-initiated export, not auto)
cfg, err := export.LoadConfig(ctx, store, false)
if err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("failed to load export config: %v", err),
}
}
// Initialize manifest if configured
var manifest *export.Manifest
if cfg.WriteManifest {
manifest = export.NewManifest(cfg.Policy)
}
// Get all issues (core operation, always fail-fast)
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{}) issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil { if err != nil {
return Response{ return Response{
@@ -47,40 +62,73 @@ func (s *Server) handleExport(req *Request) Response {
return issues[i].ID < issues[j].ID return issues[i].ID < issues[j].ID
}) })
// Populate dependencies for all issues (avoid N+1) // Populate dependencies for all issues (core data)
allDeps, err := store.GetAllDependencyRecords(ctx) var allDeps map[string][]*types.Dependency
if err != nil { result := export.FetchWithPolicy(ctx, cfg, export.DataTypeCore, "get dependencies", func() error {
var err error
allDeps, err = store.GetAllDependencyRecords(ctx)
return err
})
if result.Err != nil {
return Response{ return Response{
Success: false, Success: false,
Error: fmt.Sprintf("failed to get dependencies: %v", err), Error: fmt.Sprintf("failed to get dependencies: %v", result.Err),
} }
} }
for _, issue := range issues { for _, issue := range issues {
issue.Dependencies = allDeps[issue.ID] issue.Dependencies = allDeps[issue.ID]
} }
// Populate labels for all issues (avoid N+1) // Populate labels for all issues (enrichment data)
issueIDs := make([]string, len(issues)) issueIDs := make([]string, len(issues))
for i, issue := range issues { for i, issue := range issues {
issueIDs[i] = issue.ID issueIDs[i] = issue.ID
} }
allLabels, err := store.GetLabelsForIssues(ctx, issueIDs) var allLabels map[string][]string
if err != nil { result = export.FetchWithPolicy(ctx, cfg, export.DataTypeLabels, "get labels", func() error {
var err error
allLabels, err = store.GetLabelsForIssues(ctx, issueIDs)
return err
})
if result.Err != nil {
return Response{ return Response{
Success: false, Success: false,
Error: fmt.Sprintf("failed to get labels: %v", err), Error: fmt.Sprintf("failed to get labels: %v", result.Err),
}
}
if !result.Success {
// Labels fetch failed but policy allows continuing
allLabels = make(map[string][]string) // Empty map
if manifest != nil {
manifest.PartialData = append(manifest.PartialData, "labels")
manifest.Warnings = append(manifest.Warnings, result.Warnings...)
manifest.Complete = false
} }
} }
for _, issue := range issues { for _, issue := range issues {
issue.Labels = allLabels[issue.ID] issue.Labels = allLabels[issue.ID]
} }
// Populate comments for all issues (avoid N+1) // Populate comments for all issues (enrichment data)
allComments, err := store.GetCommentsForIssues(ctx, issueIDs) var allComments map[string][]*types.Comment
if err != nil { result = export.FetchWithPolicy(ctx, cfg, export.DataTypeComments, "get comments", func() error {
var err error
allComments, err = store.GetCommentsForIssues(ctx, issueIDs)
return err
})
if result.Err != nil {
return Response{ return Response{
Success: false, Success: false,
Error: fmt.Sprintf("failed to get comments: %v", err), Error: fmt.Sprintf("failed to get comments: %v", result.Err),
}
}
if !result.Success {
// Comments fetch failed but policy allows continuing
allComments = make(map[string][]*types.Comment) // Empty map
if manifest != nil {
manifest.PartialData = append(manifest.PartialData, "comments")
manifest.Warnings = append(manifest.Warnings, result.Warnings...)
manifest.Complete = false
} }
} }
for _, issue := range issues { for _, issue := range issues {
@@ -106,8 +154,24 @@ func (s *Server) handleExport(req *Request) Response {
// Write JSONL // Write JSONL
encoder := json.NewEncoder(tempFile) encoder := json.NewEncoder(tempFile)
exportedIDs := make([]string, 0, len(issues)) exportedIDs := make([]string, 0, len(issues))
var encodingWarnings []string
for _, issue := range issues { for _, issue := range issues {
if err := encoder.Encode(issue); err != nil { if err := encoder.Encode(issue); err != nil {
if cfg.SkipEncodingErrors {
// Skip this issue and continue
warning := fmt.Sprintf("skipped encoding issue %s: %v", issue.ID, err)
fmt.Fprintf(os.Stderr, "Warning: %s\n", warning)
encodingWarnings = append(encodingWarnings, warning)
if manifest != nil {
manifest.FailedIssues = append(manifest.FailedIssues, export.FailedIssue{
IssueID: issue.ID,
Reason: err.Error(),
})
manifest.Complete = false
}
continue
}
// Fail-fast on encoding errors
return Response{ return Response{
Success: false, Success: false,
Error: fmt.Sprintf("failed to encode issue %s: %v", issue.ID, err), Error: fmt.Sprintf("failed to encode issue %s: %v", issue.ID, err),
@@ -139,11 +203,25 @@ func (s *Server) handleExport(req *Request) Response {
fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty flags: %v\n", err) fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty flags: %v\n", err)
} }
result := map[string]interface{}{ // Write manifest if configured
if manifest != nil {
manifest.ExportedCount = len(exportedIDs)
manifest.Warnings = append(manifest.Warnings, encodingWarnings...)
if err := export.WriteManifest(exportArgs.JSONLPath, manifest); err != nil {
// Non-fatal, just log
fmt.Fprintf(os.Stderr, "Warning: failed to write manifest: %v\n", err)
}
}
responseData := map[string]interface{}{
"exported_count": len(exportedIDs), "exported_count": len(exportedIDs),
"path": exportArgs.JSONLPath, "path": exportArgs.JSONLPath,
"skipped_count": len(encodingWarnings),
} }
data, _ := json.Marshal(result) if len(encodingWarnings) > 0 {
responseData["warnings"] = encodingWarnings
}
data, _ := json.Marshal(responseData)
return Response{ return Response{
Success: true, Success: true,
Data: data, Data: data,
@@ -379,6 +457,18 @@ func (s *Server) triggerExport(ctx context.Context, store storage.Storage, dbPat
return fmt.Errorf("storage is not SQLiteStorage") return fmt.Errorf("storage is not SQLiteStorage")
} }
// Load export configuration (auto-export mode)
cfg, err := export.LoadConfig(ctx, store, true)
if err != nil {
// Fall back to defaults if config load fails
cfg = &export.Config{
Policy: export.DefaultAutoExportPolicy,
RetryAttempts: export.DefaultRetryAttempts,
RetryBackoffMS: export.DefaultRetryBackoffMS,
IsAutoExport: true,
}
}
// Export to JSONL (this will update the file with remapped IDs) // Export to JSONL (this will update the file with remapped IDs)
allIssues, err := sqliteStore.SearchIssues(ctx, "", types.IssueFilter{}) allIssues, err := sqliteStore.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil { if err != nil {
@@ -393,32 +483,55 @@ func (s *Server) triggerExport(ctx context.Context, store storage.Storage, dbPat
// CRITICAL: Populate all related data to prevent data loss // CRITICAL: Populate all related data to prevent data loss
// This mirrors the logic in handleExport // This mirrors the logic in handleExport
// Populate dependencies for all issues (avoid N+1 queries) // Populate dependencies for all issues (core data)
allDeps, err := store.GetAllDependencyRecords(ctx) var allDeps map[string][]*types.Dependency
if err != nil { result := export.FetchWithPolicy(ctx, cfg, export.DataTypeCore, "get dependencies", func() error {
return fmt.Errorf("failed to get dependencies: %w", err) var err error
allDeps, err = store.GetAllDependencyRecords(ctx)
return err
})
if result.Err != nil {
return fmt.Errorf("failed to get dependencies: %w", result.Err)
} }
for _, issue := range allIssues { for _, issue := range allIssues {
issue.Dependencies = allDeps[issue.ID] issue.Dependencies = allDeps[issue.ID]
} }
// Populate labels for all issues (avoid N+1 queries) // Populate labels for all issues (enrichment data)
issueIDs := make([]string, len(allIssues)) issueIDs := make([]string, len(allIssues))
for i, issue := range allIssues { for i, issue := range allIssues {
issueIDs[i] = issue.ID issueIDs[i] = issue.ID
} }
allLabels, err := store.GetLabelsForIssues(ctx, issueIDs) var allLabels map[string][]string
if err != nil { result = export.FetchWithPolicy(ctx, cfg, export.DataTypeLabels, "get labels", func() error {
return fmt.Errorf("failed to get labels: %w", err) var err error
allLabels, err = store.GetLabelsForIssues(ctx, issueIDs)
return err
})
if result.Err != nil {
return fmt.Errorf("failed to get labels: %w", result.Err)
}
if !result.Success {
// Labels fetch failed but policy allows continuing
allLabels = make(map[string][]string) // Empty map
} }
for _, issue := range allIssues { for _, issue := range allIssues {
issue.Labels = allLabels[issue.ID] issue.Labels = allLabels[issue.ID]
} }
// Populate comments for all issues (avoid N+1 queries) // Populate comments for all issues (enrichment data)
allComments, err := store.GetCommentsForIssues(ctx, issueIDs) var allComments map[string][]*types.Comment
if err != nil { result = export.FetchWithPolicy(ctx, cfg, export.DataTypeComments, "get comments", func() error {
return fmt.Errorf("failed to get comments: %w", err) var err error
allComments, err = store.GetCommentsForIssues(ctx, issueIDs)
return err
})
if result.Err != nil {
return fmt.Errorf("failed to get comments: %w", result.Err)
}
if !result.Success {
// Comments fetch failed but policy allows continuing
allComments = make(map[string][]*types.Comment) // Empty map
} }
for _, issue := range allIssues { for _, issue := range allIssues {
issue.Comments = allComments[issue.ID] issue.Comments = allComments[issue.ID]