From 5fefce4e8508554ccbf6b0a8ca60fcc58111f22b Mon Sep 17 00:00:00 2001
From: Steve Yegge
Date: Sat, 18 Oct 2025 18:21:17 -0700
Subject: [PATCH] Close bd-157: Complete auto-import refactoring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Refactored autoImportIfNewer() to use shared importIssuesCore()
- Removed ~200 lines of duplicated import logic from main.go
- Manual and auto-import now use identical collision detection/resolution
- Added auto-export scheduling after successful import (prevents JSONL drift)
- Optimized remapping notification (map lookup instead of O(n²) nested scan, sorted output)
- Removed obsolete test functions for deleted helper functions
- Use bytes.NewReader instead of string conversion for better performance

Benefits:
- Future bug fixes only need to be made once
- Guaranteed consistency between manual and auto-import
- JSONL stays in sync with database after auto-import
- Clearer, more consistent user feedback

Amp-Thread-ID: https://ampcode.com/threads/T-1925a48d-ca8a-4b54-b4e7-de3ec755d25a
Co-authored-by: Amp
---
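Reviewer note: this patch calls into ImportOptions, ImportResult, and importIssuesCore() from import_shared.go, none of which appear in the diffs below. For review convenience, here is a minimal sketch of those pieces as the call sites use them. The shapes are inferred from this patch and from bd-156's proposed design (see the .beads/issues.jsonl hunk below); the real definitions may carry more fields, and the comments on DryRun, SkipUpdate, and Strict are assumptions.

```go
package main

import (
    "context"

    "github.com/steveyegge/beads/internal/storage"
    "github.com/steveyegge/beads/internal/types"
)

// ImportOptions carries the knobs both import paths set.
type ImportOptions struct {
    ResolveCollisions bool // remap colliding IDs instead of failing; auto-import always sets this
    DryRun            bool // assumed: report what would change without writing
    SkipUpdate        bool // assumed: only create new issues, leave existing rows untouched
    Strict            bool // assumed: escalate anomalies to hard errors
}

// ImportResult exposes the fields the auto-import caller reads:
// Created, Updated, and IDMapping (old ID -> new ID for remapped collisions).
type ImportResult struct {
    Created   int
    Updated   int
    IDMapping map[string]string
}

// importIssuesCore is the shared path used by `bd import` and
// autoImportIfNewer(); signature taken from the call site, body elided.
func importIssuesCore(ctx context.Context, dbPath string, store storage.Storage,
    issues []*types.Issue, opts ImportOptions) (*ImportResult, error) {
    // Detect collisions, score and remap them, then import issues,
    // dependencies, and labels (see bd-156 for the design sketch).
    return &ImportResult{IDMapping: map[string]string{}}, nil
}
```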
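On the bytes.NewReader change: strings.NewReader(string(jsonlData)) first copies the entire byte slice to build a string, while bytes.NewReader(jsonlData) reads the slice in place. A standard-library-only illustration:

```go
package main

import (
    "bytes"
    "os"
    "strings"
)

func main() {
    data, _ := os.ReadFile(".beads/issues.jsonl") // data is []byte
    // Before: string(data) allocates and copies len(data) bytes
    // just to wrap them in a reader.
    _ = strings.NewReader(string(data))
    // After: a zero-copy reader over the same backing array.
    _ = bytes.NewReader(data)
}
```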
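On the export scheduling: successful imports now mark the database dirty and schedule either an incremental flush or a full export. The bodies of markDirtyAndScheduleFlush() and markDirtyAndScheduleFullExport() are not shown in this diff; the sketch below is a generic version of the debounce pattern they presumably follow, using the 5-second default and BEADS_FLUSH_DEBOUNCE override described in bd-17. The timer-reset logic here is an assumption, not the repo's actual code.

```go
package main

import (
    "os"
    "sync"
    "time"
)

var (
    flushMu    sync.Mutex
    flushTimer *time.Timer
)

// getDebounceDuration honors BEADS_FLUSH_DEBOUNCE (e.g. "500ms", "10s")
// and falls back to the 5s default noted in bd-17.
func getDebounceDuration() time.Duration {
    if v := os.Getenv("BEADS_FLUSH_DEBOUNCE"); v != "" {
        if d, err := time.ParseDuration(v); err == nil {
            return d
        }
    }
    return 5 * time.Second
}

// scheduleFlush coalesces a burst of changes into one export: every call
// resets the timer, so the export fires once writes quiet down.
func scheduleFlush(export func()) {
    flushMu.Lock()
    defer flushMu.Unlock()
    if flushTimer != nil {
        flushTimer.Stop()
    }
    flushTimer = time.AfterFunc(getDebounceDuration(), export)
}

func main() {
    scheduleFlush(func() { /* write JSONL snapshot here */ })
    // Give the demo timer a chance to fire before exiting.
    time.Sleep(getDebounceDuration() + time.Second)
}
```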
 .beads/issues.jsonl   |   1 +
 cmd/bd/main.go        | 217 ++++++++-------------------------
 cmd/bd/output_test.go | 230 +-----------------------------------
 3 files changed, 46 insertions(+), 402 deletions(-)

diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl
index 7d91ec8a..ae48ec44 100644
--- a/.beads/issues.jsonl
+++ b/.beads/issues.jsonl
@@ -62,6 +62,7 @@
 {"id":"bd-154","title":"Add log rotation for daemon.log","description":"daemon.log grows forever without rotation. With sync every 5 minutes:\n- ~105k log entries per year\n- No size limit\n- No cleanup\n- Eventually fills disk\n\nNeed automatic log rotation with:\n- Size-based rotation (default: 10MB)\n- Age-based cleanup (default: 7 days)\n- Compression of old logs\n- Configurable retention\n\nLocation: cmd/bd/daemon.go:455","design":"Use lumberjack library for rotation:\n\nimport \"gopkg.in/natefinch/lumberjack.v2\"\n\nlogF := \u0026lumberjack.Logger{\n Filename: logPath,\n MaxSize: 10, // MB\n MaxBackups: 3,\n MaxAge: 7, // days\n Compress: true,\n}\n\nMake configurable via env vars:\n- BEADS_DAEMON_LOG_MAX_SIZE (default: 10MB)\n- BEADS_DAEMON_LOG_MAX_BACKUPS (default: 3)\n- BEADS_DAEMON_LOG_MAX_AGE (default: 7 days)\n\nAdd to daemon status output:\n- Current log size\n- Number of archived logs\n- Oldest log timestamp","acceptance_criteria":"- Log rotation works automatically\n- Old logs are compressed\n- Retention policy enforced\n- Configuration via env vars works\n- Log size stays bounded\n- No log data loss during rotation\n- Documentation updated","status":"closed","priority":1,"issue_type":"feature","created_at":"2025-10-18T13:07:30.94896-07:00","updated_at":"2025-10-18T16:27:51.349037-07:00","closed_at":"2025-10-18T16:27:51.349037-07:00"}
 {"id":"bd-155","title":"Daemon production readiness","description":"Make beads daemon production-ready for long-running use, multi-repo deployments, and resilient operation.\n\nCurrent state: Good foundation, works well for development\nTarget state: Production-ready for individual developers and small teams\n\nGap areas:\n1. Resource management (cache eviction, limits)\n2. Health monitoring and crash recovery\n3. Process lifecycle management\n4. User experience (visibility, feedback)\n5. Operational concerns (logging, metrics)\n\nSuccess criteria:\n- Can run for weeks without restart\n- Handles 50+ repositories efficiently\n- Recovers from crashes automatically\n- Users understand daemon status\n- Observable and debuggable","acceptance_criteria":"All child issues completed:\n- P0 issues: Storage cache, health checks, crash recovery, MCP cleanup\n- P1 issues: Global auto-start, visibility, version checks\n- P2 issues: Resource limits, telemetry, log rotation\n\nValidation:\n- Run daemon for 7+ days without issues\n- Test with 50+ repositories\n- Verify crash recovery\n- Confirm resource usage is bounded\n- Check metrics and logs are useful","status":"in_progress","priority":0,"issue_type":"epic","created_at":"2025-10-18T13:07:43.543715-07:00","updated_at":"2025-10-18T16:19:57.885896-07:00"}
 {"id":"bd-156","title":"Refactor import logic to eliminate duplication between manual and auto-import","description":"The import logic is duplicated in two places:\n1. cmd/bd/import.go (manual 'bd import' command)\n2. cmd/bd/main.go:autoImportIfNewer() (auto-import after git pull)\n\nBoth have nearly identical code for:\n- Reading and parsing JSONL\n- Type-asserting store to *sqlite.SQLiteStorage (where we just fixed a bug twice)\n- Opening direct SQLite connection when using daemon mode\n- Detecting collisions with sqlite.DetectCollisions()\n- Scoring and remapping collisions\n- Importing issues, dependencies, and labels\n\n**Problems:**\n- Bugs must be fixed in two places (we just did this for daemon mode)\n- Features must be implemented twice\n- Tests must cover both code paths\n- Harder to maintain and keep in sync\n- Higher risk of divergence over time\n\n**Proposed solution:**\nExtract a shared function that handles the core import logic:\n\n```go\n// importIssues handles the core import logic used by both manual and auto-import\nfunc importIssues(ctx context.Context, dbPath string, store storage.Storage, \n issues []*types.Issue, opts ImportOptions) (*ImportResult, error) {\n // Handle SQLite store detection/creation for daemon mode\n // Detect collisions\n // Score and remap if needed\n // Import issues, dependencies, labels\n // Return result\n}\n```\n\nBoth import.go and autoImportIfNewer() would call this shared function with their specific options.\n\n**Benefits:**\n- Single source of truth for import logic\n- Bugs fixed once\n- Easier to test\n- Easier to extend with new import features\n- Less code overall","status":"closed","priority":2,"issue_type":"chore","created_at":"2025-10-18T17:07:06.007026-07:00","updated_at":"2025-10-18T17:11:20.280214-07:00","closed_at":"2025-10-18T17:11:20.280214-07:00"}
+{"id":"bd-157","title":"Complete auto-import refactoring to use shared importIssuesCore function","description":"The manual import command (bd import) was successfully refactored to use the shared importIssuesCore() function in import_shared.go, reducing code from 494 lines to 170 lines.\n\nHowever, autoImportIfNewer() in cmd/bd/main.go still has ~298 lines of duplicated import logic that should use the same shared function.\n\n**Current state:**\n- ✅ Manual import uses importIssuesCore() (commit 790233f)\n- ❌ Auto-import still has duplicated logic (lines 618-915 in main.go)\n\n**Duplication includes:**\n- SQLite store detection/creation for daemon mode (fixed in 790233f)\n- Collision detection with sqlite.DetectCollisions()\n- Scoring and remapping collisions\n- Importing issues (update existing, create new)\n- Importing dependencies\n- Importing labels\n\n**Benefits of completing this:**\n- Remove ~200 more lines of duplicated code\n- Ensure manual and auto-import have identical behavior\n- Future bug fixes only need to be made once\n- Easier to test and maintain\n\n**Implementation:**\nReplace lines 714-908 in autoImportIfNewer() with:\n```go\nopts := ImportOptions{\n ResolveCollisions: true, // Auto-import always resolves\n DryRun: false,\n SkipUpdate: false,\n Strict: false,\n}\nresult, err := importIssuesCore(ctx, dbPath, store, allIssues, opts)\n// Handle result and show remapping notification\n```\n\nThen update hash storage logic at the end.","status":"closed","priority":2,"issue_type":"chore","created_at":"2025-10-18T17:38:34.443872-07:00","updated_at":"2025-10-18T18:07:05.553928-07:00","closed_at":"2025-10-18T18:07:05.553928-07:00"}
 {"id":"bd-16","title":"Add EXPLAIN QUERY PLAN tests for ready work query","description":"Verify that the hierarchical blocking query uses proper indexes and doesn't do full table scans.\n\n**Queries to analyze:**\n1. The recursive CTE (both base case and recursive case)\n2. The final SELECT with NOT EXISTS\n3. Impact of various filters (status, priority, assignee)\n\n**Implementation:**\nAdd test function that:\n- Runs EXPLAIN QUERY PLAN on GetReadyWork query\n- Parses output to verify no SCAN TABLE operations\n- Documents expected query plan in comments\n- Fails if query plan degrades\n\n**Benefits:**\n- Catch performance regressions in tests\n- Document expected query behavior\n- Ensure indexes are being used\n\nRelated to: bd-77 (composite index on depends_on_id, type)","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-16T20:46:08.971822-07:00","updated_at":"2025-10-18T12:47:44.284846-07:00","closed_at":"2025-10-18T12:47:44.284846-07:00"}
 {"id":"bd-17","title":"Make auto-flush debounce duration configurable","description":"flushDebounce is hardcoded to 5 seconds. Make it configurable via environment variable BEADS_FLUSH_DEBOUNCE (e.g., '500ms', '10s'). Current 5-second value is reasonable for interactive use, but CI/automated scenarios might want faster flush. Add getDebounceDuration() helper function. Located in cmd/bd/main.go:31.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-16T20:46:08.971822-07:00","updated_at":"2025-10-18T09:57:27.593569-07:00","closed_at":"2025-10-18T09:47:43.22126-07:00"}
 {"id":"bd-18","title":"Optimize auto-flush to use incremental updates","description":"Every flush exports ALL issues and ALL dependencies, even if only one issue changed. For large projects (1000+ issues), this could be expensive. Current approach guarantees consistency, which is fine for MVP, but future optimization could track which issues changed and use incremental updates. Located in cmd/bd/main.go:255-276.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-16T20:46:08.971822-07:00","updated_at":"2025-10-18T09:57:27.595342-07:00","closed_at":"2025-10-14T02:51:52.200141-07:00"}
diff --git a/cmd/bd/main.go b/cmd/bd/main.go
index ee73a5c1..6783c514 100644
--- a/cmd/bd/main.go
+++ b/cmd/bd/main.go
@@ -680,7 +680,7 @@ func autoImportIfNewer() {
     }
 
     // Content changed - parse all issues
-    scanner := bufio.NewScanner(strings.NewReader(string(jsonlData)))
+    scanner := bufio.NewScanner(bytes.NewReader(jsonlData))
     scanner.Buffer(make([]byte, 0, 1024), 2*1024*1024) // 2MB buffer for large JSON lines
     var allIssues []*types.Issue
     lineNo := 0
@@ -711,199 +711,68 @@ func autoImportIfNewer() {
         return
     }
 
-    // Detect collisions before importing (bd-228 fix)
-    // Auto-import needs direct SQLite access for collision detection
-    var sqliteStore *sqlite.SQLiteStorage
-
-    if store != nil {
-        // Direct mode - try to use existing store
-        var ok bool
-        sqliteStore, ok = store.(*sqlite.SQLiteStorage)
-        if !ok {
-            fmt.Fprintf(os.Stderr, "Auto-import disabled for non-SQLite backend (no collision detection).\n")
-            fmt.Fprintf(os.Stderr, "To import manually, run: bd import -i %s\n", jsonlPath)
-            return
-        }
-    } else {
-        // Daemon mode - open direct connection for auto-import
-        if dbPath == "" {
-            if os.Getenv("BD_DEBUG") != "" {
-                fmt.Fprintf(os.Stderr, "Debug: auto-import skipped, no database path\n")
-            }
-            return
-        }
-        var err error
-        sqliteStore, err = sqlite.New(dbPath)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "Auto-import failed: could not open database: %v\n", err)
-            return
-        }
-        defer sqliteStore.Close()
+    // Use shared import logic (bd-157)
+    opts := ImportOptions{
+        ResolveCollisions: true, // Auto-import always resolves collisions
+        DryRun:            false,
+        SkipUpdate:        false,
+        Strict:            false,
     }
-
-    collisionResult, err := sqlite.DetectCollisions(ctx, sqliteStore, allIssues)
+
+    result, err := importIssuesCore(ctx, dbPath, store, allIssues, opts)
     if err != nil {
-        // Collision detection failed, skip import to be safe
-        fmt.Fprintf(os.Stderr, "Auto-import skipped: collision detection error: %v\n", err)
+        fmt.Fprintf(os.Stderr, "Auto-import failed: %v\n", err)
         return
     }
-
-    // If collisions detected, auto-resolve them by remapping to new IDs
-    if len(collisionResult.Collisions) > 0 {
-        // Get all existing issues for scoring
-        allExistingIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "Auto-import failed: error getting existing issues: %v\n", err)
-            return
-        }
-
-        // Score collisions
-        if err := sqlite.ScoreCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues); err != nil {
-            fmt.Fprintf(os.Stderr, "Auto-import failed: error scoring collisions: %v\n", err)
-            return
-        }
-
-        // Remap collisions
-        idMapping, err := sqlite.RemapCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "Auto-import failed: error remapping collisions: %v\n", err)
-            return
+
+    // Show collision remapping notification if any occurred
+    if len(result.IDMapping) > 0 {
+        // Build title lookup map to avoid O(n^2) search
+        titleByID := make(map[string]string)
+        for _, issue := range allIssues {
+            titleByID[issue.ID] = issue.Title
         }
 
-        // Show concise notification
+        // Sort remappings by old ID for consistent output
+        type mapping struct {
+            oldID string
+            newID string
+        }
+        mappings := make([]mapping, 0, len(result.IDMapping))
+        for oldID, newID := range result.IDMapping {
+            mappings = append(mappings, mapping{oldID, newID})
+        }
+        sort.Slice(mappings, func(i, j int) bool {
+            return mappings[i].oldID < mappings[j].oldID
+        })
+
         maxShow := 10
-        numRemapped := len(idMapping)
+        numRemapped := len(mappings)
        if numRemapped < maxShow {
             maxShow = numRemapped
         }
 
         fmt.Fprintf(os.Stderr, "\nAuto-import: remapped %d colliding issue(s) to new IDs:\n", numRemapped)
-        i := 0
-        for oldID, newID := range idMapping {
-            if i >= maxShow {
-                break
-            }
-            // Find the collision detail to get title
-            var title string
-            for _, collision := range collisionResult.Collisions {
-                if collision.ID == oldID {
-                    title = collision.IncomingIssue.Title
-                    break
-                }
-            }
-            fmt.Fprintf(os.Stderr, "  %s → %s (%s)\n", oldID, newID, title)
-            i++
+        for i := 0; i < maxShow; i++ {
+            m := mappings[i]
+            title := titleByID[m.oldID]
+            fmt.Fprintf(os.Stderr, "  %s → %s (%s)\n", m.oldID, m.newID, title)
         }
         if numRemapped > maxShow {
             fmt.Fprintf(os.Stderr, "  ... and %d more\n", numRemapped-maxShow)
         }
         fmt.Fprintf(os.Stderr, "\n")
-
-        // Remove colliding issues from allIssues (they were already created with new IDs by RemapCollisions)
-        collidingIDs := make(map[string]bool)
-        for _, collision := range collisionResult.Collisions {
-            collidingIDs[collision.ID] = true
-        }
-        filteredIssues := make([]*types.Issue, 0)
-        for _, issue := range allIssues {
-            if !collidingIDs[issue.ID] {
-                filteredIssues = append(filteredIssues, issue)
-            }
-        }
-        allIssues = filteredIssues
     }
 
-    // Batch fetch all existing issues to avoid N+1 query pattern (bd-666)
-    allExistingIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "Auto-import failed: error fetching existing issues: %v\n", err)
-        return
-    }
-
-    // Build map for O(1) lookup
-    existingByID := make(map[string]*types.Issue)
-    for _, issue := range allExistingIssues {
-        existingByID[issue.ID] = issue
-    }
-
-    // Import non-colliding issues (exact matches + new issues)
-    for _, issue := range allIssues {
-        existing := existingByID[issue.ID]
-
-        if existing != nil {
-            // Update existing issue
-            updates := make(map[string]interface{})
-            updates["title"] = issue.Title
-            updates["description"] = issue.Description
-            updates["design"] = issue.Design
-            updates["acceptance_criteria"] = issue.AcceptanceCriteria
-            updates["notes"] = issue.Notes
-            updates["status"] = issue.Status
-            updates["priority"] = issue.Priority
-            updates["issue_type"] = issue.IssueType
-            updates["assignee"] = issue.Assignee
-            if issue.EstimatedMinutes != nil {
-                updates["estimated_minutes"] = *issue.EstimatedMinutes
-            }
-            if issue.ExternalRef != nil {
-                updates["external_ref"] = *issue.ExternalRef
-            }
-
-            // Enforce status/closed_at invariant (bd-226)
-            if issue.Status == "closed" {
-                // Issue is closed - ensure closed_at is set
-                if issue.ClosedAt != nil {
-                    updates["closed_at"] = *issue.ClosedAt
-                } else if !issue.UpdatedAt.IsZero() {
-                    updates["closed_at"] = issue.UpdatedAt
-                } else {
-                    updates["closed_at"] = time.Now().UTC()
-                }
-            } else {
-                // Issue is not closed - ensure closed_at is null
-                updates["closed_at"] = nil
-            }
-
-            _ = store.UpdateIssue(ctx, issue.ID, updates, "auto-import")
+    // Schedule export to sync JSONL after successful import
+    changed := (result.Created + result.Updated + len(result.IDMapping)) > 0
+    if changed {
+        if len(result.IDMapping) > 0 {
+            // Remappings may affect many issues, do a full export
+            markDirtyAndScheduleFullExport()
         } else {
-            // Create new issue - enforce invariant before creation
-            if issue.Status == "closed" {
-                if issue.ClosedAt == nil {
-                    now := time.Now().UTC()
-                    issue.ClosedAt = &now
-                }
-            } else {
-                issue.ClosedAt = nil
-            }
-            _ = store.CreateIssue(ctx, issue, "auto-import")
-        }
-    }
-
-    // Import dependencies
-    for _, issue := range allIssues {
-        if len(issue.Dependencies) == 0 {
-            continue
-        }
-
-        // Get existing dependencies
-        existingDeps, err := store.GetDependencyRecords(ctx, issue.ID)
-        if err != nil {
-            continue
-        }
-
-        // Add missing dependencies
-        for _, dep := range issue.Dependencies {
-            exists := false
-            for _, existing := range existingDeps {
-                if existing.DependsOnID == dep.DependsOnID && existing.Type == dep.Type {
-                    exists = true
-                    break
-                }
-            }
-
-            if !exists {
-                _ = store.AddDependency(ctx, dep, "auto-import")
-            }
+            // Regular import, incremental export is fine
+            markDirtyAndScheduleFlush()
         }
     }
diff --git a/cmd/bd/output_test.go b/cmd/bd/output_test.go
index 591280d7..5974e813 100644
--- a/cmd/bd/output_test.go
+++ b/cmd/bd/output_test.go
@@ -5,11 +5,7 @@ import (
     "encoding/json"
     "io"
     "os"
-    "strings"
     "testing"
-
-    "github.com/steveyegge/beads/internal/storage/sqlite"
-    "github.com/steveyegge/beads/internal/types"
 )
 
 func TestOutputJSON(t *testing.T) {
@@ -93,230 +89,8 @@ func TestOutputJSONArray(t *testing.T) {
     }
 }
 
-func TestPrintCollisionReport(t *testing.T) {
-    // Capture stderr
-    oldStderr := os.Stderr
-    r, w, _ := os.Pipe()
-    os.Stderr = w
-
-    // Create collision data
-    result := &sqlite.CollisionResult{
-        ExactMatches: []string{"bd-1", "bd-2"},
-        NewIssues:    []string{"bd-3", "bd-4", "bd-5"},
-        Collisions: []*sqlite.CollisionDetail{
-            {
-                ID: "bd-6",
-                IncomingIssue: &types.Issue{
-                    ID:    "bd-6",
-                    Title: "Test Issue 6",
-                },
-                ConflictingFields: []string{"title", "priority"},
-            },
-            {
-                ID: "bd-7",
-                IncomingIssue: &types.Issue{
-                    ID:    "bd-7",
-                    Title: "Test Issue 7",
-                },
-                ConflictingFields: []string{"description"},
-            },
-        },
-    }
-
-    // Call printCollisionReport
-    printCollisionReport(result)
-
-    // Restore stderr
-    w.Close()
-    os.Stderr = oldStderr
-
-    // Read output
-    var buf bytes.Buffer
-    io.Copy(&buf, r)
-    output := buf.String()
-
-    // Verify output contains expected sections
-    if !strings.Contains(output, "Collision Detection Report") {
-        t.Errorf("Expected report header. Got: %s", output)
-    }
-    if !strings.Contains(output, "Exact matches (idempotent): 2") {
-        t.Errorf("Expected exact matches count. Got: %s", output)
-    }
-    if !strings.Contains(output, "New issues: 3") {
-        t.Errorf("Expected new issues count. Got: %s", output)
-    }
-    if !strings.Contains(output, "COLLISIONS DETECTED: 2") {
-        t.Errorf("Expected collisions count. Got: %s", output)
-    }
-    if !strings.Contains(output, "bd-6") {
-        t.Errorf("Expected first collision ID. Got: %s", output)
-    }
-    // The field names are printed directly, not in brackets
-    if !strings.Contains(output, "title") || !strings.Contains(output, "priority") {
-        t.Errorf("Expected conflicting fields for bd-6. Got: %s", output)
-    }
-}
-
-func TestPrintCollisionReportNoCollisions(t *testing.T) {
-    // Capture stderr
-    oldStderr := os.Stderr
-    r, w, _ := os.Pipe()
-    os.Stderr = w
-
-    // Create data with no collisions
-    result := &sqlite.CollisionResult{
-        ExactMatches: []string{"bd-1", "bd-2", "bd-3"},
-        NewIssues:    []string{"bd-4"},
-        Collisions:   []*sqlite.CollisionDetail{},
-    }
-
-    // Call printCollisionReport
-    printCollisionReport(result)
-
-    // Restore stderr
-    w.Close()
-    os.Stderr = oldStderr
-
-    // Read output
-    var buf bytes.Buffer
-    io.Copy(&buf, r)
-    output := buf.String()
-
-    // Verify output shows no collisions
-    if !strings.Contains(output, "COLLISIONS DETECTED: 0") {
-        t.Error("Expected 0 collisions")
-    }
-    if strings.Contains(output, "Colliding issues:") {
-        t.Error("Should not show colliding issues section when there are none")
-    }
-}
-
-func TestPrintRemappingReport(t *testing.T) {
-    // Capture stderr
-    oldStderr := os.Stderr
-    r, w, _ := os.Pipe()
-    os.Stderr = w
-
-    // Create remapping data
-    remapping := map[string]string{
-        "bd-10": "bd-100",
-        "bd-20": "bd-200",
-        "bd-30": "bd-300",
-    }
-    collisions := []*sqlite.CollisionDetail{
-        {ID: "bd-10", ReferenceScore: 5},
-        {ID: "bd-20", ReferenceScore: 0},
-        {ID: "bd-30", ReferenceScore: 12},
-    }
-
-    // Call printRemappingReport
-    printRemappingReport(remapping, collisions)
-
-    // Restore stderr
-    w.Close()
-    os.Stderr = oldStderr
-
-    // Read output
-    var buf bytes.Buffer
-    io.Copy(&buf, r)
-    output := buf.String()
-
-    // Verify output contains expected information
-    if !strings.Contains(output, "Remapping Report") {
-        t.Errorf("Expected report title. Got: %s", output)
-    }
-    if !strings.Contains(output, "bd-10 → bd-100") {
-        t.Error("Expected first remapping")
-    }
-    if !strings.Contains(output, "refs: 5") {
-        t.Error("Expected reference count for bd-10")
-    }
-    if !strings.Contains(output, "bd-20 → bd-200") {
-        t.Error("Expected second remapping")
-    }
-    if !strings.Contains(output, "refs: 0") {
-        t.Error("Expected 0 references for bd-20")
-    }
-    if !strings.Contains(output, "bd-30 → bd-300") {
-        t.Error("Expected third remapping")
-    }
-    if !strings.Contains(output, "refs: 12") {
-        t.Error("Expected reference count for bd-30")
-    }
-}
-
-func TestPrintRemappingReportEmpty(t *testing.T) {
-    // Capture stderr
-    oldStderr := os.Stderr
-    r, w, _ := os.Pipe()
-    os.Stderr = w
-
-    // Empty remapping
-    remapping := map[string]string{}
-    collisions := []*sqlite.CollisionDetail{}
-
-    // Call printRemappingReport
-    printRemappingReport(remapping, collisions)
-
-    // Restore stderr
-    w.Close()
-    os.Stderr = oldStderr
-
-    // Read output
-    var buf bytes.Buffer
-    io.Copy(&buf, r)
-    output := buf.String()
-
-    // Should still have header
-    if !strings.Contains(output, "Remapping Report") {
-        t.Errorf("Expected report title even with no remappings. Got: %s", output)
-    }
-}
-
-func TestPrintRemappingReportOrdering(t *testing.T) {
-    // Capture stderr
-    oldStderr := os.Stderr
-    r, w, _ := os.Pipe()
-    os.Stderr = w
-
-    // Create remapping with different reference scores
-    // Ordering is by reference score (ascending)
-    remapping := map[string]string{
-        "bd-2":   "bd-200",
-        "bd-10":  "bd-100",
-        "bd-100": "bd-1000",
-    }
-    collisions := []*sqlite.CollisionDetail{
-        {ID: "bd-2", ReferenceScore: 10},  // highest refs
-        {ID: "bd-10", ReferenceScore: 5},  // medium refs
-        {ID: "bd-100", ReferenceScore: 1}, // lowest refs
-    }
-
-    // Call printRemappingReport
-    printRemappingReport(remapping, collisions)
-
-    // Restore stderr
-    w.Close()
-    os.Stderr = oldStderr
-
-    // Read output
-    var buf bytes.Buffer
-    io.Copy(&buf, r)
-    output := buf.String()
-
-    // Find positions of each remapping in output
-    pos2 := strings.Index(output, "bd-2 →")
-    pos10 := strings.Index(output, "bd-10 →")
-    pos100 := strings.Index(output, "bd-100 →")
-
-    // Verify ordering by reference score (ascending): bd-100 (1 ref) < bd-10 (5 refs) < bd-2 (10 refs)
-    if pos2 == -1 || pos10 == -1 || pos100 == -1 {
-        t.Fatalf("Missing remappings in output: %s", output)
-    }
-    if !(pos100 < pos10 && pos10 < pos2) {
-        t.Errorf("Remappings not in reference score order. Got: %s", output)
-    }
-}
+// Tests for printCollisionReport and printRemappingReport were removed
+// These functions no longer exist after refactoring to shared importIssuesCore (bd-157)
 
 // Note: createIssuesFromMarkdown is tested via cmd/bd/markdown_test.go which has
 // comprehensive tests for the markdown parsing functionality. We don't duplicate