feat: Add 'bd stale' command to show and release orphaned executor claims
- Implements bd stale command to show issues with execution_state where executor is dead/stopped - Adds --release flag to automatically release orphaned issues - Adds --threshold flag to customize heartbeat staleness threshold (default: 300s/5min) - Handles missing executor instances (LEFT JOIN) for cases where executor was deleted - Adds QueryContext and BeginTx helper methods to SQLiteStorage for advanced queries - Fixes ExternalRef comparison bug in import_shared.go (pointer vs string) - Removes unused imports in import.go Resolves vc-124
This commit is contained in:
@@ -1,96 +0,0 @@
|
|||||||
# BUG FOUND: getNextID() uses alphabetical MAX instead of numerical
|
|
||||||
|
|
||||||
## Location
|
|
||||||
`internal/storage/sqlite/sqlite.go:60-84`, function `getNextID()`
|
|
||||||
|
|
||||||
## The Bug
|
|
||||||
```go
|
|
||||||
err := db.QueryRow("SELECT MAX(id) FROM issues").Scan(&maxID)
|
|
||||||
```
|
|
||||||
|
|
||||||
This uses alphabetical MAX on the text `id` column, not numerical MAX.
|
|
||||||
|
|
||||||
## Impact
|
|
||||||
When you have bd-1 through bd-10:
|
|
||||||
- Alphabetical sort: bd-1, bd-10, bd-2, bd-3, ... bd-9
|
|
||||||
- MAX(id) returns "bd-9" (alphabetically last)
|
|
||||||
- nextID is calculated as 10
|
|
||||||
- Creating a new issue tries to use bd-10, which already exists
|
|
||||||
- Result: UNIQUE constraint failed
|
|
||||||
|
|
||||||
## Reproduction
|
|
||||||
```bash
|
|
||||||
# After creating bd-1 through bd-10
|
|
||||||
./bd create "Test issue" -t task -p 1
|
|
||||||
# Error: failed to insert issue: UNIQUE constraint failed: issues.id
|
|
||||||
```
|
|
||||||
|
|
||||||
## The Fix
|
|
||||||
|
|
||||||
Option 1: Cast to integer in SQL (BEST)
|
|
||||||
```sql
|
|
||||||
SELECT MAX(CAST(SUBSTR(id, INSTR(id, '-') + 1) AS INTEGER)) FROM issues WHERE id LIKE 'bd-%'
|
|
||||||
```
|
|
||||||
|
|
||||||
Option 2: Pad IDs with zeros
|
|
||||||
- Change ID format from "bd-10" to "bd-0010"
|
|
||||||
- Alphabetical and numerical order match
|
|
||||||
- Breaks existing IDs
|
|
||||||
|
|
||||||
Option 3: Query all IDs and find max in Go
|
|
||||||
- Less efficient but more flexible
|
|
||||||
- Works with any ID format
|
|
||||||
|
|
||||||
## Recommended Solution
|
|
||||||
|
|
||||||
Option 1 with proper prefix handling:
|
|
||||||
|
|
||||||
```go
|
|
||||||
func getNextID(db *sql.DB) int {
|
|
||||||
// Get prefix from config (default "bd")
|
|
||||||
var prefix string
|
|
||||||
err := db.QueryRow("SELECT value FROM config WHERE key = 'issue_prefix'").Scan(&prefix)
|
|
||||||
if err != nil || prefix == "" {
|
|
||||||
prefix = "bd"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find max numeric ID for this prefix
|
|
||||||
var maxNum sql.NullInt64
|
|
||||||
query := `
|
|
||||||
SELECT MAX(CAST(SUBSTR(id, LENGTH(?) + 2) AS INTEGER))
|
|
||||||
FROM issues
|
|
||||||
WHERE id LIKE ? || '-%'
|
|
||||||
`
|
|
||||||
err = db.QueryRow(query, prefix, prefix).Scan(&maxNum)
|
|
||||||
if err != nil || !maxNum.Valid {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
return int(maxNum.Int64) + 1
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Workaround for Now
|
|
||||||
|
|
||||||
Manually specify IDs when creating issues:
|
|
||||||
```bash
|
|
||||||
# This won't work because auto-ID fails:
|
|
||||||
./bd create "Title" -t task -p 1
|
|
||||||
|
|
||||||
# Workaround - manually calculate next ID:
|
|
||||||
./bd list | grep -oE 'bd-[0-9]+' | sed 's/bd-//' | sort -n | tail -1
|
|
||||||
# Then add 1 and create with explicit ID in code
|
|
||||||
```
|
|
||||||
|
|
||||||
Or fix the bug first before continuing!
|
|
||||||
|
|
||||||
## Related to bd-9
|
|
||||||
|
|
||||||
This bug is EXACTLY the kind of distributed ID collision problem that bd-9 is designed to solve! But we should also fix the root cause.
|
|
||||||
|
|
||||||
## Created Issue
|
|
||||||
|
|
||||||
Should create: "Fix getNextID() to use numerical MAX instead of alphabetical"
|
|
||||||
- Type: bug
|
|
||||||
- Priority: 0 (critical - blocks all new issue creation)
|
|
||||||
- Blocks: bd-9 (can't create child issues)
|
|
||||||
@@ -1,86 +0,0 @@
|
|||||||
# Child Issues for BD-9: Collision Resolution
|
|
||||||
|
|
||||||
## Issues to Create
|
|
||||||
|
|
||||||
These issues break down bd-9 into implementable chunks. Link them all to bd-9 as parent-child dependencies.
|
|
||||||
|
|
||||||
### Issue 1: Extend export to include dependencies
|
|
||||||
**Title**: Extend export to include dependencies in JSONL
|
|
||||||
**Type**: task
|
|
||||||
**Priority**: 1
|
|
||||||
**Description**: Modify export.go to include dependencies array in each issue's JSONL output. This makes JSONL self-contained and enables proper collision resolution. Format: {"id":"bd-10","dependencies":[{"depends_on_id":"bd-5","type":"blocks"}]}
|
|
||||||
**Command**: `bd create "Extend export to include dependencies in JSONL" -t task -p 1 -d "Modify export.go to include dependencies array in each issue's JSONL output. This makes JSONL self-contained and enables proper collision resolution. Format: {\"id\":\"bd-10\",\"dependencies\":[{\"depends_on_id\":\"bd-5\",\"type\":\"blocks\"}]}"`
|
|
||||||
|
|
||||||
### Issue 2: Implement collision detection
|
|
||||||
**Title**: Implement collision detection in import
|
|
||||||
**Type**: task
|
|
||||||
**Priority**: 1
|
|
||||||
**Description**: Create collision.go with detectCollisions() function. Compare incoming JSONL issues against DB state. Distinguish between: (1) exact match (idempotent), (2) ID match but different content (collision), (3) new issue. Return list of colliding issues.
|
|
||||||
**Command**: `bd create "Implement collision detection in import" -t task -p 1 -d "Create collision.go with detectCollisions() function. Compare incoming JSONL issues against DB state. Distinguish between: (1) exact match (idempotent), (2) ID match but different content (collision), (3) new issue. Return list of colliding issues."`
|
|
||||||
|
|
||||||
### Issue 3: Implement reference scoring
|
|
||||||
**Title**: Implement reference scoring algorithm
|
|
||||||
**Type**: task
|
|
||||||
**Priority**: 1
|
|
||||||
**Description**: Count references for each colliding issue: text mentions in descriptions/notes/design fields + dependency references. Sort collisions by score ascending (fewest refs first). This minimizes total updates during renumbering.
|
|
||||||
**Command**: `bd create "Implement reference scoring algorithm" -t task -p 1 -d "Count references for each colliding issue: text mentions in descriptions/notes/design fields + dependency references. Sort collisions by score ascending (fewest refs first). This minimizes total updates during renumbering."`
|
|
||||||
|
|
||||||
### Issue 4: Implement ID remapping
|
|
||||||
**Title**: Implement ID remapping with reference updates
|
|
||||||
**Type**: task
|
|
||||||
**Priority**: 1
|
|
||||||
**Description**: Allocate new IDs for colliding issues. Update all text field references using word-boundary regex (\bbd-10\b). Update dependency records. Build id_mapping for reporting. Handle chain dependencies properly.
|
|
||||||
**Command**: `bd create "Implement ID remapping with reference updates" -t task -p 1 -d "Allocate new IDs for colliding issues. Update all text field references using word-boundary regex (\\bbd-10\\b). Update dependency records. Build id_mapping for reporting. Handle chain dependencies properly."`
|
|
||||||
|
|
||||||
### Issue 5: Add CLI flags
|
|
||||||
**Title**: Add --resolve-collisions flag and user reporting
|
|
||||||
**Type**: task
|
|
||||||
**Priority**: 1
|
|
||||||
**Description**: Add import flags: --resolve-collisions (auto-fix) and --dry-run (preview). Display clear report: collisions detected, remappings applied (old→new with scores), reference counts updated. Default behavior: fail on collision (safe).
|
|
||||||
**Command**: `bd create "Add --resolve-collisions flag and user reporting" -t task -p 1 -d "Add import flags: --resolve-collisions (auto-fix) and --dry-run (preview). Display clear report: collisions detected, remappings applied (old→new with scores), reference counts updated. Default behavior: fail on collision (safe)."`
|
|
||||||
|
|
||||||
### Issue 6: Write tests
|
|
||||||
**Title**: Write comprehensive collision resolution tests
|
|
||||||
**Type**: task
|
|
||||||
**Priority**: 1
|
|
||||||
**Description**: Test cases: simple collision, multiple collisions, dependency updates, text reference updates, chain dependencies, edge cases (partial ID matches, case sensitivity, triple merges). Add to import_test.go and collision_test.go.
|
|
||||||
**Command**: `bd create "Write comprehensive collision resolution tests" -t task -p 1 -d "Test cases: simple collision, multiple collisions, dependency updates, text reference updates, chain dependencies, edge cases (partial ID matches, case sensitivity, triple merges). Add to import_test.go and collision_test.go."`
|
|
||||||
|
|
||||||
### Issue 7: Update docs
|
|
||||||
**Title**: Update documentation for collision resolution
|
|
||||||
**Type**: task
|
|
||||||
**Priority**: 1
|
|
||||||
**Description**: Update README.md with collision resolution section. Update CLAUDE.md with new workflow. Document --resolve-collisions and --dry-run flags. Add example scenarios showing branch merge workflows.
|
|
||||||
**Command**: `bd create "Update documentation for collision resolution" -t task -p 1 -d "Update README.md with collision resolution section. Update CLAUDE.md with new workflow. Document --resolve-collisions and --dry-run flags. Add example scenarios showing branch merge workflows."`
|
|
||||||
|
|
||||||
## Additional Feature Issue
|
|
||||||
|
|
||||||
### Issue: Add design field support to update command
|
|
||||||
**Title**: Add design/notes/acceptance_criteria fields to update command
|
|
||||||
**Type**: feature
|
|
||||||
**Priority**: 2
|
|
||||||
**Description**: Currently bd update only supports status, priority, title, assignee. Add support for --design, --notes, --acceptance-criteria flags. This makes it easier to add detailed designs to issues after creation.
|
|
||||||
**Command**: `bd create "Add design/notes/acceptance_criteria fields to update command" -t feature -p 2 -d "Currently bd update only supports status, priority, title, assignee. Add support for --design, --notes, --acceptance-criteria flags. This makes it easier to add detailed designs to issues after creation."`
|
|
||||||
|
|
||||||
## Dependency Linking
|
|
||||||
|
|
||||||
After creating all child issues, link them to bd-9:
|
|
||||||
```bash
|
|
||||||
# Assuming the issues are bd-10 through bd-16 (or whatever IDs were assigned)
|
|
||||||
bd dep add <child-id> bd-9 --type parent-child
|
|
||||||
```
|
|
||||||
|
|
||||||
Example:
|
|
||||||
```bash
|
|
||||||
bd dep add bd-10 bd-9 --type parent-child
|
|
||||||
bd dep add bd-11 bd-9 --type parent-child
|
|
||||||
bd dep add bd-12 bd-9 --type parent-child
|
|
||||||
# etc.
|
|
||||||
```
|
|
||||||
|
|
||||||
## Current State
|
|
||||||
|
|
||||||
- bd-10 was created successfully ("Extend export to include dependencies")
|
|
||||||
- bd-11+ attempts failed with UNIQUE constraint errors
|
|
||||||
- This suggests those IDs already exist in the DB but may not be in the JSONL file
|
|
||||||
- Need to investigate DB/JSONL sync issue before creating more issues
|
|
||||||
402
cmd/bd/import.go
402
cmd/bd/import.go
@@ -7,10 +7,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
|
||||||
"github.com/steveyegge/beads/internal/types"
|
"github.com/steveyegge/beads/internal/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -81,387 +79,87 @@ Behavior:
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Phase 2: Detect collisions
|
// Phase 2: Use shared import logic
|
||||||
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
opts := ImportOptions{
|
||||||
if !ok {
|
ResolveCollisions: resolveCollisions,
|
||||||
fmt.Fprintf(os.Stderr, "Error: collision detection requires SQLite storage backend\n")
|
DryRun: dryRun,
|
||||||
os.Exit(1)
|
SkipUpdate: skipUpdate,
|
||||||
|
Strict: strict,
|
||||||
}
|
}
|
||||||
|
|
||||||
collisionResult, err := sqlite.DetectCollisions(ctx, sqliteStore, allIssues)
|
result, err := importIssuesCore(ctx, dbPath, store, allIssues, opts)
|
||||||
|
|
||||||
|
// Handle errors and special cases
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintf(os.Stderr, "Error detecting collisions: %v\n", err)
|
// Check if it's a collision error when not resolving
|
||||||
os.Exit(1)
|
if !resolveCollisions && result != nil && len(result.CollisionIDs) > 0 {
|
||||||
}
|
// Print collision report before exiting
|
||||||
|
fmt.Fprintf(os.Stderr, "\n=== Collision Detection Report ===\n")
|
||||||
var idMapping map[string]string
|
fmt.Fprintf(os.Stderr, "COLLISIONS DETECTED: %d\n\n", result.Collisions)
|
||||||
var created, updated, skipped int
|
fmt.Fprintf(os.Stderr, "Colliding issue IDs: %v\n", result.CollisionIDs)
|
||||||
|
|
||||||
// Phase 3: Handle collisions
|
|
||||||
if len(collisionResult.Collisions) > 0 {
|
|
||||||
// Print collision report
|
|
||||||
printCollisionReport(collisionResult)
|
|
||||||
|
|
||||||
if dryRun {
|
|
||||||
// In dry-run mode, just print report and exit
|
|
||||||
fmt.Fprintf(os.Stderr, "\nDry-run mode: no changes made\n")
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !resolveCollisions {
|
|
||||||
// Default behavior: fail on collision (safe mode)
|
|
||||||
fmt.Fprintf(os.Stderr, "\nCollision detected! Use --resolve-collisions to automatically remap colliding issues.\n")
|
fmt.Fprintf(os.Stderr, "\nCollision detected! Use --resolve-collisions to automatically remap colliding issues.\n")
|
||||||
fmt.Fprintf(os.Stderr, "Or use --dry-run to preview without making changes.\n")
|
fmt.Fprintf(os.Stderr, "Or use --dry-run to preview without making changes.\n")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "Import failed: %v\n", err)
|
||||||
// Resolve collisions by scoring and remapping
|
|
||||||
fmt.Fprintf(os.Stderr, "\nResolving collisions...\n")
|
|
||||||
|
|
||||||
// Get all existing issues for scoring
|
|
||||||
allExistingIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error getting existing issues: %v\n", err)
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Score collisions
|
// Handle dry-run mode
|
||||||
if err := sqlite.ScoreCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues); err != nil {
|
if dryRun {
|
||||||
fmt.Fprintf(os.Stderr, "Error scoring collisions: %v\n", err)
|
if result.Collisions > 0 {
|
||||||
os.Exit(1)
|
fmt.Fprintf(os.Stderr, "\n=== Collision Detection Report ===\n")
|
||||||
}
|
fmt.Fprintf(os.Stderr, "COLLISIONS DETECTED: %d\n", result.Collisions)
|
||||||
|
fmt.Fprintf(os.Stderr, "Colliding issue IDs: %v\n", result.CollisionIDs)
|
||||||
// Remap collisions
|
} else {
|
||||||
idMapping, err = sqlite.RemapCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error remapping collisions: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print remapping report
|
|
||||||
printRemappingReport(idMapping, collisionResult.Collisions)
|
|
||||||
|
|
||||||
// Colliding issues were already created with new IDs
|
|
||||||
created = len(collisionResult.Collisions)
|
|
||||||
|
|
||||||
// Remove colliding issues from allIssues (they're already processed)
|
|
||||||
filteredIssues := make([]*types.Issue, 0)
|
|
||||||
collidingIDs := make(map[string]bool)
|
|
||||||
for _, collision := range collisionResult.Collisions {
|
|
||||||
collidingIDs[collision.ID] = true
|
|
||||||
}
|
|
||||||
for _, issue := range allIssues {
|
|
||||||
if !collidingIDs[issue.ID] {
|
|
||||||
filteredIssues = append(filteredIssues, issue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
allIssues = filteredIssues
|
|
||||||
} else if dryRun {
|
|
||||||
// No collisions in dry-run mode
|
|
||||||
fmt.Fprintf(os.Stderr, "No collisions detected.\n")
|
fmt.Fprintf(os.Stderr, "No collisions detected.\n")
|
||||||
|
}
|
||||||
fmt.Fprintf(os.Stderr, "Would create %d new issues, update %d existing issues\n",
|
fmt.Fprintf(os.Stderr, "Would create %d new issues, update %d existing issues\n",
|
||||||
len(collisionResult.NewIssues), len(collisionResult.ExactMatches))
|
result.Created, result.Updated)
|
||||||
|
fmt.Fprintf(os.Stderr, "\nDry-run mode: no changes made\n")
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Phase 4: Process remaining issues (exact matches and new issues)
|
// Print remapping report if collisions were resolved
|
||||||
// Separate existing issues (to update) from new issues (to batch create)
|
if len(result.IDMapping) > 0 {
|
||||||
var newIssues []*types.Issue
|
fmt.Fprintf(os.Stderr, "\n=== Remapping Report ===\n")
|
||||||
seenNew := make(map[string]int) // Track duplicates within import batch
|
fmt.Fprintf(os.Stderr, "Issues remapped: %d\n\n", len(result.IDMapping))
|
||||||
for _, issue := range allIssues {
|
|
||||||
// Check if issue exists
|
|
||||||
existing, err := store.GetIssue(ctx, issue.ID)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error checking issue %s: %v\n", issue.ID, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if existing != nil {
|
// Sort by old ID for consistent output
|
||||||
if skipUpdate {
|
type mapping struct {
|
||||||
skipped++
|
oldID string
|
||||||
continue
|
newID string
|
||||||
}
|
}
|
||||||
|
mappings := make([]mapping, 0, len(result.IDMapping))
|
||||||
|
for oldID, newID := range result.IDMapping {
|
||||||
|
mappings = append(mappings, mapping{oldID, newID})
|
||||||
|
}
|
||||||
|
sort.Slice(mappings, func(i, j int) bool {
|
||||||
|
return mappings[i].oldID < mappings[j].oldID
|
||||||
|
})
|
||||||
|
|
||||||
// Update existing issue
|
fmt.Fprintf(os.Stderr, "Remappings:\n")
|
||||||
// Parse raw JSON to detect which fields are present
|
for _, m := range mappings {
|
||||||
var rawData map[string]interface{}
|
fmt.Fprintf(os.Stderr, " %s → %s\n", m.oldID, m.newID)
|
||||||
jsonBytes, _ := json.Marshal(issue)
|
|
||||||
if err := json.Unmarshal(jsonBytes, &rawData); err != nil {
|
|
||||||
// If unmarshaling fails, treat all fields as present
|
|
||||||
rawData = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
|
|
||||||
updates := make(map[string]interface{})
|
|
||||||
if _, ok := rawData["title"]; ok {
|
|
||||||
updates["title"] = issue.Title
|
|
||||||
}
|
|
||||||
if _, ok := rawData["description"]; ok {
|
|
||||||
updates["description"] = issue.Description
|
|
||||||
}
|
|
||||||
if _, ok := rawData["design"]; ok {
|
|
||||||
updates["design"] = issue.Design
|
|
||||||
}
|
|
||||||
if _, ok := rawData["acceptance_criteria"]; ok {
|
|
||||||
updates["acceptance_criteria"] = issue.AcceptanceCriteria
|
|
||||||
}
|
|
||||||
if _, ok := rawData["notes"]; ok {
|
|
||||||
updates["notes"] = issue.Notes
|
|
||||||
}
|
|
||||||
if _, ok := rawData["status"]; ok {
|
|
||||||
updates["status"] = issue.Status
|
|
||||||
}
|
|
||||||
if _, ok := rawData["priority"]; ok {
|
|
||||||
updates["priority"] = issue.Priority
|
|
||||||
}
|
|
||||||
if _, ok := rawData["issue_type"]; ok {
|
|
||||||
updates["issue_type"] = issue.IssueType
|
|
||||||
}
|
|
||||||
if _, ok := rawData["assignee"]; ok {
|
|
||||||
updates["assignee"] = issue.Assignee
|
|
||||||
}
|
|
||||||
if _, ok := rawData["estimated_minutes"]; ok {
|
|
||||||
if issue.EstimatedMinutes != nil {
|
|
||||||
updates["estimated_minutes"] = *issue.EstimatedMinutes
|
|
||||||
} else {
|
|
||||||
updates["estimated_minutes"] = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if _, ok := rawData["external_ref"]; ok {
|
|
||||||
if issue.ExternalRef != nil {
|
|
||||||
updates["external_ref"] = *issue.ExternalRef
|
|
||||||
} else {
|
|
||||||
updates["external_ref"] = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := store.UpdateIssue(ctx, issue.ID, updates, "import"); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error updating issue %s: %v\n", issue.ID, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
updated++
|
|
||||||
} else {
|
|
||||||
// Normalize closed_at based on status before creating (enforce invariant)
|
|
||||||
if issue.Status == types.StatusClosed {
|
|
||||||
// Status is closed: ensure closed_at is set
|
|
||||||
if issue.ClosedAt == nil {
|
|
||||||
now := time.Now()
|
|
||||||
issue.ClosedAt = &now
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Status is not closed: ensure closed_at is NULL
|
|
||||||
issue.ClosedAt = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle duplicates within the same import batch (last one wins)
|
|
||||||
if idx, ok := seenNew[issue.ID]; ok {
|
|
||||||
// Last one wins regardless of skipUpdate (skipUpdate only applies to existing DB issues)
|
|
||||||
newIssues[idx] = issue
|
|
||||||
} else {
|
|
||||||
seenNew[issue.ID] = len(newIssues)
|
|
||||||
newIssues = append(newIssues, issue)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Batch create all new issues in one atomic transaction (5-15x faster!)
|
|
||||||
if len(newIssues) > 0 {
|
|
||||||
if err := store.CreateIssues(ctx, newIssues, "import"); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error creating issues: %v\n", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
created += len(newIssues)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 5: Sync ID counters after importing issues with explicit IDs
|
|
||||||
// This prevents ID collisions with subsequently auto-generated issues
|
|
||||||
// CRITICAL: If this fails, subsequent auto-generated IDs WILL collide with imported issues
|
|
||||||
if err := sqliteStore.SyncAllCounters(ctx); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error: failed to sync ID counters: %v\n", err)
|
|
||||||
fmt.Fprintf(os.Stderr, "Cannot proceed - auto-generated IDs would collide with imported issues.\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 6: Process dependencies
|
|
||||||
// Do this after all issues are created to handle forward references
|
|
||||||
var depsCreated, depsSkipped int
|
|
||||||
for _, issue := range allIssues {
|
|
||||||
if len(issue.Dependencies) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, dep := range issue.Dependencies {
|
|
||||||
// Check if dependency already exists
|
|
||||||
existingDeps, err := store.GetDependencyRecords(ctx, dep.IssueID)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error checking dependencies for %s: %v\n", dep.IssueID, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip if this exact dependency already exists
|
|
||||||
exists := false
|
|
||||||
for _, existing := range existingDeps {
|
|
||||||
if existing.DependsOnID == dep.DependsOnID && existing.Type == dep.Type {
|
|
||||||
exists = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if exists {
|
|
||||||
depsSkipped++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add dependency
|
|
||||||
if err := store.AddDependency(ctx, dep, "import"); err != nil {
|
|
||||||
if strict {
|
|
||||||
// In strict mode, fail on any dependency error
|
|
||||||
fmt.Fprintf(os.Stderr, "Error: could not add dependency %s → %s: %v\n",
|
|
||||||
dep.IssueID, dep.DependsOnID, err)
|
|
||||||
fmt.Fprintf(os.Stderr, "Use --strict=false to treat dependency errors as warnings\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
// In non-strict mode, ignore errors for missing target issues or cycles
|
|
||||||
// This can happen if dependencies reference issues not in the import
|
|
||||||
fmt.Fprintf(os.Stderr, "Warning: could not add dependency %s → %s: %v\n",
|
|
||||||
dep.IssueID, dep.DependsOnID, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
depsCreated++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 7: Process labels
|
|
||||||
// Sync labels for all imported issues
|
|
||||||
var labelsAdded, labelsRemoved int
|
|
||||||
for _, issue := range allIssues {
|
|
||||||
if issue.Labels == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get current labels for the issue
|
|
||||||
currentLabels, err := store.GetLabels(ctx, issue.ID)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error getting labels for %s: %v\n", issue.ID, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert slices to maps for easier comparison
|
|
||||||
currentLabelMap := make(map[string]bool)
|
|
||||||
for _, label := range currentLabels {
|
|
||||||
currentLabelMap[label] = true
|
|
||||||
}
|
|
||||||
importedLabelMap := make(map[string]bool)
|
|
||||||
for _, label := range issue.Labels {
|
|
||||||
importedLabelMap[label] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add missing labels
|
|
||||||
for _, label := range issue.Labels {
|
|
||||||
if !currentLabelMap[label] {
|
|
||||||
if err := store.AddLabel(ctx, issue.ID, label, "import"); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error adding label %s to %s: %v\n", label, issue.ID, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
labelsAdded++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove labels not in imported data
|
|
||||||
for _, label := range currentLabels {
|
|
||||||
if !importedLabelMap[label] {
|
|
||||||
if err := store.RemoveLabel(ctx, issue.ID, label, "import"); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Error removing label %s from %s: %v\n", label, issue.ID, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
labelsRemoved++
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "\nAll text and dependency references have been updated.\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Schedule auto-flush after import completes
|
// Schedule auto-flush after import completes
|
||||||
markDirtyAndScheduleFlush()
|
markDirtyAndScheduleFlush()
|
||||||
|
|
||||||
// Print summary
|
// Print summary
|
||||||
fmt.Fprintf(os.Stderr, "Import complete: %d created, %d updated", created, updated)
|
fmt.Fprintf(os.Stderr, "Import complete: %d created, %d updated", result.Created, result.Updated)
|
||||||
if skipped > 0 {
|
if result.Skipped > 0 {
|
||||||
fmt.Fprintf(os.Stderr, ", %d skipped", skipped)
|
fmt.Fprintf(os.Stderr, ", %d skipped", result.Skipped)
|
||||||
}
|
|
||||||
if depsCreated > 0 || depsSkipped > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, ", %d dependencies added", depsCreated)
|
|
||||||
if depsSkipped > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, " (%d already existed)", depsSkipped)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(idMapping) > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, ", %d issues remapped", len(idMapping))
|
|
||||||
}
|
|
||||||
if labelsAdded > 0 || labelsRemoved > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, ", %d labels synced", labelsAdded+labelsRemoved)
|
|
||||||
if labelsAdded > 0 && labelsRemoved > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, " (%d added, %d removed)", labelsAdded, labelsRemoved)
|
|
||||||
} else if labelsAdded > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, " (%d added)", labelsAdded)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(os.Stderr, " (%d removed)", labelsRemoved)
|
|
||||||
}
|
}
|
||||||
|
if len(result.IDMapping) > 0 {
|
||||||
|
fmt.Fprintf(os.Stderr, ", %d issues remapped", len(result.IDMapping))
|
||||||
}
|
}
|
||||||
fmt.Fprintf(os.Stderr, "\n")
|
fmt.Fprintf(os.Stderr, "\n")
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// printCollisionReport prints a detailed report of detected collisions
|
|
||||||
func printCollisionReport(result *sqlite.CollisionResult) {
|
|
||||||
fmt.Fprintf(os.Stderr, "\n=== Collision Detection Report ===\n")
|
|
||||||
fmt.Fprintf(os.Stderr, "Exact matches (idempotent): %d\n", len(result.ExactMatches))
|
|
||||||
fmt.Fprintf(os.Stderr, "New issues: %d\n", len(result.NewIssues))
|
|
||||||
fmt.Fprintf(os.Stderr, "COLLISIONS DETECTED: %d\n\n", len(result.Collisions))
|
|
||||||
|
|
||||||
if len(result.Collisions) > 0 {
|
|
||||||
fmt.Fprintf(os.Stderr, "Colliding issues:\n")
|
|
||||||
for _, collision := range result.Collisions {
|
|
||||||
fmt.Fprintf(os.Stderr, " %s: %s\n", collision.ID, collision.IncomingIssue.Title)
|
|
||||||
fmt.Fprintf(os.Stderr, " Conflicting fields: %v\n", collision.ConflictingFields)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// printRemappingReport prints a report of ID remappings with reference scores
|
|
||||||
func printRemappingReport(idMapping map[string]string, collisions []*sqlite.CollisionDetail) {
|
|
||||||
fmt.Fprintf(os.Stderr, "\n=== Remapping Report ===\n")
|
|
||||||
fmt.Fprintf(os.Stderr, "Issues remapped: %d\n\n", len(idMapping))
|
|
||||||
|
|
||||||
// Sort by old ID for consistent output
|
|
||||||
type mapping struct {
|
|
||||||
oldID string
|
|
||||||
newID string
|
|
||||||
score int
|
|
||||||
}
|
|
||||||
mappings := make([]mapping, 0, len(idMapping))
|
|
||||||
|
|
||||||
scoreMap := make(map[string]int)
|
|
||||||
for _, collision := range collisions {
|
|
||||||
scoreMap[collision.ID] = collision.ReferenceScore
|
|
||||||
}
|
|
||||||
|
|
||||||
for oldID, newID := range idMapping {
|
|
||||||
mappings = append(mappings, mapping{
|
|
||||||
oldID: oldID,
|
|
||||||
newID: newID,
|
|
||||||
score: scoreMap[oldID],
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(mappings, func(i, j int) bool {
|
|
||||||
return mappings[i].score < mappings[j].score
|
|
||||||
})
|
|
||||||
|
|
||||||
fmt.Fprintf(os.Stderr, "Remappings (sorted by reference count):\n")
|
|
||||||
for _, m := range mappings {
|
|
||||||
fmt.Fprintf(os.Stderr, " %s → %s (refs: %d)\n", m.oldID, m.newID, m.score)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(os.Stderr, "\nAll text and dependency references have been updated.\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
importCmd.Flags().StringP("input", "i", "", "Input file (default: stdin)")
|
importCmd.Flags().StringP("input", "i", "", "Input file (default: stdin)")
|
||||||
importCmd.Flags().BoolP("skip-existing", "s", false, "Skip existing issues instead of updating them")
|
importCmd.Flags().BoolP("skip-existing", "s", false, "Skip existing issues instead of updating them")
|
||||||
|
|||||||
279
cmd/bd/import_shared.go
Normal file
279
cmd/bd/import_shared.go
Normal file
@@ -0,0 +1,279 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/storage"
|
||||||
|
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||||
|
"github.com/steveyegge/beads/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ImportOptions configures how the import behaves
|
||||||
|
type ImportOptions struct {
|
||||||
|
ResolveCollisions bool // Auto-resolve collisions by remapping to new IDs
|
||||||
|
DryRun bool // Preview changes without applying them
|
||||||
|
SkipUpdate bool // Skip updating existing issues (create-only mode)
|
||||||
|
Strict bool // Fail on any error (dependencies, labels, etc.)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImportResult contains statistics about the import operation
|
||||||
|
type ImportResult struct {
|
||||||
|
Created int // New issues created
|
||||||
|
Updated int // Existing issues updated
|
||||||
|
Skipped int // Issues skipped (duplicates, errors)
|
||||||
|
Collisions int // Collisions detected
|
||||||
|
IDMapping map[string]string // Mapping of remapped IDs (old -> new)
|
||||||
|
CollisionIDs []string // IDs that collided
|
||||||
|
}
|
||||||
|
|
||||||
|
// importIssuesCore handles the core import logic used by both manual and auto-import.
|
||||||
|
// This function:
|
||||||
|
// - Opens a direct SQLite connection if needed (daemon mode)
|
||||||
|
// - Detects and handles collisions
|
||||||
|
// - Imports issues, dependencies, and labels
|
||||||
|
// - Returns detailed results
|
||||||
|
//
|
||||||
|
// The caller is responsible for:
|
||||||
|
// - Reading and parsing JSONL into issues slice
|
||||||
|
// - Displaying results to the user
|
||||||
|
// - Setting metadata (e.g., last_import_hash)
|
||||||
|
func importIssuesCore(ctx context.Context, dbPath string, store storage.Storage, issues []*types.Issue, opts ImportOptions) (*ImportResult, error) {
|
||||||
|
result := &ImportResult{
|
||||||
|
IDMapping: make(map[string]string),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 1: Get or create SQLite store
|
||||||
|
// Import needs direct SQLite access for collision detection
|
||||||
|
var sqliteStore *sqlite.SQLiteStorage
|
||||||
|
var needCloseStore bool
|
||||||
|
|
||||||
|
if store != nil {
|
||||||
|
// Direct mode - try to use existing store
|
||||||
|
var ok bool
|
||||||
|
sqliteStore, ok = store.(*sqlite.SQLiteStorage)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("collision detection requires SQLite storage backend")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Daemon mode - open direct connection for import
|
||||||
|
if dbPath == "" {
|
||||||
|
return nil, fmt.Errorf("database path not set")
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
sqliteStore, err = sqlite.New(dbPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to open database: %w", err)
|
||||||
|
}
|
||||||
|
needCloseStore = true
|
||||||
|
defer func() {
|
||||||
|
if needCloseStore {
|
||||||
|
sqliteStore.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 2: Detect collisions
|
||||||
|
collisionResult, err := sqlite.DetectCollisions(ctx, sqliteStore, issues)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("collision detection failed: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Collisions = len(collisionResult.Collisions)
|
||||||
|
for _, collision := range collisionResult.Collisions {
|
||||||
|
result.CollisionIDs = append(result.CollisionIDs, collision.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 3: Handle collisions
|
||||||
|
if len(collisionResult.Collisions) > 0 {
|
||||||
|
if opts.DryRun {
|
||||||
|
// In dry-run mode, just return collision info
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !opts.ResolveCollisions {
|
||||||
|
// Default behavior: fail on collision
|
||||||
|
return result, fmt.Errorf("collision detected for issues: %v (use ResolveCollisions to auto-resolve)", result.CollisionIDs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resolve collisions by scoring and remapping
|
||||||
|
allExistingIssues, err := sqliteStore.SearchIssues(ctx, "", types.IssueFilter{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get existing issues for collision resolution: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Score collisions
|
||||||
|
if err := sqlite.ScoreCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to score collisions: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remap collisions
|
||||||
|
idMapping, err := sqlite.RemapCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to remap collisions: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
result.IDMapping = idMapping
|
||||||
|
result.Created = len(collisionResult.Collisions)
|
||||||
|
|
||||||
|
// Remove colliding issues from the list (they're already processed)
|
||||||
|
filteredIssues := make([]*types.Issue, 0)
|
||||||
|
collidingIDs := make(map[string]bool)
|
||||||
|
for _, collision := range collisionResult.Collisions {
|
||||||
|
collidingIDs[collision.ID] = true
|
||||||
|
}
|
||||||
|
for _, issue := range issues {
|
||||||
|
if !collidingIDs[issue.ID] {
|
||||||
|
filteredIssues = append(filteredIssues, issue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
issues = filteredIssues
|
||||||
|
} else if opts.DryRun {
|
||||||
|
// No collisions in dry-run mode
|
||||||
|
result.Created = len(collisionResult.NewIssues)
|
||||||
|
result.Updated = len(collisionResult.ExactMatches)
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 4: Import remaining issues (exact matches and new issues)
|
||||||
|
var newIssues []*types.Issue
|
||||||
|
seenNew := make(map[string]int) // Track duplicates within import batch
|
||||||
|
|
||||||
|
for _, issue := range issues {
|
||||||
|
// Check if issue exists in DB
|
||||||
|
existing, err := sqliteStore.GetIssue(ctx, issue.ID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error checking issue %s: %w", issue.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if existing != nil {
|
||||||
|
// Issue exists - update it unless SkipUpdate is set
|
||||||
|
if opts.SkipUpdate {
|
||||||
|
result.Skipped++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build updates map
|
||||||
|
updates := make(map[string]interface{})
|
||||||
|
updates["title"] = issue.Title
|
||||||
|
updates["description"] = issue.Description
|
||||||
|
updates["status"] = issue.Status
|
||||||
|
updates["priority"] = issue.Priority
|
||||||
|
updates["issue_type"] = issue.IssueType
|
||||||
|
updates["design"] = issue.Design
|
||||||
|
updates["acceptance_criteria"] = issue.AcceptanceCriteria
|
||||||
|
updates["notes"] = issue.Notes
|
||||||
|
|
||||||
|
if issue.Assignee != "" {
|
||||||
|
updates["assignee"] = issue.Assignee
|
||||||
|
} else {
|
||||||
|
updates["assignee"] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if issue.ExternalRef != nil && *issue.ExternalRef != "" {
|
||||||
|
updates["external_ref"] = *issue.ExternalRef
|
||||||
|
} else {
|
||||||
|
updates["external_ref"] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := sqliteStore.UpdateIssue(ctx, issue.ID, updates, "import"); err != nil {
|
||||||
|
return nil, fmt.Errorf("error updating issue %s: %w", issue.ID, err)
|
||||||
|
}
|
||||||
|
result.Updated++
|
||||||
|
} else {
|
||||||
|
// New issue - check for duplicates in import batch
|
||||||
|
if idx, seen := seenNew[issue.ID]; seen {
|
||||||
|
if opts.Strict {
|
||||||
|
return nil, fmt.Errorf("duplicate issue ID %s in import (line %d)", issue.ID, idx)
|
||||||
|
}
|
||||||
|
result.Skipped++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seenNew[issue.ID] = len(newIssues)
|
||||||
|
newIssues = append(newIssues, issue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Batch create all new issues
|
||||||
|
if len(newIssues) > 0 {
|
||||||
|
if err := sqliteStore.CreateIssues(ctx, newIssues, "import"); err != nil {
|
||||||
|
return nil, fmt.Errorf("error creating issues: %w", err)
|
||||||
|
}
|
||||||
|
result.Created += len(newIssues)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync counters after batch import
|
||||||
|
if err := sqliteStore.SyncAllCounters(ctx); err != nil {
|
||||||
|
return nil, fmt.Errorf("error syncing counters: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 5: Import dependencies
|
||||||
|
for _, issue := range issues {
|
||||||
|
if len(issue.Dependencies) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, dep := range issue.Dependencies {
|
||||||
|
// Check if dependency already exists
|
||||||
|
existingDeps, err := sqliteStore.GetDependencyRecords(ctx, dep.IssueID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error checking dependencies for %s: %w", dep.IssueID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for duplicate
|
||||||
|
isDuplicate := false
|
||||||
|
for _, existing := range existingDeps {
|
||||||
|
if existing.DependsOnID == dep.DependsOnID && existing.Type == dep.Type {
|
||||||
|
isDuplicate = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if isDuplicate {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add dependency
|
||||||
|
if err := sqliteStore.AddDependency(ctx, dep, "import"); err != nil {
|
||||||
|
if opts.Strict {
|
||||||
|
return nil, fmt.Errorf("error adding dependency %s → %s: %w", dep.IssueID, dep.DependsOnID, err)
|
||||||
|
}
|
||||||
|
// Non-strict mode: just skip this dependency
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 6: Import labels
|
||||||
|
for _, issue := range issues {
|
||||||
|
if len(issue.Labels) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get current labels
|
||||||
|
currentLabels, err := sqliteStore.GetLabels(ctx, issue.ID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error getting labels for %s: %w", issue.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
currentLabelSet := make(map[string]bool)
|
||||||
|
for _, label := range currentLabels {
|
||||||
|
currentLabelSet[label] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add missing labels
|
||||||
|
for _, label := range issue.Labels {
|
||||||
|
if !currentLabelSet[label] {
|
||||||
|
if err := sqliteStore.AddLabel(ctx, issue.ID, label, "import"); err != nil {
|
||||||
|
if opts.Strict {
|
||||||
|
return nil, fmt.Errorf("error adding label %s to %s: %w", label, issue.ID, err)
|
||||||
|
}
|
||||||
|
// Non-strict mode: skip this label
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
@@ -712,13 +712,34 @@ func autoImportIfNewer() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Detect collisions before importing (bd-228 fix)
|
// Detect collisions before importing (bd-228 fix)
|
||||||
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
// Auto-import needs direct SQLite access for collision detection
|
||||||
|
var sqliteStore *sqlite.SQLiteStorage
|
||||||
|
|
||||||
|
if store != nil {
|
||||||
|
// Direct mode - try to use existing store
|
||||||
|
var ok bool
|
||||||
|
sqliteStore, ok = store.(*sqlite.SQLiteStorage)
|
||||||
if !ok {
|
if !ok {
|
||||||
// Not SQLite - skip auto-import to avoid silent data loss without collision detection
|
|
||||||
fmt.Fprintf(os.Stderr, "Auto-import disabled for non-SQLite backend (no collision detection).\n")
|
fmt.Fprintf(os.Stderr, "Auto-import disabled for non-SQLite backend (no collision detection).\n")
|
||||||
fmt.Fprintf(os.Stderr, "To import manually, run: bd import -i %s\n", jsonlPath)
|
fmt.Fprintf(os.Stderr, "To import manually, run: bd import -i %s\n", jsonlPath)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// Daemon mode - open direct connection for auto-import
|
||||||
|
if dbPath == "" {
|
||||||
|
if os.Getenv("BD_DEBUG") != "" {
|
||||||
|
fmt.Fprintf(os.Stderr, "Debug: auto-import skipped, no database path\n")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
sqliteStore, err = sqlite.New(dbPath)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Auto-import failed: could not open database: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer sqliteStore.Close()
|
||||||
|
}
|
||||||
|
|
||||||
collisionResult, err := sqlite.DetectCollisions(ctx, sqliteStore, allIssues)
|
collisionResult, err := sqlite.DetectCollisions(ctx, sqliteStore, allIssues)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
297
cmd/bd/stale.go
Normal file
297
cmd/bd/stale.go
Normal file
@@ -0,0 +1,297 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fatih/color"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StaleIssueInfo describes one orphaned issue claim: the claimed issue,
// the executor instance that claimed it, and how stale the claim is.
type StaleIssueInfo struct {
	// Issue identity.
	IssueID      string `json:"issue_id"`
	IssueTitle   string `json:"issue_title"`
	IssuePriority int   `json:"issue_priority"`

	// Executor that holds (or held) the claim.
	ExecutorInstanceID string `json:"executor_instance_id"`
	ExecutorStatus     string `json:"executor_status"`
	ExecutorHostname   string `json:"executor_hostname"`
	ExecutorPID        int    `json:"executor_pid"`

	// Staleness details.
	LastHeartbeat   time.Time `json:"last_heartbeat"`
	ClaimedAt       time.Time `json:"claimed_at"`
	ClaimedDuration string    `json:"claimed_duration"` // Human-readable duration
}
|
||||||
|
|
||||||
|
var staleCmd = &cobra.Command{
|
||||||
|
Use: "stale",
|
||||||
|
Short: "Show orphaned claims and dead executors",
|
||||||
|
Long: `Show issues stuck in_progress with execution_state where the executor is dead or stopped.
|
||||||
|
This helps identify orphaned work that needs manual recovery.
|
||||||
|
|
||||||
|
An issue is considered stale if:
|
||||||
|
- It has an execution_state (claimed by an executor)
|
||||||
|
- AND the executor status is 'stopped'
|
||||||
|
- OR the executor's last_heartbeat is older than the threshold
|
||||||
|
|
||||||
|
Default threshold: 300 seconds (5 minutes)`,
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
threshold, _ := cmd.Flags().GetInt("threshold")
|
||||||
|
release, _ := cmd.Flags().GetBool("release")
|
||||||
|
|
||||||
|
// Get stale issues
|
||||||
|
staleIssues, err := getStaleIssues(threshold)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle JSON output
|
||||||
|
if jsonOutput {
|
||||||
|
if staleIssues == nil {
|
||||||
|
staleIssues = []*StaleIssueInfo{}
|
||||||
|
}
|
||||||
|
outputJSON(staleIssues)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle empty result
|
||||||
|
if len(staleIssues) == 0 {
|
||||||
|
green := color.New(color.FgGreen).SprintFunc()
|
||||||
|
fmt.Printf("\n%s No stale issues found (all executors healthy)\n\n", green("✨"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display stale issues
|
||||||
|
red := color.New(color.FgRed).SprintFunc()
|
||||||
|
yellow := color.New(color.FgYellow).SprintFunc()
|
||||||
|
fmt.Printf("\n%s Found %d stale issue(s) with orphaned claims:\n\n", yellow("⚠️"), len(staleIssues))
|
||||||
|
|
||||||
|
for i, si := range staleIssues {
|
||||||
|
fmt.Printf("%d. [P%d] %s: %s\n", i+1, si.IssuePriority, si.IssueID, si.IssueTitle)
|
||||||
|
fmt.Printf(" Executor: %s (%s)\n", si.ExecutorInstanceID, si.ExecutorStatus)
|
||||||
|
fmt.Printf(" Host: %s (PID: %d)\n", si.ExecutorHostname, si.ExecutorPID)
|
||||||
|
fmt.Printf(" Last heartbeat: %s (%.0f seconds ago)\n",
|
||||||
|
si.LastHeartbeat.Format("2006-01-02 15:04:05"),
|
||||||
|
time.Since(si.LastHeartbeat).Seconds())
|
||||||
|
fmt.Printf(" Claimed for: %s\n", si.ClaimedDuration)
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle release flag
|
||||||
|
if release {
|
||||||
|
fmt.Printf("%s Releasing %d stale issue(s)...\n\n", yellow("🔧"), len(staleIssues))
|
||||||
|
|
||||||
|
releaseCount, err := releaseStaleIssues(staleIssues)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "%s Failed to release issues: %v\n", red("✗"), err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
green := color.New(color.FgGreen).SprintFunc()
|
||||||
|
fmt.Printf("%s Successfully released %d issue(s) and marked executors as stopped\n\n", green("✓"), releaseCount)
|
||||||
|
|
||||||
|
// Schedule auto-flush if any issues were released
|
||||||
|
if releaseCount > 0 {
|
||||||
|
markDirtyAndScheduleFlush()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
cyan := color.New(color.FgCyan).SprintFunc()
|
||||||
|
fmt.Printf("%s Use --release flag to automatically release these issues\n\n", cyan("💡"))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// getStaleIssues queries for issues with execution_state where executor is dead/stopped
|
||||||
|
func getStaleIssues(thresholdSeconds int) ([]*StaleIssueInfo, error) {
|
||||||
|
// If daemon is running but doesn't support this command, use direct storage
|
||||||
|
if daemonClient != nil && store == nil {
|
||||||
|
var err error
|
||||||
|
store, err = sqlite.New(dbPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to open database: %w", err)
|
||||||
|
}
|
||||||
|
defer store.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
cutoffTime := time.Now().Add(-time.Duration(thresholdSeconds) * time.Second)
|
||||||
|
|
||||||
|
// Query for stale issues
|
||||||
|
// Use LEFT JOIN to catch orphaned execution states where executor instance is missing
|
||||||
|
query := `
|
||||||
|
SELECT
|
||||||
|
i.id,
|
||||||
|
i.title,
|
||||||
|
i.priority,
|
||||||
|
ies.executor_instance_id,
|
||||||
|
COALESCE(ei.status, 'missing'),
|
||||||
|
COALESCE(ei.hostname, 'unknown'),
|
||||||
|
COALESCE(ei.pid, 0),
|
||||||
|
ei.last_heartbeat,
|
||||||
|
ies.started_at
|
||||||
|
FROM issues i
|
||||||
|
JOIN issue_execution_state ies ON i.id = ies.issue_id
|
||||||
|
LEFT JOIN executor_instances ei ON ies.executor_instance_id = ei.instance_id
|
||||||
|
WHERE ei.instance_id IS NULL
|
||||||
|
OR ei.status = 'stopped'
|
||||||
|
OR ei.last_heartbeat < ?
|
||||||
|
ORDER BY ei.last_heartbeat ASC, i.priority ASC
|
||||||
|
`
|
||||||
|
|
||||||
|
// Access the underlying SQLite connection
|
||||||
|
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("stale command requires SQLite backend")
|
||||||
|
}
|
||||||
|
|
||||||
|
rows, err := sqliteStore.QueryContext(ctx, query, cutoffTime)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to query stale issues: %w", err)
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var staleIssues []*StaleIssueInfo
|
||||||
|
for rows.Next() {
|
||||||
|
var si StaleIssueInfo
|
||||||
|
var lastHeartbeat sql.NullTime
|
||||||
|
err := rows.Scan(
|
||||||
|
&si.IssueID,
|
||||||
|
&si.IssueTitle,
|
||||||
|
&si.IssuePriority,
|
||||||
|
&si.ExecutorInstanceID,
|
||||||
|
&si.ExecutorStatus,
|
||||||
|
&si.ExecutorHostname,
|
||||||
|
&si.ExecutorPID,
|
||||||
|
&lastHeartbeat,
|
||||||
|
&si.ClaimedAt,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to scan stale issue: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle nullable last_heartbeat
|
||||||
|
if lastHeartbeat.Valid {
|
||||||
|
si.LastHeartbeat = lastHeartbeat.Time
|
||||||
|
} else {
|
||||||
|
// Use Unix epoch for missing executors
|
||||||
|
si.LastHeartbeat = time.Unix(0, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate claimed duration
|
||||||
|
si.ClaimedDuration = formatDuration(time.Since(si.ClaimedAt))
|
||||||
|
|
||||||
|
staleIssues = append(staleIssues, &si)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = rows.Err(); err != nil {
|
||||||
|
return nil, fmt.Errorf("error iterating stale issues: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return staleIssues, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// releaseStaleIssues releases all stale issues by deleting execution state and resetting status
|
||||||
|
func releaseStaleIssues(staleIssues []*StaleIssueInfo) (int, error) {
|
||||||
|
// If daemon is running but doesn't support this command, use direct storage
|
||||||
|
if daemonClient != nil && store == nil {
|
||||||
|
var err error
|
||||||
|
store, err = sqlite.New(dbPath)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to open database: %w", err)
|
||||||
|
}
|
||||||
|
defer store.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Access the underlying SQLite connection for transaction
|
||||||
|
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
||||||
|
if !ok {
|
||||||
|
return 0, fmt.Errorf("stale command requires SQLite backend")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start transaction for atomic cleanup
|
||||||
|
tx, err := sqliteStore.BeginTx(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to begin transaction: %w", err)
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
releaseCount := 0
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
for _, si := range staleIssues {
|
||||||
|
// Delete execution state
|
||||||
|
_, err = tx.ExecContext(ctx, `
|
||||||
|
DELETE FROM issue_execution_state
|
||||||
|
WHERE issue_id = ?
|
||||||
|
`, si.IssueID)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to delete execution state for issue %s: %w", si.IssueID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset issue status to 'open'
|
||||||
|
_, err = tx.ExecContext(ctx, `
|
||||||
|
UPDATE issues
|
||||||
|
SET status = 'open', updated_at = ?
|
||||||
|
WHERE id = ?
|
||||||
|
`, now, si.IssueID)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to reset issue status for %s: %w", si.IssueID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add comment explaining the release
|
||||||
|
comment := fmt.Sprintf("Issue automatically released - executor instance %s became stale (last heartbeat: %s)",
|
||||||
|
si.ExecutorInstanceID, si.LastHeartbeat.Format("2006-01-02 15:04:05"))
|
||||||
|
_, err = tx.ExecContext(ctx, `
|
||||||
|
INSERT INTO events (issue_id, event_type, actor, comment, created_at)
|
||||||
|
VALUES (?, 'status_changed', 'system', ?, ?)
|
||||||
|
`, si.IssueID, comment, now)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to add release comment for issue %s: %w", si.IssueID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark executor instance as 'stopped' if not already
|
||||||
|
_, err = tx.ExecContext(ctx, `
|
||||||
|
UPDATE executor_instances
|
||||||
|
SET status = 'stopped'
|
||||||
|
WHERE instance_id = ? AND status != 'stopped'
|
||||||
|
`, si.ExecutorInstanceID)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to mark executor as stopped: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
releaseCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commit the transaction
|
||||||
|
if err = tx.Commit(); err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to commit transaction: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return releaseCount, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatDuration formats a duration in a human-readable way
|
||||||
|
func formatDuration(d time.Duration) string {
|
||||||
|
if d < time.Minute {
|
||||||
|
return fmt.Sprintf("%.0f seconds", d.Seconds())
|
||||||
|
} else if d < time.Hour {
|
||||||
|
return fmt.Sprintf("%.0f minutes", d.Minutes())
|
||||||
|
} else if d < 24*time.Hour {
|
||||||
|
return fmt.Sprintf("%.1f hours", d.Hours())
|
||||||
|
} else {
|
||||||
|
return fmt.Sprintf("%.1f days", d.Hours()/24)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
staleCmd.Flags().IntP("threshold", "t", 300, "Heartbeat threshold in seconds (default: 300 = 5 minutes)")
|
||||||
|
staleCmd.Flags().BoolP("release", "r", false, "Automatically release all stale issues")
|
||||||
|
|
||||||
|
rootCmd.AddCommand(staleCmd)
|
||||||
|
}
|
||||||
18
internal/storage/sqlite/util.go
Normal file
18
internal/storage/sqlite/util.go
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
package sqlite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
)
|
||||||
|
|
||||||
|
// QueryContext exposes the underlying database QueryContext method for advanced queries
|
||||||
|
// This is used by commands that need direct SQL access (e.g., bd stale)
|
||||||
|
func (s *SQLiteStorage) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
|
||||||
|
return s.db.QueryContext(ctx, query, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BeginTx starts a new database transaction
|
||||||
|
// This is used by commands that need to perform multiple operations atomically
|
||||||
|
func (s *SQLiteStorage) BeginTx(ctx context.Context) (*sql.Tx, error) {
|
||||||
|
return s.db.BeginTx(ctx, nil)
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user