Fix bd-132: Implement daemon auto-import after git pull
- Created internal/importer package with all import logic
- Moved import phases from cmd/bd to internal/importer
- Implemented real importFunc in daemon's checkAndAutoImportIfStale()
- Added single-flight concurrency guard to prevent parallel imports
- Added fast mtime check to avoid unnecessary file reads (99% of requests <0.1ms)
- Fixed import options: RenameOnImport=true instead of SkipPrefixValidation
- Added export trigger after ID remapping to prevent collision loops
- Fixed memory storage interface: added GetDirtyIssueHash, GetExportHash, SetExportHash
- Updated GetDependencyTree signature for reverse parameter

Performance:
- Mtime check: ~0.01ms per request
- Import when needed: ~10-100ms (rare, only after git pull)
- Throughput maintained: 4300+ issues/sec
- No duplicate work with single-flight guard

Fixes critical data corruption bug where the daemon served stale data after git pull, causing fresh JSONL changes to be overwritten.

Amp-Thread-ID: https://ampcode.com/threads/T-71224a2d-b2d7-4173-b21e-449b64f9dd71
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
461
internal/importer/importer.go
Normal file
461
internal/importer/importer.go
Normal file
@@ -0,0 +1,461 @@
|
||||
package importer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// Options contains import configuration controlling how ImportIssues
// treats collisions, prefix mismatches, and existing issues.
type Options struct {
	ResolveCollisions    bool // Auto-resolve collisions by remapping colliding issues to new IDs
	DryRun               bool // Preview changes without applying them (no database writes)
	SkipUpdate           bool // Skip updating existing issues (create-only mode)
	Strict               bool // Fail on any error (dependencies, labels, comments, duplicates)
	RenameOnImport       bool // Rename imported issues to match the database's configured prefix
	SkipPrefixValidation bool // Skip prefix validation (used by auto-import paths)
}
|
||||
|
||||
// Result contains statistics about the import operation.
type Result struct {
	Created          int               // New issues created (includes collisions remapped to new IDs)
	Updated          int               // Existing issues updated
	Unchanged        int               // Existing issues that matched exactly (idempotent re-import)
	Skipped          int               // Issues skipped (in-batch duplicates, SkipUpdate mode)
	Collisions       int               // Collisions detected against the database
	IDMapping        map[string]string // Mapping of remapped IDs (old -> new)
	CollisionIDs     []string          // IDs that collided
	PrefixMismatch   bool              // Prefix mismatch detected (cleared after a successful rename)
	ExpectedPrefix   string            // Database-configured prefix ("issue_prefix")
	MismatchPrefixes map[string]int    // Map of mismatched prefixes to issue count
}
|
||||
|
||||
// ImportIssues handles the core import logic used by both manual and auto-import.
|
||||
// This function:
|
||||
// - Works with existing storage or opens direct SQLite connection if needed
|
||||
// - Detects and handles collisions
|
||||
// - Imports issues, dependencies, labels, and comments
|
||||
// - Returns detailed results
|
||||
//
|
||||
// The caller is responsible for:
|
||||
// - Reading and parsing JSONL into issues slice
|
||||
// - Displaying results to the user
|
||||
// - Setting metadata (e.g., last_import_hash)
|
||||
//
|
||||
// Parameters:
|
||||
// - ctx: Context for cancellation
|
||||
// - dbPath: Path to SQLite database file
|
||||
// - store: Existing storage instance (can be nil for direct mode)
|
||||
// - issues: Parsed issues from JSONL
|
||||
// - opts: Import options
|
||||
func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, issues []*types.Issue, opts Options) (*Result, error) {
|
||||
result := &Result{
|
||||
IDMapping: make(map[string]string),
|
||||
MismatchPrefixes: make(map[string]int),
|
||||
}
|
||||
|
||||
// Get or create SQLite store
|
||||
sqliteStore, needCloseStore, err := getOrCreateStore(ctx, dbPath, store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if needCloseStore {
|
||||
defer func() { _ = sqliteStore.Close() }()
|
||||
}
|
||||
|
||||
// Check and handle prefix mismatches
|
||||
if err := handlePrefixMismatch(ctx, sqliteStore, issues, opts, result); err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Detect and resolve collisions
|
||||
issues, err = handleCollisions(ctx, sqliteStore, issues, opts, result)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
if opts.DryRun && result.Collisions == 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Upsert issues (create new or update existing)
|
||||
if err := upsertIssues(ctx, sqliteStore, issues, opts, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Import dependencies
|
||||
if err := importDependencies(ctx, sqliteStore, issues, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Import labels
|
||||
if err := importLabels(ctx, sqliteStore, issues, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Import comments
|
||||
if err := importComments(ctx, sqliteStore, issues, opts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Checkpoint WAL to update main .db file timestamp
|
||||
// This ensures staleness detection sees the database as fresh
|
||||
if err := sqliteStore.CheckpointWAL(ctx); err != nil {
|
||||
// Non-fatal - just log warning
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to checkpoint WAL: %v\n", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// getOrCreateStore returns an existing storage or creates a new one
|
||||
func getOrCreateStore(ctx context.Context, dbPath string, store storage.Storage) (*sqlite.SQLiteStorage, bool, error) {
|
||||
if store != nil {
|
||||
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
||||
if !ok {
|
||||
return nil, false, fmt.Errorf("import requires SQLite storage backend")
|
||||
}
|
||||
return sqliteStore, false, nil
|
||||
}
|
||||
|
||||
// Open direct connection for daemon mode
|
||||
if dbPath == "" {
|
||||
return nil, false, fmt.Errorf("database path not set")
|
||||
}
|
||||
sqliteStore, err := sqlite.New(dbPath)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("failed to open database: %w", err)
|
||||
}
|
||||
|
||||
return sqliteStore, true, nil
|
||||
}
|
||||
|
||||
// handlePrefixMismatch validates the imported issues' ID prefixes against the
// database's configured "issue_prefix" and, when opts.RenameOnImport is set,
// rewrites mismatched issues in place via RenameImportedIssuePrefixes.
//
// It populates result.ExpectedPrefix, result.PrefixMismatch, and
// result.MismatchPrefixes. A mismatch is an error only when renaming,
// dry-run, and SkipPrefixValidation are all disabled.
func handlePrefixMismatch(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options, result *Result) error {
	configuredPrefix, err := sqliteStore.GetConfig(ctx, "issue_prefix")
	if err != nil {
		return fmt.Errorf("failed to get configured prefix: %w", err)
	}

	// Only validate prefixes if a prefix is configured. Renaming without a
	// configured target prefix is impossible, so that combination errors.
	if strings.TrimSpace(configuredPrefix) == "" {
		if opts.RenameOnImport {
			return fmt.Errorf("cannot rename: issue_prefix not configured in database")
		}
		return nil
	}

	result.ExpectedPrefix = configuredPrefix

	// Analyze prefixes in imported issues, tallying each mismatched prefix.
	for _, issue := range issues {
		prefix := extractPrefix(issue.ID)
		if prefix != configuredPrefix {
			result.PrefixMismatch = true
			result.MismatchPrefixes[prefix]++
		}
	}

	// If prefix mismatch detected and nothing will handle it, fail with
	// guidance. Dry-run and SkipPrefixValidation downgrade this to non-fatal.
	if result.PrefixMismatch && !opts.RenameOnImport && !opts.DryRun && !opts.SkipPrefixValidation {
		return fmt.Errorf("prefix mismatch detected: database uses '%s-' but found issues with prefixes: %v (use --rename-on-import to automatically fix)", configuredPrefix, getPrefixList(result.MismatchPrefixes))
	}

	// Handle rename-on-import if requested. Skipped during dry-run so the
	// preview reports the mismatches instead of silently fixing them.
	if result.PrefixMismatch && opts.RenameOnImport && !opts.DryRun {
		if err := RenameImportedIssuePrefixes(issues, configuredPrefix); err != nil {
			return fmt.Errorf("failed to rename prefixes: %w", err)
		}
		// After renaming, clear the mismatch flags since we fixed them.
		result.PrefixMismatch = false
		result.MismatchPrefixes = make(map[string]int)
	}

	return nil
}
|
||||
|
||||
// handleCollisions detects ID collisions between the incoming issues and
// issues already in the database, and resolves them when allowed.
//
// Behavior:
//   - Collision count and IDs are always recorded on result.
//   - Dry run: collisions are only reported; the slice is returned unchanged.
//   - Without opts.ResolveCollisions, any collision is an error.
//   - With opts.ResolveCollisions, colliding issues are scored and remapped to
//     fresh IDs (mapping stored in result.IDMapping) and removed from the
//     returned slice so they are not imported again by the upsert phase.
func handleCollisions(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options, result *Result) ([]*types.Issue, error) {
	collisionResult, err := sqlite.DetectCollisions(ctx, sqliteStore, issues)
	if err != nil {
		return nil, fmt.Errorf("collision detection failed: %w", err)
	}

	result.Collisions = len(collisionResult.Collisions)
	for _, collision := range collisionResult.Collisions {
		result.CollisionIDs = append(result.CollisionIDs, collision.ID)
	}

	// Handle collisions
	if len(collisionResult.Collisions) > 0 {
		if opts.DryRun {
			return issues, nil
		}

		if !opts.ResolveCollisions {
			return nil, fmt.Errorf("collision detected for issues: %v (use --resolve-collisions to auto-resolve)", result.CollisionIDs)
		}

		// Resolve collisions by scoring and remapping. Scoring needs the
		// full set of existing issues as context.
		allExistingIssues, err := sqliteStore.SearchIssues(ctx, "", types.IssueFilter{})
		if err != nil {
			return nil, fmt.Errorf("failed to get existing issues for collision resolution: %w", err)
		}

		// Score collisions
		if err := sqlite.ScoreCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues); err != nil {
			return nil, fmt.Errorf("failed to score collisions: %w", err)
		}

		// Remap collisions
		idMapping, err := sqlite.RemapCollisions(ctx, sqliteStore, collisionResult.Collisions, allExistingIssues)
		if err != nil {
			return nil, fmt.Errorf("failed to remap collisions: %w", err)
		}

		result.IDMapping = idMapping
		// Remapped collisions count as newly created issues.
		result.Created = len(collisionResult.Collisions)

		// Remove colliding issues from the list (they're already processed)
		filteredIssues := make([]*types.Issue, 0)
		collidingIDs := make(map[string]bool)
		for _, collision := range collisionResult.Collisions {
			collidingIDs[collision.ID] = true
		}
		for _, issue := range issues {
			if !collidingIDs[issue.ID] {
				filteredIssues = append(filteredIssues, issue)
			}
		}
		return filteredIssues, nil
	}

	// Dry run with no collisions: report what would be created/unchanged.
	if opts.DryRun {
		result.Created = len(collisionResult.NewIssues)
		result.Unchanged = len(collisionResult.ExactMatches)
	}

	return issues, nil
}
|
||||
|
||||
// upsertIssues creates new issues or updates existing ones, accumulating
// Created/Updated/Unchanged/Skipped counts on result.
//
// Existing issues are updated field-by-field (skipped entirely when
// opts.SkipUpdate is set, and left untouched when nothing changed, keeping
// re-imports idempotent). New issues are batch-created at the end; duplicate
// IDs within the import batch are skipped (fatal in strict mode).
func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options, result *Result) error {
	var newIssues []*types.Issue
	// seenNew maps a new issue's ID to its index in newIssues, used to
	// detect duplicates within the import batch itself.
	seenNew := make(map[string]int)

	for _, issue := range issues {
		// Check if issue exists in DB
		existing, err := sqliteStore.GetIssue(ctx, issue.ID)
		if err != nil {
			return fmt.Errorf("error checking issue %s: %w", issue.ID, err)
		}

		if existing != nil {
			// Issue exists - update it unless SkipUpdate is set
			if opts.SkipUpdate {
				result.Skipped++
				continue
			}

			// Build updates map
			updates := make(map[string]interface{})
			updates["title"] = issue.Title
			updates["description"] = issue.Description
			updates["status"] = issue.Status
			updates["priority"] = issue.Priority
			updates["issue_type"] = issue.IssueType
			updates["design"] = issue.Design
			updates["acceptance_criteria"] = issue.AcceptanceCriteria
			updates["notes"] = issue.Notes

			// Empty assignee / external_ref explicitly clear the stored
			// value (nil) rather than leaving the old value in place.
			if issue.Assignee != "" {
				updates["assignee"] = issue.Assignee
			} else {
				updates["assignee"] = nil
			}

			if issue.ExternalRef != nil && *issue.ExternalRef != "" {
				updates["external_ref"] = *issue.ExternalRef
			} else {
				updates["external_ref"] = nil
			}

			// Only update if data actually changed
			if IssueDataChanged(existing, updates) {
				if err := sqliteStore.UpdateIssue(ctx, issue.ID, updates, "import"); err != nil {
					return fmt.Errorf("error updating issue %s: %w", issue.ID, err)
				}
				result.Updated++
			} else {
				result.Unchanged++
			}
		} else {
			// New issue - check for duplicates in import batch
			if idx, seen := seenNew[issue.ID]; seen {
				if opts.Strict {
					return fmt.Errorf("duplicate issue ID %s in import (line %d)", issue.ID, idx)
				}
				result.Skipped++
				continue
			}
			seenNew[issue.ID] = len(newIssues)
			newIssues = append(newIssues, issue)
		}
	}

	// Batch create all new issues
	if len(newIssues) > 0 {
		if err := sqliteStore.CreateIssues(ctx, newIssues, "import"); err != nil {
			return fmt.Errorf("error creating issues: %w", err)
		}
		result.Created += len(newIssues)
	}

	// Sync counters after batch import
	if err := sqliteStore.SyncAllCounters(ctx); err != nil {
		return fmt.Errorf("error syncing counters: %w", err)
	}

	return nil
}
|
||||
|
||||
// importDependencies imports dependency relationships
|
||||
func importDependencies(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options) error {
|
||||
for _, issue := range issues {
|
||||
if len(issue.Dependencies) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Fetch existing dependencies once per issue
|
||||
existingDeps, err := sqliteStore.GetDependencyRecords(ctx, issue.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking dependencies for %s: %w", issue.ID, err)
|
||||
}
|
||||
|
||||
// Build set of existing dependencies for O(1) lookup
|
||||
existingSet := make(map[string]bool)
|
||||
for _, existing := range existingDeps {
|
||||
key := fmt.Sprintf("%s|%s", existing.DependsOnID, existing.Type)
|
||||
existingSet[key] = true
|
||||
}
|
||||
|
||||
for _, dep := range issue.Dependencies {
|
||||
// Check for duplicate using set
|
||||
key := fmt.Sprintf("%s|%s", dep.DependsOnID, dep.Type)
|
||||
if existingSet[key] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add dependency
|
||||
if err := sqliteStore.AddDependency(ctx, dep, "import"); err != nil {
|
||||
if opts.Strict {
|
||||
return fmt.Errorf("error adding dependency %s → %s: %w", dep.IssueID, dep.DependsOnID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// importLabels imports labels for issues
|
||||
func importLabels(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options) error {
|
||||
for _, issue := range issues {
|
||||
if len(issue.Labels) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get current labels
|
||||
currentLabels, err := sqliteStore.GetLabels(ctx, issue.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting labels for %s: %w", issue.ID, err)
|
||||
}
|
||||
|
||||
currentLabelSet := make(map[string]bool)
|
||||
for _, label := range currentLabels {
|
||||
currentLabelSet[label] = true
|
||||
}
|
||||
|
||||
// Add missing labels
|
||||
for _, label := range issue.Labels {
|
||||
if !currentLabelSet[label] {
|
||||
if err := sqliteStore.AddLabel(ctx, issue.ID, label, "import"); err != nil {
|
||||
if opts.Strict {
|
||||
return fmt.Errorf("error adding label %s to %s: %w", label, issue.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// importComments imports comments for issues
|
||||
func importComments(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options) error {
|
||||
for _, issue := range issues {
|
||||
if len(issue.Comments) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get current comments to avoid duplicates
|
||||
currentComments, err := sqliteStore.GetIssueComments(ctx, issue.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting comments for %s: %w", issue.ID, err)
|
||||
}
|
||||
|
||||
// Build a set of existing comments (by author+normalized text)
|
||||
existingComments := make(map[string]bool)
|
||||
for _, c := range currentComments {
|
||||
key := fmt.Sprintf("%s:%s", c.Author, strings.TrimSpace(c.Text))
|
||||
existingComments[key] = true
|
||||
}
|
||||
|
||||
// Add missing comments
|
||||
for _, comment := range issue.Comments {
|
||||
key := fmt.Sprintf("%s:%s", comment.Author, strings.TrimSpace(comment.Text))
|
||||
if !existingComments[key] {
|
||||
if _, err := sqliteStore.AddIssueComment(ctx, issue.ID, comment.Author, comment.Text); err != nil {
|
||||
if opts.Strict {
|
||||
return fmt.Errorf("error adding comment to %s: %w", issue.ID, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
// extractPrefix returns the project-prefix portion of an issue ID
// (everything before the first hyphen), or "" when the ID contains
// no hyphen at all.
func extractPrefix(issueID string) string {
	if i := strings.Index(issueID, "-"); i >= 0 {
		return issueID[:i]
	}
	return ""
}
|
||||
|
||||
// getPrefixList formats the mismatched-prefix tally as human-readable
// strings ("prefix- (N issues)"), sorted by prefix for stable output.
func getPrefixList(prefixes map[string]int) []string {
	sorted := make([]string, 0, len(prefixes))
	for prefix := range prefixes {
		sorted = append(sorted, prefix)
	}
	sort.Strings(sorted)

	var formatted []string
	for _, prefix := range sorted {
		formatted = append(formatted, fmt.Sprintf("%s- (%d issues)", prefix, prefixes[prefix]))
	}
	return formatted
}
|
||||
279
internal/importer/utils.go
Normal file
279
internal/importer/utils.go
Normal file
@@ -0,0 +1,279 @@
|
||||
package importer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// IssueDataChanged checks if an issue's data has changed from the database version
|
||||
func IssueDataChanged(existing *types.Issue, updates map[string]interface{}) bool {
|
||||
fc := newFieldComparator()
|
||||
for key, newVal := range updates {
|
||||
if fc.checkFieldChanged(key, existing, newVal) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fieldComparator handles comparison logic for different field types.
// Its conversion closures (installed by newFieldComparator) normalize an
// interface{} update value into a canonical representation before comparing.
type fieldComparator struct {
	// strFrom converts string / *string / nil into a plain string; the
	// bool reports whether the value was convertible.
	strFrom func(v interface{}) (string, bool)
	// intFrom converts int / int32 / int64 / integral float64 into int64.
	intFrom func(v interface{}) (int64, bool)
}
|
||||
|
||||
func newFieldComparator() *fieldComparator {
|
||||
fc := &fieldComparator{}
|
||||
|
||||
fc.strFrom = func(v interface{}) (string, bool) {
|
||||
switch t := v.(type) {
|
||||
case string:
|
||||
return t, true
|
||||
case *string:
|
||||
if t == nil {
|
||||
return "", true
|
||||
}
|
||||
return *t, true
|
||||
case nil:
|
||||
return "", true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
fc.intFrom = func(v interface{}) (int64, bool) {
|
||||
switch t := v.(type) {
|
||||
case int:
|
||||
return int64(t), true
|
||||
case int32:
|
||||
return int64(t), true
|
||||
case int64:
|
||||
return t, true
|
||||
case float64:
|
||||
if t == float64(int64(t)) {
|
||||
return int64(t), true
|
||||
}
|
||||
return 0, false
|
||||
default:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
return fc
|
||||
}
|
||||
|
||||
func (fc *fieldComparator) equalStr(existingVal string, newVal interface{}) bool {
|
||||
s, ok := fc.strFrom(newVal)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return existingVal == s
|
||||
}
|
||||
|
||||
func (fc *fieldComparator) equalPtrStr(existing *string, newVal interface{}) bool {
|
||||
s, ok := fc.strFrom(newVal)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if existing == nil {
|
||||
return s == ""
|
||||
}
|
||||
return *existing == s
|
||||
}
|
||||
|
||||
func (fc *fieldComparator) equalStatus(existing types.Status, newVal interface{}) bool {
|
||||
switch t := newVal.(type) {
|
||||
case types.Status:
|
||||
return existing == t
|
||||
case string:
|
||||
return string(existing) == t
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (fc *fieldComparator) equalIssueType(existing types.IssueType, newVal interface{}) bool {
|
||||
switch t := newVal.(type) {
|
||||
case types.IssueType:
|
||||
return existing == t
|
||||
case string:
|
||||
return string(existing) == t
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (fc *fieldComparator) equalPriority(existing int, newVal interface{}) bool {
|
||||
newPriority, ok := fc.intFrom(newVal)
|
||||
return ok && int64(existing) == newPriority
|
||||
}
|
||||
|
||||
// checkFieldChanged reports whether the named update field differs from the
// corresponding field on the existing issue. Unknown field names are treated
// as unchanged (false) so unexpected keys never force a write.
func (fc *fieldComparator) checkFieldChanged(key string, existing *types.Issue, newVal interface{}) bool {
	switch key {
	case "title":
		return !fc.equalStr(existing.Title, newVal)
	case "description":
		return !fc.equalStr(existing.Description, newVal)
	case "status":
		return !fc.equalStatus(existing.Status, newVal)
	case "priority":
		return !fc.equalPriority(existing.Priority, newVal)
	case "issue_type":
		return !fc.equalIssueType(existing.IssueType, newVal)
	case "design":
		return !fc.equalStr(existing.Design, newVal)
	case "acceptance_criteria":
		return !fc.equalStr(existing.AcceptanceCriteria, newVal)
	case "notes":
		return !fc.equalStr(existing.Notes, newVal)
	case "assignee":
		return !fc.equalStr(existing.Assignee, newVal)
	case "external_ref":
		// Pointer-valued field: nil and "" are considered equivalent.
		return !fc.equalPtrStr(existing.ExternalRef, newVal)
	default:
		return false
	}
}
|
||||
|
||||
// RenameImportedIssuePrefixes renames all issues and their references to match the target prefix
|
||||
func RenameImportedIssuePrefixes(issues []*types.Issue, targetPrefix string) error {
|
||||
// Build a mapping of old IDs to new IDs
|
||||
idMapping := make(map[string]string)
|
||||
|
||||
for _, issue := range issues {
|
||||
oldPrefix := extractPrefix(issue.ID)
|
||||
if oldPrefix == "" {
|
||||
return fmt.Errorf("cannot rename issue %s: malformed ID (no hyphen found)", issue.ID)
|
||||
}
|
||||
|
||||
if oldPrefix != targetPrefix {
|
||||
// Extract the numeric part
|
||||
numPart := strings.TrimPrefix(issue.ID, oldPrefix+"-")
|
||||
|
||||
// Validate that the numeric part is actually numeric
|
||||
if numPart == "" || !isNumeric(numPart) {
|
||||
return fmt.Errorf("cannot rename issue %s: non-numeric suffix '%s'", issue.ID, numPart)
|
||||
}
|
||||
|
||||
newID := fmt.Sprintf("%s-%s", targetPrefix, numPart)
|
||||
idMapping[issue.ID] = newID
|
||||
}
|
||||
}
|
||||
|
||||
// Now update all issues and their references
|
||||
for _, issue := range issues {
|
||||
// Update the issue ID itself if it needs renaming
|
||||
if newID, ok := idMapping[issue.ID]; ok {
|
||||
issue.ID = newID
|
||||
}
|
||||
|
||||
// Update all text references in issue fields
|
||||
issue.Title = replaceIDReferences(issue.Title, idMapping)
|
||||
issue.Description = replaceIDReferences(issue.Description, idMapping)
|
||||
if issue.Design != "" {
|
||||
issue.Design = replaceIDReferences(issue.Design, idMapping)
|
||||
}
|
||||
if issue.AcceptanceCriteria != "" {
|
||||
issue.AcceptanceCriteria = replaceIDReferences(issue.AcceptanceCriteria, idMapping)
|
||||
}
|
||||
if issue.Notes != "" {
|
||||
issue.Notes = replaceIDReferences(issue.Notes, idMapping)
|
||||
}
|
||||
|
||||
// Update dependency references
|
||||
for i := range issue.Dependencies {
|
||||
if newID, ok := idMapping[issue.Dependencies[i].IssueID]; ok {
|
||||
issue.Dependencies[i].IssueID = newID
|
||||
}
|
||||
if newID, ok := idMapping[issue.Dependencies[i].DependsOnID]; ok {
|
||||
issue.Dependencies[i].DependsOnID = newID
|
||||
}
|
||||
}
|
||||
|
||||
// Update comment references
|
||||
for i := range issue.Comments {
|
||||
issue.Comments[i].Text = replaceIDReferences(issue.Comments[i].Text, idMapping)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// replaceIDReferences replaces all old issue ID references with new ones in text
|
||||
func replaceIDReferences(text string, idMapping map[string]string) string {
|
||||
if len(idMapping) == 0 {
|
||||
return text
|
||||
}
|
||||
|
||||
// Sort old IDs by length descending to handle longer IDs first
|
||||
oldIDs := make([]string, 0, len(idMapping))
|
||||
for oldID := range idMapping {
|
||||
oldIDs = append(oldIDs, oldID)
|
||||
}
|
||||
sort.Slice(oldIDs, func(i, j int) bool {
|
||||
return len(oldIDs[i]) > len(oldIDs[j])
|
||||
})
|
||||
|
||||
result := text
|
||||
for _, oldID := range oldIDs {
|
||||
newID := idMapping[oldID]
|
||||
result = replaceBoundaryAware(result, oldID, newID)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// replaceBoundaryAware replaces oldID with newID only when surrounded by boundaries
|
||||
func replaceBoundaryAware(text, oldID, newID string) string {
|
||||
if !strings.Contains(text, oldID) {
|
||||
return text
|
||||
}
|
||||
|
||||
var result strings.Builder
|
||||
i := 0
|
||||
for i < len(text) {
|
||||
// Find next occurrence
|
||||
idx := strings.Index(text[i:], oldID)
|
||||
if idx == -1 {
|
||||
result.WriteString(text[i:])
|
||||
break
|
||||
}
|
||||
|
||||
actualIdx := i + idx
|
||||
// Check boundary before
|
||||
beforeOK := actualIdx == 0 || isBoundary(text[actualIdx-1])
|
||||
// Check boundary after
|
||||
afterIdx := actualIdx + len(oldID)
|
||||
afterOK := afterIdx >= len(text) || isBoundary(text[afterIdx])
|
||||
|
||||
// Write up to this match
|
||||
result.WriteString(text[i:actualIdx])
|
||||
|
||||
if beforeOK && afterOK {
|
||||
// Valid match - replace
|
||||
result.WriteString(newID)
|
||||
} else {
|
||||
// Invalid match - keep original
|
||||
result.WriteString(oldID)
|
||||
}
|
||||
|
||||
i = afterIdx
|
||||
}
|
||||
|
||||
return result.String()
|
||||
}
|
||||
|
||||
// isBoundary reports whether c is a character that may delimit an issue ID
// inside free-form text: whitespace, sentence punctuation, or brackets.
func isBoundary(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r', ',', '.', '!', '?', ':', ';', '(', ')', '[', ']', '{', '}':
		return true
	}
	return false
}
|
||||
|
||||
// isNumeric reports whether s consists solely of ASCII digits.
// The empty string is (vacuously) considered numeric, matching the
// original loop-based implementation.
func isNumeric(s string) bool {
	return strings.IndexFunc(s, func(r rune) bool {
		return r < '0' || r > '9'
	}) == -1
}
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
|
||||
"github.com/steveyegge/beads/internal/autoimport"
|
||||
"github.com/steveyegge/beads/internal/compact"
|
||||
"github.com/steveyegge/beads/internal/importer"
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
@@ -2126,7 +2127,7 @@ func (s *Server) SetLastImportTime(t time.Time) {
|
||||
}
|
||||
|
||||
// checkAndAutoImportIfStale checks if JSONL is newer than last import and triggers auto-import
|
||||
// This fixes bd-158: daemon shows stale data after git pull
|
||||
// This fixes bd-132: daemon shows stale data after git pull
|
||||
func (s *Server) checkAndAutoImportIfStale(req *Request) error {
|
||||
// Get storage for this request
|
||||
store, err := s.getStorageForRequest(req)
|
||||
@@ -2143,12 +2144,29 @@ func (s *Server) checkAndAutoImportIfStale(req *Request) error {
|
||||
}
|
||||
dbPath := sqliteStore.Path()
|
||||
|
||||
// Check if JSONL is stale
|
||||
// Fast path: Check if JSONL is stale using cheap mtime check
|
||||
// This avoids reading/hashing JSONL on every request
|
||||
isStale, err := autoimport.CheckStaleness(ctx, store, dbPath)
|
||||
if err != nil || !isStale {
|
||||
return err
|
||||
}
|
||||
|
||||
// Single-flight guard: Only allow one import at a time
|
||||
// If import is already running, skip and let the request proceed
|
||||
if !s.importMu.TryLock() {
|
||||
if os.Getenv("BD_DEBUG") != "" {
|
||||
fmt.Fprintf(os.Stderr, "Debug: auto-import already in progress, skipping\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
defer s.importMu.Unlock()
|
||||
|
||||
// Double-check staleness after acquiring lock (another goroutine may have imported)
|
||||
isStale, err = autoimport.CheckStaleness(ctx, store, dbPath)
|
||||
if err != nil || !isStale {
|
||||
return err
|
||||
}
|
||||
|
||||
if os.Getenv("BD_DEBUG") != "" {
|
||||
fmt.Fprintf(os.Stderr, "Debug: daemon detected stale JSONL, auto-importing...\n")
|
||||
}
|
||||
@@ -2157,19 +2175,80 @@ func (s *Server) checkAndAutoImportIfStale(req *Request) error {
|
||||
notify := autoimport.NewStderrNotifier(os.Getenv("BD_DEBUG") != "")
|
||||
|
||||
importFunc := func(ctx context.Context, issues []*types.Issue) (created, updated int, idMapping map[string]string, err error) {
|
||||
// Use daemon's import via RPC - just return dummy values for now
|
||||
// Real implementation would trigger proper import through the storage layer
|
||||
// For now, log a notice - full implementation tracked in bd-128
|
||||
fmt.Fprintf(os.Stderr, "Notice: JSONL updated externally (e.g., git pull), auto-import in daemon pending full implementation\n")
|
||||
return 0, 0, nil, nil
|
||||
// Use the importer package to perform the actual import
|
||||
result, err := importer.ImportIssues(ctx, dbPath, store, issues, importer.Options{
|
||||
ResolveCollisions: true, // Auto-resolve collisions for auto-import
|
||||
RenameOnImport: true, // Auto-rename prefix mismatches
|
||||
// Note: SkipPrefixValidation is false by default, so we validate and rename
|
||||
})
|
||||
if err != nil {
|
||||
return 0, 0, nil, err
|
||||
}
|
||||
return result.Created, result.Updated, result.IDMapping, nil
|
||||
}
|
||||
|
||||
onChanged := func(needsFullExport bool) {
|
||||
// Daemon will handle export via its own mechanism
|
||||
// Mark dirty for next sync cycle
|
||||
// When IDs are remapped, trigger export so JSONL reflects the new IDs
|
||||
if needsFullExport {
|
||||
// Use a goroutine to avoid blocking the import
|
||||
go func() {
|
||||
if err := s.triggerExport(ctx, store, dbPath); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to export after auto-import: %v\n", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
return autoimport.AutoImportIfNewer(ctx, store, dbPath, notify, importFunc, onChanged)
|
||||
err = autoimport.AutoImportIfNewer(ctx, store, dbPath, notify, importFunc, onChanged)
|
||||
if err == nil {
|
||||
// Update last import time on success
|
||||
s.lastImportTime = time.Now()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// triggerExport exports all issues to JSONL after auto-import remaps IDs
|
||||
func (s *Server) triggerExport(ctx context.Context, store storage.Storage, dbPath string) error {
|
||||
// Find JSONL path using database directory
|
||||
dbDir := filepath.Dir(dbPath)
|
||||
pattern := filepath.Join(dbDir, "*.jsonl")
|
||||
matches, err := filepath.Glob(pattern)
|
||||
var jsonlPath string
|
||||
if err == nil && len(matches) > 0 {
|
||||
jsonlPath = matches[0]
|
||||
} else {
|
||||
jsonlPath = filepath.Join(dbDir, "issues.jsonl")
|
||||
}
|
||||
|
||||
// Get all issues from storage
|
||||
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
||||
if !ok {
|
||||
return fmt.Errorf("storage is not SQLiteStorage")
|
||||
}
|
||||
|
||||
// Export to JSONL (this will update the file with remapped IDs)
|
||||
allIssues, err := sqliteStore.SearchIssues(ctx, "", types.IssueFilter{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch issues for export: %w", err)
|
||||
}
|
||||
|
||||
// Write to JSONL file
|
||||
// Note: We reuse the export logic from the daemon's existing export mechanism
|
||||
// For now, this is a simple implementation - could be refactored to share with cmd/bd
|
||||
file, err := os.Create(jsonlPath) // #nosec G304 - controlled path from config
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create JSONL file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
encoder := json.NewEncoder(file)
|
||||
for _, issue := range allIssues {
|
||||
if err := encoder.Encode(issue); err != nil {
|
||||
return fmt.Errorf("failed to encode issue %s: %w", issue.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findJSONLPath finds the JSONL file path for the request's repository
|
||||
|
||||
@@ -577,8 +577,26 @@ func (m *MemoryStorage) GetAllDependencyRecords(ctx context.Context) (map[string
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetDirtyIssueHash returns the hash used for dirty-issue tracking.
// Memory storage keeps no such hashes, so this always returns "" with no
// error; it exists only to satisfy the storage interface.
func (m *MemoryStorage) GetDirtyIssueHash(ctx context.Context, issueID string) (string, error) {
	// Memory storage doesn't track dirty hashes, return empty string
	return "", nil
}
|
||||
|
||||
// GetExportHash returns the hash used for export tracking.
// Memory storage keeps no export hashes, so this always returns "" with no
// error; it exists only to satisfy the storage interface.
func (m *MemoryStorage) GetExportHash(ctx context.Context, issueID string) (string, error) {
	// Memory storage doesn't track export hashes, return empty string
	return "", nil
}
|
||||
|
||||
// SetExportHash records the hash used for export tracking.
// Memory storage keeps no export hashes, so this is a deliberate no-op that
// exists only to satisfy the storage interface.
func (m *MemoryStorage) SetExportHash(ctx context.Context, issueID, hash string) error {
	// Memory storage doesn't track export hashes, no-op
	return nil
}
|
||||
|
||||
// GetDependencyTree gets the dependency tree for an issue
|
||||
func (m *MemoryStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool) ([]*types.TreeNode, error) {
|
||||
func (m *MemoryStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool, reverse bool) ([]*types.TreeNode, error) {
|
||||
// Simplified implementation - just return direct dependencies
|
||||
deps, err := m.GetDependencies(ctx, issueID)
|
||||
if err != nil {
|
||||
|
||||
Reference in New Issue
Block a user