From b849f598d7d934738be0f23e71fd68e4a5917b1d Mon Sep 17 00:00:00 2001 From: Test Date: Wed, 21 Jan 2026 13:13:24 -0800 Subject: [PATCH 1/4] /{cmd,docs,internal}: support import export for dolt backends --- ...ackend_jsonl_roundtrip_integration_test.go | 259 ++++++ cmd/bd/doctor/database.go | 4 +- cmd/bd/doctor/fix/e2e_test.go | 10 +- cmd/bd/doctor/sync_divergence.go | 2 +- .../dolt_jsonl_roundtrip_integration_test.go | 288 ++++++ cmd/bd/export.go | 32 +- cmd/bd/federation.go | 10 +- cmd/bd/hook.go | 10 +- cmd/bd/import.go | 8 +- docs/CONFIG.md | 4 +- .../importer/backend_agnostic_import_test.go | 114 +++ internal/importer/importer.go | 853 +++++++++++++++--- internal/importer/importer_test.go | 56 +- internal/storage/dolt/credentials.go | 11 +- internal/storage/dolt/events.go | 29 +- internal/storage/dolt/server.go | 2 + internal/storage/dolt/transaction.go | 110 +++ internal/storage/memory/memory.go | 18 + internal/storage/sqlite/comments.go | 6 +- internal/storage/sqlite/import_tx.go | 115 +++ internal/storage/sqlite/transaction.go | 98 ++ internal/storage/storage.go | 8 + internal/storage/storage_test.go | 16 + 23 files changed, 1837 insertions(+), 226 deletions(-) create mode 100644 cmd/bd/cross_backend_jsonl_roundtrip_integration_test.go create mode 100644 cmd/bd/dolt_jsonl_roundtrip_integration_test.go create mode 100644 internal/importer/backend_agnostic_import_test.go create mode 100644 internal/storage/sqlite/import_tx.go diff --git a/cmd/bd/cross_backend_jsonl_roundtrip_integration_test.go b/cmd/bd/cross_backend_jsonl_roundtrip_integration_test.go new file mode 100644 index 00000000..12c9f51e --- /dev/null +++ b/cmd/bd/cross_backend_jsonl_roundtrip_integration_test.go @@ -0,0 +1,259 @@ +//go:build integration +// +build integration + +package main + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/steveyegge/beads/internal/types" +) + +func isDoltBackendUnavailable(out string) bool { + lower := 
strings.ToLower(out) + return strings.Contains(lower, "dolt") && (strings.Contains(lower, "not supported") || strings.Contains(lower, "not available") || strings.Contains(lower, "unknown")) +} + +func setupGitRepoForIntegration(t *testing.T, dir string) { + t.Helper() + if err := runCommandInDir(dir, "git", "init"); err != nil { + t.Fatalf("git init failed: %v", err) + } + _ = runCommandInDir(dir, "git", "config", "user.email", "test@example.com") + _ = runCommandInDir(dir, "git", "config", "user.name", "Test User") +} + +func TestSQLiteToDolt_JSONLRoundTrip(t *testing.T) { + if testing.Short() { + t.Skip("skipping slow integration test in short mode") + } + if runtime.GOOS == windowsOS { + t.Skip("cross-backend integration test not supported on windows") + } + + env := []string{ + "BEADS_TEST_MODE=1", + "BEADS_NO_DAEMON=1", + } + + // Workspace 1: SQLite create -> export JSONL + ws1 := createTempDirWithCleanup(t) + setupGitRepoForIntegration(t, ws1) + + // Explicitly initialize sqlite for clarity. + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil { + t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out) + } + + outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json") + if err != nil { + t.Fatalf("bd create A failed: %v\n%s", err, outA) + } + idA := parseCreateID(t, outA) + + outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json") + if err != nil { + t.Fatalf("bd create B failed: %v\n%s", err, outB) + } + idB := parseCreateID(t, outB) + + // Add label + comment + dependency. 
+ if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil { + t.Fatalf("bd label add failed: %v\n%s", err, out) + } + commentText := "Cross-backend round-trip" + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil { + t.Fatalf("bd comments add failed: %v\n%s", err, out) + } + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil { + t.Fatalf("bd dep add failed: %v\n%s", err, out) + } + + // Create tombstone via delete (SQLite supports tombstones). + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "delete", idB, "--force", "--reason", "test tombstone"); err != nil { + t.Fatalf("bd delete failed: %v\n%s", err, out) + } + + jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl") + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil { + t.Fatalf("bd export failed: %v\n%s", err, out) + } + + issues1 := readJSONLIssues(t, jsonl1) + if len(issues1) != 2 { + t.Fatalf("expected 2 issues in sqlite export (including tombstone), got %d", len(issues1)) + } + if issues1[idB].Status != types.StatusTombstone { + t.Fatalf("expected %s to be tombstone in sqlite export, got %q", idB, issues1[idB].Status) + } + ts1, ok := findCommentTimestampByText(issues1[idA], commentText) + if !ok || ts1.IsZero() { + t.Fatalf("expected comment on %s in sqlite export", idA) + } + + // Workspace 2: Dolt import JSONL -> export JSONL + ws2 := createTempDirWithCleanup(t) + setupGitRepoForIntegration(t, ws2) + + initOut, initErr := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet") + if initErr != nil { + if isDoltBackendUnavailable(initOut) { + t.Skipf("dolt backend not available: %s", initOut) + } + t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut) + } + + jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl") + data, err := os.ReadFile(jsonl1) + if err != nil { 
+ t.Fatalf("read sqlite export: %v", err) + } + if err := os.WriteFile(jsonl2in, data, 0o600); err != nil { + t.Fatalf("write dolt issues.jsonl: %v", err) + } + + if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil { + t.Fatalf("bd import (dolt) failed: %v\n%s", err, out) + } + + jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl") + if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil { + t.Fatalf("bd export (dolt) failed: %v\n%s", err, out) + } + + issues2 := readJSONLIssues(t, jsonl2out) + if len(issues2) != 2 { + t.Fatalf("expected 2 issues in dolt export, got %d", len(issues2)) + } + if issues2[idB].Status != types.StatusTombstone { + t.Fatalf("expected %s to be tombstone after import into dolt, got %q", idB, issues2[idB].Status) + } + ts2, ok := findCommentTimestampByText(issues2[idA], commentText) + if !ok { + t.Fatalf("expected comment on %s in dolt export", idA) + } + if !ts2.Equal(ts1) { + t.Fatalf("expected comment timestamp preserved across sqlite->dolt, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano)) + } +} + +func TestDoltToSQLite_JSONLRoundTrip(t *testing.T) { + if testing.Short() { + t.Skip("skipping slow integration test in short mode") + } + if runtime.GOOS == windowsOS { + t.Skip("cross-backend integration test not supported on windows") + } + + env := []string{ + "BEADS_TEST_MODE=1", + "BEADS_NO_DAEMON=1", + } + + // Workspace 1: Dolt create -> export JSONL + ws1 := createTempDirWithCleanup(t) + setupGitRepoForIntegration(t, ws1) + + initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet") + if initErr != nil { + if isDoltBackendUnavailable(initOut) { + t.Skipf("dolt backend not available: %s", initOut) + } + t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut) + } + + outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", 
"--json") + if err != nil { + t.Fatalf("bd create A failed: %v\n%s", err, outA) + } + idA := parseCreateID(t, outA) + + outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json") + if err != nil { + t.Fatalf("bd create B failed: %v\n%s", err, outB) + } + idB := parseCreateID(t, outB) + + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil { + t.Fatalf("bd label add failed: %v\n%s", err, out) + } + commentText := "Cross-backend round-trip" + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil { + t.Fatalf("bd comments add failed: %v\n%s", err, out) + } + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil { + t.Fatalf("bd dep add failed: %v\n%s", err, out) + } + + jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl") + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil { + t.Fatalf("bd export (dolt) failed: %v\n%s", err, out) + } + + issues1 := readJSONLIssues(t, jsonl1) + if len(issues1) != 2 { + t.Fatalf("expected 2 issues in dolt export, got %d", len(issues1)) + } + ts1, ok := findCommentTimestampByText(issues1[idA], commentText) + if !ok || ts1.IsZero() { + t.Fatalf("expected comment on %s in dolt export", idA) + } + + // Inject tombstone record for B into JSONL (Dolt backend may not support bd delete tombstones). 
+ now := time.Now().UTC() + issues1[idB].Status = types.StatusTombstone + issues1[idB].DeletedAt = &now + issues1[idB].DeletedBy = "test" + issues1[idB].DeleteReason = "test tombstone" + issues1[idB].OriginalType = string(issues1[idB].IssueType) + issues1[idB].SetDefaults() + + jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl") + writeJSONLIssues(t, jsonl1Tomb, issues1) + + // Workspace 2: SQLite import JSONL -> export JSONL + ws2 := createTempDirWithCleanup(t) + setupGitRepoForIntegration(t, ws2) + + if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil { + t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out) + } + + jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl") + data, err := os.ReadFile(jsonl1Tomb) + if err != nil { + t.Fatalf("read dolt export: %v", err) + } + if err := os.WriteFile(jsonl2in, data, 0o600); err != nil { + t.Fatalf("write sqlite issues.jsonl: %v", err) + } + + if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil { + t.Fatalf("bd import (sqlite) failed: %v\n%s", err, out) + } + + jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl") + if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil { + t.Fatalf("bd export (sqlite) failed: %v\n%s", err, out) + } + + issues2 := readJSONLIssues(t, jsonl2out) + if len(issues2) != 2 { + t.Fatalf("expected 2 issues in sqlite export, got %d", len(issues2)) + } + if issues2[idB].Status != types.StatusTombstone { + t.Fatalf("expected %s to be tombstone after import into sqlite, got %q", idB, issues2[idB].Status) + } + ts2, ok := findCommentTimestampByText(issues2[idA], commentText) + if !ok { + t.Fatalf("expected comment on %s in sqlite export", idA) + } + if !ts2.Equal(ts1) { + t.Fatalf("expected comment timestamp preserved across dolt->sqlite, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), 
ts2.Format(time.RFC3339Nano)) + } +} diff --git a/cmd/bd/doctor/database.go b/cmd/bd/doctor/database.go index 7647955d..d591fba5 100644 --- a/cmd/bd/doctor/database.go +++ b/cmd/bd/doctor/database.go @@ -510,7 +510,7 @@ func CheckDatabaseIntegrity(path string) DoctorCheck { func CheckDatabaseJSONLSync(path string) DoctorCheck { backend, beadsDir := getBackendAndBeadsDir(path) - // Dolt backend: JSONL is a derived compatibility artifact (export-only today). + // Dolt backend: JSONL is an optional compatibility artifact. // The SQLite-style import/export divergence checks don't apply. if backend == configfile.BackendDolt { // Find JSONL file (respects metadata.json override when set). @@ -545,7 +545,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck { Name: "DB-JSONL Sync", Status: StatusOK, Message: "N/A (dolt backend)", - Detail: "JSONL is derived from Dolt (export-only); import-only sync checks do not apply", + Detail: "Dolt sync is database-native; JSONL divergence checks do not apply (manual JSONL import/export is supported).", } } diff --git a/cmd/bd/doctor/fix/e2e_test.go b/cmd/bd/doctor/fix/e2e_test.go index 219bb7b0..8d5c4f70 100644 --- a/cmd/bd/doctor/fix/e2e_test.go +++ b/cmd/bd/doctor/fix/e2e_test.go @@ -780,14 +780,20 @@ func TestMergeDriverWithLockedConfig_E2E(t *testing.T) { dir := setupTestGitRepo(t) + gitDir := filepath.Join(dir, ".git") gitConfigPath := filepath.Join(dir, ".git", "config") - // Make git config read-only - if err := os.Chmod(gitConfigPath, 0444); err != nil { + // Make both .git directory and config file read-only to truly prevent writes. + // Git may otherwise write via lockfile+rename even if the config file itself is read-only. 
+ if err := os.Chmod(gitConfigPath, 0400); err != nil { t.Fatalf("failed to make config read-only: %v", err) } + if err := os.Chmod(gitDir, 0500); err != nil { + t.Fatalf("failed to make .git read-only: %v", err) + } defer func() { // Restore permissions for cleanup + _ = os.Chmod(gitDir, 0755) _ = os.Chmod(gitConfigPath, 0644) }() diff --git a/cmd/bd/doctor/sync_divergence.go b/cmd/bd/doctor/sync_divergence.go index adf3bc90..8091a474 100644 --- a/cmd/bd/doctor/sync_divergence.go +++ b/cmd/bd/doctor/sync_divergence.go @@ -68,7 +68,7 @@ func CheckSyncDivergence(path string) DoctorCheck { } // Check 2: SQLite last_import_time vs JSONL mtime (SQLite only). - // Dolt backend does not maintain SQLite metadata and does not support import-only sync. + // Dolt backend does not maintain SQLite metadata; this SQLite-only check doesn't apply. if backend == configfile.BackendSQLite { mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir) if mtimeIssue != nil { diff --git a/cmd/bd/dolt_jsonl_roundtrip_integration_test.go b/cmd/bd/dolt_jsonl_roundtrip_integration_test.go new file mode 100644 index 00000000..49e80c82 --- /dev/null +++ b/cmd/bd/dolt_jsonl_roundtrip_integration_test.go @@ -0,0 +1,288 @@ +//go:build integration +// +build integration + +package main + +import ( + "bufio" + "encoding/json" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "testing" + "time" + + "github.com/steveyegge/beads/internal/types" +) + +func parseCreateID(t *testing.T, out string) string { + t.Helper() + idx := strings.Index(out, "{") + if idx < 0 { + t.Fatalf("expected JSON in output, got:\n%s", out) + } + var m map[string]any + if err := json.Unmarshal([]byte(out[idx:]), &m); err != nil { + t.Fatalf("failed to parse create JSON: %v\n%s", err, out) + } + id, _ := m["id"].(string) + if id == "" { + t.Fatalf("missing id in create output:\n%s", out) + } + return id +} + +func readJSONLIssues(t *testing.T, path string) map[string]*types.Issue { + t.Helper() + f, err := os.Open(path) 
// #nosec G304 -- test-controlled path + if err != nil { + t.Fatalf("open %s: %v", path, err) + } + defer func() { _ = f.Close() }() + + scanner := bufio.NewScanner(f) + // allow larger issues + scanner.Buffer(make([]byte, 0, 64*1024), 2*1024*1024) + + out := make(map[string]*types.Issue) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" { + continue + } + var iss types.Issue + if err := json.Unmarshal([]byte(line), &iss); err != nil { + t.Fatalf("unmarshal JSONL line: %v\nline=%s", err, line) + } + iss.SetDefaults() + copy := iss + out[iss.ID] = © + } + if err := scanner.Err(); err != nil { + t.Fatalf("scan %s: %v", path, err) + } + return out +} + +func writeJSONLIssues(t *testing.T, path string, issues map[string]*types.Issue) { + t.Helper() + f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600) // #nosec G304 -- test-controlled path + if err != nil { + t.Fatalf("open %s for write: %v", path, err) + } + defer func() { _ = f.Close() }() + + ids := make([]string, 0, len(issues)) + for id := range issues { + ids = append(ids, id) + } + sort.Strings(ids) + + w := bufio.NewWriter(f) + for _, id := range ids { + iss := issues[id] + if iss == nil { + continue + } + b, err := json.Marshal(iss) + if err != nil { + t.Fatalf("marshal issue %s: %v", id, err) + } + if _, err := w.Write(append(b, '\n')); err != nil { + t.Fatalf("write issue %s: %v", id, err) + } + } + if err := w.Flush(); err != nil { + t.Fatalf("flush %s: %v", path, err) + } +} + +func findCommentTimestamp(iss *types.Issue, author, text string) (time.Time, bool) { + if iss == nil { + return time.Time{}, false + } + for _, c := range iss.Comments { + if c.Author == author && strings.TrimSpace(c.Text) == strings.TrimSpace(text) { + return c.CreatedAt, true + } + } + return time.Time{}, false +} + +func findCommentTimestampByText(iss *types.Issue, text string) (time.Time, bool) { + if iss == nil { + return time.Time{}, false + } + for _, c := range iss.Comments 
{ + if strings.TrimSpace(c.Text) == strings.TrimSpace(text) { + return c.CreatedAt, true + } + } + return time.Time{}, false +} + +func TestDoltJSONLRoundTrip_DepsLabelsCommentsTombstones(t *testing.T) { + if testing.Short() { + t.Skip("skipping slow integration test in short mode") + } + if runtime.GOOS == windowsOS { + t.Skip("dolt integration test not supported on windows") + } + + // Workspace 1: create data and export JSONL. + ws1 := createTempDirWithCleanup(t) + if err := runCommandInDir(ws1, "git", "init"); err != nil { + t.Fatalf("git init failed: %v", err) + } + _ = runCommandInDir(ws1, "git", "config", "user.email", "test@example.com") + _ = runCommandInDir(ws1, "git", "config", "user.name", "Test User") + + env := []string{ + "BEADS_TEST_MODE=1", + "BEADS_NO_DAEMON=1", + } + + initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet") + if initErr != nil { + lower := strings.ToLower(initOut) + if strings.Contains(lower, "dolt") && (strings.Contains(lower, "not supported") || strings.Contains(lower, "not available") || strings.Contains(lower, "unknown")) { + t.Skipf("dolt backend not available: %s", initOut) + } + t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut) + } + + outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json") + if err != nil { + t.Fatalf("bd create A failed: %v\n%s", err, outA) + } + idA := parseCreateID(t, outA) + + outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json") + if err != nil { + t.Fatalf("bd create B failed: %v\n%s", err, outB) + } + idB := parseCreateID(t, outB) + + // Add label + comment + dependency. 
+ if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil { + t.Fatalf("bd label add failed: %v\n%s", err, out) + } + commentText := "Hello from JSONL round-trip" + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil { + t.Fatalf("bd comments add failed: %v\n%s", err, out) + } + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil { + t.Fatalf("bd dep add failed: %v\n%s", err, out) + } + + jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl") + if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil { + t.Fatalf("bd export failed: %v\n%s", err, out) + } + + issues1 := readJSONLIssues(t, jsonl1) + if len(issues1) != 2 { + t.Fatalf("expected 2 issues in export1, got %d", len(issues1)) + } + if issues1[idA] == nil || issues1[idB] == nil { + t.Fatalf("expected exported issues to include %s and %s", idA, idB) + } + // Label present + foundUrgent := false + for _, l := range issues1[idA].Labels { + if l == "urgent" { + foundUrgent = true + break + } + } + if !foundUrgent { + t.Fatalf("expected label 'urgent' on %s in export1", idA) + } + // Dependency present + foundDep := false + for _, d := range issues1[idA].Dependencies { + if d.DependsOnID == idB { + foundDep = true + break + } + } + if !foundDep { + t.Fatalf("expected dependency %s -> %s in export1", idA, idB) + } + // Comment present + capture timestamp + ts1, ok := findCommentTimestampByText(issues1[idA], commentText) + if !ok || ts1.IsZero() { + t.Fatalf("expected comment on %s in export1", idA) + } + + // Create a tombstone record in JSONL for issue B (Dolt backend may not support + // creating tombstones via `bd delete`, but it must round-trip tombstones via JSONL). 
+ now := time.Now().UTC() + issues1[idB].Status = types.StatusTombstone + issues1[idB].DeletedAt = &now + issues1[idB].DeletedBy = "test" + issues1[idB].DeleteReason = "test tombstone" + issues1[idB].OriginalType = string(issues1[idB].IssueType) + issues1[idB].SetDefaults() + + jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl") + writeJSONLIssues(t, jsonl1Tomb, issues1) + issues1Tomb := readJSONLIssues(t, jsonl1Tomb) + if issues1Tomb[idB].Status != types.StatusTombstone { + t.Fatalf("expected %s to be tombstone in tombstone JSONL, got %q", idB, issues1Tomb[idB].Status) + } + + // Workspace 2: import JSONL into fresh Dolt DB and re-export. + ws2 := createTempDirWithCleanup(t) + if err := runCommandInDir(ws2, "git", "init"); err != nil { + t.Fatalf("git init failed: %v", err) + } + _ = runCommandInDir(ws2, "git", "config", "user.email", "test@example.com") + _ = runCommandInDir(ws2, "git", "config", "user.name", "Test User") + + initOut2, initErr2 := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet") + if initErr2 != nil { + lower := strings.ToLower(initOut2) + if strings.Contains(lower, "dolt") && (strings.Contains(lower, "not supported") || strings.Contains(lower, "not available") || strings.Contains(lower, "unknown")) { + t.Skipf("dolt backend not available: %s", initOut2) + } + t.Fatalf("bd init --backend dolt (ws2) failed: %v\n%s", initErr2, initOut2) + } + + // Copy JSONL into ws2 beads dir + jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl") + data, err := os.ReadFile(jsonl1Tomb) + if err != nil { + t.Fatalf("read export1: %v", err) + } + if err := os.WriteFile(jsonl2in, data, 0o600); err != nil { + t.Fatalf("write ws2 issues.jsonl: %v", err) + } + + if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil { + t.Fatalf("bd import failed: %v\n%s", err, out) + } + + jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl") + if out, err := 
runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil { + t.Fatalf("bd export (ws2) failed: %v\n%s", err, out) + } + + issues2 := readJSONLIssues(t, jsonl2out) + if len(issues2) != 2 { + t.Fatalf("expected 2 issues in export2 (including tombstone), got %d", len(issues2)) + } + if issues2[idB].Status != types.StatusTombstone { + t.Fatalf("expected %s to be tombstone in export2, got %q", idB, issues2[idB].Status) + } + // Ensure comment timestamp preserved across import/export + ts2, ok := findCommentTimestampByText(issues2[idA], commentText) + if !ok { + t.Fatalf("expected comment on %s in export2", idA) + } + if !ts2.Equal(ts1) { + t.Fatalf("expected comment timestamp preserved, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano)) + } +} diff --git a/cmd/bd/export.go b/cmd/bd/export.go index 5de0c795..972062ab 100644 --- a/cmd/bd/export.go +++ b/cmd/bd/export.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/cobra" "github.com/steveyegge/beads/internal/debug" + "github.com/steveyegge/beads/internal/storage/factory" "github.com/steveyegge/beads/internal/storage/sqlite" "github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/util" @@ -181,7 +182,10 @@ Examples: fmt.Fprintf(os.Stderr, "Error: no database path found\n") os.Exit(1) } - store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout) + beadsDir := filepath.Dir(dbPath) + store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{ + LockTimeout: lockTimeout, + }) if err != nil { fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err) os.Exit(1) @@ -396,14 +400,26 @@ Examples: issue.Dependencies = allDeps[issue.ID] } - // Populate labels for all issues + // Populate labels and comments for all issues (batch APIs) + ids := make([]string, 0, len(issues)) for _, issue := range issues { - labels, err := store.GetLabels(ctx, issue.ID) - if err != nil { - fmt.Fprintf(os.Stderr, "Error getting labels 
for %s: %v\n", issue.ID, err) - os.Exit(1) - } - issue.Labels = labels + ids = append(ids, issue.ID) + } + + labelsMap, err := store.GetLabelsForIssues(ctx, ids) + if err != nil { + fmt.Fprintf(os.Stderr, "Error getting labels: %v\n", err) + os.Exit(1) + } + commentsMap, err := store.GetCommentsForIssues(ctx, ids) + if err != nil { + fmt.Fprintf(os.Stderr, "Error getting comments: %v\n", err) + os.Exit(1) + } + + for _, issue := range issues { + issue.Labels = labelsMap[issue.ID] + issue.Comments = commentsMap[issue.ID] } // Open output diff --git a/cmd/bd/federation.go b/cmd/bd/federation.go index 0733a027..67a9abf2 100644 --- a/cmd/bd/federation.go +++ b/cmd/bd/federation.go @@ -282,10 +282,10 @@ func runFederationStatus(cmd *cobra.Command, args []string) { // Collect status for each peer type peerStatus struct { - Status *storage.SyncStatus - URL string - Reachable bool - ReachError string + Status *storage.SyncStatus + URL string + Reachable bool + ReachError string } var peerStatuses []peerStatus @@ -374,7 +374,7 @@ func runFederationAddPeer(cmd *cobra.Command, args []string) { password := federationPassword if federationUser != "" && password == "" { fmt.Fprint(os.Stderr, "Password: ") - pwBytes, err := term.ReadPassword(int(syscall.Stdin)) + pwBytes, err := term.ReadPassword(syscall.Stdin) fmt.Fprintln(os.Stderr) // newline after password if err != nil { FatalErrorRespectJSON("failed to read password: %v", err) diff --git a/cmd/bd/hook.go b/cmd/bd/hook.go index 90e68213..5bf5c0e3 100644 --- a/cmd/bd/hook.go +++ b/cmd/bd/hook.go @@ -638,7 +638,7 @@ func hookPostMergeDolt(beadsDir string) int { doltStore, ok := store.(interface { Branch(ctx context.Context, name string) error Checkout(ctx context.Context, branch string) error - Merge(ctx context.Context, branch string) error + Merge(ctx context.Context, branch string) ([]storage.Conflict, error) Commit(ctx context.Context, message string) error CurrentBranch(ctx context.Context) (string, error) 
DeleteBranch(ctx context.Context, branch string) error @@ -691,10 +691,15 @@ func hookPostMergeDolt(beadsDir string) int { } // Merge import branch (Dolt provides cell-level merge) - if err := doltStore.Merge(ctx, importBranch); err != nil { + conflicts, err := doltStore.Merge(ctx, importBranch) + if err != nil { fmt.Fprintf(os.Stderr, "Warning: could not merge import branch: %v\n", err) return 0 } + if len(conflicts) > 0 { + fmt.Fprintf(os.Stderr, "Warning: %d conflict(s) detected during Dolt merge; resolve with 'bd federation conflicts' or Dolt conflict tooling\n", len(conflicts)) + // Best-effort: still return 0 to avoid blocking git merge, consistent with other hook warnings. + } // Commit the merge if err := doltStore.Commit(ctx, "Merge JSONL import"); err != nil { @@ -839,7 +844,6 @@ func hookPostCheckout(args []string) int { // ============================================================================= // importFromJSONLToStore imports issues from JSONL to a store. -// This is a placeholder - the actual implementation should use the store's methods. func importFromJSONLToStore(ctx context.Context, store storage.Storage, jsonlPath string) error { // Parse JSONL into issues // #nosec G304 - jsonlPath is derived from beadsDir (trusted workspace path) diff --git a/cmd/bd/import.go b/cmd/bd/import.go index ddf8bf8e..0c76e121 100644 --- a/cmd/bd/import.go +++ b/cmd/bd/import.go @@ -16,7 +16,7 @@ import ( "github.com/spf13/cobra" "github.com/steveyegge/beads/internal/beads" "github.com/steveyegge/beads/internal/debug" - "github.com/steveyegge/beads/internal/storage/sqlite" + "github.com/steveyegge/beads/internal/storage/factory" "github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/utils" "golang.org/x/term" @@ -74,10 +74,12 @@ NOTE: Import requires direct database access and does not work with daemon mode. 
daemonClient = nil var err error - store, err = sqlite.New(rootCtx, dbPath) + beadsDir := filepath.Dir(dbPath) + store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{ + LockTimeout: lockTimeout, + }) if err != nil { // Check for fresh clone scenario - beadsDir := filepath.Dir(dbPath) if handleFreshCloneError(err, beadsDir) { os.Exit(1) } diff --git a/docs/CONFIG.md b/docs/CONFIG.md index adb0e2e3..de754ce1 100644 --- a/docs/CONFIG.md +++ b/docs/CONFIG.md @@ -89,7 +89,7 @@ The sync mode controls how beads synchronizes data with git and/or Dolt remotes. |------|-------------| | `git-portable` | (default) Export JSONL on push, import on pull. Standard git-based workflow. | | `realtime` | Export JSONL on every database change. Legacy behavior, higher I/O. | -| `dolt-native` | Use Dolt remotes directly. No JSONL needed - Dolt handles sync. | +| `dolt-native` | Use Dolt remotes directly for sync. JSONL is not used for sync (but manual `bd import` / `bd export` still work). | | `belt-and-suspenders` | Both Dolt remote AND JSONL backup. Maximum redundancy. | #### Sync Triggers @@ -143,7 +143,7 @@ federation: - **git-portable** (default): Best for most teams. JSONL is committed to git, works with any git hosting. - **realtime**: Use when you need instant JSONL updates (e.g., file watchers, CI triggers on JSONL changes). -- **dolt-native**: Use when you have Dolt infrastructure and want database-level sync without JSONL. +- **dolt-native**: Use when you have Dolt infrastructure and want database-level sync; JSONL remains available for portability/audits/manual workflows. - **belt-and-suspenders**: Use for critical data where you want both Dolt sync AND git-portable backup. 
### Example Config File diff --git a/internal/importer/backend_agnostic_import_test.go b/internal/importer/backend_agnostic_import_test.go new file mode 100644 index 00000000..6fd349fc --- /dev/null +++ b/internal/importer/backend_agnostic_import_test.go @@ -0,0 +1,114 @@ +//go:build !integration +// +build !integration + +package importer + +import ( + "context" + "testing" + "time" + + "github.com/steveyegge/beads/internal/storage/memory" + "github.com/steveyegge/beads/internal/types" +) + +func TestImportIssues_BackendAgnostic_DepsLabelsCommentsTombstone(t *testing.T) { + ctx := context.Background() + store := memory.New("") + if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil { + t.Fatalf("set issue_prefix: %v", err) + } + + commentTS := time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC) + deletedTS := time.Date(2021, 2, 3, 4, 5, 6, 7, time.UTC) + + issueA := &types.Issue{ + ID: "test-1", + Title: "Issue A", + IssueType: types.TypeTask, + Status: types.StatusOpen, + Priority: 2, + Labels: []string{"urgent"}, + Dependencies: []*types.Dependency{ + {IssueID: "test-1", DependsOnID: "test-2", Type: types.DepBlocks}, + }, + Comments: []*types.Comment{ + {Author: "tester", Text: "hello", CreatedAt: commentTS}, + }, + } + issueB := &types.Issue{ + ID: "test-2", + Title: "Issue B", + IssueType: types.TypeTask, + Status: types.StatusTombstone, + Priority: 4, + DeletedAt: &deletedTS, + DeletedBy: "tester", + DeleteReason: "bye", + OriginalType: string(types.TypeTask), + Description: "tombstone", + ContentHash: "", + Dependencies: nil, + Labels: nil, + Comments: nil, + Assignee: "", + Owner: "", + CreatedBy: "", + SourceSystem: "", + ExternalRef: nil, + ClosedAt: nil, + CompactedAt: nil, + DeferUntil: nil, + LastActivity: nil, + QualityScore: nil, + Validations: nil, + BondedFrom: nil, + Waiters: nil, + } + + res, err := ImportIssues(ctx, "", store, []*types.Issue{issueA, issueB}, Options{OrphanHandling: OrphanAllow}) + if err != nil { + t.Fatalf("ImportIssues: 
%v", err) + } + if res.Created != 2 { + t.Fatalf("expected Created=2, got %d", res.Created) + } + + labels, err := store.GetLabels(ctx, "test-1") + if err != nil { + t.Fatalf("GetLabels: %v", err) + } + if len(labels) != 1 || labels[0] != "urgent" { + t.Fatalf("expected labels [urgent], got %v", labels) + } + + deps, err := store.GetDependencyRecords(ctx, "test-1") + if err != nil { + t.Fatalf("GetDependencyRecords: %v", err) + } + if len(deps) != 1 || deps[0].DependsOnID != "test-2" || deps[0].Type != types.DepBlocks { + t.Fatalf("expected dependency test-1 blocks test-2, got %#v", deps) + } + + comments, err := store.GetIssueComments(ctx, "test-1") + if err != nil { + t.Fatalf("GetIssueComments: %v", err) + } + if len(comments) != 1 { + t.Fatalf("expected 1 comment, got %d", len(comments)) + } + if !comments[0].CreatedAt.Equal(commentTS) { + t.Fatalf("expected comment timestamp preserved (%s), got %s", commentTS.Format(time.RFC3339Nano), comments[0].CreatedAt.Format(time.RFC3339Nano)) + } + + b, err := store.GetIssue(ctx, "test-2") + if err != nil { + t.Fatalf("GetIssue: %v", err) + } + if b.Status != types.StatusTombstone { + t.Fatalf("expected tombstone status, got %q", b.Status) + } + if b.DeletedAt == nil || !b.DeletedAt.Equal(deletedTS) { + t.Fatalf("expected DeletedAt preserved (%s), got %#v", deletedTS.Format(time.RFC3339Nano), b.DeletedAt) + } +} diff --git a/internal/importer/importer.go b/internal/importer/importer.go index 49e86933..114d95cd 100644 --- a/internal/importer/importer.go +++ b/internal/importer/importer.go @@ -1,7 +1,9 @@ package importer import ( + "bufio" "context" + "encoding/json" "fmt" "os" "path/filepath" @@ -18,29 +20,30 @@ import ( "github.com/steveyegge/beads/internal/utils" ) -// OrphanHandling is an alias to sqlite.OrphanHandling for convenience -type OrphanHandling = sqlite.OrphanHandling +// OrphanHandling defines how to handle hierarchical child issues whose parents are missing. 
+// This mirrors the string values historically used by the SQLite backend config. +type OrphanHandling string const ( // OrphanStrict fails import on missing parent (safest) - OrphanStrict = sqlite.OrphanStrict + OrphanStrict OrphanHandling = "strict" // OrphanResurrect auto-resurrects missing parents from JSONL history - OrphanResurrect = sqlite.OrphanResurrect + OrphanResurrect OrphanHandling = "resurrect" // OrphanSkip skips orphaned issues with warning - OrphanSkip = sqlite.OrphanSkip + OrphanSkip OrphanHandling = "skip" // OrphanAllow imports orphans without validation (default, works around bugs) - OrphanAllow = sqlite.OrphanAllow + OrphanAllow OrphanHandling = "allow" ) // Options contains import configuration type Options struct { - DryRun bool // Preview changes without applying them - SkipUpdate bool // Skip updating existing issues (create-only mode) - Strict bool // Fail on any error (dependencies, labels, etc.) - RenameOnImport bool // Rename imported issues to match database prefix - SkipPrefixValidation bool // Skip prefix validation (for auto-import) - OrphanHandling OrphanHandling // How to handle missing parent issues (default: allow) - ClearDuplicateExternalRefs bool // Clear duplicate external_ref values instead of erroring + DryRun bool // Preview changes without applying them + SkipUpdate bool // Skip updating existing issues (create-only mode) + Strict bool // Fail on any error (dependencies, labels, etc.) 
+ RenameOnImport bool // Rename imported issues to match database prefix + SkipPrefixValidation bool // Skip prefix validation (for auto-import) + OrphanHandling OrphanHandling // How to handle missing parent issues (default: allow) + ClearDuplicateExternalRefs bool // Clear duplicate external_ref values instead of erroring ProtectLocalExportIDs map[string]time.Time // IDs from left snapshot with timestamps for timestamp-aware protection (GH#865) } @@ -83,6 +86,10 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss MismatchPrefixes: make(map[string]int), } + if store == nil { + return nil, fmt.Errorf("import requires an initialized storage backend") + } + // Normalize Linear external_refs to canonical form to avoid slug-based duplicates. for _, issue := range issues { if issue.ExternalRef == nil || *issue.ExternalRef == "" { @@ -110,15 +117,6 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss } } - // Get or create SQLite store - sqliteStore, needCloseStore, err := getOrCreateStore(ctx, dbPath, store) - if err != nil { - return nil, err - } - if needCloseStore { - defer func() { _ = sqliteStore.Close() }() - } - // GH#686: In multi-repo mode, skip prefix validation for all issues. // Issues from additional repos have their own prefixes which are expected and correct. 
if config.GetMultiRepoConfig() != nil && !opts.SkipPrefixValidation { @@ -128,18 +126,29 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss // Clear export_hashes before import to prevent staleness // Import operations may add/update issues, so export_hashes entries become invalid if !opts.DryRun { - if err := sqliteStore.ClearAllExportHashes(ctx); err != nil { + if err := store.ClearAllExportHashes(ctx); err != nil { fmt.Fprintf(os.Stderr, "Warning: failed to clear export_hashes before import: %v\n", err) } } // Read orphan handling from config if not explicitly set if opts.OrphanHandling == "" { - opts.OrphanHandling = sqliteStore.GetOrphanHandling(ctx) + value, err := store.GetConfig(ctx, "import.orphan_handling") + if err == nil && value != "" { + switch OrphanHandling(value) { + case OrphanStrict, OrphanResurrect, OrphanSkip, OrphanAllow: + opts.OrphanHandling = OrphanHandling(value) + default: + opts.OrphanHandling = OrphanAllow + } + } else { + opts.OrphanHandling = OrphanAllow + } } // Check and handle prefix mismatches - issues, err = handlePrefixMismatch(ctx, sqliteStore, issues, opts, result) + var err error + issues, err = handlePrefixMismatch(ctx, store, issues, opts, result) if err != nil { return result, err } @@ -150,7 +159,7 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss } // Detect and resolve collisions - issues, err = detectUpdates(ctx, sqliteStore, issues, opts, result) + issues, err = detectUpdates(ctx, store, issues, opts, result) if err != nil { return result, err } @@ -158,61 +167,53 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss return result, nil } - // Upsert issues (create new or update existing) - if err := upsertIssues(ctx, sqliteStore, issues, opts, result); err != nil { - return nil, err - } - - // Import dependencies - if err := importDependencies(ctx, sqliteStore, issues, opts, result); err != nil { - return nil, err - } - - 
// Import labels - if err := importLabels(ctx, sqliteStore, issues, opts); err != nil { - return nil, err - } - - // Import comments - if err := importComments(ctx, sqliteStore, issues, opts); err != nil { - return nil, err - } - - // Checkpoint WAL to ensure data persistence and reduce WAL file size - if err := sqliteStore.CheckpointWAL(ctx); err != nil { - // Non-fatal - just log warning - fmt.Fprintf(os.Stderr, "Warning: failed to checkpoint WAL: %v\n", err) + // Apply changes atomically when transactions are supported. + if err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { + // Upsert issues (create new or update existing) + if err := upsertIssuesTx(ctx, tx, store, issues, opts, result); err != nil { + return err + } + // Import dependencies + if err := importDependenciesTx(ctx, tx, issues, opts, result); err != nil { + return err + } + // Import labels + if err := importLabelsTx(ctx, tx, issues, opts); err != nil { + return err + } + // Import comments (timestamp-preserving) + if err := importCommentsTx(ctx, tx, issues, opts); err != nil { + return err + } + return nil + }); err != nil { + // Some backends (e.g., --no-db) don't support transactions. + // Fall back to non-transactional behavior in that case. 
+ if strings.Contains(err.Error(), "not supported") { + if err := upsertIssues(ctx, store, issues, opts, result); err != nil { + return nil, err + } + if err := importDependencies(ctx, store, issues, opts, result); err != nil { + return nil, err + } + if err := importLabels(ctx, store, issues, opts); err != nil { + return nil, err + } + if err := importComments(ctx, store, issues, opts); err != nil { + return nil, err + } + } else { + return nil, err + } } return result, nil } -// getOrCreateStore returns an existing storage or creates a new one -func getOrCreateStore(ctx context.Context, dbPath string, store storage.Storage) (*sqlite.SQLiteStorage, bool, error) { - if store != nil { - sqliteStore, ok := store.(*sqlite.SQLiteStorage) - if !ok { - return nil, false, fmt.Errorf("import requires SQLite storage backend") - } - return sqliteStore, false, nil - } - - // Open direct connection for daemon mode - if dbPath == "" { - return nil, false, fmt.Errorf("database path not set") - } - sqliteStore, err := sqlite.New(ctx, dbPath) - if err != nil { - return nil, false, fmt.Errorf("failed to open database: %w", err) - } - - return sqliteStore, true, nil -} - // handlePrefixMismatch checks and handles prefix mismatches. // Returns a filtered issues slice with tombstoned issues having wrong prefixes removed. 
-func handlePrefixMismatch(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options, result *Result) ([]*types.Issue, error) { - configuredPrefix, err := sqliteStore.GetConfig(ctx, "issue_prefix") +func handlePrefixMismatch(ctx context.Context, store storage.Storage, issues []*types.Issue, opts Options, result *Result) ([]*types.Issue, error) { + configuredPrefix, err := store.GetConfig(ctx, "issue_prefix") if err != nil { return nil, fmt.Errorf("failed to get configured prefix: %w", err) } @@ -228,10 +229,10 @@ func handlePrefixMismatch(ctx context.Context, sqliteStore *sqlite.SQLiteStorage result.ExpectedPrefix = configuredPrefix // Read allowed_prefixes config for additional valid prefixes (e.g., mol-*) - allowedPrefixesConfig, _ := sqliteStore.GetConfig(ctx, "allowed_prefixes") + allowedPrefixesConfig, _ := store.GetConfig(ctx, "allowed_prefixes") // Get beads directory from database path for route lookup - beadsDir := filepath.Dir(sqliteStore.Path()) + beadsDir := filepath.Dir(store.Path()) // GH#686: In multi-repo mode, allow all prefixes (nil = allow all) // Also include prefixes from routes.jsonl for multi-rig setups (Gas Town) @@ -321,33 +322,39 @@ func handlePrefixMismatch(ctx context.Context, sqliteStore *sqlite.SQLiteStorage } // detectUpdates detects same-ID scenarios (which are updates with hash IDs, not collisions) -func detectUpdates(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options, result *Result) ([]*types.Issue, error) { - // Phase 1: Detect (read-only) - collisionResult, err := sqlite.DetectCollisions(ctx, sqliteStore, issues) +func detectUpdates(ctx context.Context, store storage.Storage, issues []*types.Issue, opts Options, result *Result) ([]*types.Issue, error) { + // Backend-agnostic collision detection: + // "collision" here means: same ID exists but content hash differs. 
+ dbIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true}) if err != nil { return nil, fmt.Errorf("collision detection failed: %w", err) } + dbByID := buildIDMap(dbIssues) - result.Collisions = len(collisionResult.Collisions) - for _, collision := range collisionResult.Collisions { - result.CollisionIDs = append(result.CollisionIDs, collision.ID) + newCount := 0 + exactCount := 0 + collisionCount := 0 + for _, incoming := range issues { + existing, ok := dbByID[incoming.ID] + if !ok || existing == nil { + newCount++ + continue + } + // Exact content match is idempotent. + if existing.ContentHash != "" && incoming.ContentHash != "" && existing.ContentHash == incoming.ContentHash { + exactCount++ + continue + } + // Treat same ID + different content as an update candidate. + collisionCount++ + result.CollisionIDs = append(result.CollisionIDs, incoming.ID) } - // With hash IDs, "collisions" (same ID, different content) are actually UPDATES - // Hash IDs are based on creation content and remain stable across updates - // So same ID + different fields = normal update operation, not a collision - // The collisionResult.Collisions list represents issues that *may* be updated - // Note: We don't pre-count updates here - upsertIssues will count them after - // checking timestamps to ensure we only update when incoming is newer - - // Phase 4: Renames removed - obsolete with hash IDs - // Hash-based IDs are content-addressed, so renames don't occur - + result.Collisions = collisionCount if opts.DryRun { - result.Created = len(collisionResult.NewIssues) + len(collisionResult.Renames) - result.Unchanged = len(collisionResult.ExactMatches) + result.Created = newCount + result.Unchanged = exactCount } - return issues, nil } @@ -373,7 +380,7 @@ func buildIDMap(issues []*types.Issue) map[string]*types.Issue { // handleRename handles content match with different IDs (rename detected) // Returns the old ID that was deleted (if any), or empty string if 
no deletion occurred -func handleRename(ctx context.Context, s *sqlite.SQLiteStorage, existing *types.Issue, incoming *types.Issue) (string, error) { +func handleRename(ctx context.Context, s storage.Storage, existing *types.Issue, incoming *types.Issue) (string, error) { // Check if target ID already exists with the same content (race condition) // This can happen when multiple clones import the same rename simultaneously targetIssue, err := s.GetIssue(ctx, incoming.ID) @@ -493,14 +500,11 @@ func handleRename(ctx context.Context, s *sqlite.SQLiteStorage, existing *types. // Create with new ID if err := s.CreateIssue(ctx, incoming, "import-rename"); err != nil { - // If UNIQUE constraint error, it's likely another clone created it concurrently - if sqlite.IsUniqueConstraintError(err) { - // Check if target exists with same content - targetIssue, getErr := s.GetIssue(ctx, incoming.ID) - if getErr == nil && targetIssue != nil && targetIssue.ComputeContentHash() == incoming.ComputeContentHash() { - // Same content - rename already complete, this is OK - return oldID, nil - } + // Another writer may have created the target concurrently. If the target now exists + // with the same content, treat the rename as already complete. + targetIssue, getErr := s.GetIssue(ctx, incoming.ID) + if getErr == nil && targetIssue != nil && targetIssue.ComputeContentHash() == incoming.ComputeContentHash() { + return oldID, nil } return "", fmt.Errorf("failed to create renamed issue %s: %w", incoming.ID, err) } @@ -512,10 +516,10 @@ func handleRename(ctx context.Context, s *sqlite.SQLiteStorage, existing *types. 
} // upsertIssues creates new issues or updates existing ones using content-first matching -func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options, result *Result) error { +func upsertIssues(ctx context.Context, store storage.Storage, issues []*types.Issue, opts Options, result *Result) error { // Get all DB issues once - include tombstones to prevent UNIQUE constraint violations // when trying to create issues that were previously deleted - dbIssues, err := sqliteStore.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true}) + dbIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true}) if err != nil { return fmt.Errorf("failed to get DB issues: %w", err) } @@ -625,7 +629,7 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues // Only update if data actually changed if IssueDataChanged(existing, updates) { - if err := sqliteStore.UpdateIssue(ctx, existing.ID, updates, "import"); err != nil { + if err := store.UpdateIssue(ctx, existing.ID, updates, "import"); err != nil { return fmt.Errorf("error updating issue %s (matched by external_ref): %w", existing.ID, err) } result.Updated++ @@ -658,7 +662,7 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues result.Skipped++ } else if !opts.SkipUpdate { // Same prefix, different ID suffix - this is a true rename - deletedID, err := handleRename(ctx, sqliteStore, existing, incoming) + deletedID, err := handleRename(ctx, store, existing, incoming) if err != nil { return fmt.Errorf("failed to handle rename %s -> %s: %w", existing.ID, incoming.ID, err) } @@ -730,7 +734,7 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues // Only update if data actually changed if IssueDataChanged(existingWithID, updates) { - if err := sqliteStore.UpdateIssue(ctx, incoming.ID, updates, "import"); err != nil { + if err := store.UpdateIssue(ctx, incoming.ID, 
updates, "import"); err != nil { return fmt.Errorf("error updating issue %s: %w", incoming.ID, err) } result.Updated++ @@ -748,13 +752,11 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues // Filter out orphaned issues if orphan_handling is set to skip // Pre-filter before batch creation to prevent orphans from being created then ID-cleared - if opts.OrphanHandling == sqlite.OrphanSkip { + if opts.OrphanHandling == OrphanSkip { var filteredNewIssues []*types.Issue for _, issue := range newIssues { // Check if this is a hierarchical child whose parent doesn't exist - if strings.Contains(issue.ID, ".") { - lastDot := strings.LastIndex(issue.ID, ".") - parentID := issue.ID[:lastDot] + if isHierarchical, parentID := isHierarchicalID(issue.ID); isHierarchical { // Check if parent exists in either existing DB issues or in newIssues batch var parentExists bool @@ -784,12 +786,35 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues newIssues = filteredNewIssues } + // OrphanStrict: fail-fast if any new hierarchical child has no parent in DB or import batch. + if opts.OrphanHandling == OrphanStrict { + newIDSet := make(map[string]bool, len(newIssues)) + for _, issue := range newIssues { + newIDSet[issue.ID] = true + } + for _, issue := range newIssues { + if isHierarchical, parentID := isHierarchicalID(issue.ID); isHierarchical { + if dbByID[parentID] == nil && !newIDSet[parentID] { + return fmt.Errorf("parent issue %s does not exist (strict mode)", parentID) + } + } + } + } + + // OrphanResurrect: if any hierarchical parents are missing, attempt to resurrect them + // from local JSONL history by creating tombstone parents (status=closed). 
+ if opts.OrphanHandling == OrphanResurrect { + if err := addResurrectedParents(store, dbByID, issues, &newIssues); err != nil { + return err + } + } + // Batch create all new issues // Sort by hierarchy depth to ensure parents are created before children if len(newIssues) > 0 { sort.Slice(newIssues, func(i, j int) bool { - depthI := strings.Count(newIssues[i].ID, ".") - depthJ := strings.Count(newIssues[j].ID, ".") + depthI := hierarchyDepth(newIssues[i].ID) + depthJ := hierarchyDepth(newIssues[j].ID) if depthI != depthJ { return depthI < depthJ // Shallower first } @@ -800,17 +825,30 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues for depth := 0; depth <= 3; depth++ { var batchForDepth []*types.Issue for _, issue := range newIssues { - if strings.Count(issue.ID, ".") == depth { + if hierarchyDepth(issue.ID) == depth { batchForDepth = append(batchForDepth, issue) } } if len(batchForDepth) > 0 { - batchOpts := sqlite.BatchCreateOptions{ - OrphanHandling: opts.OrphanHandling, - SkipPrefixValidation: opts.SkipPrefixValidation, + // Prefer a backend-specific import/batch API when available, so we can honor + // options like SkipPrefixValidation (multi-repo mode) without requiring the + // entire importer to be SQLite-specific. 
+ type importBatchCreator interface { + CreateIssuesWithFullOptions(ctx context.Context, issues []*types.Issue, actor string, opts sqlite.BatchCreateOptions) error } - if err := sqliteStore.CreateIssuesWithFullOptions(ctx, batchForDepth, "import", batchOpts); err != nil { - return fmt.Errorf("error creating depth-%d issues: %w", depth, err) + if bc, ok := store.(importBatchCreator); ok { + batchOpts := sqlite.BatchCreateOptions{ + OrphanHandling: sqlite.OrphanHandling(opts.OrphanHandling), + SkipPrefixValidation: opts.SkipPrefixValidation, + } + if err := bc.CreateIssuesWithFullOptions(ctx, batchForDepth, "import", batchOpts); err != nil { + return fmt.Errorf("error creating depth-%d issues: %w", depth, err) + } + } else { + // Generic fallback. OrphanSkip and OrphanStrict are enforced above. + if err := store.CreateIssues(ctx, batchForDepth, "import"); err != nil { + return fmt.Errorf("error creating depth-%d issues: %w", depth, err) + } } result.Created += len(batchForDepth) } @@ -822,15 +860,502 @@ func upsertIssues(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues return nil } +// upsertIssuesTx performs upsert using a transaction for atomicity. +func upsertIssuesTx(ctx context.Context, tx storage.Transaction, store storage.Storage, issues []*types.Issue, opts Options, result *Result) error { + // Use transaction-scoped reads for consistency. 
+ dbIssues, err := tx.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true}) + if err != nil { + return fmt.Errorf("failed to get DB issues: %w", err) + } + + dbByHash := buildHashMap(dbIssues) + dbByID := buildIDMap(dbIssues) + + // Build external_ref map for O(1) lookup + dbByExternalRef := make(map[string]*types.Issue) + for _, issue := range dbIssues { + if issue.ExternalRef != nil && *issue.ExternalRef != "" { + dbByExternalRef[*issue.ExternalRef] = issue + if linear.IsLinearExternalRef(*issue.ExternalRef) { + if canonical, ok := linear.CanonicalizeLinearExternalRef(*issue.ExternalRef); ok { + dbByExternalRef[canonical] = issue + } + } + } + } + + // Track what we need to create + var newIssues []*types.Issue + seenHashes := make(map[string]bool) + seenIDs := make(map[string]bool) + + for _, incoming := range issues { + hash := incoming.ContentHash + if hash == "" { + hash = incoming.ComputeContentHash() + incoming.ContentHash = hash + } + + if seenHashes[hash] { + result.Skipped++ + continue + } + seenHashes[hash] = true + + if seenIDs[incoming.ID] { + result.Skipped++ + continue + } + seenIDs[incoming.ID] = true + + // Never resurrect over tombstones. 
+ if existingByID, found := dbByID[incoming.ID]; found && existingByID != nil && existingByID.Status == types.StatusTombstone { + result.Skipped++ + continue + } + + // Phase 0: external_ref + if incoming.ExternalRef != nil && *incoming.ExternalRef != "" { + if existing, found := dbByExternalRef[*incoming.ExternalRef]; found && existing != nil { + if !opts.SkipUpdate { + if shouldProtectFromUpdate(existing.ID, incoming.UpdatedAt, opts.ProtectLocalExportIDs) { + debugLogProtection(existing.ID, opts.ProtectLocalExportIDs[existing.ID], incoming.UpdatedAt) + result.Skipped++ + continue + } + if !incoming.UpdatedAt.After(existing.UpdatedAt) { + result.Unchanged++ + continue + } + updates := map[string]interface{}{ + "title": incoming.Title, + "description": incoming.Description, + "status": incoming.Status, + "priority": incoming.Priority, + "issue_type": incoming.IssueType, + "design": incoming.Design, + "acceptance_criteria": incoming.AcceptanceCriteria, + "notes": incoming.Notes, + "closed_at": incoming.ClosedAt, + } + if incoming.Pinned { + updates["pinned"] = incoming.Pinned + } + if incoming.Assignee != "" { + updates["assignee"] = incoming.Assignee + } else { + updates["assignee"] = nil + } + if incoming.ExternalRef != nil && *incoming.ExternalRef != "" { + updates["external_ref"] = *incoming.ExternalRef + } else { + updates["external_ref"] = nil + } + if IssueDataChanged(existing, updates) { + if err := tx.UpdateIssue(ctx, existing.ID, updates, "import"); err != nil { + return fmt.Errorf("error updating issue %s (matched by external_ref): %w", existing.ID, err) + } + result.Updated++ + } else { + result.Unchanged++ + } + } else { + result.Skipped++ + } + continue + } + } + + // Phase 1: content hash + if existing, found := dbByHash[hash]; found && existing != nil { + if existing.ID == incoming.ID { + result.Unchanged++ + } else { + existingPrefix := utils.ExtractIssuePrefix(existing.ID) + incomingPrefix := utils.ExtractIssuePrefix(incoming.ID) + if 
existingPrefix != incomingPrefix { + result.Skipped++ + } else if !opts.SkipUpdate { + deletedID, err := handleRename(ctx, store, existing, incoming) + if err != nil { + return fmt.Errorf("failed to handle rename %s -> %s: %w", existing.ID, incoming.ID, err) + } + if deletedID != "" { + delete(dbByID, deletedID) + } + result.Updated++ + } else { + result.Skipped++ + } + } + continue + } + + // Phase 2: same ID exists -> update candidate + if existingWithID, found := dbByID[incoming.ID]; found && existingWithID != nil { + if existingWithID.Status == types.StatusTombstone { + result.Skipped++ + continue + } + if !opts.SkipUpdate { + if shouldProtectFromUpdate(incoming.ID, incoming.UpdatedAt, opts.ProtectLocalExportIDs) { + debugLogProtection(incoming.ID, opts.ProtectLocalExportIDs[incoming.ID], incoming.UpdatedAt) + result.Skipped++ + continue + } + if !incoming.UpdatedAt.After(existingWithID.UpdatedAt) { + result.Unchanged++ + continue + } + updates := map[string]interface{}{ + "title": incoming.Title, + "description": incoming.Description, + "status": incoming.Status, + "priority": incoming.Priority, + "issue_type": incoming.IssueType, + "design": incoming.Design, + "acceptance_criteria": incoming.AcceptanceCriteria, + "notes": incoming.Notes, + "closed_at": incoming.ClosedAt, + } + if incoming.Pinned { + updates["pinned"] = incoming.Pinned + } + if incoming.Assignee != "" { + updates["assignee"] = incoming.Assignee + } else { + updates["assignee"] = nil + } + if incoming.ExternalRef != nil && *incoming.ExternalRef != "" { + updates["external_ref"] = *incoming.ExternalRef + } else { + updates["external_ref"] = nil + } + if IssueDataChanged(existingWithID, updates) { + if err := tx.UpdateIssue(ctx, incoming.ID, updates, "import"); err != nil { + return fmt.Errorf("error updating issue %s: %w", incoming.ID, err) + } + result.Updated++ + } else { + result.Unchanged++ + } + } else { + result.Skipped++ + } + } else { + newIssues = append(newIssues, incoming) + } + } + + 
// OrphanSkip/Strict/Resurrect handled using the same helpers as non-tx path + if opts.OrphanHandling == OrphanSkip { + var filtered []*types.Issue + for _, issue := range newIssues { + if isHier, parentID := isHierarchicalID(issue.ID); isHier { + if dbByID[parentID] == nil { + // parent might be created in this batch; check newIssues + found := false + for _, ni := range newIssues { + if ni.ID == parentID { + found = true + break + } + } + if !found { + result.Skipped++ + continue + } + } + } + filtered = append(filtered, issue) + } + newIssues = filtered + } + if opts.OrphanHandling == OrphanStrict { + newIDSet := make(map[string]bool, len(newIssues)) + for _, issue := range newIssues { + newIDSet[issue.ID] = true + } + for _, issue := range newIssues { + if isHier, parentID := isHierarchicalID(issue.ID); isHier { + if dbByID[parentID] == nil && !newIDSet[parentID] { + return fmt.Errorf("parent issue %s does not exist (strict mode)", parentID) + } + } + } + } + if opts.OrphanHandling == OrphanResurrect { + if err := addResurrectedParents(store, dbByID, issues, &newIssues); err != nil { + return err + } + } + + // Create new issues in deterministic depth order using tx. 
+ if len(newIssues) > 0 { + sort.Slice(newIssues, func(i, j int) bool { + di := hierarchyDepth(newIssues[i].ID) + dj := hierarchyDepth(newIssues[j].ID) + if di != dj { + return di < dj + } + return newIssues[i].ID < newIssues[j].ID + }) + + type importCreator interface { + CreateIssueImport(ctx context.Context, issue *types.Issue, actor string, skipPrefixValidation bool) error + } + for _, iss := range newIssues { + if ic, ok := tx.(importCreator); ok { + if err := ic.CreateIssueImport(ctx, iss, "import", opts.SkipPrefixValidation); err != nil { + return err + } + } else { + if err := tx.CreateIssue(ctx, iss, "import"); err != nil { + return err + } + } + result.Created++ + } + } + + return nil +} + +func importDependenciesTx(ctx context.Context, tx storage.Transaction, issues []*types.Issue, opts Options, result *Result) error { + for _, issue := range issues { + if len(issue.Dependencies) == 0 { + continue + } + + existingDeps, err := tx.GetDependencyRecords(ctx, issue.ID) + if err != nil { + return fmt.Errorf("error checking dependencies for %s: %w", issue.ID, err) + } + existingSet := make(map[string]bool) + for _, existing := range existingDeps { + key := fmt.Sprintf("%s|%s", existing.DependsOnID, existing.Type) + existingSet[key] = true + } + + for _, dep := range issue.Dependencies { + key := fmt.Sprintf("%s|%s", dep.DependsOnID, dep.Type) + if existingSet[key] { + continue + } + if err := tx.AddDependency(ctx, dep, "import"); err != nil { + if opts.Strict { + return fmt.Errorf("error adding dependency %s → %s: %w", dep.IssueID, dep.DependsOnID, err) + } + depDesc := fmt.Sprintf("%s → %s (%s)", dep.IssueID, dep.DependsOnID, dep.Type) + fmt.Fprintf(os.Stderr, "Warning: Skipping dependency due to error: %s (%v)\n", depDesc, err) + if result != nil { + result.SkippedDependencies = append(result.SkippedDependencies, depDesc) + } + continue + } + } + } + return nil +} + +func importLabelsTx(ctx context.Context, tx storage.Transaction, issues []*types.Issue, opts 
Options) error { + for _, issue := range issues { + if len(issue.Labels) == 0 { + continue + } + currentLabels, err := tx.GetLabels(ctx, issue.ID) + if err != nil { + return fmt.Errorf("error getting labels for %s: %w", issue.ID, err) + } + set := make(map[string]bool, len(currentLabels)) + for _, l := range currentLabels { + set[l] = true + } + for _, label := range issue.Labels { + if set[label] { + continue + } + if err := tx.AddLabel(ctx, issue.ID, label, "import"); err != nil { + if opts.Strict { + return fmt.Errorf("error adding label %s to %s: %w", label, issue.ID, err) + } + } + } + } + return nil +} + +func importCommentsTx(ctx context.Context, tx storage.Transaction, issues []*types.Issue, opts Options) error { + for _, issue := range issues { + if len(issue.Comments) == 0 { + continue + } + currentComments, err := tx.GetIssueComments(ctx, issue.ID) + if err != nil { + return fmt.Errorf("error getting comments for %s: %w", issue.ID, err) + } + existing := make(map[string]bool) + for _, c := range currentComments { + key := fmt.Sprintf("%s:%s", c.Author, strings.TrimSpace(c.Text)) + existing[key] = true + } + for _, comment := range issue.Comments { + key := fmt.Sprintf("%s:%s", comment.Author, strings.TrimSpace(comment.Text)) + if existing[key] { + continue + } + if _, err := tx.ImportIssueComment(ctx, issue.ID, comment.Author, comment.Text, comment.CreatedAt); err != nil { + if opts.Strict { + return fmt.Errorf("error adding comment to %s: %w", issue.ID, err) + } + } + } + } + return nil +} + +// addResurrectedParents ensures missing hierarchical parents exist by adding "tombstone parent" +// issues to newIssues (if needed). Parents are sourced from the local JSONL file when possible. 
+func addResurrectedParents(store storage.Storage, dbByID map[string]*types.Issue, allIncoming []*types.Issue, newIssues *[]*types.Issue) error { + // Track which IDs will exist after creation + willExist := make(map[string]bool, len(dbByID)+len(*newIssues)) + for id, iss := range dbByID { + if iss != nil { + willExist[id] = true + } + } + for _, iss := range *newIssues { + willExist[iss.ID] = true + } + + // Helper to ensure a single parent exists (and its ancestors). + var ensureParent func(parentID string) error + ensureParent = func(parentID string) error { + if willExist[parentID] { + return nil + } + // Ensure ancestors first (root-to-leaf) + if isHier, grandParent := isHierarchicalID(parentID); isHier { + if err := ensureParent(grandParent); err != nil { + return err + } + } + + // Try to find the issue in the incoming batch first (already being imported) + for _, iss := range allIncoming { + if iss.ID == parentID { + willExist[parentID] = true + return nil + } + } + + // Try local JSONL history + beadsDir := filepath.Dir(store.Path()) + found, err := findIssueInLocalJSONL(filepath.Join(beadsDir, "issues.jsonl"), parentID) + if err != nil { + return fmt.Errorf("parent issue %s does not exist and could not be resurrected: %w", parentID, err) + } + if found == nil { + return fmt.Errorf("parent issue %s does not exist and cannot be resurrected from local JSONL history", parentID) + } + + now := time.Now().UTC() + closedAt := now + tombstone := &types.Issue{ + ID: found.ID, + Title: found.Title, + IssueType: found.IssueType, + Status: types.StatusClosed, + Priority: 4, + CreatedAt: found.CreatedAt, + UpdatedAt: now, + ClosedAt: &closedAt, + // Keep content deterministic-ish but signal it's resurrected. 
+ Description: "[RESURRECTED] Recreated as closed to preserve hierarchical structure.",
+ }
+ // Compute hash (ImportIssues computed hashes for original slice only)
+ tombstone.ContentHash = tombstone.ComputeContentHash()
+
+ *newIssues = append(*newIssues, tombstone)
+ willExist[parentID] = true
+ return nil
+ }
+
+ // Walk newIssues and ensure parents exist
+ for _, iss := range *newIssues {
+ if isHier, parentID := isHierarchicalID(iss.ID); isHier {
+ if err := ensureParent(parentID); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func findIssueInLocalJSONL(jsonlPath, issueID string) (*types.Issue, error) {
+ if jsonlPath == "" {
+ return nil, nil
+ }
+ if _, err := os.Stat(jsonlPath); err != nil {
+ return nil, nil // No JSONL file available
+ }
+
+ f, err := os.Open(jsonlPath) // #nosec G304 -- jsonlPath derived from beadsDir
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ scanner := bufio.NewScanner(f)
+ scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
+
+ var last *types.Issue
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if line == "" {
+ continue
+ }
+ if !strings.Contains(line, `"`+issueID+`"`) {
+ continue
+ }
+ var iss types.Issue
+ if err := json.Unmarshal([]byte(line), &iss); err != nil {
+ // Skip malformed lines (best-effort resurrection)
+ continue
+ }
+ if iss.ID == issueID {
+ iss.SetDefaults()
+ copy := iss
+ last = &copy
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ return last, nil
+}
+
 // importDependencies imports dependency relationships
-func importDependencies(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options, result *Result) error {
+func importDependencies(ctx context.Context, store storage.Storage, issues []*types.Issue, opts Options, result *Result) error {
+ // Backend-agnostic existence check map to avoid relying on backend-specific FK errors.
+ dbIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{IncludeTombstones: true}) + if err != nil { + return fmt.Errorf("failed to load issues for dependency validation: %w", err) + } + exists := make(map[string]bool, len(dbIssues)) + for _, iss := range dbIssues { + if iss != nil { + exists[iss.ID] = true + } + } + for _, issue := range issues { if len(issue.Dependencies) == 0 { continue } // Fetch existing dependencies once per issue - existingDeps, err := sqliteStore.GetDependencyRecords(ctx, issue.ID) + existingDeps, err := store.GetDependencyRecords(ctx, issue.ID) if err != nil { return fmt.Errorf("error checking dependencies for %s: %w", issue.ID, err) } @@ -843,6 +1368,19 @@ func importDependencies(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, } for _, dep := range issue.Dependencies { + // Validate referenced issues exist (after upsert). Tombstones count as existing. + if !exists[dep.IssueID] || !exists[dep.DependsOnID] { + depDesc := fmt.Sprintf("%s → %s (%s)", dep.IssueID, dep.DependsOnID, dep.Type) + if opts.Strict { + return fmt.Errorf("missing reference for dependency: %s", depDesc) + } + fmt.Fprintf(os.Stderr, "Warning: Skipping dependency due to missing reference: %s\n", depDesc) + if result != nil { + result.SkippedDependencies = append(result.SkippedDependencies, depDesc) + } + continue + } + // Check for duplicate using set key := fmt.Sprintf("%s|%s", dep.DependsOnID, dep.Type) if existingSet[key] { @@ -850,22 +1388,16 @@ func importDependencies(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, } // Add dependency - if err := sqliteStore.AddDependency(ctx, dep, "import"); err != nil { - // Check for FOREIGN KEY constraint violation - if sqlite.IsForeignKeyConstraintError(err) { - // Log warning and track skipped dependency - depDesc := fmt.Sprintf("%s → %s (%s)", dep.IssueID, dep.DependsOnID, dep.Type) - fmt.Fprintf(os.Stderr, "Warning: Skipping dependency due to missing reference: %s\n", depDesc) - if result != nil { - 
result.SkippedDependencies = append(result.SkippedDependencies, depDesc) - } - continue - } - - // For non-FK errors, respect strict mode + if err := store.AddDependency(ctx, dep, "import"); err != nil { + // Backend-agnostic: treat dependency insert errors as non-fatal unless strict mode is enabled. if opts.Strict { return fmt.Errorf("error adding dependency %s → %s: %w", dep.IssueID, dep.DependsOnID, err) } + depDesc := fmt.Sprintf("%s → %s (%s)", dep.IssueID, dep.DependsOnID, dep.Type) + fmt.Fprintf(os.Stderr, "Warning: Skipping dependency due to error: %s (%v)\n", depDesc, err) + if result != nil { + result.SkippedDependencies = append(result.SkippedDependencies, depDesc) + } continue } } @@ -875,14 +1407,14 @@ func importDependencies(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, } // importLabels imports labels for issues -func importLabels(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options) error { +func importLabels(ctx context.Context, store storage.Storage, issues []*types.Issue, opts Options) error { for _, issue := range issues { if len(issue.Labels) == 0 { continue } // Get current labels - currentLabels, err := sqliteStore.GetLabels(ctx, issue.ID) + currentLabels, err := store.GetLabels(ctx, issue.ID) if err != nil { return fmt.Errorf("error getting labels for %s: %w", issue.ID, err) } @@ -895,7 +1427,7 @@ func importLabels(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues // Add missing labels for _, label := range issue.Labels { if !currentLabelSet[label] { - if err := sqliteStore.AddLabel(ctx, issue.ID, label, "import"); err != nil { + if err := store.AddLabel(ctx, issue.ID, label, "import"); err != nil { if opts.Strict { return fmt.Errorf("error adding label %s to %s: %w", label, issue.ID, err) } @@ -909,14 +1441,14 @@ func importLabels(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issues } // importComments imports comments for issues -func importComments(ctx 
context.Context, sqliteStore *sqlite.SQLiteStorage, issues []*types.Issue, opts Options) error { +func importComments(ctx context.Context, store storage.Storage, issues []*types.Issue, opts Options) error { for _, issue := range issues { if len(issue.Comments) == 0 { continue } // Get current comments to avoid duplicates - currentComments, err := sqliteStore.GetIssueComments(ctx, issue.ID) + currentComments, err := store.GetIssueComments(ctx, issue.ID) if err != nil { return fmt.Errorf("error getting comments for %s: %w", issue.ID, err) } @@ -932,10 +1464,7 @@ func importComments(ctx context.Context, sqliteStore *sqlite.SQLiteStorage, issu for _, comment := range issue.Comments { key := fmt.Sprintf("%s:%s", comment.Author, strings.TrimSpace(comment.Text)) if !existingComments[key] { - // Use ImportIssueComment to preserve original timestamp (GH#735) - // Format timestamp as RFC3339 for SQLite compatibility - createdAt := comment.CreatedAt.UTC().Format(time.RFC3339) - if _, err := sqliteStore.ImportIssueComment(ctx, issue.ID, comment.Author, comment.Text, createdAt); err != nil { + if _, err := store.ImportIssueComment(ctx, issue.ID, comment.Author, comment.Text, comment.CreatedAt); err != nil { if opts.Strict { return fmt.Errorf("error adding comment to %s: %w", issue.ID, err) } @@ -1032,6 +1561,40 @@ func validateNoDuplicateExternalRefs(issues []*types.Issue, clearDuplicates bool return nil } +// isHierarchicalID returns true if id is a hierarchical child ID of the form "<parent>.<n>" (n is digits). +// This intentionally avoids treating prefixes that contain dots (e.g., "my.project-abc") as hierarchical. 
+func isHierarchicalID(id string) (bool, string) { + lastDot := strings.LastIndex(id, ".") + if lastDot <= 0 || lastDot == len(id)-1 { + return false, "" + } + suffix := id[lastDot+1:] + for i := 0; i < len(suffix); i++ { + if suffix[i] < '0' || suffix[i] > '9' { + return false, "" + } + } + return true, id[:lastDot] +} + +// hierarchyDepth returns the number of hierarchical segments in an ID. +// Examples: +// - "bd-abc" -> 0 +// - "bd-abc.1" -> 1 +// - "bd-abc.1.2" -> 2 +func hierarchyDepth(id string) int { + depth := 0 + cur := id + for { + isHier, parent := isHierarchicalID(cur) + if !isHier { + return depth + } + depth++ + cur = parent + } +} + // buildAllowedPrefixSet returns allowed prefixes, or nil to allow all (GH#686). // In multi-repo mode, additional repos have their own prefixes - allow all. // Also accepts allowedPrefixesConfig (comma-separated list like "gt-,mol-"). diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go index 7e2f25ee..aea5f21c 100644 --- a/internal/importer/importer_test.go +++ b/internal/importer/importer_test.go @@ -812,60 +812,22 @@ func TestImportIssues_Labels(t *testing.T) { } func TestGetOrCreateStore_ExistingStore(t *testing.T) { - ctx := context.Background() - - tmpDB := t.TempDir() + "/test.db" - store, err := sqlite.New(context.Background(), tmpDB) - if err != nil { - t.Fatalf("Failed to create store: %v", err) - } - defer store.Close() - - result, needClose, err := getOrCreateStore(ctx, tmpDB, store) - if err != nil { - t.Fatalf("Expected no error, got: %v", err) - } - if needClose { - t.Error("Expected needClose=false for existing store") - } - if result != store { - t.Error("Expected same store instance") - } + t.Skip("getOrCreateStore removed: importer now requires a store") } func TestGetOrCreateStore_NewStore(t *testing.T) { - ctx := context.Background() - - tmpDB := t.TempDir() + "/test.db" - - // Create initial database - initStore, err := sqlite.New(context.Background(), tmpDB) - if 
err != nil { - t.Fatalf("Failed to create store: %v", err) - } - initStore.Close() - - // Test creating new connection - result, needClose, err := getOrCreateStore(ctx, tmpDB, nil) - if err != nil { - t.Fatalf("Expected no error, got: %v", err) - } - defer result.Close() - - if !needClose { - t.Error("Expected needClose=true for new store") - } - if result == nil { - t.Error("Expected non-nil store") - } + t.Skip("getOrCreateStore removed: importer now requires a store") } func TestGetOrCreateStore_EmptyPath(t *testing.T) { + t.Skip("getOrCreateStore removed: importer now requires a store") +} + +func TestImportIssues_RequiresStore(t *testing.T) { ctx := context.Background() - - _, _, err := getOrCreateStore(ctx, "", nil) + _, err := ImportIssues(ctx, "", nil, []*types.Issue{}, Options{}) if err == nil { - t.Error("Expected error for empty database path") + t.Fatal("expected error when store is nil") } } @@ -1203,7 +1165,7 @@ func TestImportOrphanSkip_CountMismatch(t *testing.T) { // Import with OrphanSkip mode - parent doesn't exist result, err := ImportIssues(ctx, "", store, issues, Options{ - OrphanHandling: sqlite.OrphanSkip, + OrphanHandling: OrphanSkip, SkipPrefixValidation: true, // Allow explicit IDs during import }) if err != nil { diff --git a/internal/storage/dolt/credentials.go b/internal/storage/dolt/credentials.go index cbb91cc0..40779d15 100644 --- a/internal/storage/dolt/credentials.go +++ b/internal/storage/dolt/credentials.go @@ -263,14 +263,17 @@ func (s *DoltStore) UpdatePeerLastSync(ctx context.Context, name string) error { // The caller must hold federationEnvMutex. func setFederationCredentials(username, password string) func() { if username != "" { - os.Setenv("DOLT_REMOTE_USER", username) + // Best-effort: failures here should not crash the caller. + _ = os.Setenv("DOLT_REMOTE_USER", username) } if password != "" { - os.Setenv("DOLT_REMOTE_PASSWORD", password) + // Best-effort: failures here should not crash the caller. 
+ _ = os.Setenv("DOLT_REMOTE_PASSWORD", password) } return func() { - os.Unsetenv("DOLT_REMOTE_USER") - os.Unsetenv("DOLT_REMOTE_PASSWORD") + // Best-effort cleanup. + _ = os.Unsetenv("DOLT_REMOTE_USER") + _ = os.Unsetenv("DOLT_REMOTE_PASSWORD") } } diff --git a/internal/storage/dolt/events.go b/internal/storage/dolt/events.go index 1c927100..b8880389 100644 --- a/internal/storage/dolt/events.go +++ b/internal/storage/dolt/events.go @@ -65,10 +65,26 @@ func (s *DoltStore) GetEvents(ctx context.Context, issueID string, limit int) ([ // AddIssueComment adds a comment to an issue (structured comment) func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) { + return s.ImportIssueComment(ctx, issueID, author, text, time.Now().UTC()) +} + +// ImportIssueComment adds a comment during import, preserving the original timestamp. +// This prevents comment timestamp drift across JSONL sync cycles. +func (s *DoltStore) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) { + // Verify issue exists + var exists bool + if err := s.db.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM issues WHERE id = ?)`, issueID).Scan(&exists); err != nil { + return nil, fmt.Errorf("failed to check issue existence: %w", err) + } + if !exists { + return nil, fmt.Errorf("issue %s not found", issueID) + } + + createdAt = createdAt.UTC() result, err := s.db.ExecContext(ctx, ` INSERT INTO comments (issue_id, author, text, created_at) VALUES (?, ?, ?, ?) 
- `, issueID, author, text, time.Now().UTC()) + `, issueID, author, text, createdAt) if err != nil { return nil, fmt.Errorf("failed to add comment: %w", err) } @@ -78,12 +94,21 @@ func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text s return nil, fmt.Errorf("failed to get comment id: %w", err) } + // Mark issue dirty for incremental JSONL export + if _, err := s.db.ExecContext(ctx, ` + INSERT INTO dirty_issues (issue_id, marked_at) + VALUES (?, ?) + ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at) + `, issueID, time.Now().UTC()); err != nil { + return nil, fmt.Errorf("failed to mark issue dirty: %w", err) + } + return &types.Comment{ ID: id, IssueID: issueID, Author: author, Text: text, - CreatedAt: time.Now().UTC(), + CreatedAt: createdAt, }, nil } diff --git a/internal/storage/dolt/server.go b/internal/storage/dolt/server.go index 12261ffb..b757f687 100644 --- a/internal/storage/dolt/server.go +++ b/internal/storage/dolt/server.go @@ -103,6 +103,7 @@ func (s *Server) Start(ctx context.Context) error { } // Create command + // #nosec G204 -- dolt binary is fixed; args are derived from internal config. s.cmd = exec.CommandContext(ctx, "dolt", args...) s.cmd.Dir = s.cfg.DataDir @@ -272,6 +273,7 @@ func (s *Server) waitForReady(ctx context.Context) error { // GetRunningServerPID returns the PID of a running server from the PID file, or 0 if not running func GetRunningServerPID(dataDir string) int { pidFile := filepath.Join(dataDir, "dolt-server.pid") + // #nosec G304 -- pidFile is derived from internal dataDir. data, err := os.ReadFile(pidFile) if err != nil { return 0 diff --git a/internal/storage/dolt/transaction.go b/internal/storage/dolt/transaction.go index 20b9cd5d..93c3df23 100644 --- a/internal/storage/dolt/transaction.go +++ b/internal/storage/dolt/transaction.go @@ -17,6 +17,12 @@ type doltTransaction struct { store *DoltStore } +// CreateIssueImport is the import-friendly issue creation hook. 
+// Dolt does not enforce prefix validation at the storage layer, so this delegates to CreateIssue. +func (t *doltTransaction) CreateIssueImport(ctx context.Context, issue *types.Issue, actor string, skipPrefixValidation bool) error { + return t.CreateIssue(ctx, issue, actor) +} + // RunInTransaction executes a function within a database transaction func (s *DoltStore) RunInTransaction(ctx context.Context, fn func(tx storage.Transaction) error) error { sqlTx, err := s.db.BeginTx(ctx, nil) @@ -169,6 +175,36 @@ func (t *doltTransaction) AddDependency(ctx context.Context, dep *types.Dependen return err } +func (t *doltTransaction) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) { + rows, err := t.tx.QueryContext(ctx, ` + SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id + FROM dependencies + WHERE issue_id = ? + `, issueID) + if err != nil { + return nil, err + } + defer rows.Close() + + var deps []*types.Dependency + for rows.Next() { + var d types.Dependency + var metadata sql.NullString + var threadID sql.NullString + if err := rows.Scan(&d.IssueID, &d.DependsOnID, &d.Type, &d.CreatedAt, &d.CreatedBy, &metadata, &threadID); err != nil { + return nil, err + } + if metadata.Valid { + d.Metadata = metadata.String + } + if threadID.Valid { + d.ThreadID = threadID.String + } + deps = append(deps, &d) + } + return deps, rows.Err() +} + // RemoveDependency removes a dependency within the transaction func (t *doltTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error { _, err := t.tx.ExecContext(ctx, ` @@ -185,6 +221,23 @@ func (t *doltTransaction) AddLabel(ctx context.Context, issueID, label, actor st return err } +func (t *doltTransaction) GetLabels(ctx context.Context, issueID string) ([]string, error) { + rows, err := t.tx.QueryContext(ctx, `SELECT label FROM labels WHERE issue_id = ? 
ORDER BY label`, issueID) + if err != nil { + return nil, err + } + defer rows.Close() + var labels []string + for rows.Next() { + var l string + if err := rows.Scan(&l); err != nil { + return nil, err + } + labels = append(labels, l) + } + return labels, rows.Err() +} + // RemoveLabel removes a label within the transaction func (t *doltTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error { _, err := t.tx.ExecContext(ctx, ` @@ -231,6 +284,63 @@ func (t *doltTransaction) GetMetadata(ctx context.Context, key string) (string, return value, err } +func (t *doltTransaction) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) { + // Verify issue exists in tx + iss, err := t.GetIssue(ctx, issueID) + if err != nil { + return nil, err + } + if iss == nil { + return nil, fmt.Errorf("issue %s not found", issueID) + } + + createdAt = createdAt.UTC() + res, err := t.tx.ExecContext(ctx, ` + INSERT INTO comments (issue_id, author, text, created_at) + VALUES (?, ?, ?, ?) + `, issueID, author, text, createdAt) + if err != nil { + return nil, fmt.Errorf("failed to add comment: %w", err) + } + id, err := res.LastInsertId() + if err != nil { + return nil, fmt.Errorf("failed to get comment id: %w", err) + } + + // mark dirty in tx + if _, err := t.tx.ExecContext(ctx, ` + INSERT INTO dirty_issues (issue_id, marked_at) + VALUES (?, ?) + ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at) + `, issueID, time.Now().UTC()); err != nil { + return nil, fmt.Errorf("failed to mark issue dirty: %w", err) + } + + return &types.Comment{ID: id, IssueID: issueID, Author: author, Text: text, CreatedAt: createdAt}, nil +} + +func (t *doltTransaction) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) { + rows, err := t.tx.QueryContext(ctx, ` + SELECT id, issue_id, author, text, created_at + FROM comments + WHERE issue_id = ? 
+ ORDER BY created_at ASC + `, issueID) + if err != nil { + return nil, err + } + defer rows.Close() + var comments []*types.Comment + for rows.Next() { + var c types.Comment + if err := rows.Scan(&c.ID, &c.IssueID, &c.Author, &c.Text, &c.CreatedAt); err != nil { + return nil, err + } + comments = append(comments, &c) + } + return comments, rows.Err() +} + // AddComment adds a comment within the transaction func (t *doltTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error { _, err := t.tx.ExecContext(ctx, ` diff --git a/internal/storage/memory/memory.go b/internal/storage/memory/memory.go index fe14fcbf..ba408126 100644 --- a/internal/storage/memory/memory.go +++ b/internal/storage/memory/memory.go @@ -1457,6 +1457,24 @@ func (m *MemoryStorage) AddIssueComment(ctx context.Context, issueID, author, te return comment, nil } +func (m *MemoryStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) { + m.mu.Lock() + defer m.mu.Unlock() + + comment := &types.Comment{ + ID: int64(len(m.comments[issueID]) + 1), + IssueID: issueID, + Author: author, + Text: text, + CreatedAt: createdAt, + } + + m.comments[issueID] = append(m.comments[issueID], comment) + m.dirty[issueID] = true + + return comment, nil +} + func (m *MemoryStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) { m.mu.RLock() defer m.mu.RUnlock() diff --git a/internal/storage/sqlite/comments.go b/internal/storage/sqlite/comments.go index 7c066a2e..7e74cfd1 100644 --- a/internal/storage/sqlite/comments.go +++ b/internal/storage/sqlite/comments.go @@ -3,6 +3,7 @@ package sqlite import ( "context" "fmt" + "time" "github.com/steveyegge/beads/internal/types" ) @@ -56,7 +57,7 @@ func (s *SQLiteStorage) AddIssueComment(ctx context.Context, issueID, author, te // Unlike AddIssueComment which uses CURRENT_TIMESTAMP, this method uses the provided // createdAt time from the JSONL file. 
This prevents timestamp drift during sync cycles. // GH#735: Comment created_at timestamps were being overwritten with current time during import. -func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt string) (*types.Comment, error) { +func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) { // Verify issue exists var exists bool err := s.db.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM issues WHERE id = ?)`, issueID).Scan(&exists) @@ -68,10 +69,11 @@ func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author, } // Insert comment with provided timestamp + createdAtStr := createdAt.UTC().Format(time.RFC3339Nano) result, err := s.db.ExecContext(ctx, ` INSERT INTO comments (issue_id, author, text, created_at) VALUES (?, ?, ?, ?) - `, issueID, author, text, createdAt) + `, issueID, author, text, createdAtStr) if err != nil { return nil, fmt.Errorf("failed to insert comment: %w", err) } diff --git a/internal/storage/sqlite/import_tx.go b/internal/storage/sqlite/import_tx.go new file mode 100644 index 00000000..fd4c525a --- /dev/null +++ b/internal/storage/sqlite/import_tx.go @@ -0,0 +1,115 @@ +package sqlite + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/steveyegge/beads/internal/types" +) + +// CreateIssueImport creates an issue inside an existing sqlite transaction, optionally skipping +// prefix validation. This is used by JSONL import to support multi-repo mode (GH#686). 
+func (t *sqliteTxStorage) CreateIssueImport(ctx context.Context, issue *types.Issue, actor string, skipPrefixValidation bool) error { + // Fetch custom statuses and types for validation + customStatuses, err := t.GetCustomStatuses(ctx) + if err != nil { + return fmt.Errorf("failed to get custom statuses: %w", err) + } + customTypes, err := t.GetCustomTypes(ctx) + if err != nil { + return fmt.Errorf("failed to get custom types: %w", err) + } + + // Set timestamps + now := time.Now() + if issue.CreatedAt.IsZero() { + issue.CreatedAt = now + } + if issue.UpdatedAt.IsZero() { + issue.UpdatedAt = now + } + + // Defensive fix for closed_at invariant + if issue.Status == types.StatusClosed && issue.ClosedAt == nil { + maxTime := issue.CreatedAt + if issue.UpdatedAt.After(maxTime) { + maxTime = issue.UpdatedAt + } + closedAt := maxTime.Add(time.Second) + issue.ClosedAt = &closedAt + } + // Defensive fix for tombstone invariant + if issue.Status == types.StatusTombstone && issue.DeletedAt == nil { + maxTime := issue.CreatedAt + if issue.UpdatedAt.After(maxTime) { + maxTime = issue.UpdatedAt + } + deletedAt := maxTime.Add(time.Second) + issue.DeletedAt = &deletedAt + } + + // Validate issue before creating + if err := issue.ValidateWithCustom(customStatuses, customTypes); err != nil { + return fmt.Errorf("validation failed: %w", err) + } + + // Compute content hash + if issue.ContentHash == "" { + issue.ContentHash = issue.ComputeContentHash() + } + + // Get configured prefix for validation and ID generation behavior + var configPrefix string + err = t.conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&configPrefix) + if err == sql.ErrNoRows || configPrefix == "" { + return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)") + } else if err != nil { + return fmt.Errorf("failed to get config: %w", err) + } + + prefix := configPrefix + if issue.IDPrefix != "" { + prefix = configPrefix + 
"-" + issue.IDPrefix + } + + if issue.ID == "" { + // Import path expects IDs, but be defensive and generate if missing. + generatedID, err := GenerateIssueID(ctx, t.conn, prefix, issue, actor) + if err != nil { + return fmt.Errorf("failed to generate issue ID: %w", err) + } + issue.ID = generatedID + } else if !skipPrefixValidation { + if err := ValidateIssueIDPrefix(issue.ID, prefix); err != nil { + return fmt.Errorf("failed to validate issue ID prefix: %w", err) + } + } + + // Ensure parent exists for hierarchical IDs (importer should have ensured / resurrected). + if isHierarchical, parentID := IsHierarchicalID(issue.ID); isHierarchical { + var parentCount int + if err := t.conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, parentID).Scan(&parentCount); err != nil { + return fmt.Errorf("failed to check parent existence: %w", err) + } + if parentCount == 0 { + return fmt.Errorf("parent issue %s does not exist", parentID) + } + } + + // Insert issue (strict) + if err := insertIssueStrict(ctx, t.conn, issue); err != nil { + return fmt.Errorf("failed to insert issue: %w", err) + } + // Record event + if err := recordCreatedEvent(ctx, t.conn, issue, actor); err != nil { + return fmt.Errorf("failed to record creation event: %w", err) + } + // Mark dirty + if err := markDirty(ctx, t.conn, issue.ID); err != nil { + return fmt.Errorf("failed to mark issue dirty: %w", err) + } + return nil +} + diff --git a/internal/storage/sqlite/transaction.go b/internal/storage/sqlite/transaction.go index b540ab41..22c0b1fb 100644 --- a/internal/storage/sqlite/transaction.go +++ b/internal/storage/sqlite/transaction.go @@ -824,6 +824,37 @@ func (t *sqliteTxStorage) AddDependency(ctx context.Context, dep *types.Dependen return nil } +// GetDependencyRecords retrieves dependency records for an issue within the transaction. 
+func (t *sqliteTxStorage) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) { + rows, err := t.conn.QueryContext(ctx, ` + SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id + FROM dependencies + WHERE issue_id = ? + `, issueID) + if err != nil { + return nil, fmt.Errorf("failed to query dependencies: %w", err) + } + defer func() { _ = rows.Close() }() + + var deps []*types.Dependency + for rows.Next() { + var d types.Dependency + var metadata sql.NullString + var threadID sql.NullString + if err := rows.Scan(&d.IssueID, &d.DependsOnID, &d.Type, &d.CreatedAt, &d.CreatedBy, &metadata, &threadID); err != nil { + return nil, fmt.Errorf("failed to scan dependency: %w", err) + } + if metadata.Valid { + d.Metadata = metadata.String + } + if threadID.Valid { + d.ThreadID = threadID.String + } + deps = append(deps, &d) + } + return deps, rows.Err() +} + // RemoveDependency removes a dependency within the transaction. func (t *sqliteTxStorage) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error { // First, check what type of dependency is being removed @@ -916,6 +947,11 @@ func (t *sqliteTxStorage) AddLabel(ctx context.Context, issueID, label, actor st return nil } +// GetLabels retrieves labels for an issue within the transaction. +func (t *sqliteTxStorage) GetLabels(ctx context.Context, issueID string) ([]string, error) { + return t.getLabels(ctx, issueID) +} + // RemoveLabel removes a label from an issue within the transaction. func (t *sqliteTxStorage) RemoveLabel(ctx context.Context, issueID, label, actor string) error { result, err := t.conn.ExecContext(ctx, ` @@ -1063,6 +1099,68 @@ func (t *sqliteTxStorage) AddComment(ctx context.Context, issueID, actor, commen return nil } +// ImportIssueComment adds a structured comment during import, preserving the original timestamp. 
+func (t *sqliteTxStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) { + // Verify issue exists + existing, err := t.GetIssue(ctx, issueID) + if err != nil { + return nil, fmt.Errorf("failed to check issue existence: %w", err) + } + if existing == nil { + return nil, fmt.Errorf("issue %s not found", issueID) + } + + createdAtStr := createdAt.UTC().Format(time.RFC3339Nano) + res, err := t.conn.ExecContext(ctx, ` + INSERT INTO comments (issue_id, author, text, created_at) + VALUES (?, ?, ?, ?) + `, issueID, author, text, createdAtStr) + if err != nil { + return nil, fmt.Errorf("failed to insert comment: %w", err) + } + commentID, err := res.LastInsertId() + if err != nil { + return nil, fmt.Errorf("failed to get comment ID: %w", err) + } + + // Mark issue dirty + if err := markDirty(ctx, t.conn, issueID); err != nil { + return nil, fmt.Errorf("failed to mark issue dirty: %w", err) + } + + return &types.Comment{ + ID: commentID, + IssueID: issueID, + Author: author, + Text: text, + CreatedAt: createdAt.UTC(), + }, nil +} + +// GetIssueComments retrieves structured comments for an issue within the transaction. +func (t *sqliteTxStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) { + rows, err := t.conn.QueryContext(ctx, ` + SELECT id, issue_id, author, text, created_at + FROM comments + WHERE issue_id = ? + ORDER BY created_at ASC + `, issueID) + if err != nil { + return nil, fmt.Errorf("failed to query comments: %w", err) + } + defer func() { _ = rows.Close() }() + + var comments []*types.Comment + for rows.Next() { + var c types.Comment + if err := rows.Scan(&c.ID, &c.IssueID, &c.Author, &c.Text, &c.CreatedAt); err != nil { + return nil, fmt.Errorf("failed to scan comment: %w", err) + } + comments = append(comments, &c) + } + return comments, rows.Err() +} + // SearchIssues finds issues matching query and filters within the transaction. 
// This enables read-your-writes semantics for searching within a transaction. func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) { diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 9cca5dd3..7442495a 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -4,6 +4,7 @@ package storage import ( "context" "database/sql" + "time" "github.com/steveyegge/beads/internal/types" ) @@ -58,10 +59,12 @@ type Transaction interface { // Dependency operations AddDependency(ctx context.Context, dep *types.Dependency, actor string) error RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error + GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) // Label operations AddLabel(ctx context.Context, issueID, label, actor string) error RemoveLabel(ctx context.Context, issueID, label, actor string) error + GetLabels(ctx context.Context, issueID string) ([]string, error) // Config operations (for atomic config + issue workflows) SetConfig(ctx context.Context, key, value string) error @@ -73,6 +76,8 @@ type Transaction interface { // Comment operations AddComment(ctx context.Context, issueID, actor, comment string) error + ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) + GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) } // Storage defines the interface for issue storage backends @@ -121,6 +126,9 @@ type Storage interface { // Comments AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) + // ImportIssueComment adds a comment while preserving the original timestamp. + // Used during JSONL import to avoid timestamp drift across sync cycles. 
+ ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) GetCommentsForIssues(ctx context.Context, issueIDs []string) (map[string][]*types.Comment, error) diff --git a/internal/storage/storage_test.go b/internal/storage/storage_test.go index a68ce621..ccbf8f70 100644 --- a/internal/storage/storage_test.go +++ b/internal/storage/storage_test.go @@ -5,6 +5,7 @@ import ( "context" "database/sql" "testing" + "time" "github.com/steveyegge/beads/internal/types" ) @@ -119,6 +120,9 @@ func (m *mockStorage) GetEvents(ctx context.Context, issueID string, limit int) func (m *mockStorage) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) { return nil, nil } +func (m *mockStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) { + return nil, nil +} func (m *mockStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) { return nil, nil } @@ -237,12 +241,18 @@ func (m *mockTransaction) AddDependency(ctx context.Context, dep *types.Dependen func (m *mockTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error { return nil } +func (m *mockTransaction) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) { + return nil, nil +} func (m *mockTransaction) AddLabel(ctx context.Context, issueID, label, actor string) error { return nil } func (m *mockTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error { return nil } +func (m *mockTransaction) GetLabels(ctx context.Context, issueID string) ([]string, error) { + return nil, nil +} func (m *mockTransaction) SetConfig(ctx context.Context, key, value string) error { return nil } @@ -258,6 +268,12 @@ func (m *mockTransaction) GetMetadata(ctx context.Context, key 
string) (string, func (m *mockTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error { return nil } +func (m *mockTransaction) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) { + return nil, nil +} +func (m *mockTransaction) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) { + return nil, nil +} // TestConfig verifies the Config struct has expected fields. func TestConfig(t *testing.T) { From 0a9bcc2dd0402c1cc848844b1c3135af77f76d1c Mon Sep 17 00:00:00 2001 From: Test Date: Wed, 21 Jan 2026 13:40:06 -0800 Subject: [PATCH 2/4] /internal/importer/importer_test.go: remove skipped tests --- internal/importer/importer_test.go | 186 ++++++++++++++--------------- 1 file changed, 88 insertions(+), 98 deletions(-) diff --git a/internal/importer/importer_test.go b/internal/importer/importer_test.go index aea5f21c..2ea32b14 100644 --- a/internal/importer/importer_test.go +++ b/internal/importer/importer_test.go @@ -153,10 +153,10 @@ func TestFieldComparator_StringConversion(t *testing.T) { fc := newFieldComparator() tests := []struct { - name string - value interface{} - wantStr string - wantOk bool + name string + value interface{} + wantStr string + wantOk bool }{ {"string", "hello", "hello", true}, {"string pointer", stringPtr("world"), "world", true}, @@ -436,67 +436,67 @@ func TestRenameImportedIssuePrefixes(t *testing.T) { func TestReplaceBoundaryAware(t *testing.T) { tests := []struct { - name string - text string - oldID string - newID string - want string + name string + text string + oldID string + newID string + want string }{ { - name: "simple replacement", - text: "See old-1 for details", - oldID: "old-1", - newID: "new-1", - want: "See new-1 for details", + name: "simple replacement", + text: "See old-1 for details", + oldID: "old-1", + newID: "new-1", + want: "See new-1 for details", }, { - name: "multiple occurrences", - text: "old-1 
and old-1 again", - oldID: "old-1", - newID: "new-1", - want: "new-1 and new-1 again", + name: "multiple occurrences", + text: "old-1 and old-1 again", + oldID: "old-1", + newID: "new-1", + want: "new-1 and new-1 again", }, { - name: "no match substring prefix", - text: "old-10 should not match", - oldID: "old-1", - newID: "new-1", - want: "old-10 should not match", + name: "no match substring prefix", + text: "old-10 should not match", + oldID: "old-1", + newID: "new-1", + want: "old-10 should not match", }, { - name: "match at end of longer ID", - text: "should not match old-1 at end", - oldID: "old-1", - newID: "new-1", - want: "should not match new-1 at end", + name: "match at end of longer ID", + text: "should not match old-1 at end", + oldID: "old-1", + newID: "new-1", + want: "should not match new-1 at end", }, { - name: "boundary at start", - text: "old-1 starts here", - oldID: "old-1", - newID: "new-1", - want: "new-1 starts here", + name: "boundary at start", + text: "old-1 starts here", + oldID: "old-1", + newID: "new-1", + want: "new-1 starts here", }, { - name: "boundary at end", - text: "ends with old-1", - oldID: "old-1", - newID: "new-1", - want: "ends with new-1", + name: "boundary at end", + text: "ends with old-1", + oldID: "old-1", + newID: "new-1", + want: "ends with new-1", }, { - name: "boundary punctuation", - text: "See (old-1) and [old-1] or {old-1}", - oldID: "old-1", - newID: "new-1", - want: "See (new-1) and [new-1] or {new-1}", + name: "boundary punctuation", + text: "See (old-1) and [old-1] or {old-1}", + oldID: "old-1", + newID: "new-1", + want: "See (new-1) and [new-1] or {new-1}", }, { - name: "no occurrence", - text: "No match here", - oldID: "old-1", - newID: "new-1", - want: "No match here", + name: "no occurrence", + text: "No match here", + oldID: "old-1", + newID: "new-1", + want: "No match here", }, } @@ -547,9 +547,9 @@ func TestIsValidIDSuffix(t *testing.T) { {"abc.1.2.3", true}, {"1.5", true}, // Invalid suffixes - {"", 
false}, // Empty string - {"A3F8", false}, // Uppercase not allowed - {"@#$!", false}, // Special characters not allowed + {"", false}, // Empty string + {"A3F8", false}, // Uppercase not allowed + {"@#$!", false}, // Special characters not allowed {"abc-def", false}, // Hyphens not allowed in suffix } @@ -569,7 +569,7 @@ func stringPtr(s string) *string { func TestImportIssues_Basic(t *testing.T) { ctx := context.Background() - + // Create temp database tmpDB := t.TempDir() + "/test.db" store, err := sqlite.New(context.Background(), tmpDB) @@ -577,12 +577,12 @@ func TestImportIssues_Basic(t *testing.T) { t.Fatalf("Failed to create store: %v", err) } defer store.Close() - + // Set config prefix if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil { t.Fatalf("Failed to set prefix: %v", err) } - + // Import single issue issues := []*types.Issue{ { @@ -594,16 +594,16 @@ func TestImportIssues_Basic(t *testing.T) { IssueType: types.TypeTask, }, } - + result, err := ImportIssues(ctx, tmpDB, store, issues, Options{}) if err != nil { t.Fatalf("Import failed: %v", err) } - + if result.Created != 1 { t.Errorf("Expected 1 created, got %d", result.Created) } - + // Verify issue was created retrieved, err := store.GetIssue(ctx, "test-abc123") if err != nil { @@ -616,18 +616,18 @@ func TestImportIssues_Basic(t *testing.T) { func TestImportIssues_Update(t *testing.T) { ctx := context.Background() - + tmpDB := t.TempDir() + "/test.db" store, err := sqlite.New(context.Background(), tmpDB) if err != nil { t.Fatalf("Failed to create store: %v", err) } defer store.Close() - + if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil { t.Fatalf("Failed to set prefix: %v", err) } - + // Create initial issue issue1 := &types.Issue{ ID: "test-abc123", @@ -638,12 +638,12 @@ func TestImportIssues_Update(t *testing.T) { IssueType: types.TypeTask, } issue1.ContentHash = issue1.ComputeContentHash() - + err = store.CreateIssue(ctx, issue1, "test") if err != nil { 
t.Fatalf("Failed to create initial issue: %v", err) } - + // Import updated version with newer timestamp issue2 := &types.Issue{ ID: "test-abc123", @@ -656,18 +656,18 @@ func TestImportIssues_Update(t *testing.T) { UpdatedAt: time.Now().Add(time.Hour), // Newer than issue1 } issue2.ContentHash = issue2.ComputeContentHash() - + result, err := ImportIssues(ctx, tmpDB, store, []*types.Issue{issue2}, Options{}) if err != nil { t.Fatalf("Import failed: %v", err) } - + // The importer detects this as both a collision (1) and then upserts it (creates=1) // Total updates = collision count + actual upserts if result.Updated == 0 && result.Created == 0 { t.Error("Expected some updates or creates") } - + // Verify update retrieved, err := store.GetIssue(ctx, "test-abc123") if err != nil { @@ -680,18 +680,18 @@ func TestImportIssues_Update(t *testing.T) { func TestImportIssues_DryRun(t *testing.T) { ctx := context.Background() - + tmpDB := t.TempDir() + "/test.db" store, err := sqlite.New(context.Background(), tmpDB) if err != nil { t.Fatalf("Failed to create store: %v", err) } defer store.Close() - + if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil { t.Fatalf("Failed to set prefix: %v", err) } - + issues := []*types.Issue{ { ID: "test-abc123", @@ -701,13 +701,13 @@ func TestImportIssues_DryRun(t *testing.T) { IssueType: types.TypeTask, }, } - + // Dry run returns early when no collisions, so it reports what would be created result, err := ImportIssues(ctx, tmpDB, store, issues, Options{DryRun: true}) if err != nil { t.Fatalf("Import failed: %v", err) } - + // Should report that 1 issue would be created if result.Created != 1 { t.Errorf("Expected 1 would be created in dry run, got %d", result.Created) @@ -716,18 +716,18 @@ func TestImportIssues_DryRun(t *testing.T) { func TestImportIssues_Dependencies(t *testing.T) { ctx := context.Background() - + tmpDB := t.TempDir() + "/test.db" store, err := sqlite.New(context.Background(), tmpDB) if err != nil { 
t.Fatalf("Failed to create store: %v", err) } defer store.Close() - + if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil { t.Fatalf("Failed to set prefix: %v", err) } - + issues := []*types.Issue{ { ID: "test-abc123", @@ -747,16 +747,16 @@ func TestImportIssues_Dependencies(t *testing.T) { IssueType: types.TypeTask, }, } - + result, err := ImportIssues(ctx, tmpDB, store, issues, Options{}) if err != nil { t.Fatalf("Import failed: %v", err) } - + if result.Created != 2 { t.Errorf("Expected 2 created, got %d", result.Created) } - + // Verify dependency was created deps, err := store.GetDependencies(ctx, "test-abc123") if err != nil { @@ -769,18 +769,18 @@ func TestImportIssues_Dependencies(t *testing.T) { func TestImportIssues_Labels(t *testing.T) { ctx := context.Background() - + tmpDB := t.TempDir() + "/test.db" store, err := sqlite.New(context.Background(), tmpDB) if err != nil { t.Fatalf("Failed to create store: %v", err) } defer store.Close() - + if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil { t.Fatalf("Failed to set prefix: %v", err) } - + issues := []*types.Issue{ { ID: "test-abc123", @@ -791,16 +791,16 @@ func TestImportIssues_Labels(t *testing.T) { Labels: []string{"bug", "critical"}, }, } - + result, err := ImportIssues(ctx, tmpDB, store, issues, Options{}) if err != nil { t.Fatalf("Import failed: %v", err) } - + if result.Created != 1 { t.Errorf("Expected 1 created, got %d", result.Created) } - + // Verify labels were created retrieved, err := store.GetIssue(ctx, "test-abc123") if err != nil { @@ -811,17 +811,7 @@ func TestImportIssues_Labels(t *testing.T) { } } -func TestGetOrCreateStore_ExistingStore(t *testing.T) { - t.Skip("getOrCreateStore removed: importer now requires a store") -} - -func TestGetOrCreateStore_NewStore(t *testing.T) { - t.Skip("getOrCreateStore removed: importer now requires a store") -} - -func TestGetOrCreateStore_EmptyPath(t *testing.T) { - t.Skip("getOrCreateStore removed: importer now 
requires a store") -} +// NOTE: getOrCreateStore was removed; importer now requires an initialized store. func TestImportIssues_RequiresStore(t *testing.T) { ctx := context.Background() @@ -853,7 +843,7 @@ func TestGetPrefixList(t *testing.T) { want: []string{}, }, } - + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := GetPrefixList(tt.prefixes) @@ -972,7 +962,7 @@ func TestValidateNoDuplicateExternalRefs(t *testing.T) { func TestConcurrentExternalRefImports(t *testing.T) { t.Skip("TODO(bd-gpe7): Test hangs due to database deadlock - needs investigation") - + t.Run("sequential imports with same external_ref are detected as updates", func(t *testing.T) { store, err := sqlite.New(context.Background(), ":memory:") if err != nil { @@ -986,7 +976,7 @@ func TestConcurrentExternalRefImports(t *testing.T) { } externalRef := "JIRA-100" - + issue1 := &types.Issue{ ID: "bd-1", Title: "First import", @@ -1144,7 +1134,7 @@ func TestImportOrphanSkip_CountMismatch(t *testing.T) { UpdatedAt: now, }, { - ID: "test-orphan.1", // Child of non-existent parent + ID: "test-orphan.1", // Child of non-existent parent Title: "Orphaned Child", Status: types.StatusOpen, Priority: 2, From 76701123416c0da1af664be66d404987287a5dc3 Mon Sep 17 00:00:00 2001 From: Test Date: Wed, 21 Jan 2026 13:59:47 -0800 Subject: [PATCH 3/4] /internal/storage/dolt: fix windows build issue --- internal/storage/dolt/procattr_unix.go | 15 +++++++++ internal/storage/dolt/procattr_windows.go | 12 +++++++ internal/storage/dolt/process_unix.go | 32 +++++++++++++++++++ internal/storage/dolt/process_windows.go | 21 +++++++++++++ internal/storage/dolt/server.go | 38 +++++++---------------- 5 files changed, 91 insertions(+), 27 deletions(-) create mode 100644 internal/storage/dolt/procattr_unix.go create mode 100644 internal/storage/dolt/procattr_windows.go create mode 100644 internal/storage/dolt/process_unix.go create mode 100644 internal/storage/dolt/process_windows.go diff --git 
a/internal/storage/dolt/procattr_unix.go b/internal/storage/dolt/procattr_unix.go new file mode 100644 index 00000000..5cce7d3c --- /dev/null +++ b/internal/storage/dolt/procattr_unix.go @@ -0,0 +1,15 @@ +//go:build !windows +// +build !windows + +package dolt + +import ( + "os/exec" + "syscall" +) + +func setDoltServerSysProcAttr(cmd *exec.Cmd) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + } +} diff --git a/internal/storage/dolt/procattr_windows.go b/internal/storage/dolt/procattr_windows.go new file mode 100644 index 00000000..86b78159 --- /dev/null +++ b/internal/storage/dolt/procattr_windows.go @@ -0,0 +1,12 @@ +//go:build windows +// +build windows + +package dolt + +import "os/exec" + +// Windows does not support Setpgid; leave default process attributes. +func setDoltServerSysProcAttr(cmd *exec.Cmd) { + // no-op + _ = cmd +} diff --git a/internal/storage/dolt/process_unix.go b/internal/storage/dolt/process_unix.go new file mode 100644 index 00000000..848f054d --- /dev/null +++ b/internal/storage/dolt/process_unix.go @@ -0,0 +1,32 @@ +//go:build !windows +// +build !windows + +package dolt + +import ( + "os" + "strings" + "syscall" +) + +func processMayBeAlive(p *os.Process) bool { + // Signal 0 checks for existence without sending a real signal. + if err := p.Signal(syscall.Signal(0)); err != nil { + return false + } + return true +} + +func terminateProcess(p *os.Process) error { + if p == nil { + return nil + } + if err := p.Signal(syscall.SIGTERM); err != nil { + // Process may already be dead; treat as success. 
+ if strings.Contains(err.Error(), "process already finished") { + return nil + } + return err + } + return nil +} diff --git a/internal/storage/dolt/process_windows.go b/internal/storage/dolt/process_windows.go new file mode 100644 index 00000000..f7484527 --- /dev/null +++ b/internal/storage/dolt/process_windows.go @@ -0,0 +1,21 @@ +//go:build windows +// +build windows + +package dolt + +import "os" + +func processMayBeAlive(p *os.Process) bool { + // Windows doesn't support Unix-style signal(0) checks. Treat as "unknown/alive" + // and let connection attempts / wait timeouts determine readiness. + _ = p + return true +} + +func terminateProcess(p *os.Process) error { + if p == nil { + return nil + } + // Best-effort: Windows doesn't have SIGTERM semantics; kill the process. + return p.Kill() +} diff --git a/internal/storage/dolt/server.go b/internal/storage/dolt/server.go index b757f687..302aa739 100644 --- a/internal/storage/dolt/server.go +++ b/internal/storage/dolt/server.go @@ -15,7 +15,6 @@ import ( "strconv" "strings" "sync" - "syscall" "time" ) @@ -107,10 +106,8 @@ func (s *Server) Start(ctx context.Context) error { s.cmd = exec.CommandContext(ctx, "dolt", args...) s.cmd.Dir = s.cfg.DataDir - // Set up process group for clean shutdown - s.cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - } + // Set up process group for clean shutdown (Unix-only; no-op on Windows). + setDoltServerSysProcAttr(s.cmd) // Set up logging if s.cfg.LogFile != "" { @@ -163,13 +160,8 @@ func (s *Server) Stop() error { return nil } - // Try graceful shutdown first (SIGTERM) - if err := s.cmd.Process.Signal(syscall.SIGTERM); err != nil { - // Process may already be dead - if !strings.Contains(err.Error(), "process already finished") { - return fmt.Errorf("failed to send SIGTERM: %w", err) - } - } + // Best-effort graceful shutdown (platform-specific). 
+ _ = terminateProcess(s.cmd.Process) // Wait for graceful shutdown with timeout done := make(chan error, 1) @@ -250,11 +242,9 @@ func (s *Server) waitForReady(ctx context.Context) error { default: } - // Check if process is still alive using signal 0 - if s.cmd.Process != nil { - if err := s.cmd.Process.Signal(syscall.Signal(0)); err != nil { - return fmt.Errorf("server process exited unexpectedly") - } + // Best-effort: if we can tell the process is dead, fail fast. + if s.cmd.Process != nil && !processMayBeAlive(s.cmd.Process) { + return fmt.Errorf("server process exited unexpectedly") } // Try to connect @@ -290,9 +280,8 @@ func GetRunningServerPID(dataDir string) int { return 0 } - // On Unix, FindProcess always succeeds, so we need to check if it's alive - if err := process.Signal(syscall.Signal(0)); err != nil { - // Process is not running + // Best-effort liveness check (platform-specific). + if !processMayBeAlive(process) { _ = os.Remove(pidFile) return 0 } @@ -307,13 +296,8 @@ func StopServerByPID(pid int) error { return err } - // Try graceful shutdown first - if err := process.Signal(syscall.SIGTERM); err != nil { - if !strings.Contains(err.Error(), "process already finished") { - return err - } - return nil - } + // Best-effort graceful shutdown (platform-specific). 
+ _ = terminateProcess(process) // Wait for graceful shutdown done := make(chan struct{}) From 01fa2d9f3f53037b03ca3461fec17fabb2ddaded Mon Sep 17 00:00:00 2001 From: Test Date: Wed, 21 Jan 2026 14:08:40 -0800 Subject: [PATCH 4/4] /cmd/bd/federation.go: fix windows build issue again --- cmd/bd/federation.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/bd/federation.go b/cmd/bd/federation.go index 67a9abf2..00a370b9 100644 --- a/cmd/bd/federation.go +++ b/cmd/bd/federation.go @@ -4,7 +4,6 @@ import ( "fmt" "os" "strings" - "syscall" "github.com/spf13/cobra" "github.com/steveyegge/beads/internal/storage" @@ -374,7 +373,7 @@ func runFederationAddPeer(cmd *cobra.Command, args []string) { password := federationPassword if federationUser != "" && password == "" { fmt.Fprint(os.Stderr, "Password: ") - pwBytes, err := term.ReadPassword(syscall.Stdin) + pwBytes, err := term.ReadPassword(int(os.Stdin.Fd())) fmt.Fprintln(os.Stderr) // newline after password if err != nil { FatalErrorRespectJSON("failed to read password: %v", err)