Merge pull request #1240 from coffeegoddd/db/import-export

Enable full-fidelity JSONL import/export for Dolt backend
This commit is contained in:
Steve Yegge
2026-01-21 16:40:03 -08:00
committed by GitHub
27 changed files with 2011 additions and 347 deletions

View File

@@ -0,0 +1,259 @@
//go:build integration
// +build integration
package main
import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// isDoltBackendUnavailable reports whether CLI output indicates that the
// Dolt backend is missing or unsupported in this environment, so callers
// can skip Dolt-dependent tests instead of failing them.
func isDoltBackendUnavailable(out string) bool {
	lower := strings.ToLower(out)
	if !strings.Contains(lower, "dolt") {
		return false
	}
	for _, marker := range []string{"not supported", "not available", "unknown"} {
		if strings.Contains(lower, marker) {
			return true
		}
	}
	return false
}
// setupGitRepoForIntegration initializes a git repository in dir and sets a
// throwaway user identity so any commits the tests make will succeed.
func setupGitRepoForIntegration(t *testing.T, dir string) {
	t.Helper()
	if err := runCommandInDir(dir, "git", "init"); err != nil {
		t.Fatalf("git init failed: %v", err)
	}
	// Identity configuration is best-effort; errors are intentionally ignored.
	for _, kv := range [][2]string{
		{"user.email", "test@example.com"},
		{"user.name", "Test User"},
	} {
		_ = runCommandInDir(dir, "git", "config", kv[0], kv[1])
	}
}
// TestSQLiteToDolt_JSONLRoundTrip creates issues in a SQLite workspace
// (with a label, a comment, a dependency, and a delete-created tombstone),
// exports them to JSONL, imports that JSONL into a fresh Dolt workspace,
// re-exports, and asserts the data — including the tombstone status and the
// exact comment timestamp — survives the cross-backend round trip.
func TestSQLiteToDolt_JSONLRoundTrip(t *testing.T) {
if testing.Short() {
t.Skip("skipping slow integration test in short mode")
}
if runtime.GOOS == windowsOS {
t.Skip("cross-backend integration test not supported on windows")
}
// Hermetic bd invocations: test mode on, daemon disabled.
env := []string{
"BEADS_TEST_MODE=1",
"BEADS_NO_DAEMON=1",
}
// Workspace 1: SQLite create -> export JSONL
ws1 := createTempDirWithCleanup(t)
setupGitRepoForIntegration(t, ws1)
// Explicitly initialize sqlite for clarity.
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil {
t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out)
}
outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
if err != nil {
t.Fatalf("bd create A failed: %v\n%s", err, outA)
}
idA := parseCreateID(t, outA)
outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
if err != nil {
t.Fatalf("bd create B failed: %v\n%s", err, outB)
}
idB := parseCreateID(t, outB)
// Add label + comment + dependency.
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
t.Fatalf("bd label add failed: %v\n%s", err, out)
}
commentText := "Cross-backend round-trip"
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
t.Fatalf("bd comments add failed: %v\n%s", err, out)
}
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
t.Fatalf("bd dep add failed: %v\n%s", err, out)
}
// Create tombstone via delete (SQLite supports tombstones).
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "delete", idB, "--force", "--reason", "test tombstone"); err != nil {
t.Fatalf("bd delete failed: %v\n%s", err, out)
}
jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
t.Fatalf("bd export failed: %v\n%s", err, out)
}
issues1 := readJSONLIssues(t, jsonl1)
// The export must include the tombstoned issue, not filter it out.
if len(issues1) != 2 {
t.Fatalf("expected 2 issues in sqlite export (including tombstone), got %d", len(issues1))
}
if issues1[idB].Status != types.StatusTombstone {
t.Fatalf("expected %s to be tombstone in sqlite export, got %q", idB, issues1[idB].Status)
}
// Capture the comment timestamp for the fidelity comparison below.
ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
if !ok || ts1.IsZero() {
t.Fatalf("expected comment on %s in sqlite export", idA)
}
// Workspace 2: Dolt import JSONL -> export JSONL
ws2 := createTempDirWithCleanup(t)
setupGitRepoForIntegration(t, ws2)
initOut, initErr := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
if initErr != nil {
// Skip (not fail) when the Dolt backend is unavailable in this build.
if isDoltBackendUnavailable(initOut) {
t.Skipf("dolt backend not available: %s", initOut)
}
t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
}
jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
data, err := os.ReadFile(jsonl1)
if err != nil {
t.Fatalf("read sqlite export: %v", err)
}
if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
t.Fatalf("write dolt issues.jsonl: %v", err)
}
if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
t.Fatalf("bd import (dolt) failed: %v\n%s", err, out)
}
jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
t.Fatalf("bd export (dolt) failed: %v\n%s", err, out)
}
issues2 := readJSONLIssues(t, jsonl2out)
if len(issues2) != 2 {
t.Fatalf("expected 2 issues in dolt export, got %d", len(issues2))
}
if issues2[idB].Status != types.StatusTombstone {
t.Fatalf("expected %s to be tombstone after import into dolt, got %q", idB, issues2[idB].Status)
}
ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
if !ok {
t.Fatalf("expected comment on %s in dolt export", idA)
}
// The comment timestamp must match exactly (nanosecond precision).
if !ts2.Equal(ts1) {
t.Fatalf("expected comment timestamp preserved across sqlite->dolt, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
}
}
// TestDoltToSQLite_JSONLRoundTrip is the mirror of the SQLite->Dolt test:
// issues (label, comment, dependency) are created in a Dolt workspace,
// exported to JSONL, a tombstone record is injected into the JSONL, and the
// result is imported into a fresh SQLite workspace and re-exported. The
// tombstone status and exact comment timestamp must survive the round trip.
func TestDoltToSQLite_JSONLRoundTrip(t *testing.T) {
if testing.Short() {
t.Skip("skipping slow integration test in short mode")
}
if runtime.GOOS == windowsOS {
t.Skip("cross-backend integration test not supported on windows")
}
// Hermetic bd invocations: test mode on, daemon disabled.
env := []string{
"BEADS_TEST_MODE=1",
"BEADS_NO_DAEMON=1",
}
// Workspace 1: Dolt create -> export JSONL
ws1 := createTempDirWithCleanup(t)
setupGitRepoForIntegration(t, ws1)
initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
if initErr != nil {
// Skip (not fail) when the Dolt backend is unavailable in this build.
if isDoltBackendUnavailable(initOut) {
t.Skipf("dolt backend not available: %s", initOut)
}
t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
}
outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
if err != nil {
t.Fatalf("bd create A failed: %v\n%s", err, outA)
}
idA := parseCreateID(t, outA)
outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
if err != nil {
t.Fatalf("bd create B failed: %v\n%s", err, outB)
}
idB := parseCreateID(t, outB)
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
t.Fatalf("bd label add failed: %v\n%s", err, out)
}
commentText := "Cross-backend round-trip"
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
t.Fatalf("bd comments add failed: %v\n%s", err, out)
}
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
t.Fatalf("bd dep add failed: %v\n%s", err, out)
}
jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
t.Fatalf("bd export (dolt) failed: %v\n%s", err, out)
}
issues1 := readJSONLIssues(t, jsonl1)
if len(issues1) != 2 {
t.Fatalf("expected 2 issues in dolt export, got %d", len(issues1))
}
// Capture the comment timestamp for the fidelity comparison below.
ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
if !ok || ts1.IsZero() {
t.Fatalf("expected comment on %s in dolt export", idA)
}
// Inject tombstone record for B into JSONL (Dolt backend may not support bd delete tombstones).
now := time.Now().UTC()
issues1[idB].Status = types.StatusTombstone
issues1[idB].DeletedAt = &now
issues1[idB].DeletedBy = "test"
issues1[idB].DeleteReason = "test tombstone"
issues1[idB].OriginalType = string(issues1[idB].IssueType)
issues1[idB].SetDefaults()
jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl")
writeJSONLIssues(t, jsonl1Tomb, issues1)
// Workspace 2: SQLite import JSONL -> export JSONL
ws2 := createTempDirWithCleanup(t)
setupGitRepoForIntegration(t, ws2)
if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil {
t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out)
}
jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
data, err := os.ReadFile(jsonl1Tomb)
if err != nil {
t.Fatalf("read dolt export: %v", err)
}
if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
t.Fatalf("write sqlite issues.jsonl: %v", err)
}
if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
t.Fatalf("bd import (sqlite) failed: %v\n%s", err, out)
}
jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
t.Fatalf("bd export (sqlite) failed: %v\n%s", err, out)
}
issues2 := readJSONLIssues(t, jsonl2out)
if len(issues2) != 2 {
t.Fatalf("expected 2 issues in sqlite export, got %d", len(issues2))
}
if issues2[idB].Status != types.StatusTombstone {
t.Fatalf("expected %s to be tombstone after import into sqlite, got %q", idB, issues2[idB].Status)
}
ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
if !ok {
t.Fatalf("expected comment on %s in sqlite export", idA)
}
// The comment timestamp must match exactly (nanosecond precision).
if !ts2.Equal(ts1) {
t.Fatalf("expected comment timestamp preserved across dolt->sqlite, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
}
}

View File

@@ -510,7 +510,7 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
func CheckDatabaseJSONLSync(path string) DoctorCheck {
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: JSONL is a derived compatibility artifact (export-only today).
// Dolt backend: JSONL is an optional compatibility artifact.
// The SQLite-style import/export divergence checks don't apply.
if backend == configfile.BackendDolt {
// Find JSONL file (respects metadata.json override when set).
@@ -545,7 +545,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
Name: "DB-JSONL Sync",
Status: StatusOK,
Message: "N/A (dolt backend)",
Detail: "JSONL is derived from Dolt (export-only); import-only sync checks do not apply",
Detail: "Dolt sync is database-native; JSONL divergence checks do not apply (manual JSONL import/export is supported).",
}
}

View File

@@ -780,14 +780,20 @@ func TestMergeDriverWithLockedConfig_E2E(t *testing.T) {
dir := setupTestGitRepo(t)
gitDir := filepath.Join(dir, ".git")
gitConfigPath := filepath.Join(dir, ".git", "config")
// Make git config read-only
if err := os.Chmod(gitConfigPath, 0444); err != nil {
// Make both .git directory and config file read-only to truly prevent writes.
// Git may otherwise write via lockfile+rename even if the config file itself is read-only.
if err := os.Chmod(gitConfigPath, 0400); err != nil {
t.Fatalf("failed to make config read-only: %v", err)
}
if err := os.Chmod(gitDir, 0500); err != nil {
t.Fatalf("failed to make .git read-only: %v", err)
}
defer func() {
// Restore permissions for cleanup
_ = os.Chmod(gitDir, 0755)
_ = os.Chmod(gitConfigPath, 0644)
}()

View File

@@ -68,7 +68,7 @@ func CheckSyncDivergence(path string) DoctorCheck {
}
// Check 2: SQLite last_import_time vs JSONL mtime (SQLite only).
// Dolt backend does not maintain SQLite metadata and does not support import-only sync.
// Dolt backend does not maintain SQLite metadata; this SQLite-only check doesn't apply.
if backend == configfile.BackendSQLite {
mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir)
if mtimeIssue != nil {

View File

@@ -0,0 +1,288 @@
//go:build integration
// +build integration
package main
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// parseCreateID extracts the "id" field from the JSON object embedded in the
// output of `bd create --json`. Any prefix text before the first '{' is
// ignored; the test fails if no JSON or no id is found.
func parseCreateID(t *testing.T, out string) string {
	t.Helper()
	start := strings.Index(out, "{")
	if start < 0 {
		t.Fatalf("expected JSON in output, got:\n%s", out)
	}
	var payload map[string]any
	if err := json.Unmarshal([]byte(out[start:]), &payload); err != nil {
		t.Fatalf("failed to parse create JSON: %v\n%s", err, out)
	}
	id, _ := payload["id"].(string)
	if id == "" {
		t.Fatalf("missing id in create output:\n%s", out)
	}
	return id
}
// readJSONLIssues parses a JSONL export file into a map of issues keyed by ID.
// Blank lines are skipped, and SetDefaults is applied to each issue so results
// are comparable across backends. The test fails on any I/O or decode error.
func readJSONLIssues(t *testing.T, path string) map[string]*types.Issue {
	t.Helper()
	f, err := os.Open(path) // #nosec G304 -- test-controlled path
	if err != nil {
		t.Fatalf("open %s: %v", path, err)
	}
	defer func() { _ = f.Close() }()
	scanner := bufio.NewScanner(f)
	// Allow issue records larger than bufio.Scanner's default 64 KiB token limit.
	scanner.Buffer(make([]byte, 0, 64*1024), 2*1024*1024)
	out := make(map[string]*types.Issue)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		// Allocate a fresh issue per line. The previous `copy := iss` both
		// shadowed the builtin copy and was unnecessary: a loop-scoped
		// variable is already a distinct allocation per iteration.
		iss := new(types.Issue)
		if err := json.Unmarshal([]byte(line), iss); err != nil {
			t.Fatalf("unmarshal JSONL line: %v\nline=%s", err, line)
		}
		iss.SetDefaults()
		out[iss.ID] = iss
	}
	if err := scanner.Err(); err != nil {
		t.Fatalf("scan %s: %v", path, err)
	}
	return out
}
// writeJSONLIssues writes the given issues to path as JSONL, one JSON object
// per line, in ascending ID order so output is deterministic. Nil entries are
// skipped; the test fails on any I/O or encode error.
func writeJSONLIssues(t *testing.T, path string, issues map[string]*types.Issue) {
	t.Helper()
	f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600) // #nosec G304 -- test-controlled path
	if err != nil {
		t.Fatalf("open %s for write: %v", path, err)
	}
	defer func() { _ = f.Close() }()
	// Sort IDs for a stable line order.
	keys := make([]string, 0, len(issues))
	for k := range issues {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	bw := bufio.NewWriter(f)
	for _, k := range keys {
		rec := issues[k]
		if rec == nil {
			continue
		}
		encoded, err := json.Marshal(rec)
		if err != nil {
			t.Fatalf("marshal issue %s: %v", k, err)
		}
		if _, err := bw.Write(encoded); err != nil {
			t.Fatalf("write issue %s: %v", k, err)
		}
		if err := bw.WriteByte('\n'); err != nil {
			t.Fatalf("write issue %s: %v", k, err)
		}
	}
	if err := bw.Flush(); err != nil {
		t.Fatalf("flush %s: %v", path, err)
	}
}
// findCommentTimestamp returns the CreatedAt of the first comment on iss whose
// author matches and whose whitespace-trimmed text equals the trimmed target
// text. The second result is false when iss is nil or no comment matches.
func findCommentTimestamp(iss *types.Issue, author, text string) (time.Time, bool) {
	if iss == nil {
		return time.Time{}, false
	}
	want := strings.TrimSpace(text)
	for _, c := range iss.Comments {
		if c.Author != author {
			continue
		}
		if strings.TrimSpace(c.Text) == want {
			return c.CreatedAt, true
		}
	}
	return time.Time{}, false
}
// findCommentTimestampByText returns the CreatedAt of the first comment on iss
// whose whitespace-trimmed text equals the trimmed target text, regardless of
// author. The second result is false when iss is nil or no comment matches.
func findCommentTimestampByText(iss *types.Issue, text string) (time.Time, bool) {
	if iss == nil {
		return time.Time{}, false
	}
	want := strings.TrimSpace(text)
	for _, c := range iss.Comments {
		if strings.TrimSpace(c.Text) == want {
			return c.CreatedAt, true
		}
	}
	return time.Time{}, false
}
// TestDoltJSONLRoundTrip_DepsLabelsCommentsTombstones verifies that a Dolt
// workspace round-trips dependencies, labels, comments (with exact
// timestamps), and tombstone records through JSONL export -> import ->
// export. Uses the shared setupGitRepoForIntegration and
// isDoltBackendUnavailable helpers instead of duplicating their logic inline,
// keeping skip behavior consistent across the integration tests.
func TestDoltJSONLRoundTrip_DepsLabelsCommentsTombstones(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("dolt integration test not supported on windows")
	}
	// Workspace 1: create data and export JSONL.
	ws1 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws1)
	// Hermetic bd invocations: test mode on, daemon disabled.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_NO_DAEMON=1",
	}
	initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// Skip (not fail) when the Dolt backend is unavailable in this build.
		if isDoltBackendUnavailable(initOut) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
	if err != nil {
		t.Fatalf("bd create A failed: %v\n%s", err, outA)
	}
	idA := parseCreateID(t, outA)
	outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
	if err != nil {
		t.Fatalf("bd create B failed: %v\n%s", err, outB)
	}
	idB := parseCreateID(t, outB)
	// Add label + comment + dependency.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
		t.Fatalf("bd label add failed: %v\n%s", err, out)
	}
	commentText := "Hello from JSONL round-trip"
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
		t.Fatalf("bd comments add failed: %v\n%s", err, out)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
		t.Fatalf("bd dep add failed: %v\n%s", err, out)
	}
	jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
		t.Fatalf("bd export failed: %v\n%s", err, out)
	}
	issues1 := readJSONLIssues(t, jsonl1)
	if len(issues1) != 2 {
		t.Fatalf("expected 2 issues in export1, got %d", len(issues1))
	}
	if issues1[idA] == nil || issues1[idB] == nil {
		t.Fatalf("expected exported issues to include %s and %s", idA, idB)
	}
	// Label present
	foundUrgent := false
	for _, l := range issues1[idA].Labels {
		if l == "urgent" {
			foundUrgent = true
			break
		}
	}
	if !foundUrgent {
		t.Fatalf("expected label 'urgent' on %s in export1", idA)
	}
	// Dependency present
	foundDep := false
	for _, d := range issues1[idA].Dependencies {
		if d.DependsOnID == idB {
			foundDep = true
			break
		}
	}
	if !foundDep {
		t.Fatalf("expected dependency %s -> %s in export1", idA, idB)
	}
	// Comment present + capture timestamp
	ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
	if !ok || ts1.IsZero() {
		t.Fatalf("expected comment on %s in export1", idA)
	}
	// Create a tombstone record in JSONL for issue B (Dolt backend may not support
	// creating tombstones via `bd delete`, but it must round-trip tombstones via JSONL).
	now := time.Now().UTC()
	issues1[idB].Status = types.StatusTombstone
	issues1[idB].DeletedAt = &now
	issues1[idB].DeletedBy = "test"
	issues1[idB].DeleteReason = "test tombstone"
	issues1[idB].OriginalType = string(issues1[idB].IssueType)
	issues1[idB].SetDefaults()
	jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl")
	writeJSONLIssues(t, jsonl1Tomb, issues1)
	issues1Tomb := readJSONLIssues(t, jsonl1Tomb)
	if issues1Tomb[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in tombstone JSONL, got %q", idB, issues1Tomb[idB].Status)
	}
	// Workspace 2: import JSONL into fresh Dolt DB and re-export.
	ws2 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws2)
	initOut2, initErr2 := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr2 != nil {
		if isDoltBackendUnavailable(initOut2) {
			t.Skipf("dolt backend not available: %s", initOut2)
		}
		t.Fatalf("bd init --backend dolt (ws2) failed: %v\n%s", initErr2, initOut2)
	}
	// Copy JSONL into ws2 beads dir
	jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
	data, err := os.ReadFile(jsonl1Tomb)
	if err != nil {
		t.Fatalf("read export1: %v", err)
	}
	if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
		t.Fatalf("write ws2 issues.jsonl: %v", err)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
		t.Fatalf("bd import failed: %v\n%s", err, out)
	}
	jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
		t.Fatalf("bd export (ws2) failed: %v\n%s", err, out)
	}
	issues2 := readJSONLIssues(t, jsonl2out)
	if len(issues2) != 2 {
		t.Fatalf("expected 2 issues in export2 (including tombstone), got %d", len(issues2))
	}
	if issues2[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in export2, got %q", idB, issues2[idB].Status)
	}
	// Ensure comment timestamp preserved across import/export
	ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
	if !ok {
		t.Fatalf("expected comment on %s in export2", idA)
	}
	if !ts2.Equal(ts1) {
		t.Fatalf("expected comment timestamp preserved, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
	}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/util"
@@ -181,7 +182,10 @@ Examples:
fmt.Fprintf(os.Stderr, "Error: no database path found\n")
os.Exit(1)
}
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout)
beadsDir := filepath.Dir(dbPath)
store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{
LockTimeout: lockTimeout,
})
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -396,14 +400,26 @@ Examples:
issue.Dependencies = allDeps[issue.ID]
}
// Populate labels for all issues
// Populate labels and comments for all issues (batch APIs)
ids := make([]string, 0, len(issues))
for _, issue := range issues {
labels, err := store.GetLabels(ctx, issue.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting labels for %s: %v\n", issue.ID, err)
os.Exit(1)
}
issue.Labels = labels
ids = append(ids, issue.ID)
}
labelsMap, err := store.GetLabelsForIssues(ctx, ids)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting labels: %v\n", err)
os.Exit(1)
}
commentsMap, err := store.GetCommentsForIssues(ctx, ids)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting comments: %v\n", err)
os.Exit(1)
}
for _, issue := range issues {
issue.Labels = labelsMap[issue.ID]
issue.Comments = commentsMap[issue.ID]
}
// Open output

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"strings"
"syscall"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/storage"
@@ -282,10 +281,10 @@ func runFederationStatus(cmd *cobra.Command, args []string) {
// Collect status for each peer
type peerStatus struct {
Status *storage.SyncStatus
URL string
Reachable bool
ReachError string
Status *storage.SyncStatus
URL string
Reachable bool
ReachError string
}
var peerStatuses []peerStatus
@@ -374,7 +373,7 @@ func runFederationAddPeer(cmd *cobra.Command, args []string) {
password := federationPassword
if federationUser != "" && password == "" {
fmt.Fprint(os.Stderr, "Password: ")
pwBytes, err := term.ReadPassword(int(syscall.Stdin))
pwBytes, err := term.ReadPassword(int(os.Stdin.Fd()))
fmt.Fprintln(os.Stderr) // newline after password
if err != nil {
FatalErrorRespectJSON("failed to read password: %v", err)

View File

@@ -638,7 +638,7 @@ func hookPostMergeDolt(beadsDir string) int {
doltStore, ok := store.(interface {
Branch(ctx context.Context, name string) error
Checkout(ctx context.Context, branch string) error
Merge(ctx context.Context, branch string) error
Merge(ctx context.Context, branch string) ([]storage.Conflict, error)
Commit(ctx context.Context, message string) error
CurrentBranch(ctx context.Context) (string, error)
DeleteBranch(ctx context.Context, branch string) error
@@ -691,10 +691,15 @@ func hookPostMergeDolt(beadsDir string) int {
}
// Merge import branch (Dolt provides cell-level merge)
if err := doltStore.Merge(ctx, importBranch); err != nil {
conflicts, err := doltStore.Merge(ctx, importBranch)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not merge import branch: %v\n", err)
return 0
}
if len(conflicts) > 0 {
fmt.Fprintf(os.Stderr, "Warning: %d conflict(s) detected during Dolt merge; resolve with 'bd federation conflicts' or Dolt conflict tooling\n", len(conflicts))
// Best-effort: still return 0 to avoid blocking git merge, consistent with other hook warnings.
}
// Commit the merge
if err := doltStore.Commit(ctx, "Merge JSONL import"); err != nil {
@@ -839,7 +844,6 @@ func hookPostCheckout(args []string) int {
// =============================================================================
// importFromJSONLToStore imports issues from JSONL to a store.
// This is a placeholder - the actual implementation should use the store's methods.
func importFromJSONLToStore(ctx context.Context, store storage.Storage, jsonlPath string) error {
// Parse JSONL into issues
// #nosec G304 - jsonlPath is derived from beadsDir (trusted workspace path)

View File

@@ -16,7 +16,7 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/utils"
"golang.org/x/term"
@@ -74,10 +74,12 @@ NOTE: Import requires direct database access and does not work with daemon mode.
daemonClient = nil
var err error
store, err = sqlite.New(rootCtx, dbPath)
beadsDir := filepath.Dir(dbPath)
store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{
LockTimeout: lockTimeout,
})
if err != nil {
// Check for fresh clone scenario
beadsDir := filepath.Dir(dbPath)
if handleFreshCloneError(err, beadsDir) {
os.Exit(1)
}