/{cmd,docs,internal}: support import export for dolt backends

This commit is contained in:
Test
2026-01-21 13:13:24 -08:00
parent 4a0f4abc70
commit b849f598d7
23 changed files with 1837 additions and 226 deletions
@@ -0,0 +1,259 @@
//go:build integration
// +build integration
package main
import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// isDoltBackendUnavailable reports whether CLI output indicates that the Dolt
// backend is missing or unsupported in this build/environment. Matching is
// case-insensitive: the output must mention "dolt" together with one of the
// known unavailability markers. Callers use this to skip rather than fail.
func isDoltBackendUnavailable(out string) bool {
	msg := strings.ToLower(out)
	if !strings.Contains(msg, "dolt") {
		return false
	}
	for _, marker := range []string{"not supported", "not available", "unknown"} {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
// setupGitRepoForIntegration initializes dir as a git repository and sets a
// throwaway user identity so later git operations cannot fail on missing
// config. A failed `git init` is fatal; the identity settings are best-effort
// and their errors are deliberately ignored.
func setupGitRepoForIntegration(t *testing.T, dir string) {
	t.Helper()
	if err := runCommandInDir(dir, "git", "init"); err != nil {
		t.Fatalf("git init failed: %v", err)
	}
	for _, kv := range [][2]string{
		{"user.email", "test@example.com"},
		{"user.name", "Test User"},
	} {
		// Best-effort: a pre-existing global identity makes these redundant.
		_ = runCommandInDir(dir, "git", "config", kv[0], kv[1])
	}
}
// TestSQLiteToDolt_JSONLRoundTrip verifies that data created under the SQLite
// backend (two issues plus a label, a comment, a dependency, and a delete
// tombstone) survives: JSONL export from SQLite -> import into a fresh Dolt
// workspace -> JSONL re-export, with the tombstone status and the comment
// timestamp preserved exactly.
func TestSQLiteToDolt_JSONLRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("cross-backend integration test not supported on windows")
	}
	// Environment shared by every bd invocation in this test.
	// NOTE(review): BEADS_NO_DAEMON presumably forces direct DB access
	// (no daemon) — mirrors the other integration tests; confirm in docs.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_NO_DAEMON=1",
	}
	// Workspace 1: SQLite create -> export JSONL
	ws1 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws1)
	// Explicitly initialize sqlite for clarity.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil {
		t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out)
	}
	outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
	if err != nil {
		t.Fatalf("bd create A failed: %v\n%s", err, outA)
	}
	idA := parseCreateID(t, outA)
	outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
	if err != nil {
		t.Fatalf("bd create B failed: %v\n%s", err, outB)
	}
	idB := parseCreateID(t, outB)
	// Add label + comment + dependency.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
		t.Fatalf("bd label add failed: %v\n%s", err, out)
	}
	commentText := "Cross-backend round-trip"
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
		t.Fatalf("bd comments add failed: %v\n%s", err, out)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
		t.Fatalf("bd dep add failed: %v\n%s", err, out)
	}
	// Create tombstone via delete (SQLite supports tombstones).
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "delete", idB, "--force", "--reason", "test tombstone"); err != nil {
		t.Fatalf("bd delete failed: %v\n%s", err, out)
	}
	jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
		t.Fatalf("bd export failed: %v\n%s", err, out)
	}
	// Sanity-check the SQLite export before feeding it to Dolt: both issues
	// present (tombstones are exported, not dropped) and the comment exists.
	issues1 := readJSONLIssues(t, jsonl1)
	if len(issues1) != 2 {
		t.Fatalf("expected 2 issues in sqlite export (including tombstone), got %d", len(issues1))
	}
	if issues1[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in sqlite export, got %q", idB, issues1[idB].Status)
	}
	ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
	if !ok || ts1.IsZero() {
		t.Fatalf("expected comment on %s in sqlite export", idA)
	}
	// Workspace 2: Dolt import JSONL -> export JSONL
	ws2 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws2)
	initOut, initErr := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// Skip (not fail) when this environment has no Dolt support.
		if isDoltBackendUnavailable(initOut) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	// Copy ws1's export into ws2's .beads dir, then import it into Dolt.
	jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
	data, err := os.ReadFile(jsonl1)
	if err != nil {
		t.Fatalf("read sqlite export: %v", err)
	}
	if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
		t.Fatalf("write dolt issues.jsonl: %v", err)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
		t.Fatalf("bd import (dolt) failed: %v\n%s", err, out)
	}
	// Re-export from Dolt to a separate file and compare with the original.
	jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
		t.Fatalf("bd export (dolt) failed: %v\n%s", err, out)
	}
	issues2 := readJSONLIssues(t, jsonl2out)
	if len(issues2) != 2 {
		t.Fatalf("expected 2 issues in dolt export, got %d", len(issues2))
	}
	if issues2[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone after import into dolt, got %q", idB, issues2[idB].Status)
	}
	// The comment must survive with its original timestamp, byte-exact.
	ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
	if !ok {
		t.Fatalf("expected comment on %s in dolt export", idA)
	}
	if !ts2.Equal(ts1) {
		t.Fatalf("expected comment timestamp preserved across sqlite->dolt, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
	}
}
// TestDoltToSQLite_JSONLRoundTrip is the reverse direction of
// TestSQLiteToDolt_JSONLRoundTrip: data created under the Dolt backend is
// exported to JSONL, a tombstone record is injected by hand (see the inline
// note about `bd delete` on Dolt), and the result is imported into a fresh
// SQLite workspace and re-exported. Tombstone status and comment timestamps
// must survive unchanged.
func TestDoltToSQLite_JSONLRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("cross-backend integration test not supported on windows")
	}
	// Environment shared by every bd invocation in this test.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_NO_DAEMON=1",
	}
	// Workspace 1: Dolt create -> export JSONL
	ws1 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws1)
	initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// Skip (not fail) when this environment has no Dolt support.
		if isDoltBackendUnavailable(initOut) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
	if err != nil {
		t.Fatalf("bd create A failed: %v\n%s", err, outA)
	}
	idA := parseCreateID(t, outA)
	outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
	if err != nil {
		t.Fatalf("bd create B failed: %v\n%s", err, outB)
	}
	idB := parseCreateID(t, outB)
	// Add label + comment + dependency on issue A.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
		t.Fatalf("bd label add failed: %v\n%s", err, out)
	}
	commentText := "Cross-backend round-trip"
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
		t.Fatalf("bd comments add failed: %v\n%s", err, out)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
		t.Fatalf("bd dep add failed: %v\n%s", err, out)
	}
	jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
		t.Fatalf("bd export (dolt) failed: %v\n%s", err, out)
	}
	// Sanity-check the Dolt export before mutating it.
	issues1 := readJSONLIssues(t, jsonl1)
	if len(issues1) != 2 {
		t.Fatalf("expected 2 issues in dolt export, got %d", len(issues1))
	}
	ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
	if !ok || ts1.IsZero() {
		t.Fatalf("expected comment on %s in dolt export", idA)
	}
	// Inject tombstone record for B into JSONL (Dolt backend may not support bd delete tombstones).
	now := time.Now().UTC()
	issues1[idB].Status = types.StatusTombstone
	issues1[idB].DeletedAt = &now
	issues1[idB].DeletedBy = "test"
	issues1[idB].DeleteReason = "test tombstone"
	issues1[idB].OriginalType = string(issues1[idB].IssueType)
	issues1[idB].SetDefaults()
	jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl")
	writeJSONLIssues(t, jsonl1Tomb, issues1)
	// Workspace 2: SQLite import JSONL -> export JSONL
	ws2 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws2)
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil {
		t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out)
	}
	// Copy the tombstone-bearing JSONL into ws2 and import it into SQLite.
	jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
	data, err := os.ReadFile(jsonl1Tomb)
	if err != nil {
		t.Fatalf("read dolt export: %v", err)
	}
	if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
		t.Fatalf("write sqlite issues.jsonl: %v", err)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
		t.Fatalf("bd import (sqlite) failed: %v\n%s", err, out)
	}
	// Re-export from SQLite and verify the round-trip invariants.
	jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
		t.Fatalf("bd export (sqlite) failed: %v\n%s", err, out)
	}
	issues2 := readJSONLIssues(t, jsonl2out)
	if len(issues2) != 2 {
		t.Fatalf("expected 2 issues in sqlite export, got %d", len(issues2))
	}
	if issues2[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone after import into sqlite, got %q", idB, issues2[idB].Status)
	}
	ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
	if !ok {
		t.Fatalf("expected comment on %s in sqlite export", idA)
	}
	if !ts2.Equal(ts1) {
		t.Fatalf("expected comment timestamp preserved across dolt->sqlite, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
	}
}
+2 -2
View File
@@ -510,7 +510,7 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
func CheckDatabaseJSONLSync(path string) DoctorCheck { func CheckDatabaseJSONLSync(path string) DoctorCheck {
backend, beadsDir := getBackendAndBeadsDir(path) backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: JSONL is a derived compatibility artifact (export-only today). // Dolt backend: JSONL is an optional compatibility artifact.
// The SQLite-style import/export divergence checks don't apply. // The SQLite-style import/export divergence checks don't apply.
if backend == configfile.BackendDolt { if backend == configfile.BackendDolt {
// Find JSONL file (respects metadata.json override when set). // Find JSONL file (respects metadata.json override when set).
@@ -545,7 +545,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
Name: "DB-JSONL Sync", Name: "DB-JSONL Sync",
Status: StatusOK, Status: StatusOK,
Message: "N/A (dolt backend)", Message: "N/A (dolt backend)",
Detail: "JSONL is derived from Dolt (export-only); import-only sync checks do not apply", Detail: "Dolt sync is database-native; JSONL divergence checks do not apply (manual JSONL import/export is supported).",
} }
} }
+8 -2
View File
@@ -780,14 +780,20 @@ func TestMergeDriverWithLockedConfig_E2E(t *testing.T) {
dir := setupTestGitRepo(t) dir := setupTestGitRepo(t)
gitDir := filepath.Join(dir, ".git")
gitConfigPath := filepath.Join(dir, ".git", "config") gitConfigPath := filepath.Join(dir, ".git", "config")
// Make git config read-only // Make both .git directory and config file read-only to truly prevent writes.
if err := os.Chmod(gitConfigPath, 0444); err != nil { // Git may otherwise write via lockfile+rename even if the config file itself is read-only.
if err := os.Chmod(gitConfigPath, 0400); err != nil {
t.Fatalf("failed to make config read-only: %v", err) t.Fatalf("failed to make config read-only: %v", err)
} }
if err := os.Chmod(gitDir, 0500); err != nil {
t.Fatalf("failed to make .git read-only: %v", err)
}
defer func() { defer func() {
// Restore permissions for cleanup // Restore permissions for cleanup
_ = os.Chmod(gitDir, 0755)
_ = os.Chmod(gitConfigPath, 0644) _ = os.Chmod(gitConfigPath, 0644)
}() }()
+1 -1
View File
@@ -68,7 +68,7 @@ func CheckSyncDivergence(path string) DoctorCheck {
} }
// Check 2: SQLite last_import_time vs JSONL mtime (SQLite only). // Check 2: SQLite last_import_time vs JSONL mtime (SQLite only).
// Dolt backend does not maintain SQLite metadata and does not support import-only sync. // Dolt backend does not maintain SQLite metadata; this SQLite-only check doesn't apply.
if backend == configfile.BackendSQLite { if backend == configfile.BackendSQLite {
mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir) mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir)
if mtimeIssue != nil { if mtimeIssue != nil {
@@ -0,0 +1,288 @@
//go:build integration
// +build integration
package main
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// parseCreateID extracts the "id" field from `bd create --json` output.
// The CLI may print human-readable text before the JSON object, so parsing
// starts at the first '{'. Fails the test if no JSON object is found, the
// JSON is malformed, or the id field is absent/empty.
func parseCreateID(t *testing.T, out string) string {
	// t.Helper() attributes Fatalf failures to the caller's line, matching
	// the other helpers in this package (e.g. setupGitRepoForIntegration).
	t.Helper()
	idx := strings.Index(out, "{")
	if idx < 0 {
		t.Fatalf("expected JSON in output, got:\n%s", out)
	}
	var m map[string]any
	if err := json.Unmarshal([]byte(out[idx:]), &m); err != nil {
		t.Fatalf("failed to parse create JSON: %v\n%s", err, out)
	}
	id, _ := m["id"].(string)
	if id == "" {
		t.Fatalf("missing id in create output:\n%s", out)
	}
	return id
}
// readJSONLIssues parses a JSONL export file into a map keyed by issue ID.
// Blank lines are skipped; any malformed line fails the test. SetDefaults is
// applied to each issue so later comparisons see normalized values.
func readJSONLIssues(t *testing.T, path string) map[string]*types.Issue {
	t.Helper()
	f, err := os.Open(path) // #nosec G304 -- test-controlled path
	if err != nil {
		t.Fatalf("open %s: %v", path, err)
	}
	defer func() { _ = f.Close() }()
	scanner := bufio.NewScanner(f)
	// Allow issues larger than bufio.Scanner's default 64KB token limit.
	scanner.Buffer(make([]byte, 0, 64*1024), 2*1024*1024)
	out := make(map[string]*types.Issue)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		// iss is declared inside the loop body, so each iteration gets a
		// fresh variable: taking its address directly is safe and avoids
		// the previous `copy := iss` temporary, which also shadowed the
		// builtin copy().
		var iss types.Issue
		if err := json.Unmarshal([]byte(line), &iss); err != nil {
			t.Fatalf("unmarshal JSONL line: %v\nline=%s", err, line)
		}
		iss.SetDefaults()
		out[iss.ID] = &iss
	}
	if err := scanner.Err(); err != nil {
		t.Fatalf("scan %s: %v", path, err)
	}
	return out
}
// writeJSONLIssues writes issues to path as JSONL, one JSON object per line,
// in sorted-ID order so output is deterministic. Nil map entries are skipped.
// The file is created (or truncated) with 0600 permissions.
func writeJSONLIssues(t *testing.T, path string, issues map[string]*types.Issue) {
	t.Helper()
	f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600) // #nosec G304 -- test-controlled path
	if err != nil {
		t.Fatalf("open %s for write: %v", path, err)
	}
	defer func() { _ = f.Close() }()
	// Sort IDs so the JSONL output is stable across runs.
	ids := make([]string, 0, len(issues))
	for id := range issues {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	w := bufio.NewWriter(f)
	for _, id := range ids {
		iss := issues[id]
		if iss == nil {
			continue
		}
		encoded, err := json.Marshal(iss)
		if err != nil {
			t.Fatalf("marshal issue %s: %v", id, err)
		}
		if _, err := w.Write(encoded); err != nil {
			t.Fatalf("write issue %s: %v", id, err)
		}
		if err := w.WriteByte('\n'); err != nil {
			t.Fatalf("write issue %s: %v", id, err)
		}
	}
	if err := w.Flush(); err != nil {
		t.Fatalf("flush %s: %v", path, err)
	}
}
// findCommentTimestamp returns the CreatedAt of the first comment on iss whose
// author matches exactly and whose text matches after whitespace trimming.
// The boolean is false when iss is nil or no matching comment exists.
func findCommentTimestamp(iss *types.Issue, author, text string) (time.Time, bool) {
	if iss == nil {
		return time.Time{}, false
	}
	want := strings.TrimSpace(text)
	for _, c := range iss.Comments {
		if c.Author != author {
			continue
		}
		if strings.TrimSpace(c.Text) == want {
			return c.CreatedAt, true
		}
	}
	return time.Time{}, false
}
// findCommentTimestampByText returns the CreatedAt of the first comment on
// iss whose text matches after whitespace trimming, regardless of author.
// The boolean is false when iss is nil or no matching comment exists.
func findCommentTimestampByText(iss *types.Issue, text string) (time.Time, bool) {
	if iss == nil {
		return time.Time{}, false
	}
	want := strings.TrimSpace(text)
	for _, c := range iss.Comments {
		if strings.TrimSpace(c.Text) == want {
			return c.CreatedAt, true
		}
	}
	return time.Time{}, false
}
// TestDoltJSONLRoundTrip_DepsLabelsCommentsTombstones verifies that the Dolt
// backend round-trips dependencies, labels, comments, and tombstones through
// JSONL: create data in ws1 and export; inject a tombstone record into the
// JSONL by hand; import into a fresh Dolt workspace (ws2) and re-export; then
// assert the tombstone and the comment timestamp survived unchanged.
//
// Consistency fix: uses the package helpers setupGitRepoForIntegration and
// isDoltBackendUnavailable instead of duplicating their logic inline.
func TestDoltJSONLRoundTrip_DepsLabelsCommentsTombstones(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("dolt integration test not supported on windows")
	}
	// Environment shared by every bd invocation in this test.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_NO_DAEMON=1",
	}
	// Workspace 1: create data and export JSONL.
	ws1 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws1)
	initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// Skip (not fail) when this environment has no Dolt support.
		if isDoltBackendUnavailable(initOut) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
	if err != nil {
		t.Fatalf("bd create A failed: %v\n%s", err, outA)
	}
	idA := parseCreateID(t, outA)
	outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
	if err != nil {
		t.Fatalf("bd create B failed: %v\n%s", err, outB)
	}
	idB := parseCreateID(t, outB)
	// Add label + comment + dependency.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
		t.Fatalf("bd label add failed: %v\n%s", err, out)
	}
	commentText := "Hello from JSONL round-trip"
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
		t.Fatalf("bd comments add failed: %v\n%s", err, out)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
		t.Fatalf("bd dep add failed: %v\n%s", err, out)
	}
	jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
		t.Fatalf("bd export failed: %v\n%s", err, out)
	}
	issues1 := readJSONLIssues(t, jsonl1)
	if len(issues1) != 2 {
		t.Fatalf("expected 2 issues in export1, got %d", len(issues1))
	}
	if issues1[idA] == nil || issues1[idB] == nil {
		t.Fatalf("expected exported issues to include %s and %s", idA, idB)
	}
	// Label present
	foundUrgent := false
	for _, l := range issues1[idA].Labels {
		if l == "urgent" {
			foundUrgent = true
			break
		}
	}
	if !foundUrgent {
		t.Fatalf("expected label 'urgent' on %s in export1", idA)
	}
	// Dependency present
	foundDep := false
	for _, d := range issues1[idA].Dependencies {
		if d.DependsOnID == idB {
			foundDep = true
			break
		}
	}
	if !foundDep {
		t.Fatalf("expected dependency %s -> %s in export1", idA, idB)
	}
	// Comment present + capture timestamp
	ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
	if !ok || ts1.IsZero() {
		t.Fatalf("expected comment on %s in export1", idA)
	}
	// Create a tombstone record in JSONL for issue B (Dolt backend may not support
	// creating tombstones via `bd delete`, but it must round-trip tombstones via JSONL).
	now := time.Now().UTC()
	issues1[idB].Status = types.StatusTombstone
	issues1[idB].DeletedAt = &now
	issues1[idB].DeletedBy = "test"
	issues1[idB].DeleteReason = "test tombstone"
	issues1[idB].OriginalType = string(issues1[idB].IssueType)
	issues1[idB].SetDefaults()
	jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl")
	writeJSONLIssues(t, jsonl1Tomb, issues1)
	// Re-read to confirm the injected tombstone survived serialization.
	issues1Tomb := readJSONLIssues(t, jsonl1Tomb)
	if issues1Tomb[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in tombstone JSONL, got %q", idB, issues1Tomb[idB].Status)
	}
	// Workspace 2: import JSONL into fresh Dolt DB and re-export.
	ws2 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws2)
	initOut2, initErr2 := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr2 != nil {
		if isDoltBackendUnavailable(initOut2) {
			t.Skipf("dolt backend not available: %s", initOut2)
		}
		t.Fatalf("bd init --backend dolt (ws2) failed: %v\n%s", initErr2, initOut2)
	}
	// Copy JSONL into ws2 beads dir
	jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
	data, err := os.ReadFile(jsonl1Tomb)
	if err != nil {
		t.Fatalf("read export1: %v", err)
	}
	if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
		t.Fatalf("write ws2 issues.jsonl: %v", err)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
		t.Fatalf("bd import failed: %v\n%s", err, out)
	}
	jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
		t.Fatalf("bd export (ws2) failed: %v\n%s", err, out)
	}
	issues2 := readJSONLIssues(t, jsonl2out)
	if len(issues2) != 2 {
		t.Fatalf("expected 2 issues in export2 (including tombstone), got %d", len(issues2))
	}
	if issues2[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in export2, got %q", idB, issues2[idB].Status)
	}
	// Ensure comment timestamp preserved across import/export
	ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
	if !ok {
		t.Fatalf("expected comment on %s in export2", idA)
	}
	if !ts2.Equal(ts1) {
		t.Fatalf("expected comment timestamp preserved, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
	}
}
+24 -8
View File
@@ -13,6 +13,7 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/debug" "github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/storage/sqlite" "github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/util" "github.com/steveyegge/beads/internal/util"
@@ -181,7 +182,10 @@ Examples:
fmt.Fprintf(os.Stderr, "Error: no database path found\n") fmt.Fprintf(os.Stderr, "Error: no database path found\n")
os.Exit(1) os.Exit(1)
} }
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout) beadsDir := filepath.Dir(dbPath)
store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{
LockTimeout: lockTimeout,
})
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err) fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1) os.Exit(1)
@@ -396,14 +400,26 @@ Examples:
issue.Dependencies = allDeps[issue.ID] issue.Dependencies = allDeps[issue.ID]
} }
// Populate labels for all issues // Populate labels and comments for all issues (batch APIs)
ids := make([]string, 0, len(issues))
for _, issue := range issues { for _, issue := range issues {
labels, err := store.GetLabels(ctx, issue.ID) ids = append(ids, issue.ID)
if err != nil { }
fmt.Fprintf(os.Stderr, "Error getting labels for %s: %v\n", issue.ID, err)
os.Exit(1) labelsMap, err := store.GetLabelsForIssues(ctx, ids)
} if err != nil {
issue.Labels = labels fmt.Fprintf(os.Stderr, "Error getting labels: %v\n", err)
os.Exit(1)
}
commentsMap, err := store.GetCommentsForIssues(ctx, ids)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting comments: %v\n", err)
os.Exit(1)
}
for _, issue := range issues {
issue.Labels = labelsMap[issue.ID]
issue.Comments = commentsMap[issue.ID]
} }
// Open output // Open output
+5 -5
View File
@@ -282,10 +282,10 @@ func runFederationStatus(cmd *cobra.Command, args []string) {
// Collect status for each peer // Collect status for each peer
type peerStatus struct { type peerStatus struct {
Status *storage.SyncStatus Status *storage.SyncStatus
URL string URL string
Reachable bool Reachable bool
ReachError string ReachError string
} }
var peerStatuses []peerStatus var peerStatuses []peerStatus
@@ -374,7 +374,7 @@ func runFederationAddPeer(cmd *cobra.Command, args []string) {
password := federationPassword password := federationPassword
if federationUser != "" && password == "" { if federationUser != "" && password == "" {
fmt.Fprint(os.Stderr, "Password: ") fmt.Fprint(os.Stderr, "Password: ")
pwBytes, err := term.ReadPassword(int(syscall.Stdin)) pwBytes, err := term.ReadPassword(syscall.Stdin)
fmt.Fprintln(os.Stderr) // newline after password fmt.Fprintln(os.Stderr) // newline after password
if err != nil { if err != nil {
FatalErrorRespectJSON("failed to read password: %v", err) FatalErrorRespectJSON("failed to read password: %v", err)
+7 -3
View File
@@ -638,7 +638,7 @@ func hookPostMergeDolt(beadsDir string) int {
doltStore, ok := store.(interface { doltStore, ok := store.(interface {
Branch(ctx context.Context, name string) error Branch(ctx context.Context, name string) error
Checkout(ctx context.Context, branch string) error Checkout(ctx context.Context, branch string) error
Merge(ctx context.Context, branch string) error Merge(ctx context.Context, branch string) ([]storage.Conflict, error)
Commit(ctx context.Context, message string) error Commit(ctx context.Context, message string) error
CurrentBranch(ctx context.Context) (string, error) CurrentBranch(ctx context.Context) (string, error)
DeleteBranch(ctx context.Context, branch string) error DeleteBranch(ctx context.Context, branch string) error
@@ -691,10 +691,15 @@ func hookPostMergeDolt(beadsDir string) int {
} }
// Merge import branch (Dolt provides cell-level merge) // Merge import branch (Dolt provides cell-level merge)
if err := doltStore.Merge(ctx, importBranch); err != nil { conflicts, err := doltStore.Merge(ctx, importBranch)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not merge import branch: %v\n", err) fmt.Fprintf(os.Stderr, "Warning: could not merge import branch: %v\n", err)
return 0 return 0
} }
if len(conflicts) > 0 {
fmt.Fprintf(os.Stderr, "Warning: %d conflict(s) detected during Dolt merge; resolve with 'bd federation conflicts' or Dolt conflict tooling\n", len(conflicts))
// Best-effort: still return 0 to avoid blocking git merge, consistent with other hook warnings.
}
// Commit the merge // Commit the merge
if err := doltStore.Commit(ctx, "Merge JSONL import"); err != nil { if err := doltStore.Commit(ctx, "Merge JSONL import"); err != nil {
@@ -839,7 +844,6 @@ func hookPostCheckout(args []string) int {
// ============================================================================= // =============================================================================
// importFromJSONLToStore imports issues from JSONL to a store. // importFromJSONLToStore imports issues from JSONL to a store.
// This is a placeholder - the actual implementation should use the store's methods.
func importFromJSONLToStore(ctx context.Context, store storage.Storage, jsonlPath string) error { func importFromJSONLToStore(ctx context.Context, store storage.Storage, jsonlPath string) error {
// Parse JSONL into issues // Parse JSONL into issues
// #nosec G304 - jsonlPath is derived from beadsDir (trusted workspace path) // #nosec G304 - jsonlPath is derived from beadsDir (trusted workspace path)
+5 -3
View File
@@ -16,7 +16,7 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/beads" "github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/debug" "github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage/sqlite" "github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/utils" "github.com/steveyegge/beads/internal/utils"
"golang.org/x/term" "golang.org/x/term"
@@ -74,10 +74,12 @@ NOTE: Import requires direct database access and does not work with daemon mode.
daemonClient = nil daemonClient = nil
var err error var err error
store, err = sqlite.New(rootCtx, dbPath) beadsDir := filepath.Dir(dbPath)
store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{
LockTimeout: lockTimeout,
})
if err != nil { if err != nil {
// Check for fresh clone scenario // Check for fresh clone scenario
beadsDir := filepath.Dir(dbPath)
if handleFreshCloneError(err, beadsDir) { if handleFreshCloneError(err, beadsDir) {
os.Exit(1) os.Exit(1)
} }
+2 -2
View File
@@ -89,7 +89,7 @@ The sync mode controls how beads synchronizes data with git and/or Dolt remotes.
|------|-------------| |------|-------------|
| `git-portable` | (default) Export JSONL on push, import on pull. Standard git-based workflow. | | `git-portable` | (default) Export JSONL on push, import on pull. Standard git-based workflow. |
| `realtime` | Export JSONL on every database change. Legacy behavior, higher I/O. | | `realtime` | Export JSONL on every database change. Legacy behavior, higher I/O. |
| `dolt-native` | Use Dolt remotes directly. No JSONL needed - Dolt handles sync. | | `dolt-native` | Use Dolt remotes directly for sync. JSONL is not used for sync (but manual `bd import` / `bd export` still work). |
| `belt-and-suspenders` | Both Dolt remote AND JSONL backup. Maximum redundancy. | | `belt-and-suspenders` | Both Dolt remote AND JSONL backup. Maximum redundancy. |
#### Sync Triggers #### Sync Triggers
@@ -143,7 +143,7 @@ federation:
- **git-portable** (default): Best for most teams. JSONL is committed to git, works with any git hosting. - **git-portable** (default): Best for most teams. JSONL is committed to git, works with any git hosting.
- **realtime**: Use when you need instant JSONL updates (e.g., file watchers, CI triggers on JSONL changes). - **realtime**: Use when you need instant JSONL updates (e.g., file watchers, CI triggers on JSONL changes).
- **dolt-native**: Use when you have Dolt infrastructure and want database-level sync without JSONL. - **dolt-native**: Use when you have Dolt infrastructure and want database-level sync; JSONL remains available for portability/audits/manual workflows.
- **belt-and-suspenders**: Use for critical data where you want both Dolt sync AND git-portable backup. - **belt-and-suspenders**: Use for critical data where you want both Dolt sync AND git-portable backup.
### Example Config File ### Example Config File
@@ -0,0 +1,114 @@
//go:build !integration
// +build !integration
package importer
import (
"context"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage/memory"
"github.com/steveyegge/beads/internal/types"
)
// TestImportIssues_BackendAgnostic_DepsLabelsCommentsTombstone exercises
// ImportIssues against the in-memory store with two issues: a live issue
// carrying a label, a comment, and a dependency, plus a tombstoned issue.
// It verifies that relationship data, tombstone metadata, and timestamps
// survive the import unchanged.
func TestImportIssues_BackendAgnostic_DepsLabelsCommentsTombstone(t *testing.T) {
	ctx := context.Background()
	store := memory.New("")
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("set issue_prefix: %v", err)
	}
	// Fixed, non-zero timestamps so preservation can be asserted exactly.
	commentTS := time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC)
	deletedTS := time.Date(2021, 2, 3, 4, 5, 6, 7, time.UTC)
	issueA := &types.Issue{
		ID:        "test-1",
		Title:     "Issue A",
		IssueType: types.TypeTask,
		Status:    types.StatusOpen,
		Priority:  2,
		Labels:    []string{"urgent"},
		Dependencies: []*types.Dependency{
			{IssueID: "test-1", DependsOnID: "test-2", Type: types.DepBlocks},
		},
		Comments: []*types.Comment{
			{Author: "tester", Text: "hello", CreatedAt: commentTS},
		},
	}
	// Unlisted fields keep their zero values; only the tombstone metadata
	// matters for this test.
	issueB := &types.Issue{
		ID:           "test-2",
		Title:        "Issue B",
		IssueType:    types.TypeTask,
		Status:       types.StatusTombstone,
		Priority:     4,
		Description:  "tombstone",
		DeletedAt:    &deletedTS,
		DeletedBy:    "tester",
		DeleteReason: "bye",
		OriginalType: string(types.TypeTask),
	}
	result, err := ImportIssues(ctx, "", store, []*types.Issue{issueA, issueB}, Options{OrphanHandling: OrphanAllow})
	if err != nil {
		t.Fatalf("ImportIssues: %v", err)
	}
	if result.Created != 2 {
		t.Fatalf("expected Created=2, got %d", result.Created)
	}
	// Label round-trips.
	labels, err := store.GetLabels(ctx, "test-1")
	if err != nil {
		t.Fatalf("GetLabels: %v", err)
	}
	if len(labels) != 1 || labels[0] != "urgent" {
		t.Fatalf("expected labels [urgent], got %v", labels)
	}
	// Dependency round-trips with its type intact.
	deps, err := store.GetDependencyRecords(ctx, "test-1")
	if err != nil {
		t.Fatalf("GetDependencyRecords: %v", err)
	}
	if len(deps) != 1 || deps[0].DependsOnID != "test-2" || deps[0].Type != types.DepBlocks {
		t.Fatalf("expected dependency test-1 blocks test-2, got %#v", deps)
	}
	// Comment round-trips with its original timestamp.
	comments, err := store.GetIssueComments(ctx, "test-1")
	if err != nil {
		t.Fatalf("GetIssueComments: %v", err)
	}
	if len(comments) != 1 {
		t.Fatalf("expected 1 comment, got %d", len(comments))
	}
	if !comments[0].CreatedAt.Equal(commentTS) {
		t.Fatalf("expected comment timestamp preserved (%s), got %s", commentTS.Format(time.RFC3339Nano), comments[0].CreatedAt.Format(time.RFC3339Nano))
	}
	// Tombstone status and DeletedAt round-trip.
	got, err := store.GetIssue(ctx, "test-2")
	if err != nil {
		t.Fatalf("GetIssue: %v", err)
	}
	if got.Status != types.StatusTombstone {
		t.Fatalf("expected tombstone status, got %q", got.Status)
	}
	if got.DeletedAt == nil || !got.DeletedAt.Equal(deletedTS) {
		t.Fatalf("expected DeletedAt preserved (%s), got %#v", deletedTS.Format(time.RFC3339Nano), got.DeletedAt)
	}
}
File diff suppressed because it is too large Load Diff
+9 -47
View File
@@ -812,60 +812,22 @@ func TestImportIssues_Labels(t *testing.T) {
} }
func TestGetOrCreateStore_ExistingStore(t *testing.T) { func TestGetOrCreateStore_ExistingStore(t *testing.T) {
ctx := context.Background() t.Skip("getOrCreateStore removed: importer now requires a store")
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
defer store.Close()
result, needClose, err := getOrCreateStore(ctx, tmpDB, store)
if err != nil {
t.Fatalf("Expected no error, got: %v", err)
}
if needClose {
t.Error("Expected needClose=false for existing store")
}
if result != store {
t.Error("Expected same store instance")
}
} }
func TestGetOrCreateStore_NewStore(t *testing.T) { func TestGetOrCreateStore_NewStore(t *testing.T) {
ctx := context.Background() t.Skip("getOrCreateStore removed: importer now requires a store")
tmpDB := t.TempDir() + "/test.db"
// Create initial database
initStore, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
initStore.Close()
// Test creating new connection
result, needClose, err := getOrCreateStore(ctx, tmpDB, nil)
if err != nil {
t.Fatalf("Expected no error, got: %v", err)
}
defer result.Close()
if !needClose {
t.Error("Expected needClose=true for new store")
}
if result == nil {
t.Error("Expected non-nil store")
}
} }
func TestGetOrCreateStore_EmptyPath(t *testing.T) { func TestGetOrCreateStore_EmptyPath(t *testing.T) {
t.Skip("getOrCreateStore removed: importer now requires a store")
}
func TestImportIssues_RequiresStore(t *testing.T) {
ctx := context.Background() ctx := context.Background()
_, err := ImportIssues(ctx, "", nil, []*types.Issue{}, Options{})
_, _, err := getOrCreateStore(ctx, "", nil)
if err == nil { if err == nil {
t.Error("Expected error for empty database path") t.Fatal("expected error when store is nil")
} }
} }
@@ -1203,7 +1165,7 @@ func TestImportOrphanSkip_CountMismatch(t *testing.T) {
// Import with OrphanSkip mode - parent doesn't exist // Import with OrphanSkip mode - parent doesn't exist
result, err := ImportIssues(ctx, "", store, issues, Options{ result, err := ImportIssues(ctx, "", store, issues, Options{
OrphanHandling: sqlite.OrphanSkip, OrphanHandling: OrphanSkip,
SkipPrefixValidation: true, // Allow explicit IDs during import SkipPrefixValidation: true, // Allow explicit IDs during import
}) })
if err != nil { if err != nil {
+7 -4
View File
@@ -263,14 +263,17 @@ func (s *DoltStore) UpdatePeerLastSync(ctx context.Context, name string) error {
// The caller must hold federationEnvMutex. // The caller must hold federationEnvMutex.
func setFederationCredentials(username, password string) func() { func setFederationCredentials(username, password string) func() {
if username != "" { if username != "" {
os.Setenv("DOLT_REMOTE_USER", username) // Best-effort: failures here should not crash the caller.
_ = os.Setenv("DOLT_REMOTE_USER", username)
} }
if password != "" { if password != "" {
os.Setenv("DOLT_REMOTE_PASSWORD", password) // Best-effort: failures here should not crash the caller.
_ = os.Setenv("DOLT_REMOTE_PASSWORD", password)
} }
return func() { return func() {
os.Unsetenv("DOLT_REMOTE_USER") // Best-effort cleanup.
os.Unsetenv("DOLT_REMOTE_PASSWORD") _ = os.Unsetenv("DOLT_REMOTE_USER")
_ = os.Unsetenv("DOLT_REMOTE_PASSWORD")
} }
} }
+27 -2
View File
@@ -65,10 +65,26 @@ func (s *DoltStore) GetEvents(ctx context.Context, issueID string, limit int) ([
// AddIssueComment adds a comment to an issue (structured comment) // AddIssueComment adds a comment to an issue (structured comment)
func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) { func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) {
return s.ImportIssueComment(ctx, issueID, author, text, time.Now().UTC())
}
// ImportIssueComment adds a comment during import, preserving the original timestamp.
// This prevents comment timestamp drift across JSONL sync cycles.
func (s *DoltStore) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
// Verify issue exists
var exists bool
if err := s.db.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM issues WHERE id = ?)`, issueID).Scan(&exists); err != nil {
return nil, fmt.Errorf("failed to check issue existence: %w", err)
}
if !exists {
return nil, fmt.Errorf("issue %s not found", issueID)
}
createdAt = createdAt.UTC()
result, err := s.db.ExecContext(ctx, ` result, err := s.db.ExecContext(ctx, `
INSERT INTO comments (issue_id, author, text, created_at) INSERT INTO comments (issue_id, author, text, created_at)
VALUES (?, ?, ?, ?) VALUES (?, ?, ?, ?)
`, issueID, author, text, time.Now().UTC()) `, issueID, author, text, createdAt)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to add comment: %w", err) return nil, fmt.Errorf("failed to add comment: %w", err)
} }
@@ -78,12 +94,21 @@ func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text s
return nil, fmt.Errorf("failed to get comment id: %w", err) return nil, fmt.Errorf("failed to get comment id: %w", err)
} }
// Mark issue dirty for incremental JSONL export
if _, err := s.db.ExecContext(ctx, `
INSERT INTO dirty_issues (issue_id, marked_at)
VALUES (?, ?)
ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at)
`, issueID, time.Now().UTC()); err != nil {
return nil, fmt.Errorf("failed to mark issue dirty: %w", err)
}
return &types.Comment{ return &types.Comment{
ID: id, ID: id,
IssueID: issueID, IssueID: issueID,
Author: author, Author: author,
Text: text, Text: text,
CreatedAt: time.Now().UTC(), CreatedAt: createdAt,
}, nil }, nil
} }
+2
View File
@@ -103,6 +103,7 @@ func (s *Server) Start(ctx context.Context) error {
} }
// Create command // Create command
// #nosec G204 -- dolt binary is fixed; args are derived from internal config.
s.cmd = exec.CommandContext(ctx, "dolt", args...) s.cmd = exec.CommandContext(ctx, "dolt", args...)
s.cmd.Dir = s.cfg.DataDir s.cmd.Dir = s.cfg.DataDir
@@ -272,6 +273,7 @@ func (s *Server) waitForReady(ctx context.Context) error {
// GetRunningServerPID returns the PID of a running server from the PID file, or 0 if not running // GetRunningServerPID returns the PID of a running server from the PID file, or 0 if not running
func GetRunningServerPID(dataDir string) int { func GetRunningServerPID(dataDir string) int {
pidFile := filepath.Join(dataDir, "dolt-server.pid") pidFile := filepath.Join(dataDir, "dolt-server.pid")
// #nosec G304 -- pidFile is derived from internal dataDir.
data, err := os.ReadFile(pidFile) data, err := os.ReadFile(pidFile)
if err != nil { if err != nil {
return 0 return 0
+110
View File
@@ -17,6 +17,12 @@ type doltTransaction struct {
store *DoltStore store *DoltStore
} }
// CreateIssueImport is the import-friendly issue creation hook.
// Dolt does not enforce prefix validation at the storage layer, so this delegates to CreateIssue.
// NOTE(review): skipPrefixValidation is intentionally ignored here — prefix
// validation happens in the sqlite backend's import path, not in Dolt.
func (t *doltTransaction) CreateIssueImport(ctx context.Context, issue *types.Issue, actor string, skipPrefixValidation bool) error {
	return t.CreateIssue(ctx, issue, actor)
}
// RunInTransaction executes a function within a database transaction // RunInTransaction executes a function within a database transaction
func (s *DoltStore) RunInTransaction(ctx context.Context, fn func(tx storage.Transaction) error) error { func (s *DoltStore) RunInTransaction(ctx context.Context, fn func(tx storage.Transaction) error) error {
sqlTx, err := s.db.BeginTx(ctx, nil) sqlTx, err := s.db.BeginTx(ctx, nil)
@@ -169,6 +175,36 @@ func (t *doltTransaction) AddDependency(ctx context.Context, dep *types.Dependen
return err return err
} }
// GetDependencyRecords lists every dependency edge originating at issueID,
// read through the transaction so uncommitted writes are visible.
// NULL metadata / thread_id columns map to empty strings on the result.
func (t *doltTransaction) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	rows, err := t.tx.QueryContext(ctx, `
		SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id
		FROM dependencies
		WHERE issue_id = ?
	`, issueID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var result []*types.Dependency
	for rows.Next() {
		dep := new(types.Dependency)
		var meta, thread sql.NullString
		if scanErr := rows.Scan(&dep.IssueID, &dep.DependsOnID, &dep.Type, &dep.CreatedAt, &dep.CreatedBy, &meta, &thread); scanErr != nil {
			return nil, scanErr
		}
		dep.Metadata = meta.String   // "" when NULL, same as leaving the zero value
		dep.ThreadID = thread.String // "" when NULL
		result = append(result, dep)
	}
	return result, rows.Err()
}
// RemoveDependency removes a dependency within the transaction // RemoveDependency removes a dependency within the transaction
func (t *doltTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error { func (t *doltTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
_, err := t.tx.ExecContext(ctx, ` _, err := t.tx.ExecContext(ctx, `
@@ -185,6 +221,23 @@ func (t *doltTransaction) AddLabel(ctx context.Context, issueID, label, actor st
return err return err
} }
// GetLabels returns the labels attached to issueID in ascending label order,
// as seen from within the transaction.
func (t *doltTransaction) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	rows, err := t.tx.QueryContext(ctx, `SELECT label FROM labels WHERE issue_id = ? ORDER BY label`, issueID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var result []string
	for rows.Next() {
		var label string
		if scanErr := rows.Scan(&label); scanErr != nil {
			return nil, scanErr
		}
		result = append(result, label)
	}
	return result, rows.Err()
}
// RemoveLabel removes a label within the transaction // RemoveLabel removes a label within the transaction
func (t *doltTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error { func (t *doltTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
_, err := t.tx.ExecContext(ctx, ` _, err := t.tx.ExecContext(ctx, `
@@ -231,6 +284,63 @@ func (t *doltTransaction) GetMetadata(ctx context.Context, key string) (string,
return value, err return value, err
} }
// ImportIssueComment inserts a structured comment inside the transaction while
// preserving the caller-supplied creation timestamp (normalized to UTC); this
// is the import path that avoids comment-timestamp drift across JSONL syncs.
// The issue is also marked dirty so the next incremental export picks it up.
func (t *doltTransaction) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	// The comment must reference an issue visible inside this transaction.
	issue, err := t.GetIssue(ctx, issueID)
	if err != nil {
		return nil, err
	}
	if issue == nil {
		return nil, fmt.Errorf("issue %s not found", issueID)
	}

	ts := createdAt.UTC()
	result, err := t.tx.ExecContext(ctx, `
		INSERT INTO comments (issue_id, author, text, created_at)
		VALUES (?, ?, ?, ?)
	`, issueID, author, text, ts)
	if err != nil {
		return nil, fmt.Errorf("failed to add comment: %w", err)
	}
	commentID, err := result.LastInsertId()
	if err != nil {
		return nil, fmt.Errorf("failed to get comment id: %w", err)
	}

	// Record the issue as dirty so incremental JSONL export includes it.
	if _, err := t.tx.ExecContext(ctx, `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at)
	`, issueID, time.Now().UTC()); err != nil {
		return nil, fmt.Errorf("failed to mark issue dirty: %w", err)
	}

	return &types.Comment{
		ID:        commentID,
		IssueID:   issueID,
		Author:    author,
		Text:      text,
		CreatedAt: ts,
	}, nil
}
// GetIssueComments returns the structured comments on issueID in ascending
// created_at order, read through the transaction.
func (t *doltTransaction) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
	rows, err := t.tx.QueryContext(ctx, `
		SELECT id, issue_id, author, text, created_at
		FROM comments
		WHERE issue_id = ?
		ORDER BY created_at ASC
	`, issueID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var result []*types.Comment
	for rows.Next() {
		comment := new(types.Comment)
		if scanErr := rows.Scan(&comment.ID, &comment.IssueID, &comment.Author, &comment.Text, &comment.CreatedAt); scanErr != nil {
			return nil, scanErr
		}
		result = append(result, comment)
	}
	return result, rows.Err()
}
// AddComment adds a comment within the transaction // AddComment adds a comment within the transaction
func (t *doltTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error { func (t *doltTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error {
_, err := t.tx.ExecContext(ctx, ` _, err := t.tx.ExecContext(ctx, `
+18
View File
@@ -1457,6 +1457,24 @@ func (m *MemoryStorage) AddIssueComment(ctx context.Context, issueID, author, te
return comment, nil return comment, nil
} }
// ImportIssueComment appends a comment to issueID while preserving the
// caller-supplied creation timestamp (used by JSONL import to avoid
// timestamp drift across sync cycles).
//
// CONSISTENCY FIX: the timestamp is normalized to UTC, matching the SQLite
// and Dolt backends, which store and return UTC times. The issue is marked
// dirty so it is included in the next incremental export.
//
// NOTE(review): the comment ID is derived from the current comment count, so
// IDs stay unique only while comments are never removed — acceptable for this
// in-memory store.
func (m *MemoryStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	comment := &types.Comment{
		ID:        int64(len(m.comments[issueID]) + 1),
		IssueID:   issueID,
		Author:    author,
		Text:      text,
		CreatedAt: createdAt.UTC(),
	}
	m.comments[issueID] = append(m.comments[issueID], comment)
	m.dirty[issueID] = true
	return comment, nil
}
func (m *MemoryStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) { func (m *MemoryStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
m.mu.RLock() m.mu.RLock()
defer m.mu.RUnlock() defer m.mu.RUnlock()
+4 -2
View File
@@ -3,6 +3,7 @@ package sqlite
import ( import (
"context" "context"
"fmt" "fmt"
"time"
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
) )
@@ -56,7 +57,7 @@ func (s *SQLiteStorage) AddIssueComment(ctx context.Context, issueID, author, te
// Unlike AddIssueComment which uses CURRENT_TIMESTAMP, this method uses the provided // Unlike AddIssueComment which uses CURRENT_TIMESTAMP, this method uses the provided
// createdAt time from the JSONL file. This prevents timestamp drift during sync cycles. // createdAt time from the JSONL file. This prevents timestamp drift during sync cycles.
// GH#735: Comment created_at timestamps were being overwritten with current time during import. // GH#735: Comment created_at timestamps were being overwritten with current time during import.
func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt string) (*types.Comment, error) { func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
// Verify issue exists // Verify issue exists
var exists bool var exists bool
err := s.db.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM issues WHERE id = ?)`, issueID).Scan(&exists) err := s.db.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM issues WHERE id = ?)`, issueID).Scan(&exists)
@@ -68,10 +69,11 @@ func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author,
} }
// Insert comment with provided timestamp // Insert comment with provided timestamp
createdAtStr := createdAt.UTC().Format(time.RFC3339Nano)
result, err := s.db.ExecContext(ctx, ` result, err := s.db.ExecContext(ctx, `
INSERT INTO comments (issue_id, author, text, created_at) INSERT INTO comments (issue_id, author, text, created_at)
VALUES (?, ?, ?, ?) VALUES (?, ?, ?, ?)
`, issueID, author, text, createdAt) `, issueID, author, text, createdAtStr)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to insert comment: %w", err) return nil, fmt.Errorf("failed to insert comment: %w", err)
} }
+115
View File
@@ -0,0 +1,115 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/steveyegge/beads/internal/types"
)
// CreateIssueImport creates an issue inside an existing sqlite transaction, optionally skipping
// prefix validation. This is used by JSONL import to support multi-repo mode (GH#686).
//
// Behavior:
//   - Missing timestamps default to now; the closed/tombstone invariants
//     (ClosedAt / DeletedAt non-nil) are repaired defensively before validation.
//   - When issue.ID is empty an ID is generated from the configured prefix;
//     otherwise the prefix is validated unless skipPrefixValidation is set.
//   - Hierarchical IDs require the parent issue to already exist.
//   - On success the row is inserted strictly, a creation event is recorded,
//     and the issue is marked dirty for incremental JSONL export.
func (t *sqliteTxStorage) CreateIssueImport(ctx context.Context, issue *types.Issue, actor string, skipPrefixValidation bool) error {
	// Fetch custom statuses and types for validation.
	customStatuses, err := t.GetCustomStatuses(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom statuses: %w", err)
	}
	customTypes, err := t.GetCustomTypes(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom types: %w", err)
	}

	// Set timestamps.
	now := time.Now()
	if issue.CreatedAt.IsZero() {
		issue.CreatedAt = now
	}
	if issue.UpdatedAt.IsZero() {
		issue.UpdatedAt = now
	}

	// Defensive fix for closed_at invariant: a closed issue must carry a
	// ClosedAt no earlier than its other timestamps.
	if issue.Status == types.StatusClosed && issue.ClosedAt == nil {
		maxTime := issue.CreatedAt
		if issue.UpdatedAt.After(maxTime) {
			maxTime = issue.UpdatedAt
		}
		closedAt := maxTime.Add(time.Second)
		issue.ClosedAt = &closedAt
	}

	// Defensive fix for tombstone invariant (DeletedAt must be set).
	if issue.Status == types.StatusTombstone && issue.DeletedAt == nil {
		maxTime := issue.CreatedAt
		if issue.UpdatedAt.After(maxTime) {
			maxTime = issue.UpdatedAt
		}
		deletedAt := maxTime.Add(time.Second)
		issue.DeletedAt = &deletedAt
	}

	// Validate issue before creating.
	if err := issue.ValidateWithCustom(customStatuses, customTypes); err != nil {
		return fmt.Errorf("validation failed: %w", err)
	}

	// Compute content hash when the importer did not supply one.
	if issue.ContentHash == "" {
		issue.ContentHash = issue.ComputeContentHash()
	}

	// Get configured prefix for validation and ID generation behavior.
	// BUGFIX: check for unexpected query errors before the "not initialized"
	// case; previously any non-ErrNoRows scan failure left configPrefix empty
	// and was misreported as a missing issue_prefix.
	var configPrefix string
	err = t.conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&configPrefix)
	if err != nil && err != sql.ErrNoRows {
		return fmt.Errorf("failed to get config: %w", err)
	}
	if configPrefix == "" {
		return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)")
	}
	prefix := configPrefix
	if issue.IDPrefix != "" {
		prefix = configPrefix + "-" + issue.IDPrefix
	}

	if issue.ID == "" {
		// Import path expects IDs, but be defensive and generate if missing.
		generatedID, genErr := GenerateIssueID(ctx, t.conn, prefix, issue, actor)
		if genErr != nil {
			return fmt.Errorf("failed to generate issue ID: %w", genErr)
		}
		issue.ID = generatedID
	} else if !skipPrefixValidation {
		if err := ValidateIssueIDPrefix(issue.ID, prefix); err != nil {
			return fmt.Errorf("failed to validate issue ID prefix: %w", err)
		}
	}

	// Ensure parent exists for hierarchical IDs (importer should have ensured / resurrected).
	if isHierarchical, parentID := IsHierarchicalID(issue.ID); isHierarchical {
		var parentCount int
		if err := t.conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, parentID).Scan(&parentCount); err != nil {
			return fmt.Errorf("failed to check parent existence: %w", err)
		}
		if parentCount == 0 {
			return fmt.Errorf("parent issue %s does not exist", parentID)
		}
	}

	// Insert issue (strict).
	if err := insertIssueStrict(ctx, t.conn, issue); err != nil {
		return fmt.Errorf("failed to insert issue: %w", err)
	}
	// Record creation event for the audit trail.
	if err := recordCreatedEvent(ctx, t.conn, issue, actor); err != nil {
		return fmt.Errorf("failed to record creation event: %w", err)
	}
	// Mark dirty for incremental JSONL export.
	if err := markDirty(ctx, t.conn, issue.ID); err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}
	return nil
}
+98
View File
@@ -824,6 +824,37 @@ func (t *sqliteTxStorage) AddDependency(ctx context.Context, dep *types.Dependen
return nil return nil
} }
// GetDependencyRecords retrieves dependency records for an issue within the transaction.
// NULL metadata / thread_id columns map to empty strings on the returned records.
func (t *sqliteTxStorage) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	rows, err := t.conn.QueryContext(ctx, `
		SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id
		FROM dependencies
		WHERE issue_id = ?
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to query dependencies: %w", err)
	}
	defer func() { _ = rows.Close() }()

	var result []*types.Dependency
	for rows.Next() {
		dep := new(types.Dependency)
		var meta, thread sql.NullString
		if scanErr := rows.Scan(&dep.IssueID, &dep.DependsOnID, &dep.Type, &dep.CreatedAt, &dep.CreatedBy, &meta, &thread); scanErr != nil {
			return nil, fmt.Errorf("failed to scan dependency: %w", scanErr)
		}
		dep.Metadata = meta.String   // "" when NULL
		dep.ThreadID = thread.String // "" when NULL
		result = append(result, dep)
	}
	return result, rows.Err()
}
// RemoveDependency removes a dependency within the transaction. // RemoveDependency removes a dependency within the transaction.
func (t *sqliteTxStorage) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error { func (t *sqliteTxStorage) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
// First, check what type of dependency is being removed // First, check what type of dependency is being removed
@@ -916,6 +947,11 @@ func (t *sqliteTxStorage) AddLabel(ctx context.Context, issueID, label, actor st
return nil return nil
} }
// GetLabels retrieves labels for an issue within the transaction.
// It delegates to the unexported getLabels helper so transactional and
// non-transactional reads share one implementation.
func (t *sqliteTxStorage) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	return t.getLabels(ctx, issueID)
}
// RemoveLabel removes a label from an issue within the transaction. // RemoveLabel removes a label from an issue within the transaction.
func (t *sqliteTxStorage) RemoveLabel(ctx context.Context, issueID, label, actor string) error { func (t *sqliteTxStorage) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
result, err := t.conn.ExecContext(ctx, ` result, err := t.conn.ExecContext(ctx, `
@@ -1063,6 +1099,68 @@ func (t *sqliteTxStorage) AddComment(ctx context.Context, issueID, actor, commen
return nil return nil
} }
// ImportIssueComment adds a structured comment during import, preserving the original timestamp.
// The timestamp is stored as RFC3339Nano in UTC so round-trips through JSONL
// do not drift, and the returned Comment carries the same UTC instant.
// The issue is marked dirty so the next incremental export includes it.
func (t *sqliteTxStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	// The comment must be attached to an issue visible inside this transaction.
	issue, err := t.GetIssue(ctx, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to check issue existence: %w", err)
	}
	if issue == nil {
		return nil, fmt.Errorf("issue %s not found", issueID)
	}

	ts := createdAt.UTC()
	result, err := t.conn.ExecContext(ctx, `
		INSERT INTO comments (issue_id, author, text, created_at)
		VALUES (?, ?, ?, ?)
	`, issueID, author, text, ts.Format(time.RFC3339Nano))
	if err != nil {
		return nil, fmt.Errorf("failed to insert comment: %w", err)
	}
	commentID, err := result.LastInsertId()
	if err != nil {
		return nil, fmt.Errorf("failed to get comment ID: %w", err)
	}

	// Flag the issue for the next incremental JSONL export.
	if err := markDirty(ctx, t.conn, issueID); err != nil {
		return nil, fmt.Errorf("failed to mark issue dirty: %w", err)
	}

	return &types.Comment{
		ID:        commentID,
		IssueID:   issueID,
		Author:    author,
		Text:      text,
		CreatedAt: ts,
	}, nil
}
// GetIssueComments retrieves structured comments for an issue within the transaction,
// ordered by ascending created_at.
func (t *sqliteTxStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
	rows, err := t.conn.QueryContext(ctx, `
		SELECT id, issue_id, author, text, created_at
		FROM comments
		WHERE issue_id = ?
		ORDER BY created_at ASC
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to query comments: %w", err)
	}
	defer func() { _ = rows.Close() }()

	var result []*types.Comment
	for rows.Next() {
		comment := new(types.Comment)
		if scanErr := rows.Scan(&comment.ID, &comment.IssueID, &comment.Author, &comment.Text, &comment.CreatedAt); scanErr != nil {
			return nil, fmt.Errorf("failed to scan comment: %w", scanErr)
		}
		result = append(result, comment)
	}
	return result, rows.Err()
}
// SearchIssues finds issues matching query and filters within the transaction. // SearchIssues finds issues matching query and filters within the transaction.
// This enables read-your-writes semantics for searching within a transaction. // This enables read-your-writes semantics for searching within a transaction.
func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) { func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
+8
View File
@@ -4,6 +4,7 @@ package storage
import ( import (
"context" "context"
"database/sql" "database/sql"
"time"
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
) )
@@ -58,10 +59,12 @@ type Transaction interface {
// Dependency operations // Dependency operations
AddDependency(ctx context.Context, dep *types.Dependency, actor string) error AddDependency(ctx context.Context, dep *types.Dependency, actor string) error
RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error
GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error)
// Label operations // Label operations
AddLabel(ctx context.Context, issueID, label, actor string) error AddLabel(ctx context.Context, issueID, label, actor string) error
RemoveLabel(ctx context.Context, issueID, label, actor string) error RemoveLabel(ctx context.Context, issueID, label, actor string) error
GetLabels(ctx context.Context, issueID string) ([]string, error)
// Config operations (for atomic config + issue workflows) // Config operations (for atomic config + issue workflows)
SetConfig(ctx context.Context, key, value string) error SetConfig(ctx context.Context, key, value string) error
@@ -73,6 +76,8 @@ type Transaction interface {
// Comment operations // Comment operations
AddComment(ctx context.Context, issueID, actor, comment string) error AddComment(ctx context.Context, issueID, actor, comment string) error
ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error)
GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error)
} }
// Storage defines the interface for issue storage backends // Storage defines the interface for issue storage backends
@@ -121,6 +126,9 @@ type Storage interface {
// Comments // Comments
AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error)
// ImportIssueComment adds a comment while preserving the original timestamp.
// Used during JSONL import to avoid timestamp drift across sync cycles.
ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error)
GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error)
GetCommentsForIssues(ctx context.Context, issueIDs []string) (map[string][]*types.Comment, error) GetCommentsForIssues(ctx context.Context, issueIDs []string) (map[string][]*types.Comment, error)
+16
View File
@@ -5,6 +5,7 @@ import (
"context" "context"
"database/sql" "database/sql"
"testing" "testing"
"time"
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
) )
@@ -119,6 +120,9 @@ func (m *mockStorage) GetEvents(ctx context.Context, issueID string, limit int)
func (m *mockStorage) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) { func (m *mockStorage) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) {
return nil, nil return nil, nil
} }
// ImportIssueComment is a no-op stub satisfying the Storage interface for tests.
func (m *mockStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	return nil, nil
}
func (m *mockStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) { func (m *mockStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
return nil, nil return nil, nil
} }
@@ -237,12 +241,18 @@ func (m *mockTransaction) AddDependency(ctx context.Context, dep *types.Dependen
func (m *mockTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error { func (m *mockTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
return nil return nil
} }
// GetDependencyRecords is a no-op stub satisfying the Transaction interface for tests.
func (m *mockTransaction) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	return nil, nil
}
func (m *mockTransaction) AddLabel(ctx context.Context, issueID, label, actor string) error { func (m *mockTransaction) AddLabel(ctx context.Context, issueID, label, actor string) error {
return nil return nil
} }
func (m *mockTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error { func (m *mockTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
return nil return nil
} }
// GetLabels is a no-op stub satisfying the Transaction interface for tests.
func (m *mockTransaction) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	return nil, nil
}
func (m *mockTransaction) SetConfig(ctx context.Context, key, value string) error { func (m *mockTransaction) SetConfig(ctx context.Context, key, value string) error {
return nil return nil
} }
@@ -258,6 +268,12 @@ func (m *mockTransaction) GetMetadata(ctx context.Context, key string) (string,
func (m *mockTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error { func (m *mockTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error {
return nil return nil
} }
// ImportIssueComment is a no-op stub satisfying the Transaction interface for tests.
func (m *mockTransaction) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	return nil, nil
}
// GetIssueComments is a no-op stub satisfying the Transaction interface for tests.
func (m *mockTransaction) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
	return nil, nil
}
// TestConfig verifies the Config struct has expected fields. // TestConfig verifies the Config struct has expected fields.
func TestConfig(t *testing.T) { func TestConfig(t *testing.T) {