Merge pull request #1240 from coffeegoddd/db/import-export

Enable full-fidelity JSONL import/export for Dolt backend
This commit is contained in:
Steve Yegge
2026-01-21 16:40:03 -08:00
committed by GitHub
27 changed files with 2011 additions and 347 deletions

View File

@@ -0,0 +1,259 @@
//go:build integration
// +build integration
package main
import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// isDoltBackendUnavailable reports whether bd command output indicates that
// the Dolt backend could not be used (not supported, not available, or
// unknown), so callers can skip a test instead of failing it.
func isDoltBackendUnavailable(out string) bool {
	lower := strings.ToLower(out)
	if !strings.Contains(lower, "dolt") {
		return false
	}
	for _, marker := range []string{"not supported", "not available", "unknown"} {
		if strings.Contains(lower, marker) {
			return true
		}
	}
	return false
}
// setupGitRepoForIntegration initializes dir as a git repository and sets a
// test identity so bd commands that expect a git workspace can run there.
func setupGitRepoForIntegration(t *testing.T, dir string) {
	t.Helper()
	if err := runCommandInDir(dir, "git", "init"); err != nil {
		t.Fatalf("git init failed: %v", err)
	}
	// Identity config is best-effort; a failure here surfaces later through
	// git itself if a command actually needs the identity.
	for _, kv := range [][2]string{
		{"user.email", "test@example.com"},
		{"user.name", "Test User"},
	} {
		_ = runCommandInDir(dir, "git", "config", kv[0], kv[1])
	}
}
// TestSQLiteToDolt_JSONLRoundTrip checks SQLite -> JSONL -> Dolt -> JSONL
// fidelity: labels, comments (with timestamps), dependencies, and a tombstone
// created under the SQLite backend must survive import into a fresh Dolt
// workspace and a subsequent export.
func TestSQLiteToDolt_JSONLRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("cross-backend integration test not supported on windows")
	}
	// Shared environment for every bd invocation in this test.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_NO_DAEMON=1",
	}
	// Workspace 1: SQLite create -> export JSONL
	ws1 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws1)
	// Explicitly initialize sqlite for clarity.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil {
		t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out)
	}
	// Create two issues: A carries the label/comment/dependency, B becomes
	// the tombstone.
	outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
	if err != nil {
		t.Fatalf("bd create A failed: %v\n%s", err, outA)
	}
	idA := parseCreateID(t, outA)
	outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
	if err != nil {
		t.Fatalf("bd create B failed: %v\n%s", err, outB)
	}
	idB := parseCreateID(t, outB)
	// Add label + comment + dependency.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
		t.Fatalf("bd label add failed: %v\n%s", err, out)
	}
	commentText := "Cross-backend round-trip"
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
		t.Fatalf("bd comments add failed: %v\n%s", err, out)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
		t.Fatalf("bd dep add failed: %v\n%s", err, out)
	}
	// Create tombstone via delete (SQLite supports tombstones).
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "delete", idB, "--force", "--reason", "test tombstone"); err != nil {
		t.Fatalf("bd delete failed: %v\n%s", err, out)
	}
	jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
		t.Fatalf("bd export failed: %v\n%s", err, out)
	}
	// Sanity-check the SQLite export before feeding it to Dolt: both issues
	// present (tombstone included), and capture the comment timestamp for
	// later comparison.
	issues1 := readJSONLIssues(t, jsonl1)
	if len(issues1) != 2 {
		t.Fatalf("expected 2 issues in sqlite export (including tombstone), got %d", len(issues1))
	}
	if issues1[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in sqlite export, got %q", idB, issues1[idB].Status)
	}
	ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
	if !ok || ts1.IsZero() {
		t.Fatalf("expected comment on %s in sqlite export", idA)
	}
	// Workspace 2: Dolt import JSONL -> export JSONL
	ws2 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws2)
	initOut, initErr := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// Skip (not fail) when the Dolt backend is unavailable in this build.
		if isDoltBackendUnavailable(initOut) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	// Copy the SQLite export into workspace 2's .beads dir and import it.
	jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
	data, err := os.ReadFile(jsonl1)
	if err != nil {
		t.Fatalf("read sqlite export: %v", err)
	}
	if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
		t.Fatalf("write dolt issues.jsonl: %v", err)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
		t.Fatalf("bd import (dolt) failed: %v\n%s", err, out)
	}
	jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
		t.Fatalf("bd export (dolt) failed: %v\n%s", err, out)
	}
	// The Dolt re-export must match the original: same issue count, the
	// tombstone intact, and the comment timestamp preserved exactly.
	issues2 := readJSONLIssues(t, jsonl2out)
	if len(issues2) != 2 {
		t.Fatalf("expected 2 issues in dolt export, got %d", len(issues2))
	}
	if issues2[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone after import into dolt, got %q", idB, issues2[idB].Status)
	}
	ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
	if !ok {
		t.Fatalf("expected comment on %s in dolt export", idA)
	}
	if !ts2.Equal(ts1) {
		t.Fatalf("expected comment timestamp preserved across sqlite->dolt, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
	}
}
// TestDoltToSQLite_JSONLRoundTrip checks Dolt -> JSONL -> SQLite fidelity:
// labels, comments (with timestamps), dependencies, and a tombstone record
// injected into the JSONL must survive import into a fresh SQLite workspace
// and a subsequent export.
func TestDoltToSQLite_JSONLRoundTrip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("cross-backend integration test not supported on windows")
	}
	// Shared environment for every bd invocation in this test.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_NO_DAEMON=1",
	}
	// Workspace 1: Dolt create -> export JSONL
	ws1 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws1)
	initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// Skip (not fail) when the Dolt backend is unavailable in this build.
		if isDoltBackendUnavailable(initOut) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	// Create two issues: A carries the label/comment/dependency, B becomes
	// the tombstone.
	outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
	if err != nil {
		t.Fatalf("bd create A failed: %v\n%s", err, outA)
	}
	idA := parseCreateID(t, outA)
	outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
	if err != nil {
		t.Fatalf("bd create B failed: %v\n%s", err, outB)
	}
	idB := parseCreateID(t, outB)
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
		t.Fatalf("bd label add failed: %v\n%s", err, out)
	}
	commentText := "Cross-backend round-trip"
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
		t.Fatalf("bd comments add failed: %v\n%s", err, out)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
		t.Fatalf("bd dep add failed: %v\n%s", err, out)
	}
	jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
		t.Fatalf("bd export (dolt) failed: %v\n%s", err, out)
	}
	// Sanity-check the Dolt export and capture the comment timestamp for the
	// final comparison.
	issues1 := readJSONLIssues(t, jsonl1)
	if len(issues1) != 2 {
		t.Fatalf("expected 2 issues in dolt export, got %d", len(issues1))
	}
	ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
	if !ok || ts1.IsZero() {
		t.Fatalf("expected comment on %s in dolt export", idA)
	}
	// Inject tombstone record for B into JSONL (Dolt backend may not support bd delete tombstones).
	now := time.Now().UTC()
	issues1[idB].Status = types.StatusTombstone
	issues1[idB].DeletedAt = &now
	issues1[idB].DeletedBy = "test"
	issues1[idB].DeleteReason = "test tombstone"
	issues1[idB].OriginalType = string(issues1[idB].IssueType)
	issues1[idB].SetDefaults()
	// Write the modified issue set to a separate file so the original export
	// is left untouched.
	jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl")
	writeJSONLIssues(t, jsonl1Tomb, issues1)
	// Workspace 2: SQLite import JSONL -> export JSONL
	ws2 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws2)
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "sqlite", "--prefix", "test", "--quiet"); err != nil {
		t.Fatalf("bd init --backend sqlite failed: %v\n%s", err, out)
	}
	jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
	data, err := os.ReadFile(jsonl1Tomb)
	if err != nil {
		t.Fatalf("read dolt export: %v", err)
	}
	if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
		t.Fatalf("write sqlite issues.jsonl: %v", err)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
		t.Fatalf("bd import (sqlite) failed: %v\n%s", err, out)
	}
	jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
		t.Fatalf("bd export (sqlite) failed: %v\n%s", err, out)
	}
	// The SQLite re-export must retain the tombstone and the original comment
	// timestamp.
	issues2 := readJSONLIssues(t, jsonl2out)
	if len(issues2) != 2 {
		t.Fatalf("expected 2 issues in sqlite export, got %d", len(issues2))
	}
	if issues2[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone after import into sqlite, got %q", idB, issues2[idB].Status)
	}
	ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
	if !ok {
		t.Fatalf("expected comment on %s in sqlite export", idA)
	}
	if !ts2.Equal(ts1) {
		t.Fatalf("expected comment timestamp preserved across dolt->sqlite, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
	}
}

View File

@@ -510,7 +510,7 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
func CheckDatabaseJSONLSync(path string) DoctorCheck {
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: JSONL is a derived compatibility artifact (export-only today).
// Dolt backend: JSONL is an optional compatibility artifact.
// The SQLite-style import/export divergence checks don't apply.
if backend == configfile.BackendDolt {
// Find JSONL file (respects metadata.json override when set).
@@ -545,7 +545,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
Name: "DB-JSONL Sync",
Status: StatusOK,
Message: "N/A (dolt backend)",
Detail: "JSONL is derived from Dolt (export-only); import-only sync checks do not apply",
Detail: "Dolt sync is database-native; JSONL divergence checks do not apply (manual JSONL import/export is supported).",
}
}

View File

@@ -780,14 +780,20 @@ func TestMergeDriverWithLockedConfig_E2E(t *testing.T) {
dir := setupTestGitRepo(t)
gitDir := filepath.Join(dir, ".git")
gitConfigPath := filepath.Join(dir, ".git", "config")
// Make git config read-only
if err := os.Chmod(gitConfigPath, 0444); err != nil {
// Make both .git directory and config file read-only to truly prevent writes.
// Git may otherwise write via lockfile+rename even if the config file itself is read-only.
if err := os.Chmod(gitConfigPath, 0400); err != nil {
t.Fatalf("failed to make config read-only: %v", err)
}
if err := os.Chmod(gitDir, 0500); err != nil {
t.Fatalf("failed to make .git read-only: %v", err)
}
defer func() {
// Restore permissions for cleanup
_ = os.Chmod(gitDir, 0755)
_ = os.Chmod(gitConfigPath, 0644)
}()

View File

@@ -68,7 +68,7 @@ func CheckSyncDivergence(path string) DoctorCheck {
}
// Check 2: SQLite last_import_time vs JSONL mtime (SQLite only).
// Dolt backend does not maintain SQLite metadata and does not support import-only sync.
// Dolt backend does not maintain SQLite metadata; this SQLite-only check doesn't apply.
if backend == configfile.BackendSQLite {
mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir)
if mtimeIssue != nil {

View File

@@ -0,0 +1,288 @@
//go:build integration
// +build integration
package main
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// parseCreateID extracts the "id" field from the JSON object embedded in the
// output of `bd create --json`. The test fails if no JSON object is found or
// if the object lacks a non-empty string id.
func parseCreateID(t *testing.T, out string) string {
	t.Helper()
	start := strings.Index(out, "{")
	if start < 0 {
		t.Fatalf("expected JSON in output, got:\n%s", out)
	}
	var payload map[string]any
	if err := json.Unmarshal([]byte(out[start:]), &payload); err != nil {
		t.Fatalf("failed to parse create JSON: %v\n%s", err, out)
	}
	id, _ := payload["id"].(string)
	if id == "" {
		t.Fatalf("missing id in create output:\n%s", out)
	}
	return id
}
// readJSONLIssues parses a JSONL export file into a map keyed by issue ID.
// Each non-blank line must be a standalone JSON-encoded types.Issue.
// SetDefaults is applied to every record so later comparisons see normalized
// values. Any read or parse failure fails the test immediately.
func readJSONLIssues(t *testing.T, path string) map[string]*types.Issue {
	t.Helper()
	f, err := os.Open(path) // #nosec G304 -- test-controlled path
	if err != nil {
		t.Fatalf("open %s: %v", path, err)
	}
	defer func() { _ = f.Close() }()
	scanner := bufio.NewScanner(f)
	// Raise the max line size to 2MB so large issues don't truncate the scan.
	scanner.Buffer(make([]byte, 0, 64*1024), 2*1024*1024)
	out := make(map[string]*types.Issue)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue
		}
		// iss is declared fresh each iteration, so taking its address is safe
		// and avoids the previous extra copy (which shadowed builtin copy).
		var iss types.Issue
		if err := json.Unmarshal([]byte(line), &iss); err != nil {
			t.Fatalf("unmarshal JSONL line: %v\nline=%s", err, line)
		}
		iss.SetDefaults()
		out[iss.ID] = &iss
	}
	if err := scanner.Err(); err != nil {
		t.Fatalf("scan %s: %v", path, err)
	}
	return out
}
// writeJSONLIssues serializes the issue map to path as JSONL, one JSON object
// per line, in ascending ID order for deterministic output. Nil entries are
// skipped. Any I/O or marshal failure fails the test immediately.
func writeJSONLIssues(t *testing.T, path string, issues map[string]*types.Issue) {
	t.Helper()
	f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600) // #nosec G304 -- test-controlled path
	if err != nil {
		t.Fatalf("open %s for write: %v", path, err)
	}
	defer func() { _ = f.Close() }()
	// Sort IDs so successive writes of the same map produce identical files.
	ids := make([]string, 0, len(issues))
	for id := range issues {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	w := bufio.NewWriter(f)
	for _, id := range ids {
		iss := issues[id]
		if iss == nil {
			continue
		}
		encoded, err := json.Marshal(iss)
		if err != nil {
			t.Fatalf("marshal issue %s: %v", id, err)
		}
		encoded = append(encoded, '\n')
		if _, err := w.Write(encoded); err != nil {
			t.Fatalf("write issue %s: %v", id, err)
		}
	}
	if err := w.Flush(); err != nil {
		t.Fatalf("flush %s: %v", path, err)
	}
}
// findCommentTimestamp returns the CreatedAt of the first comment on iss
// whose author matches exactly and whose whitespace-trimmed text equals the
// trimmed target text. The second result is false when no such comment (or
// issue) exists.
func findCommentTimestamp(iss *types.Issue, author, text string) (time.Time, bool) {
	if iss == nil {
		return time.Time{}, false
	}
	want := strings.TrimSpace(text)
	for _, c := range iss.Comments {
		if c.Author != author {
			continue
		}
		if strings.TrimSpace(c.Text) == want {
			return c.CreatedAt, true
		}
	}
	return time.Time{}, false
}
// findCommentTimestampByText returns the CreatedAt of the first comment on
// iss whose whitespace-trimmed text equals the trimmed target text, ignoring
// the author. The second result is false when no such comment (or issue)
// exists.
func findCommentTimestampByText(iss *types.Issue, text string) (time.Time, bool) {
	if iss == nil {
		return time.Time{}, false
	}
	want := strings.TrimSpace(text)
	for _, c := range iss.Comments {
		if strings.TrimSpace(c.Text) == want {
			return c.CreatedAt, true
		}
	}
	return time.Time{}, false
}
// TestDoltJSONLRoundTrip_DepsLabelsCommentsTombstones verifies Dolt -> JSONL
// -> Dolt fidelity: labels, dependencies, comments (with timestamps), and a
// hand-injected tombstone record must survive export, import into a second
// Dolt workspace, and re-export.
//
// Uses the shared setupGitRepoForIntegration and isDoltBackendUnavailable
// helpers for consistency with the other cross-backend round-trip tests in
// this package (they previously re-inlined the same logic).
func TestDoltJSONLRoundTrip_DepsLabelsCommentsTombstones(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("dolt integration test not supported on windows")
	}
	// Workspace 1: create data and export JSONL.
	ws1 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws1)
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_NO_DAEMON=1",
	}
	initOut, initErr := runBDExecAllowErrorWithEnv(t, ws1, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// Skip (not fail) when the Dolt backend is unavailable in this build.
		if isDoltBackendUnavailable(initOut) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	// Create two issues: A carries the label/comment/dependency, B becomes
	// the tombstone.
	outA, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue A", "--json")
	if err != nil {
		t.Fatalf("bd create A failed: %v\n%s", err, outA)
	}
	idA := parseCreateID(t, outA)
	outB, err := runBDExecAllowErrorWithEnv(t, ws1, env, "create", "Issue B", "--json")
	if err != nil {
		t.Fatalf("bd create B failed: %v\n%s", err, outB)
	}
	idB := parseCreateID(t, outB)
	// Add label + comment + dependency.
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "label", "add", idA, "urgent"); err != nil {
		t.Fatalf("bd label add failed: %v\n%s", err, out)
	}
	commentText := "Hello from JSONL round-trip"
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "comments", "add", idA, commentText); err != nil {
		t.Fatalf("bd comments add failed: %v\n%s", err, out)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "dep", "add", idA, idB); err != nil {
		t.Fatalf("bd dep add failed: %v\n%s", err, out)
	}
	jsonl1 := filepath.Join(ws1, ".beads", "issues.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws1, env, "export", "-o", jsonl1); err != nil {
		t.Fatalf("bd export failed: %v\n%s", err, out)
	}
	issues1 := readJSONLIssues(t, jsonl1)
	if len(issues1) != 2 {
		t.Fatalf("expected 2 issues in export1, got %d", len(issues1))
	}
	if issues1[idA] == nil || issues1[idB] == nil {
		t.Fatalf("expected exported issues to include %s and %s", idA, idB)
	}
	// Label present
	foundUrgent := false
	for _, l := range issues1[idA].Labels {
		if l == "urgent" {
			foundUrgent = true
			break
		}
	}
	if !foundUrgent {
		t.Fatalf("expected label 'urgent' on %s in export1", idA)
	}
	// Dependency present
	foundDep := false
	for _, d := range issues1[idA].Dependencies {
		if d.DependsOnID == idB {
			foundDep = true
			break
		}
	}
	if !foundDep {
		t.Fatalf("expected dependency %s -> %s in export1", idA, idB)
	}
	// Comment present + capture timestamp
	ts1, ok := findCommentTimestampByText(issues1[idA], commentText)
	if !ok || ts1.IsZero() {
		t.Fatalf("expected comment on %s in export1", idA)
	}
	// Create a tombstone record in JSONL for issue B (Dolt backend may not support
	// creating tombstones via `bd delete`, but it must round-trip tombstones via JSONL).
	now := time.Now().UTC()
	issues1[idB].Status = types.StatusTombstone
	issues1[idB].DeletedAt = &now
	issues1[idB].DeletedBy = "test"
	issues1[idB].DeleteReason = "test tombstone"
	issues1[idB].OriginalType = string(issues1[idB].IssueType)
	issues1[idB].SetDefaults()
	jsonl1Tomb := filepath.Join(ws1, ".beads", "issues.tomb.jsonl")
	writeJSONLIssues(t, jsonl1Tomb, issues1)
	// Re-read the file we just wrote to confirm the tombstone survived the
	// write/read cycle before involving a second workspace.
	issues1Tomb := readJSONLIssues(t, jsonl1Tomb)
	if issues1Tomb[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in tombstone JSONL, got %q", idB, issues1Tomb[idB].Status)
	}
	// Workspace 2: import JSONL into fresh Dolt DB and re-export.
	ws2 := createTempDirWithCleanup(t)
	setupGitRepoForIntegration(t, ws2)
	initOut2, initErr2 := runBDExecAllowErrorWithEnv(t, ws2, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr2 != nil {
		if isDoltBackendUnavailable(initOut2) {
			t.Skipf("dolt backend not available: %s", initOut2)
		}
		t.Fatalf("bd init --backend dolt (ws2) failed: %v\n%s", initErr2, initOut2)
	}
	// Copy JSONL into ws2 beads dir
	jsonl2in := filepath.Join(ws2, ".beads", "issues.jsonl")
	data, err := os.ReadFile(jsonl1Tomb)
	if err != nil {
		t.Fatalf("read export1: %v", err)
	}
	if err := os.WriteFile(jsonl2in, data, 0o600); err != nil {
		t.Fatalf("write ws2 issues.jsonl: %v", err)
	}
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "import", "-i", jsonl2in); err != nil {
		t.Fatalf("bd import failed: %v\n%s", err, out)
	}
	jsonl2out := filepath.Join(ws2, ".beads", "roundtrip.jsonl")
	if out, err := runBDExecAllowErrorWithEnv(t, ws2, env, "export", "-o", jsonl2out); err != nil {
		t.Fatalf("bd export (ws2) failed: %v\n%s", err, out)
	}
	issues2 := readJSONLIssues(t, jsonl2out)
	if len(issues2) != 2 {
		t.Fatalf("expected 2 issues in export2 (including tombstone), got %d", len(issues2))
	}
	if issues2[idB].Status != types.StatusTombstone {
		t.Fatalf("expected %s to be tombstone in export2, got %q", idB, issues2[idB].Status)
	}
	// Ensure comment timestamp preserved across import/export
	ts2, ok := findCommentTimestampByText(issues2[idA], commentText)
	if !ok {
		t.Fatalf("expected comment on %s in export2", idA)
	}
	if !ts2.Equal(ts1) {
		t.Fatalf("expected comment timestamp preserved, export1=%s export2=%s", ts1.Format(time.RFC3339Nano), ts2.Format(time.RFC3339Nano))
	}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/util"
@@ -181,7 +182,10 @@ Examples:
fmt.Fprintf(os.Stderr, "Error: no database path found\n")
os.Exit(1)
}
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout)
beadsDir := filepath.Dir(dbPath)
store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{
LockTimeout: lockTimeout,
})
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -396,14 +400,26 @@ Examples:
issue.Dependencies = allDeps[issue.ID]
}
// Populate labels for all issues
// Populate labels and comments for all issues (batch APIs)
ids := make([]string, 0, len(issues))
for _, issue := range issues {
labels, err := store.GetLabels(ctx, issue.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting labels for %s: %v\n", issue.ID, err)
os.Exit(1)
}
issue.Labels = labels
ids = append(ids, issue.ID)
}
labelsMap, err := store.GetLabelsForIssues(ctx, ids)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting labels: %v\n", err)
os.Exit(1)
}
commentsMap, err := store.GetCommentsForIssues(ctx, ids)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting comments: %v\n", err)
os.Exit(1)
}
for _, issue := range issues {
issue.Labels = labelsMap[issue.ID]
issue.Comments = commentsMap[issue.ID]
}
// Open output

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"strings"
"syscall"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/storage"
@@ -282,10 +281,10 @@ func runFederationStatus(cmd *cobra.Command, args []string) {
// Collect status for each peer
type peerStatus struct {
Status *storage.SyncStatus
URL string
Reachable bool
ReachError string
Status *storage.SyncStatus
URL string
Reachable bool
ReachError string
}
var peerStatuses []peerStatus
@@ -374,7 +373,7 @@ func runFederationAddPeer(cmd *cobra.Command, args []string) {
password := federationPassword
if federationUser != "" && password == "" {
fmt.Fprint(os.Stderr, "Password: ")
pwBytes, err := term.ReadPassword(int(syscall.Stdin))
pwBytes, err := term.ReadPassword(int(os.Stdin.Fd()))
fmt.Fprintln(os.Stderr) // newline after password
if err != nil {
FatalErrorRespectJSON("failed to read password: %v", err)

View File

@@ -638,7 +638,7 @@ func hookPostMergeDolt(beadsDir string) int {
doltStore, ok := store.(interface {
Branch(ctx context.Context, name string) error
Checkout(ctx context.Context, branch string) error
Merge(ctx context.Context, branch string) error
Merge(ctx context.Context, branch string) ([]storage.Conflict, error)
Commit(ctx context.Context, message string) error
CurrentBranch(ctx context.Context) (string, error)
DeleteBranch(ctx context.Context, branch string) error
@@ -691,10 +691,15 @@ func hookPostMergeDolt(beadsDir string) int {
}
// Merge import branch (Dolt provides cell-level merge)
if err := doltStore.Merge(ctx, importBranch); err != nil {
conflicts, err := doltStore.Merge(ctx, importBranch)
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not merge import branch: %v\n", err)
return 0
}
if len(conflicts) > 0 {
fmt.Fprintf(os.Stderr, "Warning: %d conflict(s) detected during Dolt merge; resolve with 'bd federation conflicts' or Dolt conflict tooling\n", len(conflicts))
// Best-effort: still return 0 to avoid blocking git merge, consistent with other hook warnings.
}
// Commit the merge
if err := doltStore.Commit(ctx, "Merge JSONL import"); err != nil {
@@ -839,7 +844,6 @@ func hookPostCheckout(args []string) int {
// =============================================================================
// importFromJSONLToStore imports issues from JSONL to a store.
// This is a placeholder - the actual implementation should use the store's methods.
func importFromJSONLToStore(ctx context.Context, store storage.Storage, jsonlPath string) error {
// Parse JSONL into issues
// #nosec G304 - jsonlPath is derived from beadsDir (trusted workspace path)

View File

@@ -16,7 +16,7 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/utils"
"golang.org/x/term"
@@ -74,10 +74,12 @@ NOTE: Import requires direct database access and does not work with daemon mode.
daemonClient = nil
var err error
store, err = sqlite.New(rootCtx, dbPath)
beadsDir := filepath.Dir(dbPath)
store, err = factory.NewFromConfigWithOptions(rootCtx, beadsDir, factory.Options{
LockTimeout: lockTimeout,
})
if err != nil {
// Check for fresh clone scenario
beadsDir := filepath.Dir(dbPath)
if handleFreshCloneError(err, beadsDir) {
os.Exit(1)
}

View File

@@ -89,7 +89,7 @@ The sync mode controls how beads synchronizes data with git and/or Dolt remotes.
|------|-------------|
| `git-portable` | (default) Export JSONL on push, import on pull. Standard git-based workflow. |
| `realtime` | Export JSONL on every database change. Legacy behavior, higher I/O. |
| `dolt-native` | Use Dolt remotes directly. No JSONL needed - Dolt handles sync. |
| `dolt-native` | Use Dolt remotes directly for sync. JSONL is not used for sync (but manual `bd import` / `bd export` still work). |
| `belt-and-suspenders` | Both Dolt remote AND JSONL backup. Maximum redundancy. |
#### Sync Triggers
@@ -143,7 +143,7 @@ federation:
- **git-portable** (default): Best for most teams. JSONL is committed to git, works with any git hosting.
- **realtime**: Use when you need instant JSONL updates (e.g., file watchers, CI triggers on JSONL changes).
- **dolt-native**: Use when you have Dolt infrastructure and want database-level sync without JSONL.
- **dolt-native**: Use when you have Dolt infrastructure and want database-level sync; JSONL remains available for portability/audits/manual workflows.
- **belt-and-suspenders**: Use for critical data where you want both Dolt sync AND git-portable backup.
### Example Config File

View File

@@ -0,0 +1,114 @@
//go:build !integration
// +build !integration
package importer
import (
"context"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage/memory"
"github.com/steveyegge/beads/internal/types"
)
// TestImportIssues_BackendAgnostic_DepsLabelsCommentsTombstone exercises
// ImportIssues against the in-memory store, checking that labels,
// dependencies, comment timestamps, and tombstone metadata (DeletedAt,
// status) all survive an import.
func TestImportIssues_BackendAgnostic_DepsLabelsCommentsTombstone(t *testing.T) {
	ctx := context.Background()
	store := memory.New("")
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("set issue_prefix: %v", err)
	}
	// Fixed timestamps so preservation can be asserted exactly.
	commentTS := time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC)
	deletedTS := time.Date(2021, 2, 3, 4, 5, 6, 7, time.UTC)
	issueA := &types.Issue{
		ID:        "test-1",
		Title:     "Issue A",
		IssueType: types.TypeTask,
		Status:    types.StatusOpen,
		Priority:  2,
		Labels:    []string{"urgent"},
		Dependencies: []*types.Dependency{
			{IssueID: "test-1", DependsOnID: "test-2", Type: types.DepBlocks},
		},
		Comments: []*types.Comment{
			{Author: "tester", Text: "hello", CreatedAt: commentTS},
		},
	}
	// Tombstone issue: only meaningful fields are set; everything else takes
	// its zero value (the previous version listed ~20 explicit zero-valued
	// fields, which Go struct literals make redundant).
	issueB := &types.Issue{
		ID:           "test-2",
		Title:        "Issue B",
		IssueType:    types.TypeTask,
		Status:       types.StatusTombstone,
		Priority:     4,
		Description:  "tombstone",
		DeletedAt:    &deletedTS,
		DeletedBy:    "tester",
		DeleteReason: "bye",
		OriginalType: string(types.TypeTask),
	}
	res, err := ImportIssues(ctx, "", store, []*types.Issue{issueA, issueB}, Options{OrphanHandling: OrphanAllow})
	if err != nil {
		t.Fatalf("ImportIssues: %v", err)
	}
	if res.Created != 2 {
		t.Fatalf("expected Created=2, got %d", res.Created)
	}
	// Labels round-trip.
	labels, err := store.GetLabels(ctx, "test-1")
	if err != nil {
		t.Fatalf("GetLabels: %v", err)
	}
	if len(labels) != 1 || labels[0] != "urgent" {
		t.Fatalf("expected labels [urgent], got %v", labels)
	}
	// Dependency round-trip.
	deps, err := store.GetDependencyRecords(ctx, "test-1")
	if err != nil {
		t.Fatalf("GetDependencyRecords: %v", err)
	}
	if len(deps) != 1 || deps[0].DependsOnID != "test-2" || deps[0].Type != types.DepBlocks {
		t.Fatalf("expected dependency test-1 blocks test-2, got %#v", deps)
	}
	// Comment round-trip, including exact CreatedAt preservation.
	comments, err := store.GetIssueComments(ctx, "test-1")
	if err != nil {
		t.Fatalf("GetIssueComments: %v", err)
	}
	if len(comments) != 1 {
		t.Fatalf("expected 1 comment, got %d", len(comments))
	}
	if !comments[0].CreatedAt.Equal(commentTS) {
		t.Fatalf("expected comment timestamp preserved (%s), got %s", commentTS.Format(time.RFC3339Nano), comments[0].CreatedAt.Format(time.RFC3339Nano))
	}
	// Tombstone round-trip: status and DeletedAt must be preserved.
	b, err := store.GetIssue(ctx, "test-2")
	if err != nil {
		t.Fatalf("GetIssue: %v", err)
	}
	if b.Status != types.StatusTombstone {
		t.Fatalf("expected tombstone status, got %q", b.Status)
	}
	if b.DeletedAt == nil || !b.DeletedAt.Equal(deletedTS) {
		t.Fatalf("expected DeletedAt preserved (%s), got %#v", deletedTS.Format(time.RFC3339Nano), b.DeletedAt)
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -153,10 +153,10 @@ func TestFieldComparator_StringConversion(t *testing.T) {
fc := newFieldComparator()
tests := []struct {
name string
value interface{}
wantStr string
wantOk bool
name string
value interface{}
wantStr string
wantOk bool
}{
{"string", "hello", "hello", true},
{"string pointer", stringPtr("world"), "world", true},
@@ -436,67 +436,67 @@ func TestRenameImportedIssuePrefixes(t *testing.T) {
func TestReplaceBoundaryAware(t *testing.T) {
tests := []struct {
name string
text string
oldID string
newID string
want string
name string
text string
oldID string
newID string
want string
}{
{
name: "simple replacement",
text: "See old-1 for details",
oldID: "old-1",
newID: "new-1",
want: "See new-1 for details",
name: "simple replacement",
text: "See old-1 for details",
oldID: "old-1",
newID: "new-1",
want: "See new-1 for details",
},
{
name: "multiple occurrences",
text: "old-1 and old-1 again",
oldID: "old-1",
newID: "new-1",
want: "new-1 and new-1 again",
name: "multiple occurrences",
text: "old-1 and old-1 again",
oldID: "old-1",
newID: "new-1",
want: "new-1 and new-1 again",
},
{
name: "no match substring prefix",
text: "old-10 should not match",
oldID: "old-1",
newID: "new-1",
want: "old-10 should not match",
name: "no match substring prefix",
text: "old-10 should not match",
oldID: "old-1",
newID: "new-1",
want: "old-10 should not match",
},
{
name: "match at end of longer ID",
text: "should not match old-1 at end",
oldID: "old-1",
newID: "new-1",
want: "should not match new-1 at end",
name: "match at end of longer ID",
text: "should not match old-1 at end",
oldID: "old-1",
newID: "new-1",
want: "should not match new-1 at end",
},
{
name: "boundary at start",
text: "old-1 starts here",
oldID: "old-1",
newID: "new-1",
want: "new-1 starts here",
name: "boundary at start",
text: "old-1 starts here",
oldID: "old-1",
newID: "new-1",
want: "new-1 starts here",
},
{
name: "boundary at end",
text: "ends with old-1",
oldID: "old-1",
newID: "new-1",
want: "ends with new-1",
name: "boundary at end",
text: "ends with old-1",
oldID: "old-1",
newID: "new-1",
want: "ends with new-1",
},
{
name: "boundary punctuation",
text: "See (old-1) and [old-1] or {old-1}",
oldID: "old-1",
newID: "new-1",
want: "See (new-1) and [new-1] or {new-1}",
name: "boundary punctuation",
text: "See (old-1) and [old-1] or {old-1}",
oldID: "old-1",
newID: "new-1",
want: "See (new-1) and [new-1] or {new-1}",
},
{
name: "no occurrence",
text: "No match here",
oldID: "old-1",
newID: "new-1",
want: "No match here",
name: "no occurrence",
text: "No match here",
oldID: "old-1",
newID: "new-1",
want: "No match here",
},
}
@@ -547,9 +547,9 @@ func TestIsValidIDSuffix(t *testing.T) {
{"abc.1.2.3", true},
{"1.5", true},
// Invalid suffixes
{"", false}, // Empty string
{"A3F8", false}, // Uppercase not allowed
{"@#$!", false}, // Special characters not allowed
{"", false}, // Empty string
{"A3F8", false}, // Uppercase not allowed
{"@#$!", false}, // Special characters not allowed
{"abc-def", false}, // Hyphens not allowed in suffix
}
@@ -569,7 +569,7 @@ func stringPtr(s string) *string {
func TestImportIssues_Basic(t *testing.T) {
ctx := context.Background()
// Create temp database
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(context.Background(), tmpDB)
@@ -577,12 +577,12 @@ func TestImportIssues_Basic(t *testing.T) {
t.Fatalf("Failed to create store: %v", err)
}
defer store.Close()
// Set config prefix
if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Import single issue
issues := []*types.Issue{
{
@@ -594,16 +594,16 @@ func TestImportIssues_Basic(t *testing.T) {
IssueType: types.TypeTask,
},
}
result, err := ImportIssues(ctx, tmpDB, store, issues, Options{})
if err != nil {
t.Fatalf("Import failed: %v", err)
}
if result.Created != 1 {
t.Errorf("Expected 1 created, got %d", result.Created)
}
// Verify issue was created
retrieved, err := store.GetIssue(ctx, "test-abc123")
if err != nil {
@@ -616,18 +616,18 @@ func TestImportIssues_Basic(t *testing.T) {
func TestImportIssues_Update(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
defer store.Close()
if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Create initial issue
issue1 := &types.Issue{
ID: "test-abc123",
@@ -638,12 +638,12 @@ func TestImportIssues_Update(t *testing.T) {
IssueType: types.TypeTask,
}
issue1.ContentHash = issue1.ComputeContentHash()
err = store.CreateIssue(ctx, issue1, "test")
if err != nil {
t.Fatalf("Failed to create initial issue: %v", err)
}
// Import updated version with newer timestamp
issue2 := &types.Issue{
ID: "test-abc123",
@@ -656,18 +656,18 @@ func TestImportIssues_Update(t *testing.T) {
UpdatedAt: time.Now().Add(time.Hour), // Newer than issue1
}
issue2.ContentHash = issue2.ComputeContentHash()
result, err := ImportIssues(ctx, tmpDB, store, []*types.Issue{issue2}, Options{})
if err != nil {
t.Fatalf("Import failed: %v", err)
}
// The importer detects this as both a collision (1) and then upserts it (creates=1)
// Total updates = collision count + actual upserts
if result.Updated == 0 && result.Created == 0 {
t.Error("Expected some updates or creates")
}
// Verify update
retrieved, err := store.GetIssue(ctx, "test-abc123")
if err != nil {
@@ -680,18 +680,18 @@ func TestImportIssues_Update(t *testing.T) {
func TestImportIssues_DryRun(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
defer store.Close()
if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
issues := []*types.Issue{
{
ID: "test-abc123",
@@ -701,13 +701,13 @@ func TestImportIssues_DryRun(t *testing.T) {
IssueType: types.TypeTask,
},
}
// Dry run returns early when no collisions, so it reports what would be created
result, err := ImportIssues(ctx, tmpDB, store, issues, Options{DryRun: true})
if err != nil {
t.Fatalf("Import failed: %v", err)
}
// Should report that 1 issue would be created
if result.Created != 1 {
t.Errorf("Expected 1 would be created in dry run, got %d", result.Created)
@@ -716,18 +716,18 @@ func TestImportIssues_DryRun(t *testing.T) {
func TestImportIssues_Dependencies(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
defer store.Close()
if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
issues := []*types.Issue{
{
ID: "test-abc123",
@@ -747,16 +747,16 @@ func TestImportIssues_Dependencies(t *testing.T) {
IssueType: types.TypeTask,
},
}
result, err := ImportIssues(ctx, tmpDB, store, issues, Options{})
if err != nil {
t.Fatalf("Import failed: %v", err)
}
if result.Created != 2 {
t.Errorf("Expected 2 created, got %d", result.Created)
}
// Verify dependency was created
deps, err := store.GetDependencies(ctx, "test-abc123")
if err != nil {
@@ -769,18 +769,18 @@ func TestImportIssues_Dependencies(t *testing.T) {
func TestImportIssues_Labels(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
defer store.Close()
if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
issues := []*types.Issue{
{
ID: "test-abc123",
@@ -791,16 +791,16 @@ func TestImportIssues_Labels(t *testing.T) {
Labels: []string{"bug", "critical"},
},
}
result, err := ImportIssues(ctx, tmpDB, store, issues, Options{})
if err != nil {
t.Fatalf("Import failed: %v", err)
}
if result.Created != 1 {
t.Errorf("Expected 1 created, got %d", result.Created)
}
// Verify labels were created
retrieved, err := store.GetIssue(ctx, "test-abc123")
if err != nil {
@@ -811,61 +811,13 @@ func TestImportIssues_Labels(t *testing.T) {
}
}
func TestGetOrCreateStore_ExistingStore(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
store, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
defer store.Close()
result, needClose, err := getOrCreateStore(ctx, tmpDB, store)
if err != nil {
t.Fatalf("Expected no error, got: %v", err)
}
if needClose {
t.Error("Expected needClose=false for existing store")
}
if result != store {
t.Error("Expected same store instance")
}
}
// NOTE: getOrCreateStore was removed; importer now requires an initialized store.
func TestGetOrCreateStore_NewStore(t *testing.T) {
func TestImportIssues_RequiresStore(t *testing.T) {
ctx := context.Background()
tmpDB := t.TempDir() + "/test.db"
// Create initial database
initStore, err := sqlite.New(context.Background(), tmpDB)
if err != nil {
t.Fatalf("Failed to create store: %v", err)
}
initStore.Close()
// Test creating new connection
result, needClose, err := getOrCreateStore(ctx, tmpDB, nil)
if err != nil {
t.Fatalf("Expected no error, got: %v", err)
}
defer result.Close()
if !needClose {
t.Error("Expected needClose=true for new store")
}
if result == nil {
t.Error("Expected non-nil store")
}
}
func TestGetOrCreateStore_EmptyPath(t *testing.T) {
ctx := context.Background()
_, _, err := getOrCreateStore(ctx, "", nil)
_, err := ImportIssues(ctx, "", nil, []*types.Issue{}, Options{})
if err == nil {
t.Error("Expected error for empty database path")
t.Fatal("expected error when store is nil")
}
}
@@ -891,7 +843,7 @@ func TestGetPrefixList(t *testing.T) {
want: []string{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := GetPrefixList(tt.prefixes)
@@ -1010,7 +962,7 @@ func TestValidateNoDuplicateExternalRefs(t *testing.T) {
func TestConcurrentExternalRefImports(t *testing.T) {
t.Skip("TODO(bd-gpe7): Test hangs due to database deadlock - needs investigation")
t.Run("sequential imports with same external_ref are detected as updates", func(t *testing.T) {
store, err := sqlite.New(context.Background(), ":memory:")
if err != nil {
@@ -1024,7 +976,7 @@ func TestConcurrentExternalRefImports(t *testing.T) {
}
externalRef := "JIRA-100"
issue1 := &types.Issue{
ID: "bd-1",
Title: "First import",
@@ -1182,7 +1134,7 @@ func TestImportOrphanSkip_CountMismatch(t *testing.T) {
UpdatedAt: now,
},
{
ID: "test-orphan.1", // Child of non-existent parent
ID: "test-orphan.1", // Child of non-existent parent
Title: "Orphaned Child",
Status: types.StatusOpen,
Priority: 2,
@@ -1203,7 +1155,7 @@ func TestImportOrphanSkip_CountMismatch(t *testing.T) {
// Import with OrphanSkip mode - parent doesn't exist
result, err := ImportIssues(ctx, "", store, issues, Options{
OrphanHandling: sqlite.OrphanSkip,
OrphanHandling: OrphanSkip,
SkipPrefixValidation: true, // Allow explicit IDs during import
})
if err != nil {

View File

@@ -263,14 +263,17 @@ func (s *DoltStore) UpdatePeerLastSync(ctx context.Context, name string) error {
// The caller must hold federationEnvMutex.
func setFederationCredentials(username, password string) func() {
if username != "" {
os.Setenv("DOLT_REMOTE_USER", username)
// Best-effort: failures here should not crash the caller.
_ = os.Setenv("DOLT_REMOTE_USER", username)
}
if password != "" {
os.Setenv("DOLT_REMOTE_PASSWORD", password)
// Best-effort: failures here should not crash the caller.
_ = os.Setenv("DOLT_REMOTE_PASSWORD", password)
}
return func() {
os.Unsetenv("DOLT_REMOTE_USER")
os.Unsetenv("DOLT_REMOTE_PASSWORD")
// Best-effort cleanup.
_ = os.Unsetenv("DOLT_REMOTE_USER")
_ = os.Unsetenv("DOLT_REMOTE_PASSWORD")
}
}

View File

@@ -65,10 +65,26 @@ func (s *DoltStore) GetEvents(ctx context.Context, issueID string, limit int) ([
// AddIssueComment adds a structured comment to an issue, stamped with the
// current UTC time. It delegates to ImportIssueComment, which performs the
// existence check and the insert.
func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) {
	now := time.Now().UTC()
	return s.ImportIssueComment(ctx, issueID, author, text, now)
}
// ImportIssueComment adds a comment during import, preserving the original timestamp.
// This prevents comment timestamp drift across JSONL sync cycles.
func (s *DoltStore) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
// Verify issue exists
var exists bool
if err := s.db.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM issues WHERE id = ?)`, issueID).Scan(&exists); err != nil {
return nil, fmt.Errorf("failed to check issue existence: %w", err)
}
if !exists {
return nil, fmt.Errorf("issue %s not found", issueID)
}
createdAt = createdAt.UTC()
result, err := s.db.ExecContext(ctx, `
INSERT INTO comments (issue_id, author, text, created_at)
VALUES (?, ?, ?, ?)
`, issueID, author, text, time.Now().UTC())
`, issueID, author, text, createdAt)
if err != nil {
return nil, fmt.Errorf("failed to add comment: %w", err)
}
@@ -78,12 +94,21 @@ func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text s
return nil, fmt.Errorf("failed to get comment id: %w", err)
}
// Mark issue dirty for incremental JSONL export
if _, err := s.db.ExecContext(ctx, `
INSERT INTO dirty_issues (issue_id, marked_at)
VALUES (?, ?)
ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at)
`, issueID, time.Now().UTC()); err != nil {
return nil, fmt.Errorf("failed to mark issue dirty: %w", err)
}
return &types.Comment{
ID: id,
IssueID: issueID,
Author: author,
Text: text,
CreatedAt: time.Now().UTC(),
CreatedAt: createdAt,
}, nil
}

View File

@@ -0,0 +1,15 @@
//go:build !windows
// +build !windows
package dolt
import (
"os/exec"
"syscall"
)
func setDoltServerSysProcAttr(cmd *exec.Cmd) {
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
}

View File

@@ -0,0 +1,12 @@
//go:build windows
// +build windows
package dolt
import "os/exec"
// Windows does not support Setpgid; leave default process attributes.
func setDoltServerSysProcAttr(cmd *exec.Cmd) {
// no-op
_ = cmd
}

View File

@@ -0,0 +1,32 @@
//go:build !windows
// +build !windows
package dolt
import (
	"errors"
	"os"
	"strings"
	"syscall"
)
func processMayBeAlive(p *os.Process) bool {
// Signal 0 checks for existence without sending a real signal.
if err := p.Signal(syscall.Signal(0)); err != nil {
return false
}
return true
}
func terminateProcess(p *os.Process) error {
if p == nil {
return nil
}
if err := p.Signal(syscall.SIGTERM); err != nil {
// Process may already be dead; treat as success.
if strings.Contains(err.Error(), "process already finished") {
return nil
}
return err
}
return nil
}

View File

@@ -0,0 +1,21 @@
//go:build windows
// +build windows
package dolt
import "os"
// processMayBeAlive reports liveness on Windows. Unix-style signal(0) probes
// are unavailable there, so the process is conservatively assumed alive and
// connection attempts / wait timeouts determine actual readiness.
func processMayBeAlive(_ *os.Process) bool {
	return true
}
// terminateProcess stops a process on Windows. There are no SIGTERM
// semantics, so this is a best-effort hard kill; a nil process is a no-op.
func terminateProcess(p *os.Process) error {
	if p != nil {
		return p.Kill()
	}
	return nil
}

View File

@@ -15,7 +15,6 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"time"
)
@@ -103,13 +102,12 @@ func (s *Server) Start(ctx context.Context) error {
}
// Create command
// #nosec G204 -- dolt binary is fixed; args are derived from internal config.
s.cmd = exec.CommandContext(ctx, "dolt", args...)
s.cmd.Dir = s.cfg.DataDir
// Set up process group for clean shutdown
s.cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
// Set up process group for clean shutdown (Unix-only; no-op on Windows).
setDoltServerSysProcAttr(s.cmd)
// Set up logging
if s.cfg.LogFile != "" {
@@ -162,13 +160,8 @@ func (s *Server) Stop() error {
return nil
}
// Try graceful shutdown first (SIGTERM)
if err := s.cmd.Process.Signal(syscall.SIGTERM); err != nil {
// Process may already be dead
if !strings.Contains(err.Error(), "process already finished") {
return fmt.Errorf("failed to send SIGTERM: %w", err)
}
}
// Best-effort graceful shutdown (platform-specific).
_ = terminateProcess(s.cmd.Process)
// Wait for graceful shutdown with timeout
done := make(chan error, 1)
@@ -249,11 +242,9 @@ func (s *Server) waitForReady(ctx context.Context) error {
default:
}
// Check if process is still alive using signal 0
if s.cmd.Process != nil {
if err := s.cmd.Process.Signal(syscall.Signal(0)); err != nil {
return fmt.Errorf("server process exited unexpectedly")
}
// Best-effort: if we can tell the process is dead, fail fast.
if s.cmd.Process != nil && !processMayBeAlive(s.cmd.Process) {
return fmt.Errorf("server process exited unexpectedly")
}
// Try to connect
@@ -272,6 +263,7 @@ func (s *Server) waitForReady(ctx context.Context) error {
// GetRunningServerPID returns the PID of a running server from the PID file, or 0 if not running
func GetRunningServerPID(dataDir string) int {
pidFile := filepath.Join(dataDir, "dolt-server.pid")
// #nosec G304 -- pidFile is derived from internal dataDir.
data, err := os.ReadFile(pidFile)
if err != nil {
return 0
@@ -288,9 +280,8 @@ func GetRunningServerPID(dataDir string) int {
return 0
}
// On Unix, FindProcess always succeeds, so we need to check if it's alive
if err := process.Signal(syscall.Signal(0)); err != nil {
// Process is not running
// Best-effort liveness check (platform-specific).
if !processMayBeAlive(process) {
_ = os.Remove(pidFile)
return 0
}
@@ -305,13 +296,8 @@ func StopServerByPID(pid int) error {
return err
}
// Try graceful shutdown first
if err := process.Signal(syscall.SIGTERM); err != nil {
if !strings.Contains(err.Error(), "process already finished") {
return err
}
return nil
}
// Best-effort graceful shutdown (platform-specific).
_ = terminateProcess(process)
// Wait for graceful shutdown
done := make(chan struct{})

View File

@@ -17,6 +17,12 @@ type doltTransaction struct {
store *DoltStore
}
// CreateIssueImport is the import-friendly issue creation hook.
// Dolt does not enforce prefix validation at the storage layer, so the
// skipPrefixValidation flag is intentionally ignored here and creation
// delegates straight to CreateIssue within the same transaction.
func (t *doltTransaction) CreateIssueImport(ctx context.Context, issue *types.Issue, actor string, skipPrefixValidation bool) error {
	return t.CreateIssue(ctx, issue, actor)
}
// RunInTransaction executes a function within a database transaction
func (s *DoltStore) RunInTransaction(ctx context.Context, fn func(tx storage.Transaction) error) error {
sqlTx, err := s.db.BeginTx(ctx, nil)
@@ -169,6 +175,36 @@ func (t *doltTransaction) AddDependency(ctx context.Context, dep *types.Dependen
return err
}
// GetDependencyRecords returns all dependency rows for the given issue within
// the transaction, including optional metadata and thread IDs.
func (t *doltTransaction) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	rows, err := t.tx.QueryContext(ctx, `
		SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id
		FROM dependencies
		WHERE issue_id = ?
	`, issueID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var records []*types.Dependency
	for rows.Next() {
		rec := new(types.Dependency)
		var metadata, threadID sql.NullString
		if err := rows.Scan(&rec.IssueID, &rec.DependsOnID, &rec.Type, &rec.CreatedAt, &rec.CreatedBy, &metadata, &threadID); err != nil {
			return nil, err
		}
		// NullString.String is "" when invalid, so unconditional assignment
		// is equivalent to guarding on Valid.
		rec.Metadata = metadata.String
		rec.ThreadID = threadID.String
		records = append(records, rec)
	}
	return records, rows.Err()
}
// RemoveDependency removes a dependency within the transaction
func (t *doltTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
_, err := t.tx.ExecContext(ctx, `
@@ -185,6 +221,23 @@ func (t *doltTransaction) AddLabel(ctx context.Context, issueID, label, actor st
return err
}
// GetLabels returns the labels attached to an issue, sorted alphabetically
// by the query's ORDER BY clause.
func (t *doltTransaction) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	rows, err := t.tx.QueryContext(ctx, `SELECT label FROM labels WHERE issue_id = ? ORDER BY label`, issueID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var out []string
	for rows.Next() {
		var label string
		if err := rows.Scan(&label); err != nil {
			return nil, err
		}
		out = append(out, label)
	}
	return out, rows.Err()
}
// RemoveLabel removes a label within the transaction
func (t *doltTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
_, err := t.tx.ExecContext(ctx, `
@@ -231,6 +284,63 @@ func (t *doltTransaction) GetMetadata(ctx context.Context, key string) (string,
return value, err
}
// ImportIssueComment inserts a comment inside the transaction while keeping
// the caller-supplied creation timestamp (used by JSONL import to avoid
// timestamp drift). It also marks the issue dirty for incremental export.
func (t *doltTransaction) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	// The issue must exist within this transaction's view.
	issue, err := t.GetIssue(ctx, issueID)
	if err != nil {
		return nil, err
	}
	if issue == nil {
		return nil, fmt.Errorf("issue %s not found", issueID)
	}

	when := createdAt.UTC()
	res, err := t.tx.ExecContext(ctx, `
		INSERT INTO comments (issue_id, author, text, created_at)
		VALUES (?, ?, ?, ?)
	`, issueID, author, text, when)
	if err != nil {
		return nil, fmt.Errorf("failed to add comment: %w", err)
	}
	commentID, err := res.LastInsertId()
	if err != nil {
		return nil, fmt.Errorf("failed to get comment id: %w", err)
	}

	// Flag the issue as dirty so the next incremental JSONL export picks it up.
	if _, err := t.tx.ExecContext(ctx, `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at)
	`, issueID, time.Now().UTC()); err != nil {
		return nil, fmt.Errorf("failed to mark issue dirty: %w", err)
	}

	return &types.Comment{ID: commentID, IssueID: issueID, Author: author, Text: text, CreatedAt: when}, nil
}
// GetIssueComments returns an issue's structured comments within the
// transaction, oldest first.
func (t *doltTransaction) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
	rows, err := t.tx.QueryContext(ctx, `
		SELECT id, issue_id, author, text, created_at
		FROM comments
		WHERE issue_id = ?
		ORDER BY created_at ASC
	`, issueID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var out []*types.Comment
	for rows.Next() {
		comment := new(types.Comment)
		if err := rows.Scan(&comment.ID, &comment.IssueID, &comment.Author, &comment.Text, &comment.CreatedAt); err != nil {
			return nil, err
		}
		out = append(out, comment)
	}
	return out, rows.Err()
}
// AddComment adds a comment within the transaction
func (t *doltTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error {
_, err := t.tx.ExecContext(ctx, `

View File

@@ -1458,6 +1458,24 @@ func (m *MemoryStorage) AddIssueComment(ctx context.Context, issueID, author, te
return comment, nil
}
// ImportIssueComment stores a comment with the caller-supplied timestamp,
// mirroring the SQLite and Dolt implementations used by JSONL import.
func (m *MemoryStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	comment := &types.Comment{
		// IDs are 1-based per issue, matching AddIssueComment's scheme.
		ID:      int64(len(m.comments[issueID]) + 1),
		IssueID: issueID,
		Author:  author,
		Text:    text,
		// Normalize to UTC for parity with the SQL backends, which store and
		// return import timestamps in UTC.
		CreatedAt: createdAt.UTC(),
	}
	m.comments[issueID] = append(m.comments[issueID], comment)
	m.dirty[issueID] = true
	return comment, nil
}
func (m *MemoryStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
m.mu.RLock()
defer m.mu.RUnlock()

View File

@@ -3,6 +3,7 @@ package sqlite
import (
"context"
"fmt"
"time"
"github.com/steveyegge/beads/internal/types"
)
@@ -56,7 +57,7 @@ func (s *SQLiteStorage) AddIssueComment(ctx context.Context, issueID, author, te
// Unlike AddIssueComment which uses CURRENT_TIMESTAMP, this method uses the provided
// createdAt time from the JSONL file. This prevents timestamp drift during sync cycles.
// GH#735: Comment created_at timestamps were being overwritten with current time during import.
func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt string) (*types.Comment, error) {
func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
// Verify issue exists
var exists bool
err := s.db.QueryRowContext(ctx, `SELECT EXISTS(SELECT 1 FROM issues WHERE id = ?)`, issueID).Scan(&exists)
@@ -68,10 +69,11 @@ func (s *SQLiteStorage) ImportIssueComment(ctx context.Context, issueID, author,
}
// Insert comment with provided timestamp
createdAtStr := createdAt.UTC().Format(time.RFC3339Nano)
result, err := s.db.ExecContext(ctx, `
INSERT INTO comments (issue_id, author, text, created_at)
VALUES (?, ?, ?, ?)
`, issueID, author, text, createdAt)
`, issueID, author, text, createdAtStr)
if err != nil {
return nil, fmt.Errorf("failed to insert comment: %w", err)
}

View File

@@ -0,0 +1,115 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/steveyegge/beads/internal/types"
)
// CreateIssueImport creates an issue inside an existing sqlite transaction, optionally skipping
// prefix validation. This is used by JSONL import to support multi-repo mode (GH#686).
//
// Behavior:
//   - Missing Created/Updated timestamps default to time.Now().
//   - Closed/tombstone invariants are repaired defensively (ClosedAt/DeletedAt
//     are synthesized one second after the latest known timestamp).
//   - An empty ID is generated from the configured prefix; explicit IDs are
//     prefix-validated unless skipPrefixValidation is set.
//   - Hierarchical IDs ("parent.child") require the parent row to exist.
//
// Returns an error if the database is uninitialized (no issue_prefix config),
// validation fails, the parent of a hierarchical ID is missing, or any insert
// or bookkeeping step fails.
func (t *sqliteTxStorage) CreateIssueImport(ctx context.Context, issue *types.Issue, actor string, skipPrefixValidation bool) error {
	// Fetch custom statuses and types so validation accepts user-defined values.
	customStatuses, err := t.GetCustomStatuses(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom statuses: %w", err)
	}
	customTypes, err := t.GetCustomTypes(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom types: %w", err)
	}

	// Set timestamps: imported records normally carry their own, but default
	// any zero values to now.
	now := time.Now()
	if issue.CreatedAt.IsZero() {
		issue.CreatedAt = now
	}
	if issue.UpdatedAt.IsZero() {
		issue.UpdatedAt = now
	}

	// Defensive fix for closed_at invariant: a closed issue must have a
	// ClosedAt strictly after its latest known timestamp.
	if issue.Status == types.StatusClosed && issue.ClosedAt == nil {
		maxTime := issue.CreatedAt
		if issue.UpdatedAt.After(maxTime) {
			maxTime = issue.UpdatedAt
		}
		closedAt := maxTime.Add(time.Second)
		issue.ClosedAt = &closedAt
	}

	// Defensive fix for tombstone invariant: same repair for DeletedAt.
	if issue.Status == types.StatusTombstone && issue.DeletedAt == nil {
		maxTime := issue.CreatedAt
		if issue.UpdatedAt.After(maxTime) {
			maxTime = issue.UpdatedAt
		}
		deletedAt := maxTime.Add(time.Second)
		issue.DeletedAt = &deletedAt
	}

	// Validate issue before creating (includes custom statuses/types).
	if err := issue.ValidateWithCustom(customStatuses, customTypes); err != nil {
		return fmt.Errorf("validation failed: %w", err)
	}

	// Compute content hash if the source record did not carry one.
	if issue.ContentHash == "" {
		issue.ContentHash = issue.ComputeContentHash()
	}

	// Get configured prefix for validation and ID generation behavior.
	// NOTE(review): this compares err == sql.ErrNoRows directly; errors.Is
	// would be more robust if the driver ever wraps it — confirm before changing.
	var configPrefix string
	err = t.conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&configPrefix)
	if err == sql.ErrNoRows || configPrefix == "" {
		return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)")
	} else if err != nil {
		return fmt.Errorf("failed to get config: %w", err)
	}

	// Multi-repo mode: a per-issue IDPrefix extends the configured prefix.
	prefix := configPrefix
	if issue.IDPrefix != "" {
		prefix = configPrefix + "-" + issue.IDPrefix
	}
	if issue.ID == "" {
		// Import path expects IDs, but be defensive and generate if missing.
		generatedID, err := GenerateIssueID(ctx, t.conn, prefix, issue, actor)
		if err != nil {
			return fmt.Errorf("failed to generate issue ID: %w", err)
		}
		issue.ID = generatedID
	} else if !skipPrefixValidation {
		if err := ValidateIssueIDPrefix(issue.ID, prefix); err != nil {
			return fmt.Errorf("failed to validate issue ID prefix: %w", err)
		}
	}

	// Ensure parent exists for hierarchical IDs (importer should have ensured / resurrected).
	if isHierarchical, parentID := IsHierarchicalID(issue.ID); isHierarchical {
		var parentCount int
		if err := t.conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, parentID).Scan(&parentCount); err != nil {
			return fmt.Errorf("failed to check parent existence: %w", err)
		}
		if parentCount == 0 {
			return fmt.Errorf("parent issue %s does not exist", parentID)
		}
	}

	// Insert issue (strict: fails on conflicts rather than upserting).
	if err := insertIssueStrict(ctx, t.conn, issue); err != nil {
		return fmt.Errorf("failed to insert issue: %w", err)
	}

	// Record the creation event in the audit log.
	if err := recordCreatedEvent(ctx, t.conn, issue, actor); err != nil {
		return fmt.Errorf("failed to record creation event: %w", err)
	}

	// Mark dirty so incremental JSONL export includes this issue.
	if err := markDirty(ctx, t.conn, issue.ID); err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}

	return nil
}

View File

@@ -824,6 +824,37 @@ func (t *sqliteTxStorage) AddDependency(ctx context.Context, dep *types.Dependen
return nil
}
// GetDependencyRecords retrieves dependency records for an issue within the
// transaction, including optional metadata and thread IDs.
func (t *sqliteTxStorage) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	rows, err := t.conn.QueryContext(ctx, `
		SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id
		FROM dependencies
		WHERE issue_id = ?
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to query dependencies: %w", err)
	}
	defer func() { _ = rows.Close() }()

	var records []*types.Dependency
	for rows.Next() {
		rec := new(types.Dependency)
		var metadata, threadID sql.NullString
		if err := rows.Scan(&rec.IssueID, &rec.DependsOnID, &rec.Type, &rec.CreatedAt, &rec.CreatedBy, &metadata, &threadID); err != nil {
			return nil, fmt.Errorf("failed to scan dependency: %w", err)
		}
		// NullString.String is "" when invalid, so unconditional assignment
		// is equivalent to guarding on Valid.
		rec.Metadata = metadata.String
		rec.ThreadID = threadID.String
		records = append(records, rec)
	}
	return records, rows.Err()
}
// RemoveDependency removes a dependency within the transaction.
func (t *sqliteTxStorage) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
// First, check what type of dependency is being removed
@@ -916,6 +947,11 @@ func (t *sqliteTxStorage) AddLabel(ctx context.Context, issueID, label, actor st
return nil
}
// GetLabels retrieves labels for an issue within the transaction.
// It delegates to the unexported helper shared with the non-transactional path.
func (t *sqliteTxStorage) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	return t.getLabels(ctx, issueID)
}
// RemoveLabel removes a label from an issue within the transaction.
func (t *sqliteTxStorage) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
result, err := t.conn.ExecContext(ctx, `
@@ -1063,6 +1099,68 @@ func (t *sqliteTxStorage) AddComment(ctx context.Context, issueID, actor, commen
return nil
}
// ImportIssueComment adds a structured comment during import, preserving the
// original timestamp so JSONL sync cycles do not drift comment times.
func (t *sqliteTxStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	// The target issue must be visible within this transaction.
	issue, err := t.GetIssue(ctx, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to check issue existence: %w", err)
	}
	if issue == nil {
		return nil, fmt.Errorf("issue %s not found", issueID)
	}

	// SQLite stores timestamps as RFC3339Nano strings, normalized to UTC.
	when := createdAt.UTC()
	res, err := t.conn.ExecContext(ctx, `
		INSERT INTO comments (issue_id, author, text, created_at)
		VALUES (?, ?, ?, ?)
	`, issueID, author, text, when.Format(time.RFC3339Nano))
	if err != nil {
		return nil, fmt.Errorf("failed to insert comment: %w", err)
	}
	id, err := res.LastInsertId()
	if err != nil {
		return nil, fmt.Errorf("failed to get comment ID: %w", err)
	}

	// Mark issue dirty so incremental JSONL export includes it.
	if err := markDirty(ctx, t.conn, issueID); err != nil {
		return nil, fmt.Errorf("failed to mark issue dirty: %w", err)
	}

	return &types.Comment{
		ID:        id,
		IssueID:   issueID,
		Author:    author,
		Text:      text,
		CreatedAt: when,
	}, nil
}
// GetIssueComments retrieves structured comments for an issue within the
// transaction, ordered oldest first.
func (t *sqliteTxStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
	rows, err := t.conn.QueryContext(ctx, `
		SELECT id, issue_id, author, text, created_at
		FROM comments
		WHERE issue_id = ?
		ORDER BY created_at ASC
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to query comments: %w", err)
	}
	defer func() { _ = rows.Close() }()

	var out []*types.Comment
	for rows.Next() {
		comment := new(types.Comment)
		if err := rows.Scan(&comment.ID, &comment.IssueID, &comment.Author, &comment.Text, &comment.CreatedAt); err != nil {
			return nil, fmt.Errorf("failed to scan comment: %w", err)
		}
		out = append(out, comment)
	}
	return out, rows.Err()
}
// SearchIssues finds issues matching query and filters within the transaction.
// This enables read-your-writes semantics for searching within a transaction.
func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {

View File

@@ -4,6 +4,7 @@ package storage
import (
"context"
"database/sql"
"time"
"github.com/steveyegge/beads/internal/types"
)
@@ -58,10 +59,12 @@ type Transaction interface {
// Dependency operations
AddDependency(ctx context.Context, dep *types.Dependency, actor string) error
RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error
GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error)
// Label operations
AddLabel(ctx context.Context, issueID, label, actor string) error
RemoveLabel(ctx context.Context, issueID, label, actor string) error
GetLabels(ctx context.Context, issueID string) ([]string, error)
// Config operations (for atomic config + issue workflows)
SetConfig(ctx context.Context, key, value string) error
@@ -73,6 +76,8 @@ type Transaction interface {
// Comment operations
AddComment(ctx context.Context, issueID, actor, comment string) error
ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error)
GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error)
}
// Storage defines the interface for issue storage backends
@@ -121,6 +126,9 @@ type Storage interface {
// Comments
AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error)
// ImportIssueComment adds a comment while preserving the original timestamp.
// Used during JSONL import to avoid timestamp drift across sync cycles.
ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error)
GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error)
GetCommentsForIssues(ctx context.Context, issueIDs []string) (map[string][]*types.Comment, error)

View File

@@ -5,6 +5,7 @@ import (
"context"
"database/sql"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
@@ -119,6 +120,9 @@ func (m *mockStorage) GetEvents(ctx context.Context, issueID string, limit int)
func (m *mockStorage) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) {
return nil, nil
}
// ImportIssueComment is a no-op stub satisfying the Storage interface;
// tests needing real comment behavior should use an actual backend.
func (m *mockStorage) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	return nil, nil
}
func (m *mockStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
return nil, nil
}
@@ -237,12 +241,18 @@ func (m *mockTransaction) AddDependency(ctx context.Context, dep *types.Dependen
func (m *mockTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
return nil
}
// GetDependencyRecords is a no-op stub satisfying the Transaction interface.
func (m *mockTransaction) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	return nil, nil
}
func (m *mockTransaction) AddLabel(ctx context.Context, issueID, label, actor string) error {
return nil
}
func (m *mockTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
return nil
}
// GetLabels is a no-op stub satisfying the Transaction interface.
func (m *mockTransaction) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	return nil, nil
}
func (m *mockTransaction) SetConfig(ctx context.Context, key, value string) error {
return nil
}
@@ -258,6 +268,12 @@ func (m *mockTransaction) GetMetadata(ctx context.Context, key string) (string,
func (m *mockTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error {
return nil
}
// ImportIssueComment is a no-op stub satisfying the Transaction interface.
func (m *mockTransaction) ImportIssueComment(ctx context.Context, issueID, author, text string, createdAt time.Time) (*types.Comment, error) {
	return nil, nil
}
// GetIssueComments is a no-op stub satisfying the Transaction interface.
func (m *mockTransaction) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
	return nil, nil
}
// TestConfig verifies the Config struct has expected fields.
func TestConfig(t *testing.T) {