/{website,internal,docs,cmd}: integration tests and more split backend fixes

This commit is contained in:
coffeegoddd☕️✨
2026-01-20 13:39:04 -08:00
parent ba432847e0
commit 422bc838ed
21 changed files with 892 additions and 156 deletions

View File

@@ -267,9 +267,10 @@ func sanitizeMetadataKey(key string) string {
// This function does NOT provide atomicity between JSONL write, metadata updates, and DB mtime.
// If a crash occurs between these operations, metadata may be inconsistent. However, this is
// acceptable because:
// 1. The worst case is "JSONL content has changed" error on next export
// 2. User can fix by running 'bd import' (safe, no data loss)
// 3. Current approach is simple and doesn't require complex WAL or format changes
// 1. The worst case is "JSONL content has changed" error on next export
// 2. User can fix by running 'bd import' (safe, no data loss)
// 3. Current approach is simple and doesn't require complex WAL or format changes
//
// Future: Consider defensive checks on startup if this becomes a common issue.
func updateExportMetadata(ctx context.Context, store storage.Storage, jsonlPath string, log daemonLogger, keySuffix string) {
// Sanitize keySuffix to handle Windows paths with colons
@@ -456,9 +457,12 @@ func performExport(ctx context.Context, store storage.Storage, autoCommit, autoP
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
// This prevents validatePreExport from incorrectly blocking on next export
// with "JSONL is newer than database" after daemon auto-export
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
// Dolt backend does not have a SQLite DB file; mtime touch is SQLite-only.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
}
}
}
@@ -755,8 +759,11 @@ func performSync(ctx context.Context, store storage.Storage, autoCommit, autoPus
// Update database mtime to be >= JSONL mtime
// This prevents validatePreExport from incorrectly blocking on next export
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
// Dolt backend does not have a SQLite DB file; mtime touch is SQLite-only.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
}
}
}
@@ -872,8 +879,11 @@ func performSync(ctx context.Context, store storage.Storage, autoCommit, autoPus
// Update database mtime after import (fixes #278, #301, #321)
// Sync branch import can update JSONL timestamp, so ensure DB >= JSONL
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
// Dolt backend does not have a SQLite DB file; mtime touch is SQLite-only.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
}
}
// Validate import didn't cause data loss

19
cmd/bd/doctor/backend.go Normal file
View File

@@ -0,0 +1,19 @@
package doctor
import (
"path/filepath"
"github.com/steveyegge/beads/internal/configfile"
)
// getBackendAndBeadsDir resolves the effective .beads directory (following redirects)
// and returns the configured storage backend ("sqlite" by default, or "dolt").
// getBackendAndBeadsDir resolves the effective .beads directory (following redirects)
// and returns the configured storage backend ("sqlite" by default, or "dolt").
func getBackendAndBeadsDir(repoPath string) (backend string, beadsDir string) {
	beadsDir = resolveBeadsDir(filepath.Join(repoPath, ".beads"))
	// Fall back to SQLite whenever the config is absent or unreadable.
	backend = configfile.BackendSQLite
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
		backend = cfg.GetBackend()
	}
	return backend, beadsDir
}

View File

@@ -309,8 +309,19 @@ func checkMetadataConfigValues(repoPath string) []string {
if strings.Contains(cfg.Database, string(os.PathSeparator)) || strings.Contains(cfg.Database, "/") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q should be a filename, not a path", cfg.Database))
}
if !strings.HasSuffix(cfg.Database, ".db") && !strings.HasSuffix(cfg.Database, ".sqlite") && !strings.HasSuffix(cfg.Database, ".sqlite3") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q has unusual extension (expected .db, .sqlite, or .sqlite3)", cfg.Database))
backend := cfg.GetBackend()
if backend == configfile.BackendSQLite {
if !strings.HasSuffix(cfg.Database, ".db") && !strings.HasSuffix(cfg.Database, ".sqlite") && !strings.HasSuffix(cfg.Database, ".sqlite3") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q has unusual extension (expected .db, .sqlite, or .sqlite3)", cfg.Database))
}
} else if backend == configfile.BackendDolt {
// Dolt is directory-backed; `database` should point to a directory (typically "dolt").
if strings.HasSuffix(cfg.Database, ".db") || strings.HasSuffix(cfg.Database, ".sqlite") || strings.HasSuffix(cfg.Database, ".sqlite3") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q looks like a SQLite file, but backend is dolt (expected a directory like %q)", cfg.Database, "dolt"))
}
if cfg.Database == beads.CanonicalDatabaseName {
issues = append(issues, fmt.Sprintf("metadata.json database: %q is misleading for dolt backend (expected %q)", cfg.Database, "dolt"))
}
}
}
@@ -345,11 +356,16 @@ func checkDatabaseConfigValues(repoPath string) []string {
return issues // No .beads directory, nothing to check
}
// Get database path
// Get database path (backend-aware)
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
// Check metadata.json for custom database name
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
// For Dolt, cfg.DatabasePath() is a directory and sqlite checks are not applicable.
if cfg.GetBackend() == configfile.BackendDolt {
return issues
}
if cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
}
}
if _, err := os.Stat(dbPath); os.IsNotExist(err) {

View File

@@ -182,6 +182,22 @@ func TestCheckMetadataConfigValues(t *testing.T) {
}
})
t.Run("valid dolt metadata", func(t *testing.T) {
metadataContent := `{
"database": "dolt",
"jsonl_export": "issues.jsonl",
"backend": "dolt"
}`
if err := os.WriteFile(filepath.Join(beadsDir, "metadata.json"), []byte(metadataContent), 0644); err != nil {
t.Fatalf("failed to write metadata.json: %v", err)
}
issues := checkMetadataConfigValues(tmpDir)
if len(issues) > 0 {
t.Errorf("expected no issues, got: %v", issues)
}
})
// Test with path in database field
t.Run("path in database field", func(t *testing.T) {
metadataContent := `{

View File

@@ -10,7 +10,7 @@ import (
"github.com/steveyegge/beads/internal/daemon"
"github.com/steveyegge/beads/internal/git"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/syncbranch"
)
@@ -167,7 +167,7 @@ func CheckGitSyncSetup(path string) DoctorCheck {
// CheckDaemonAutoSync checks if daemon has auto-commit/auto-push enabled when
// sync-branch is configured. Missing auto-sync slows down agent workflows.
func CheckDaemonAutoSync(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
_, beadsDir := getBackendAndBeadsDir(path)
socketPath := filepath.Join(beadsDir, "bd.sock")
// Check if daemon is running
@@ -181,8 +181,7 @@ func CheckDaemonAutoSync(path string) DoctorCheck {
// Check if sync-branch is configured
ctx := context.Background()
dbPath := filepath.Join(beadsDir, "beads.db")
store, err := sqlite.New(ctx, dbPath)
store, err := factory.NewFromConfigWithOptions(ctx, beadsDir, factory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Daemon Auto-Sync",
@@ -249,11 +248,10 @@ func CheckDaemonAutoSync(path string) DoctorCheck {
// CheckLegacyDaemonConfig checks for deprecated daemon config options and
// encourages migration to the unified daemon.auto-sync setting.
func CheckLegacyDaemonConfig(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
dbPath := filepath.Join(beadsDir, "beads.db")
_, beadsDir := getBackendAndBeadsDir(path)
ctx := context.Background()
store, err := sqlite.New(ctx, dbPath)
store, err := factory.NewFromConfigWithOptions(ctx, beadsDir, factory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Daemon Config",

View File

@@ -2,6 +2,7 @@ package doctor
import (
"bufio"
"context"
"database/sql"
"encoding/json"
"fmt"
@@ -16,6 +17,7 @@ import (
"github.com/steveyegge/beads/cmd/bd/doctor/fix"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
storagefactory "github.com/steveyegge/beads/internal/storage/factory"
"gopkg.in/yaml.v3"
)
@@ -27,8 +29,85 @@ type localConfig struct {
// CheckDatabaseVersion checks the database version and migration status
func CheckDatabaseVersion(path string, cliVersion string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: directory-backed store; version lives in metadata table.
if backend == configfile.BackendDolt {
doltPath := filepath.Join(beadsDir, "dolt")
if _, err := os.Stat(doltPath); os.IsNotExist(err) {
// If JSONL exists, treat as fresh clone / needs init.
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
_, issuesErr := os.Stat(issuesJSONL)
_, beadsErr := os.Stat(beadsJSONL)
if issuesErr == nil || beadsErr == nil {
return DoctorCheck{
Name: "Database",
Status: StatusWarning,
Message: "Fresh clone detected (no dolt database)",
Detail: "Storage: Dolt",
Fix: "Run 'bd init --backend dolt' to create and hydrate the dolt database",
}
}
return DoctorCheck{
Name: "Database",
Status: StatusError,
Message: "No dolt database found",
Detail: "Storage: Dolt",
Fix: "Run 'bd init --backend dolt' to create database",
}
}
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Database",
Status: StatusError,
Message: "Unable to open database",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Run 'bd init --backend dolt' (or remove and re-init .beads/dolt if corrupted)",
}
}
defer func() { _ = store.Close() }()
dbVersion, err := store.GetMetadata(ctx, "bd_version")
if err != nil {
return DoctorCheck{
Name: "Database",
Status: StatusError,
Message: "Unable to read database version",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Database may be corrupted. Try re-initializing the dolt database with 'bd init --backend dolt'",
}
}
if dbVersion == "" {
return DoctorCheck{
Name: "Database",
Status: StatusWarning,
Message: "Database missing version metadata",
Detail: "Storage: Dolt",
Fix: "Run 'bd migrate' or re-run 'bd init --backend dolt' to set version metadata",
}
}
if dbVersion != cliVersion {
return DoctorCheck{
Name: "Database",
Status: StatusWarning,
Message: fmt.Sprintf("version %s (CLI: %s)", dbVersion, cliVersion),
Detail: "Storage: Dolt",
Fix: "Update bd CLI and re-run (dolt metadata will be updated automatically by the daemon)",
}
}
return DoctorCheck{
Name: "Database",
Status: StatusOK,
Message: fmt.Sprintf("version %s", dbVersion),
Detail: "Storage: Dolt",
}
}
// Check metadata.json first for custom database name
var dbPath string
@@ -137,8 +216,48 @@ func CheckDatabaseVersion(path string, cliVersion string) DoctorCheck {
// CheckSchemaCompatibility checks if all required tables and columns are present
func CheckSchemaCompatibility(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: no SQLite schema probe. Instead, run a lightweight query sanity check.
if backend == configfile.BackendDolt {
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err != nil || !info.IsDir() {
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusOK,
Message: "N/A (no database)",
}
}
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusError,
Message: "Failed to open database",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
}
}
defer func() { _ = store.Close() }()
// Exercise core tables/views.
if _, err := store.GetStatistics(ctx); err != nil {
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusError,
Message: "Database schema is incomplete or incompatible",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Re-run 'bd init --backend dolt' or remove and re-initialize .beads/dolt if corrupted",
}
}
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusOK,
Message: "Basic queries succeeded",
Detail: "Storage: Dolt",
}
}
// Check metadata.json first for custom database name
var dbPath string
@@ -227,8 +346,57 @@ func CheckSchemaCompatibility(path string) DoctorCheck {
// CheckDatabaseIntegrity runs SQLite's PRAGMA integrity_check
func CheckDatabaseIntegrity(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: SQLite PRAGMA integrity_check doesn't apply.
// We do a lightweight read-only sanity check instead.
if backend == configfile.BackendDolt {
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err != nil || !info.IsDir() {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusOK,
Message: "N/A (no database)",
}
}
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Failed to open database",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Re-run 'bd init --backend dolt' or remove and re-initialize .beads/dolt if corrupted",
}
}
defer func() { _ = store.Close() }()
// Minimal checks: metadata + statistics. If these work, the store is at least readable.
if _, err := store.GetMetadata(ctx, "bd_version"); err != nil {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Basic query failed",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
}
}
if _, err := store.GetStatistics(ctx); err != nil {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Basic query failed",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
}
}
return DoctorCheck{
Name: "Database Integrity",
Status: StatusOK,
Message: "Basic query check passed",
Detail: "Storage: Dolt (no SQLite integrity_check equivalent)",
}
}
// Get database path (same logic as CheckSchemaCompatibility)
var dbPath string
@@ -340,8 +508,46 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
// CheckDatabaseJSONLSync checks if database and JSONL are in sync
func CheckDatabaseJSONLSync(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: JSONL is a derived compatibility artifact (export-only today).
// The SQLite-style import/export divergence checks don't apply.
if backend == configfile.BackendDolt {
// Find JSONL file (respects metadata.json override when set).
jsonlPath := ""
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
p := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
}
if jsonlPath == "" {
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
testPath := filepath.Join(beadsDir, name)
if _, err := os.Stat(testPath); err == nil {
jsonlPath = testPath
break
}
}
}
if jsonlPath == "" {
return DoctorCheck{
Name: "DB-JSONL Sync",
Status: StatusOK,
Message: "N/A (no JSONL file)",
}
}
return DoctorCheck{
Name: "DB-JSONL Sync",
Status: StatusOK,
Message: "N/A (dolt backend)",
Detail: "JSONL is derived from Dolt (export-only); import-only sync checks do not apply",
}
}
// Resolve database path (respects metadata.json override).
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
@@ -795,8 +1001,7 @@ func isNoDbModeConfigured(beadsDir string) bool {
// irreversible. The user must make an explicit decision to delete their
// closed issue history. We only provide guidance, never action.
func CheckDatabaseSize(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
_, beadsDir := getBackendAndBeadsDir(path)
// Get database path
var dbPath string

View File

@@ -2,6 +2,7 @@ package doctor
import (
"bufio"
"context"
"database/sql"
"fmt"
"os"
@@ -16,6 +17,7 @@ import (
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/git"
storagefactory "github.com/steveyegge/beads/internal/storage/factory"
)
// CheckIDFormat checks whether issues use hash-based or sequential IDs
@@ -404,9 +406,98 @@ func CheckDeletionsManifest(path string) DoctorCheck {
// This detects when a .beads directory was copied from another repo or when
// the git remote URL changed. A mismatch can cause data loss during sync.
func CheckRepoFingerprint(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory (bd-tvus fix)
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Backend-aware existence check
switch backend {
case configfile.BackendDolt:
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err != nil || !info.IsDir() {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusOK,
Message: "N/A (no database)",
}
}
default:
// SQLite backend: needs a .db file
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusOK,
Message: "N/A (no database)",
}
}
}
// For Dolt, read fingerprint from storage metadata (no sqlite assumptions).
if backend == configfile.BackendDolt {
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Unable to open database",
Detail: err.Error(),
}
}
defer func() { _ = store.Close() }()
storedRepoID, err := store.GetMetadata(ctx, "repo_id")
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Unable to read repo fingerprint",
Detail: err.Error(),
}
}
// If missing, warn (not the legacy sqlite messaging).
if storedRepoID == "" {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Missing repo fingerprint metadata",
Detail: "Storage: Dolt",
Fix: "Run 'bd migrate --update-repo-id' to add fingerprint metadata",
}
}
currentRepoID, err := beads.ComputeRepoID()
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Unable to compute current repo ID",
Detail: err.Error(),
}
}
if storedRepoID != currentRepoID {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusError,
Message: "Database belongs to different repository",
Detail: fmt.Sprintf("stored: %s, current: %s", storedRepoID[:8], currentRepoID[:8]),
Fix: "Run 'bd migrate --update-repo-id' if URL changed, or 'rm -rf .beads && bd init --backend dolt' if wrong database",
}
}
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusOK,
Message: fmt.Sprintf("Verified (%s)", currentRepoID[:8]),
}
}
// SQLite path (existing behavior)
// Get database path
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {

View File

@@ -368,8 +368,7 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
// CheckFreshClone detects if this is a fresh clone that needs 'bd init'.
// A fresh clone has JSONL with issues but no database file.
func CheckFreshClone(repoPath string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(repoPath, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(repoPath)
// Check if .beads/ exists
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
@@ -401,21 +400,32 @@ func CheckFreshClone(repoPath string) DoctorCheck {
}
}
// Check if database exists
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
// Fall back to canonical database name
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// If database exists, not a fresh clone
if _, err := os.Stat(dbPath); err == nil {
return DoctorCheck{
Name: "Fresh Clone",
Status: "ok",
Message: "Database exists",
// Check if database exists (backend-aware)
switch backend {
case configfile.BackendDolt:
// Dolt is directory-backed: treat .beads/dolt as the DB existence signal.
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err == nil && info.IsDir() {
return DoctorCheck{
Name: "Fresh Clone",
Status: "ok",
Message: "Database exists",
}
}
default:
// SQLite (default): check configured .db file path.
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
// Fall back to canonical database name
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
if _, err := os.Stat(dbPath); err == nil {
return DoctorCheck{
Name: "Fresh Clone",
Status: "ok",
Message: "Database exists",
}
}
}
@@ -434,6 +444,12 @@ func CheckFreshClone(repoPath string) DoctorCheck {
if prefix != "" {
fixCmd = fmt.Sprintf("bd init --prefix %s", prefix)
}
if backend == configfile.BackendDolt {
fixCmd = "bd init --backend dolt"
if prefix != "" {
fixCmd = fmt.Sprintf("bd init --backend dolt --prefix %s", prefix)
}
}
return DoctorCheck{
Name: "Fresh Clone",

View File

@@ -54,6 +54,11 @@ func CheckSyncDivergence(path string) DoctorCheck {
}
}
backend := configfile.BackendSQLite
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
backend = cfg.GetBackend()
}
var issues []SyncDivergenceIssue
// Check 1: JSONL differs from git HEAD
@@ -62,10 +67,13 @@ func CheckSyncDivergence(path string) DoctorCheck {
issues = append(issues, *jsonlIssue)
}
// Check 2: SQLite last_import_time vs JSONL mtime
mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir)
if mtimeIssue != nil {
issues = append(issues, *mtimeIssue)
// Check 2: SQLite last_import_time vs JSONL mtime (SQLite only).
// Dolt backend does not maintain SQLite metadata and does not support import-only sync.
if backend == configfile.BackendSQLite {
mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir)
if mtimeIssue != nil {
issues = append(issues, *mtimeIssue)
}
}
// Check 3: Uncommitted .beads/ changes
@@ -75,10 +83,14 @@ func CheckSyncDivergence(path string) DoctorCheck {
}
if len(issues) == 0 {
msg := "JSONL, SQLite, and git are in sync"
if backend == configfile.BackendDolt {
msg = "JSONL, Dolt, and git are in sync"
}
return DoctorCheck{
Name: "Sync Divergence",
Status: StatusOK,
Message: "JSONL, SQLite, and git are in sync",
Message: msg,
Category: CategoryData,
}
}
@@ -256,10 +268,16 @@ func checkUncommittedBeadsChanges(path, beadsDir string) *SyncDivergenceIssue {
}
}
fixCmd := "bd sync"
// For dolt backend, bd sync/import-only workflows don't apply; recommend a plain git commit.
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.GetBackend() == configfile.BackendDolt {
fixCmd = "git add .beads/ && git commit -m 'sync beads'"
}
return &SyncDivergenceIssue{
Type: "uncommitted_beads",
Description: fmt.Sprintf("Uncommitted .beads/ changes (%d file(s))", fileCount),
FixCommand: "bd sync",
FixCommand: fixCmd,
}
}

View File

@@ -0,0 +1,120 @@
//go:build integration
// +build integration
package main
import (
"encoding/json"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
)
// runBDExecAllowErrorWithEnv runs the test bd binary in dir with extraEnv
// entries layered on top of the current process environment, returning the
// combined stdout+stderr and the raw error. Callers may legitimately expect
// non-zero exits, so the error is returned rather than treated as fatal.
func runBDExecAllowErrorWithEnv(t *testing.T, dir string, extraEnv []string, args ...string) (string, error) {
	t.Helper()
	cmd := exec.Command(testBD, args...)
	cmd.Dir = dir
	// Inherit os.Environ() so PATH etc. still resolve git/dolt; entries from
	// extraEnv come last so they override any inherited duplicates.
	cmd.Env = append(os.Environ(), extraEnv...)
	out, err := cmd.CombinedOutput()
	return string(out), err
}
// TestDoltDaemonAutostart_NoTimeoutOnCreate verifies that `bd create` on a
// dolt-backend repository auto-starts the daemon without falling back to
// direct mode (no "Daemon took too long to start" warning), and that the
// daemon subsequently reports a running/healthy status via JSON output.
// Skipped in -short mode and on Windows.
func TestDoltDaemonAutostart_NoTimeoutOnCreate(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("dolt daemon integration test not supported on windows")
	}
	tmpDir := createTempDirWithCleanup(t)
	// Set up a real git repo so daemon autostart is allowed.
	if err := runCommandInDir(tmpDir, "git", "init"); err != nil {
		t.Fatalf("git init failed: %v", err)
	}
	// Identity config is best-effort; failures here don't invalidate the test.
	_ = runCommandInDir(tmpDir, "git", "config", "user.email", "test@example.com")
	_ = runCommandInDir(tmpDir, "git", "config", "user.name", "Test User")
	socketPath := filepath.Join(tmpDir, ".beads", "bd.sock")
	// Environment overrides: enable test mode and daemon autostart, and pin
	// the daemon socket inside the temp dir so runs are isolated.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_AUTO_START_DAEMON=true",
		"BEADS_NO_DAEMON=0",
		"BD_SOCKET=" + socketPath,
	}
	// Init dolt backend.
	initOut, initErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// If dolt backend isn't available in this build, skip rather than fail.
		// (Some environments may build without dolt support.)
		lower := strings.ToLower(initOut)
		if strings.Contains(lower, "dolt") && (strings.Contains(lower, "not supported") || strings.Contains(lower, "not available") || strings.Contains(lower, "unknown")) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	// Always stop daemon on cleanup (best effort) so temp dir can be removed.
	t.Cleanup(func() {
		_, _ = runBDExecAllowErrorWithEnv(t, tmpDir, env, "daemon", "stop")
		// Give the daemon a moment to release any locks/files.
		time.Sleep(200 * time.Millisecond)
	})
	// Create should auto-start daemon and should NOT fall back with a timeout warning.
	createOut, createErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "create", "dolt daemon autostart test", "--json")
	if createErr != nil {
		t.Fatalf("bd create failed: %v\n%s", createErr, createOut)
	}
	if strings.Contains(createOut, "Daemon took too long to start") || strings.Contains(createOut, "Running in direct mode") {
		t.Fatalf("unexpected daemon fallback on dolt create; output:\n%s", createOut)
	}
	// Verify daemon reports running (via JSON output).
	statusOut, statusErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "daemon", "status", "--json")
	if statusErr != nil {
		t.Fatalf("bd daemon status failed: %v\n%s", statusErr, statusOut)
	}
	// We accept either the legacy DaemonStatusResponse shape (daemon_lifecycle.go)
	// or the newer DaemonStatusReport shape (daemon_status.go), depending on flags/routes.
	// Here we just assert it isn't obviously "not_running".
	var m map[string]any
	if err := json.Unmarshal([]byte(statusOut), &m); err != nil {
		// Sometimes status may print warnings before JSON; try from first '{'.
		if idx := strings.Index(statusOut, "{"); idx >= 0 {
			if err2 := json.Unmarshal([]byte(statusOut[idx:]), &m); err2 != nil {
				t.Fatalf("failed to parse daemon status JSON: %v\n%s", err2, statusOut)
			}
		} else {
			t.Fatalf("failed to parse daemon status JSON: %v\n%s", err, statusOut)
		}
	}
	// Check "running" boolean (legacy) or "status" string (new).
	if runningVal, ok := m["running"]; ok {
		if b, ok := runningVal.(bool); ok && !b {
			t.Fatalf("expected daemon running=true, got: %s", statusOut)
		}
	} else if statusVal, ok := m["status"]; ok {
		if s, ok := statusVal.(string); ok && (s == "not_running" || s == "stale" || s == "unresponsive") {
			t.Fatalf("expected daemon to be running/healthy, got status=%q; full: %s", s, statusOut)
		}
	} else {
		// If schema changes again, this will fail loudly and force an update.
		t.Fatalf("unexpected daemon status JSON shape (missing running/status): %s", statusOut)
	}
}

View File

@@ -0,0 +1,90 @@
//go:build integration
// +build integration
package main
import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
)
// TestDoltDoctor_NoSQLiteWarningsAfterInitAndCreate verifies that `bd doctor`
// on a dolt-backend repository does not emit SQLite-specific failures, that
// its output mentions dolt (so we know the dolt code path was exercised), and
// that `bd init --backend dolt` did not create a stray beads.db SQLite file.
// Skipped in -short mode and on Windows.
func TestDoltDoctor_NoSQLiteWarningsAfterInitAndCreate(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow integration test in short mode")
	}
	if runtime.GOOS == windowsOS {
		t.Skip("dolt doctor integration test not supported on windows")
	}
	tmpDir := createTempDirWithCleanup(t)
	// Set up a real git repo so init/create/doctor behave normally.
	if err := runCommandInDir(tmpDir, "git", "init"); err != nil {
		t.Fatalf("git init failed: %v", err)
	}
	// Identity config is best-effort; failures here don't invalidate the test.
	_ = runCommandInDir(tmpDir, "git", "config", "user.email", "test@example.com")
	_ = runCommandInDir(tmpDir, "git", "config", "user.name", "Test User")
	socketPath := filepath.Join(tmpDir, ".beads", "bd.sock")
	// Environment overrides: test mode, daemon autostart, isolated socket.
	env := []string{
		"BEADS_TEST_MODE=1",
		"BEADS_AUTO_START_DAEMON=true",
		"BEADS_NO_DAEMON=0",
		"BD_SOCKET=" + socketPath,
	}
	// Init dolt backend.
	initOut, initErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
	if initErr != nil {
		// If dolt backend isn't available in this build, skip rather than fail.
		lower := strings.ToLower(initOut)
		if strings.Contains(lower, "dolt") && (strings.Contains(lower, "not supported") || strings.Contains(lower, "not available") || strings.Contains(lower, "unknown")) {
			t.Skipf("dolt backend not available: %s", initOut)
		}
		t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
	}
	// Ensure daemon cleanup so temp dir removal doesn't flake.
	t.Cleanup(func() {
		_, _ = runBDExecAllowErrorWithEnv(t, tmpDir, env, "daemon", "stop")
		time.Sleep(200 * time.Millisecond)
	})
	// Create one issue so the store is definitely initialized.
	createOut, createErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "create", "doctor dolt smoke", "--json")
	if createErr != nil {
		t.Fatalf("bd create failed: %v\n%s", createErr, createOut)
	}
	// Run doctor; it may return non-zero for unrelated warnings (upstream, claude, etc),
	// but it should NOT include SQLite-only failures on dolt.
	doctorOut, _ := runBDExecAllowErrorWithEnv(t, tmpDir, env, "doctor")
	// Also include stderr-like output if doctor wrote it to stdout in some modes.
	// (CombinedOutput already captures both.)
	for _, forbidden := range []string{
		"No beads.db found",
		"Unable to read database version",
		"Legacy database",
	} {
		if strings.Contains(doctorOut, forbidden) {
			t.Fatalf("bd doctor printed sqlite-specific warning %q in dolt mode; output:\n%s", forbidden, doctorOut)
		}
	}
	// Sanity check: doctor should mention dolt somewhere so we know we exercised the right path.
	if !strings.Contains(strings.ToLower(doctorOut), "dolt") {
		// Some doctor output is terse depending on flags; don't be too strict, but
		// if it's completely missing, that usually means we didn't use dolt config.
		t.Fatalf("bd doctor output did not mention dolt; output:\n%s", doctorOut)
	}
	// Regression check: dolt init must NOT create a SQLite database file.
	if _, err := os.Stat(filepath.Join(tmpDir, ".beads", "beads.db")); err == nil {
		t.Fatalf("unexpected sqlite database created in dolt mode: %s", filepath.Join(tmpDir, ".beads", "beads.db"))
	}
}

View File

@@ -547,11 +547,14 @@ Examples:
// Only do this when exporting to default JSONL path (not arbitrary outputs)
// This prevents validatePreExport from incorrectly blocking on next export
if output == "" || output == findJSONLPath() {
beadsDir := filepath.Dir(finalPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, finalPath); err != nil {
// Log warning but don't fail export
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
// Dolt backend does not have a SQLite DB file, so only touch mtime for SQLite.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
beadsDir := filepath.Dir(finalPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, finalPath); err != nil {
// Log warning but don't fail export
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
}
}
}
}

View File

@@ -131,17 +131,27 @@ With --stealth: configures per-repository git settings for invisible beads usage
// The hyphen is added automatically during ID generation
prefix = strings.TrimRight(prefix, "-")
// Create database
// Use global dbPath if set via --db flag or BEADS_DB env var, otherwise default to .beads/beads.db
// Determine storage path.
//
// IMPORTANT: In Dolt mode, we must NOT create a SQLite database file.
// `initDBPath` is used for SQLite-specific tasks (migration, import helpers, etc),
// so in Dolt mode it should point to the Dolt directory instead.
//
// Use global dbPath if set via --db flag or BEADS_DB env var (SQLite-only),
// otherwise default to `.beads/beads.db` for SQLite.
initDBPath := dbPath
if initDBPath == "" {
if backend == configfile.BackendDolt {
initDBPath = filepath.Join(".beads", "dolt")
} else if initDBPath == "" {
initDBPath = filepath.Join(".beads", beads.CanonicalDatabaseName)
}
// Migrate old database files if they exist
if err := migrateOldDatabases(initDBPath, quiet); err != nil {
fmt.Fprintf(os.Stderr, "Error during database migration: %v\n", err)
os.Exit(1)
// Migrate old SQLite database files if they exist (SQLite backend only).
if backend == configfile.BackendSQLite {
if err := migrateOldDatabases(initDBPath, quiet); err != nil {
fmt.Fprintf(os.Stderr, "Error during database migration: %v\n", err)
os.Exit(1)
}
}
// Determine if we should create .beads/ directory in CWD or main repo root
@@ -285,9 +295,10 @@ With --stealth: configures per-repository git settings for invisible beads usage
}
}
// Ensure parent directory exists for the database
// Ensure parent directory exists for the storage backend.
// For SQLite: parent of .beads/beads.db. For Dolt: parent of .beads/dolt.
if err := os.MkdirAll(initDBDir, 0750); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to create database directory %s: %v\n", initDBDir, err)
fmt.Fprintf(os.Stderr, "Error: failed to create storage directory %s: %v\n", initDBDir, err)
os.Exit(1)
}
@@ -389,6 +400,14 @@ With --stealth: configures per-repository git settings for invisible beads usage
if backend != configfile.BackendSQLite {
cfg.Backend = backend
}
// In Dolt mode, metadata.json.database should point to the Dolt directory (not beads.db).
// Backward-compat: older dolt setups left this as "beads.db", which is misleading and
// can trigger SQLite-only code paths.
if backend == configfile.BackendDolt {
if cfg.Database == "" || cfg.Database == beads.CanonicalDatabaseName {
cfg.Database = "dolt"
}
}
if err := cfg.Save(beadsDir); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to create metadata.json: %v\n", err)
@@ -429,40 +448,46 @@ With --stealth: configures per-repository git settings for invisible beads usage
}
}
// Check if git has existing issues to import (fresh clone scenario)
// With --from-jsonl: import from local file instead of git history
if fromJSONL {
// Import from current working tree's JSONL file
localJSONLPath := filepath.Join(beadsDir, "issues.jsonl")
if _, err := os.Stat(localJSONLPath); err == nil {
issueCount, err := importFromLocalJSONL(ctx, initDBPath, store, localJSONLPath)
if err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: import from local JSONL failed: %v\n", err)
// Import issues on init:
// - SQLite backend: import from git history or local JSONL (existing behavior).
// - Dolt backend: do NOT run SQLite import code. When `.beads/issues.jsonl` is
// present, Dolt bootstraps itself from it on first open (see factory_dolt.go).
if backend == configfile.BackendSQLite {
// Check if git has existing issues to import (fresh clone scenario)
// With --from-jsonl: import from local file instead of git history
if fromJSONL {
// Import from current working tree's JSONL file
localJSONLPath := filepath.Join(beadsDir, "issues.jsonl")
if _, err := os.Stat(localJSONLPath); err == nil {
issueCount, err := importFromLocalJSONL(ctx, initDBPath, store, localJSONLPath)
if err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: import from local JSONL failed: %v\n", err)
}
// Non-fatal - continue with empty database
} else if !quiet && issueCount > 0 {
fmt.Fprintf(os.Stderr, "✓ Imported %d issues from local %s\n\n", issueCount, localJSONLPath)
}
// Non-fatal - continue with empty database
} else if !quiet && issueCount > 0 {
fmt.Fprintf(os.Stderr, "✓ Imported %d issues from local %s\n\n", issueCount, localJSONLPath)
}
} else if !quiet {
fmt.Fprintf(os.Stderr, "Warning: --from-jsonl specified but %s not found\n", localJSONLPath)
}
} else {
// Default: import from git history
issueCount, jsonlPath, gitRef := checkGitForIssues()
if issueCount > 0 {
if !quiet {
fmt.Fprintf(os.Stderr, "\n✓ Database initialized. Found %d issues in git, importing...\n", issueCount)
}
if err := importFromGit(ctx, initDBPath, store, jsonlPath, gitRef); err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: auto-import failed: %v\n", err)
fmt.Fprintf(os.Stderr, "Try manually: git show %s:%s | bd import -i /dev/stdin\n", gitRef, jsonlPath)
}
// Non-fatal - continue with empty database
} else if !quiet {
fmt.Fprintf(os.Stderr, "✓ Successfully imported %d issues from git.\n\n", issueCount)
fmt.Fprintf(os.Stderr, "Warning: --from-jsonl specified but %s not found\n", localJSONLPath)
}
} else {
// Default: import from git history
issueCount, jsonlPath, gitRef := checkGitForIssues()
if issueCount > 0 {
if !quiet {
fmt.Fprintf(os.Stderr, "\n✓ Database initialized. Found %d issues in git, importing...\n", issueCount)
}
if err := importFromGit(ctx, initDBPath, store, jsonlPath, gitRef); err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: auto-import failed: %v\n", err)
fmt.Fprintf(os.Stderr, "Try manually: git show %s:%s | bd import -i /dev/stdin\n", gitRef, jsonlPath)
}
// Non-fatal - continue with empty database
} else if !quiet {
fmt.Fprintf(os.Stderr, "✓ Successfully imported %d issues from git.\n\n", issueCount)
}
}
}
}
@@ -676,7 +701,6 @@ func migrateOldDatabases(targetPath string, quiet bool) error {
return nil
}
// readFirstIssueFromJSONL reads the first issue from a JSONL file
func readFirstIssueFromJSONL(path string) (*types.Issue, error) {
// #nosec G304 -- helper reads JSONL file chosen by current bd command
@@ -744,7 +768,6 @@ func readFirstIssueFromGit(jsonlPath, gitRef string) (*types.Issue, error) {
return nil, nil
}
// checkExistingBeadsData checks for existing database files
// and returns an error if found (safety guard for bd-emg)
//
@@ -781,7 +804,29 @@ func checkExistingBeadsData(prefix string) error {
return nil // No .beads directory, safe to init
}
// Check for existing database file
// Check for existing database (SQLite or Dolt)
//
// NOTE: For Dolt backend, the "database" is a directory at `.beads/dolt/`.
// We prefer metadata.json as the single source of truth, but we also keep a
// conservative fallback for legacy SQLite setups.
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.GetBackend() == configfile.BackendDolt {
doltPath := filepath.Join(beadsDir, "dolt")
if info, err := os.Stat(doltPath); err == nil && info.IsDir() {
return fmt.Errorf(`
%s Found existing Dolt database: %s
This workspace is already initialized.
To use the existing database:
Just run bd commands normally (e.g., %s)
To completely reinitialize (data loss warning):
rm -rf .beads && bd init --backend dolt --prefix %s
Aborting.`, ui.RenderWarn("⚠"), doltPath, ui.RenderAccent("bd list"), prefix)
}
}
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if _, err := os.Stat(dbPath); err == nil {
return fmt.Errorf(`

View File

@@ -12,6 +12,7 @@ import (
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/validation"
@@ -86,13 +87,17 @@ func finalizeExport(ctx context.Context, result *ExportResult) {
}
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
// This prevents validatePreExport from incorrectly blocking on next export
// This prevents validatePreExport from incorrectly blocking on next export.
//
// Dolt backend does not use a SQLite DB file, so this mtime update is SQLite-only.
if result.JSONLPath != "" {
beadsDir := filepath.Dir(result.JSONLPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, result.JSONLPath); err != nil {
// Non-fatal warning
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
if _, ok := store.(*sqlite.SQLiteStorage); ok {
beadsDir := filepath.Dir(result.JSONLPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, result.JSONLPath); err != nil {
// Non-fatal warning
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
}
}
}
}