Fix Dolt backend init/daemon/doctor; prevent accidental SQLite artifacts; add integration tests; clean up lint (#1218)

* /{cmd,internal}: get dolt backend init working and allow issue creation

* /{website,internal,docs,cmd}: integration tests and more split backend fixes

* /{cmd,internal}: fix lint issues

* /cmd/bd/doctor/integrity.go: fix unable to query issues bug with dolt backend

* /cmd/bd/daemon.go: remove debug logging
This commit is contained in:
Dustin Brown
2026-01-20 17:34:00 -08:00
committed by GitHub
parent c1ac69da3e
commit d3ccd5cfba
31 changed files with 1071 additions and 305 deletions

View File

@@ -14,6 +14,7 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/beads/cmd/bd/doctor"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/daemon"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/factory"
@@ -298,7 +299,6 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
stackTrace := string(stackBuf[:stackSize])
log.Error("stack trace", "trace", stackTrace)
// Write crash report to daemon-error file for user visibility
var beadsDir string
if dbPath != "" {
beadsDir = filepath.Dir(dbPath)
@@ -307,13 +307,9 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
}
if beadsDir != "" {
errFile := filepath.Join(beadsDir, "daemon-error")
crashReport := fmt.Sprintf("Daemon crashed at %s\n\nPanic: %v\n\nStack trace:\n%s\n",
time.Now().Format(time.RFC3339), r, stackTrace)
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(crashReport), 0644); err != nil {
log.Warn("could not write crash report", "error", err)
}
log.Error("crash report", "report", crashReport)
}
// Clean up PID file
@@ -350,50 +346,63 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Check for multiple .db files (ambiguity error)
beadsDir := filepath.Dir(daemonDBPath)
backend := factory.GetBackendFromConfig(beadsDir)
if backend == "" {
backend = configfile.BackendSQLite
}
// Reset backoff on daemon start (fresh start, but preserve NeedsManualSync hint)
if !localMode {
ResetBackoffOnDaemonStart(beadsDir)
}
matches, err := filepath.Glob(filepath.Join(beadsDir, "*.db"))
if err == nil && len(matches) > 1 {
// Filter out backup files (*.backup-*.db, *.backup.db)
var validDBs []string
for _, match := range matches {
baseName := filepath.Base(match)
// Skip if it's a backup file (contains ".backup" in name)
if !strings.Contains(baseName, ".backup") && baseName != "vc.db" {
validDBs = append(validDBs, match)
}
}
if len(validDBs) > 1 {
errMsg := fmt.Sprintf("Error: Multiple database files found in %s:\n", beadsDir)
for _, db := range validDBs {
errMsg += fmt.Sprintf(" - %s\n", filepath.Base(db))
}
errMsg += fmt.Sprintf("\nBeads requires a single canonical database: %s\n", beads.CanonicalDatabaseName)
errMsg += "Run 'bd init' to migrate legacy databases or manually remove old databases\n"
errMsg += "Or run 'bd doctor' for more diagnostics"
log.log(errMsg)
// Write error to file so user can see it without checking logs
errFile := filepath.Join(beadsDir, "daemon-error")
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(errMsg), 0644); err != nil {
log.Warn("could not write daemon-error file", "error", err)
// Check for multiple .db files (ambiguity error) - SQLite only.
// Dolt is directory-backed so this check is irrelevant and can be misleading.
if backend == configfile.BackendSQLite {
matches, err := filepath.Glob(filepath.Join(beadsDir, "*.db"))
if err == nil && len(matches) > 1 {
// Filter out backup files (*.backup-*.db, *.backup.db)
var validDBs []string
for _, match := range matches {
baseName := filepath.Base(match)
// Skip if it's a backup file (contains ".backup" in name)
if !strings.Contains(baseName, ".backup") && baseName != "vc.db" {
validDBs = append(validDBs, match)
}
}
if len(validDBs) > 1 {
errMsg := fmt.Sprintf("Error: Multiple database files found in %s:\n", beadsDir)
for _, db := range validDBs {
errMsg += fmt.Sprintf(" - %s\n", filepath.Base(db))
}
errMsg += fmt.Sprintf("\nBeads requires a single canonical database: %s\n", beads.CanonicalDatabaseName)
errMsg += "Run 'bd init' to migrate legacy databases or manually remove old databases\n"
errMsg += "Or run 'bd doctor' for more diagnostics"
return // Use return instead of os.Exit to allow defers to run
log.log(errMsg)
// Write error to file so user can see it without checking logs
errFile := filepath.Join(beadsDir, "daemon-error")
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(errMsg), 0644); err != nil {
log.Warn("could not write daemon-error file", "error", err)
}
return // Use return instead of os.Exit to allow defers to run
}
}
}
// Validate using canonical name
dbBaseName := filepath.Base(daemonDBPath)
if dbBaseName != beads.CanonicalDatabaseName {
log.Error("non-canonical database name", "name", dbBaseName, "expected", beads.CanonicalDatabaseName)
log.Info("run 'bd init' to migrate to canonical name")
return // Use return instead of os.Exit to allow defers to run
// Validate using canonical name (SQLite only).
// Dolt uses a directory-backed store (typically .beads/dolt), so the "beads.db"
// basename invariant does not apply.
if backend == configfile.BackendSQLite {
dbBaseName := filepath.Base(daemonDBPath)
if dbBaseName != beads.CanonicalDatabaseName {
log.Error("non-canonical database name", "name", dbBaseName, "expected", beads.CanonicalDatabaseName)
log.Info("run 'bd init' to migrate to canonical name")
return // Use return instead of os.Exit to allow defers to run
}
}
log.Info("using database", "path", daemonDBPath)
@@ -621,6 +630,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// - If either BEADS_AUTO_COMMIT/daemon.auto_commit or BEADS_AUTO_PUSH/daemon.auto_push
// is enabled, treat as auto-sync=true (full read/write)
// - Otherwise check auto-pull for read-only mode
//
// 4. Fallback: all default to true when sync-branch configured
//
// Note: The individual auto-commit/auto-push settings are deprecated.

View File

@@ -366,9 +366,28 @@ func startDaemonProcess(socketPath string) bool {
binPath = os.Args[0]
}
args := []string{"daemon", "start"}
// IMPORTANT: Use --foreground for auto-start.
//
// Rationale:
// - `bd daemon start` (without --foreground) spawns an additional child process
// (`bd daemon --start` with BD_DAEMON_FOREGROUND=1). For Dolt, that extra
// daemonization layer can introduce startup races/lock contention (Dolt's
// LOCK acquisition timeout is 100ms). If the daemon isn't ready quickly,
// the parent falls back to direct mode and may fail to open Dolt because the
// daemon holds the write lock.
// - Here we already daemonize via SysProcAttr + stdio redirection, so a second
// layer is unnecessary.
args := []string{"daemon", "start", "--foreground"}
cmd := execCommandFn(binPath, args...)
// Mark this as a daemon-foreground child so we don't track/kill based on the
// short-lived launcher process PID (see computeDaemonParentPID()).
// Also force the daemon to bind the same socket we're probing for readiness,
// avoiding any mismatch between workspace-derived paths.
cmd.Env = append(os.Environ(),
"BD_DAEMON_FOREGROUND=1",
"BD_SOCKET="+socketPath,
)
setupDaemonIO(cmd)
if dbPath != "" {

View File

@@ -40,8 +40,18 @@ Examples:
logLevel, _ := cmd.Flags().GetString("log-level")
logJSON, _ := cmd.Flags().GetBool("log-json")
// Load auto-commit/push/pull defaults from env vars, config, or sync-branch
autoCommit, autoPush, autoPull = loadDaemonAutoSettings(cmd, autoCommit, autoPush, autoPull)
// NOTE: Only load daemon auto-settings from the database in foreground mode.
//
// In background mode, `bd daemon start` spawns a child process to run the
// daemon loop. Opening the database here in the parent process can briefly
// hold Dolt's LOCK file long enough for the child to time out and fall back
// to read-only mode (100ms lock timeout), which can break startup.
//
// In background mode, auto-settings are loaded in the actual daemon process
// (the BD_DAEMON_FOREGROUND=1 child spawned by startDaemon).
if foreground {
autoCommit, autoPush, autoPull = loadDaemonAutoSettings(cmd, autoCommit, autoPush, autoPull)
}
if interval <= 0 {
fmt.Fprintf(os.Stderr, "Error: interval must be positive (got %v)\n", interval)

View File

@@ -267,9 +267,10 @@ func sanitizeMetadataKey(key string) string {
// This function does NOT provide atomicity between JSONL write, metadata updates, and DB mtime.
// If a crash occurs between these operations, metadata may be inconsistent. However, this is
// acceptable because:
// 1. The worst case is "JSONL content has changed" error on next export
// 2. User can fix by running 'bd import' (safe, no data loss)
// 3. Current approach is simple and doesn't require complex WAL or format changes
// 1. The worst case is "JSONL content has changed" error on next export
// 2. User can fix by running 'bd import' (safe, no data loss)
// 3. Current approach is simple and doesn't require complex WAL or format changes
//
// Future: Consider defensive checks on startup if this becomes a common issue.
func updateExportMetadata(ctx context.Context, store storage.Storage, jsonlPath string, log daemonLogger, keySuffix string) {
// Sanitize keySuffix to handle Windows paths with colons
@@ -456,9 +457,12 @@ func performExport(ctx context.Context, store storage.Storage, autoCommit, autoP
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
// This prevents validatePreExport from incorrectly blocking on next export
// with "JSONL is newer than database" after daemon auto-export
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
// Dolt backend does not have a SQLite DB file; mtime touch is SQLite-only.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
}
}
}
@@ -755,8 +759,11 @@ func performSync(ctx context.Context, store storage.Storage, autoCommit, autoPus
// Update database mtime to be >= JSONL mtime
// This prevents validatePreExport from incorrectly blocking on next export
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
// Dolt backend does not have a SQLite DB file; mtime touch is SQLite-only.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
}
}
}
@@ -872,8 +879,11 @@ func performSync(ctx context.Context, store storage.Storage, autoCommit, autoPus
// Update database mtime after import (fixes #278, #301, #321)
// Sync branch import can update JSONL timestamp, so ensure DB >= JSONL
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
// Dolt backend does not have a SQLite DB file; mtime touch is SQLite-only.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
log.log("Warning: failed to update database mtime: %v", err)
}
}
// Validate import didn't cause data loss

19
cmd/bd/doctor/backend.go Normal file
View File

@@ -0,0 +1,19 @@
package doctor

import (
	"path/filepath"

	"github.com/steveyegge/beads/internal/configfile"
)

// getBackendAndBeadsDir resolves the effective .beads directory (following redirects)
// and returns the configured storage backend ("sqlite" by default, or "dolt").
//
// repoPath is the repository root; the .beads directory is derived from it and
// passed through resolveBeadsDir so redirected workspaces are honored.
func getBackendAndBeadsDir(repoPath string) (backend string, beadsDir string) {
	beadsDir = resolveBeadsDir(filepath.Join(repoPath, ".beads"))
	cfg, err := configfile.Load(beadsDir)
	// A missing or unreadable config is not an error here: fall back to the
	// SQLite default so every caller always receives a usable backend value.
	if err != nil || cfg == nil {
		return configfile.BackendSQLite, beadsDir
	}
	return cfg.GetBackend(), beadsDir
}

View File

@@ -309,8 +309,19 @@ func checkMetadataConfigValues(repoPath string) []string {
if strings.Contains(cfg.Database, string(os.PathSeparator)) || strings.Contains(cfg.Database, "/") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q should be a filename, not a path", cfg.Database))
}
if !strings.HasSuffix(cfg.Database, ".db") && !strings.HasSuffix(cfg.Database, ".sqlite") && !strings.HasSuffix(cfg.Database, ".sqlite3") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q has unusual extension (expected .db, .sqlite, or .sqlite3)", cfg.Database))
backend := cfg.GetBackend()
if backend == configfile.BackendSQLite {
if !strings.HasSuffix(cfg.Database, ".db") && !strings.HasSuffix(cfg.Database, ".sqlite") && !strings.HasSuffix(cfg.Database, ".sqlite3") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q has unusual extension (expected .db, .sqlite, or .sqlite3)", cfg.Database))
}
} else if backend == configfile.BackendDolt {
// Dolt is directory-backed; `database` should point to a directory (typically "dolt").
if strings.HasSuffix(cfg.Database, ".db") || strings.HasSuffix(cfg.Database, ".sqlite") || strings.HasSuffix(cfg.Database, ".sqlite3") {
issues = append(issues, fmt.Sprintf("metadata.json database: %q looks like a SQLite file, but backend is dolt (expected a directory like %q)", cfg.Database, "dolt"))
}
if cfg.Database == beads.CanonicalDatabaseName {
issues = append(issues, fmt.Sprintf("metadata.json database: %q is misleading for dolt backend (expected %q)", cfg.Database, "dolt"))
}
}
}
@@ -345,11 +356,16 @@ func checkDatabaseConfigValues(repoPath string) []string {
return issues // No .beads directory, nothing to check
}
// Get database path
// Get database path (backend-aware)
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
// Check metadata.json for custom database name
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
// For Dolt, cfg.DatabasePath() is a directory and sqlite checks are not applicable.
if cfg.GetBackend() == configfile.BackendDolt {
return issues
}
if cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
}
}
if _, err := os.Stat(dbPath); os.IsNotExist(err) {

View File

@@ -182,6 +182,22 @@ func TestCheckMetadataConfigValues(t *testing.T) {
}
})
t.Run("valid dolt metadata", func(t *testing.T) {
metadataContent := `{
"database": "dolt",
"jsonl_export": "issues.jsonl",
"backend": "dolt"
}`
if err := os.WriteFile(filepath.Join(beadsDir, "metadata.json"), []byte(metadataContent), 0644); err != nil {
t.Fatalf("failed to write metadata.json: %v", err)
}
issues := checkMetadataConfigValues(tmpDir)
if len(issues) > 0 {
t.Errorf("expected no issues, got: %v", issues)
}
})
// Test with path in database field
t.Run("path in database field", func(t *testing.T) {
metadataContent := `{

View File

@@ -10,7 +10,7 @@ import (
"github.com/steveyegge/beads/internal/daemon"
"github.com/steveyegge/beads/internal/git"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/storage/factory"
"github.com/steveyegge/beads/internal/syncbranch"
)
@@ -167,7 +167,7 @@ func CheckGitSyncSetup(path string) DoctorCheck {
// CheckDaemonAutoSync checks if daemon has auto-commit/auto-push enabled when
// sync-branch is configured. Missing auto-sync slows down agent workflows.
func CheckDaemonAutoSync(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
_, beadsDir := getBackendAndBeadsDir(path)
socketPath := filepath.Join(beadsDir, "bd.sock")
// Check if daemon is running
@@ -181,8 +181,7 @@ func CheckDaemonAutoSync(path string) DoctorCheck {
// Check if sync-branch is configured
ctx := context.Background()
dbPath := filepath.Join(beadsDir, "beads.db")
store, err := sqlite.New(ctx, dbPath)
store, err := factory.NewFromConfigWithOptions(ctx, beadsDir, factory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Daemon Auto-Sync",
@@ -249,11 +248,10 @@ func CheckDaemonAutoSync(path string) DoctorCheck {
// CheckLegacyDaemonConfig checks for deprecated daemon config options and
// encourages migration to the unified daemon.auto-sync setting.
func CheckLegacyDaemonConfig(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
dbPath := filepath.Join(beadsDir, "beads.db")
_, beadsDir := getBackendAndBeadsDir(path)
ctx := context.Background()
store, err := sqlite.New(ctx, dbPath)
store, err := factory.NewFromConfigWithOptions(ctx, beadsDir, factory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Daemon Config",

View File

@@ -2,6 +2,7 @@ package doctor
import (
"bufio"
"context"
"database/sql"
"encoding/json"
"fmt"
@@ -16,6 +17,7 @@ import (
"github.com/steveyegge/beads/cmd/bd/doctor/fix"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
storagefactory "github.com/steveyegge/beads/internal/storage/factory"
"gopkg.in/yaml.v3"
)
@@ -27,8 +29,85 @@ type localConfig struct {
// CheckDatabaseVersion checks the database version and migration status
func CheckDatabaseVersion(path string, cliVersion string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: directory-backed store; version lives in metadata table.
if backend == configfile.BackendDolt {
doltPath := filepath.Join(beadsDir, "dolt")
if _, err := os.Stat(doltPath); os.IsNotExist(err) {
// If JSONL exists, treat as fresh clone / needs init.
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
_, issuesErr := os.Stat(issuesJSONL)
_, beadsErr := os.Stat(beadsJSONL)
if issuesErr == nil || beadsErr == nil {
return DoctorCheck{
Name: "Database",
Status: StatusWarning,
Message: "Fresh clone detected (no dolt database)",
Detail: "Storage: Dolt",
Fix: "Run 'bd init --backend dolt' to create and hydrate the dolt database",
}
}
return DoctorCheck{
Name: "Database",
Status: StatusError,
Message: "No dolt database found",
Detail: "Storage: Dolt",
Fix: "Run 'bd init --backend dolt' to create database",
}
}
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Database",
Status: StatusError,
Message: "Unable to open database",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Run 'bd init --backend dolt' (or remove and re-init .beads/dolt if corrupted)",
}
}
defer func() { _ = store.Close() }()
dbVersion, err := store.GetMetadata(ctx, "bd_version")
if err != nil {
return DoctorCheck{
Name: "Database",
Status: StatusError,
Message: "Unable to read database version",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Database may be corrupted. Try re-initializing the dolt database with 'bd init --backend dolt'",
}
}
if dbVersion == "" {
return DoctorCheck{
Name: "Database",
Status: StatusWarning,
Message: "Database missing version metadata",
Detail: "Storage: Dolt",
Fix: "Run 'bd migrate' or re-run 'bd init --backend dolt' to set version metadata",
}
}
if dbVersion != cliVersion {
return DoctorCheck{
Name: "Database",
Status: StatusWarning,
Message: fmt.Sprintf("version %s (CLI: %s)", dbVersion, cliVersion),
Detail: "Storage: Dolt",
Fix: "Update bd CLI and re-run (dolt metadata will be updated automatically by the daemon)",
}
}
return DoctorCheck{
Name: "Database",
Status: StatusOK,
Message: fmt.Sprintf("version %s", dbVersion),
Detail: "Storage: Dolt",
}
}
// Check metadata.json first for custom database name
var dbPath string
@@ -137,8 +216,48 @@ func CheckDatabaseVersion(path string, cliVersion string) DoctorCheck {
// CheckSchemaCompatibility checks if all required tables and columns are present
func CheckSchemaCompatibility(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: no SQLite schema probe. Instead, run a lightweight query sanity check.
if backend == configfile.BackendDolt {
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err != nil || !info.IsDir() {
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusOK,
Message: "N/A (no database)",
}
}
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusError,
Message: "Failed to open database",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
}
}
defer func() { _ = store.Close() }()
// Exercise core tables/views.
if _, err := store.GetStatistics(ctx); err != nil {
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusError,
Message: "Database schema is incomplete or incompatible",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Re-run 'bd init --backend dolt' or remove and re-initialize .beads/dolt if corrupted",
}
}
return DoctorCheck{
Name: "Schema Compatibility",
Status: StatusOK,
Message: "Basic queries succeeded",
Detail: "Storage: Dolt",
}
}
// Check metadata.json first for custom database name
var dbPath string
@@ -227,8 +346,57 @@ func CheckSchemaCompatibility(path string) DoctorCheck {
// CheckDatabaseIntegrity runs SQLite's PRAGMA integrity_check
func CheckDatabaseIntegrity(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: SQLite PRAGMA integrity_check doesn't apply.
// We do a lightweight read-only sanity check instead.
if backend == configfile.BackendDolt {
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err != nil || !info.IsDir() {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusOK,
Message: "N/A (no database)",
}
}
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Failed to open database",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
Fix: "Re-run 'bd init --backend dolt' or remove and re-initialize .beads/dolt if corrupted",
}
}
defer func() { _ = store.Close() }()
// Minimal checks: metadata + statistics. If these work, the store is at least readable.
if _, err := store.GetMetadata(ctx, "bd_version"); err != nil {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Basic query failed",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
}
}
if _, err := store.GetStatistics(ctx); err != nil {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Basic query failed",
Detail: fmt.Sprintf("Storage: Dolt\n\nError: %v", err),
}
}
return DoctorCheck{
Name: "Database Integrity",
Status: StatusOK,
Message: "Basic query check passed",
Detail: "Storage: Dolt (no SQLite integrity_check equivalent)",
}
}
// Get database path (same logic as CheckSchemaCompatibility)
var dbPath string
@@ -340,8 +508,46 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
// CheckDatabaseJSONLSync checks if database and JSONL are in sync
func CheckDatabaseJSONLSync(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Dolt backend: JSONL is a derived compatibility artifact (export-only today).
// The SQLite-style import/export divergence checks don't apply.
if backend == configfile.BackendDolt {
// Find JSONL file (respects metadata.json override when set).
jsonlPath := ""
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
p := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
}
if jsonlPath == "" {
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
testPath := filepath.Join(beadsDir, name)
if _, err := os.Stat(testPath); err == nil {
jsonlPath = testPath
break
}
}
}
if jsonlPath == "" {
return DoctorCheck{
Name: "DB-JSONL Sync",
Status: StatusOK,
Message: "N/A (no JSONL file)",
}
}
return DoctorCheck{
Name: "DB-JSONL Sync",
Status: StatusOK,
Message: "N/A (dolt backend)",
Detail: "JSONL is derived from Dolt (export-only); import-only sync checks do not apply",
}
}
// Resolve database path (respects metadata.json override).
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
@@ -795,8 +1001,7 @@ func isNoDbModeConfigured(beadsDir string) bool {
// irreversible. The user must make an explicit decision to delete their
// closed issue history. We only provide guidance, never action.
func CheckDatabaseSize(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
_, beadsDir := getBackendAndBeadsDir(path)
// Get database path
var dbPath string

View File

@@ -2,6 +2,7 @@ package doctor
import (
"bufio"
"context"
"database/sql"
"fmt"
"os"
@@ -16,23 +17,20 @@ import (
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/git"
storagefactory "github.com/steveyegge/beads/internal/storage/factory"
)
// CheckIDFormat checks whether issues use hash-based or sequential IDs
func CheckIDFormat(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory (bd-tvus fix)
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Check metadata.json first for custom database name
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
// Determine the on-disk location (file for SQLite, directory for Dolt).
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
dbPath = cfg.DatabasePath(beadsDir)
} else {
// Fall back to canonical database name
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// Check if using JSONL-only mode
// Check if using JSONL-only mode (or uninitialized DB).
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
// Check if JSONL exists (--no-db mode)
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
@@ -51,24 +49,29 @@ func CheckIDFormat(path string) DoctorCheck {
}
}
// Open database
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
// Open the configured backend in read-only mode.
// This must work for both SQLite and Dolt.
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Issue IDs",
Status: StatusError,
Message: "Unable to open database",
Detail: err.Error(),
}
}
defer func() { _ = db.Close() }() // Intentionally ignore close error
defer func() { _ = store.Close() }() // Intentionally ignore close error
db := store.UnderlyingDB()
// Get sample of issues to check ID format (up to 10 for pattern analysis)
rows, err := db.Query("SELECT id FROM issues ORDER BY created_at LIMIT 10")
rows, err := db.QueryContext(ctx, "SELECT id FROM issues ORDER BY created_at LIMIT 10")
if err != nil {
return DoctorCheck{
Name: "Issue IDs",
Status: StatusError,
Message: "Unable to query issues",
Detail: err.Error(),
}
}
defer rows.Close()
@@ -99,6 +102,13 @@ func CheckIDFormat(path string) DoctorCheck {
}
// Sequential IDs - recommend migration
if backend == configfile.BackendDolt {
return DoctorCheck{
Name: "Issue IDs",
Status: StatusOK,
Message: "hash-based ✓",
}
}
return DoctorCheck{
Name: "Issue IDs",
Status: StatusWarning,
@@ -404,9 +414,98 @@ func CheckDeletionsManifest(path string) DoctorCheck {
// This detects when a .beads directory was copied from another repo or when
// the git remote URL changed. A mismatch can cause data loss during sync.
func CheckRepoFingerprint(path string) DoctorCheck {
// Follow redirect to resolve actual beads directory (bd-tvus fix)
beadsDir := resolveBeadsDir(filepath.Join(path, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(path)
// Backend-aware existence check
switch backend {
case configfile.BackendDolt:
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err != nil || !info.IsDir() {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusOK,
Message: "N/A (no database)",
}
}
default:
// SQLite backend: needs a .db file
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusOK,
Message: "N/A (no database)",
}
}
}
// For Dolt, read fingerprint from storage metadata (no sqlite assumptions).
if backend == configfile.BackendDolt {
ctx := context.Background()
store, err := storagefactory.NewFromConfigWithOptions(ctx, beadsDir, storagefactory.Options{ReadOnly: true})
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Unable to open database",
Detail: err.Error(),
}
}
defer func() { _ = store.Close() }()
storedRepoID, err := store.GetMetadata(ctx, "repo_id")
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Unable to read repo fingerprint",
Detail: err.Error(),
}
}
// If missing, warn (not the legacy sqlite messaging).
if storedRepoID == "" {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Missing repo fingerprint metadata",
Detail: "Storage: Dolt",
Fix: "Run 'bd migrate --update-repo-id' to add fingerprint metadata",
}
}
currentRepoID, err := beads.ComputeRepoID()
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusWarning,
Message: "Unable to compute current repo ID",
Detail: err.Error(),
}
}
if storedRepoID != currentRepoID {
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusError,
Message: "Database belongs to different repository",
Detail: fmt.Sprintf("stored: %s, current: %s", storedRepoID[:8], currentRepoID[:8]),
Fix: "Run 'bd migrate --update-repo-id' if URL changed, or 'rm -rf .beads && bd init --backend dolt' if wrong database",
}
}
return DoctorCheck{
Name: "Repo Fingerprint",
Status: StatusOK,
Message: fmt.Sprintf("Verified (%s)", currentRepoID[:8]),
}
}
// SQLite path (existing behavior)
// Get database path
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {

View File

@@ -371,8 +371,7 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
// CheckFreshClone detects if this is a fresh clone that needs 'bd init'.
// A fresh clone has JSONL with issues but no database file.
func CheckFreshClone(repoPath string) DoctorCheck {
// Follow redirect to resolve actual beads directory
beadsDir := resolveBeadsDir(filepath.Join(repoPath, ".beads"))
backend, beadsDir := getBackendAndBeadsDir(repoPath)
// Check if .beads/ exists
if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
@@ -404,21 +403,32 @@ func CheckFreshClone(repoPath string) DoctorCheck {
}
}
// Check if database exists
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
// Fall back to canonical database name
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// If database exists, not a fresh clone
if _, err := os.Stat(dbPath); err == nil {
return DoctorCheck{
Name: "Fresh Clone",
Status: "ok",
Message: "Database exists",
// Check if database exists (backend-aware)
switch backend {
case configfile.BackendDolt:
// Dolt is directory-backed: treat .beads/dolt as the DB existence signal.
if info, err := os.Stat(filepath.Join(beadsDir, "dolt")); err == nil && info.IsDir() {
return DoctorCheck{
Name: "Fresh Clone",
Status: "ok",
Message: "Database exists",
}
}
default:
// SQLite (default): check configured .db file path.
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
// Fall back to canonical database name
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
if _, err := os.Stat(dbPath); err == nil {
return DoctorCheck{
Name: "Fresh Clone",
Status: "ok",
Message: "Database exists",
}
}
}
@@ -437,6 +447,12 @@ func CheckFreshClone(repoPath string) DoctorCheck {
if prefix != "" {
fixCmd = fmt.Sprintf("bd init --prefix %s", prefix)
}
if backend == configfile.BackendDolt {
fixCmd = "bd init --backend dolt"
if prefix != "" {
fixCmd = fmt.Sprintf("bd init --backend dolt --prefix %s", prefix)
}
}
return DoctorCheck{
Name: "Fresh Clone",

View File

@@ -54,6 +54,11 @@ func CheckSyncDivergence(path string) DoctorCheck {
}
}
backend := configfile.BackendSQLite
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
backend = cfg.GetBackend()
}
var issues []SyncDivergenceIssue
// Check 1: JSONL differs from git HEAD
@@ -62,10 +67,13 @@ func CheckSyncDivergence(path string) DoctorCheck {
issues = append(issues, *jsonlIssue)
}
// Check 2: SQLite last_import_time vs JSONL mtime
mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir)
if mtimeIssue != nil {
issues = append(issues, *mtimeIssue)
// Check 2: SQLite last_import_time vs JSONL mtime (SQLite only).
// Dolt backend does not maintain SQLite metadata and does not support import-only sync.
if backend == configfile.BackendSQLite {
mtimeIssue := checkSQLiteMtimeDivergence(path, beadsDir)
if mtimeIssue != nil {
issues = append(issues, *mtimeIssue)
}
}
// Check 3: Uncommitted .beads/ changes
@@ -75,10 +83,14 @@ func CheckSyncDivergence(path string) DoctorCheck {
}
if len(issues) == 0 {
msg := "JSONL, SQLite, and git are in sync"
if backend == configfile.BackendDolt {
msg = "JSONL, Dolt, and git are in sync"
}
return DoctorCheck{
Name: "Sync Divergence",
Status: StatusOK,
Message: "JSONL, SQLite, and git are in sync",
Message: msg,
Category: CategoryData,
}
}
@@ -256,10 +268,16 @@ func checkUncommittedBeadsChanges(path, beadsDir string) *SyncDivergenceIssue {
}
}
fixCmd := "bd sync"
// For dolt backend, bd sync/import-only workflows don't apply; recommend a plain git commit.
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.GetBackend() == configfile.BackendDolt {
fixCmd = "git add .beads/ && git commit -m 'sync beads'"
}
return &SyncDivergenceIssue{
Type: "uncommitted_beads",
Description: fmt.Sprintf("Uncommitted .beads/ changes (%d file(s))", fileCount),
FixCommand: "bd sync",
FixCommand: fixCmd,
}
}

View File

@@ -0,0 +1,120 @@
//go:build integration
// +build integration
package main
import (
"encoding/json"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
)
func runBDExecAllowErrorWithEnv(t *testing.T, dir string, extraEnv []string, args ...string) (string, error) {
t.Helper()
cmd := exec.Command(testBD, args...)
cmd.Dir = dir
// Start from a clean-ish environment, then apply overrides.
// NOTE: we keep os.Environ() so PATH etc still work for git/dolt.
env := append([]string{}, os.Environ()...)
env = append(env, extraEnv...)
cmd.Env = env
out, err := cmd.CombinedOutput()
return string(out), err
}
func TestDoltDaemonAutostart_NoTimeoutOnCreate(t *testing.T) {
if testing.Short() {
t.Skip("skipping slow integration test in short mode")
}
if runtime.GOOS == windowsOS {
t.Skip("dolt daemon integration test not supported on windows")
}
tmpDir := createTempDirWithCleanup(t)
// Set up a real git repo so daemon autostart is allowed.
if err := runCommandInDir(tmpDir, "git", "init"); err != nil {
t.Fatalf("git init failed: %v", err)
}
_ = runCommandInDir(tmpDir, "git", "config", "user.email", "test@example.com")
_ = runCommandInDir(tmpDir, "git", "config", "user.name", "Test User")
socketPath := filepath.Join(tmpDir, ".beads", "bd.sock")
env := []string{
"BEADS_TEST_MODE=1",
"BEADS_AUTO_START_DAEMON=true",
"BEADS_NO_DAEMON=0",
"BD_SOCKET=" + socketPath,
}
// Init dolt backend.
initOut, initErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
if initErr != nil {
// If dolt backend isn't available in this build, skip rather than fail.
// (Some environments may build without dolt support.)
lower := strings.ToLower(initOut)
if strings.Contains(lower, "dolt") && (strings.Contains(lower, "not supported") || strings.Contains(lower, "not available") || strings.Contains(lower, "unknown")) {
t.Skipf("dolt backend not available: %s", initOut)
}
t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
}
// Always stop daemon on cleanup (best effort) so temp dir can be removed.
t.Cleanup(func() {
_, _ = runBDExecAllowErrorWithEnv(t, tmpDir, env, "daemon", "stop")
// Give the daemon a moment to release any locks/files.
time.Sleep(200 * time.Millisecond)
})
// Create should auto-start daemon and should NOT fall back with a timeout warning.
createOut, createErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "create", "dolt daemon autostart test", "--json")
if createErr != nil {
t.Fatalf("bd create failed: %v\n%s", createErr, createOut)
}
if strings.Contains(createOut, "Daemon took too long to start") || strings.Contains(createOut, "Running in direct mode") {
t.Fatalf("unexpected daemon fallback on dolt create; output:\n%s", createOut)
}
// Verify daemon reports running (via JSON output).
statusOut, statusErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "daemon", "status", "--json")
if statusErr != nil {
t.Fatalf("bd daemon status failed: %v\n%s", statusErr, statusOut)
}
// We accept either the legacy DaemonStatusResponse shape (daemon_lifecycle.go)
// or the newer DaemonStatusReport shape (daemon_status.go), depending on flags/routes.
// Here we just assert it isn't obviously "not_running".
var m map[string]any
if err := json.Unmarshal([]byte(statusOut), &m); err != nil {
// Sometimes status may print warnings before JSON; try from first '{'.
if idx := strings.Index(statusOut, "{"); idx >= 0 {
if err2 := json.Unmarshal([]byte(statusOut[idx:]), &m); err2 != nil {
t.Fatalf("failed to parse daemon status JSON: %v\n%s", err2, statusOut)
}
} else {
t.Fatalf("failed to parse daemon status JSON: %v\n%s", err, statusOut)
}
}
// Check "running" boolean (legacy) or "status" string (new).
if runningVal, ok := m["running"]; ok {
if b, ok := runningVal.(bool); ok && !b {
t.Fatalf("expected daemon running=true, got: %s", statusOut)
}
} else if statusVal, ok := m["status"]; ok {
if s, ok := statusVal.(string); ok && (s == "not_running" || s == "stale" || s == "unresponsive") {
t.Fatalf("expected daemon to be running/healthy, got status=%q; full: %s", s, statusOut)
}
} else {
// If schema changes again, this will fail loudly and force an update.
t.Fatalf("unexpected daemon status JSON shape (missing running/status): %s", statusOut)
}
}

View File

@@ -0,0 +1,90 @@
//go:build integration
// +build integration
package main
import (
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
)
func TestDoltDoctor_NoSQLiteWarningsAfterInitAndCreate(t *testing.T) {
if testing.Short() {
t.Skip("skipping slow integration test in short mode")
}
if runtime.GOOS == windowsOS {
t.Skip("dolt doctor integration test not supported on windows")
}
tmpDir := createTempDirWithCleanup(t)
// Set up a real git repo so init/create/doctor behave normally.
if err := runCommandInDir(tmpDir, "git", "init"); err != nil {
t.Fatalf("git init failed: %v", err)
}
_ = runCommandInDir(tmpDir, "git", "config", "user.email", "test@example.com")
_ = runCommandInDir(tmpDir, "git", "config", "user.name", "Test User")
socketPath := filepath.Join(tmpDir, ".beads", "bd.sock")
env := []string{
"BEADS_TEST_MODE=1",
"BEADS_AUTO_START_DAEMON=true",
"BEADS_NO_DAEMON=0",
"BD_SOCKET=" + socketPath,
}
// Init dolt backend.
initOut, initErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "init", "--backend", "dolt", "--prefix", "test", "--quiet")
if initErr != nil {
// If dolt backend isn't available in this build, skip rather than fail.
lower := strings.ToLower(initOut)
if strings.Contains(lower, "dolt") && (strings.Contains(lower, "not supported") || strings.Contains(lower, "not available") || strings.Contains(lower, "unknown")) {
t.Skipf("dolt backend not available: %s", initOut)
}
t.Fatalf("bd init --backend dolt failed: %v\n%s", initErr, initOut)
}
// Ensure daemon cleanup so temp dir removal doesn't flake.
t.Cleanup(func() {
_, _ = runBDExecAllowErrorWithEnv(t, tmpDir, env, "daemon", "stop")
time.Sleep(200 * time.Millisecond)
})
// Create one issue so the store is definitely initialized.
createOut, createErr := runBDExecAllowErrorWithEnv(t, tmpDir, env, "create", "doctor dolt smoke", "--json")
if createErr != nil {
t.Fatalf("bd create failed: %v\n%s", createErr, createOut)
}
// Run doctor; it may return non-zero for unrelated warnings (upstream, claude, etc),
// but it should NOT include SQLite-only failures on dolt.
doctorOut, _ := runBDExecAllowErrorWithEnv(t, tmpDir, env, "doctor")
// Also include stderr-like output if doctor wrote it to stdout in some modes.
// (CombinedOutput already captures both.)
for _, forbidden := range []string{
"No beads.db found",
"Unable to read database version",
"Legacy database",
} {
if strings.Contains(doctorOut, forbidden) {
t.Fatalf("bd doctor printed sqlite-specific warning %q in dolt mode; output:\n%s", forbidden, doctorOut)
}
}
// Sanity check: doctor should mention dolt somewhere so we know we exercised the right path.
if !strings.Contains(strings.ToLower(doctorOut), "dolt") {
// Some doctor output is terse depending on flags; don't be too strict, but
// if it's completely missing, that usually means we didn't use dolt config.
t.Fatalf("bd doctor output did not mention dolt; output:\n%s", doctorOut)
}
// Regression check: dolt init must NOT create a SQLite database file.
if _, err := os.Stat(filepath.Join(tmpDir, ".beads", "beads.db")); err == nil {
t.Fatalf("unexpected sqlite database created in dolt mode: %s", filepath.Join(tmpDir, ".beads", "beads.db"))
}
}

View File

@@ -547,11 +547,14 @@ Examples:
// Only do this when exporting to default JSONL path (not arbitrary outputs)
// This prevents validatePreExport from incorrectly blocking on next export
if output == "" || output == findJSONLPath() {
beadsDir := filepath.Dir(finalPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, finalPath); err != nil {
// Log warning but don't fail export
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
// Dolt backend does not have a SQLite DB file, so only touch mtime for SQLite.
if _, ok := store.(*sqlite.SQLiteStorage); ok {
beadsDir := filepath.Dir(finalPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, finalPath); err != nil {
// Log warning but don't fail export
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
}
}
}
}

View File

@@ -429,13 +429,14 @@ func hookPreCommitDolt(beadsDir, worktreeRoot string) int {
fmt.Fprintf(os.Stderr, "Warning: could not open database: %v\n", err)
return 0
}
defer store.Close()
defer func() { _ = store.Close() }()
// Check if store supports versioned operations (required for Dolt)
vs, ok := storage.AsVersioned(store)
if !ok {
// Fall back to full export if not versioned
return doExportAndSaveState(ctx, beadsDir, worktreeRoot, "")
doExportAndSaveState(ctx, beadsDir, worktreeRoot, "")
return 0
}
// Get current Dolt commit hash
@@ -443,7 +444,8 @@ func hookPreCommitDolt(beadsDir, worktreeRoot string) int {
if err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not get Dolt commit: %v\n", err)
// Fall back to full export without commit tracking
return doExportAndSaveState(ctx, beadsDir, worktreeRoot, "")
doExportAndSaveState(ctx, beadsDir, worktreeRoot, "")
return 0
}
// Check if we've already exported for this Dolt commit (idempotency)
@@ -465,17 +467,18 @@ func hookPreCommitDolt(beadsDir, worktreeRoot string) int {
}
}
return doExportAndSaveState(ctx, beadsDir, worktreeRoot, currentDoltCommit)
doExportAndSaveState(ctx, beadsDir, worktreeRoot, currentDoltCommit)
return 0
}
// doExportAndSaveState performs the export and saves state. Shared by main path and fallback.
func doExportAndSaveState(ctx context.Context, beadsDir, worktreeRoot, doltCommit string) int {
func doExportAndSaveState(ctx context.Context, beadsDir, worktreeRoot, doltCommit string) {
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
// Export to JSONL
if err := runJSONLExport(); err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not export to JSONL: %v\n", err)
return 0
return
}
// Stage JSONL files for git commit
@@ -493,8 +496,6 @@ func doExportAndSaveState(ctx context.Context, beadsDir, worktreeRoot, doltCommi
if err := saveExportState(beadsDir, worktreeRoot, state); err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not save export state: %v\n", err)
}
return 0
}
// hasDoltChanges checks if there are any changes between two Dolt commits.
@@ -622,7 +623,7 @@ func hookPostMergeDolt(beadsDir string) int {
fmt.Fprintf(os.Stderr, "Warning: could not open database: %v\n", err)
return 0
}
defer store.Close()
defer func() { _ = store.Close() }()
// Check if Dolt store supports version control operations
doltStore, ok := store.(interface {
@@ -662,7 +663,7 @@ func hookPostMergeDolt(beadsDir string) int {
// Import JSONL to the import branch
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
if err := importFromJSONLToStore(ctx, store, jsonlPath); err != nil {
if err := importFromJSONLToStore(store, jsonlPath); err != nil {
fmt.Fprintf(os.Stderr, "Warning: could not import JSONL: %v\n", err)
// Checkout back to original branch
_ = doltStore.Checkout(ctx, currentBranch)
@@ -830,7 +831,9 @@ func hookPostCheckout(args []string) int {
// importFromJSONLToStore imports issues from JSONL to a store.
// This is a placeholder - the actual implementation should use the store's methods.
func importFromJSONLToStore(ctx context.Context, store interface{}, jsonlPath string) error {
func importFromJSONLToStore(store interface{}, jsonlPath string) error {
_ = store
_ = jsonlPath
// Use bd sync --import-only for now
// TODO: Implement direct store import
cmd := exec.Command("bd", "sync", "--import-only", "--no-git-history", "--no-daemon")

View File

@@ -131,17 +131,27 @@ With --stealth: configures per-repository git settings for invisible beads usage
// The hyphen is added automatically during ID generation
prefix = strings.TrimRight(prefix, "-")
// Create database
// Use global dbPath if set via --db flag or BEADS_DB env var, otherwise default to .beads/beads.db
// Determine storage path.
//
// IMPORTANT: In Dolt mode, we must NOT create a SQLite database file.
// `initDBPath` is used for SQLite-specific tasks (migration, import helpers, etc),
// so in Dolt mode it should point to the Dolt directory instead.
//
// Use global dbPath if set via --db flag or BEADS_DB env var (SQLite-only),
// otherwise default to `.beads/beads.db` for SQLite.
initDBPath := dbPath
if initDBPath == "" {
if backend == configfile.BackendDolt {
initDBPath = filepath.Join(".beads", "dolt")
} else if initDBPath == "" {
initDBPath = filepath.Join(".beads", beads.CanonicalDatabaseName)
}
// Migrate old database files if they exist
if err := migrateOldDatabases(initDBPath, quiet); err != nil {
fmt.Fprintf(os.Stderr, "Error during database migration: %v\n", err)
os.Exit(1)
// Migrate old SQLite database files if they exist (SQLite backend only).
if backend == configfile.BackendSQLite {
if err := migrateOldDatabases(initDBPath, quiet); err != nil {
fmt.Fprintf(os.Stderr, "Error during database migration: %v\n", err)
os.Exit(1)
}
}
// Determine if we should create .beads/ directory in CWD or main repo root
@@ -285,9 +295,10 @@ With --stealth: configures per-repository git settings for invisible beads usage
}
}
// Ensure parent directory exists for the database
// Ensure parent directory exists for the storage backend.
// For SQLite: parent of .beads/beads.db. For Dolt: parent of .beads/dolt.
if err := os.MkdirAll(initDBDir, 0750); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to create database directory %s: %v\n", initDBDir, err)
fmt.Fprintf(os.Stderr, "Error: failed to create storage directory %s: %v\n", initDBDir, err)
os.Exit(1)
}
@@ -389,6 +400,14 @@ With --stealth: configures per-repository git settings for invisible beads usage
if backend != configfile.BackendSQLite {
cfg.Backend = backend
}
// In Dolt mode, metadata.json.database should point to the Dolt directory (not beads.db).
// Backward-compat: older dolt setups left this as "beads.db", which is misleading and
// can trigger SQLite-only code paths.
if backend == configfile.BackendDolt {
if cfg.Database == "" || cfg.Database == beads.CanonicalDatabaseName {
cfg.Database = "dolt"
}
}
if err := cfg.Save(beadsDir); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to create metadata.json: %v\n", err)
@@ -429,40 +448,46 @@ With --stealth: configures per-repository git settings for invisible beads usage
}
}
// Check if git has existing issues to import (fresh clone scenario)
// With --from-jsonl: import from local file instead of git history
if fromJSONL {
// Import from current working tree's JSONL file
localJSONLPath := filepath.Join(beadsDir, "issues.jsonl")
if _, err := os.Stat(localJSONLPath); err == nil {
issueCount, err := importFromLocalJSONL(ctx, initDBPath, store, localJSONLPath)
if err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: import from local JSONL failed: %v\n", err)
// Import issues on init:
// - SQLite backend: import from git history or local JSONL (existing behavior).
// - Dolt backend: do NOT run SQLite import code. Dolt bootstraps itself from
// `.beads/issues.jsonl` on first open (factory_dolt.go) when present.
if backend == configfile.BackendSQLite {
// Check if git has existing issues to import (fresh clone scenario)
// With --from-jsonl: import from local file instead of git history
if fromJSONL {
// Import from current working tree's JSONL file
localJSONLPath := filepath.Join(beadsDir, "issues.jsonl")
if _, err := os.Stat(localJSONLPath); err == nil {
issueCount, err := importFromLocalJSONL(ctx, initDBPath, store, localJSONLPath)
if err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: import from local JSONL failed: %v\n", err)
}
// Non-fatal - continue with empty database
} else if !quiet && issueCount > 0 {
fmt.Fprintf(os.Stderr, "✓ Imported %d issues from local %s\n\n", issueCount, localJSONLPath)
}
// Non-fatal - continue with empty database
} else if !quiet && issueCount > 0 {
fmt.Fprintf(os.Stderr, "✓ Imported %d issues from local %s\n\n", issueCount, localJSONLPath)
}
} else if !quiet {
fmt.Fprintf(os.Stderr, "Warning: --from-jsonl specified but %s not found\n", localJSONLPath)
}
} else {
// Default: import from git history
issueCount, jsonlPath, gitRef := checkGitForIssues()
if issueCount > 0 {
if !quiet {
fmt.Fprintf(os.Stderr, "\n✓ Database initialized. Found %d issues in git, importing...\n", issueCount)
}
if err := importFromGit(ctx, initDBPath, store, jsonlPath, gitRef); err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: auto-import failed: %v\n", err)
fmt.Fprintf(os.Stderr, "Try manually: git show %s:%s | bd import -i /dev/stdin\n", gitRef, jsonlPath)
}
// Non-fatal - continue with empty database
} else if !quiet {
fmt.Fprintf(os.Stderr, "✓ Successfully imported %d issues from git.\n\n", issueCount)
fmt.Fprintf(os.Stderr, "Warning: --from-jsonl specified but %s not found\n", localJSONLPath)
}
} else {
// Default: import from git history
issueCount, jsonlPath, gitRef := checkGitForIssues()
if issueCount > 0 {
if !quiet {
fmt.Fprintf(os.Stderr, "\n✓ Database initialized. Found %d issues in git, importing...\n", issueCount)
}
if err := importFromGit(ctx, initDBPath, store, jsonlPath, gitRef); err != nil {
if !quiet {
fmt.Fprintf(os.Stderr, "Warning: auto-import failed: %v\n", err)
fmt.Fprintf(os.Stderr, "Try manually: git show %s:%s | bd import -i /dev/stdin\n", gitRef, jsonlPath)
}
// Non-fatal - continue with empty database
} else if !quiet {
fmt.Fprintf(os.Stderr, "✓ Successfully imported %d issues from git.\n\n", issueCount)
}
}
}
}
@@ -676,7 +701,6 @@ func migrateOldDatabases(targetPath string, quiet bool) error {
return nil
}
// readFirstIssueFromJSONL reads the first issue from a JSONL file
func readFirstIssueFromJSONL(path string) (*types.Issue, error) {
// #nosec G304 -- helper reads JSONL file chosen by current bd command
@@ -744,7 +768,6 @@ func readFirstIssueFromGit(jsonlPath, gitRef string) (*types.Issue, error) {
return nil, nil
}
// checkExistingBeadsData checks for existing database files
// and returns an error if found (safety guard for bd-emg)
//
@@ -781,7 +804,29 @@ func checkExistingBeadsData(prefix string) error {
return nil // No .beads directory, safe to init
}
// Check for existing database file
// Check for existing database (SQLite or Dolt)
//
// NOTE: For Dolt backend, the "database" is a directory at `.beads/dolt/`.
// We prefer metadata.json as the single source of truth, but we also keep a
// conservative fallback for legacy SQLite setups.
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.GetBackend() == configfile.BackendDolt {
doltPath := filepath.Join(beadsDir, "dolt")
if info, err := os.Stat(doltPath); err == nil && info.IsDir() {
return fmt.Errorf(`
%s Found existing Dolt database: %s
This workspace is already initialized.
To use the existing database:
Just run bd commands normally (e.g., %s)
To completely reinitialize (data loss warning):
rm -rf .beads && bd init --backend dolt --prefix %s
Aborting.`, ui.RenderWarn("⚠"), doltPath, ui.RenderAccent("bd list"), prefix)
}
}
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if _, err := os.Stat(dbPath); err == nil {
return fmt.Errorf(`

View File

@@ -924,6 +924,7 @@ type SyncConflictRecord struct {
// LoadSyncConflictState loads the sync conflict state from disk.
func LoadSyncConflictState(beadsDir string) (*SyncConflictState, error) {
path := filepath.Join(beadsDir, "sync_conflicts.json")
// #nosec G304 -- path is derived from the workspace .beads directory
data, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
@@ -1023,7 +1024,7 @@ func resolveSyncConflicts(ctx context.Context, jsonlPath string, strategy config
// Handle manual strategy with interactive resolution
if strategy == config.ConflictStrategyManual {
return resolveSyncConflictsManually(ctx, jsonlPath, beadsDir, conflictState, baseMap, localMap, remoteMap, baseIssues, localIssues, remoteIssues)
return resolveSyncConflictsManually(ctx, jsonlPath, beadsDir, conflictState, baseMap, localMap, remoteMap)
}
resolved := 0
@@ -1101,8 +1102,7 @@ func resolveSyncConflicts(ctx context.Context, jsonlPath string, strategy config
// resolveSyncConflictsManually handles manual conflict resolution with interactive prompts.
func resolveSyncConflictsManually(ctx context.Context, jsonlPath, beadsDir string, conflictState *SyncConflictState,
baseMap, localMap, remoteMap map[string]*beads.Issue,
baseIssues, localIssues, remoteIssues []*beads.Issue) error {
baseMap, localMap, remoteMap map[string]*beads.Issue) error {
// Build interactive conflicts list
var interactiveConflicts []InteractiveConflict

View File

@@ -12,6 +12,7 @@ import (
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/validation"
@@ -86,13 +87,17 @@ func finalizeExport(ctx context.Context, result *ExportResult) {
}
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
// This prevents validatePreExport from incorrectly blocking on next export
// This prevents validatePreExport from incorrectly blocking on next export.
//
// Dolt backend does not use a SQLite DB file, so this check is SQLite-only.
if result.JSONLPath != "" {
beadsDir := filepath.Dir(result.JSONLPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, result.JSONLPath); err != nil {
// Non-fatal warning
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
if _, ok := store.(*sqlite.SQLiteStorage); ok {
beadsDir := filepath.Dir(result.JSONLPath)
dbPath := filepath.Join(beadsDir, "beads.db")
if err := TouchDatabaseFile(dbPath, result.JSONLPath); err != nil {
// Non-fatal warning
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
}
}
}
}

View File

@@ -160,15 +160,15 @@ func displayConflictDiff(conflict InteractiveConflict) {
// Description (show truncated if different)
if local.Description != remote.Description {
fmt.Printf(" %s\n", ui.RenderAccent("description:"))
fmt.Printf(" %s %s\n", ui.RenderMuted("local:"), truncateText(local.Description, 60))
fmt.Printf(" %s %s\n", ui.RenderAccent("remote:"), truncateText(remote.Description, 60))
fmt.Printf(" %s %s\n", ui.RenderMuted("local:"), truncateText(local.Description))
fmt.Printf(" %s %s\n", ui.RenderAccent("remote:"), truncateText(remote.Description))
}
// Notes (show truncated if different)
if local.Notes != remote.Notes {
fmt.Printf(" %s\n", ui.RenderAccent("notes:"))
fmt.Printf(" %s %s\n", ui.RenderMuted("local:"), truncateText(local.Notes, 60))
fmt.Printf(" %s %s\n", ui.RenderAccent("remote:"), truncateText(remote.Notes, 60))
fmt.Printf(" %s %s\n", ui.RenderMuted("local:"), truncateText(local.Notes))
fmt.Printf(" %s %s\n", ui.RenderAccent("remote:"), truncateText(remote.Notes))
}
// Labels
@@ -371,9 +371,11 @@ func valueOrNone(s string) string {
return s
}
// truncateText truncates a string to maxLen runes (not bytes) for proper UTF-8 handling.
const truncateTextMaxLen = 60
// truncateText truncates a string to a fixed max length (runes, not bytes) for proper UTF-8 handling.
// Replaces newlines with spaces for single-line display.
func truncateText(s string, maxLen int) string {
func truncateText(s string) string {
if s == "" {
return "(empty)"
}
@@ -383,14 +385,14 @@ func truncateText(s string, maxLen int) string {
// Count runes, not bytes, for proper UTF-8 handling
runeCount := utf8.RuneCountInString(s)
if runeCount <= maxLen {
if runeCount <= truncateTextMaxLen {
return s
}
// Truncate by runes
runes := []rune(s)
if maxLen <= 3 {
if truncateTextMaxLen <= 3 {
return "..."
}
return string(runes[:maxLen-3]) + "..."
return string(runes[:truncateTextMaxLen-3]) + "..."
}

View File

@@ -10,78 +10,37 @@ import (
func TestTruncateText(t *testing.T) {
tests := []struct {
name string
input string
maxLen int
want string
name string
input string
want string
}{
{
name: "empty string",
input: "",
maxLen: 10,
want: "(empty)",
name: "empty string",
input: "",
want: "(empty)",
},
{
name: "short string",
input: "hello",
maxLen: 10,
want: "hello",
name: "short string",
input: "hello",
want: "hello",
},
{
name: "exact length",
input: "0123456789",
maxLen: 10,
want: "0123456789",
name: "newlines replaced",
input: "line1\nline2\r\nline3",
want: "line1 line2 line3",
},
{
name: "truncated",
input: "this is a very long string",
maxLen: 15,
want: "this is a ve...",
},
{
name: "newlines replaced",
input: "line1\nline2\nline3",
maxLen: 30,
want: "line1 line2 line3",
},
{
name: "very short max",
input: "hello world",
maxLen: 3,
want: "...",
},
{
name: "UTF-8 characters preserved",
input: "Hello 世界This is a test",
maxLen: 12,
want: "Hello 世界!...",
},
{
name: "UTF-8 exact length",
input: "日本語テスト",
maxLen: 6,
want: "日本語テスト",
},
{
name: "UTF-8 truncate",
input: "日本語テストです",
maxLen: 6,
want: "日本語...",
},
{
name: "emoji handling",
input: "Hello 🌍🌎🌏 World",
maxLen: 12,
want: "Hello 🌍🌎🌏...",
name: "truncated at fixed max",
input: strings.Repeat("a", truncateTextMaxLen+10),
want: strings.Repeat("a", truncateTextMaxLen-3) + "...",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := truncateText(tt.input, tt.maxLen)
got := truncateText(tt.input)
if got != tt.want {
t.Errorf("truncateText(%q, %d) = %q, want %q", tt.input, tt.maxLen, got, tt.want)
t.Errorf("truncateText(%q) = %q, want %q", tt.input, got, tt.want)
}
})
}

View File

@@ -18,6 +18,9 @@ First time in a repository:
# Basic setup
bd init
# Dolt backend (version-controlled SQL database)
bd init --backend dolt
# OSS contributor (fork workflow with separate planning repo)
bd init --contributor
@@ -35,6 +38,10 @@ The wizard will:
- Prompt to configure git merge driver (recommended)
- Auto-start daemon for sync
Notes:
- SQLite backend stores data in `.beads/beads.db`.
- Dolt backend stores data in `.beads/dolt/` and records `"database": "dolt"` in `.beads/metadata.json`.
## Your First Issues
```bash

View File

@@ -216,8 +216,9 @@ func findLocalBeadsDir() string {
// findDatabaseInBeadsDir searches for a database file within a .beads directory.
// It implements the standard search order:
// 1. Check metadata.json first (single source of truth)
// - For SQLite backend: returns path to .db file
// - For Dolt backend: returns path to dolt/ directory
// - For SQLite backend: returns path to .db file
// - For Dolt backend: returns path to dolt/ directory
//
// 2. Fall back to canonical beads.db
// 3. Search for *.db files, filtering out backups and vc.db
//
@@ -231,8 +232,8 @@ func findDatabaseInBeadsDir(beadsDir string, warnOnIssues bool) string {
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
backend := cfg.GetBackend()
if backend == configfile.BackendDolt {
// For Dolt, check if the dolt directory exists
doltPath := filepath.Join(beadsDir, "dolt")
// For Dolt, check if the configured database directory exists
doltPath := cfg.DatabasePath(beadsDir)
if info, err := os.Stat(doltPath); err == nil && info.IsDir() {
return doltPath
}
@@ -575,9 +576,9 @@ func FindJSONLPath(dbPath string) string {
// DatabaseInfo contains information about a discovered beads database
type DatabaseInfo struct {
Path string // Full path to the .db file
BeadsDir string // Parent .beads directory
IssueCount int // Number of issues (-1 if unknown)
Path string // Full path to the .db file
BeadsDir string // Parent .beads directory
IssueCount int // Number of issues (-1 if unknown)
}
// findGitRoot returns the root directory of the current git repository,

View File

@@ -21,7 +21,7 @@ var ConfigWarningWriter io.Writer = os.Stderr
// logConfigWarning logs a warning message if ConfigWarnings is enabled.
func logConfigWarning(format string, args ...interface{}) {
if ConfigWarnings && ConfigWarningWriter != nil {
fmt.Fprintf(ConfigWarningWriter, format, args...)
_, _ = fmt.Fprintf(ConfigWarningWriter, format, args...)
}
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
)
const ConfigFileName = "metadata.json"
@@ -37,7 +38,7 @@ func ConfigPath(beadsDir string) string {
func Load(beadsDir string) (*Config, error) {
configPath := ConfigPath(beadsDir)
data, err := os.ReadFile(configPath) // #nosec G304 - controlled path from config
if os.IsNotExist(err) {
// Try legacy config.json location (migration path)
@@ -49,52 +50,79 @@ func Load(beadsDir string) (*Config, error) {
if err != nil {
return nil, fmt.Errorf("reading legacy config: %w", err)
}
// Migrate: parse legacy config, save as metadata.json, remove old file
var cfg Config
if err := json.Unmarshal(data, &cfg); err != nil {
return nil, fmt.Errorf("parsing legacy config: %w", err)
}
// Save to new location
if err := cfg.Save(beadsDir); err != nil {
return nil, fmt.Errorf("migrating config to metadata.json: %w", err)
}
// Remove legacy file (best effort)
_ = os.Remove(legacyPath)
return &cfg, nil
}
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
var cfg Config
if err := json.Unmarshal(data, &cfg); err != nil {
return nil, fmt.Errorf("parsing config: %w", err)
}
return &cfg, nil
}
// Save writes the config as indented JSON to the metadata file inside beadsDir.
// The file is created with 0600 permissions.
func (c *Config) Save(beadsDir string) error {
	encoded, err := json.MarshalIndent(c, "", "  ")
	if err != nil {
		return fmt.Errorf("marshaling config: %w", err)
	}
	target := ConfigPath(beadsDir)
	if err = os.WriteFile(target, encoded, 0600); err != nil {
		return fmt.Errorf("writing config: %w", err)
	}
	return nil
}
// DatabasePath returns the on-disk storage location for the database.
//
// The Database field is interpreted per backend:
//   - SQLite: a filename (default: beads.db)
//   - Dolt: a directory name (default: dolt)
//
// Backward-compat: early dolt configs wrote "beads.db" even when
// Backend=dolt; in that case it is treated as "dolt". An absolute
// Database value is returned unchanged; a relative one is joined
// onto beadsDir.
func (c *Config) DatabasePath(beadsDir string) string {
	db := strings.TrimSpace(c.Database)
	if c.GetBackend() == BackendDolt {
		if db == "" || db == "beads.db" {
			db = "dolt"
		}
	} else if db == "" {
		// SQLite (default)
		db = "beads.db"
	}
	if filepath.IsAbs(db) {
		return db
	}
	return filepath.Join(beadsDir, db)
}
func (c *Config) JSONLPath(beadsDir string) string {

View File

@@ -8,7 +8,7 @@ import (
func TestDefaultConfig(t *testing.T) {
cfg := DefaultConfig()
if cfg.Database != "beads.db" {
t.Errorf("Database = %q, want beads.db", cfg.Database)
}
@@ -25,26 +25,26 @@ func TestLoadSaveRoundtrip(t *testing.T) {
if err := os.MkdirAll(beadsDir, 0750); err != nil {
t.Fatalf("failed to create .beads directory: %v", err)
}
cfg := DefaultConfig()
if err := cfg.Save(beadsDir); err != nil {
t.Fatalf("Save() failed: %v", err)
}
loaded, err := Load(beadsDir)
if err != nil {
t.Fatalf("Load() failed: %v", err)
}
if loaded == nil {
t.Fatal("Load() returned nil config")
}
if loaded.Database != cfg.Database {
t.Errorf("Database = %q, want %q", loaded.Database, cfg.Database)
}
if loaded.JSONLExport != cfg.JSONLExport {
t.Errorf("JSONLExport = %q, want %q", loaded.JSONLExport, cfg.JSONLExport)
}
@@ -52,12 +52,12 @@ func TestLoadSaveRoundtrip(t *testing.T) {
func TestLoadNonexistent(t *testing.T) {
tmpDir := t.TempDir()
cfg, err := Load(tmpDir)
if err != nil {
t.Fatalf("Load() returned error for nonexistent config: %v", err)
}
if cfg != nil {
t.Errorf("Load() = %v, want nil for nonexistent config", cfg)
}
@@ -66,22 +66,44 @@ func TestLoadNonexistent(t *testing.T) {
// TestDatabasePath verifies that a SQLite config resolves to the
// beads.db file inside the .beads directory.
func TestDatabasePath(t *testing.T) {
	const beadsDir = "/home/user/project/.beads"
	cfg := &Config{Database: "beads.db"}
	want := filepath.Join(beadsDir, "beads.db")
	if got := cfg.DatabasePath(beadsDir); got != want {
		t.Errorf("DatabasePath() = %q, want %q", got, want)
	}
}
// TestDatabasePath_Dolt verifies Dolt backend path resolution, including
// the backward-compat case where older configs recorded "beads.db".
func TestDatabasePath_Dolt(t *testing.T) {
	const beadsDir = "/home/user/project/.beads"
	want := filepath.Join(beadsDir, "dolt")

	cases := []struct {
		name     string
		database string
	}{
		{"explicit dolt dir", "dolt"},
		{"backward compat: dolt backend with beads.db field", "beads.db"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cfg := &Config{Database: tc.database, Backend: BackendDolt}
			if got := cfg.DatabasePath(beadsDir); got != want {
				t.Errorf("DatabasePath() = %q, want %q", got, want)
			}
		})
	}
}
func TestJSONLPath(t *testing.T) {
beadsDir := "/home/user/project/.beads"
tests := []struct {
name string
cfg *Config
want string
name string
cfg *Config
want string
}{
{
name: "default",
@@ -99,7 +121,7 @@ func TestJSONLPath(t *testing.T) {
want: filepath.Join(beadsDir, "issues.jsonl"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.cfg.JSONLPath(beadsDir)
@@ -122,9 +144,9 @@ func TestConfigPath(t *testing.T) {
func TestGetDeletionsRetentionDays(t *testing.T) {
tests := []struct {
name string
cfg *Config
want int
name string
cfg *Config
want int
}{
{
name: "zero uses default",

View File

@@ -122,11 +122,33 @@ func GetGitCommonDir() (string, error) {
// and live in the common git directory (e.g., /repo/.git/hooks), not in
// the worktree-specific directory (e.g., /repo/.git/worktrees/feature/hooks).
func GetGitHooksDir() (string, error) {
commonDir, err := GetGitCommonDir()
ctx, err := getGitContext()
if err != nil {
return "", err
}
return filepath.Join(commonDir, "hooks"), nil
// Respect core.hooksPath if configured.
// This is used by beads' Dolt backend (hooks installed to .beads/hooks/).
cmd := exec.Command("git", "config", "--get", "core.hooksPath")
cmd.Dir = ctx.repoRoot
if out, err := cmd.Output(); err == nil {
hooksPath := strings.TrimSpace(string(out))
if hooksPath != "" {
if filepath.IsAbs(hooksPath) {
return hooksPath, nil
}
// Git treats relative core.hooksPath as relative to the repo root in common usage.
// (e.g., ".beads/hooks", ".githooks").
p := filepath.Join(ctx.repoRoot, hooksPath)
if abs, err := filepath.Abs(p); err == nil {
return abs, nil
}
return p, nil
}
}
// Default: hooks are stored in the common git directory.
return filepath.Join(ctx.commonDir, "hooks"), nil
}
// GetGitRefsDir returns the path to the Git refs directory.

View File

@@ -140,7 +140,7 @@ func findJSONLPath(beadsDir string) string {
func acquireBootstrapLock(lockPath string, timeout time.Duration) (*os.File, error) {
// Create lock file
// #nosec G304 - controlled path
f, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0644)
f, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
return nil, fmt.Errorf("failed to create lock file: %w", err)
}

View File

@@ -439,25 +439,23 @@ func (s *DoltStore) GetStaleIssues(ctx context.Context, filter types.StaleFilter
func (s *DoltStore) GetStatistics(ctx context.Context) (*types.Statistics, error) {
stats := &types.Statistics{}
// Count by status
// Get counts (mirror SQLite semantics: exclude tombstones from TotalIssues, report separately).
// Important: COALESCE to avoid NULL scans when the table is empty.
err := s.db.QueryRowContext(ctx, `
SELECT
COUNT(*) as total,
SUM(CASE WHEN status = 'open' THEN 1 ELSE 0 END) as open_count,
SUM(CASE WHEN status = 'in_progress' THEN 1 ELSE 0 END) as in_progress,
SUM(CASE WHEN status = 'closed' THEN 1 ELSE 0 END) as closed,
SUM(CASE WHEN status = 'blocked' THEN 1 ELSE 0 END) as blocked,
SUM(CASE WHEN status = 'deferred' THEN 1 ELSE 0 END) as deferred,
SUM(CASE WHEN status = 'tombstone' THEN 1 ELSE 0 END) as tombstone,
SUM(CASE WHEN pinned = 1 THEN 1 ELSE 0 END) as pinned
COALESCE(SUM(CASE WHEN status != 'tombstone' THEN 1 ELSE 0 END), 0) as total,
COALESCE(SUM(CASE WHEN status = 'open' THEN 1 ELSE 0 END), 0) as open_count,
COALESCE(SUM(CASE WHEN status = 'in_progress' THEN 1 ELSE 0 END), 0) as in_progress,
COALESCE(SUM(CASE WHEN status = 'closed' THEN 1 ELSE 0 END), 0) as closed,
COALESCE(SUM(CASE WHEN status = 'deferred' THEN 1 ELSE 0 END), 0) as deferred,
COALESCE(SUM(CASE WHEN status = 'tombstone' THEN 1 ELSE 0 END), 0) as tombstone,
COALESCE(SUM(CASE WHEN pinned = 1 THEN 1 ELSE 0 END), 0) as pinned
FROM issues
WHERE status != 'tombstone'
`).Scan(
&stats.TotalIssues,
&stats.OpenIssues,
&stats.InProgressIssues,
&stats.ClosedIssues,
&stats.BlockedIssues,
&stats.DeferredIssues,
&stats.TombstoneIssues,
&stats.PinnedIssues,
@@ -466,6 +464,27 @@ func (s *DoltStore) GetStatistics(ctx context.Context) (*types.Statistics, error
return nil, fmt.Errorf("failed to get statistics: %w", err)
}
// Blocked count (same semantics as SQLite: blocked by open deps).
err = s.db.QueryRowContext(ctx, `
SELECT COUNT(DISTINCT i.id)
FROM issues i
JOIN dependencies d ON i.id = d.issue_id
JOIN issues blocker ON d.depends_on_id = blocker.id
WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
AND d.type = 'blocks'
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
`).Scan(&stats.BlockedIssues)
if err != nil {
return nil, fmt.Errorf("failed to get blocked count: %w", err)
}
// Ready count (use the ready_issues view).
// Note: view already excludes ephemeral issues and blocked transitive deps.
err = s.db.QueryRowContext(ctx, `SELECT COUNT(*) FROM ready_issues`).Scan(&stats.ReadyIssues)
if err != nil {
return nil, fmt.Errorf("failed to get ready count: %w", err)
}
return stats, nil
}

View File

@@ -4,7 +4,6 @@ package factory
import (
"context"
"fmt"
"path/filepath"
"time"
"github.com/steveyegge/beads/internal/configfile"
@@ -84,9 +83,7 @@ func NewFromConfigWithOptions(ctx context.Context, beadsDir string, opts Options
case configfile.BackendSQLite:
return NewWithOptions(ctx, backend, cfg.DatabasePath(beadsDir), opts)
case configfile.BackendDolt:
// For Dolt, use a subdirectory to store the Dolt database
doltPath := filepath.Join(beadsDir, "dolt")
return NewWithOptions(ctx, backend, doltPath, opts)
return NewWithOptions(ctx, backend, cfg.DatabasePath(beadsDir), opts)
default:
return nil, fmt.Errorf("unknown storage backend in config: %s", backend)
}

View File

@@ -16,6 +16,9 @@ First time in a repository:
# Basic setup
bd init
# Dolt backend (version-controlled SQL database)
bd init --backend dolt
# For AI agents (non-interactive)
bd init --quiet
@@ -36,6 +39,10 @@ The wizard will:
- Prompt to configure git merge driver (recommended)
- Auto-start daemon for sync
Notes:
- SQLite backend stores data in `.beads/beads.db`.
- Dolt backend stores data in `.beads/dolt/` and records `"database": "dolt"` in `.beads/metadata.json`.
## Your First Issues
```bash