doctor: add fs fault injection and lock contention coverage

Co-authored-by: factory-droid[bot] <138933559+factory-droid[bot]@users.noreply.github.com>
This commit is contained in:
Jordan Hubbard
2025-12-26 09:22:45 -04:00
parent 8166207eb4
commit 7af3106610
15 changed files with 357 additions and 68 deletions

View File

@@ -357,7 +357,7 @@ func checkDatabaseConfigValues(repoPath string) []string {
}
// Open database in read-only mode
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return issues // Can't open database, skip
}

View File

@@ -155,9 +155,9 @@ func CheckSchemaCompatibility(path string) DoctorCheck {
}
}
// Open database (bd-ckvw: This will run migrations and schema probe)
// Open database (bd-ckvw: schema probe)
// Note: We can't use the global 'store' because doctor can check arbitrary paths
db, err := sql.Open("sqlite3", "file:"+dbPath+"?_pragma=foreign_keys(ON)&_pragma=busy_timeout(30000)")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Schema Compatibility",
@@ -244,7 +244,7 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
}
// Open database in read-only mode for integrity check
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Database Integrity",
@@ -350,7 +350,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
jsonlCount, jsonlPrefixes, jsonlErr := CountJSONLIssues(jsonlPath)
// Single database open for all queries (instead of 3 separate opens)
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
// Database can't be opened. If JSONL has issues, suggest recovery.
if jsonlErr == nil && jsonlCount > 0 {
@@ -523,7 +523,7 @@ func FixDBJSONLSync(path string) error {
// getDatabaseVersionFromPath reads the database version from the given path
func getDatabaseVersionFromPath(dbPath string) string {
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return "unknown"
}

View File

@@ -65,10 +65,10 @@ func DatabaseIntegrity(path string) error {
// Back up corrupt DB and its sidecar files.
ts := time.Now().UTC().Format("20060102T150405Z")
backupDB := dbPath + "." + ts + ".corrupt.backup.db"
if err := os.Rename(dbPath, backupDB); err != nil {
if err := moveFile(dbPath, backupDB); err != nil {
// Retry once after attempting to kill daemons again (helps on platforms with strict file locks).
_ = Daemon(absPath)
if err2 := os.Rename(dbPath, backupDB); err2 != nil {
if err2 := moveFile(dbPath, backupDB); err2 != nil {
// Prefer the original error (more likely root cause).
return fmt.Errorf("failed to back up database: %w", err)
}
@@ -76,7 +76,7 @@ func DatabaseIntegrity(path string) error {
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
sidecar := dbPath + suffix
if _, err := os.Stat(sidecar); err == nil {
_ = os.Rename(sidecar, backupDB+suffix) // best effort
_ = moveFile(sidecar, backupDB+suffix) // best effort
}
}
@@ -98,9 +98,9 @@ func DatabaseIntegrity(path string) error {
failedTS := time.Now().UTC().Format("20060102T150405Z")
if _, statErr := os.Stat(dbPath); statErr == nil {
failedDB := dbPath + "." + failedTS + ".failed.init.db"
_ = os.Rename(dbPath, failedDB)
_ = moveFile(dbPath, failedDB)
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
_ = os.Rename(dbPath+suffix, failedDB+suffix)
_ = moveFile(dbPath+suffix, failedDB+suffix)
}
}
_ = copyFile(backupDB, dbPath)

57
cmd/bd/doctor/fix/fs.go Normal file
View File

@@ -0,0 +1,57 @@
package fix
import (
"errors"
"fmt"
"io"
"os"
"syscall"
)
// Indirection points over os file operations so tests can inject faults
// (e.g. force EXDEV on rename, or ENOSPC on open) without touching the
// real filesystem. Production code always sees the os.* defaults.
var (
	renameFile = os.Rename
	removeFile = os.Remove
	openFileRO = os.Open
	openFileRW = os.OpenFile
)
// moveFile relocates src to dst. It first attempts an atomic rename; when
// that fails because src and dst live on different filesystems (EXDEV),
// it falls back to copying the contents and then deleting the source.
// Any other rename failure is returned unchanged.
func moveFile(src, dst string) error {
	renameErr := renameFile(src, dst)
	if renameErr == nil {
		return nil
	}
	if !isEXDEV(renameErr) {
		return renameErr
	}
	// Cross-device move: copy the bytes, then drop the original.
	if copyErr := copyFile(src, dst); copyErr != nil {
		return copyErr
	}
	if rmErr := removeFile(src); rmErr != nil {
		return fmt.Errorf("failed to remove source after copy: %w", rmErr)
	}
	return nil
}
// copyFile copies the contents of src to dst, creating or truncating dst.
// The destination inherits the source's permission bits (falling back to
// 0644 if they cannot be determined). If the copy or the final close
// fails, the partial destination is removed best-effort so callers never
// observe a truncated file where a full copy was expected.
func copyFile(src, dst string) error {
	in, err := openFileRO(src) // #nosec G304 -- src is within the workspace
	if err != nil {
		return err
	}
	defer in.Close()

	// Preserve the source's mode rather than stamping a fixed 0644.
	perm := os.FileMode(0644)
	if info, statErr := in.Stat(); statErr == nil {
		perm = info.Mode().Perm()
	}

	out, err := openFileRW(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perm)
	if err != nil {
		return err
	}
	defer func() { _ = out.Close() }()

	if _, err := io.Copy(out, in); err != nil {
		_ = removeFile(dst) // best effort: don't leave a truncated copy behind
		return err
	}
	if err := out.Close(); err != nil {
		_ = removeFile(dst) // close failure may mean unflushed data
		return err
	}
	return nil
}
func isEXDEV(err error) bool {
var linkErr *os.LinkError
if errors.As(err, &linkErr) {
return errors.Is(linkErr.Err, syscall.EXDEV)
}
return errors.Is(err, syscall.EXDEV)
}

View File

@@ -0,0 +1,71 @@
package fix
import (
"errors"
"os"
"path/filepath"
"syscall"
"testing"
)
// TestMoveFile_EXDEV_FallsBackToCopy forces every rename to fail with
// EXDEV and verifies moveFile falls back to copy+remove: the source is
// gone and the destination holds the original contents.
func TestMoveFile_EXDEV_FallsBackToCopy(t *testing.T) {
	dir := t.TempDir()
	srcPath := filepath.Join(dir, "src.txt")
	dstPath := filepath.Join(dir, "dst.txt")
	if err := os.WriteFile(srcPath, []byte("hello"), 0644); err != nil {
		t.Fatal(err)
	}

	// Stub the rename seam so the cross-device fallback path runs.
	origRename := renameFile
	t.Cleanup(func() { renameFile = origRename })
	renameFile = func(oldpath, newpath string) error {
		return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
	}

	if err := moveFile(srcPath, dstPath); err != nil {
		t.Fatalf("moveFile failed: %v", err)
	}

	if _, err := os.Stat(srcPath); !os.IsNotExist(err) {
		t.Fatalf("expected src to be removed, stat err=%v", err)
	}
	got, err := os.ReadFile(dstPath)
	if err != nil {
		t.Fatalf("read dst: %v", err)
	}
	if string(got) != "hello" {
		t.Fatalf("dst contents=%q", string(got))
	}
}
// TestMoveFile_EXDEV_CopyFails_LeavesSource forces EXDEV on rename and a
// simulated disk-full failure on the fallback copy, then verifies moveFile
// surfaces the copy error and leaves the source file untouched.
func TestMoveFile_EXDEV_CopyFails_LeavesSource(t *testing.T) {
	dir := t.TempDir()
	srcPath := filepath.Join(dir, "src.txt")
	dstPath := filepath.Join(dir, "dst.txt")
	if err := os.WriteFile(srcPath, []byte("hello"), 0644); err != nil {
		t.Fatal(err)
	}

	origRename, origOpenRW := renameFile, openFileRW
	t.Cleanup(func() {
		renameFile = origRename
		openFileRW = origOpenRW
	})

	// Rename always reports cross-device; opening the copy destination
	// fails with ENOSPC, so the fallback cannot complete.
	renameFile = func(oldpath, newpath string) error {
		return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
	}
	openFileRW = func(name string, flag int, perm os.FileMode) (*os.File, error) {
		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOSPC}
	}

	err := moveFile(srcPath, dstPath)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !errors.Is(err, syscall.ENOSPC) {
		t.Fatalf("expected ENOSPC, got %v", err)
	}
	if _, statErr := os.Stat(srcPath); statErr != nil {
		t.Fatalf("expected src to remain, stat err=%v", statErr)
	}
}

View File

@@ -2,7 +2,6 @@ package fix
import (
"fmt"
"io"
"os"
"path/filepath"
"time"
@@ -58,13 +57,13 @@ func JSONLIntegrity(path string) error {
// Back up the JSONL.
ts := time.Now().UTC().Format("20060102T150405Z")
backup := jsonlPath + "." + ts + ".corrupt.backup.jsonl"
if err := os.Rename(jsonlPath, backup); err != nil {
if err := moveFile(jsonlPath, backup); err != nil {
return fmt.Errorf("failed to back up JSONL: %w", err)
}
binary, err := getBdBinary()
if err != nil {
_ = os.Rename(backup, jsonlPath)
_ = moveFile(backup, jsonlPath)
return err
}
@@ -78,7 +77,7 @@ func JSONLIntegrity(path string) error {
failedTS := time.Now().UTC().Format("20060102T150405Z")
if _, statErr := os.Stat(jsonlPath); statErr == nil {
failed := jsonlPath + "." + failedTS + ".failed.regen.jsonl"
_ = os.Rename(jsonlPath, failed)
_ = moveFile(jsonlPath, failed)
}
_ = copyFile(backup, jsonlPath)
return fmt.Errorf("failed to regenerate JSONL from database: %w (backup: %s)", err, backup)
@@ -86,20 +85,3 @@ func JSONLIntegrity(path string) error {
return nil
}
func copyFile(src, dst string) error {
in, err := os.Open(src) // #nosec G304 -- src is within the workspace
if err != nil {
return err
}
defer in.Close()
out, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer func() { _ = out.Close() }()
if _, err := io.Copy(out, in); err != nil {
return err
}
return out.Close()
}

View File

@@ -0,0 +1,52 @@
package fix
import (
"fmt"
"os"
"strings"
"time"
)
// sqliteConnString builds a SQLite DSN for path with the pragmas the fix
// package relies on: busy_timeout (from BD_LOCK_TIMEOUT, default 30s),
// foreign_keys(ON), and _time_format=sqlite. When readOnly is true,
// mode=ro is included so fixes probing the DB never mutate it. If path is
// already a file: URI, only the missing parameters are appended; existing
// ones are left untouched. Returns "" for an empty path.
func sqliteConnString(path string, readOnly bool) string {
	path = strings.TrimSpace(path)
	if path == "" {
		return ""
	}
	// Honor BD_LOCK_TIMEOUT; ignore unparsable or non-positive values so a
	// bad setting (e.g. "-5s" or "0") cannot disable the busy wait.
	busy := 30 * time.Second
	if v := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); v != "" {
		if d, err := time.ParseDuration(v); err == nil && d > 0 {
			busy = d
		}
	}
	busyMs := int64(busy / time.Millisecond)
	if strings.HasPrefix(path, "file:") {
		// Already a URI: append only the parameters that are absent.
		// NOTE(review): these are plain substring checks, so a parameter
		// such as "_pragma=journal_mode(...)" would also match "mode=" —
		// acceptable for the DSNs this package produces, but worth
		// confirming if callers start passing richer URIs.
		conn := path
		sep := "?"
		if strings.Contains(conn, "?") {
			sep = "&"
		}
		if readOnly && !strings.Contains(conn, "mode=") {
			conn += sep + "mode=ro"
			sep = "&"
		}
		if !strings.Contains(conn, "_pragma=busy_timeout") {
			conn += fmt.Sprintf("%s_pragma=busy_timeout(%d)", sep, busyMs)
			sep = "&"
		}
		if !strings.Contains(conn, "_pragma=foreign_keys") {
			conn += sep + "_pragma=foreign_keys(ON)"
			sep = "&"
		}
		if !strings.Contains(conn, "_time_format=") {
			conn += sep + "_time_format=sqlite"
		}
		return conn
	}
	if readOnly {
		return fmt.Sprintf("file:%s?mode=ro&_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
	}
	return fmt.Sprintf("file:%s?_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
}

View File

@@ -149,7 +149,7 @@ func DBJSONLSync(path string) error {
// countDatabaseIssues counts the number of issues in the database.
func countDatabaseIssues(dbPath string) (int, error) {
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return 0, fmt.Errorf("failed to open database: %w", err)
}

View File

@@ -229,5 +229,5 @@ func ChildParentDependencies(path string) error {
// openDB opens a SQLite database for read-write access
func openDB(dbPath string) (*sql.DB, error) {
return sql.Open("sqlite3", dbPath)
return sql.Open("sqlite3", sqliteConnString(dbPath, false))
}

View File

@@ -829,5 +829,5 @@ func CheckOrphanedIssues(path string) DoctorCheck {
// openDBReadOnly opens a SQLite database in read-only mode
func openDBReadOnly(dbPath string) (*sql.DB, error) {
return sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
return sql.Open("sqlite3", sqliteConnString(dbPath, true))
}

View File

@@ -106,7 +106,7 @@ func CheckPermissions(path string) DoctorCheck {
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if _, err := os.Stat(dbPath); err == nil {
// Try to open database
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Permissions",
@@ -118,7 +118,7 @@ func CheckPermissions(path string) DoctorCheck {
_ = db.Close() // Intentionally ignore close error
// Try a write test
db, err = sql.Open("sqlite", dbPath)
db, err = sql.Open("sqlite", sqliteConnString(dbPath, true))
if err == nil {
_, err = db.Exec("SELECT 1")
_ = db.Close() // Intentionally ignore close error

View File

@@ -51,7 +51,7 @@ func CheckIDFormat(path string) DoctorCheck {
}
// Open database
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Issue IDs",
@@ -121,7 +121,7 @@ func CheckDependencyCycles(path string) DoctorCheck {
}
// Open database to check for cycles
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Dependency Cycles",
@@ -216,7 +216,7 @@ func CheckTombstones(path string) DoctorCheck {
}
}
db, err := sql.Open("sqlite3", dbPath)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Tombstones",
@@ -420,7 +420,7 @@ func CheckRepoFingerprint(path string) DoctorCheck {
}
// Open database
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",

View File

@@ -0,0 +1,54 @@
package doctor
import (
"fmt"
"os"
"strings"
"time"
)
// sqliteConnString builds a SQLite DSN for path with the pragmas doctor
// checks rely on: busy_timeout (from BD_LOCK_TIMEOUT, default 30s),
// foreign_keys(ON), and _time_format=sqlite. When readOnly is true,
// mode=ro is included so checks never mutate the database. If path is
// already a file: URI, only the missing parameters are appended; existing
// ones are left untouched. Returns "" for an empty path.
func sqliteConnString(path string, readOnly bool) string {
	path = strings.TrimSpace(path)
	if path == "" {
		return ""
	}
	// Best-effort: honor the same env var viper uses (BD_LOCK_TIMEOUT).
	// Ignore unparsable or non-positive values so a bad setting (e.g.
	// "-5s" or "0") cannot disable the busy wait.
	busy := 30 * time.Second
	if v := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); v != "" {
		if d, err := time.ParseDuration(v); err == nil && d > 0 {
			busy = d
		}
	}
	busyMs := int64(busy / time.Millisecond)
	// If it's already a URI, append pragmas if absent.
	// NOTE(review): these are plain substring checks, so a parameter such
	// as "_pragma=journal_mode(...)" would also match "mode=" — acceptable
	// for the DSNs doctor produces, but worth confirming if callers start
	// passing richer URIs.
	if strings.HasPrefix(path, "file:") {
		conn := path
		sep := "?"
		if strings.Contains(conn, "?") {
			sep = "&"
		}
		if readOnly && !strings.Contains(conn, "mode=") {
			conn += sep + "mode=ro"
			sep = "&"
		}
		if !strings.Contains(conn, "_pragma=busy_timeout") {
			conn += fmt.Sprintf("%s_pragma=busy_timeout(%d)", sep, busyMs)
			sep = "&"
		}
		if !strings.Contains(conn, "_pragma=foreign_keys") {
			conn += sep + "_pragma=foreign_keys(ON)"
			sep = "&"
		}
		if !strings.Contains(conn, "_time_format=") {
			conn += sep + "_time_format=sqlite"
		}
		return conn
	}
	if readOnly {
		return fmt.Sprintf("file:%s?mode=ro&_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
	}
	return fmt.Sprintf("file:%s?_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
}

View File

@@ -4,6 +4,8 @@ package main
import (
"bytes"
"context"
"database/sql"
"io"
"os"
"os/exec"
@@ -11,6 +13,8 @@ import (
"strings"
"testing"
"time"
_ "github.com/ncruces/go-sqlite3/driver"
)
func TestDoctorRepair_CorruptDatabase_NotADatabase_RebuildFromJSONL(t *testing.T) {
@@ -223,6 +227,55 @@ func TestDoctorRepair_JSONLIntegrity_MalformedLine_ReexportFromDB(t *testing.T)
}
}
// TestDoctorRepair_DatabaseIntegrity_DBWriteLocked_ImportFailsFast holds a
// write lock on the beads database from this test process and verifies that
// `bd import` fails fast (honoring a short BD_LOCK_TIMEOUT) instead of
// hanging or silently succeeding.
func TestDoctorRepair_DatabaseIntegrity_DBWriteLocked_ImportFailsFast(t *testing.T) {
	bdExe := buildBDForTest(t)
	ws := mkTmpDirInTmp(t, "bd-doctor-chaos-db-locked-*")
	dbPath := filepath.Join(ws, ".beads", "beads.db")
	jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
	// Seed the workspace: init, one issue, and a matching JSONL export so
	// the later import has real work to attempt.
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
		t.Fatalf("bd init failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
		t.Fatalf("bd create failed: %v", err)
	}
	if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
		t.Fatalf("bd export failed: %v", err)
	}
	// Lock the DB for writes in-process.
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		t.Fatalf("open db: %v", err)
	}
	defer db.Close()
	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("begin tx: %v", err)
	}
	// An uncommitted INSERT keeps the write lock held; the transaction is
	// only rolled back when the test finishes.
	if _, err := tx.Exec("INSERT INTO issues (id, title, status) VALUES ('lock-test', 'Lock Test', 'open')"); err != nil {
		_ = tx.Rollback()
		t.Fatalf("insert lock row: %v", err)
	}
	defer func() { _ = tx.Rollback() }()
	// Bound the whole import so a hang surfaces as a test failure rather
	// than stalling the suite.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// 200ms busy timeout keeps the expected failure quick.
	out, err := runBDWithEnv(ctx, bdExe, ws, dbPath, map[string]string{
		"BD_LOCK_TIMEOUT": "200ms",
	}, "import", "-i", jsonlPath, "--force", "--skip-existing", "--no-git-history")
	if err == nil {
		t.Fatalf("expected bd import to fail under DB write lock")
	}
	// Distinguish "failed fast" (desired) from "hit our 5s deadline" (hang).
	if ctx.Err() == context.DeadlineExceeded {
		t.Fatalf("import exceeded timeout (likely hung); output:\n%s", out)
	}
	// The exact wording varies by driver/path, so accept any lock-ish message.
	low := strings.ToLower(out)
	if !strings.Contains(low, "locked") && !strings.Contains(low, "busy") && !strings.Contains(low, "timeout") {
		t.Fatalf("expected lock/busy/timeout error, got:\n%s", out)
	}
}
func TestDoctorRepair_CorruptDatabase_ReadOnlyBeadsDir_PermissionsFixMakesWritable(t *testing.T) {
bdExe := buildBDForTest(t)
ws := mkTmpDirInTmp(t, "bd-doctor-chaos-readonly-*")
@@ -303,3 +356,23 @@ func startDaemonForChaosTest(t *testing.T, bdExe, ws, dbPath string) *exec.Cmd {
t.Fatalf("daemon failed to start (no socket: %s)\nstdout:\n%s\nstderr:\n%s", sock, stdout.String(), stderr.String())
return nil
}
// runBDWithEnv executes the bd binary at exe in dir against dbPath with the
// given extra environment variables, returning combined stdout+stderr. All
// commands except "init" get --no-daemon so they never spawn a daemon.
func runBDWithEnv(ctx context.Context, exe, dir, dbPath string, env map[string]string, args ...string) (string, error) {
	cmdArgs := []string{"--db", dbPath}
	if len(args) > 0 && args[0] != "init" {
		cmdArgs = append(cmdArgs, "--no-daemon")
	}
	cmdArgs = append(cmdArgs, args...)

	cmd := exec.CommandContext(ctx, exe, cmdArgs...)
	cmd.Dir = dir

	environ := append(os.Environ(),
		"BEADS_NO_DAEMON=1",
		"BEADS_DIR="+filepath.Join(dir, ".beads"),
	)
	for key, val := range env {
		environ = append(environ, key+"="+val)
	}
	cmd.Env = environ

	output, err := cmd.CombinedOutput()
	return string(output), err
}

View File

@@ -156,7 +156,7 @@ Examples:
_ = daemonClient.Close()
daemonClient = nil
}
// Note: We used to check database file timestamps here, but WAL files
// get created when opening the DB, making timestamp checks unreliable.
// Instead, we check issue counts after loading (see below).
@@ -168,7 +168,7 @@ Examples:
fmt.Fprintf(os.Stderr, "Error: no database path found\n")
os.Exit(1)
}
store, err = sqlite.New(rootCtx, dbPath)
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -302,20 +302,20 @@ Examples:
// Safety check: prevent exporting stale database that would lose issues
if output != "" && !force {
debug.Logf("Debug: checking staleness - output=%s, force=%v\n", output, force)
// Read existing JSONL to get issue IDs
jsonlIDs, err := getIssueIDsFromJSONL(output)
if err != nil && !os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "Warning: failed to read existing JSONL for staleness check: %v\n", err)
}
if err == nil && len(jsonlIDs) > 0 {
// Build set of DB issue IDs
dbIDs := make(map[string]bool)
for _, issue := range issues {
dbIDs[issue.ID] = true
}
// Check if JSONL has any issues that DB doesn't have
var missingIDs []string
for id := range jsonlIDs {
@@ -323,17 +323,17 @@ Examples:
missingIDs = append(missingIDs, id)
}
}
debug.Logf("Debug: JSONL has %d issues, DB has %d issues, missing %d\n",
debug.Logf("Debug: JSONL has %d issues, DB has %d issues, missing %d\n",
len(jsonlIDs), len(issues), len(missingIDs))
if len(missingIDs) > 0 {
slices.Sort(missingIDs)
fmt.Fprintf(os.Stderr, "Error: refusing to export stale database that would lose issues\n")
fmt.Fprintf(os.Stderr, " Database has %d issues\n", len(issues))
fmt.Fprintf(os.Stderr, " JSONL has %d issues\n", len(jsonlIDs))
fmt.Fprintf(os.Stderr, " Export would lose %d issue(s):\n", len(missingIDs))
// Show first 10 missing issues
showCount := len(missingIDs)
if showCount > 10 {
@@ -345,7 +345,7 @@ Examples:
if len(missingIDs) > 10 {
fmt.Fprintf(os.Stderr, " ... and %d more\n", len(missingIDs)-10)
}
fmt.Fprintf(os.Stderr, "\n")
fmt.Fprintf(os.Stderr, "This usually means:\n")
fmt.Fprintf(os.Stderr, " 1. You need to run 'bd import -i %s' to sync the latest changes\n", output)
@@ -434,8 +434,8 @@ Examples:
skippedCount := 0
for _, issue := range issues {
if err := encoder.Encode(issue); err != nil {
fmt.Fprintf(os.Stderr, "Error encoding issue %s: %v\n", issue.ID, err)
os.Exit(1)
fmt.Fprintf(os.Stderr, "Error encoding issue %s: %v\n", issue.ID, err)
os.Exit(1)
}
exportedIDs = append(exportedIDs, issue.ID)
@@ -495,19 +495,19 @@ Examples:
}
}
// Verify JSONL file integrity after export
actualCount, err := countIssuesInJSONL(finalPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Export verification failed: %v\n", err)
os.Exit(1)
}
if actualCount != len(exportedIDs) {
fmt.Fprintf(os.Stderr, "Error: Export verification failed\n")
fmt.Fprintf(os.Stderr, " Expected: %d issues\n", len(exportedIDs))
fmt.Fprintf(os.Stderr, " JSONL file: %d lines\n", actualCount)
fmt.Fprintf(os.Stderr, " Mismatch indicates export failed to write all issues\n")
os.Exit(1)
}
// Verify JSONL file integrity after export
actualCount, err := countIssuesInJSONL(finalPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Export verification failed: %v\n", err)
os.Exit(1)
}
if actualCount != len(exportedIDs) {
fmt.Fprintf(os.Stderr, "Error: Export verification failed\n")
fmt.Fprintf(os.Stderr, " Expected: %d issues\n", len(exportedIDs))
fmt.Fprintf(os.Stderr, " JSONL file: %d lines\n", actualCount)
fmt.Fprintf(os.Stderr, " Mismatch indicates export failed to write all issues\n")
os.Exit(1)
}
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
// Only do this when exporting to default JSONL path (not arbitrary outputs)
@@ -520,9 +520,9 @@ Examples:
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
}
}
}
}
// Output statistics if JSON format requested
// Output statistics if JSON format requested
if jsonOutput {
stats := map[string]interface{}{
"success": true,