bd sync: 2025-12-27 15:56:42
This commit is contained in:
@@ -316,10 +316,6 @@ func checkMetadataConfigValues(repoPath string) []string {
|
||||
|
||||
// Validate jsonl_export filename
|
||||
if cfg.JSONLExport != "" {
|
||||
switch cfg.JSONLExport {
|
||||
case "deletions.jsonl", "interactions.jsonl", "molecules.jsonl":
|
||||
issues = append(issues, fmt.Sprintf("metadata.json jsonl_export: %q is a system file and should not be configured as a JSONL export (expected issues.jsonl)", cfg.JSONLExport))
|
||||
}
|
||||
if strings.Contains(cfg.JSONLExport, string(os.PathSeparator)) || strings.Contains(cfg.JSONLExport, "/") {
|
||||
issues = append(issues, fmt.Sprintf("metadata.json jsonl_export: %q should be a filename, not a path", cfg.JSONLExport))
|
||||
}
|
||||
@@ -357,7 +353,7 @@ func checkDatabaseConfigValues(repoPath string) []string {
|
||||
}
|
||||
|
||||
// Open database in read-only mode
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
|
||||
if err != nil {
|
||||
return issues // Can't open database, skip
|
||||
}
|
||||
|
||||
@@ -213,21 +213,6 @@ func TestCheckMetadataConfigValues(t *testing.T) {
|
||||
t.Error("expected issues for wrong jsonl extension")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("jsonl_export cannot be system file", func(t *testing.T) {
|
||||
metadataContent := `{
|
||||
"database": "beads.db",
|
||||
"jsonl_export": "interactions.jsonl"
|
||||
}`
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "metadata.json"), []byte(metadataContent), 0644); err != nil {
|
||||
t.Fatalf("failed to write metadata.json: %v", err)
|
||||
}
|
||||
|
||||
issues := checkMetadataConfigValues(tmpDir)
|
||||
if len(issues) == 0 {
|
||||
t.Error("expected issues for system jsonl_export")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func contains(s, substr string) bool {
|
||||
|
||||
+14
-173
@@ -155,9 +155,9 @@ func CheckSchemaCompatibility(path string) DoctorCheck {
|
||||
}
|
||||
}
|
||||
|
||||
// Open database (bd-ckvw: schema probe)
|
||||
// Open database (bd-ckvw: This will run migrations and schema probe)
|
||||
// Note: We can't use the global 'store' because doctor can check arbitrary paths
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?_pragma=foreign_keys(ON)&_pragma=busy_timeout(30000)")
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Schema Compatibility",
|
||||
@@ -244,30 +244,13 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
|
||||
}
|
||||
|
||||
// Open database in read-only mode for integrity check
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
|
||||
if err != nil {
|
||||
// Check if JSONL recovery is possible
|
||||
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
|
||||
if jsonlErr != nil {
|
||||
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
|
||||
}
|
||||
|
||||
if jsonlErr == nil && jsonlCount > 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: fmt.Sprintf("Failed to open database (JSONL has %d issues for recovery)", jsonlCount),
|
||||
Detail: err.Error(),
|
||||
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: "Failed to open database for integrity check",
|
||||
Detail: err.Error(),
|
||||
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
|
||||
}
|
||||
}
|
||||
defer db.Close()
|
||||
@@ -276,28 +259,11 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
|
||||
// This checks the entire database for corruption
|
||||
rows, err := db.Query("PRAGMA integrity_check")
|
||||
if err != nil {
|
||||
// Check if JSONL recovery is possible
|
||||
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
|
||||
if jsonlErr != nil {
|
||||
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
|
||||
}
|
||||
|
||||
if jsonlErr == nil && jsonlCount > 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: fmt.Sprintf("Failed to run integrity check (JSONL has %d issues for recovery)", jsonlCount),
|
||||
Detail: err.Error(),
|
||||
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: "Failed to run integrity check",
|
||||
Detail: err.Error(),
|
||||
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
|
||||
}
|
||||
}
|
||||
defer rows.Close()
|
||||
@@ -320,59 +286,28 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
|
||||
}
|
||||
}
|
||||
|
||||
// Any other result indicates corruption - check if JSONL recovery is possible
|
||||
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
|
||||
if jsonlErr != nil {
|
||||
// Try alternate name
|
||||
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
|
||||
}
|
||||
|
||||
if jsonlErr == nil && jsonlCount > 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: fmt.Sprintf("Database corruption detected (JSONL has %d issues for recovery)", jsonlCount),
|
||||
Detail: strings.Join(results, "; "),
|
||||
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
|
||||
}
|
||||
}
|
||||
|
||||
// Any other result indicates corruption
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: "Database corruption detected",
|
||||
Detail: strings.Join(results, "; "),
|
||||
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
|
||||
Fix: "Database may need recovery. Export with 'bd export' if possible, then restore from backup or reinitialize",
|
||||
}
|
||||
}
|
||||
|
||||
// CheckDatabaseJSONLSync checks if database and JSONL are in sync
|
||||
func CheckDatabaseJSONLSync(path string) DoctorCheck {
|
||||
beadsDir := filepath.Join(path, ".beads")
|
||||
|
||||
// Resolve database path (respects metadata.json override).
|
||||
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
|
||||
dbPath = cfg.DatabasePath(beadsDir)
|
||||
}
|
||||
|
||||
// Find JSONL file (respects metadata.json override when set).
|
||||
jsonlPath := ""
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
|
||||
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
|
||||
p := cfg.JSONLPath(beadsDir)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
jsonlPath = p
|
||||
}
|
||||
}
|
||||
}
|
||||
if jsonlPath == "" {
|
||||
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
|
||||
testPath := filepath.Join(beadsDir, name)
|
||||
if _, err := os.Stat(testPath); err == nil {
|
||||
jsonlPath = testPath
|
||||
break
|
||||
}
|
||||
// Find JSONL file
|
||||
var jsonlPath string
|
||||
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
|
||||
testPath := filepath.Join(beadsDir, name)
|
||||
if _, err := os.Stat(testPath); err == nil {
|
||||
jsonlPath = testPath
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
@@ -398,7 +333,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
|
||||
jsonlCount, jsonlPrefixes, jsonlErr := CountJSONLIssues(jsonlPath)
|
||||
|
||||
// Single database open for all queries (instead of 3 separate opens)
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
// Database can't be opened. If JSONL has issues, suggest recovery.
|
||||
if jsonlErr == nil && jsonlCount > 0 {
|
||||
@@ -455,16 +390,11 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
|
||||
|
||||
// Use JSONL error if we got it earlier
|
||||
if jsonlErr != nil {
|
||||
fixMsg := "Run 'bd doctor --fix' to attempt recovery"
|
||||
if strings.Contains(jsonlErr.Error(), "malformed") {
|
||||
fixMsg = "Run 'bd doctor --fix' to back up and regenerate the JSONL from the database"
|
||||
}
|
||||
return DoctorCheck{
|
||||
Name: "DB-JSONL Sync",
|
||||
Status: StatusWarning,
|
||||
Message: "Unable to read JSONL file",
|
||||
Detail: jsonlErr.Error(),
|
||||
Fix: fixMsg,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -571,7 +501,7 @@ func FixDBJSONLSync(path string) error {
|
||||
|
||||
// getDatabaseVersionFromPath reads the database version from the given path
|
||||
func getDatabaseVersionFromPath(dbPath string) string {
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
@@ -690,92 +620,3 @@ func isNoDbModeConfigured(beadsDir string) bool {
|
||||
|
||||
return cfg.NoDb
|
||||
}
|
||||
|
||||
// CheckDatabaseSize warns when the database has accumulated many closed issues.
|
||||
// This is purely informational - pruning is NEVER auto-fixed because it
|
||||
// permanently deletes data. Users must explicitly run 'bd cleanup' to prune.
|
||||
//
|
||||
// Config: doctor.suggest_pruning_issue_count (default: 5000, 0 = disabled)
|
||||
//
|
||||
// DESIGN NOTE: This check intentionally has NO auto-fix. Unlike other doctor
|
||||
// checks that fix configuration or sync issues, pruning is destructive and
|
||||
// irreversible. The user must make an explicit decision to delete their
|
||||
// closed issue history. We only provide guidance, never action.
|
||||
func CheckDatabaseSize(path string) DoctorCheck {
|
||||
beadsDir := filepath.Join(path, ".beads")
|
||||
|
||||
// Get database path
|
||||
var dbPath string
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
|
||||
dbPath = cfg.DatabasePath(beadsDir)
|
||||
} else {
|
||||
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
}
|
||||
|
||||
// If no database, skip this check
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (no database)",
|
||||
}
|
||||
}
|
||||
|
||||
// Read threshold from config (default 5000, 0 = disabled)
|
||||
threshold := 5000
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (unable to open database)",
|
||||
}
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Check for custom threshold in config table
|
||||
var thresholdStr string
|
||||
err = db.QueryRow("SELECT value FROM config WHERE key = ?", "doctor.suggest_pruning_issue_count").Scan(&thresholdStr)
|
||||
if err == nil {
|
||||
if _, err := fmt.Sscanf(thresholdStr, "%d", &threshold); err != nil {
|
||||
threshold = 5000 // Reset to default on parse error
|
||||
}
|
||||
}
|
||||
|
||||
// If disabled, return OK
|
||||
if threshold == 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "Check disabled (threshold = 0)",
|
||||
}
|
||||
}
|
||||
|
||||
// Count closed issues
|
||||
var closedCount int
|
||||
err = db.QueryRow("SELECT COUNT(*) FROM issues WHERE status = 'closed'").Scan(&closedCount)
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (unable to count issues)",
|
||||
}
|
||||
}
|
||||
|
||||
// Check against threshold
|
||||
if closedCount > threshold {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
|
||||
Detail: "Large number of closed issues may impact performance",
|
||||
Fix: "Consider running 'bd cleanup --older-than 90' to prune old closed issues",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,13 +12,6 @@ import (
|
||||
// This prevents fork bombs when tests call functions that execute bd subcommands.
|
||||
var ErrTestBinary = fmt.Errorf("running as test binary - cannot execute bd subcommands")
|
||||
|
||||
func newBdCmd(bdBinary string, args ...string) *exec.Cmd {
|
||||
fullArgs := append([]string{"--no-daemon"}, args...)
|
||||
cmd := exec.Command(bdBinary, fullArgs...) // #nosec G204 -- bdBinary from validated executable path
|
||||
cmd.Env = append(os.Environ(), "BEADS_NO_DAEMON=1")
|
||||
return cmd
|
||||
}
|
||||
|
||||
// getBdBinary returns the path to the bd binary to use for fix operations.
|
||||
// It prefers the current executable to avoid command injection attacks.
|
||||
// Returns ErrTestBinary if running as a test binary to prevent fork bombs.
|
||||
|
||||
@@ -3,6 +3,7 @@ package fix
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
@@ -35,7 +36,7 @@ func Daemon(path string) error {
|
||||
}
|
||||
|
||||
// Run bd daemons killall to clean up stale daemons
|
||||
cmd := newBdCmd(bdBinary, "daemons", "killall")
|
||||
cmd := exec.Command(bdBinary, "daemons", "killall") // #nosec G204 -- bdBinary from validated executable path
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
|
||||
@@ -32,13 +32,6 @@ func DatabaseConfig(path string) error {
|
||||
|
||||
fixed := false
|
||||
|
||||
// Never treat system JSONL files as a JSONL export configuration.
|
||||
if isSystemJSONLFilename(cfg.JSONLExport) {
|
||||
fmt.Printf(" Updating jsonl_export: %s → issues.jsonl\n", cfg.JSONLExport)
|
||||
cfg.JSONLExport = "issues.jsonl"
|
||||
fixed = true
|
||||
}
|
||||
|
||||
// Check if configured JSONL exists
|
||||
if cfg.JSONLExport != "" {
|
||||
jsonlPath := cfg.JSONLPath(beadsDir)
|
||||
@@ -106,15 +99,7 @@ func findActualJSONLFile(beadsDir string) string {
|
||||
strings.Contains(lowerName, ".orig") ||
|
||||
strings.Contains(lowerName, ".bak") ||
|
||||
strings.Contains(lowerName, "~") ||
|
||||
strings.HasPrefix(lowerName, "backup_") ||
|
||||
// System files are not JSONL exports.
|
||||
name == "deletions.jsonl" ||
|
||||
name == "interactions.jsonl" ||
|
||||
name == "molecules.jsonl" ||
|
||||
// Git merge conflict artifacts (e.g., issues.base.jsonl, issues.left.jsonl)
|
||||
strings.Contains(lowerName, ".base.jsonl") ||
|
||||
strings.Contains(lowerName, ".left.jsonl") ||
|
||||
strings.Contains(lowerName, ".right.jsonl") {
|
||||
strings.HasPrefix(lowerName, "backup_") {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -136,15 +121,6 @@ func findActualJSONLFile(beadsDir string) string {
|
||||
return candidates[0]
|
||||
}
|
||||
|
||||
func isSystemJSONLFilename(name string) bool {
|
||||
switch name {
|
||||
case "deletions.jsonl", "interactions.jsonl", "molecules.jsonl":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// LegacyJSONLConfig migrates from legacy beads.jsonl to canonical issues.jsonl.
|
||||
// This renames the file, updates metadata.json, and updates .gitattributes if present.
|
||||
// bd-6xd: issues.jsonl is the canonical filename
|
||||
|
||||
@@ -220,53 +220,3 @@ func TestLegacyJSONLConfig_UpdatesGitattributes(t *testing.T) {
|
||||
t.Errorf("Expected .gitattributes to reference issues.jsonl, got: %q", string(content))
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindActualJSONLFile_SkipsSystemFiles ensures system JSONL files are never treated as JSONL exports.
|
||||
func TestFindActualJSONLFile_SkipsSystemFiles(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Only system files → no candidates.
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, "interactions.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := findActualJSONLFile(tmpDir); got != "" {
|
||||
t.Fatalf("expected empty result, got %q", got)
|
||||
}
|
||||
|
||||
// System + legacy export → legacy wins.
|
||||
if err := os.WriteFile(filepath.Join(tmpDir, "beads.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := findActualJSONLFile(tmpDir); got != "beads.jsonl" {
|
||||
t.Fatalf("expected beads.jsonl, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDatabaseConfigFix_RejectsSystemJSONLExport(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.Mkdir(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create .beads dir: %v", err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "interactions.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
|
||||
t.Fatalf("Failed to create interactions.jsonl: %v", err)
|
||||
}
|
||||
|
||||
cfg := &configfile.Config{Database: "beads.db", JSONLExport: "interactions.jsonl"}
|
||||
if err := cfg.Save(beadsDir); err != nil {
|
||||
t.Fatalf("Failed to save config: %v", err)
|
||||
}
|
||||
|
||||
if err := DatabaseConfig(tmpDir); err != nil {
|
||||
t.Fatalf("DatabaseConfig failed: %v", err)
|
||||
}
|
||||
|
||||
updated, err := configfile.Load(beadsDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load updated config: %v", err)
|
||||
}
|
||||
if updated.JSONLExport != "issues.jsonl" {
|
||||
t.Fatalf("expected issues.jsonl, got %q", updated.JSONLExport)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,116 +0,0 @@
|
||||
package fix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/beads"
|
||||
"github.com/steveyegge/beads/internal/configfile"
|
||||
)
|
||||
|
||||
// DatabaseIntegrity attempts to recover from database corruption by:
|
||||
// 1. Backing up the corrupt database (and WAL/SHM if present)
|
||||
// 2. Re-initializing the database from the working tree JSONL export
|
||||
//
|
||||
// This is intentionally conservative: it will not delete JSONL, and it preserves the
|
||||
// original DB as a backup for forensic recovery.
|
||||
func DatabaseIntegrity(path string) error {
|
||||
if err := validateBeadsWorkspace(path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve path: %w", err)
|
||||
}
|
||||
|
||||
beadsDir := filepath.Join(absPath, ".beads")
|
||||
|
||||
// Best-effort: stop any running daemon to reduce the chance of DB file locks.
|
||||
_ = Daemon(absPath)
|
||||
|
||||
// Resolve database path (respects metadata.json database override).
|
||||
var dbPath string
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
|
||||
dbPath = cfg.DatabasePath(beadsDir)
|
||||
} else {
|
||||
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
}
|
||||
|
||||
// Find JSONL source of truth.
|
||||
jsonlPath := ""
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
|
||||
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
|
||||
candidate := cfg.JSONLPath(beadsDir)
|
||||
if _, err := os.Stat(candidate); err == nil {
|
||||
jsonlPath = candidate
|
||||
}
|
||||
}
|
||||
}
|
||||
if jsonlPath == "" {
|
||||
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
|
||||
candidate := filepath.Join(beadsDir, name)
|
||||
if _, err := os.Stat(candidate); err == nil {
|
||||
jsonlPath = candidate
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if jsonlPath == "" {
|
||||
return fmt.Errorf("cannot auto-recover: no JSONL export found in %s", beadsDir)
|
||||
}
|
||||
|
||||
// Back up corrupt DB and its sidecar files.
|
||||
ts := time.Now().UTC().Format("20060102T150405Z")
|
||||
backupDB := dbPath + "." + ts + ".corrupt.backup.db"
|
||||
if err := moveFile(dbPath, backupDB); err != nil {
|
||||
// Retry once after attempting to kill daemons again (helps on platforms with strict file locks).
|
||||
_ = Daemon(absPath)
|
||||
if err2 := moveFile(dbPath, backupDB); err2 != nil {
|
||||
// Prefer the original error (more likely root cause).
|
||||
return fmt.Errorf("failed to back up database: %w", err)
|
||||
}
|
||||
}
|
||||
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
|
||||
sidecar := dbPath + suffix
|
||||
if _, err := os.Stat(sidecar); err == nil {
|
||||
_ = moveFile(sidecar, backupDB+suffix) // best effort
|
||||
}
|
||||
}
|
||||
|
||||
// Rebuild by importing from the working tree JSONL into a fresh database.
|
||||
bdBinary, err := getBdBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use import (not init) so we always hydrate from the working tree JSONL, not git-tracked blobs.
|
||||
args := []string{"--db", dbPath, "import", "-i", jsonlPath, "--force", "--no-git-history"}
|
||||
cmd := newBdCmd(bdBinary, args...)
|
||||
cmd.Dir = absPath
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Best-effort rollback: attempt to restore the original DB, while preserving the backup.
|
||||
failedTS := time.Now().UTC().Format("20060102T150405Z")
|
||||
if _, statErr := os.Stat(dbPath); statErr == nil {
|
||||
failedDB := dbPath + "." + failedTS + ".failed.init.db"
|
||||
_ = moveFile(dbPath, failedDB)
|
||||
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
|
||||
_ = moveFile(dbPath+suffix, failedDB+suffix)
|
||||
}
|
||||
}
|
||||
_ = copyFile(backupDB, dbPath)
|
||||
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
|
||||
if _, statErr := os.Stat(backupDB + suffix); statErr == nil {
|
||||
_ = copyFile(backupDB+suffix, dbPath+suffix)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to rebuild database from JSONL: %w (backup: %s)", err, backupDB)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package fix
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
renameFile = os.Rename
|
||||
removeFile = os.Remove
|
||||
openFileRO = os.Open
|
||||
openFileRW = os.OpenFile
|
||||
)
|
||||
|
||||
func moveFile(src, dst string) error {
|
||||
if err := renameFile(src, dst); err == nil {
|
||||
return nil
|
||||
} else if isEXDEV(err) {
|
||||
if err := copyFile(src, dst); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := removeFile(src); err != nil {
|
||||
return fmt.Errorf("failed to remove source after copy: %w", err)
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func copyFile(src, dst string) error {
|
||||
in, err := openFileRO(src) // #nosec G304 -- src is within the workspace
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
out, err := openFileRW(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { _ = out.Close() }()
|
||||
if _, err := io.Copy(out, in); err != nil {
|
||||
return err
|
||||
}
|
||||
return out.Close()
|
||||
}
|
||||
|
||||
func isEXDEV(err error) bool {
|
||||
var linkErr *os.LinkError
|
||||
if errors.As(err, &linkErr) {
|
||||
return errors.Is(linkErr.Err, syscall.EXDEV)
|
||||
}
|
||||
return errors.Is(err, syscall.EXDEV)
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
package fix
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMoveFile_EXDEV_FallsBackToCopy(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
src := filepath.Join(root, "src.txt")
|
||||
dst := filepath.Join(root, "dst.txt")
|
||||
if err := os.WriteFile(src, []byte("hello"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
oldRename := renameFile
|
||||
defer func() { renameFile = oldRename }()
|
||||
renameFile = func(oldpath, newpath string) error {
|
||||
return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
|
||||
}
|
||||
|
||||
if err := moveFile(src, dst); err != nil {
|
||||
t.Fatalf("moveFile failed: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(src); !os.IsNotExist(err) {
|
||||
t.Fatalf("expected src to be removed, stat err=%v", err)
|
||||
}
|
||||
data, err := os.ReadFile(dst)
|
||||
if err != nil {
|
||||
t.Fatalf("read dst: %v", err)
|
||||
}
|
||||
if string(data) != "hello" {
|
||||
t.Fatalf("dst contents=%q", string(data))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMoveFile_EXDEV_CopyFails_LeavesSource(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
src := filepath.Join(root, "src.txt")
|
||||
dst := filepath.Join(root, "dst.txt")
|
||||
if err := os.WriteFile(src, []byte("hello"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
oldRename := renameFile
|
||||
oldOpenRW := openFileRW
|
||||
defer func() {
|
||||
renameFile = oldRename
|
||||
openFileRW = oldOpenRW
|
||||
}()
|
||||
renameFile = func(oldpath, newpath string) error {
|
||||
return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
|
||||
}
|
||||
openFileRW = func(name string, flag int, perm os.FileMode) (*os.File, error) {
|
||||
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOSPC}
|
||||
}
|
||||
|
||||
err := moveFile(src, dst)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error")
|
||||
}
|
||||
if !errors.Is(err, syscall.ENOSPC) {
|
||||
t.Fatalf("expected ENOSPC, got %v", err)
|
||||
}
|
||||
if _, err := os.Stat(src); err != nil {
|
||||
t.Fatalf("expected src to remain, stat err=%v", err)
|
||||
}
|
||||
}
|
||||
@@ -28,7 +28,7 @@ func GitHooks(path string) error {
|
||||
}
|
||||
|
||||
// Run bd hooks install
|
||||
cmd := newBdCmd(bdBinary, "hooks", "install")
|
||||
cmd := exec.Command(bdBinary, "hooks", "install") // #nosec G204 -- bdBinary from validated executable path
|
||||
cmd.Dir = path // Set working directory without changing process dir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
package fix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/beads"
|
||||
"github.com/steveyegge/beads/internal/configfile"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
// JSONLIntegrity backs up a malformed JSONL export and regenerates it from the database.
|
||||
// This is safe only when a database exists and is readable.
|
||||
func JSONLIntegrity(path string) error {
|
||||
if err := validateBeadsWorkspace(path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
absPath, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve path: %w", err)
|
||||
}
|
||||
|
||||
beadsDir := filepath.Join(absPath, ".beads")
|
||||
|
||||
// Resolve db path.
|
||||
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
|
||||
dbPath = cfg.DatabasePath(beadsDir)
|
||||
}
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("cannot auto-repair JSONL: no database found")
|
||||
}
|
||||
|
||||
// Resolve JSONL export path.
|
||||
jsonlPath := ""
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
|
||||
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
|
||||
p := cfg.JSONLPath(beadsDir)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
jsonlPath = p
|
||||
}
|
||||
}
|
||||
}
|
||||
if jsonlPath == "" {
|
||||
p := utils.FindJSONLInDir(beadsDir)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
jsonlPath = p
|
||||
}
|
||||
}
|
||||
if jsonlPath == "" {
|
||||
return fmt.Errorf("cannot auto-repair JSONL: no JSONL file found")
|
||||
}
|
||||
|
||||
// Back up the JSONL.
|
||||
ts := time.Now().UTC().Format("20060102T150405Z")
|
||||
backup := jsonlPath + "." + ts + ".corrupt.backup.jsonl"
|
||||
if err := moveFile(jsonlPath, backup); err != nil {
|
||||
return fmt.Errorf("failed to back up JSONL: %w", err)
|
||||
}
|
||||
|
||||
binary, err := getBdBinary()
|
||||
if err != nil {
|
||||
_ = moveFile(backup, jsonlPath)
|
||||
return err
|
||||
}
|
||||
|
||||
// Re-export from DB.
|
||||
cmd := newBdCmd(binary, "--db", dbPath, "export", "-o", jsonlPath, "--force")
|
||||
cmd.Dir = absPath
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Best-effort rollback: restore the original JSONL, but keep the backup.
|
||||
failedTS := time.Now().UTC().Format("20060102T150405Z")
|
||||
if _, statErr := os.Stat(jsonlPath); statErr == nil {
|
||||
failed := jsonlPath + "." + failedTS + ".failed.regen.jsonl"
|
||||
_ = moveFile(jsonlPath, failed)
|
||||
}
|
||||
_ = copyFile(backup, jsonlPath)
|
||||
return fmt.Errorf("failed to regenerate JSONL from database: %w (backup: %s)", err, backup)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -3,10 +3,8 @@ package fix
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/steveyegge/beads/internal/beads"
|
||||
"github.com/steveyegge/beads/internal/configfile"
|
||||
)
|
||||
|
||||
// DatabaseVersion fixes database version mismatches by running bd migrate,
|
||||
@@ -25,15 +23,12 @@ func DatabaseVersion(path string) error {
|
||||
|
||||
// Check if database exists - if not, run init instead of migrate (bd-4h9)
|
||||
beadsDir := filepath.Join(path, ".beads")
|
||||
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
|
||||
dbPath = cfg.DatabasePath(beadsDir)
|
||||
}
|
||||
dbPath := filepath.Join(beadsDir, "beads.db")
|
||||
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
// No database - this is a fresh clone, run bd init
|
||||
fmt.Println("→ No database found, running 'bd init' to hydrate from JSONL...")
|
||||
cmd := newBdCmd(bdBinary, "--db", dbPath, "init")
|
||||
cmd := exec.Command(bdBinary, "init") // #nosec G204 -- bdBinary from validated executable path
|
||||
cmd.Dir = path
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
@@ -46,8 +41,8 @@ func DatabaseVersion(path string) error {
|
||||
}
|
||||
|
||||
// Database exists - run bd migrate
|
||||
cmd := newBdCmd(bdBinary, "--db", dbPath, "migrate")
|
||||
cmd.Dir = path // Set working directory without changing process dir
|
||||
cmd := exec.Command(bdBinary, "migrate") // #nosec G204 -- bdBinary from validated executable path
|
||||
cmd.Dir = path // Set working directory without changing process dir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
|
||||
@@ -1,81 +0,0 @@
|
||||
package fix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// DatabaseCorruptionRecovery recovers a corrupted database from JSONL backup.
|
||||
// It backs up the corrupted database, deletes it, and re-imports from JSONL.
|
||||
func DatabaseCorruptionRecovery(path string) error {
|
||||
// Validate workspace
|
||||
if err := validateBeadsWorkspace(path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
beadsDir := filepath.Join(path, ".beads")
|
||||
dbPath := filepath.Join(beadsDir, "beads.db")
|
||||
|
||||
// Check if database exists
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("no database to recover")
|
||||
}
|
||||
|
||||
// Find JSONL file
|
||||
jsonlPath := findJSONLPath(beadsDir)
|
||||
if jsonlPath == "" {
|
||||
return fmt.Errorf("no JSONL backup found - cannot recover (try restoring from git history)")
|
||||
}
|
||||
|
||||
// Count issues in JSONL
|
||||
issueCount, err := countJSONLIssues(jsonlPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read JSONL: %w", err)
|
||||
}
|
||||
if issueCount == 0 {
|
||||
return fmt.Errorf("JSONL is empty - cannot recover (try restoring from git history)")
|
||||
}
|
||||
|
||||
// Backup corrupted database
|
||||
backupPath := dbPath + ".corrupt"
|
||||
fmt.Printf(" Backing up corrupted database to %s\n", filepath.Base(backupPath))
|
||||
if err := os.Rename(dbPath, backupPath); err != nil {
|
||||
return fmt.Errorf("failed to backup corrupted database: %w", err)
|
||||
}
|
||||
|
||||
// Get bd binary path
|
||||
bdBinary, err := getBdBinary()
|
||||
if err != nil {
|
||||
// Restore corrupted database on failure
|
||||
_ = os.Rename(backupPath, dbPath)
|
||||
return err
|
||||
}
|
||||
|
||||
// Run bd import with --rename-on-import to handle prefix mismatches
|
||||
fmt.Printf(" Recovering %d issues from %s\n", issueCount, filepath.Base(jsonlPath))
|
||||
cmd := exec.Command(bdBinary, "import", "-i", jsonlPath, "--rename-on-import") // #nosec G204
|
||||
cmd.Dir = path
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Keep backup on failure
|
||||
fmt.Printf(" Warning: recovery failed, corrupted database preserved at %s\n", filepath.Base(backupPath))
|
||||
return fmt.Errorf("failed to import from JSONL: %w", err)
|
||||
}
|
||||
|
||||
// Run migrate to set version metadata
|
||||
migrateCmd := exec.Command(bdBinary, "migrate") // #nosec G204
|
||||
migrateCmd.Dir = path
|
||||
migrateCmd.Stdout = os.Stdout
|
||||
migrateCmd.Stderr = os.Stderr
|
||||
if err := migrateCmd.Run(); err != nil {
|
||||
// Non-fatal - import succeeded, version just won't be set
|
||||
fmt.Printf(" Warning: migration failed (non-fatal): %v\n", err)
|
||||
}
|
||||
|
||||
fmt.Printf(" Recovered %d issues from JSONL backup\n", issueCount)
|
||||
return nil
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package fix
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
@@ -30,9 +31,9 @@ func readLineUnbuffered() (string, error) {
|
||||
// RepoFingerprint fixes repo fingerprint mismatches by prompting the user
|
||||
// for which action to take. This is interactive because the consequences
|
||||
// differ significantly between options:
|
||||
// 1. Update repo ID (if URL changed or bd upgraded)
|
||||
// 2. Reinitialize database (if wrong database was copied)
|
||||
// 3. Skip (do nothing)
|
||||
// 1. Update repo ID (if URL changed or bd upgraded)
|
||||
// 2. Reinitialize database (if wrong database was copied)
|
||||
// 3. Skip (do nothing)
|
||||
func RepoFingerprint(path string) error {
|
||||
// Validate workspace
|
||||
if err := validateBeadsWorkspace(path); err != nil {
|
||||
@@ -66,7 +67,7 @@ func RepoFingerprint(path string) error {
|
||||
case "1":
|
||||
// Run bd migrate --update-repo-id
|
||||
fmt.Println(" → Running 'bd migrate --update-repo-id'...")
|
||||
cmd := newBdCmd(bdBinary, "migrate", "--update-repo-id")
|
||||
cmd := exec.Command(bdBinary, "migrate", "--update-repo-id") // #nosec G204 -- bdBinary from validated executable path
|
||||
cmd.Dir = path
|
||||
cmd.Stdin = os.Stdin // Allow user to respond to migrate's confirmation prompt
|
||||
cmd.Stdout = os.Stdout
|
||||
@@ -104,7 +105,7 @@ func RepoFingerprint(path string) error {
|
||||
_ = os.Remove(dbPath + "-shm")
|
||||
|
||||
fmt.Println(" → Running 'bd init'...")
|
||||
cmd := newBdCmd(bdBinary, "init", "--quiet")
|
||||
cmd := exec.Command(bdBinary, "init", "--quiet") // #nosec G204 -- bdBinary from validated executable path
|
||||
cmd.Dir = path
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
package fix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func sqliteConnString(path string, readOnly bool) string {
|
||||
path = strings.TrimSpace(path)
|
||||
if path == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
busy := 30 * time.Second
|
||||
if v := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); v != "" {
|
||||
if d, err := time.ParseDuration(v); err == nil {
|
||||
busy = d
|
||||
}
|
||||
}
|
||||
busyMs := int64(busy / time.Millisecond)
|
||||
|
||||
if strings.HasPrefix(path, "file:") {
|
||||
conn := path
|
||||
sep := "?"
|
||||
if strings.Contains(conn, "?") {
|
||||
sep = "&"
|
||||
}
|
||||
if readOnly && !strings.Contains(conn, "mode=") {
|
||||
conn += sep + "mode=ro"
|
||||
sep = "&"
|
||||
}
|
||||
if !strings.Contains(conn, "_pragma=busy_timeout") {
|
||||
conn += fmt.Sprintf("%s_pragma=busy_timeout(%d)", sep, busyMs)
|
||||
sep = "&"
|
||||
}
|
||||
if !strings.Contains(conn, "_pragma=foreign_keys") {
|
||||
conn += sep + "_pragma=foreign_keys(ON)"
|
||||
sep = "&"
|
||||
}
|
||||
if !strings.Contains(conn, "_time_format=") {
|
||||
conn += sep + "_time_format=sqlite"
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
||||
if readOnly {
|
||||
return fmt.Sprintf("file:%s?mode=ro&_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
|
||||
}
|
||||
return fmt.Sprintf("file:%s?_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
|
||||
}
|
||||
+18
-42
@@ -6,6 +6,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
_ "github.com/ncruces/go-sqlite3/driver"
|
||||
@@ -37,23 +38,13 @@ func DBJSONLSync(path string) error {
|
||||
|
||||
// Find JSONL file
|
||||
var jsonlPath string
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
|
||||
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
|
||||
p := cfg.JSONLPath(beadsDir)
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
jsonlPath = p
|
||||
}
|
||||
}
|
||||
}
|
||||
if jsonlPath == "" {
|
||||
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
|
||||
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
|
||||
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
|
||||
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
|
||||
|
||||
if _, err := os.Stat(issuesJSONL); err == nil {
|
||||
jsonlPath = issuesJSONL
|
||||
} else if _, err := os.Stat(beadsJSONL); err == nil {
|
||||
jsonlPath = beadsJSONL
|
||||
}
|
||||
if _, err := os.Stat(issuesJSONL); err == nil {
|
||||
jsonlPath = issuesJSONL
|
||||
} else if _, err := os.Stat(beadsJSONL); err == nil {
|
||||
jsonlPath = beadsJSONL
|
||||
}
|
||||
|
||||
// Check if both database and JSONL exist
|
||||
@@ -111,36 +102,21 @@ func DBJSONLSync(path string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Run the appropriate sync command
|
||||
var cmd *exec.Cmd
|
||||
if syncDirection == "export" {
|
||||
// Export DB to JSONL file (must specify -o to write to file, not stdout)
|
||||
jsonlOutputPath := jsonlPath
|
||||
exportCmd := newBdCmd(bdBinary, "--db", dbPath, "export", "-o", jsonlOutputPath, "--force")
|
||||
exportCmd.Dir = path // Set working directory without changing process dir
|
||||
exportCmd.Stdout = os.Stdout
|
||||
exportCmd.Stderr = os.Stderr
|
||||
if err := exportCmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to export database to JSONL: %w", err)
|
||||
}
|
||||
|
||||
// Staleness check uses last_import_time. After exporting, JSONL mtime is newer,
|
||||
// so mark the DB as fresh by running a no-op import (skip existing issues).
|
||||
markFreshCmd := newBdCmd(bdBinary, "--db", dbPath, "import", "-i", jsonlOutputPath, "--force", "--skip-existing", "--no-git-history")
|
||||
markFreshCmd.Dir = path
|
||||
markFreshCmd.Stdout = os.Stdout
|
||||
markFreshCmd.Stderr = os.Stderr
|
||||
if err := markFreshCmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to mark database as fresh after export: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
jsonlOutputPath := filepath.Join(beadsDir, "issues.jsonl")
|
||||
cmd = exec.Command(bdBinary, "export", "-o", jsonlOutputPath, "--force") // #nosec G204 -- bdBinary from validated executable path
|
||||
} else {
|
||||
cmd = exec.Command(bdBinary, "sync", "--import-only") // #nosec G204 -- bdBinary from validated executable path
|
||||
}
|
||||
|
||||
importCmd := newBdCmd(bdBinary, "--db", dbPath, "sync", "--import-only")
|
||||
importCmd.Dir = path // Set working directory without changing process dir
|
||||
importCmd.Stdout = os.Stdout
|
||||
importCmd.Stderr = os.Stderr
|
||||
cmd.Dir = path // Set working directory without changing process dir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := importCmd.Run(); err != nil {
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to sync database with JSONL: %w", err)
|
||||
}
|
||||
|
||||
@@ -149,7 +125,7 @@ func DBJSONLSync(path string) error {
|
||||
|
||||
// countDatabaseIssues counts the number of issues in the database.
|
||||
func countDatabaseIssues(dbPath string) (int, error) {
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to open database: %w", err)
|
||||
}
|
||||
|
||||
@@ -32,7 +32,8 @@ func SyncBranchConfig(path string) error {
|
||||
}
|
||||
|
||||
// Set sync.branch using bd config set
|
||||
setCmd := newBdCmd(bdBinary, "config", "set", "sync.branch", currentBranch)
|
||||
// #nosec G204 - bdBinary is controlled by getBdBinary() which returns os.Executable()
|
||||
setCmd := exec.Command(bdBinary, "config", "set", "sync.branch", currentBranch)
|
||||
setCmd.Dir = path
|
||||
if output, err := setCmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("failed to set sync.branch: %w\nOutput: %s", err, string(output))
|
||||
|
||||
@@ -180,14 +180,11 @@ func ChildParentDependencies(path string) error {
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Find child→parent BLOCKING dependencies where issue_id starts with depends_on_id + "."
|
||||
// Only matches blocking types (blocks, conditional-blocks, waits-for) that cause deadlock.
|
||||
// Excludes 'parent-child' type which is a legitimate structural hierarchy relationship.
|
||||
// Find child→parent dependencies where issue_id starts with depends_on_id + "."
|
||||
query := `
|
||||
SELECT d.issue_id, d.depends_on_id, d.type
|
||||
SELECT d.issue_id, d.depends_on_id
|
||||
FROM dependencies d
|
||||
WHERE d.issue_id LIKE d.depends_on_id || '.%'
|
||||
AND d.type IN ('blocks', 'conditional-blocks', 'waits-for')
|
||||
`
|
||||
rows, err := db.Query(query)
|
||||
if err != nil {
|
||||
@@ -198,13 +195,12 @@ func ChildParentDependencies(path string) error {
|
||||
type badDep struct {
|
||||
issueID string
|
||||
dependsOnID string
|
||||
depType string
|
||||
}
|
||||
var badDeps []badDep
|
||||
|
||||
for rows.Next() {
|
||||
var d badDep
|
||||
if err := rows.Scan(&d.issueID, &d.dependsOnID, &d.depType); err == nil {
|
||||
if err := rows.Scan(&d.issueID, &d.dependsOnID); err == nil {
|
||||
badDeps = append(badDeps, d)
|
||||
}
|
||||
}
|
||||
@@ -214,10 +210,10 @@ func ChildParentDependencies(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete child→parent blocking dependencies (preserving parent-child type)
|
||||
// Delete child→parent dependencies
|
||||
for _, d := range badDeps {
|
||||
_, err := db.Exec("DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ? AND type = ?",
|
||||
d.issueID, d.dependsOnID, d.depType)
|
||||
_, err := db.Exec("DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?",
|
||||
d.issueID, d.dependsOnID)
|
||||
if err != nil {
|
||||
fmt.Printf(" Warning: failed to remove %s→%s: %v\n", d.issueID, d.dependsOnID, err)
|
||||
} else {
|
||||
@@ -233,5 +229,5 @@ func ChildParentDependencies(path string) error {
|
||||
|
||||
// openDB opens a SQLite database for read-write access
|
||||
func openDB(dbPath string) (*sql.DB, error) {
|
||||
return sql.Open("sqlite3", sqliteConnString(dbPath, false))
|
||||
return sql.Open("sqlite3", dbPath)
|
||||
}
|
||||
|
||||
@@ -138,66 +138,3 @@ func TestChildParentDependencies_FixesBadDeps(t *testing.T) {
|
||||
t.Errorf("Expected 2 dirty issues (unique issue_ids from removed deps), got %d", dirtyCount)
|
||||
}
|
||||
}
|
||||
|
||||
// TestChildParentDependencies_PreservesParentChildType verifies that legitimate
|
||||
// parent-child type dependencies are NOT removed (only blocking types are removed).
|
||||
// Regression test for GitHub issue #750.
|
||||
func TestChildParentDependencies_PreservesParentChildType(t *testing.T) {
|
||||
// Set up test database with both 'blocks' and 'parent-child' type deps
|
||||
dir := t.TempDir()
|
||||
beadsDir := filepath.Join(dir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(beadsDir, "beads.db")
|
||||
db, err := openDB(dbPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create schema with both 'blocks' (anti-pattern) and 'parent-child' (legitimate) deps
|
||||
_, err = db.Exec(`
|
||||
CREATE TABLE issues (id TEXT PRIMARY KEY);
|
||||
CREATE TABLE dependencies (issue_id TEXT, depends_on_id TEXT, type TEXT);
|
||||
CREATE TABLE dirty_issues (issue_id TEXT PRIMARY KEY);
|
||||
INSERT INTO issues (id) VALUES ('bd-abc'), ('bd-abc.1'), ('bd-abc.2');
|
||||
INSERT INTO dependencies (issue_id, depends_on_id, type) VALUES
|
||||
('bd-abc.1', 'bd-abc', 'parent-child'),
|
||||
('bd-abc.2', 'bd-abc', 'parent-child'),
|
||||
('bd-abc.1', 'bd-abc', 'blocks');
|
||||
`)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
db.Close()
|
||||
|
||||
// Run fix
|
||||
err = ChildParentDependencies(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("ChildParentDependencies failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify only 'blocks' type was removed, 'parent-child' preserved
|
||||
db, _ = openDB(dbPath)
|
||||
defer db.Close()
|
||||
|
||||
var blocksCount int
|
||||
db.QueryRow("SELECT COUNT(*) FROM dependencies WHERE type = 'blocks'").Scan(&blocksCount)
|
||||
if blocksCount != 0 {
|
||||
t.Errorf("Expected 0 'blocks' dependencies after fix, got %d", blocksCount)
|
||||
}
|
||||
|
||||
var parentChildCount int
|
||||
db.QueryRow("SELECT COUNT(*) FROM dependencies WHERE type = 'parent-child'").Scan(&parentChildCount)
|
||||
if parentChildCount != 2 {
|
||||
t.Errorf("Expected 2 'parent-child' dependencies preserved, got %d", parentChildCount)
|
||||
}
|
||||
|
||||
// Verify only 1 dirty issue (the one with 'blocks' dep removed)
|
||||
var dirtyCount int
|
||||
db.QueryRow("SELECT COUNT(*) FROM dirty_issues").Scan(&dirtyCount)
|
||||
if dirtyCount != 1 {
|
||||
t.Errorf("Expected 1 dirty issue, got %d", dirtyCount)
|
||||
}
|
||||
}
|
||||
|
||||
+1
-170
@@ -78,173 +78,6 @@ func CheckGitHooks() DoctorCheck {
|
||||
}
|
||||
}
|
||||
|
||||
// CheckGitWorkingTree checks if the git working tree is clean.
|
||||
// This helps prevent leaving work stranded (AGENTS.md: keep git state clean).
|
||||
func CheckGitWorkingTree(path string) DoctorCheck {
|
||||
cmd := exec.Command("git", "rev-parse", "--git-dir")
|
||||
cmd.Dir = path
|
||||
if err := cmd.Run(); err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Git Working Tree",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (not a git repository)",
|
||||
}
|
||||
}
|
||||
|
||||
cmd = exec.Command("git", "status", "--porcelain")
|
||||
cmd.Dir = path
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Git Working Tree",
|
||||
Status: StatusWarning,
|
||||
Message: "Unable to check git status",
|
||||
Detail: err.Error(),
|
||||
Fix: "Run 'git status' and commit/stash changes before syncing",
|
||||
}
|
||||
}
|
||||
|
||||
status := strings.TrimSpace(string(out))
|
||||
if status == "" {
|
||||
return DoctorCheck{
|
||||
Name: "Git Working Tree",
|
||||
Status: StatusOK,
|
||||
Message: "Clean",
|
||||
}
|
||||
}
|
||||
|
||||
// Show a small sample of paths for quick debugging.
|
||||
lines := strings.Split(status, "\n")
|
||||
maxLines := 8
|
||||
if len(lines) > maxLines {
|
||||
lines = append(lines[:maxLines], "…")
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Git Working Tree",
|
||||
Status: StatusWarning,
|
||||
Message: "Uncommitted changes present",
|
||||
Detail: strings.Join(lines, "\n"),
|
||||
Fix: "Commit or stash changes, then follow AGENTS.md: git pull --rebase && git push",
|
||||
}
|
||||
}
|
||||
|
||||
// CheckGitUpstream checks whether the current branch is up to date with its upstream.
|
||||
// This catches common "forgot to pull/push" failure modes (AGENTS.md: pull --rebase, push).
|
||||
func CheckGitUpstream(path string) DoctorCheck {
|
||||
cmd := exec.Command("git", "rev-parse", "--git-dir")
|
||||
cmd.Dir = path
|
||||
if err := cmd.Run(); err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (not a git repository)",
|
||||
}
|
||||
}
|
||||
|
||||
// Detect detached HEAD.
|
||||
cmd = exec.Command("git", "symbolic-ref", "--short", "HEAD")
|
||||
cmd.Dir = path
|
||||
branchOut, err := cmd.Output()
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusWarning,
|
||||
Message: "Detached HEAD (no branch)",
|
||||
Fix: "Check out a branch before syncing",
|
||||
}
|
||||
}
|
||||
branch := strings.TrimSpace(string(branchOut))
|
||||
|
||||
cmd = exec.Command("git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}")
|
||||
cmd.Dir = path
|
||||
upOut, err := cmd.Output()
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("No upstream configured for %s", branch),
|
||||
Fix: fmt.Sprintf("Set upstream then push: git push -u origin %s", branch),
|
||||
}
|
||||
}
|
||||
upstream := strings.TrimSpace(string(upOut))
|
||||
|
||||
ahead, aheadErr := gitRevListCount(path, "@{u}..HEAD")
|
||||
behind, behindErr := gitRevListCount(path, "HEAD..@{u}")
|
||||
if aheadErr != nil || behindErr != nil {
|
||||
detailParts := []string{}
|
||||
if aheadErr != nil {
|
||||
detailParts = append(detailParts, "ahead: "+aheadErr.Error())
|
||||
}
|
||||
if behindErr != nil {
|
||||
detailParts = append(detailParts, "behind: "+behindErr.Error())
|
||||
}
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("Unable to compare with upstream (%s)", upstream),
|
||||
Detail: strings.Join(detailParts, "; "),
|
||||
Fix: "Run 'git fetch' then check: git status -sb",
|
||||
}
|
||||
}
|
||||
|
||||
if ahead == 0 && behind == 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("Up to date (%s)", upstream),
|
||||
Detail: fmt.Sprintf("Branch: %s", branch),
|
||||
}
|
||||
}
|
||||
|
||||
if ahead > 0 && behind == 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("Ahead of upstream by %d commit(s)", ahead),
|
||||
Detail: fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
|
||||
Fix: "Run 'git push' (AGENTS.md: git pull --rebase && git push)",
|
||||
}
|
||||
}
|
||||
|
||||
if behind > 0 && ahead == 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("Behind upstream by %d commit(s)", behind),
|
||||
Detail: fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
|
||||
Fix: "Run 'git pull --rebase' (then re-run bd sync / bd doctor)",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Git Upstream",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("Diverged from upstream (ahead %d, behind %d)", ahead, behind),
|
||||
Detail: fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
|
||||
Fix: "Run 'git pull --rebase' then 'git push'",
|
||||
}
|
||||
}
|
||||
|
||||
func gitRevListCount(path string, rangeExpr string) (int, error) {
|
||||
cmd := exec.Command("git", "rev-list", "--count", rangeExpr) // #nosec G204 -- fixed args
|
||||
cmd.Dir = path
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
countStr := strings.TrimSpace(string(out))
|
||||
if countStr == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var n int
|
||||
if _, err := fmt.Sscanf(countStr, "%d", &n); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// CheckSyncBranchHookCompatibility checks if pre-push hook is compatible with sync-branch mode.
|
||||
// When sync-branch is configured, the pre-push hook must have the sync-branch bypass logic
|
||||
// (added in version 0.29.0). Without it, users experience circular "bd sync" failures (issue #532).
|
||||
@@ -312,8 +145,6 @@ func CheckSyncBranchHookCompatibility(path string) DoctorCheck {
|
||||
Status: StatusWarning,
|
||||
Message: "Pre-push hook is not a bd hook",
|
||||
Detail: "Cannot verify sync-branch compatibility with custom hooks",
|
||||
Fix: "Either run 'bd hooks install --force' to use bd hooks,\n" +
|
||||
" or ensure your custom hook skips validation when pushing to sync-branch",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -831,5 +662,5 @@ func CheckOrphanedIssues(path string) DoctorCheck {
|
||||
|
||||
// openDBReadOnly opens a SQLite database in read-only mode
|
||||
func openDBReadOnly(dbPath string) (*sql.DB, error) {
|
||||
return sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
return sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
|
||||
}
|
||||
|
||||
@@ -1,176 +0,0 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func mkTmpDirInTmp(t *testing.T, prefix string) string {
|
||||
t.Helper()
|
||||
dir, err := os.MkdirTemp("/tmp", prefix)
|
||||
if err != nil {
|
||||
// Fallback for platforms without /tmp (e.g. Windows).
|
||||
dir, err = os.MkdirTemp("", prefix)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create temp dir: %v", err)
|
||||
}
|
||||
}
|
||||
t.Cleanup(func() { _ = os.RemoveAll(dir) })
|
||||
return dir
|
||||
}
|
||||
|
||||
func runGit(t *testing.T, dir string, args ...string) string {
|
||||
t.Helper()
|
||||
cmd := exec.Command("git", args...)
|
||||
cmd.Dir = dir
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("git %v failed: %v\n%s", args, err, string(out))
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
func initRepo(t *testing.T, dir string, branch string) {
|
||||
t.Helper()
|
||||
_ = os.MkdirAll(filepath.Join(dir, ".beads"), 0755)
|
||||
runGit(t, dir, "init", "-b", branch)
|
||||
runGit(t, dir, "config", "user.email", "test@test.com")
|
||||
runGit(t, dir, "config", "user.name", "Test User")
|
||||
}
|
||||
|
||||
func commitFile(t *testing.T, dir, name, content, msg string) {
|
||||
t.Helper()
|
||||
path := filepath.Join(dir, name)
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
t.Fatalf("mkdir: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("write file: %v", err)
|
||||
}
|
||||
runGit(t, dir, "add", name)
|
||||
runGit(t, dir, "commit", "-m", msg)
|
||||
}
|
||||
|
||||
func TestCheckGitWorkingTree(t *testing.T) {
|
||||
t.Run("not a git repo", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-nt-*")
|
||||
check := CheckGitWorkingTree(dir)
|
||||
if check.Status != StatusOK {
|
||||
t.Fatalf("status=%q want %q", check.Status, StatusOK)
|
||||
}
|
||||
if !strings.Contains(check.Message, "N/A") {
|
||||
t.Fatalf("message=%q want N/A", check.Message)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("clean", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-clean-*")
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
|
||||
check := CheckGitWorkingTree(dir)
|
||||
if check.Status != StatusOK {
|
||||
t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusOK, check.Message)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("dirty", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-dirty-*")
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
if err := os.WriteFile(filepath.Join(dir, "dirty.txt"), []byte("x"), 0644); err != nil {
|
||||
t.Fatalf("write dirty file: %v", err)
|
||||
}
|
||||
|
||||
check := CheckGitWorkingTree(dir)
|
||||
if check.Status != StatusWarning {
|
||||
t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestCheckGitUpstream(t *testing.T) {
|
||||
t.Run("no upstream", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-up-*")
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
|
||||
check := CheckGitUpstream(dir)
|
||||
if check.Status != StatusWarning {
|
||||
t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
|
||||
}
|
||||
if !strings.Contains(check.Message, "No upstream") {
|
||||
t.Fatalf("message=%q want to mention upstream", check.Message)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("up to date", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-up2-*")
|
||||
remote := mkTmpDirInTmp(t, "bd-git-remote-*")
|
||||
runGit(t, remote, "init", "--bare", "--initial-branch=main")
|
||||
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
runGit(t, dir, "remote", "add", "origin", remote)
|
||||
runGit(t, dir, "push", "-u", "origin", "main")
|
||||
|
||||
check := CheckGitUpstream(dir)
|
||||
if check.Status != StatusOK {
|
||||
t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusOK, check.Message)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("ahead of upstream", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-ahead-*")
|
||||
remote := mkTmpDirInTmp(t, "bd-git-remote2-*")
|
||||
runGit(t, remote, "init", "--bare", "--initial-branch=main")
|
||||
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
runGit(t, dir, "remote", "add", "origin", remote)
|
||||
runGit(t, dir, "push", "-u", "origin", "main")
|
||||
|
||||
commitFile(t, dir, "file2.txt", "x", "local commit")
|
||||
|
||||
check := CheckGitUpstream(dir)
|
||||
if check.Status != StatusWarning {
|
||||
t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
|
||||
}
|
||||
if !strings.Contains(check.Message, "Ahead") {
|
||||
t.Fatalf("message=%q want to mention ahead", check.Message)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("behind upstream", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-behind-*")
|
||||
remote := mkTmpDirInTmp(t, "bd-git-remote3-*")
|
||||
runGit(t, remote, "init", "--bare", "--initial-branch=main")
|
||||
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
runGit(t, dir, "remote", "add", "origin", remote)
|
||||
runGit(t, dir, "push", "-u", "origin", "main")
|
||||
|
||||
// Advance remote via another clone.
|
||||
clone := mkTmpDirInTmp(t, "bd-git-clone-*")
|
||||
runGit(t, clone, "clone", remote, ".")
|
||||
runGit(t, clone, "config", "user.email", "test@test.com")
|
||||
runGit(t, clone, "config", "user.name", "Test User")
|
||||
commitFile(t, clone, "remote.txt", "y", "remote commit")
|
||||
runGit(t, clone, "push", "origin", "main")
|
||||
|
||||
// Update tracking refs.
|
||||
runGit(t, dir, "fetch", "origin")
|
||||
|
||||
check := CheckGitUpstream(dir)
|
||||
if check.Status != StatusWarning {
|
||||
t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
|
||||
}
|
||||
if !strings.Contains(check.Message, "Behind") {
|
||||
t.Fatalf("message=%q want to mention behind", check.Message)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -23,8 +23,8 @@ func setupGitRepo(t *testing.T) string {
|
||||
t.Fatalf("failed to create .beads directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
cmd := exec.Command("git", "init", "--initial-branch=main")
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = dir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
@@ -278,8 +278,8 @@ func setupGitRepoInDir(t *testing.T, dir string) {
|
||||
t.Fatalf("failed to create .beads directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
cmd := exec.Command("git", "init", "--initial-branch=main")
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = dir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
|
||||
@@ -19,7 +19,6 @@ daemon.lock
|
||||
daemon.log
|
||||
daemon.pid
|
||||
bd.sock
|
||||
sync-state.json
|
||||
|
||||
# Local version tracking (prevents upgrade notification spam after git ops)
|
||||
.local_version
|
||||
|
||||
@@ -106,7 +106,7 @@ func CheckPermissions(path string) DoctorCheck {
|
||||
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
if _, err := os.Stat(dbPath); err == nil {
|
||||
// Try to open database
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Permissions",
|
||||
@@ -118,7 +118,7 @@ func CheckPermissions(path string) DoctorCheck {
|
||||
_ = db.Close() // Intentionally ignore close error
|
||||
|
||||
// Try a write test
|
||||
db, err = sql.Open("sqlite", sqliteConnString(dbPath, true))
|
||||
db, err = sql.Open("sqlite", dbPath)
|
||||
if err == nil {
|
||||
_, err = db.Exec("SELECT 1")
|
||||
_ = db.Close() // Intentionally ignore close error
|
||||
|
||||
@@ -51,7 +51,7 @@ func CheckIDFormat(path string) DoctorCheck {
|
||||
}
|
||||
|
||||
// Open database
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Issue IDs",
|
||||
@@ -121,7 +121,7 @@ func CheckDependencyCycles(path string) DoctorCheck {
|
||||
}
|
||||
|
||||
// Open database to check for cycles
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Dependency Cycles",
|
||||
@@ -216,7 +216,7 @@ func CheckTombstones(path string) DoctorCheck {
|
||||
}
|
||||
}
|
||||
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Tombstones",
|
||||
@@ -420,7 +420,7 @@ func CheckRepoFingerprint(path string) DoctorCheck {
|
||||
}
|
||||
|
||||
// Open database
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Repo Fingerprint",
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/beads/internal/beads"
|
||||
"github.com/steveyegge/beads/internal/configfile"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
// CheckJSONLIntegrity scans the workspace's issues JSONL export for
// malformed lines — entries that are not valid JSON or that lack an "id"
// field — and reports an error with repair guidance when any are found.
//
// JSONL path resolution: first the jsonl_export configured in
// .beads/metadata.json (ignoring system files such as interactions.jsonl),
// then a best-effort search of the .beads directory. When no JSONL file
// exists the check returns OK with an "N/A" message.
func CheckJSONLIntegrity(path string) DoctorCheck {
	beadsDir := filepath.Join(path, ".beads")

	// Resolve JSONL path.
	jsonlPath := ""
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
		// A configured export that names a system log file is ignored here;
		// a separate check reports that misconfiguration.
		if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
			p := cfg.JSONLPath(beadsDir)
			if _, err := os.Stat(p); err == nil {
				jsonlPath = p
			}
		}
	}
	if jsonlPath == "" {
		// Fall back to a best-effort discovery within .beads/.
		p := utils.FindJSONLInDir(beadsDir)
		if _, err := os.Stat(p); err == nil {
			jsonlPath = p
		}
	}
	if jsonlPath == "" {
		return DoctorCheck{Name: "JSONL Integrity", Status: StatusOK, Message: "N/A (no JSONL file)"}
	}

	// Best-effort scan for malformed lines.
	f, err := os.Open(jsonlPath) // #nosec G304 -- jsonlPath is within the workspace
	if err != nil {
		// Unreadable file is a warning, not an error: the file exists but
		// we cannot judge its contents.
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusWarning,
			Message: "Unable to read JSONL file",
			Detail:  err.Error(),
		}
	}
	defer f.Close()

	// Count malformed lines, keeping at most five examples for the report.
	var malformed int
	var examples []string
	scanner := bufio.NewScanner(f)
	lineNo := 0
	for scanner.Scan() {
		lineNo++
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			// Blank lines are tolerated and not counted as malformed.
			continue
		}
		// A valid record must parse as JSON and carry a non-empty "id".
		var v struct {
			ID string `json:"id"`
		}
		if err := json.Unmarshal([]byte(line), &v); err != nil || v.ID == "" {
			malformed++
			if len(examples) < 5 {
				if err != nil {
					examples = append(examples, fmt.Sprintf("line %d: %v", lineNo, err))
				} else {
					examples = append(examples, fmt.Sprintf("line %d: missing id", lineNo))
				}
			}
		}
	}
	if err := scanner.Err(); err != nil {
		// Scanner failure (e.g. a line exceeding the buffer) — warn rather
		// than claim the file is corrupt.
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusWarning,
			Message: "Unable to scan JSONL file",
			Detail:  err.Error(),
		}
	}
	if malformed == 0 {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusOK,
			Message: fmt.Sprintf("%s looks valid", filepath.Base(jsonlPath)),
		}
	}

	// If we have a database, we can auto-repair by re-exporting from DB.
	dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
		dbPath = cfg.DatabasePath(beadsDir)
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		// No database available: only a manual restore can fix the export.
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusError,
			Message: fmt.Sprintf("%s has %d malformed line(s)", filepath.Base(jsonlPath), malformed),
			Detail:  strings.Join(examples, "\n"),
			Fix:     "Restore the JSONL file from git or from a backup (no database available for auto-repair).",
		}
	}

	// Database present: doctor --fix can regenerate the export from it.
	return DoctorCheck{
		Name:    "JSONL Integrity",
		Status:  StatusError,
		Message: fmt.Sprintf("%s has %d malformed line(s)", filepath.Base(jsonlPath), malformed),
		Detail:  strings.Join(examples, "\n"),
		Fix:     "Run 'bd doctor --fix' to back up the JSONL and regenerate it from the database.",
	}
}
|
||||
|
||||
// isSystemJSONLFilename reports whether name is one of the JSONL files bd
// maintains internally (deletion log, interaction log, molecule log), which
// must never be treated as the git-tracked issues export.
func isSystemJSONLFilename(name string) bool {
	return name == "deletions.jsonl" ||
		name == "interactions.jsonl" ||
		name == "molecules.jsonl"
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCheckJSONLIntegrity_MalformedLine(t *testing.T) {
|
||||
ws := t.TempDir()
|
||||
beadsDir := filepath.Join(ws, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
|
||||
if err := os.WriteFile(jsonlPath, []byte("{\"id\":\"t-1\"}\n{not json}\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Ensure DB exists so check suggests auto-repair.
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "beads.db"), []byte("x"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
check := CheckJSONLIntegrity(ws)
|
||||
if check.Status != StatusError {
|
||||
t.Fatalf("expected StatusError, got %v (%s)", check.Status, check.Message)
|
||||
}
|
||||
if check.Fix == "" {
|
||||
t.Fatalf("expected Fix guidance")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckJSONLIntegrity_NoJSONL(t *testing.T) {
|
||||
ws := t.TempDir()
|
||||
beadsDir := filepath.Join(ws, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
check := CheckJSONLIntegrity(ws)
|
||||
if check.Status != StatusOK {
|
||||
t.Fatalf("expected StatusOK, got %v (%s)", check.Status, check.Message)
|
||||
}
|
||||
}
|
||||
+8
-26
@@ -53,7 +53,7 @@ func CheckLegacyBeadsSlashCommands(repoPath string) DoctorCheck {
|
||||
Name: "Legacy Commands",
|
||||
Status: "warning",
|
||||
Message: fmt.Sprintf("Old beads integration detected in %s", strings.Join(filesWithLegacyCommands, ", ")),
|
||||
Detail: "Found: /beads:* slash command references (deprecated)\n" +
|
||||
Detail: "Found: /beads:* slash command references (deprecated)\n" +
|
||||
" These commands are token-inefficient (~10.5k tokens per session)",
|
||||
Fix: "Migrate to bd prime hooks for better token efficiency:\n" +
|
||||
"\n" +
|
||||
@@ -104,7 +104,7 @@ func CheckAgentDocumentation(repoPath string) DoctorCheck {
|
||||
Name: "Agent Documentation",
|
||||
Status: "warning",
|
||||
Message: "No agent documentation found",
|
||||
Detail: "Missing: AGENTS.md or CLAUDE.md\n" +
|
||||
Detail: "Missing: AGENTS.md or CLAUDE.md\n" +
|
||||
" Documenting workflow helps AI agents work more effectively",
|
||||
Fix: "Add agent documentation:\n" +
|
||||
" • Run 'bd onboard' to create AGENTS.md with workflow guidance\n" +
|
||||
@@ -187,10 +187,10 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
|
||||
Name: "JSONL Files",
|
||||
Status: "warning",
|
||||
Message: fmt.Sprintf("Multiple JSONL files found: %s", strings.Join(realJSONLFiles, ", ")),
|
||||
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
|
||||
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
|
||||
" Only one JSONL file should be used per repository.",
|
||||
Fix: "Determine which file is current and remove the others:\n" +
|
||||
" 1. Check .beads/metadata.json for 'jsonl_export' setting\n" +
|
||||
" 1. Check 'bd stats' to see which file is being used\n" +
|
||||
" 2. Verify with 'git log .beads/*.jsonl' to see commit history\n" +
|
||||
" 3. Remove the unused file(s): git rm .beads/<unused>.jsonl\n" +
|
||||
" 4. Commit the change",
|
||||
@@ -235,7 +235,7 @@ func CheckLegacyJSONLConfig(repoPath string) DoctorCheck {
|
||||
Name: "JSONL Config",
|
||||
Status: "warning",
|
||||
Message: "Using legacy beads.jsonl filename",
|
||||
Detail: "The canonical filename is now issues.jsonl (bd-6xd).\n" +
|
||||
Detail: "The canonical filename is now issues.jsonl (bd-6xd).\n" +
|
||||
" Legacy beads.jsonl is still supported but should be migrated.",
|
||||
Fix: "Run 'bd doctor --fix' to auto-migrate, or manually:\n" +
|
||||
" 1. git mv .beads/beads.jsonl .beads/issues.jsonl\n" +
|
||||
@@ -251,7 +251,7 @@ func CheckLegacyJSONLConfig(repoPath string) DoctorCheck {
|
||||
Status: "warning",
|
||||
Message: "Config references beads.jsonl but issues.jsonl exists",
|
||||
Detail: "metadata.json says beads.jsonl but the actual file is issues.jsonl",
|
||||
Fix: "Run 'bd doctor --fix' to update the configuration",
|
||||
Fix: "Run 'bd doctor --fix' to update the configuration",
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -303,16 +303,6 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
|
||||
|
||||
// Check if configured JSONL exists
|
||||
if cfg.JSONLExport != "" {
|
||||
if cfg.JSONLExport == "deletions.jsonl" || cfg.JSONLExport == "interactions.jsonl" || cfg.JSONLExport == "molecules.jsonl" {
|
||||
return DoctorCheck{
|
||||
Name: "Database Config",
|
||||
Status: "error",
|
||||
Message: fmt.Sprintf("Invalid jsonl_export %q (system file)", cfg.JSONLExport),
|
||||
Detail: "metadata.json jsonl_export must reference the git-tracked issues export (typically issues.jsonl), not a system log file.",
|
||||
Fix: "Run 'bd doctor --fix' to reset metadata.json jsonl_export to issues.jsonl, then commit the change.",
|
||||
}
|
||||
}
|
||||
|
||||
jsonlPath := cfg.JSONLPath(beadsDir)
|
||||
if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
|
||||
// Check if other .jsonl files exist
|
||||
@@ -325,15 +315,7 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
|
||||
lowerName := strings.ToLower(name)
|
||||
if !strings.Contains(lowerName, "backup") &&
|
||||
!strings.Contains(lowerName, ".orig") &&
|
||||
!strings.Contains(lowerName, ".bak") &&
|
||||
!strings.Contains(lowerName, "~") &&
|
||||
!strings.HasPrefix(lowerName, "backup_") &&
|
||||
name != "deletions.jsonl" &&
|
||||
name != "interactions.jsonl" &&
|
||||
name != "molecules.jsonl" &&
|
||||
!strings.Contains(lowerName, ".base.jsonl") &&
|
||||
!strings.Contains(lowerName, ".left.jsonl") &&
|
||||
!strings.Contains(lowerName, ".right.jsonl") {
|
||||
!strings.Contains(lowerName, ".bak") {
|
||||
otherJSONLs = append(otherJSONLs, name)
|
||||
}
|
||||
}
|
||||
@@ -439,7 +421,7 @@ func CheckFreshClone(repoPath string) DoctorCheck {
|
||||
Name: "Fresh Clone",
|
||||
Status: "warning",
|
||||
Message: fmt.Sprintf("Fresh clone detected (%d issues in %s, no database)", issueCount, jsonlName),
|
||||
Detail: "This appears to be a freshly cloned repository.\n" +
|
||||
Detail: "This appears to be a freshly cloned repository.\n" +
|
||||
" The JSONL file contains issues but no local database exists.\n" +
|
||||
" Run 'bd init' to create the database and import existing issues.",
|
||||
Fix: fmt.Sprintf("Run '%s' to initialize the database and import issues", fixCmd),
|
||||
|
||||
@@ -410,49 +410,6 @@ func TestCheckLegacyJSONLConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckDatabaseConfig_IgnoresSystemJSONLs(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.Mkdir(beadsDir, 0750); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Configure issues.jsonl, but only create interactions.jsonl.
|
||||
metadataPath := filepath.Join(beadsDir, "metadata.json")
|
||||
if err := os.WriteFile(metadataPath, []byte(`{"database":"beads.db","jsonl_export":"issues.jsonl"}`), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "interactions.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
check := CheckDatabaseConfig(tmpDir)
|
||||
if check.Status != "ok" {
|
||||
t.Fatalf("expected ok, got %s: %s\n%s", check.Status, check.Message, check.Detail)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckDatabaseConfig_SystemJSONLExportIsError(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.Mkdir(beadsDir, 0750); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
metadataPath := filepath.Join(beadsDir, "metadata.json")
|
||||
if err := os.WriteFile(metadataPath, []byte(`{"database":"beads.db","jsonl_export":"interactions.jsonl"}`), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(beadsDir, "interactions.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
check := CheckDatabaseConfig(tmpDir)
|
||||
if check.Status != "error" {
|
||||
t.Fatalf("expected error, got %s: %s", check.Status, check.Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckFreshClone(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -312,7 +312,7 @@ func CheckCompactionCandidates(path string) DoctorCheck {
|
||||
// the actual beads directory location.
|
||||
func resolveBeadsDir(beadsDir string) string {
|
||||
redirectFile := filepath.Join(beadsDir, "redirect")
|
||||
data, err := os.ReadFile(redirectFile) //nolint:gosec // redirect file path is constructed from known beadsDir
|
||||
data, err := os.ReadFile(redirectFile)
|
||||
if err != nil {
|
||||
// No redirect file - use original path
|
||||
return beadsDir
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
package doctor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// sqliteConnString builds a modernc.org/sqlite connection string for path,
// attaching the pragmas the doctor checks rely on (foreign keys, busy
// timeout, sqlite time format) and, when readOnly is set, mode=ro.
//
// The busy timeout defaults to 30s and can be overridden via the same
// BD_LOCK_TIMEOUT environment variable viper reads, using Go duration
// syntax (e.g. "5s"). Non-positive or unparsable values are ignored.
// Returns "" for an empty path.
func sqliteConnString(path string, readOnly bool) string {
	path = strings.TrimSpace(path)
	if path == "" {
		return ""
	}

	// Best-effort: honor the same env var viper uses (BD_LOCK_TIMEOUT).
	busy := 30 * time.Second
	if v := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); v != "" {
		// Guard against non-positive durations, which would produce a
		// negative busy_timeout pragma.
		if d, err := time.ParseDuration(v); err == nil && d > 0 {
			busy = d
		}
	}
	busyMs := int64(busy / time.Millisecond)

	// If it's already a URI, append pragmas if absent.
	if strings.HasPrefix(path, "file:") {
		conn := path
		sep := "?"
		if strings.Contains(conn, "?") {
			sep = "&"
		}
		// BUG FIX: match "mode=" only as a query parameter. A bare
		// substring check would false-positive on parameters such as
		// "_journal_mode=wal" and silently drop read-only mode.
		hasMode := strings.Contains(conn, "?mode=") || strings.Contains(conn, "&mode=")
		if readOnly && !hasMode {
			conn += sep + "mode=ro"
			sep = "&"
		}
		if !strings.Contains(conn, "_pragma=busy_timeout") {
			conn += fmt.Sprintf("%s_pragma=busy_timeout(%d)", sep, busyMs)
			sep = "&"
		}
		if !strings.Contains(conn, "_pragma=foreign_keys") {
			conn += sep + "_pragma=foreign_keys(ON)"
			sep = "&"
		}
		if !strings.Contains(conn, "_time_format=") {
			conn += sep + "_time_format=sqlite"
		}
		return conn
	}

	// Plain filesystem path: emit a full URI with all pragmas.
	if readOnly {
		return fmt.Sprintf("file:%s?mode=ro&_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
	}
	return fmt.Sprintf("file:%s?_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, busyMs)
}
|
||||
@@ -333,14 +333,12 @@ func CheckChildParentDependencies(path string) DoctorCheck {
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Query for child→parent BLOCKING dependencies where issue_id starts with depends_on_id + "."
|
||||
// Only matches blocking types (blocks, conditional-blocks, waits-for) that cause deadlock.
|
||||
// Excludes 'parent-child' type which is a legitimate structural hierarchy relationship.
|
||||
// Query for child→parent dependencies where issue_id starts with depends_on_id + "."
|
||||
// This uses SQLite's LIKE pattern matching
|
||||
query := `
|
||||
SELECT d.issue_id, d.depends_on_id
|
||||
FROM dependencies d
|
||||
WHERE d.issue_id LIKE d.depends_on_id || '.%'
|
||||
AND d.type IN ('blocks', 'conditional-blocks', 'waits-for')
|
||||
`
|
||||
rows, err := db.Query(query)
|
||||
if err != nil {
|
||||
|
||||
Reference in New Issue
Block a user