refactor(doctor): consolidate maintenance commands + improve daemon startup

Consolidate clean, repair-deps, validate into bd doctor:
- Add validation checks to doctor (merge artifacts, orphaned deps, duplicates, test pollution, git conflicts)
- Add auto-fix for merge artifacts and orphaned dependencies
- Delete obsolete command files: clean.go, repair_deps.go, validate.go
- Delete orphaned test files: clean_security_test.go, validate_test.go

Improve daemon startup performance:
- Add fast-fail detection when daemon crashed (check lock before retrying)
- Reduce graceful shutdown timeout from 5s to 1s
- Skip daemon connection for root command (just shows help)
- Extract shutdown timeouts as constants (daemonShutdownTimeout, daemonShutdownPollInterval, daemonShutdownAttempts)

Other changes:
- Move rename-prefix command to Maintenance group in help
- Fix Makefile to inject git commit hash via ldflags

New files:
- cmd/bd/doctor/validation.go (5 check functions)
- cmd/bd/doctor/fix/validation.go (2 fix functions)
This commit is contained in:
Ryan Snodgrass
2025-12-22 19:39:51 -08:00
committed by Steve Yegge
parent e60dfaf1f1
commit cafc0b9dfb
16 changed files with 619 additions and 1374 deletions

View File

@@ -8,7 +8,7 @@ all: build
# Build the bd binary # Build the bd binary
build: build:
@echo "Building bd..." @echo "Building bd..."
go build -o bd ./cmd/bd go build -ldflags="-X main.Build=$$(git rev-parse --short HEAD)" -o bd ./cmd/bd
# Run all tests (skips known broken tests listed in .test-skip) # Run all tests (skips known broken tests listed in .test-skip)
test: test:

View File

@@ -1,169 +0,0 @@
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/ui"
)
// TODO: Consider consolidating into 'bd doctor --fix' for simpler maintenance UX

// cleanCmd implements 'bd clean': it deletes temporary git merge artifacts
// from the .beads directory. It never touches the issue database itself -
// the deletion candidates come exclusively from the "Merge artifacts"
// section of .beads/.gitignore (see readMergeArtifactPatterns).
var cleanCmd = &cobra.Command{
	Use:     "clean",
	GroupID: "maint",
	Short:   "Clean up temporary git merge artifacts from .beads directory",
	Long: `Delete temporary git merge artifacts from the .beads directory.
This command removes temporary files created during git merges and conflicts.
It does NOT delete issues from the database - use 'bd cleanup' for that.
Files removed:
- 3-way merge snapshots (beads.base.jsonl, beads.left.jsonl, beads.right.jsonl)
- Merge metadata (*.meta.json)
- Git merge driver temp files (*.json[0-9], *.jsonl[0-9])
Files preserved:
- issues.jsonl (source of truth)
- beads.db (SQLite database)
- metadata.json
- config.yaml
- All daemon files
EXAMPLES:
Clean up temporary files:
bd clean
Preview what would be deleted:
bd clean --dry-run
SEE ALSO:
bd cleanup Delete closed issues from database`,
	Run: func(cmd *cobra.Command, args []string) {
		dryRun, _ := cmd.Flags().GetBool("dry-run")
		// Find beads directory
		beadsDir := findBeadsDir()
		if beadsDir == "" {
			fmt.Fprintf(os.Stderr, "Error: .beads directory not found\n")
			os.Exit(1)
		}
		// Read patterns from .beads/.gitignore (only merge artifacts section)
		cleanPatterns, err := readMergeArtifactPatterns(beadsDir)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error reading .gitignore: %v\n", err)
			os.Exit(1)
		}
		// Collect files to delete. A bad glob pattern only produces a
		// warning; the remaining patterns are still processed.
		var filesToDelete []string
		for _, pattern := range cleanPatterns {
			matches, err := filepath.Glob(filepath.Join(beadsDir, pattern))
			if err != nil {
				fmt.Fprintf(os.Stderr, "Warning: error matching pattern %s: %v\n", pattern, err)
				continue
			}
			filesToDelete = append(filesToDelete, matches...)
		}
		if len(filesToDelete) == 0 {
			fmt.Println("Nothing to clean - all artifacts already removed")
			return
		}
		// Just run by default, no --force needed
		if dryRun {
			fmt.Println(ui.RenderWarn("DRY RUN - no changes will be made"))
		}
		// List the candidates (relative paths where possible) before acting.
		fmt.Printf("Found %d file(s) to clean:\n", len(filesToDelete))
		for _, file := range filesToDelete {
			relPath, err := filepath.Rel(beadsDir, file)
			if err != nil {
				relPath = file
			}
			fmt.Printf(" %s\n", relPath)
		}
		if dryRun {
			return
		}
		// Actually delete the files. A file that vanished between the glob
		// and the Remove (os.IsNotExist) is not counted as an error.
		deletedCount := 0
		errorCount := 0
		for _, file := range filesToDelete {
			if err := os.Remove(file); err != nil {
				if !os.IsNotExist(err) {
					relPath, _ := filepath.Rel(beadsDir, file)
					fmt.Fprintf(os.Stderr, "Warning: failed to delete %s: %v\n", relPath, err)
					errorCount++
				}
			} else {
				deletedCount++
			}
		}
		fmt.Printf("\nDeleted %d file(s)", deletedCount)
		if errorCount > 0 {
			fmt.Printf(" (%d error(s))", errorCount)
		}
		fmt.Println()
	},
}
// readMergeArtifactPatterns reads the .beads/.gitignore file and extracts
// patterns from the "Merge artifacts" section
func readMergeArtifactPatterns(beadsDir string) ([]string, error) {
gitignorePath := filepath.Join(beadsDir, ".gitignore")
// #nosec G304 -- gitignorePath is safely constructed via filepath.Join from beadsDir
// (which comes from findBeadsDir searching upward for .beads). This can only open
// .gitignore within the project's .beads directory. See TestReadMergeArtifactPatterns_PathTraversal
file, err := os.Open(gitignorePath)
if err != nil {
return nil, fmt.Errorf("failed to open .gitignore: %w", err)
}
defer file.Close()
var patterns []string
inMergeSection := false
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
// Look for the merge artifacts section
if strings.Contains(line, "Merge artifacts") {
inMergeSection = true
continue
}
// Stop at the next section (starts with #)
if inMergeSection && strings.HasPrefix(line, "#") {
break
}
// Collect patterns from merge section
if inMergeSection && line != "" && !strings.HasPrefix(line, "#") {
// Skip negation patterns (starting with !)
if !strings.HasPrefix(line, "!") {
patterns = append(patterns, line)
}
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error reading .gitignore: %w", err)
}
return patterns, nil
}
// init registers the clean command (and its --dry-run flag) on the root
// command at program startup.
func init() {
	cleanCmd.Flags().Bool("dry-run", false, "Preview what would be deleted without making changes")
	rootCmd.AddCommand(cleanCmd)
}

View File

@@ -1,296 +0,0 @@
package main
import (
"os"
"path/filepath"
"strings"
"testing"
)
// TestReadMergeArtifactPatterns_PathTraversal verifies that the clean command
// properly validates file paths and prevents path traversal attacks.
//
// This test addresses bd-nbc: gosec G304 flags os.Open(gitignorePath) in
// clean.go:118 for potential file inclusion via variable. We verify that:
// 1. gitignorePath is safely constructed using filepath.Join
// 2. Only .gitignore files within .beads directory can be opened
// 3. Path traversal attempts are prevented by filepath.Join normalization
// 4. Symlinks pointing outside .beads directory are handled safely
func TestReadMergeArtifactPatterns_PathTraversal(t *testing.T) {
	// Table-driven: each case lays out its own temp directory and returns
	// the beadsDir argument to pass to readMergeArtifactPatterns.
	tests := []struct {
		name        string
		setupFunc   func(t *testing.T, tmpDir string) string // Returns beadsDir
		wantErr     bool
		errContains string
	}{
		{
			name: "normal .gitignore in .beads",
			setupFunc: func(t *testing.T, tmpDir string) string {
				beadsDir := filepath.Join(tmpDir, ".beads")
				if err := os.MkdirAll(beadsDir, 0755); err != nil {
					t.Fatalf("Failed to create .beads: %v", err)
				}
				gitignore := filepath.Join(beadsDir, ".gitignore")
				// Raw string is deliberately unindented: its contents are
				// written verbatim as the .gitignore fixture.
				content := `# Merge artifacts
beads.base.jsonl
beads.left.jsonl
beads.right.jsonl
# Other section
something-else
`
				if err := os.WriteFile(gitignore, []byte(content), 0644); err != nil {
					t.Fatalf("Failed to create .gitignore: %v", err)
				}
				return beadsDir
			},
			wantErr: false,
		},
		{
			name: "missing .gitignore",
			setupFunc: func(t *testing.T, tmpDir string) string {
				beadsDir := filepath.Join(tmpDir, ".beads")
				if err := os.MkdirAll(beadsDir, 0755); err != nil {
					t.Fatalf("Failed to create .beads: %v", err)
				}
				// Don't create .gitignore
				return beadsDir
			},
			wantErr:     true,
			errContains: "failed to open .gitignore",
		},
		{
			name: "path traversal via beadsDir (normalized by filepath.Join)",
			setupFunc: func(t *testing.T, tmpDir string) string {
				// Create a .gitignore at tmpDir level (not in .beads)
				gitignore := filepath.Join(tmpDir, ".gitignore")
				if err := os.WriteFile(gitignore, []byte("# Test"), 0644); err != nil {
					t.Fatalf("Failed to create .gitignore: %v", err)
				}
				// Create .beads directory
				beadsDir := filepath.Join(tmpDir, ".beads")
				if err := os.MkdirAll(beadsDir, 0755); err != nil {
					t.Fatalf("Failed to create .beads: %v", err)
				}
				// Try to use path traversal (will be normalized by filepath.Join)
				// This demonstrates that filepath.Join protects against traversal
				return filepath.Join(tmpDir, ".beads", "..")
			},
			wantErr: false, // filepath.Join normalizes ".." to tmpDir, which has .gitignore
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tmpDir := t.TempDir()
			beadsDir := tt.setupFunc(t, tmpDir)
			patterns, err := readMergeArtifactPatterns(beadsDir)
			// Error-expecting cases: the error must occur and carry the
			// expected substring; patterns are not inspected further.
			if tt.wantErr {
				if err == nil {
					t.Errorf("Expected error containing %q, got nil", tt.errContains)
					return
				}
				if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
					t.Errorf("Expected error containing %q, got %q", tt.errContains, err.Error())
				}
				return
			}
			if err != nil {
				t.Errorf("Unexpected error: %v", err)
				return
			}
			// For successful cases, verify we got some patterns
			if tt.name == "normal .gitignore in .beads" && len(patterns) == 0 {
				t.Errorf("Expected to read patterns from .gitignore, got 0")
			}
		})
	}
}
// TestReadMergeArtifactPatterns_SymlinkSafety verifies that symlinks are
// handled safely and don't allow access to files outside .beads directory.
//
// The test creates .beads/.gitignore as a symlink to a file outside .beads
// and confirms that following it is harmless: the target is merely parsed
// as gitignore text, never executed or deleted.
func TestReadMergeArtifactPatterns_SymlinkSafety(t *testing.T) {
	if os.Getenv("CI") == "true" {
		t.Skip("Skipping symlink test in CI (may not have permissions)")
	}
	tmpDir := t.TempDir()
	// Create a sensitive file outside .beads
	sensitiveDir := filepath.Join(tmpDir, "sensitive")
	if err := os.MkdirAll(sensitiveDir, 0755); err != nil {
		t.Fatalf("Failed to create sensitive dir: %v", err)
	}
	sensitivePath := filepath.Join(sensitiveDir, "secrets.txt")
	if err := os.WriteFile(sensitivePath, []byte("SECRET_DATA"), 0644); err != nil {
		t.Fatalf("Failed to create sensitive file: %v", err)
	}
	// Create .beads directory
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads: %v", err)
	}
	// Create a symlink from .beads/.gitignore to sensitive file.
	// Skip (not fail) on platforms/accounts where symlinks are unavailable.
	symlinkPath := filepath.Join(beadsDir, ".gitignore")
	if err := os.Symlink(sensitivePath, symlinkPath); err != nil {
		t.Skipf("Cannot create symlink (may lack permissions): %v", err)
	}
	// Try to read patterns - this will follow the symlink
	// This is actually safe because:
	// 1. The path is constructed via filepath.Join (safe)
	// 2. Following symlinks is normal OS behavior
	// 3. We're just reading a .gitignore file, not executing it
	patterns, err := readMergeArtifactPatterns(beadsDir)
	// The function will read the sensitive file, but since it doesn't
	// contain valid gitignore patterns, it should return empty or error
	if err != nil {
		// Error is acceptable - the sensitive file isn't a valid .gitignore
		t.Logf("Got expected error reading non-.gitignore file: %v", err)
		return
	}
	// If no error, verify that no patterns were extracted (file doesn't
	// have "Merge artifacts" section)
	if len(patterns) > 0 {
		t.Logf("Symlink was followed but extracted %d patterns (likely none useful)", len(patterns))
	}
	// Key insight: This is not actually a security vulnerability because:
	// - We're only reading the file, not executing it
	// - The file path is constructed safely
	// - Following symlinks is expected OS behavior
	// - The worst case is reading .gitignore content, which is not sensitive
}
// TestReadMergeArtifactPatterns_OnlyMergeSection verifies that only patterns
// from the "Merge artifacts" section are extracted, preventing unintended
// file deletion from other sections.
func TestReadMergeArtifactPatterns_OnlyMergeSection(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads: %v", err)
	}
	// Fixture: a protected section before and after the merge-artifacts
	// section. Only the middle section may ever be extracted.
	gitignorePath := filepath.Join(beadsDir, ".gitignore")
	gitignoreBody := `# Important files - DO NOT DELETE
beads.db
metadata.json
config.yaml
# Merge artifacts
beads.base.jsonl
beads.left.jsonl
beads.right.jsonl
*.meta.json
# Daemon files - DO NOT DELETE
daemon.sock
daemon.pid
`
	if err := os.WriteFile(gitignorePath, []byte(gitignoreBody), 0644); err != nil {
		t.Fatalf("Failed to create .gitignore: %v", err)
	}
	got, err := readMergeArtifactPatterns(beadsDir)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Exactly the merge-artifacts entries, nothing more.
	wanted := map[string]bool{
		"beads.base.jsonl":  true,
		"beads.left.jsonl":  true,
		"beads.right.jsonl": true,
		"*.meta.json":       true,
	}
	if len(got) != len(wanted) {
		t.Errorf("Expected %d patterns, got %d: %v", len(wanted), len(got), got)
	}
	// Entries from the surrounding sections must never leak through.
	preserved := []string{"beads.db", "metadata.json", "config.yaml", "daemon.sock", "daemon.pid"}
	for _, pattern := range got {
		if !wanted[pattern] {
			t.Errorf("Unexpected pattern %q - should only extract from Merge artifacts section", pattern)
		}
		for _, keep := range preserved {
			if pattern == keep {
				t.Errorf("Got forbidden pattern %q - this should be preserved, not cleaned!", pattern)
			}
		}
	}
}
// TestReadMergeArtifactPatterns_ValidatesPatternSafety verifies that
// patterns with path traversal attempts behave safely.
func TestReadMergeArtifactPatterns_ValidatesPatternSafety(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatalf("Failed to create .beads: %v", err)
	}
	// Create .gitignore with potentially dangerous patterns
	gitignore := filepath.Join(beadsDir, ".gitignore")
	content := `# Merge artifacts
beads.base.jsonl
/etc/shadow
~/sensitive.txt
`
	if err := os.WriteFile(gitignore, []byte(content), 0644); err != nil {
		t.Fatalf("Failed to create .gitignore: %v", err)
	}
	patterns, err := readMergeArtifactPatterns(beadsDir)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Verify how each extracted pattern behaves when combined with beadsDir
	// the way clean.go combines them:
	for _, pattern := range patterns {
		// Simulate what clean.go does: filepath.Glob(filepath.Join(beadsDir, pattern))
		fullPattern := filepath.Join(beadsDir, pattern)
		// NOTE(review): filepath.Join always joins and Cleans - unlike
		// Python's os.path.join, an absolute second argument does NOT
		// override beadsDir. Join(beadsDir, "/etc/shadow") yields
		// beadsDir + "/etc/shadow", which stays inside beadsDir. The
		// genuine escape risk is a pattern containing ".." elements,
		// which Clean folds upward and out of beadsDir.
		if filepath.IsAbs(pattern) {
			// Log the joined result so the anchoring behavior is visible.
			t.Logf("WARNING: Absolute pattern %q would become %q", pattern, fullPattern)
			// In real usage, glob would fail due to permissions
		} else {
			// Relative pattern without ".." - joined under beadsDir (safe).
			if !strings.Contains(fullPattern, beadsDir) {
				t.Errorf("Relative pattern %q should be within beadsDir, got %q", pattern, fullPattern)
			}
		}
	}
	// Key insight: filepath.Join keeps absolute patterns anchored under
	// beadsDir, so "/etc/shadow" cannot reach the real /etc/shadow here.
	// Patterns containing ".." elements, however, are normalized upward by
	// Clean and could escape .beads - readMergeArtifactPatterns should
	// reject absolute and ".."-bearing patterns before they are globbed.
}

View File

@@ -15,6 +15,16 @@ import (
"github.com/steveyegge/beads/internal/ui" "github.com/steveyegge/beads/internal/ui"
) )
// daemonShutdownTimeout is how long to wait for graceful shutdown before force killing.
// 1 second is sufficient - if daemon hasn't stopped by then, it's likely hung.
const daemonShutdownTimeout = 1 * time.Second
// daemonShutdownPollInterval is how often to check if daemon has stopped.
const daemonShutdownPollInterval = 100 * time.Millisecond
// daemonShutdownAttempts is the number of poll attempts before force kill.
const daemonShutdownAttempts = int(daemonShutdownTimeout / daemonShutdownPollInterval)
// Daemon start failure tracking for exponential backoff // Daemon start failure tracking for exponential backoff
var ( var (
lastDaemonStartAttempt time.Time lastDaemonStartAttempt time.Time
@@ -72,9 +82,9 @@ func restartDaemonForVersionMismatch() bool {
return false return false
} }
// Wait for daemon to stop (up to 5 seconds) // Wait for daemon to stop, then force kill
for i := 0; i < 50; i++ { for i := 0; i < daemonShutdownAttempts; i++ {
time.Sleep(100 * time.Millisecond) time.Sleep(daemonShutdownPollInterval)
if isRunning, _ := isDaemonRunning(pidFile); !isRunning { if isRunning, _ := isDaemonRunning(pidFile); !isRunning {
debug.Logf("old daemon stopped successfully") debug.Logf("old daemon stopped successfully")
break break

View File

@@ -277,15 +277,15 @@ func stopDaemon(pidFile string) {
os.Exit(1) os.Exit(1)
} }
for i := 0; i < 50; i++ { for i := 0; i < daemonShutdownAttempts; i++ {
time.Sleep(100 * time.Millisecond) time.Sleep(daemonShutdownPollInterval)
if isRunning, _ := isDaemonRunning(pidFile); !isRunning { if isRunning, _ := isDaemonRunning(pidFile); !isRunning {
fmt.Println("Daemon stopped") fmt.Println("Daemon stopped")
return return
} }
} }
fmt.Fprintf(os.Stderr, "Warning: daemon did not stop after 5 seconds, forcing termination\n") fmt.Fprintf(os.Stderr, "Warning: daemon did not stop after %v, forcing termination\n", daemonShutdownTimeout)
// Check one more time before killing the process to avoid a race. // Check one more time before killing the process to avoid a race.
if isRunning, _ := isDaemonRunning(pidFile); !isRunning { if isRunning, _ := isDaemonRunning(pidFile); !isRunning {

View File

@@ -396,6 +396,22 @@ func applyFixList(path string, fixes []doctorCheck) {
continue continue
} }
err = fix.SyncBranchHealth(path, syncBranch) err = fix.SyncBranchHealth(path, syncBranch)
case "Merge Artifacts":
err = fix.MergeArtifacts(path)
case "Orphaned Dependencies":
err = fix.OrphanedDependencies(path)
case "Duplicate Issues":
// No auto-fix: duplicates require user review
fmt.Printf(" ⚠ Run 'bd duplicates' to review and merge duplicates\n")
continue
case "Test Pollution":
// No auto-fix: test cleanup requires user review
fmt.Printf(" ⚠ Run 'bd detect-pollution' to review and clean test issues\n")
continue
case "Git Conflicts":
// No auto-fix: git conflicts require manual resolution
fmt.Printf(" ⚠ Resolve conflicts manually: git checkout --ours or --theirs .beads/issues.jsonl\n")
continue
default: default:
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name) fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
fmt.Printf(" Manual fix: %s\n", check.Fix) fmt.Printf(" Manual fix: %s\n", check.Fix)
@@ -749,6 +765,33 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, untrackedCheck) result.Checks = append(result.Checks, untrackedCheck)
// Don't fail overall check for untracked files, just warn // Don't fail overall check for untracked files, just warn
// Check 21: Merge artifacts (from bd clean)
mergeArtifactsCheck := convertDoctorCheck(doctor.CheckMergeArtifacts(path))
result.Checks = append(result.Checks, mergeArtifactsCheck)
// Don't fail overall check for merge artifacts, just warn
// Check 22: Orphaned dependencies (from bd repair-deps, bd validate)
orphanedDepsCheck := convertDoctorCheck(doctor.CheckOrphanedDependencies(path))
result.Checks = append(result.Checks, orphanedDepsCheck)
// Don't fail overall check for orphaned deps, just warn
// Check 23: Duplicate issues (from bd validate)
duplicatesCheck := convertDoctorCheck(doctor.CheckDuplicateIssues(path))
result.Checks = append(result.Checks, duplicatesCheck)
// Don't fail overall check for duplicates, just warn
// Check 24: Test pollution (from bd validate)
pollutionCheck := convertDoctorCheck(doctor.CheckTestPollution(path))
result.Checks = append(result.Checks, pollutionCheck)
// Don't fail overall check for test pollution, just warn
// Check 25: Git conflicts in JSONL (from bd validate)
conflictsCheck := convertDoctorCheck(doctor.CheckGitConflicts(path))
result.Checks = append(result.Checks, conflictsCheck)
if conflictsCheck.Status == statusError {
result.OverallOK = false
}
return result return result
} }

View File

@@ -0,0 +1,168 @@
package fix
import (
"bufio"
"database/sql"
"fmt"
"os"
"path/filepath"
"strings"
_ "github.com/ncruces/go-sqlite3/driver"
_ "github.com/ncruces/go-sqlite3/embed"
)
// MergeArtifacts removes temporary git merge files from .beads directory.
//
// Deletion patterns come from the "Merge artifacts" section of
// .beads/.gitignore when readable; otherwise a built-in default set is used.
// Files that disappeared between glob and removal are ignored; all other
// removal failures are collected and returned as a single error.
func MergeArtifacts(path string) error {
	if err := validateBeadsWorkspace(path); err != nil {
		return err
	}
	beadsDir := filepath.Join(path, ".beads")

	// Prefer the declared patterns; fall back to the defaults on any error.
	patterns, err := readMergeArtifactPatterns(beadsDir)
	if err != nil {
		patterns = []string{"*.base.jsonl", "*.left.jsonl", "*.right.jsonl", "*.meta.json"}
	}

	removed := 0
	var failures []string
	for _, pattern := range patterns {
		matches, globErr := filepath.Glob(filepath.Join(beadsDir, pattern))
		if globErr != nil {
			continue
		}
		for _, file := range matches {
			removeErr := os.Remove(file)
			switch {
			case removeErr == nil:
				removed++
				fmt.Printf(" Removed %s\n", filepath.Base(file))
			case !os.IsNotExist(removeErr):
				failures = append(failures, fmt.Sprintf("%s: %v", filepath.Base(file), removeErr))
			}
		}
	}
	if len(failures) > 0 {
		return fmt.Errorf("failed to remove some files: %s", strings.Join(failures, "; "))
	}
	if removed > 0 {
		fmt.Printf(" Removed %d merge artifact(s)\n", removed)
	} else {
		fmt.Println(" No merge artifacts to remove")
	}
	return nil
}
// readMergeArtifactPatterns reads patterns from .beads/.gitignore merge section
func readMergeArtifactPatterns(beadsDir string) ([]string, error) {
gitignorePath := filepath.Join(beadsDir, ".gitignore")
file, err := os.Open(gitignorePath) // #nosec G304 - path constructed from beadsDir
if err != nil {
return nil, err
}
defer file.Close()
var patterns []string
inMergeSection := false
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if strings.Contains(line, "Merge artifacts") {
inMergeSection = true
continue
}
if inMergeSection && strings.HasPrefix(line, "#") {
break
}
if inMergeSection && line != "" && !strings.HasPrefix(line, "#") && !strings.HasPrefix(line, "!") {
patterns = append(patterns, line)
}
}
return patterns, scanner.Err()
}
// OrphanedDependencies removes dependencies pointing to non-existent issues.
//
// Each removed edge marks the dependent issue dirty (dirty_issues) so the
// next export rewrites it to JSONL. Per-row deletion failures are reported
// as warnings but do not abort the repair.
func OrphanedDependencies(path string) error {
	if err := validateBeadsWorkspace(path); err != nil {
		return err
	}
	beadsDir := filepath.Join(path, ".beads")
	// NOTE(review): "beads.db" is hard-coded here while the doctor checks use
	// beads.CanonicalDatabaseName - confirm these stay in sync.
	dbPath := filepath.Join(beadsDir, "beads.db")

	// Open database
	db, err := openDB(dbPath)
	if err != nil {
		return fmt.Errorf("failed to open database: %w", err)
	}
	defer db.Close()

	// Find dependency rows whose target issue no longer exists.
	query := `
SELECT d.issue_id, d.depends_on_id
FROM dependencies d
LEFT JOIN issues i ON d.depends_on_id = i.id
WHERE i.id IS NULL
`
	rows, err := db.Query(query)
	if err != nil {
		return fmt.Errorf("failed to query orphaned dependencies: %w", err)
	}
	defer rows.Close()
	type orphan struct {
		issueID     string
		dependsOnID string
	}
	var orphans []orphan
	for rows.Next() {
		var o orphan
		// Previously Scan errors were silently dropped; surface them so a
		// corrupt row cannot be mistaken for "nothing to fix".
		if err := rows.Scan(&o.issueID, &o.dependsOnID); err != nil {
			return fmt.Errorf("failed to scan orphaned dependency: %w", err)
		}
		orphans = append(orphans, o)
	}
	// BUG FIX: rows.Err was never checked, so an iteration failure made a
	// partial result look like the complete set of orphans.
	if err := rows.Err(); err != nil {
		return fmt.Errorf("failed to read orphaned dependencies: %w", err)
	}
	if len(orphans) == 0 {
		fmt.Println(" No orphaned dependencies to fix")
		return nil
	}
	// Delete orphaned dependencies
	for _, o := range orphans {
		_, err := db.Exec("DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?",
			o.issueID, o.dependsOnID)
		if err != nil {
			fmt.Printf(" Warning: failed to remove %s→%s: %v\n", o.issueID, o.dependsOnID, err)
		} else {
			// Mark issue as dirty for export
			_, _ = db.Exec("INSERT OR IGNORE INTO dirty_issues (issue_id) VALUES (?)", o.issueID)
			fmt.Printf(" Removed orphaned dependency: %s→%s\n", o.issueID, o.dependsOnID)
		}
	}
	fmt.Printf(" Fixed %d orphaned dependency reference(s)\n", len(orphans))
	return nil
}
// openDB opens a SQLite database for read-write access
//
// The "sqlite3" driver is registered by the blank imports of
// github.com/ncruces/go-sqlite3 at the top of this file. Note that sql.Open
// does not touch the file; connection errors surface on first use.
func openDB(dbPath string) (*sql.DB, error) {
	return sql.Open("sqlite3", dbPath)
}

358
cmd/bd/doctor/validation.go Normal file
View File

@@ -0,0 +1,358 @@
package doctor
import (
"bufio"
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// CheckMergeArtifacts detects temporary git merge files in .beads directory.
// These are created during git merges and should be cleaned up.
func CheckMergeArtifacts(path string) DoctorCheck {
	beadsDir := filepath.Join(path, ".beads")
	if _, err := os.Stat(beadsDir); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Merge Artifacts",
			Status:  "ok",
			Message: "N/A (no .beads directory)",
		}
	}
	// Read patterns from .beads/.gitignore (merge artifacts section)
	patterns, err := readMergeArtifactPatterns(beadsDir)
	if err != nil {
		// No .gitignore or can't read it - use default patterns
		patterns = []string{
			"*.base.jsonl",
			"*.left.jsonl",
			"*.right.jsonl",
			"*.meta.json",
		}
	}
	// Find matching files
	var artifacts []string
	for _, pattern := range patterns {
		matches, err := filepath.Glob(filepath.Join(beadsDir, pattern))
		if err != nil {
			continue
		}
		artifacts = append(artifacts, matches...)
	}
	if len(artifacts) == 0 {
		return DoctorCheck{
			Name:    "Merge Artifacts",
			Status:  "ok",
			Message: "No merge artifacts found",
		}
	}
	// Build list of relative paths for display.
	// BUG FIX: previously a file whose relative path could not be computed
	// was silently dropped from the detail list, so the list could disagree
	// with the count in Message. Fall back to the full path instead (the
	// same fallback 'bd clean' used).
	var relPaths []string
	for _, f := range artifacts {
		rel, err := filepath.Rel(beadsDir, f)
		if err != nil {
			rel = f
		}
		relPaths = append(relPaths, rel)
	}
	return DoctorCheck{
		Name:    "Merge Artifacts",
		Status:  "warning",
		Message: fmt.Sprintf("%d temporary merge file(s) found", len(artifacts)),
		Detail:  strings.Join(relPaths, ", "),
		Fix:     "Run 'bd doctor --fix' to remove merge artifacts",
	}
}
// readMergeArtifactPatterns reads patterns from .beads/.gitignore merge section
func readMergeArtifactPatterns(beadsDir string) ([]string, error) {
gitignorePath := filepath.Join(beadsDir, ".gitignore")
file, err := os.Open(gitignorePath) // #nosec G304 - path constructed from beadsDir
if err != nil {
return nil, err
}
defer file.Close()
var patterns []string
inMergeSection := false
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if strings.Contains(line, "Merge artifacts") {
inMergeSection = true
continue
}
if inMergeSection && strings.HasPrefix(line, "#") {
break
}
if inMergeSection && line != "" && !strings.HasPrefix(line, "#") && !strings.HasPrefix(line, "!") {
patterns = append(patterns, line)
}
}
return patterns, scanner.Err()
}
// CheckOrphanedDependencies detects dependencies pointing to non-existent issues.
//
// All failure modes (missing DB, open/query errors) degrade to an "ok"/"N/A"
// check rather than an error, since this is a best-effort diagnostic.
func CheckOrphanedDependencies(path string) DoctorCheck {
	beadsDir := filepath.Join(path, ".beads")
	dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Orphaned Dependencies",
			Status:  "ok",
			Message: "N/A (no database)",
		}
	}
	// Open database read-only
	db, err := openDBReadOnly(dbPath)
	if err != nil {
		return DoctorCheck{
			Name:    "Orphaned Dependencies",
			Status:  "ok",
			Message: "N/A (unable to open database)",
		}
	}
	defer db.Close()
	// Query for orphaned dependencies. Only the two ID columns are needed
	// for reporting (d.type was previously selected but never used).
	query := `
SELECT d.issue_id, d.depends_on_id
FROM dependencies d
LEFT JOIN issues i ON d.depends_on_id = i.id
WHERE i.id IS NULL
`
	rows, err := db.Query(query)
	if err != nil {
		return DoctorCheck{
			Name:    "Orphaned Dependencies",
			Status:  "ok",
			Message: "N/A (query failed)",
		}
	}
	defer rows.Close()
	var orphans []string
	for rows.Next() {
		var issueID, dependsOnID string
		if err := rows.Scan(&issueID, &dependsOnID); err == nil {
			orphans = append(orphans, fmt.Sprintf("%s→%s", issueID, dependsOnID))
		}
	}
	// BUG FIX: rows.Err was never checked, so a failed iteration could be
	// reported as "no orphaned dependencies". Degrade to N/A instead.
	if err := rows.Err(); err != nil {
		return DoctorCheck{
			Name:    "Orphaned Dependencies",
			Status:  "ok",
			Message: "N/A (query failed)",
		}
	}
	if len(orphans) == 0 {
		return DoctorCheck{
			Name:    "Orphaned Dependencies",
			Status:  "ok",
			Message: "No orphaned dependencies",
		}
	}
	// Truncate the detail string so huge orphan lists stay readable.
	detail := strings.Join(orphans, ", ")
	if len(detail) > 200 {
		detail = detail[:200] + "..."
	}
	return DoctorCheck{
		Name:    "Orphaned Dependencies",
		Status:  "warning",
		Message: fmt.Sprintf("%d orphaned dependency reference(s)", len(orphans)),
		Detail:  detail,
		Fix:     "Run 'bd doctor --fix' to remove orphaned dependencies",
	}
}
// CheckDuplicateIssues detects issues with identical content.
//
// Issues are grouped by exact (title, description); tombstoned issues are
// excluded. Duplicates are only reported, never auto-fixed, because merging
// them requires user review.
func CheckDuplicateIssues(path string) DoctorCheck {
	beadsDir := filepath.Join(path, ".beads")
	dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return DoctorCheck{
			Name:    "Duplicate Issues",
			Status:  "ok",
			Message: "N/A (no database)",
		}
	}
	// Open store to use existing duplicate detection
	store, err := sqlite.New(nil, dbPath)
	if err != nil {
		return DoctorCheck{
			Name:    "Duplicate Issues",
			Status:  "ok",
			Message: "N/A (unable to open database)",
		}
	}
	defer func() { _ = store.Close() }()
	issues, err := store.SearchIssues(nil, "", types.IssueFilter{})
	if err != nil {
		return DoctorCheck{
			Name:    "Duplicate Issues",
			Status:  "ok",
			Message: "N/A (unable to query issues)",
		}
	}
	// Group live issues by title+description.
	// BUG FIX: the key previously used "|" as the separator, so distinct
	// issues could collide when titles/descriptions contain "|" (e.g.
	// title "a|b" + desc "c" vs title "a" + desc "b|c"). NUL cannot appear
	// in normal text, making the key unambiguous.
	seen := make(map[string][]string) // key -> list of IDs
	for _, issue := range issues {
		if issue.Status == types.StatusTombstone {
			continue
		}
		key := issue.Title + "\x00" + issue.Description
		seen[key] = append(seen[key], issue.ID)
	}
	var duplicateGroups int
	var totalDuplicates int
	for _, ids := range seen {
		if len(ids) > 1 {
			duplicateGroups++
			totalDuplicates += len(ids) - 1 // exclude the canonical one
		}
	}
	if duplicateGroups == 0 {
		return DoctorCheck{
			Name:    "Duplicate Issues",
			Status:  "ok",
			Message: "No duplicate issues",
		}
	}
	return DoctorCheck{
		Name:    "Duplicate Issues",
		Status:  "warning",
		Message: fmt.Sprintf("%d duplicate issue(s) in %d group(s)", totalDuplicates, duplicateGroups),
		Detail:  "Duplicates cannot be auto-fixed",
		Fix:     "Run 'bd duplicates' to review and merge duplicates",
	}
}
// CheckTestPollution detects test issues that may have leaked into the database.
func CheckTestPollution(path string) DoctorCheck {
	// Helper for the several "nothing to report" outcomes of this check.
	okCheck := func(msg string) DoctorCheck {
		return DoctorCheck{
			Name:    "Test Pollution",
			Status:  "ok",
			Message: msg,
		}
	}

	dbPath := filepath.Join(filepath.Join(path, ".beads"), beads.CanonicalDatabaseName)
	if _, statErr := os.Stat(dbPath); os.IsNotExist(statErr) {
		return okCheck("N/A (no database)")
	}
	db, openErr := openDBReadOnly(dbPath)
	if openErr != nil {
		return okCheck("N/A (unable to open database)")
	}
	defer db.Close()

	// Count live issues whose title or ID matches common test-fixture shapes.
	query := `
SELECT COUNT(*) FROM issues
WHERE status != 'tombstone'
AND (
title LIKE 'test-%' OR
title LIKE 'Test Issue%' OR
title LIKE '%test issue%' OR
id LIKE 'test-%'
)
`
	var suspect int
	if scanErr := db.QueryRow(query).Scan(&suspect); scanErr != nil {
		return okCheck("N/A (query failed)")
	}
	if suspect == 0 {
		return okCheck("No test pollution detected")
	}
	return DoctorCheck{
		Name:    "Test Pollution",
		Status:  "warning",
		Message: fmt.Sprintf("%d potential test issue(s) detected", suspect),
		Detail:  "Test issues may have leaked into production database",
		Fix:     "Run 'bd detect-pollution' to review and clean test issues",
	}
}
// CheckGitConflicts detects git conflict markers in JSONL file.
func CheckGitConflicts(path string) DoctorCheck {
	jsonlPath := filepath.Join(filepath.Join(path, ".beads"), "issues.jsonl")
	if _, statErr := os.Stat(jsonlPath); os.IsNotExist(statErr) {
		return DoctorCheck{
			Name:    "Git Conflicts",
			Status:  "ok",
			Message: "N/A (no JSONL file)",
		}
	}
	data, readErr := os.ReadFile(jsonlPath) // #nosec G304 - path constructed safely
	if readErr != nil {
		return DoctorCheck{
			Name:    "Git Conflicts",
			Status:  "ok",
			Message: "N/A (unable to read JSONL)",
		}
	}

	// A line is a conflict marker if (after trimming) it begins one of the
	// three sections git writes during a merge conflict.
	isMarker := func(line []byte) bool {
		trimmed := bytes.TrimSpace(line)
		return bytes.HasPrefix(trimmed, []byte("<<<<<<< ")) ||
			bytes.Equal(trimmed, []byte("=======")) ||
			bytes.HasPrefix(trimmed, []byte(">>>>>>> "))
	}
	var hits []int
	for idx, raw := range bytes.Split(data, []byte("\n")) {
		if isMarker(raw) {
			hits = append(hits, idx+1) // 1-based line numbers for display
		}
	}
	if len(hits) == 0 {
		return DoctorCheck{
			Name:    "Git Conflicts",
			Status:  "ok",
			Message: "No git conflicts in JSONL",
		}
	}
	// Conflict markers make the JSONL unparseable, so this is an error,
	// not a warning.
	return DoctorCheck{
		Name:    "Git Conflicts",
		Status:  "error",
		Message: fmt.Sprintf("Git conflict markers found at %d location(s)", len(hits)),
		Detail:  fmt.Sprintf("Conflict markers at lines: %v", hits),
		Fix:     "Resolve conflicts manually: git checkout --ours or --theirs .beads/issues.jsonl",
	}
}

View File

@@ -118,12 +118,24 @@ var (
quietFlag bool // Suppress non-essential output quietFlag bool // Suppress non-essential output
) )
// Command group IDs for help organization
const (
GroupMaintenance = "maintenance"
GroupIntegrations = "integrations"
)
func init() { func init() {
// Initialize viper configuration // Initialize viper configuration
if err := config.Initialize(); err != nil { if err := config.Initialize(); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to initialize config: %v\n", err) fmt.Fprintf(os.Stderr, "Warning: failed to initialize config: %v\n", err)
} }
// Add command groups for organized help output
rootCmd.AddGroup(
&cobra.Group{ID: GroupMaintenance, Title: "Maintenance:"},
&cobra.Group{ID: GroupIntegrations, Title: "Integrations & Advanced:"},
)
// Register persistent flags // Register persistent flags
rootCmd.PersistentFlags().StringVar(&dbPath, "db", "", "Database path (default: auto-discover .beads/*.db)") rootCmd.PersistentFlags().StringVar(&dbPath, "db", "", "Database path (default: auto-discover .beads/*.db)")
rootCmd.PersistentFlags().StringVar(&actor, "actor", "", "Actor name for audit trail (default: $BD_ACTOR or $USER)") rootCmd.PersistentFlags().StringVar(&actor, "actor", "", "Actor name for audit trail (default: $BD_ACTOR or $USER)")
@@ -391,6 +403,11 @@ var rootCmd = &cobra.Command{
return return
} }
// Skip for root command with no subcommand (just shows help)
if cmd.Parent() == nil && cmdName == "bd" {
return
}
// Also skip for --version flag on root command (cmdName would be "bd") // Also skip for --version flag on root command (cmdName would be "bd")
if v, _ := cmd.Flags().GetBool("version"); v { if v, _ := cmd.Flags().GetBool("version"); v {
return return

View File

@@ -209,7 +209,7 @@ func findInProgressMolecules(ctx context.Context, s storage.Storage, agent strin
} }
resp, err := daemonClient.List(listArgs) resp, err := daemonClient.List(listArgs)
if err == nil { if err == nil {
json.Unmarshal(resp.Data, &inProgressIssues) _ = json.Unmarshal(resp.Data, &inProgressIssues)
} }
} else { } else {
// Direct query - search for in_progress issues // Direct query - search for in_progress issues

View File

@@ -22,7 +22,7 @@ import (
var renamePrefixCmd = &cobra.Command{ var renamePrefixCmd = &cobra.Command{
Use: "rename-prefix <new-prefix>", Use: "rename-prefix <new-prefix>",
GroupID: "advanced", GroupID: GroupMaintenance,
Short: "Rename the issue prefix for all issues in the database", Short: "Rename the issue prefix for all issues in the database",
Long: `Rename the issue prefix for all issues in the database. Long: `Rename the issue prefix for all issues in the database.
This will update all issue IDs and all text references across all fields. This will update all issue IDs and all text references across all fields.

View File

@@ -1,174 +0,0 @@
// Package main implements the bd CLI dependency repair command.
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
)
// TODO: Consider consolidating into 'bd doctor --fix' for simpler maintenance UX

// repairDepsCmd scans every dependency record for targets that no longer
// exist, reports the orphans, and (with --fix or --interactive) deletes them
// with raw SQL. Direct SQL is used deliberately: RemoveDependency would try to
// mark the missing target as dirty, which fails for orphans.
var repairDepsCmd = &cobra.Command{
	Use:     "repair-deps",
	GroupID: "maint",
	Short:   "Find and fix orphaned dependency references",
	Long: `Scans all issues for dependencies pointing to non-existent issues.
Reports orphaned dependencies and optionally removes them with --fix.
Interactive mode with --interactive prompts for each orphan.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Repair writes to the database, so refuse in readonly mode.
		CheckReadonly("repair-deps")

		fix, _ := cmd.Flags().GetBool("fix")
		interactive, _ := cmd.Flags().GetBool("interactive")

		// If daemon is running but doesn't support this command, use direct storage
		if daemonClient != nil && store == nil {
			var err error
			store, err = sqlite.New(rootCtx, dbPath)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
				os.Exit(1)
			}
			defer func() { _ = store.Close() }()
		}

		ctx := rootCtx

		// Get all dependency records
		allDeps, err := store.GetAllDependencyRecords(ctx)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: failed to get dependencies: %v\n", err)
			os.Exit(1)
		}

		// Get all issues to check existence
		issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: failed to list issues: %v\n", err)
			os.Exit(1)
		}

		// Build set of valid issue IDs
		validIDs := make(map[string]bool)
		for _, issue := range issues {
			validIDs[issue.ID] = true
		}

		// Find orphaned dependencies: records whose depends_on target is gone.
		type orphan struct {
			issueID     string
			dependsOnID string
			depType     types.DependencyType
		}
		var orphans []orphan
		for issueID, deps := range allDeps {
			if !validIDs[issueID] {
				// The issue itself doesn't exist, skip (will be cleaned up separately)
				continue
			}
			for _, dep := range deps {
				if !validIDs[dep.DependsOnID] {
					orphans = append(orphans, orphan{
						issueID:     dep.IssueID,
						dependsOnID: dep.DependsOnID,
						depType:     dep.Type,
					})
				}
			}
		}

		// JSON mode: emit the orphan list (and a fixed count when a fix mode
		// was requested) and return without prompting or printing text.
		if jsonOutput {
			result := map[string]interface{}{
				"orphans_found": len(orphans),
				"orphans":       []map[string]string{},
			}
			if len(orphans) > 0 {
				orphanList := make([]map[string]string, len(orphans))
				for i, o := range orphans {
					orphanList[i] = map[string]string{
						"issue_id":      o.issueID,
						"depends_on_id": o.dependsOnID,
						"type":          string(o.depType),
					}
				}
				result["orphans"] = orphanList
			}
			if fix || interactive {
				result["fixed"] = len(orphans)
			}
			outputJSON(result)
			return
		}

		// Report results
		if len(orphans) == 0 {
			fmt.Printf("\n%s No orphaned dependencies found\n\n", ui.RenderPass("✓"))
			return
		}

		fmt.Printf("\n%s Found %d orphaned dependencies:\n\n", ui.RenderWarn("⚠"), len(orphans))
		for i, o := range orphans {
			fmt.Printf("%d. %s → %s (%s) [%s does not exist]\n",
				i+1, o.issueID, o.dependsOnID, o.depType, o.dependsOnID)
		}
		fmt.Println()

		// Fix if requested
		if interactive {
			// Prompt per orphan; only 'y'/'Y' deletes.
			fixed := 0
			for _, o := range orphans {
				fmt.Printf("Remove dependency %s → %s (%s)? [y/N]: ", o.issueID, o.dependsOnID, o.depType)
				var response string
				_, _ = fmt.Scanln(&response)
				if response == "y" || response == "Y" {
					// Use direct SQL to remove orphaned dependencies
					// RemoveDependency tries to mark the depends_on issue as dirty, which fails for orphans
					db := store.UnderlyingDB()
					_, err := db.ExecContext(ctx, "DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?",
						o.issueID, o.dependsOnID)
					if err != nil {
						fmt.Fprintf(os.Stderr, "Error removing dependency: %v\n", err)
					} else {
						// Mark the issue as dirty
						_, _ = db.ExecContext(ctx, "INSERT OR IGNORE INTO dirty_issues (issue_id) VALUES (?)", o.issueID)
						fixed++
					}
				}
			}
			markDirtyAndScheduleFlush()
			fmt.Printf("\n%s Fixed %d orphaned dependencies\n\n", ui.RenderPass("✓"), fixed)
		} else if fix {
			// Non-interactive: delete every orphan, continuing past errors.
			db := store.UnderlyingDB()
			for _, o := range orphans {
				// Use direct SQL to remove orphaned dependencies
				_, err := db.ExecContext(ctx, "DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?",
					o.issueID, o.dependsOnID)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Error removing dependency %s → %s: %v\n",
						o.issueID, o.dependsOnID, err)
				} else {
					// Mark the issue as dirty
					_, _ = db.ExecContext(ctx, "INSERT OR IGNORE INTO dirty_issues (issue_id) VALUES (?)", o.issueID)
				}
			}
			markDirtyAndScheduleFlush()
			fmt.Printf("%s Fixed %d orphaned dependencies\n\n", ui.RenderPass("✓"), len(orphans))
		} else {
			fmt.Printf("Run with --fix to automatically remove orphaned dependencies\n")
			fmt.Printf("Run with --interactive to review each dependency\n\n")
		}
	},
}
// init registers repair-deps and its flags on the root command.
func init() {
	repairDepsCmd.Flags().Bool("fix", false, "Automatically remove orphaned dependencies")
	repairDepsCmd.Flags().Bool("interactive", false, "Interactively review each orphaned dependency")
	repairDepsCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output repair results in JSON format")
	rootCmd.AddCommand(repairDepsCmd)
}

View File

@@ -7,6 +7,7 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/git" "github.com/steveyegge/beads/internal/git"
@@ -323,12 +324,12 @@ func stopDaemonQuiet(pidFile string) {
_ = sendStopSignal(process) _ = sendStopSignal(process)
// Wait up to 5 seconds for daemon to stop // Wait for daemon to stop gracefully
for i := 0; i < 50; i++ { for i := 0; i < daemonShutdownAttempts; i++ {
time.Sleep(daemonShutdownPollInterval)
if isRunning, _ := isDaemonRunning(pidFile); !isRunning { if isRunning, _ := isDaemonRunning(pidFile); !isRunning {
return return
} }
// Small sleep handled by the check
} }
// Force kill if still running // Force kill if still running

View File

@@ -1,378 +0,0 @@
package main
import (
"bytes"
"context"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
)
// TODO: Consider consolidating into 'bd doctor --fix' for simpler maintenance UX

// validateCmd runs the selected integrity checks (orphans, duplicates,
// pollution, git conflicts) against direct storage and reports the results.
// It exits 1 when unfixed problems remain, 2 on bad --checks input.
var validateCmd = &cobra.Command{
	Use:     "validate",
	GroupID: "maint",
	Short:   "Run comprehensive database health checks",
	Long: `Run all validation checks to ensure database integrity:
- Orphaned dependencies (references to deleted issues)
- Duplicate issues (identical content)
- Test pollution (leaked test issues)
- Git merge conflicts in JSONL
Example:
bd validate # Run all checks
bd validate --fix-all # Auto-fix all issues
bd validate --checks=orphans,dupes # Run specific checks
bd validate --checks=conflicts # Check for git conflicts
bd validate --json # Output in JSON format`,
	Run: func(cmd *cobra.Command, _ []string) {
		fixAll, _ := cmd.Flags().GetBool("fix-all")

		// Block writes in readonly mode (--fix-all modifies data)
		if fixAll {
			CheckReadonly("validate --fix-all")
		}

		// Check daemon mode - not supported yet (uses direct storage access)
		if daemonClient != nil {
			fmt.Fprintf(os.Stderr, "Error: validate command not yet supported in daemon mode\n")
			fmt.Fprintf(os.Stderr, "Use: bd --no-daemon validate\n")
			os.Exit(1)
		}

		checksFlag, _ := cmd.Flags().GetString("checks")
		jsonOut, _ := cmd.Flags().GetBool("json")

		ctx := rootCtx

		// Check database freshness before reading (bd-2q6d, bd-c4rq)
		// Skip check when using daemon (daemon auto-imports on staleness)
		if daemonClient == nil {
			if err := ensureDatabaseFresh(ctx); err != nil {
				fmt.Fprintf(os.Stderr, "Error: %v\n", err)
				os.Exit(1)
			}
		}

		// Parse and normalize checks
		checks, err := parseChecks(checksFlag)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			fmt.Fprintf(os.Stderr, "Valid checks: orphans, duplicates, pollution, conflicts\n")
			os.Exit(2)
		}

		// Fetch all issues once for checks that need them
		// (the conflicts check reads the JSONL file, not the issue list).
		var allIssues []*types.Issue
		needsIssues := false
		for _, check := range checks {
			if check == "orphans" || check == "duplicates" || check == "pollution" {
				needsIssues = true
				break
			}
		}
		if needsIssues {
			allIssues, err = store.SearchIssues(ctx, "", types.IssueFilter{})
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error fetching issues: %v\n", err)
				os.Exit(1)
			}
		}

		results := validationResults{
			checks:     make(map[string]checkResult),
			checkOrder: checks,
		}

		// Run each check
		for _, check := range checks {
			switch check {
			case "orphans":
				results.checks["orphans"] = validateOrphanedDeps(ctx, allIssues, fixAll)
			case "duplicates":
				results.checks["duplicates"] = validateDuplicates(ctx, allIssues, fixAll)
			case "pollution":
				results.checks["pollution"] = validatePollution(ctx, allIssues, fixAll)
			case "conflicts":
				results.checks["conflicts"] = validateGitConflicts(ctx, fixAll)
			}
		}

		// Output results
		if jsonOut {
			outputJSON(results.toJSON())
		} else {
			results.print(fixAll)
		}

		// Exit with error code if issues found or errors occurred
		if results.hasFailures() {
			os.Exit(1)
		}
	},
}
// parseChecks normalizes and validates check names.
//
// An empty flag selects every known check, in canonical order. Otherwise the
// comma-separated entries are trimmed, lowercased, mapped through known
// synonyms (dupes -> duplicates, git-conflicts -> conflicts), deduplicated
// while preserving first-seen order, and validated. An unknown entry returns
// an error naming the offending token exactly as the user typed it.
func parseChecks(checksFlag string) ([]string, error) {
	allChecks := []string{"orphans", "duplicates", "pollution", "conflicts"}
	if checksFlag == "" {
		return allChecks, nil
	}

	// Map a lowercased, trimmed token to its canonical check name.
	canonical := func(s string) string {
		switch s {
		case "dupes":
			return "duplicates"
		case "git-conflicts":
			return "conflicts"
		}
		return s
	}

	known := make(map[string]bool, len(allChecks))
	for _, c := range allChecks {
		known[c] = true
	}

	var selected []string
	added := make(map[string]bool)
	for _, raw := range strings.Split(checksFlag, ",") {
		name := canonical(strings.ToLower(strings.TrimSpace(raw)))
		if name == "" {
			continue // ignore empty segments like "a,,b"
		}
		if !known[name] {
			return nil, fmt.Errorf("unknown check: %s", raw)
		}
		if added[name] {
			continue
		}
		added[name] = true
		selected = append(selected, name)
	}
	return selected, nil
}
// checkResult holds the outcome of one validation check.
type checkResult struct {
	name        string   // human-readable check name
	issueCount  int      // problems detected
	fixedCount  int      // problems auto-fixed
	err         error    // non-nil when the check itself failed to run
	suggestions []string // follow-up commands for the user
}

// validationResults aggregates every check's outcome; checkOrder preserves
// the order checks were run in for deterministic reporting.
type validationResults struct {
	checks     map[string]checkResult
	checkOrder []string
}

// hasFailures reports whether any check errored or left problems unfixed.
func (r *validationResults) hasFailures() bool {
	for _, c := range r.checks {
		unfixed := c.issueCount > 0 && c.fixedCount < c.issueCount
		if c.err != nil || unfixed {
			return true
		}
	}
	return false
}

// toJSON flattens the results into a JSON-serializable map with per-check
// detail plus overall totals and a "healthy" summary flag.
func (r *validationResults) toJSON() map[string]interface{} {
	perCheck := map[string]interface{}{}
	totalIssues, totalFixed := 0, 0
	hasErrors := false

	for name, c := range r.checks {
		var errVal interface{}
		if c.err != nil {
			errVal = c.err.Error()
			hasErrors = true
		}
		perCheck[name] = map[string]interface{}{
			"issue_count": c.issueCount,
			"fixed_count": c.fixedCount,
			"error":       errVal,
			"failed":      c.err != nil,
			"suggestions": c.suggestions,
		}
		totalIssues += c.issueCount
		totalFixed += c.fixedCount
	}

	return map[string]interface{}{
		"checks":       perCheck,
		"total_issues": totalIssues,
		"total_fixed":  totalFixed,
		"healthy":      !hasErrors && (totalIssues == 0 || totalIssues == totalFixed),
	}
}
// print renders the human-readable validation report to stdout: one line per
// check in run order, a summary line, and (when problems remain) the
// accumulated per-check suggestions. The boolean parameter is unused.
func (r *validationResults) print(_ bool) {
	fmt.Println("\nValidation Results:")
	fmt.Println("===================")

	totalIssues := 0
	totalFixed := 0

	// Print in deterministic order
	for _, name := range r.checkOrder {
		result := r.checks[name]
		prefix := "✓"
		var coloredPrefix string
		if result.err != nil {
			// The check itself failed to run.
			prefix = "✗"
			coloredPrefix = ui.RenderFail(prefix)
			fmt.Printf("%s %s: ERROR - %v\n", coloredPrefix, result.name, result.err)
		} else if result.issueCount > 0 {
			// Problems found; mention fixes only when some were applied.
			prefix = "⚠"
			coloredPrefix = ui.RenderWarn(prefix)
			if result.fixedCount > 0 {
				fmt.Printf("%s %s: %d found, %d fixed\n", coloredPrefix, result.name, result.issueCount, result.fixedCount)
			} else {
				fmt.Printf("%s %s: %d found\n", coloredPrefix, result.name, result.issueCount)
			}
		} else {
			coloredPrefix = ui.RenderPass(prefix)
			fmt.Printf("%s %s: OK\n", coloredPrefix, result.name)
		}
		totalIssues += result.issueCount
		totalFixed += result.fixedCount
	}

	fmt.Println()
	if totalIssues == 0 {
		fmt.Printf("%s Database is healthy!\n", ui.RenderPass("✓"))
	} else if totalFixed == totalIssues {
		fmt.Printf("%s Fixed all %d issues\n", ui.RenderPass("✓"), totalFixed)
	} else {
		remaining := totalIssues - totalFixed
		fmt.Printf("%s Found %d issues", ui.RenderWarn("⚠"), totalIssues)
		if totalFixed > 0 {
			fmt.Printf(" (fixed %d, %d remaining)", totalFixed, remaining)
		}
		fmt.Println()

		// Print suggestions (map iteration, so suggestion order is not deterministic)
		fmt.Println("\nRecommendations:")
		for _, result := range r.checks {
			for _, suggestion := range result.suggestions {
				fmt.Printf(" - %s\n", suggestion)
			}
		}
	}
}
// validateOrphanedDeps finds dependencies that point at nonexistent issues
// and, when fix is true, removes them through the store (marking the database
// dirty if anything was removed). A suggestion is emitted whenever orphans
// remain unfixed.
func validateOrphanedDeps(ctx context.Context, allIssues []*types.Issue, fix bool) checkResult {
	result := checkResult{name: "orphaned dependencies"}

	// Set of every issue ID that actually exists.
	known := make(map[string]bool, len(allIssues))
	for _, issue := range allIssues {
		known[issue.ID] = true
	}

	// Collect (issue, missing target) pairs.
	type orphanedDep struct {
		issueID    string
		orphanedID string
	}
	var orphaned []orphanedDep
	for _, issue := range allIssues {
		for _, dep := range issue.Dependencies {
			if !known[dep.DependsOnID] {
				orphaned = append(orphaned, orphanedDep{issueID: issue.ID, orphanedID: dep.DependsOnID})
			}
		}
	}
	result.issueCount = len(orphaned)

	if fix && len(orphaned) > 0 {
		// Remove each orphaned edge; count only successful removals.
		for _, o := range orphaned {
			if err := store.RemoveDependency(ctx, o.issueID, o.orphanedID, actor); err == nil {
				result.fixedCount++
			}
		}
		if result.fixedCount > 0 {
			markDirtyAndScheduleFlush()
		}
	}

	if result.issueCount > result.fixedCount {
		result.suggestions = append(result.suggestions, "Run 'bd repair-deps --fix' to remove orphaned dependencies")
	}
	return result
}
// validateDuplicates reports issues whose content duplicates another issue.
// Duplicates are never auto-merged (merging needs human review), so even with
// fix=true this only emits a suggestion pointing at 'bd duplicates'.
func validateDuplicates(_ context.Context, allIssues []*types.Issue, fix bool) checkResult {
	result := checkResult{name: "duplicates"}

	groups := findDuplicateGroups(allIssues)
	// Each group keeps one canonical issue; the rest count as duplicates.
	for _, group := range groups {
		result.issueCount += len(group) - 1
	}

	switch {
	case fix && len(groups) > 0:
		result.suggestions = append(result.suggestions,
			fmt.Sprintf("Run 'bd duplicates --auto-merge' to merge %d duplicate groups", len(groups)))
	case result.issueCount > 0:
		result.suggestions = append(result.suggestions,
			fmt.Sprintf("Run 'bd duplicates' to review %d duplicate groups", len(groups)))
	}
	return result
}
// validatePollution reports test issues that leaked into the database.
// Deleting issues is destructive, so even with fix=true this only suggests
// the 'bd detect-pollution' cleanup command rather than deleting anything.
func validatePollution(_ context.Context, allIssues []*types.Issue, fix bool) checkResult {
	result := checkResult{name: "test pollution"}

	polluted := detectTestPollution(allIssues)
	result.issueCount = len(polluted)

	switch {
	case fix && len(polluted) > 0:
		result.suggestions = append(result.suggestions,
			fmt.Sprintf("Run 'bd detect-pollution --clean' to delete %d test issues", len(polluted)))
	case result.issueCount > 0:
		result.suggestions = append(result.suggestions,
			fmt.Sprintf("Run 'bd detect-pollution' to review %d potential test issues", len(polluted)))
	}
	return result
}
// validateGitConflicts scans the JSONL export for git conflict markers.
// A missing file means no conflicts; a read failure is a check error.
// Conflicts can never be auto-fixed, so the suggestions describe manual
// resolution and fix=true only adds a note to that effect.
func validateGitConflicts(_ context.Context, fix bool) checkResult {
	result := checkResult{name: "git conflicts"}

	jsonlPath := findJSONLPath()
	// nolint:gosec // G304: jsonlPath is validated JSONL file from findJSONLPath
	data, err := os.ReadFile(jsonlPath)
	switch {
	case err != nil && os.IsNotExist(err):
		// No JSONL file = no conflicts
		return result
	case err != nil:
		result.err = fmt.Errorf("failed to read JSONL: %w", err)
		return result
	}

	// Detect markers on raw bytes (before JSON decoding) so issue content that
	// merely mentions the marker strings cannot cause false positives.
	var conflictLines []int
	for i, raw := range bytes.Split(data, []byte("\n")) {
		line := bytes.TrimSpace(raw)
		if bytes.HasPrefix(line, []byte("<<<<<<< ")) ||
			bytes.Equal(line, []byte("=======")) ||
			bytes.HasPrefix(line, []byte(">>>>>>> ")) {
			conflictLines = append(conflictLines, i+1)
		}
	}

	if len(conflictLines) > 0 {
		result.issueCount = 1 // one conflict situation, however many markers
		result.suggestions = append(result.suggestions,
			fmt.Sprintf("Git conflict markers found in %s at lines: %v", jsonlPath, conflictLines),
			"To resolve, choose one version:",
			" git checkout --ours .beads/issues.jsonl && bd import -i .beads/issues.jsonl",
			" git checkout --theirs .beads/issues.jsonl && bd import -i .beads/issues.jsonl",
			"For advanced field-level merging: https://github.com/neongreen/mono/tree/main/beads-merge")
	}

	if fix && result.issueCount > 0 {
		result.suggestions = append(result.suggestions,
			"Note: Git conflicts cannot be auto-fixed with --fix-all")
	}
	return result
}
// init registers validate and its flags on the root command.
func init() {
	validateCmd.Flags().Bool("fix-all", false, "Auto-fix all fixable issues")
	validateCmd.Flags().String("checks", "", "Comma-separated list of checks (orphans,duplicates,pollution,conflicts)")
	rootCmd.AddCommand(validateCmd)
}

View File

@@ -1,345 +0,0 @@
package main
import (
"context"
"os"
"testing"
"github.com/steveyegge/beads/internal/types"
)
// TestParseChecks covers default selection, synonym mapping, whitespace
// trimming, deduplication, empty-segment handling, and rejection of unknown
// check names.
func TestParseChecks(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name      string
		input     string
		want      []string
		wantError bool
	}{
		{
			name:  "empty returns all defaults",
			input: "",
			want:  []string{"orphans", "duplicates", "pollution", "conflicts"},
		},
		{
			name:  "single check",
			input: "orphans",
			want:  []string{"orphans"},
		},
		{
			name:  "multiple checks",
			input: "orphans,duplicates",
			want:  []string{"orphans", "duplicates"},
		},
		{
			name:  "synonym dupes->duplicates",
			input: "dupes",
			want:  []string{"duplicates"},
		},
		{
			name:  "synonym git-conflicts->conflicts",
			input: "git-conflicts",
			want:  []string{"conflicts"},
		},
		{
			name:  "mixed with whitespace",
			input: " orphans , duplicates , pollution ",
			want:  []string{"orphans", "duplicates", "pollution"},
		},
		{
			name:  "deduplication",
			input: "orphans,orphans,duplicates",
			want:  []string{"orphans", "duplicates"},
		},
		{
			name:      "invalid check",
			input:     "orphans,invalid,duplicates",
			wantError: true,
		},
		{
			name:  "empty parts ignored",
			input: "orphans,,duplicates",
			want:  []string{"orphans", "duplicates"},
		},
	}

	for _, tt := range tests {
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, err := parseChecks(tt.input)
			if tt.wantError {
				if err == nil {
					t.Errorf("parseChecks(%q) expected error, got nil", tt.input)
				}
				return
			}
			if err != nil {
				t.Fatalf("parseChecks(%q) unexpected error: %v", tt.input, err)
			}
			// Compare length first, then element-by-element (order matters).
			if len(got) != len(tt.want) {
				t.Errorf("parseChecks(%q) length = %d, want %d", tt.input, len(got), len(tt.want))
				return
			}
			for i := range got {
				if got[i] != tt.want[i] {
					t.Errorf("parseChecks(%q)[%d] = %q, want %q", tt.input, i, got[i], tt.want[i])
				}
			}
		})
	}
}
// TestValidationResultsHasFailures exercises the three failure conditions:
// a check error, issues not fully fixed, and the two non-failure cases
// (no issues at all, and every issue fixed).
func TestValidationResultsHasFailures(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name   string
		checks map[string]checkResult
		want   bool
	}{
		{
			name: "no failures - all clean",
			checks: map[string]checkResult{
				"orphans": {issueCount: 0, fixedCount: 0},
				"dupes":   {issueCount: 0, fixedCount: 0},
			},
			want: false,
		},
		{
			name: "has error",
			checks: map[string]checkResult{
				"orphans": {err: os.ErrNotExist},
			},
			want: true,
		},
		{
			name: "issues found but not all fixed",
			checks: map[string]checkResult{
				"orphans": {issueCount: 5, fixedCount: 3},
			},
			want: true,
		},
		{
			name: "issues found and all fixed",
			checks: map[string]checkResult{
				"orphans": {issueCount: 5, fixedCount: 5},
			},
			want: false,
		},
	}

	for _, tt := range tests {
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			r := &validationResults{checks: tt.checks}
			got := r.hasFailures()
			if got != tt.want {
				t.Errorf("hasFailures() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestValidationResultsToJSON verifies that toJSON sums counts across checks,
// flags the result unhealthy when issues remain, and carries per-check
// counts through to the nested maps.
func TestValidationResultsToJSON(t *testing.T) {
	t.Parallel()

	r := &validationResults{
		checks: map[string]checkResult{
			"orphans": {
				issueCount:  3,
				fixedCount:  2,
				suggestions: []string{"Run bd repair"},
			},
			"dupes": {
				issueCount: 0,
				fixedCount: 0,
			},
		},
	}

	output := r.toJSON()

	if output["total_issues"] != 3 {
		t.Errorf("total_issues = %v, want 3", output["total_issues"])
	}
	if output["total_fixed"] != 2 {
		t.Errorf("total_fixed = %v, want 2", output["total_fixed"])
	}
	// 3 issues but only 2 fixed => not healthy.
	if output["healthy"] != false {
		t.Errorf("healthy = %v, want false", output["healthy"])
	}

	checks := output["checks"].(map[string]interface{})
	orphans := checks["orphans"].(map[string]interface{})
	if orphans["issue_count"] != 3 {
		t.Errorf("orphans issue_count = %v, want 3", orphans["issue_count"])
	}
	if orphans["fixed_count"] != 2 {
		t.Errorf("orphans fixed_count = %v, want 2", orphans["fixed_count"])
	}
}
// TestValidateOrphanedDeps checks that a dependency on a missing issue is
// counted as exactly one orphan and produces a suggestion. fix=false, so the
// store is never touched (validateOrphanedDeps only calls the store when
// fixing).
func TestValidateOrphanedDeps(t *testing.T) {
	ctx := context.Background()

	allIssues := []*types.Issue{
		{
			ID: "bd-1",
			Dependencies: []*types.Dependency{
				{DependsOnID: "bd-2", Type: types.DepBlocks},
				{DependsOnID: "bd-999", Type: types.DepBlocks}, // orphaned
			},
		},
		{
			ID: "bd-2",
		},
	}

	result := validateOrphanedDeps(ctx, allIssues, false)

	if result.issueCount != 1 {
		t.Errorf("issueCount = %d, want 1", result.issueCount)
	}
	if result.fixedCount != 0 {
		t.Errorf("fixedCount = %d, want 0 (fix=false)", result.fixedCount)
	}
	if len(result.suggestions) == 0 {
		t.Error("expected suggestions")
	}
}
// TestValidateDuplicates checks that two issues sharing a title count as one
// duplicate (the canonical copy is excluded) and that a review suggestion is
// produced.
// NOTE(review): assumes findDuplicateGroups groups by title for these inputs
// — confirm against its implementation.
func TestValidateDuplicates(t *testing.T) {
	ctx := context.Background()

	allIssues := []*types.Issue{
		{
			ID:    "bd-1",
			Title: "Same title",
		},
		{
			ID:    "bd-2",
			Title: "Same title",
		},
		{
			ID:    "bd-3",
			Title: "Different",
		},
	}

	result := validateDuplicates(ctx, allIssues, false)

	// Should find 1 duplicate (bd-2 is duplicate of bd-1)
	if result.issueCount != 1 {
		t.Errorf("issueCount = %d, want 1", result.issueCount)
	}
	if len(result.suggestions) == 0 {
		t.Error("expected suggestions")
	}
}
// TestValidatePollution checks that a test-styled issue is counted as
// pollution while a normal issue is not.
// NOTE(review): assumes detectTestPollution matches the "test-" ID prefix /
// "Test" title used here — confirm against its implementation.
func TestValidatePollution(t *testing.T) {
	ctx := context.Background()

	allIssues := []*types.Issue{
		{
			ID:    "test-1",
			Title: "Test issue",
		},
		{
			ID:    "bd-1",
			Title: "Normal issue",
		},
	}

	result := validatePollution(ctx, allIssues, false)

	// Should detect test-1 as pollution
	if result.issueCount != 1 {
		t.Errorf("issueCount = %d, want 1", result.issueCount)
	}
}
func TestValidateGitConflicts_NoFile(t *testing.T) {
ctx := context.Background()
// Create temp dir without JSONL
tmpDir := t.TempDir()
if err := os.MkdirAll(tmpDir, 0755); err != nil {
t.Fatal(err)
}
// Override dbPath to point to temp dir (file won't exist)
originalDBPath := dbPath
dbPath = tmpDir + "/nonexistent.db"
defer func() { dbPath = originalDBPath }()
result := validateGitConflicts(ctx, false)
if result.issueCount != 0 {
t.Errorf("issueCount = %d, want 0 (no file)", result.issueCount)
}
if result.err != nil {
t.Errorf("unexpected error: %v", result.err)
}
}
// TestValidateGitConflicts_WithMarkers verifies that a JSONL file containing
// git conflict markers is reported as one conflict situation with resolution
// suggestions.
// NOTE(review): assumes findJSONLPath resolves to the dbPath global when it
// points at this file — confirm against findJSONLPath's implementation.
func TestValidateGitConflicts_WithMarkers(t *testing.T) {
	ctx := context.Background()

	// Create temp JSONL with conflict markers
	tmpDir := t.TempDir()
	jsonlPath := tmpDir + "/test.jsonl"
	content := `{"id":"bd-1"}
<<<<<<< HEAD
{"id":"bd-2","title":"Version A"}
=======
{"id":"bd-2","title":"Version B"}
>>>>>>> main
{"id":"bd-3"}`
	if err := os.WriteFile(jsonlPath, []byte(content), 0644); err != nil {
		t.Fatal(err)
	}

	// Override dbPath to point to temp file
	originalDBPath := dbPath
	dbPath = jsonlPath
	defer func() { dbPath = originalDBPath }()

	result := validateGitConflicts(ctx, false)

	// All markers collapse into a single "conflict situation".
	if result.issueCount != 1 {
		t.Errorf("issueCount = %d, want 1 (conflict found)", result.issueCount)
	}
	if len(result.suggestions) == 0 {
		t.Error("expected suggestions for conflict resolution")
	}
}
// TestValidateGitConflicts_Clean verifies that a marker-free JSONL file is
// reported as conflict-free with no error.
// NOTE(review): assumes findJSONLPath resolves to the dbPath global when it
// points at this file — confirm against findJSONLPath's implementation.
func TestValidateGitConflicts_Clean(t *testing.T) {
	ctx := context.Background()

	// Create temp JSONL without conflicts
	tmpDir := t.TempDir()
	jsonlPath := tmpDir + "/test.jsonl"
	content := `{"id":"bd-1","title":"Normal"}
{"id":"bd-2","title":"Also normal"}`
	if err := os.WriteFile(jsonlPath, []byte(content), 0644); err != nil {
		t.Fatal(err)
	}

	// Override dbPath to point to temp file
	originalDBPath := dbPath
	dbPath = jsonlPath
	defer func() { dbPath = originalDBPath }()

	result := validateGitConflicts(ctx, false)

	if result.issueCount != 0 {
		t.Errorf("issueCount = %d, want 0 (clean file)", result.issueCount)
	}
	if result.err != nil {
		t.Errorf("unexpected error: %v", result.err)
	}
}

View File

@@ -91,6 +91,16 @@ func TryConnectWithTimeout(socketPath string, dialTimeout time.Duration) (*Clien
if err != nil { if err != nil {
debug.Logf("failed to connect to RPC endpoint: %v", err) debug.Logf("failed to connect to RPC endpoint: %v", err)
rpcDebugLog("dial failed after %v: %v", dialDuration, err) rpcDebugLog("dial failed after %v: %v", dialDuration, err)
// Fast-fail: socket exists but dial failed - check if daemon actually alive
// If lock is not held, daemon crashed and left stale socket - clean up immediately
beadsDir := filepath.Dir(socketPath)
running, _ := lockfile.TryDaemonLock(beadsDir)
if !running {
rpcDebugLog("daemon not running (lock free) - cleaning up stale socket")
cleanupStaleDaemonArtifacts(beadsDir)
_ = os.Remove(socketPath) // Also remove stale socket
}
return nil, nil return nil, nil
} }