Merge pull request #753 from rsnodgrass/bd-improvements
feat: add database corruption recovery and sync backoff
This commit is contained in:
@@ -0,0 +1,14 @@
|
||||
repos:
|
||||
- repo: https://github.com/golangci/golangci-lint
|
||||
rev: v2.1.6
|
||||
hooks:
|
||||
- id: golangci-lint
|
||||
args: [--timeout=5m]
|
||||
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v6.0.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
- id: check-yaml
|
||||
- id: check-added-large-files
|
||||
+18
-1
@@ -99,7 +99,10 @@ The 30-second debounce provides a **transaction window** for batch operations -
|
||||
**MANDATORY WORKFLOW - COMPLETE ALL STEPS:**
|
||||
|
||||
1. **File beads issues for any remaining work** that needs follow-up
|
||||
2. **Ensure all quality gates pass** (only if code changes were made) - run tests, linters, builds (file P0 issues if broken)
|
||||
2. **Ensure all quality gates pass** (only if code changes were made):
|
||||
- Run `make lint` or `golangci-lint run ./...` (if pre-commit installed: `pre-commit run --all-files`)
|
||||
- Run `make test` or `go test ./...`
|
||||
- File P0 issues if quality gates are broken
|
||||
3. **Update beads issues** - close finished work, update status
|
||||
4. **PUSH TO REMOTE - NON-NEGOTIABLE** - This step is MANDATORY. Execute ALL commands below:
|
||||
```bash
|
||||
@@ -244,6 +247,20 @@ Without the pre-push hook, you can have database changes committed locally but s
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### CLI Design Principles
|
||||
|
||||
**Minimize cognitive overload.** Every new command, flag, or option adds cognitive burden for users. Before adding anything:
|
||||
|
||||
1. **Recovery/fix operations → `bd doctor --fix`**: Don't create separate commands like `bd recover` or `bd repair`. Doctor already detects problems - let `--fix` handle remediation. This keeps all health-related operations in one discoverable place.
|
||||
|
||||
2. **Prefer flags on existing commands**: Before creating a new command, ask: "Can this be a flag on an existing command?" Example: `bd list --stale` instead of `bd stale`.
|
||||
|
||||
3. **Consolidate related operations**: Related operations should live together. Daemon management uses `bd daemons {list,health,killall}`, not separate top-level commands.
|
||||
|
||||
4. **Count the commands**: Run `bd --help` and count. If we're approaching 30+ commands, we have a discoverability problem. Consider subcommand grouping.
|
||||
|
||||
5. **New commands need strong justification**: A new command should represent a fundamentally different operation, not just a convenience wrapper.
|
||||
|
||||
### Adding a New Command
|
||||
|
||||
1. Create file in `cmd/bd/`
|
||||
|
||||
@@ -365,6 +365,8 @@ type cookFormulaResult struct {
|
||||
// cookFormulaToSubgraph creates an in-memory TemplateSubgraph from a resolved formula.
|
||||
// This is the ephemeral proto implementation - no database storage.
|
||||
// The returned subgraph can be passed directly to cloneSubgraph for instantiation.
|
||||
//
|
||||
//nolint:unparam // error return kept for API consistency with future error handling
|
||||
func cookFormulaToSubgraph(f *formula.Formula, protoID string) (*TemplateSubgraph, error) {
|
||||
// Map step ID -> created issue
|
||||
issueMap := make(map[string]*types.Issue)
|
||||
|
||||
@@ -355,6 +355,11 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
|
||||
|
||||
// Check for multiple .db files (ambiguity error)
|
||||
beadsDir := filepath.Dir(daemonDBPath)
|
||||
|
||||
// Reset backoff on daemon start (fresh start, but preserve NeedsManualSync hint)
|
||||
if !localMode {
|
||||
ResetBackoffOnDaemonStart(beadsDir)
|
||||
}
|
||||
matches, err := filepath.Glob(filepath.Join(beadsDir, "*.db"))
|
||||
if err == nil && len(matches) > 1 {
|
||||
// Filter out backup files (*.backup-*.db, *.backup.db)
|
||||
|
||||
+19
-2
@@ -529,6 +529,19 @@ func performAutoImport(ctx context.Context, store storage.Storage, skipGit bool,
|
||||
if skipGit {
|
||||
mode = "local auto-import"
|
||||
}
|
||||
|
||||
// Check backoff before attempting sync (skip for local mode)
|
||||
if !skipGit {
|
||||
jsonlPath := findJSONLPath()
|
||||
if jsonlPath != "" {
|
||||
beadsDir := filepath.Dir(jsonlPath)
|
||||
if ShouldSkipSync(beadsDir) {
|
||||
log.log("Skipping %s: in backoff period", mode)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.log("Starting %s...", mode)
|
||||
|
||||
jsonlPath := findJSONLPath()
|
||||
@@ -579,14 +592,16 @@ func performAutoImport(ctx context.Context, store storage.Storage, skipGit bool,
|
||||
// Try sync branch first
|
||||
pulled, err := syncBranchPull(importCtx, store, log)
|
||||
if err != nil {
|
||||
log.log("Sync branch pull failed: %v", err)
|
||||
backoff := RecordSyncFailure(beadsDir, err.Error())
|
||||
log.log("Sync branch pull failed: %v (backoff: %v)", err, backoff)
|
||||
return
|
||||
}
|
||||
|
||||
// If sync branch not configured, use regular pull
|
||||
if !pulled {
|
||||
if err := gitPull(importCtx); err != nil {
|
||||
log.log("Pull failed: %v", err)
|
||||
backoff := RecordSyncFailure(beadsDir, err.Error())
|
||||
log.log("Pull failed: %v (backoff: %v)", err, backoff)
|
||||
return
|
||||
}
|
||||
log.log("Pulled from remote")
|
||||
@@ -622,6 +637,8 @@ func performAutoImport(ctx context.Context, store storage.Storage, skipGit bool,
|
||||
if skipGit {
|
||||
log.log("Local auto-import complete")
|
||||
} else {
|
||||
// Record success to clear backoff state
|
||||
RecordSyncSuccess(beadsDir)
|
||||
log.log("Auto-import complete")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,165 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SyncState tracks daemon sync health for backoff and user hints.
// Stored in .beads/sync-state.json (gitignored, local-only).
type SyncState struct {
	LastFailure     time.Time `json:"last_failure,omitempty"`
	FailureCount    int       `json:"failure_count"`
	BackoffUntil    time.Time `json:"backoff_until,omitempty"`
	NeedsManualSync bool      `json:"needs_manual_sync"`
	FailureReason   string    `json:"failure_reason,omitempty"`
}

const (
	syncStateFile = "sync-state.json"
	// maxBackoffDuration caps the exponential backoff schedule
	// (30s, 1m, 2m, 5m, 10m, 30m).
	maxBackoffDuration = 30 * time.Minute
	// staleStateThreshold: state whose last failure is older than this is
	// discarded on load (treated as no state).
	staleStateThreshold = 24 * time.Hour
	// manualSyncFailureThreshold: after this many consecutive failures we
	// assume a conflict that needs human intervention.
	manualSyncFailureThreshold = 3
)

var (
	// backoffSchedule defines the exponential backoff durations.
	// Failure counts beyond the schedule reuse the final (capped) entry.
	backoffSchedule = []time.Duration{
		30 * time.Second,
		1 * time.Minute,
		2 * time.Minute,
		5 * time.Minute,
		10 * time.Minute,
		maxBackoffDuration,
	}
	// syncStateMu serializes ALL access to the sync state file, including
	// whole read-modify-write sequences (see RecordSyncFailure).
	syncStateMu sync.Mutex
)

// syncStatePath returns the on-disk location of the sync state file.
func syncStatePath(beadsDir string) string {
	return filepath.Join(beadsDir, syncStateFile)
}

// loadSyncStateLocked reads and decodes the state file.
// Caller must hold syncStateMu.
// Returns the zero state if the file is missing, unreadable, or malformed.
// A stale file (last failure older than staleStateThreshold) is removed as
// a side effect and also yields the zero state.
func loadSyncStateLocked(beadsDir string) SyncState {
	statePath := syncStatePath(beadsDir)
	data, err := os.ReadFile(statePath) // #nosec G304 - path constructed from beadsDir
	if err != nil {
		return SyncState{}
	}

	var state SyncState
	if err := json.Unmarshal(data, &state); err != nil {
		return SyncState{}
	}

	// Clear stale state (older than 24h with no recent failures)
	if !state.LastFailure.IsZero() && time.Since(state.LastFailure) > staleStateThreshold {
		_ = os.Remove(statePath)
		return SyncState{}
	}

	return state
}

// saveSyncStateLocked persists state, or removes the file when the state
// carries no information (no failures, no manual-sync hint).
// Caller must hold syncStateMu.
func saveSyncStateLocked(beadsDir string, state SyncState) error {
	statePath := syncStatePath(beadsDir)

	// If state is empty/reset, remove the file instead of writing it
	if state.FailureCount == 0 && !state.NeedsManualSync {
		_ = os.Remove(statePath)
		return nil
	}

	data, err := json.MarshalIndent(state, "", "  ")
	if err != nil {
		return err
	}

	return os.WriteFile(statePath, data, 0600)
}

// LoadSyncState loads the sync state from .beads/sync-state.json.
// Returns empty state if file doesn't exist or is stale.
func LoadSyncState(beadsDir string) SyncState {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()
	return loadSyncStateLocked(beadsDir)
}

// SaveSyncState saves the sync state to .beads/sync-state.json.
// An empty/reset state removes the file instead.
func SaveSyncState(beadsDir string, state SyncState) error {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()
	return saveSyncStateLocked(beadsDir, state)
}

// ClearSyncState removes the sync state file.
// A missing file is not an error.
func ClearSyncState(beadsDir string) error {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()

	err := os.Remove(syncStatePath(beadsDir))
	if os.IsNotExist(err) {
		return nil
	}
	return err
}

// RecordSyncFailure updates the sync state after a failure.
// Returns the duration until next retry.
//
// The entire read-modify-write runs under syncStateMu so that concurrent
// failures cannot lose increments (previously the lock was released between
// the load and the save, allowing two failures to both observe the same
// count and write the same state back).
func RecordSyncFailure(beadsDir string, reason string) time.Duration {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()

	state := loadSyncStateLocked(beadsDir)

	state.LastFailure = time.Now()
	state.FailureCount++
	state.FailureReason = reason

	// Calculate backoff duration, clamping to the last schedule entry
	backoffIndex := state.FailureCount - 1
	if backoffIndex >= len(backoffSchedule) {
		backoffIndex = len(backoffSchedule) - 1
	}
	backoff := backoffSchedule[backoffIndex]

	state.BackoffUntil = time.Now().Add(backoff)

	// Mark as needing manual sync after repeated failures (likely a conflict)
	if state.FailureCount >= manualSyncFailureThreshold {
		state.NeedsManualSync = true
	}

	// Best-effort persist: backoff still applies in-process if the write fails
	_ = saveSyncStateLocked(beadsDir, state)
	return backoff
}

// RecordSyncSuccess clears the sync state after a successful sync.
func RecordSyncSuccess(beadsDir string) {
	_ = ClearSyncState(beadsDir)
}

// ShouldSkipSync returns true if we're still in the backoff period.
func ShouldSkipSync(beadsDir string) bool {
	state := LoadSyncState(beadsDir)
	if state.BackoffUntil.IsZero() {
		return false
	}
	return time.Now().Before(state.BackoffUntil)
}

// ResetBackoffOnDaemonStart resets backoff counters when daemon starts,
// but preserves NeedsManualSync (and the failure reason) so hints still show.
// This allows a fresh start while keeping user informed of conflicts.
// The read-modify-write is atomic under syncStateMu.
func ResetBackoffOnDaemonStart(beadsDir string) {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()

	state := loadSyncStateLocked(beadsDir)

	// Nothing to reset
	if state.FailureCount == 0 && !state.NeedsManualSync {
		return
	}

	// Reset backoff but preserve NeedsManualSync and its reason
	_ = saveSyncStateLocked(beadsDir, SyncState{
		NeedsManualSync: state.NeedsManualSync,
		FailureReason:   state.FailureReason,
	})
}
|
||||
+12
-1
@@ -410,7 +410,8 @@ func applyFixList(path string, fixes []doctorCheck) {
|
||||
case "Database":
|
||||
err = fix.DatabaseVersion(path)
|
||||
case "Database Integrity":
|
||||
err = fix.DatabaseIntegrity(path)
|
||||
// Corruption detected - try recovery from JSONL
|
||||
err = fix.DatabaseCorruptionRecovery(path)
|
||||
case "Schema Compatibility":
|
||||
err = fix.SchemaCompatibility(path)
|
||||
case "Repo Fingerprint":
|
||||
@@ -472,6 +473,10 @@ func applyFixList(path string, fixes []doctorCheck) {
|
||||
// No auto-fix: compaction requires agent review
|
||||
fmt.Printf(" ⚠ Run 'bd compact --analyze' to review candidates\n")
|
||||
continue
|
||||
case "Large Database":
|
||||
// No auto-fix: pruning deletes data, must be user-controlled
|
||||
fmt.Printf(" ⚠ Run 'bd cleanup --older-than 90' to prune old closed issues\n")
|
||||
continue
|
||||
default:
|
||||
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
|
||||
fmt.Printf(" Manual fix: %s\n", check.Fix)
|
||||
@@ -894,6 +899,12 @@ func runDiagnostics(path string) doctorResult {
|
||||
result.Checks = append(result.Checks, compactionCheck)
|
||||
// Info only, not a warning - compaction requires human review
|
||||
|
||||
// Check 29: Database size (pruning suggestion)
|
||||
// Note: This check has no auto-fix - pruning is destructive and user-controlled
|
||||
sizeCheck := convertDoctorCheck(doctor.CheckDatabaseSize(path))
|
||||
result.Checks = append(result.Checks, sizeCheck)
|
||||
// Don't fail overall check for size warning, just inform
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
|
||||
+138
-1
@@ -246,6 +246,22 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
|
||||
// Open database in read-only mode for integrity check
|
||||
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
|
||||
if err != nil {
|
||||
// Check if JSONL recovery is possible
|
||||
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
|
||||
if jsonlErr != nil {
|
||||
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
|
||||
}
|
||||
|
||||
if jsonlErr == nil && jsonlCount > 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: fmt.Sprintf("Failed to open database (JSONL has %d issues for recovery)", jsonlCount),
|
||||
Detail: err.Error(),
|
||||
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
@@ -260,6 +276,22 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
|
||||
// This checks the entire database for corruption
|
||||
rows, err := db.Query("PRAGMA integrity_check")
|
||||
if err != nil {
|
||||
// Check if JSONL recovery is possible
|
||||
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
|
||||
if jsonlErr != nil {
|
||||
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
|
||||
}
|
||||
|
||||
if jsonlErr == nil && jsonlCount > 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: fmt.Sprintf("Failed to run integrity check (JSONL has %d issues for recovery)", jsonlCount),
|
||||
Detail: err.Error(),
|
||||
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
@@ -288,7 +320,23 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
|
||||
}
|
||||
}
|
||||
|
||||
// Any other result indicates corruption
|
||||
// Any other result indicates corruption - check if JSONL recovery is possible
|
||||
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
|
||||
if jsonlErr != nil {
|
||||
// Try alternate name
|
||||
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
|
||||
}
|
||||
|
||||
if jsonlErr == nil && jsonlCount > 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
Message: fmt.Sprintf("Database corruption detected (JSONL has %d issues for recovery)", jsonlCount),
|
||||
Detail: strings.Join(results, "; "),
|
||||
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Database Integrity",
|
||||
Status: StatusError,
|
||||
@@ -642,3 +690,92 @@ func isNoDbModeConfigured(beadsDir string) bool {
|
||||
|
||||
return cfg.NoDb
|
||||
}
|
||||
|
||||
// CheckDatabaseSize warns when the database has accumulated many closed issues.
|
||||
// This is purely informational - pruning is NEVER auto-fixed because it
|
||||
// permanently deletes data. Users must explicitly run 'bd cleanup' to prune.
|
||||
//
|
||||
// Config: doctor.suggest_pruning_issue_count (default: 5000, 0 = disabled)
|
||||
//
|
||||
// DESIGN NOTE: This check intentionally has NO auto-fix. Unlike other doctor
|
||||
// checks that fix configuration or sync issues, pruning is destructive and
|
||||
// irreversible. The user must make an explicit decision to delete their
|
||||
// closed issue history. We only provide guidance, never action.
|
||||
func CheckDatabaseSize(path string) DoctorCheck {
|
||||
beadsDir := filepath.Join(path, ".beads")
|
||||
|
||||
// Get database path
|
||||
var dbPath string
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
|
||||
dbPath = cfg.DatabasePath(beadsDir)
|
||||
} else {
|
||||
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
}
|
||||
|
||||
// If no database, skip this check
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (no database)",
|
||||
}
|
||||
}
|
||||
|
||||
// Read threshold from config (default 5000, 0 = disabled)
|
||||
threshold := 5000
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (unable to open database)",
|
||||
}
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Check for custom threshold in config table
|
||||
var thresholdStr string
|
||||
err = db.QueryRow("SELECT value FROM config WHERE key = ?", "doctor.suggest_pruning_issue_count").Scan(&thresholdStr)
|
||||
if err == nil {
|
||||
if _, err := fmt.Sscanf(thresholdStr, "%d", &threshold); err != nil {
|
||||
threshold = 5000 // Reset to default on parse error
|
||||
}
|
||||
}
|
||||
|
||||
// If disabled, return OK
|
||||
if threshold == 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "Check disabled (threshold = 0)",
|
||||
}
|
||||
}
|
||||
|
||||
// Count closed issues
|
||||
var closedCount int
|
||||
err = db.QueryRow("SELECT COUNT(*) FROM issues WHERE status = 'closed'").Scan(&closedCount)
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (unable to count issues)",
|
||||
}
|
||||
}
|
||||
|
||||
// Check against threshold
|
||||
if closedCount > threshold {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
|
||||
Detail: "Large number of closed issues may impact performance",
|
||||
Fix: "Consider running 'bd cleanup --older-than 90' to prune old closed issues",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,81 @@
|
||||
package fix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// DatabaseCorruptionRecovery recovers a corrupted database from JSONL backup.
// It renames the corrupted database aside (beads.db.corrupt), re-imports all
// issues from the JSONL backup by invoking the bd binary, then runs
// 'bd migrate' to set version metadata (migrate failure is non-fatal).
//
// If the bd binary cannot be located, the original database is restored.
// If the import itself fails, the .corrupt backup is preserved for manual
// inspection. Returns an error when there is no database, no usable JSONL
// backup, or the import fails.
func DatabaseCorruptionRecovery(path string) error {
	// Validate workspace (must be a directory containing .beads)
	if err := validateBeadsWorkspace(path); err != nil {
		return err
	}

	beadsDir := filepath.Join(path, ".beads")
	// NOTE(review): database name is hard-coded to "beads.db"; presumably
	// a config-specified database name is not handled here — confirm
	// against the configfile-aware path resolution used by other checks.
	dbPath := filepath.Join(beadsDir, "beads.db")

	// Check if database exists
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return fmt.Errorf("no database to recover")
	}

	// Find JSONL file
	jsonlPath := findJSONLPath(beadsDir)
	if jsonlPath == "" {
		return fmt.Errorf("no JSONL backup found - cannot recover (try restoring from git history)")
	}

	// Count issues in JSONL. An empty backup would silently yield an empty
	// database, so treat it as unrecoverable instead.
	issueCount, err := countJSONLIssues(jsonlPath)
	if err != nil {
		return fmt.Errorf("failed to read JSONL: %w", err)
	}
	if issueCount == 0 {
		return fmt.Errorf("JSONL is empty - cannot recover (try restoring from git history)")
	}

	// Backup corrupted database by renaming (not copying), so the import
	// below starts from a clean slate.
	backupPath := dbPath + ".corrupt"
	fmt.Printf(" Backing up corrupted database to %s\n", filepath.Base(backupPath))
	if err := os.Rename(dbPath, backupPath); err != nil {
		return fmt.Errorf("failed to backup corrupted database: %w", err)
	}

	// Get bd binary path
	bdBinary, err := getBdBinary()
	if err != nil {
		// Restore corrupted database on failure - nothing was imported yet,
		// so the rename is safe to undo.
		_ = os.Rename(backupPath, dbPath)
		return err
	}

	// Run bd import with --rename-on-import to handle prefix mismatches
	fmt.Printf(" Recovering %d issues from %s\n", issueCount, filepath.Base(jsonlPath))
	cmd := exec.Command(bdBinary, "import", "-i", jsonlPath, "--rename-on-import") // #nosec G204
	cmd.Dir = path
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		// Keep backup on failure. Do NOT restore it over dbPath: the import
		// may have partially written a new database file.
		fmt.Printf(" Warning: recovery failed, corrupted database preserved at %s\n", filepath.Base(backupPath))
		return fmt.Errorf("failed to import from JSONL: %w", err)
	}

	// Run migrate to set version metadata
	migrateCmd := exec.Command(bdBinary, "migrate") // #nosec G204
	migrateCmd.Dir = path
	migrateCmd.Stdout = os.Stdout
	migrateCmd.Stderr = os.Stderr
	if err := migrateCmd.Run(); err != nil {
		// Non-fatal - import succeeded, version just won't be set
		fmt.Printf(" Warning: migration failed (non-fatal): %v\n", err)
	}

	fmt.Printf(" Recovered %d issues from JSONL backup\n", issueCount)
	return nil
}
|
||||
@@ -312,6 +312,8 @@ func CheckSyncBranchHookCompatibility(path string) DoctorCheck {
|
||||
Status: StatusWarning,
|
||||
Message: "Pre-push hook is not a bd hook",
|
||||
Detail: "Cannot verify sync-branch compatibility with custom hooks",
|
||||
Fix: "Either run 'bd hooks install --force' to use bd hooks,\n" +
|
||||
" or ensure your custom hook skips validation when pushing to sync-branch",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -110,7 +110,7 @@ func TestCheckGitUpstream(t *testing.T) {
|
||||
t.Run("up to date", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-up2-*")
|
||||
remote := mkTmpDirInTmp(t, "bd-git-remote-*")
|
||||
runGit(t, remote, "init", "--bare")
|
||||
runGit(t, remote, "init", "--bare", "--initial-branch=main")
|
||||
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
@@ -126,7 +126,7 @@ func TestCheckGitUpstream(t *testing.T) {
|
||||
t.Run("ahead of upstream", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-ahead-*")
|
||||
remote := mkTmpDirInTmp(t, "bd-git-remote2-*")
|
||||
runGit(t, remote, "init", "--bare")
|
||||
runGit(t, remote, "init", "--bare", "--initial-branch=main")
|
||||
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
@@ -147,7 +147,7 @@ func TestCheckGitUpstream(t *testing.T) {
|
||||
t.Run("behind upstream", func(t *testing.T) {
|
||||
dir := mkTmpDirInTmp(t, "bd-git-behind-*")
|
||||
remote := mkTmpDirInTmp(t, "bd-git-remote3-*")
|
||||
runGit(t, remote, "init", "--bare")
|
||||
runGit(t, remote, "init", "--bare", "--initial-branch=main")
|
||||
|
||||
initRepo(t, dir, "main")
|
||||
commitFile(t, dir, "README.md", "# test\n", "initial")
|
||||
|
||||
@@ -23,8 +23,8 @@ func setupGitRepo(t *testing.T) string {
|
||||
t.Fatalf("failed to create .beads directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
cmd := exec.Command("git", "init", "--initial-branch=main")
|
||||
cmd.Dir = dir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
@@ -278,8 +278,8 @@ func setupGitRepoInDir(t *testing.T, dir string) {
|
||||
t.Fatalf("failed to create .beads directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
cmd := exec.Command("git", "init", "--initial-branch=main")
|
||||
cmd.Dir = dir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
|
||||
@@ -19,6 +19,7 @@ daemon.lock
|
||||
daemon.log
|
||||
daemon.pid
|
||||
bd.sock
|
||||
sync-state.json
|
||||
|
||||
# Local version tracking (prevents upgrade notification spam after git ops)
|
||||
.local_version
|
||||
|
||||
@@ -190,7 +190,7 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
|
||||
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
|
||||
" Only one JSONL file should be used per repository.",
|
||||
Fix: "Determine which file is current and remove the others:\n" +
|
||||
" 1. Check 'bd stats' to see which file is being used\n" +
|
||||
" 1. Check .beads/metadata.json for 'jsonl_export' setting\n" +
|
||||
" 2. Verify with 'git log .beads/*.jsonl' to see commit history\n" +
|
||||
" 3. Remove the unused file(s): git rm .beads/<unused>.jsonl\n" +
|
||||
" 4. Commit the change",
|
||||
|
||||
@@ -312,7 +312,7 @@ func CheckCompactionCandidates(path string) DoctorCheck {
|
||||
// the actual beads directory location.
|
||||
func resolveBeadsDir(beadsDir string) string {
|
||||
redirectFile := filepath.Join(beadsDir, "redirect")
|
||||
data, err := os.ReadFile(redirectFile)
|
||||
data, err := os.ReadFile(redirectFile) //nolint:gosec // redirect file path is constructed from known beadsDir
|
||||
if err != nil {
|
||||
// No redirect file - use original path
|
||||
return beadsDir
|
||||
|
||||
+2
-2
@@ -564,7 +564,7 @@ func runFormulaConvert(cmd *cobra.Command, args []string) {
|
||||
tomlPath := strings.TrimSuffix(jsonPath, formula.FormulaExtJSON) + formula.FormulaExtTOML
|
||||
|
||||
// Write the TOML file
|
||||
if err := os.WriteFile(tomlPath, tomlData, 0644); err != nil {
|
||||
if err := os.WriteFile(tomlPath, tomlData, 0600); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error writing %s: %v\n", tomlPath, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -623,7 +623,7 @@ func convertAllFormulas() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := os.WriteFile(tomlPath, tomlData, 0644); err != nil {
|
||||
if err := os.WriteFile(tomlPath, tomlData, 0600); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "✗ Error writing %s: %v\n", tomlPath, err)
|
||||
errors++
|
||||
continue
|
||||
|
||||
+3
-3
@@ -870,7 +870,7 @@ func evalTimerGate(gate *types.Issue, now time.Time) (bool, string) {
|
||||
// ghRunStatus represents the JSON output of `gh run view --json`
|
||||
type ghRunStatus struct {
|
||||
Status string `json:"status"` // queued, in_progress, completed
|
||||
Conclusion string `json:"conclusion"` // success, failure, cancelled, skipped, etc.
|
||||
Conclusion string `json:"conclusion"` // success, failure, canceled, skipped, etc.
|
||||
}
|
||||
|
||||
// evalGHRunGate checks if a GitHub Actions run has completed.
|
||||
@@ -882,7 +882,7 @@ func evalGHRunGate(gate *types.Issue) (bool, string) {
|
||||
}
|
||||
|
||||
// Run gh CLI to get run status
|
||||
cmd := exec.Command("gh", "run", "view", runID, "--json", "status,conclusion")
|
||||
cmd := exec.Command("gh", "run", "view", runID, "--json", "status,conclusion") //nolint:gosec // runID is from trusted issue.AwaitID field
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
// gh CLI failed - could be network issue, invalid run ID, or gh not installed
|
||||
@@ -924,7 +924,7 @@ func evalGHPRGate(gate *types.Issue) (bool, string) {
|
||||
}
|
||||
|
||||
// Run gh CLI to get PR status
|
||||
cmd := exec.Command("gh", "pr", "view", prNumber, "--json", "state,mergedAt")
|
||||
cmd := exec.Command("gh", "pr", "view", prNumber, "--json", "state,mergedAt") //nolint:gosec // prNumber is from trusted issue.AwaitID field
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
// gh CLI failed - could be network issue, invalid PR, or gh not installed
|
||||
|
||||
@@ -875,6 +875,9 @@ var rootCmd = &cobra.Command{
|
||||
debug.Logf("loaded %d molecules from %v", result.Loaded, result.Sources)
|
||||
}
|
||||
}
|
||||
|
||||
// Tips (including sync conflict proactive checks) are shown via maybeShowTip()
|
||||
// after successful command execution, not in PreRun
|
||||
},
|
||||
PersistentPostRun: func(cmd *cobra.Command, args []string) {
|
||||
// Handle --no-db mode: write memory storage back to JSONL
|
||||
|
||||
@@ -254,9 +254,9 @@ func findWritableFormulaDir(formulaName string) string {
|
||||
if err := os.MkdirAll(dir, 0755); err == nil {
|
||||
// Check if we can write to it
|
||||
testPath := filepath.Join(dir, ".write-test")
|
||||
if f, err := os.Create(testPath); err == nil {
|
||||
f.Close()
|
||||
os.Remove(testPath)
|
||||
if f, err := os.Create(testPath); err == nil { //nolint:gosec // testPath is constructed from known search paths
|
||||
_ = f.Close()
|
||||
_ = os.Remove(testPath)
|
||||
return filepath.Join(dir, formulaName+formula.FormulaExt)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,8 +91,12 @@ func testFreshCloneAutoImport(t *testing.T) {
|
||||
|
||||
// Test checkGitForIssues detects issues.jsonl
|
||||
t.Chdir(dir)
|
||||
|
||||
git.ResetCaches() // Reset git caches after changing directory
|
||||
|
||||
git.ResetCaches()
|
||||
|
||||
|
||||
count, path, gitRef := checkGitForIssues()
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 issue in git, got %d", count)
|
||||
@@ -171,8 +175,12 @@ func testDatabaseRemovalScenario(t *testing.T) {
|
||||
|
||||
// Change to test directory
|
||||
t.Chdir(dir)
|
||||
|
||||
git.ResetCaches() // Reset git caches after changing directory
|
||||
|
||||
git.ResetCaches()
|
||||
|
||||
|
||||
// Test checkGitForIssues finds issues.jsonl (canonical name)
|
||||
count, path, gitRef := checkGitForIssues()
|
||||
if count != 2 {
|
||||
@@ -250,8 +258,12 @@ func testLegacyFilenameSupport(t *testing.T) {
|
||||
|
||||
// Change to test directory
|
||||
t.Chdir(dir)
|
||||
|
||||
git.ResetCaches() // Reset git caches after changing directory
|
||||
|
||||
git.ResetCaches()
|
||||
|
||||
|
||||
// Test checkGitForIssues finds issues.jsonl
|
||||
count, path, gitRef := checkGitForIssues()
|
||||
if count != 1 {
|
||||
@@ -327,8 +339,12 @@ func testPrecedenceTest(t *testing.T) {
|
||||
|
||||
// Change to test directory
|
||||
t.Chdir(dir)
|
||||
|
||||
git.ResetCaches() // Reset git caches after changing directory
|
||||
|
||||
git.ResetCaches()
|
||||
|
||||
|
||||
// Test checkGitForIssues prefers issues.jsonl
|
||||
count, path, _ := checkGitForIssues()
|
||||
if count != 2 {
|
||||
@@ -374,8 +390,12 @@ func testInitSafetyCheck(t *testing.T) {
|
||||
|
||||
// Change to test directory
|
||||
t.Chdir(dir)
|
||||
|
||||
git.ResetCaches() // Reset git caches after changing directory
|
||||
|
||||
git.ResetCaches()
|
||||
|
||||
|
||||
// Create empty database (simulating failed import)
|
||||
dbPath := filepath.Join(beadsDir, "test.db")
|
||||
store, err := sqlite.New(context.Background(), dbPath)
|
||||
@@ -415,8 +435,14 @@ func testInitSafetyCheck(t *testing.T) {
|
||||
// Helper functions
|
||||
|
||||
// runCmd runs a command and fails the test if it returns an error
|
||||
// If the command is "git init", it automatically adds --initial-branch=main
|
||||
// for modern git compatibility.
|
||||
func runCmd(t *testing.T, dir string, name string, args ...string) {
|
||||
t.Helper()
|
||||
// Add --initial-branch=main to git init for modern git compatibility
|
||||
if name == "git" && len(args) > 0 && args[0] == "init" {
|
||||
args = append(args, "--initial-branch=main")
|
||||
}
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Dir = dir
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
|
||||
+5
-5
@@ -50,14 +50,14 @@ func resolveAndGetIssueWithRouting(ctx context.Context, localStore storage.Stora
|
||||
// Step 2: Resolve and get from routed store
|
||||
result, err := resolveAndGetFromStore(ctx, routedStorage.Storage, id, true)
|
||||
if err != nil {
|
||||
routedStorage.Close()
|
||||
_ = routedStorage.Close()
|
||||
return nil, err
|
||||
}
|
||||
if result != nil {
|
||||
result.closeFn = func() { routedStorage.Close() }
|
||||
result.closeFn = func() { _ = routedStorage.Close() }
|
||||
return result, nil
|
||||
}
|
||||
routedStorage.Close()
|
||||
_ = routedStorage.Close()
|
||||
}
|
||||
|
||||
// Step 3: Fall back to local store
|
||||
@@ -133,7 +133,7 @@ func getIssueWithRouting(ctx context.Context, localStore storage.Storage, id str
|
||||
// Step 3: Try the routed storage
|
||||
routedIssue, routedErr := routedStorage.Storage.GetIssue(ctx, id)
|
||||
if routedErr != nil || routedIssue == nil {
|
||||
routedStorage.Close()
|
||||
_ = routedStorage.Close()
|
||||
// Return the original error if routing also failed
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -148,7 +148,7 @@ func getIssueWithRouting(ctx context.Context, localStore storage.Storage, id str
|
||||
Routed: true,
|
||||
ResolvedID: id,
|
||||
closeFn: func() {
|
||||
routedStorage.Close()
|
||||
_ = routedStorage.Close()
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -361,6 +361,9 @@ Use --merge to merge the sync branch back to main branch.`,
|
||||
}
|
||||
}
|
||||
|
||||
// Clear sync state on successful sync (daemon backoff/hints)
|
||||
_ = ClearSyncState(beadsDir)
|
||||
|
||||
fmt.Println("\n✓ Sync complete")
|
||||
return
|
||||
}
|
||||
@@ -711,6 +714,11 @@ Use --merge to merge the sync branch back to main branch.`,
|
||||
skipFinalFlush = true
|
||||
}
|
||||
|
||||
// Clear sync state on successful sync (daemon backoff/hints)
|
||||
if bd := beads.FindBeadsDir(); bd != "" {
|
||||
_ = ClearSyncState(bd)
|
||||
}
|
||||
|
||||
fmt.Println("\n✓ Sync complete")
|
||||
}
|
||||
},
|
||||
|
||||
@@ -39,8 +39,11 @@ func setupGitRepo(t *testing.T) (repoPath string, cleanup func()) {
|
||||
t.Fatalf("failed to change to temp directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo
|
||||
if err := exec.Command("git", "init").Run(); err != nil {
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
if err := exec.Command("git", "init", "--initial-branch=main").Run(); err != nil {
|
||||
_ = os.Chdir(originalWd)
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
}
|
||||
@@ -63,6 +66,7 @@ func setupGitRepo(t *testing.T) (repoPath string, cleanup func()) {
|
||||
|
||||
cleanup = func() {
|
||||
_ = os.Chdir(originalWd)
|
||||
git.ResetCaches()
|
||||
}
|
||||
|
||||
return tmpDir, cleanup
|
||||
@@ -83,6 +87,9 @@ func setupGitRepoWithBranch(t *testing.T, branch string) (repoPath string, clean
|
||||
t.Fatalf("failed to change to temp directory: %v", err)
|
||||
}
|
||||
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Initialize git repo with specific branch
|
||||
if err := exec.Command("git", "init", "-b", branch).Run(); err != nil {
|
||||
_ = os.Chdir(originalWd)
|
||||
@@ -107,6 +114,7 @@ func setupGitRepoWithBranch(t *testing.T, branch string) (repoPath string, clean
|
||||
|
||||
cleanup = func() {
|
||||
_ = os.Chdir(originalWd)
|
||||
git.ResetCaches()
|
||||
}
|
||||
|
||||
return tmpDir, cleanup
|
||||
@@ -127,8 +135,11 @@ func setupMinimalGitRepo(t *testing.T) (repoPath string, cleanup func()) {
|
||||
t.Fatalf("failed to change to temp directory: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo
|
||||
if err := exec.Command("git", "init").Run(); err != nil {
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
if err := exec.Command("git", "init", "--initial-branch=main").Run(); err != nil {
|
||||
_ = os.Chdir(originalWd)
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
}
|
||||
@@ -139,6 +150,7 @@ func setupMinimalGitRepo(t *testing.T) (repoPath string, cleanup func()) {
|
||||
|
||||
cleanup = func() {
|
||||
_ = os.Chdir(originalWd)
|
||||
git.ResetCaches()
|
||||
}
|
||||
|
||||
return tmpDir, cleanup
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/beads"
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
)
|
||||
|
||||
@@ -353,6 +354,30 @@ func initDefaultTips() {
|
||||
return isClaudeDetected() && !isClaudeSetupComplete()
|
||||
},
|
||||
)
|
||||
|
||||
// Sync conflict tip - ALWAYS show when sync has failed and needs manual intervention
|
||||
// This is a proactive health check that trumps educational tips (ox-cli pattern)
|
||||
InjectTip(
|
||||
"sync_conflict",
|
||||
"Run 'bd sync' to resolve sync conflict",
|
||||
200, // Higher than Claude setup - sync issues are urgent
|
||||
0, // No frequency limit - always show when applicable
|
||||
1.0, // 100% probability - always show when condition is true
|
||||
syncConflictCondition,
|
||||
)
|
||||
}
|
||||
|
||||
// syncConflictCondition checks if there's a sync conflict that needs manual resolution.
|
||||
// This is the condition function for the sync_conflict tip.
|
||||
func syncConflictCondition() bool {
|
||||
// Find beads directory to check sync state
|
||||
beadsDir := beads.FindBeadsDir()
|
||||
if beadsDir == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
state := LoadSyncState(beadsDir)
|
||||
return state.NeedsManualSync
|
||||
}
|
||||
|
||||
// init initializes the tip system with default tips
|
||||
|
||||
@@ -71,8 +71,10 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
|
||||
|
||||
// Change to the worktree directory
|
||||
origDir, _ := os.Getwd()
|
||||
defer func() {
|
||||
defer func() {
|
||||
_ = os.Chdir(origDir)
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
// Reinitialize config to restore original state
|
||||
_ = config.Initialize()
|
||||
}()
|
||||
@@ -81,6 +83,9 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
|
||||
}
|
||||
git.ResetCaches()
|
||||
|
||||
// Reset git caches after changing directory (required for IsWorktree to re-detect)
|
||||
git.ResetCaches()
|
||||
|
||||
// No sync-branch configured
|
||||
os.Unsetenv("BEADS_SYNC_BRANCH")
|
||||
|
||||
@@ -108,8 +113,9 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
|
||||
|
||||
// Change to the worktree directory
|
||||
origDir, _ := os.Getwd()
|
||||
defer func() {
|
||||
defer func() {
|
||||
_ = os.Chdir(origDir)
|
||||
git.ResetCaches()
|
||||
_ = config.Initialize()
|
||||
}()
|
||||
if err := os.Chdir(worktreeDir); err != nil {
|
||||
@@ -117,6 +123,9 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
|
||||
}
|
||||
git.ResetCaches()
|
||||
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Reinitialize config to pick up the new directory's config.yaml
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to reinitialize config: %v", err)
|
||||
@@ -140,8 +149,9 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
|
||||
|
||||
// Change to the worktree directory
|
||||
origDir, _ := os.Getwd()
|
||||
defer func() {
|
||||
defer func() {
|
||||
_ = os.Chdir(origDir)
|
||||
git.ResetCaches()
|
||||
_ = config.Initialize()
|
||||
}()
|
||||
if err := os.Chdir(worktreeDir); err != nil {
|
||||
@@ -149,6 +159,9 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
|
||||
}
|
||||
git.ResetCaches()
|
||||
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Reinitialize config to pick up the new directory's config.yaml
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to reinitialize config: %v", err)
|
||||
@@ -191,8 +204,9 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
|
||||
|
||||
// Change to the worktree directory
|
||||
origDir, _ := os.Getwd()
|
||||
defer func() {
|
||||
defer func() {
|
||||
_ = os.Chdir(origDir)
|
||||
git.ResetCaches()
|
||||
_ = config.Initialize()
|
||||
}()
|
||||
if err := os.Chdir(worktreeDir); err != nil {
|
||||
@@ -200,6 +214,9 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
|
||||
}
|
||||
git.ResetCaches()
|
||||
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Clear all daemon-related env vars
|
||||
os.Unsetenv("BEADS_NO_DAEMON")
|
||||
os.Unsetenv("BEADS_AUTO_START_DAEMON")
|
||||
@@ -225,8 +242,9 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
|
||||
|
||||
// Change to the worktree directory
|
||||
origDir, _ := os.Getwd()
|
||||
defer func() {
|
||||
defer func() {
|
||||
_ = os.Chdir(origDir)
|
||||
git.ResetCaches()
|
||||
_ = config.Initialize()
|
||||
}()
|
||||
if err := os.Chdir(worktreeDir); err != nil {
|
||||
@@ -234,6 +252,9 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
|
||||
}
|
||||
git.ResetCaches()
|
||||
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Reinitialize config to pick up the new directory's config.yaml
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to reinitialize config: %v", err)
|
||||
@@ -259,8 +280,9 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
|
||||
|
||||
// Change to the worktree directory
|
||||
origDir, _ := os.Getwd()
|
||||
defer func() {
|
||||
defer func() {
|
||||
_ = os.Chdir(origDir)
|
||||
git.ResetCaches()
|
||||
_ = config.Initialize()
|
||||
}()
|
||||
if err := os.Chdir(worktreeDir); err != nil {
|
||||
@@ -268,6 +290,9 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
|
||||
}
|
||||
git.ResetCaches()
|
||||
|
||||
// Reset git caches after changing directory
|
||||
git.ResetCaches()
|
||||
|
||||
// Reinitialize config to pick up the new directory's config.yaml
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to reinitialize config: %v", err)
|
||||
@@ -309,8 +334,8 @@ func setupWorktreeTestRepo(t *testing.T) (mainDir, worktreeDir string) {
|
||||
// Create main repo directory
|
||||
mainDir = t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
cmd := exec.Command("git", "init", "--initial-branch=main")
|
||||
cmd.Dir = mainDir
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
t.Fatalf("Failed to init git repo: %v\n%s", err, output)
|
||||
|
||||
@@ -212,6 +212,8 @@ func expandLoopWithVars(step *Step, vars map[string]string) ([]*Step, error) {
|
||||
// expandLoopIteration expands a single iteration of a loop.
|
||||
// The iteration index is used to generate unique step IDs.
|
||||
// The iterVars map contains loop variable bindings for this iteration (gt-8tmz.27).
|
||||
//
|
||||
//nolint:unparam // error return kept for API consistency with future error handling
|
||||
func expandLoopIteration(step *Step, iteration int, iterVars map[string]string) ([]*Step, error) {
|
||||
result := make([]*Step, 0, len(step.Loop.Body))
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ type Route struct {
|
||||
// Returns an empty slice if the file doesn't exist.
|
||||
func LoadRoutes(beadsDir string) ([]Route, error) {
|
||||
routesPath := filepath.Join(beadsDir, RoutesFileName)
|
||||
file, err := os.Open(routesPath)
|
||||
file, err := os.Open(routesPath) //nolint:gosec // routesPath is constructed from known beadsDir
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil // No routes file is not an error
|
||||
@@ -159,7 +159,7 @@ func ResolveBeadsDirForID(ctx context.Context, id, currentBeadsDir string) (stri
|
||||
// and resolves the redirect path if present.
|
||||
func resolveRedirect(beadsDir string) string {
|
||||
redirectFile := filepath.Join(beadsDir, "redirect")
|
||||
data, err := os.ReadFile(redirectFile)
|
||||
data, err := os.ReadFile(redirectFile) //nolint:gosec // redirectFile is constructed from known beadsDir
|
||||
if err != nil {
|
||||
if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] No redirect file at %s: %v\n", redirectFile, err)
|
||||
|
||||
@@ -31,7 +31,7 @@ func TestCommitToSyncBranch(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "initial sync branch commit")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Write new content to commit
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`+"\n"+`{"id":"test-2"}`)
|
||||
@@ -64,7 +64,7 @@ func TestCommitToSyncBranch(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "initial")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Write the same content that's in the sync branch
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
@@ -101,7 +101,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "local sync")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Pull should handle the case where remote doesn't have the branch
|
||||
result, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
|
||||
@@ -131,7 +131,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
runGit(t, repoDir, "commit", "-m", "sync commit")
|
||||
// Set up a fake remote ref at the same commit
|
||||
runGit(t, repoDir, "update-ref", "refs/remotes/origin/"+syncBranch, "HEAD")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Pull when already at remote HEAD
|
||||
result, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
|
||||
@@ -158,7 +158,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "sync commit")
|
||||
runGit(t, repoDir, "update-ref", "refs/remotes/origin/"+syncBranch, "HEAD")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Remove local JSONL to verify it gets copied back
|
||||
os.Remove(jsonlPath)
|
||||
@@ -198,7 +198,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
|
||||
// Reset back to base (so remote is ahead)
|
||||
runGit(t, repoDir, "reset", "--hard", baseCommit)
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Pull should fast-forward
|
||||
result, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
|
||||
@@ -233,7 +233,7 @@ func TestResetToRemote(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"local-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "local commit")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// ResetToRemote should fail since remote branch doesn't exist
|
||||
err := ResetToRemote(ctx, repoDir, syncBranch, jsonlPath)
|
||||
@@ -264,7 +264,7 @@ func TestPushSyncBranch(t *testing.T) {
|
||||
writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "initial")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// PushSyncBranch should handle the worktree creation
|
||||
err := PushSyncBranch(ctx, repoDir, syncBranch)
|
||||
@@ -391,8 +391,8 @@ func setupTestRepoWithRemote(t *testing.T) string {
|
||||
t.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo
|
||||
runGit(t, tmpDir, "init", "-b", "master")
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
runGit(t, tmpDir, "init", "--initial-branch=main")
|
||||
runGit(t, tmpDir, "config", "user.email", "test@test.com")
|
||||
runGit(t, tmpDir, "config", "user.name", "Test User")
|
||||
|
||||
|
||||
Reference in New Issue
Block a user