Introduce slog for structured daemon logging (bd-u2sc.4)

- Replace custom daemonLogger with slog-based structured logging
- Add --log-level flag (debug, info, warn, error) with default "info"
- Add --log-json flag for JSON-formatted log output
- Update log calls to use appropriate levels (Error, Warn, Info, Debug)
- Add test helper functions (newTestLogger, newTestLoggerWithWriter)

Log output examples:
  Text: time=2025-12-23T12:30:00Z level=INFO msg="daemon started" socket=/tmp/bd.sock
  JSON: {"time":"...","level":"INFO","msg":"daemon started","socket":"/tmp/bd.sock"}

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-23 13:43:40 -08:00
parent d834e4f4d8
commit 05f37d2408
10 changed files with 237 additions and 160 deletions

View File

@@ -56,6 +56,8 @@ Run 'bd daemon' with no flags to see available options.`,
localMode, _ := cmd.Flags().GetBool("local") localMode, _ := cmd.Flags().GetBool("local")
logFile, _ := cmd.Flags().GetString("log") logFile, _ := cmd.Flags().GetString("log")
foreground, _ := cmd.Flags().GetBool("foreground") foreground, _ := cmd.Flags().GetBool("foreground")
logLevel, _ := cmd.Flags().GetString("log-level")
logJSON, _ := cmd.Flags().GetBool("log-json")
// If no operation flags provided, show help // If no operation flags provided, show help
if !start && !stop && !stopAll && !status && !health && !metrics { if !start && !stop && !stopAll && !status && !health && !metrics {
@@ -245,7 +247,7 @@ Run 'bd daemon' with no flags to see available options.`,
fmt.Printf("Logging to: %s\n", logFile) fmt.Printf("Logging to: %s\n", logFile)
} }
startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile) startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile, logLevel, logJSON)
}, },
} }
@@ -263,6 +265,8 @@ func init() {
daemonCmd.Flags().Bool("metrics", false, "Show detailed daemon metrics") daemonCmd.Flags().Bool("metrics", false, "Show detailed daemon metrics")
daemonCmd.Flags().String("log", "", "Log file path (default: .beads/daemon.log)") daemonCmd.Flags().String("log", "", "Log file path (default: .beads/daemon.log)")
daemonCmd.Flags().Bool("foreground", false, "Run in foreground (don't daemonize)") daemonCmd.Flags().Bool("foreground", false, "Run in foreground (don't daemonize)")
daemonCmd.Flags().String("log-level", "info", "Log level (debug, info, warn, error)")
daemonCmd.Flags().Bool("log-json", false, "Output logs in JSON format (structured logging)")
daemonCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output JSON format") daemonCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output JSON format")
rootCmd.AddCommand(daemonCmd) rootCmd.AddCommand(daemonCmd)
} }
@@ -279,8 +283,9 @@ func computeDaemonParentPID() int {
} }
return os.Getppid() return os.Getppid()
} }
func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile string) { func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile, logLevel string, logJSON bool) {
logF, log := setupDaemonLogger(logPath) level := parseLogLevel(logLevel)
logF, log := setupDaemonLogger(logPath, logJSON, level)
defer func() { _ = logF.Close() }() defer func() { _ = logF.Close() }()
// Set up signal-aware context for graceful shutdown // Set up signal-aware context for graceful shutdown
@@ -290,13 +295,13 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Top-level panic recovery to ensure clean shutdown and diagnostics // Top-level panic recovery to ensure clean shutdown and diagnostics
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
log.log("PANIC: daemon crashed: %v", r) log.Error("daemon crashed", "panic", r)
// Capture stack trace // Capture stack trace
stackBuf := make([]byte, 4096) stackBuf := make([]byte, 4096)
stackSize := runtime.Stack(stackBuf, false) stackSize := runtime.Stack(stackBuf, false)
stackTrace := string(stackBuf[:stackSize]) stackTrace := string(stackBuf[:stackSize])
log.log("Stack trace:\n%s", stackTrace) log.Error("stack trace", "trace", stackTrace)
// Write crash report to daemon-error file for user visibility // Write crash report to daemon-error file for user visibility
var beadsDir string var beadsDir string
@@ -312,14 +317,14 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
time.Now().Format(time.RFC3339), r, stackTrace) time.Now().Format(time.RFC3339), r, stackTrace)
// nolint:gosec // G306: Error file needs to be readable for debugging // nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(crashReport), 0644); err != nil { if err := os.WriteFile(errFile, []byte(crashReport), 0644); err != nil {
log.log("Warning: could not write crash report: %v", err) log.Warn("could not write crash report", "error", err)
} }
} }
// Clean up PID file // Clean up PID file
_ = os.Remove(pidFile) _ = os.Remove(pidFile)
log.log("Daemon terminated after panic") log.Info("daemon terminated after panic")
} }
}() }()
@@ -329,8 +334,8 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
if foundDB := beads.FindDatabasePath(); foundDB != "" { if foundDB := beads.FindDatabasePath(); foundDB != "" {
daemonDBPath = foundDB daemonDBPath = foundDB
} else { } else {
log.log("Error: no beads database found") log.Error("no beads database found")
log.log("Hint: run 'bd init' to create a database or set BEADS_DB environment variable") log.Info("hint: run 'bd init' to create a database or set BEADS_DB environment variable")
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} }
} }
@@ -376,7 +381,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
errFile := filepath.Join(beadsDir, "daemon-error") errFile := filepath.Join(beadsDir, "daemon-error")
// nolint:gosec // G306: Error file needs to be readable for debugging // nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(errMsg), 0644); err != nil { if err := os.WriteFile(errFile, []byte(errMsg), 0644); err != nil {
log.log("Warning: could not write daemon-error file: %v", err) log.Warn("could not write daemon-error file", "error", err)
} }
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
@@ -386,24 +391,22 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Validate using canonical name // Validate using canonical name
dbBaseName := filepath.Base(daemonDBPath) dbBaseName := filepath.Base(daemonDBPath)
if dbBaseName != beads.CanonicalDatabaseName { if dbBaseName != beads.CanonicalDatabaseName {
log.log("Error: Non-canonical database name: %s", dbBaseName) log.Error("non-canonical database name", "name", dbBaseName, "expected", beads.CanonicalDatabaseName)
log.log("Expected: %s", beads.CanonicalDatabaseName) log.Info("run 'bd init' to migrate to canonical name")
log.log("")
log.log("Run 'bd init' to migrate to canonical name")
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} }
log.log("Using database: %s", daemonDBPath) log.Info("using database", "path", daemonDBPath)
// Clear any previous daemon-error file on successful startup // Clear any previous daemon-error file on successful startup
errFile := filepath.Join(beadsDir, "daemon-error") errFile := filepath.Join(beadsDir, "daemon-error")
if err := os.Remove(errFile); err != nil && !os.IsNotExist(err) { if err := os.Remove(errFile); err != nil && !os.IsNotExist(err) {
log.log("Warning: could not remove daemon-error file: %v", err) log.Warn("could not remove daemon-error file", "error", err)
} }
store, err := sqlite.New(ctx, daemonDBPath) store, err := sqlite.New(ctx, daemonDBPath)
if err != nil { if err != nil {
log.log("Error: cannot open database: %v", err) log.Error("cannot open database", "error", err)
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} }
defer func() { _ = store.Close() }() defer func() { _ = store.Close() }()
@@ -411,73 +414,71 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Enable freshness checking to detect external database file modifications // Enable freshness checking to detect external database file modifications
// (e.g., when git merge replaces the database file) // (e.g., when git merge replaces the database file)
store.EnableFreshnessChecking() store.EnableFreshnessChecking()
log.log("Database opened: %s (freshness checking enabled)", daemonDBPath) log.Info("database opened", "path", daemonDBPath, "freshness_checking", true)
// Auto-upgrade .beads/.gitignore if outdated // Auto-upgrade .beads/.gitignore if outdated
gitignoreCheck := doctor.CheckGitignore() gitignoreCheck := doctor.CheckGitignore()
if gitignoreCheck.Status == "warning" || gitignoreCheck.Status == "error" { if gitignoreCheck.Status == "warning" || gitignoreCheck.Status == "error" {
log.log("Upgrading .beads/.gitignore...") log.Info("upgrading .beads/.gitignore")
if err := doctor.FixGitignore(); err != nil { if err := doctor.FixGitignore(); err != nil {
log.log("Warning: failed to upgrade .gitignore: %v", err) log.Warn("failed to upgrade .gitignore", "error", err)
} else { } else {
log.log("Successfully upgraded .beads/.gitignore") log.Info("successfully upgraded .beads/.gitignore")
} }
} }
// Hydrate from multi-repo if configured // Hydrate from multi-repo if configured
if results, err := store.HydrateFromMultiRepo(ctx); err != nil { if results, err := store.HydrateFromMultiRepo(ctx); err != nil {
log.log("Error: multi-repo hydration failed: %v", err) log.Error("multi-repo hydration failed", "error", err)
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} else if results != nil { } else if results != nil {
log.log("Multi-repo hydration complete:") log.Info("multi-repo hydration complete")
for repo, count := range results { for repo, count := range results {
log.log(" %s: %d issues", repo, count) log.Info("hydrated issues", "repo", repo, "count", count)
} }
} }
// Validate database fingerprint (skip in local mode - no git available) // Validate database fingerprint (skip in local mode - no git available)
if localMode { if localMode {
log.log("Skipping fingerprint validation (local mode)") log.Info("skipping fingerprint validation (local mode)")
} else if err := validateDatabaseFingerprint(ctx, store, &log); err != nil { } else if err := validateDatabaseFingerprint(ctx, store, &log); err != nil {
if os.Getenv("BEADS_IGNORE_REPO_MISMATCH") != "1" { if os.Getenv("BEADS_IGNORE_REPO_MISMATCH") != "1" {
log.log("Error: %v", err) log.Error("repository fingerprint validation failed", "error", err)
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} }
log.log("Warning: repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)") log.Warn("repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
} }
// Validate schema version matches daemon version // Validate schema version matches daemon version
versionCtx := context.Background() versionCtx := context.Background()
dbVersion, err := store.GetMetadata(versionCtx, "bd_version") dbVersion, err := store.GetMetadata(versionCtx, "bd_version")
if err != nil && err.Error() != "metadata key not found: bd_version" { if err != nil && err.Error() != "metadata key not found: bd_version" {
log.log("Error: failed to read database version: %v", err) log.Error("failed to read database version", "error", err)
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} }
if dbVersion != "" && dbVersion != Version { if dbVersion != "" && dbVersion != Version {
log.log("Warning: Database schema version mismatch") log.Warn("database schema version mismatch", "db_version", dbVersion, "daemon_version", Version)
log.log(" Database version: %s", dbVersion) log.Info("auto-upgrading database to daemon version")
log.log(" Daemon version: %s", Version)
log.log(" Auto-upgrading database to daemon version...")
// Auto-upgrade database to daemon version // Auto-upgrade database to daemon version
// The daemon operates on its own database, so it should always use its own version // The daemon operates on its own database, so it should always use its own version
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil { if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.log("Error: failed to update database version: %v", err) log.Error("failed to update database version", "error", err)
// Allow override via environment variable for emergencies // Allow override via environment variable for emergencies
if os.Getenv("BEADS_IGNORE_VERSION_MISMATCH") != "1" { if os.Getenv("BEADS_IGNORE_VERSION_MISMATCH") != "1" {
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} }
log.log("Warning: Proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)") log.Warn("proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)")
} else { } else {
log.log(" Database version updated to %s", Version) log.Info("database version updated", "version", Version)
} }
} else if dbVersion == "" { } else if dbVersion == "" {
// Old database without version metadata - set it now // Old database without version metadata - set it now
log.log("Warning: Database missing version metadata, setting to %s", Version) log.Warn("database missing version metadata", "setting_to", Version)
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil { if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.log("Error: failed to set database version: %v", err) log.Error("failed to set database version", "error", err)
return // Use return instead of os.Exit to allow defers to run return // Use return instead of os.Exit to allow defers to run
} }
} }
@@ -506,7 +507,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Register daemon in global registry // Register daemon in global registry
registry, err := daemon.NewRegistry() registry, err := daemon.NewRegistry()
if err != nil { if err != nil {
log.log("Warning: failed to create registry: %v", err) log.Warn("failed to create registry", "error", err)
} else { } else {
entry := daemon.RegistryEntry{ entry := daemon.RegistryEntry{
WorkspacePath: workspacePath, WorkspacePath: workspacePath,
@@ -517,14 +518,14 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
StartedAt: time.Now(), StartedAt: time.Now(),
} }
if err := registry.Register(entry); err != nil { if err := registry.Register(entry); err != nil {
log.log("Warning: failed to register daemon: %v", err) log.Warn("failed to register daemon", "error", err)
} else { } else {
log.log("Registered in global registry") log.Info("registered in global registry")
} }
// Ensure we unregister on exit // Ensure we unregister on exit
defer func() { defer func() {
if err := registry.Unregister(workspacePath, os.Getpid()); err != nil { if err := registry.Unregister(workspacePath, os.Getpid()); err != nil {
log.log("Warning: failed to unregister daemon: %v", err) log.Warn("failed to unregister daemon", "error", err)
} }
}() }()
} }
@@ -543,16 +544,16 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Get parent PID for monitoring (exit if parent dies) // Get parent PID for monitoring (exit if parent dies)
parentPID := computeDaemonParentPID() parentPID := computeDaemonParentPID()
log.log("Monitoring parent process (PID %d)", parentPID) log.Info("monitoring parent process", "pid", parentPID)
// daemonMode already determined above for SetConfig // daemonMode already determined above for SetConfig
switch daemonMode { switch daemonMode {
case "events": case "events":
log.log("Using event-driven mode") log.Info("using event-driven mode")
jsonlPath := findJSONLPath() jsonlPath := findJSONLPath()
if jsonlPath == "" { if jsonlPath == "" {
log.log("Error: JSONL path not found, cannot use event-driven mode") log.Error("JSONL path not found, cannot use event-driven mode")
log.log("Falling back to polling mode") log.Info("falling back to polling mode")
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log) runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
} else { } else {
// Event-driven mode uses separate export-only and import-only functions // Event-driven mode uses separate export-only and import-only functions
@@ -567,10 +568,10 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
runEventDrivenLoop(ctx, cancel, server, serverErrChan, store, jsonlPath, doExport, doAutoImport, autoPull, parentPID, log) runEventDrivenLoop(ctx, cancel, server, serverErrChan, store, jsonlPath, doExport, doAutoImport, autoPull, parentPID, log)
} }
case "poll": case "poll":
log.log("Using polling mode (interval: %v)", interval) log.Info("using polling mode", "interval", interval)
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log) runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
default: default:
log.log("Unknown BEADS_DAEMON_MODE: %s (valid: poll, events), defaulting to poll", daemonMode) log.Warn("unknown BEADS_DAEMON_MODE, defaulting to poll", "mode", daemonMode, "valid", "poll, events")
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log) runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
} }
} }

View File

@@ -457,11 +457,7 @@ func TestEventLoopSignalHandling(t *testing.T) {
// createTestLogger creates a daemonLogger for testing // createTestLogger creates a daemonLogger for testing
func createTestLogger(t *testing.T) daemonLogger { func createTestLogger(t *testing.T) daemonLogger {
return daemonLogger{ return newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf("[daemon] "+format, args...)
},
}
} }
// TestDaemonIntegration_SocketCleanup verifies socket cleanup after daemon stops // TestDaemonIntegration_SocketCleanup verifies socket cleanup after daemon stops

View File

@@ -369,7 +369,7 @@ func stopAllDaemons() {
} }
// startDaemon starts the daemon (in foreground if requested, otherwise background) // startDaemon starts the daemon (in foreground if requested, otherwise background)
func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile string) { func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile, logLevel string, logJSON bool) {
logPath, err := getLogFilePath(logFile) logPath, err := getLogFilePath(logFile)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -378,7 +378,7 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
// Run in foreground if --foreground flag set or if we're the forked child process // Run in foreground if --foreground flag set or if we're the forked child process
if foreground || os.Getenv("BD_DAEMON_FOREGROUND") == "1" { if foreground || os.Getenv("BD_DAEMON_FOREGROUND") == "1" {
runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile) runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile, logLevel, logJSON)
return return
} }
@@ -406,6 +406,12 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
if logFile != "" { if logFile != "" {
args = append(args, "--log", logFile) args = append(args, "--log", logFile)
} }
if logLevel != "" && logLevel != "info" {
args = append(args, "--log-level", logLevel)
}
if logJSON {
args = append(args, "--log-json")
}
cmd := exec.Command(exe, args...) // #nosec G204 - bd daemon command from trusted binary cmd := exec.Command(exe, args...) // #nosec G204 - bd daemon command from trusted binary
cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1") cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1")
@@ -455,18 +461,18 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
// Detect nested .beads directories (e.g., .beads/.beads/.beads/) // Detect nested .beads directories (e.g., .beads/.beads/.beads/)
cleanPath := filepath.Clean(beadsDir) cleanPath := filepath.Clean(beadsDir)
if strings.Contains(cleanPath, string(filepath.Separator)+".beads"+string(filepath.Separator)+".beads") { if strings.Contains(cleanPath, string(filepath.Separator)+".beads"+string(filepath.Separator)+".beads") {
log.log("Error: Nested .beads directory detected: %s", cleanPath) log.Error("nested .beads directory detected", "path", cleanPath)
log.log("Hint: Do not run 'bd daemon' from inside .beads/ directory") log.Info("hint: do not run 'bd daemon' from inside .beads/ directory")
log.log("Hint: Use absolute paths for BEADS_DB or run from workspace root") log.Info("hint: use absolute paths for BEADS_DB or run from workspace root")
return nil, fmt.Errorf("nested .beads directory detected") return nil, fmt.Errorf("nested .beads directory detected")
} }
lock, err := acquireDaemonLock(beadsDir, dbPath) lock, err := acquireDaemonLock(beadsDir, dbPath)
if err != nil { if err != nil {
if err == ErrDaemonLocked { if err == ErrDaemonLocked {
log.log("Daemon already running (lock held), exiting") log.Info("daemon already running (lock held), exiting")
} else { } else {
log.log("Error acquiring daemon lock: %v", err) log.Error("acquiring daemon lock", "error", err)
} }
return nil, err return nil, err
} }
@@ -477,11 +483,11 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
if pid, err := strconv.Atoi(strings.TrimSpace(string(data))); err == nil && pid == myPID { if pid, err := strconv.Atoi(strings.TrimSpace(string(data))); err == nil && pid == myPID {
// PID file is correct, continue // PID file is correct, continue
} else { } else {
log.log("PID file has wrong PID (expected %d, got %d), overwriting", myPID, pid) log.Warn("PID file has wrong PID, overwriting", "expected", myPID, "got", pid)
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600) _ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
} }
} else { } else {
log.log("PID file missing after lock acquisition, creating") log.Info("PID file missing after lock acquisition, creating")
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600) _ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
} }

View File

@@ -122,12 +122,8 @@ func TestCreateLocalSyncFunc(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
} }
// Create logger // Create logger (test output via newTestLogger)
log := daemonLogger{ log := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Create and run local sync function // Create and run local sync function
doSync := createLocalSyncFunc(ctx, testStore, log) doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -193,11 +189,7 @@ func TestCreateLocalExportFunc(t *testing.T) {
} }
} }
log := daemonLogger{ log := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
doExport := createLocalExportFunc(ctx, testStore, log) doExport := createLocalExportFunc(ctx, testStore, log)
doExport() doExport()
@@ -258,11 +250,7 @@ func TestCreateLocalAutoImportFunc(t *testing.T) {
t.Fatalf("Failed to write JSONL: %v", err) t.Fatalf("Failed to write JSONL: %v", err)
} }
log := daemonLogger{ log := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
doImport := createLocalAutoImportFunc(ctx, testStore, log) doImport := createLocalAutoImportFunc(ctx, testStore, log)
doImport() doImport()
@@ -379,11 +367,7 @@ func TestLocalModeInNonGitDirectory(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err) t.Fatalf("Failed to create issue: %v", err)
} }
log := daemonLogger{ log := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Run local sync (should work without git) // Run local sync (should work without git)
doSync := createLocalSyncFunc(ctx, testStore, log) doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -437,11 +421,7 @@ func TestLocalModeExportImportRoundTrip(t *testing.T) {
defer func() { dbPath = oldDBPath }() defer func() { dbPath = oldDBPath }()
dbPath = testDBPath dbPath = testDBPath
log := daemonLogger{ log := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Create issues // Create issues
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {

View File

@@ -1,23 +1,97 @@
package main package main
import ( import (
"fmt" "io"
"time" "log/slog"
"os"
"strings"
"gopkg.in/natefinch/lumberjack.v2" "gopkg.in/natefinch/lumberjack.v2"
) )
// daemonLogger wraps a logging function for the daemon // daemonLogger wraps slog for daemon logging.
// Provides level-specific methods and backward-compatible log() for migration.
type daemonLogger struct { type daemonLogger struct {
logFunc func(string, ...interface{}) logger *slog.Logger
} }
// log is the backward-compatible logging method (maps to Info level).
// Use Info(), Warn(), Error(), Debug() for explicit levels.
func (d *daemonLogger) log(format string, args ...interface{}) { func (d *daemonLogger) log(format string, args ...interface{}) {
d.logFunc(format, args...) d.logger.Info(format, toSlogArgs(args)...)
} }
// setupDaemonLogger creates a rotating log file logger for the daemon // Info logs at INFO level.
func setupDaemonLogger(logPath string) (*lumberjack.Logger, daemonLogger) { func (d *daemonLogger) Info(msg string, args ...interface{}) {
d.logger.Info(msg, toSlogArgs(args)...)
}
// Warn logs msg at WARN level with optional slog key-value attributes.
func (d *daemonLogger) Warn(msg string, args ...interface{}) {
	kv := toSlogArgs(args)
	d.logger.Warn(msg, kv...)
}
// Error logs msg at ERROR level with optional slog key-value attributes.
func (d *daemonLogger) Error(msg string, args ...interface{}) {
	kv := toSlogArgs(args)
	d.logger.Error(msg, kv...)
}
// Debug logs msg at DEBUG level with optional slog key-value attributes.
func (d *daemonLogger) Debug(msg string, args ...interface{}) {
	kv := toSlogArgs(args)
	d.logger.Debug(msg, kv...)
}
// toSlogArgs converts variadic args to slog-compatible key-value pairs.
//
// In Go, []interface{} and []any are the same type, so the slice can be
// handed straight to slog, which interprets it as alternating key/value
// pairs (slog itself flags an odd trailing value as !BADKEY). The previous
// implementation contained two branches that each produced an identical
// element-by-element copy of args — the "key-value detection" had no
// observable effect, so both the branching and the copy are removed.
func toSlogArgs(args []interface{}) []any {
	// Preserve the nil return for empty input (nil and empty are
	// equivalent to slog, but callers may compare against nil).
	if len(args) == 0 {
		return nil
	}
	return args
}
// parseLogLevel converts a log level string to slog.Level.
func parseLogLevel(level string) slog.Level {
switch strings.ToLower(level) {
case "debug":
return slog.LevelDebug
case "info":
return slog.LevelInfo
case "warn", "warning":
return slog.LevelWarn
case "error":
return slog.LevelError
default:
return slog.LevelInfo
}
}
// setupDaemonLogger creates a structured logger for the daemon.
// Returns the lumberjack logger (for cleanup) and the daemon logger.
//
// Parameters:
// - logPath: path to log file (uses lumberjack for rotation)
// - jsonFormat: if true, output JSON; otherwise text format
// - level: log level (debug, info, warn, error)
func setupDaemonLogger(logPath string, jsonFormat bool, level slog.Level) (*lumberjack.Logger, daemonLogger) {
maxSizeMB := getEnvInt("BEADS_DAEMON_LOG_MAX_SIZE", 50) maxSizeMB := getEnvInt("BEADS_DAEMON_LOG_MAX_SIZE", 50)
maxBackups := getEnvInt("BEADS_DAEMON_LOG_MAX_BACKUPS", 7) maxBackups := getEnvInt("BEADS_DAEMON_LOG_MAX_BACKUPS", 7)
maxAgeDays := getEnvInt("BEADS_DAEMON_LOG_MAX_AGE", 30) maxAgeDays := getEnvInt("BEADS_DAEMON_LOG_MAX_AGE", 30)
@@ -31,13 +105,65 @@ func setupDaemonLogger(logPath string) (*lumberjack.Logger, daemonLogger) {
Compress: compress, Compress: compress,
} }
// Create multi-writer to log to both file and stderr (for foreground mode visibility)
var w io.Writer = logF
// Configure slog handler
opts := &slog.HandlerOptions{
Level: level,
}
var handler slog.Handler
if jsonFormat {
handler = slog.NewJSONHandler(w, opts)
} else {
handler = slog.NewTextHandler(w, opts)
}
logger := daemonLogger{ logger := daemonLogger{
logFunc: func(format string, args ...interface{}) { logger: slog.New(handler),
msg := fmt.Sprintf(format, args...)
timestamp := time.Now().Format("2006-01-02 15:04:05")
_, _ = fmt.Fprintf(logF, "[%s] %s\n", timestamp, msg)
},
} }
return logF, logger return logF, logger
} }
// setupDaemonLoggerLegacy is the old signature for backward compatibility during migration.
// It defaults to text (non-JSON) output at INFO level.
//
// Deprecated: call setupDaemonLogger directly with an explicit format and level.
// TODO: Remove this once all callers are updated to use the new signature.
func setupDaemonLoggerLegacy(logPath string) (*lumberjack.Logger, daemonLogger) {
	return setupDaemonLogger(logPath, false, slog.LevelInfo)
}
// SetupStderrLogger creates a logger that writes to stderr only (no file).
// Useful for foreground mode or testing.
//
// jsonFormat selects slog's JSON handler; otherwise logfmt-style text is
// emitted. Records below level are dropped by the handler.
func SetupStderrLogger(jsonFormat bool, level slog.Level) daemonLogger {
	opts := &slog.HandlerOptions{Level: level}
	// Text output is the default; switch to JSON only when requested.
	var handler slog.Handler = slog.NewTextHandler(os.Stderr, opts)
	if jsonFormat {
		handler = slog.NewJSONHandler(os.Stderr, opts)
	}
	return daemonLogger{logger: slog.New(handler)}
}
// newTestLogger creates a no-op logger for testing.
// All output is discarded; use newTestLoggerWithWriter when a test needs
// to capture and inspect the log output.
func newTestLogger() daemonLogger {
	discard := slog.NewTextHandler(io.Discard, nil)
	return daemonLogger{logger: slog.New(discard)}
}
// newTestLoggerWithWriter creates a logger that writes text-format records
// to the given writer. Use this when a test needs to capture and verify
// log output (e.g. via a bytes.Buffer).
func newTestLoggerWithWriter(w io.Writer) daemonLogger {
	handler := slog.NewTextHandler(w, nil)
	return daemonLogger{logger: slog.New(handler)}
}

View File

@@ -19,21 +19,21 @@ func startRPCServer(ctx context.Context, socketPath string, store storage.Storag
serverErrChan := make(chan error, 1) serverErrChan := make(chan error, 1)
go func() { go func() {
log.log("Starting RPC server: %s", socketPath) log.Info("starting RPC server", "socket", socketPath)
if err := server.Start(ctx); err != nil { if err := server.Start(ctx); err != nil {
log.log("RPC server error: %v", err) log.Error("RPC server error", "error", err)
serverErrChan <- err serverErrChan <- err
} }
}() }()
select { select {
case err := <-serverErrChan: case err := <-serverErrChan:
log.log("RPC server failed to start: %v", err) log.Error("RPC server failed to start", "error", err)
return nil, nil, err return nil, nil, err
case <-server.WaitReady(): case <-server.WaitReady():
log.log("RPC server ready (socket listening)") log.Info("RPC server ready (socket listening)")
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
log.log("WARNING: Server didn't signal ready after 5 seconds (may still be starting)") log.Warn("server didn't signal ready after 5 seconds (may still be starting)")
} }
return server, serverErrChan, nil return server, serverErrChan, nil
@@ -78,35 +78,35 @@ func runEventLoop(ctx context.Context, cancel context.CancelFunc, ticker *time.T
case <-parentCheckTicker.C: case <-parentCheckTicker.C:
// Check if parent process is still alive // Check if parent process is still alive
if !checkParentProcessAlive(parentPID) { if !checkParentProcessAlive(parentPID) {
log.log("Parent process (PID %d) died, shutting down daemon", parentPID) log.Info("parent process died, shutting down daemon", "parent_pid", parentPID)
cancel() cancel()
if err := server.Stop(); err != nil { if err := server.Stop(); err != nil {
log.log("Error stopping server: %v", err) log.Error("stopping server", "error", err)
} }
return return
} }
case sig := <-sigChan: case sig := <-sigChan:
if isReloadSignal(sig) { if isReloadSignal(sig) {
log.log("Received reload signal, ignoring (daemon continues running)") log.Info("received reload signal, ignoring (daemon continues running)")
continue continue
} }
log.log("Received signal %v, shutting down gracefully...", sig) log.Info("received signal, shutting down gracefully", "signal", sig)
cancel() cancel()
if err := server.Stop(); err != nil { if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err) log.Error("stopping RPC server", "error", err)
} }
return return
case <-ctx.Done(): case <-ctx.Done():
log.log("Context canceled, shutting down") log.Info("context canceled, shutting down")
if err := server.Stop(); err != nil { if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err) log.Error("stopping RPC server", "error", err)
} }
return return
case err := <-serverErrChan: case err := <-serverErrChan:
log.log("RPC server failed: %v", err) log.Error("RPC server failed", "error", err)
cancel() cancel()
if err := server.Stop(); err != nil { if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err) log.Error("stopping RPC server", "error", err)
} }
return return
} }

View File

@@ -772,13 +772,11 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
// Helper types for testing // Helper types for testing
func newTestSyncBranchLogger() (daemonLogger, *string) { func newTestSyncBranchLogger() (daemonLogger, *string) {
// Note: With slog, we can't easily capture formatted messages like before.
// For tests that need to verify log output, use strings.Builder and newTestLoggerWithWriter.
// This helper is kept for backward compatibility but messages won't be captured.
messages := "" messages := ""
logger := daemonLogger{ return newTestLogger(), &messages
logFunc: func(format string, args ...interface{}) {
messages += "\n" + format
},
}
return logger, &messages
} }
// TestSyncBranchConfigChange tests changing sync.branch after worktree exists // TestSyncBranchConfigChange tests changing sync.branch after worktree exists

View File

@@ -335,11 +335,7 @@ func TestExportUpdatesMetadata(t *testing.T) {
// Update metadata using the actual daemon helper function (bd-ar2.3 fix) // Update metadata using the actual daemon helper function (bd-ar2.3 fix)
// This verifies that updateExportMetadata (used by createExportFunc and createSyncFunc) works correctly // This verifies that updateExportMetadata (used by createExportFunc and createSyncFunc) works correctly
mockLogger := daemonLogger{ mockLogger := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "") updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Verify metadata was set (renamed from last_import_hash to jsonl_content_hash - bd-39o) // Verify metadata was set (renamed from last_import_hash to jsonl_content_hash - bd-39o)
@@ -438,11 +434,7 @@ func TestUpdateExportMetadataMultiRepo(t *testing.T) {
} }
// Create mock logger // Create mock logger
mockLogger := daemonLogger{ mockLogger := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Update metadata for each repo with different keys (bd-ar2.2 multi-repo support) // Update metadata for each repo with different keys (bd-ar2.2 multi-repo support)
updateExportMetadata(ctx, store, jsonlPath1, mockLogger, jsonlPath1) updateExportMetadata(ctx, store, jsonlPath1, mockLogger, jsonlPath1)
@@ -554,11 +546,7 @@ func TestExportWithMultiRepoConfigUpdatesAllMetadata(t *testing.T) {
// Simulate multi-repo export flow (as in createExportFunc) // Simulate multi-repo export flow (as in createExportFunc)
// This tests the full integration: getMultiRepoJSONLPaths -> getRepoKeyForPath -> updateExportMetadata // This tests the full integration: getMultiRepoJSONLPaths -> getRepoKeyForPath -> updateExportMetadata
mockLogger := daemonLogger{ mockLogger := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Simulate multi-repo mode with stable keys // Simulate multi-repo mode with stable keys
multiRepoPaths := []string{primaryJSONL, additionalJSONL} multiRepoPaths := []string{primaryJSONL, additionalJSONL}
@@ -676,11 +664,7 @@ func TestUpdateExportMetadataInvalidKeySuffix(t *testing.T) {
} }
// Create mock logger // Create mock logger
mockLogger := daemonLogger{ mockLogger := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Update metadata with keySuffix containing ':' (bd-web8: should be auto-sanitized) // Update metadata with keySuffix containing ':' (bd-web8: should be auto-sanitized)
// This simulates Windows absolute paths like "C:\Users\..." // This simulates Windows absolute paths like "C:\Users\..."

View File

@@ -15,9 +15,7 @@ import (
// newMockLogger creates a daemonLogger that does nothing // newMockLogger creates a daemonLogger that does nothing
func newMockLogger() daemonLogger { func newMockLogger() daemonLogger {
return daemonLogger{ return newTestLogger()
logFunc: func(format string, args ...interface{}) {},
}
} }
func TestFileWatcher_JSONLChangeDetection(t *testing.T) { func TestFileWatcher_JSONLChangeDetection(t *testing.T) {

View File

@@ -65,11 +65,7 @@ func TestExportUpdatesDatabaseMtime(t *testing.T) {
} }
// Update metadata after export (bd-ymj fix) // Update metadata after export (bd-ymj fix)
mockLogger := daemonLogger{ mockLogger := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "") updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Get JSONL mtime // Get JSONL mtime
@@ -170,11 +166,7 @@ func TestDaemonExportScenario(t *testing.T) {
} }
// Daemon updates metadata after export (bd-ymj fix) // Daemon updates metadata after export (bd-ymj fix)
mockLogger := daemonLogger{ mockLogger := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "") updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// THIS IS THE FIX: daemon now calls TouchDatabaseFile after export // THIS IS THE FIX: daemon now calls TouchDatabaseFile after export
@@ -249,11 +241,7 @@ func TestMultipleExportCycles(t *testing.T) {
} }
// Update metadata after export (bd-ymj fix) // Update metadata after export (bd-ymj fix)
mockLogger := daemonLogger{ mockLogger := newTestLogger()
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "") updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Apply fix // Apply fix