Introduce slog for structured daemon logging (bd-u2sc.4)

- Replace custom daemonLogger with slog-based structured logging
- Add --log-level flag (debug, info, warn, error) with default "info"
- Add --log-json flag for JSON-formatted log output
- Update log calls to use appropriate levels (Error, Warn, Info, Debug)
- Add test helper functions (newTestLogger, newTestLoggerWithWriter)

Log output examples:
  Text: time=2025-12-23T12:30:00Z level=INFO msg="daemon started" socket=/tmp/bd.sock
  JSON: {"time":"...","level":"INFO","msg":"daemon started","socket":"/tmp/bd.sock"}

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-23 13:43:40 -08:00
parent d834e4f4d8
commit 05f37d2408
10 changed files with 237 additions and 160 deletions

View File

@@ -56,6 +56,8 @@ Run 'bd daemon' with no flags to see available options.`,
localMode, _ := cmd.Flags().GetBool("local")
logFile, _ := cmd.Flags().GetString("log")
foreground, _ := cmd.Flags().GetBool("foreground")
logLevel, _ := cmd.Flags().GetString("log-level")
logJSON, _ := cmd.Flags().GetBool("log-json")
// If no operation flags provided, show help
if !start && !stop && !stopAll && !status && !health && !metrics {
@@ -245,7 +247,7 @@ Run 'bd daemon' with no flags to see available options.`,
fmt.Printf("Logging to: %s\n", logFile)
}
startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile)
startDaemon(interval, autoCommit, autoPush, autoPull, localMode, foreground, logFile, pidFile, logLevel, logJSON)
},
}
@@ -263,6 +265,8 @@ func init() {
daemonCmd.Flags().Bool("metrics", false, "Show detailed daemon metrics")
daemonCmd.Flags().String("log", "", "Log file path (default: .beads/daemon.log)")
daemonCmd.Flags().Bool("foreground", false, "Run in foreground (don't daemonize)")
daemonCmd.Flags().String("log-level", "info", "Log level (debug, info, warn, error)")
daemonCmd.Flags().Bool("log-json", false, "Output logs in JSON format (structured logging)")
daemonCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output JSON format")
rootCmd.AddCommand(daemonCmd)
}
@@ -279,8 +283,9 @@ func computeDaemonParentPID() int {
}
return os.Getppid()
}
func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile string) {
logF, log := setupDaemonLogger(logPath)
func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, localMode bool, logPath, pidFile, logLevel string, logJSON bool) {
level := parseLogLevel(logLevel)
logF, log := setupDaemonLogger(logPath, logJSON, level)
defer func() { _ = logF.Close() }()
// Set up signal-aware context for graceful shutdown
@@ -290,13 +295,13 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Top-level panic recovery to ensure clean shutdown and diagnostics
defer func() {
if r := recover(); r != nil {
log.log("PANIC: daemon crashed: %v", r)
log.Error("daemon crashed", "panic", r)
// Capture stack trace
stackBuf := make([]byte, 4096)
stackSize := runtime.Stack(stackBuf, false)
stackTrace := string(stackBuf[:stackSize])
log.log("Stack trace:\n%s", stackTrace)
log.Error("stack trace", "trace", stackTrace)
// Write crash report to daemon-error file for user visibility
var beadsDir string
@@ -305,21 +310,21 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
} else if foundDB := beads.FindDatabasePath(); foundDB != "" {
beadsDir = filepath.Dir(foundDB)
}
if beadsDir != "" {
errFile := filepath.Join(beadsDir, "daemon-error")
crashReport := fmt.Sprintf("Daemon crashed at %s\n\nPanic: %v\n\nStack trace:\n%s\n",
time.Now().Format(time.RFC3339), r, stackTrace)
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(crashReport), 0644); err != nil {
log.log("Warning: could not write crash report: %v", err)
log.Warn("could not write crash report", "error", err)
}
}
// Clean up PID file
_ = os.Remove(pidFile)
log.log("Daemon terminated after panic")
log.Info("daemon terminated after panic")
}
}()
@@ -329,8 +334,8 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
if foundDB := beads.FindDatabasePath(); foundDB != "" {
daemonDBPath = foundDB
} else {
log.log("Error: no beads database found")
log.log("Hint: run 'bd init' to create a database or set BEADS_DB environment variable")
log.Error("no beads database found")
log.Info("hint: run 'bd init' to create a database or set BEADS_DB environment variable")
return // Use return instead of os.Exit to allow defers to run
}
}
@@ -376,7 +381,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
errFile := filepath.Join(beadsDir, "daemon-error")
// nolint:gosec // G306: Error file needs to be readable for debugging
if err := os.WriteFile(errFile, []byte(errMsg), 0644); err != nil {
log.log("Warning: could not write daemon-error file: %v", err)
log.Warn("could not write daemon-error file", "error", err)
}
return // Use return instead of os.Exit to allow defers to run
@@ -386,24 +391,22 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Validate using canonical name
dbBaseName := filepath.Base(daemonDBPath)
if dbBaseName != beads.CanonicalDatabaseName {
log.log("Error: Non-canonical database name: %s", dbBaseName)
log.log("Expected: %s", beads.CanonicalDatabaseName)
log.log("")
log.log("Run 'bd init' to migrate to canonical name")
log.Error("non-canonical database name", "name", dbBaseName, "expected", beads.CanonicalDatabaseName)
log.Info("run 'bd init' to migrate to canonical name")
return // Use return instead of os.Exit to allow defers to run
}
log.log("Using database: %s", daemonDBPath)
log.Info("using database", "path", daemonDBPath)
// Clear any previous daemon-error file on successful startup
errFile := filepath.Join(beadsDir, "daemon-error")
if err := os.Remove(errFile); err != nil && !os.IsNotExist(err) {
log.log("Warning: could not remove daemon-error file: %v", err)
log.Warn("could not remove daemon-error file", "error", err)
}
store, err := sqlite.New(ctx, daemonDBPath)
if err != nil {
log.log("Error: cannot open database: %v", err)
log.Error("cannot open database", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
defer func() { _ = store.Close() }()
@@ -411,73 +414,71 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Enable freshness checking to detect external database file modifications
// (e.g., when git merge replaces the database file)
store.EnableFreshnessChecking()
log.log("Database opened: %s (freshness checking enabled)", daemonDBPath)
log.Info("database opened", "path", daemonDBPath, "freshness_checking", true)
// Auto-upgrade .beads/.gitignore if outdated
gitignoreCheck := doctor.CheckGitignore()
if gitignoreCheck.Status == "warning" || gitignoreCheck.Status == "error" {
log.log("Upgrading .beads/.gitignore...")
log.Info("upgrading .beads/.gitignore")
if err := doctor.FixGitignore(); err != nil {
log.log("Warning: failed to upgrade .gitignore: %v", err)
log.Warn("failed to upgrade .gitignore", "error", err)
} else {
log.log("Successfully upgraded .beads/.gitignore")
log.Info("successfully upgraded .beads/.gitignore")
}
}
// Hydrate from multi-repo if configured
if results, err := store.HydrateFromMultiRepo(ctx); err != nil {
log.log("Error: multi-repo hydration failed: %v", err)
log.Error("multi-repo hydration failed", "error", err)
return // Use return instead of os.Exit to allow defers to run
} else if results != nil {
log.log("Multi-repo hydration complete:")
log.Info("multi-repo hydration complete")
for repo, count := range results {
log.log(" %s: %d issues", repo, count)
log.Info("hydrated issues", "repo", repo, "count", count)
}
}
// Validate database fingerprint (skip in local mode - no git available)
if localMode {
log.log("Skipping fingerprint validation (local mode)")
log.Info("skipping fingerprint validation (local mode)")
} else if err := validateDatabaseFingerprint(ctx, store, &log); err != nil {
if os.Getenv("BEADS_IGNORE_REPO_MISMATCH") != "1" {
log.log("Error: %v", err)
log.Error("repository fingerprint validation failed", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
log.log("Warning: repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
log.Warn("repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
}
// Validate schema version matches daemon version
versionCtx := context.Background()
dbVersion, err := store.GetMetadata(versionCtx, "bd_version")
if err != nil && err.Error() != "metadata key not found: bd_version" {
log.log("Error: failed to read database version: %v", err)
log.Error("failed to read database version", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
if dbVersion != "" && dbVersion != Version {
log.log("Warning: Database schema version mismatch")
log.log(" Database version: %s", dbVersion)
log.log(" Daemon version: %s", Version)
log.log(" Auto-upgrading database to daemon version...")
log.Warn("database schema version mismatch", "db_version", dbVersion, "daemon_version", Version)
log.Info("auto-upgrading database to daemon version")
// Auto-upgrade database to daemon version
// The daemon operates on its own database, so it should always use its own version
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.log("Error: failed to update database version: %v", err)
log.Error("failed to update database version", "error", err)
// Allow override via environment variable for emergencies
if os.Getenv("BEADS_IGNORE_VERSION_MISMATCH") != "1" {
return // Use return instead of os.Exit to allow defers to run
}
log.log("Warning: Proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)")
log.Warn("proceeding despite version update failure (BEADS_IGNORE_VERSION_MISMATCH=1)")
} else {
log.log(" Database version updated to %s", Version)
log.Info("database version updated", "version", Version)
}
} else if dbVersion == "" {
// Old database without version metadata - set it now
log.log("Warning: Database missing version metadata, setting to %s", Version)
log.Warn("database missing version metadata", "setting_to", Version)
if err := store.SetMetadata(versionCtx, "bd_version", Version); err != nil {
log.log("Error: failed to set database version: %v", err)
log.Error("failed to set database version", "error", err)
return // Use return instead of os.Exit to allow defers to run
}
}
@@ -506,7 +507,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Register daemon in global registry
registry, err := daemon.NewRegistry()
if err != nil {
log.log("Warning: failed to create registry: %v", err)
log.Warn("failed to create registry", "error", err)
} else {
entry := daemon.RegistryEntry{
WorkspacePath: workspacePath,
@@ -517,14 +518,14 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
StartedAt: time.Now(),
}
if err := registry.Register(entry); err != nil {
log.log("Warning: failed to register daemon: %v", err)
log.Warn("failed to register daemon", "error", err)
} else {
log.log("Registered in global registry")
log.Info("registered in global registry")
}
// Ensure we unregister on exit
defer func() {
if err := registry.Unregister(workspacePath, os.Getpid()); err != nil {
log.log("Warning: failed to unregister daemon: %v", err)
log.Warn("failed to unregister daemon", "error", err)
}
}()
}
@@ -543,16 +544,16 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Get parent PID for monitoring (exit if parent dies)
parentPID := computeDaemonParentPID()
log.log("Monitoring parent process (PID %d)", parentPID)
log.Info("monitoring parent process", "pid", parentPID)
// daemonMode already determined above for SetConfig
switch daemonMode {
case "events":
log.log("Using event-driven mode")
log.Info("using event-driven mode")
jsonlPath := findJSONLPath()
if jsonlPath == "" {
log.log("Error: JSONL path not found, cannot use event-driven mode")
log.log("Falling back to polling mode")
log.Error("JSONL path not found, cannot use event-driven mode")
log.Info("falling back to polling mode")
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
} else {
// Event-driven mode uses separate export-only and import-only functions
@@ -567,10 +568,10 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
runEventDrivenLoop(ctx, cancel, server, serverErrChan, store, jsonlPath, doExport, doAutoImport, autoPull, parentPID, log)
}
case "poll":
log.log("Using polling mode (interval: %v)", interval)
log.Info("using polling mode", "interval", interval)
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
default:
log.log("Unknown BEADS_DAEMON_MODE: %s (valid: poll, events), defaulting to poll", daemonMode)
log.Warn("unknown BEADS_DAEMON_MODE, defaulting to poll", "mode", daemonMode, "valid", "poll, events")
runEventLoop(ctx, cancel, ticker, doSync, server, serverErrChan, parentPID, log)
}
}

View File

@@ -457,11 +457,7 @@ func TestEventLoopSignalHandling(t *testing.T) {
// createTestLogger creates a daemonLogger for testing
func createTestLogger(t *testing.T) daemonLogger {
return daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf("[daemon] "+format, args...)
},
}
return newTestLogger()
}
// TestDaemonIntegration_SocketCleanup verifies socket cleanup after daemon stops

View File

@@ -369,7 +369,7 @@ func stopAllDaemons() {
}
// startDaemon starts the daemon (in foreground if requested, otherwise background)
func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile string) {
func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMode, foreground bool, logFile, pidFile, logLevel string, logJSON bool) {
logPath, err := getLogFilePath(logFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
@@ -378,7 +378,7 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
// Run in foreground if --foreground flag set or if we're the forked child process
if foreground || os.Getenv("BD_DAEMON_FOREGROUND") == "1" {
runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile)
runDaemonLoop(interval, autoCommit, autoPush, autoPull, localMode, logPath, pidFile, logLevel, logJSON)
return
}
@@ -406,6 +406,12 @@ func startDaemon(interval time.Duration, autoCommit, autoPush, autoPull, localMo
if logFile != "" {
args = append(args, "--log", logFile)
}
if logLevel != "" && logLevel != "info" {
args = append(args, "--log-level", logLevel)
}
if logJSON {
args = append(args, "--log-json")
}
cmd := exec.Command(exe, args...) // #nosec G204 - bd daemon command from trusted binary
cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1")
@@ -455,18 +461,18 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
// Detect nested .beads directories (e.g., .beads/.beads/.beads/)
cleanPath := filepath.Clean(beadsDir)
if strings.Contains(cleanPath, string(filepath.Separator)+".beads"+string(filepath.Separator)+".beads") {
log.log("Error: Nested .beads directory detected: %s", cleanPath)
log.log("Hint: Do not run 'bd daemon' from inside .beads/ directory")
log.log("Hint: Use absolute paths for BEADS_DB or run from workspace root")
log.Error("nested .beads directory detected", "path", cleanPath)
log.Info("hint: do not run 'bd daemon' from inside .beads/ directory")
log.Info("hint: use absolute paths for BEADS_DB or run from workspace root")
return nil, fmt.Errorf("nested .beads directory detected")
}
lock, err := acquireDaemonLock(beadsDir, dbPath)
if err != nil {
if err == ErrDaemonLocked {
log.log("Daemon already running (lock held), exiting")
log.Info("daemon already running (lock held), exiting")
} else {
log.log("Error acquiring daemon lock: %v", err)
log.Error("acquiring daemon lock", "error", err)
}
return nil, err
}
@@ -477,11 +483,11 @@ func setupDaemonLock(pidFile string, dbPath string, log daemonLogger) (*DaemonLo
if pid, err := strconv.Atoi(strings.TrimSpace(string(data))); err == nil && pid == myPID {
// PID file is correct, continue
} else {
log.log("PID file has wrong PID (expected %d, got %d), overwriting", myPID, pid)
log.Warn("PID file has wrong PID, overwriting", "expected", myPID, "got", pid)
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
}
} else {
log.log("PID file missing after lock acquisition, creating")
log.Info("PID file missing after lock acquisition, creating")
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
}

View File

@@ -122,12 +122,8 @@ func TestCreateLocalSyncFunc(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
// Create logger
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
// Create logger (test output via newTestLogger)
log := newTestLogger()
// Create and run local sync function
doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -193,11 +189,7 @@ func TestCreateLocalExportFunc(t *testing.T) {
}
}
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
doExport := createLocalExportFunc(ctx, testStore, log)
doExport()
@@ -258,11 +250,7 @@ func TestCreateLocalAutoImportFunc(t *testing.T) {
t.Fatalf("Failed to write JSONL: %v", err)
}
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
doImport := createLocalAutoImportFunc(ctx, testStore, log)
doImport()
@@ -379,11 +367,7 @@ func TestLocalModeInNonGitDirectory(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
// Run local sync (should work without git)
doSync := createLocalSyncFunc(ctx, testStore, log)
@@ -437,11 +421,7 @@ func TestLocalModeExportImportRoundTrip(t *testing.T) {
defer func() { dbPath = oldDBPath }()
dbPath = testDBPath
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
log := newTestLogger()
// Create issues
for i := 0; i < 5; i++ {

View File

@@ -1,23 +1,97 @@
package main
import (
"fmt"
"time"
"io"
"log/slog"
"os"
"strings"
"gopkg.in/natefinch/lumberjack.v2"
)
// daemonLogger wraps a logging function for the daemon
// daemonLogger wraps slog for daemon logging.
// Provides level-specific methods and backward-compatible log() for migration.
type daemonLogger struct {
logFunc func(string, ...interface{})
logger *slog.Logger
}
// log is the backward-compatible logging method (maps to Info level).
// Use Info(), Warn(), Error(), Debug() for explicit levels.
func (d *daemonLogger) log(format string, args ...interface{}) {
d.logFunc(format, args...)
d.logger.Info(format, toSlogArgs(args)...)
}
// setupDaemonLogger creates a rotating log file logger for the daemon
func setupDaemonLogger(logPath string) (*lumberjack.Logger, daemonLogger) {
// Info logs msg at INFO level. args are optional slog-style key-value
// pairs (e.g. "socket", path) forwarded to the underlying slog.Logger
// after normalization by toSlogArgs.
func (d *daemonLogger) Info(msg string, args ...interface{}) {
d.logger.Info(msg, toSlogArgs(args)...)
}
// Warn logs msg at WARN level. args are optional slog-style key-value
// pairs forwarded to the underlying slog.Logger after normalization by
// toSlogArgs.
func (d *daemonLogger) Warn(msg string, args ...interface{}) {
d.logger.Warn(msg, toSlogArgs(args)...)
}
// Error logs msg at ERROR level. args are optional slog-style key-value
// pairs (e.g. "error", err) forwarded to the underlying slog.Logger
// after normalization by toSlogArgs.
func (d *daemonLogger) Error(msg string, args ...interface{}) {
d.logger.Error(msg, toSlogArgs(args)...)
}
// Debug logs msg at DEBUG level. args are optional slog-style key-value
// pairs forwarded to the underlying slog.Logger after normalization by
// toSlogArgs. Emitted only when the handler's level is LevelDebug.
func (d *daemonLogger) Debug(msg string, args ...interface{}) {
d.logger.Debug(msg, toSlogArgs(args)...)
}
// toSlogArgs converts variadic args to a []any slice suitable for slog's
// variadic key-value parameters.
//
// Returns nil for empty input so callers can splat the result directly
// into slog methods without allocating. Otherwise returns a fresh copy of
// args, protecting against the caller later mutating the original slice.
//
// NOTE: a previous version special-cased "slog key-value" shaped input,
// but both branches produced an identical element-wise copy, so the type
// check was dead code and has been removed.
func toSlogArgs(args []interface{}) []any {
	if len(args) == 0 {
		return nil
	}
	// []interface{} and []any are the same type; copy lowers to memmove.
	result := make([]any, len(args))
	copy(result, args)
	return result
}
// parseLogLevel maps a textual log level (case-insensitive) to slog.Level.
// Recognized values: "debug", "info", "warn"/"warning", "error".
// Any other value falls back to slog.LevelInfo.
func parseLogLevel(level string) slog.Level {
	known := map[string]slog.Level{
		"debug":   slog.LevelDebug,
		"info":    slog.LevelInfo,
		"warn":    slog.LevelWarn,
		"warning": slog.LevelWarn,
		"error":   slog.LevelError,
	}
	if lvl, ok := known[strings.ToLower(level)]; ok {
		return lvl
	}
	return slog.LevelInfo
}
// setupDaemonLogger creates a structured logger for the daemon.
// Returns the lumberjack logger (for cleanup) and the daemon logger.
//
// Parameters:
// - logPath: path to log file (uses lumberjack for rotation)
// - jsonFormat: if true, output JSON; otherwise text format
// - level: log level (debug, info, warn, error)
func setupDaemonLogger(logPath string, jsonFormat bool, level slog.Level) (*lumberjack.Logger, daemonLogger) {
maxSizeMB := getEnvInt("BEADS_DAEMON_LOG_MAX_SIZE", 50)
maxBackups := getEnvInt("BEADS_DAEMON_LOG_MAX_BACKUPS", 7)
maxAgeDays := getEnvInt("BEADS_DAEMON_LOG_MAX_AGE", 30)
@@ -31,13 +105,65 @@ func setupDaemonLogger(logPath string) (*lumberjack.Logger, daemonLogger) {
Compress: compress,
}
// Create multi-writer to log to both file and stderr (for foreground mode visibility)
var w io.Writer = logF
// Configure slog handler
opts := &slog.HandlerOptions{
Level: level,
}
var handler slog.Handler
if jsonFormat {
handler = slog.NewJSONHandler(w, opts)
} else {
handler = slog.NewTextHandler(w, opts)
}
logger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
timestamp := time.Now().Format("2006-01-02 15:04:05")
_, _ = fmt.Fprintf(logF, "[%s] %s\n", timestamp, msg)
},
logger: slog.New(handler),
}
return logF, logger
}
// setupDaemonLoggerLegacy is the old signature for backward compatibility during migration.
// It delegates to setupDaemonLogger with text (non-JSON) output and INFO level,
// matching the defaults of the pre-slog daemon logger.
// TODO: Remove this once all callers are updated to use the new signature.
func setupDaemonLoggerLegacy(logPath string) (*lumberjack.Logger, daemonLogger) {
return setupDaemonLogger(logPath, false, slog.LevelInfo)
}
// SetupStderrLogger creates a logger that writes to stderr only (no file).
// Useful for foreground mode or testing.
//
// jsonFormat selects JSON output instead of the default text format;
// level sets the minimum level the handler will emit.
func SetupStderrLogger(jsonFormat bool, level slog.Level) daemonLogger {
	opts := &slog.HandlerOptions{Level: level}
	// Default to human-readable text; switch to JSON when requested.
	var handler slog.Handler = slog.NewTextHandler(os.Stderr, opts)
	if jsonFormat {
		handler = slog.NewJSONHandler(os.Stderr, opts)
	}
	return daemonLogger{logger: slog.New(handler)}
}
// newTestLogger creates a no-op logger for testing.
// All output is routed to io.Discard; use newTestLoggerWithWriter when a
// test needs to capture and inspect what was logged.
func newTestLogger() daemonLogger {
	discard := slog.NewTextHandler(io.Discard, nil)
	return daemonLogger{logger: slog.New(discard)}
}
// newTestLoggerWithWriter creates a logger whose text-formatted output is
// written to w. Use this when a test needs to capture and verify log output
// (e.g. with a strings.Builder as the writer).
func newTestLoggerWithWriter(w io.Writer) daemonLogger {
	handler := slog.NewTextHandler(w, nil)
	return daemonLogger{logger: slog.New(handler)}
}

View File

@@ -19,21 +19,21 @@ func startRPCServer(ctx context.Context, socketPath string, store storage.Storag
serverErrChan := make(chan error, 1)
go func() {
log.log("Starting RPC server: %s", socketPath)
log.Info("starting RPC server", "socket", socketPath)
if err := server.Start(ctx); err != nil {
log.log("RPC server error: %v", err)
log.Error("RPC server error", "error", err)
serverErrChan <- err
}
}()
select {
case err := <-serverErrChan:
log.log("RPC server failed to start: %v", err)
log.Error("RPC server failed to start", "error", err)
return nil, nil, err
case <-server.WaitReady():
log.log("RPC server ready (socket listening)")
log.Info("RPC server ready (socket listening)")
case <-time.After(5 * time.Second):
log.log("WARNING: Server didn't signal ready after 5 seconds (may still be starting)")
log.Warn("server didn't signal ready after 5 seconds (may still be starting)")
}
return server, serverErrChan, nil
@@ -78,35 +78,35 @@ func runEventLoop(ctx context.Context, cancel context.CancelFunc, ticker *time.T
case <-parentCheckTicker.C:
// Check if parent process is still alive
if !checkParentProcessAlive(parentPID) {
log.log("Parent process (PID %d) died, shutting down daemon", parentPID)
log.Info("parent process died, shutting down daemon", "parent_pid", parentPID)
cancel()
if err := server.Stop(); err != nil {
log.log("Error stopping server: %v", err)
log.Error("stopping server", "error", err)
}
return
}
case sig := <-sigChan:
if isReloadSignal(sig) {
log.log("Received reload signal, ignoring (daemon continues running)")
log.Info("received reload signal, ignoring (daemon continues running)")
continue
}
log.log("Received signal %v, shutting down gracefully...", sig)
log.Info("received signal, shutting down gracefully", "signal", sig)
cancel()
if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err)
log.Error("stopping RPC server", "error", err)
}
return
case <-ctx.Done():
log.log("Context canceled, shutting down")
log.Info("context canceled, shutting down")
if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err)
log.Error("stopping RPC server", "error", err)
}
return
case err := <-serverErrChan:
log.log("RPC server failed: %v", err)
log.Error("RPC server failed", "error", err)
cancel()
if err := server.Stop(); err != nil {
log.log("Error stopping RPC server: %v", err)
log.Error("stopping RPC server", "error", err)
}
return
}

View File

@@ -772,13 +772,11 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
// Helper types for testing
func newTestSyncBranchLogger() (daemonLogger, *string) {
// Note: With slog, we can't easily capture formatted messages like before.
// For tests that need to verify log output, use strings.Builder and newTestLoggerWithWriter.
// This helper is kept for backward compatibility but messages won't be captured.
messages := ""
logger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
messages += "\n" + format
},
}
return logger, &messages
return newTestLogger(), &messages
}
// TestSyncBranchConfigChange tests changing sync.branch after worktree exists

View File

@@ -335,11 +335,7 @@ func TestExportUpdatesMetadata(t *testing.T) {
// Update metadata using the actual daemon helper function (bd-ar2.3 fix)
// This verifies that updateExportMetadata (used by createExportFunc and createSyncFunc) works correctly
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Verify metadata was set (renamed from last_import_hash to jsonl_content_hash - bd-39o)
@@ -438,11 +434,7 @@ func TestUpdateExportMetadataMultiRepo(t *testing.T) {
}
// Create mock logger
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
// Update metadata for each repo with different keys (bd-ar2.2 multi-repo support)
updateExportMetadata(ctx, store, jsonlPath1, mockLogger, jsonlPath1)
@@ -554,11 +546,7 @@ func TestExportWithMultiRepoConfigUpdatesAllMetadata(t *testing.T) {
// Simulate multi-repo export flow (as in createExportFunc)
// This tests the full integration: getMultiRepoJSONLPaths -> getRepoKeyForPath -> updateExportMetadata
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
// Simulate multi-repo mode with stable keys
multiRepoPaths := []string{primaryJSONL, additionalJSONL}
@@ -676,11 +664,7 @@ func TestUpdateExportMetadataInvalidKeySuffix(t *testing.T) {
}
// Create mock logger
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
// Update metadata with keySuffix containing ':' (bd-web8: should be auto-sanitized)
// This simulates Windows absolute paths like "C:\Users\..."

View File

@@ -15,9 +15,7 @@ import (
// newMockLogger creates a daemonLogger that does nothing
func newMockLogger() daemonLogger {
return daemonLogger{
logFunc: func(format string, args ...interface{}) {},
}
return newTestLogger()
}
func TestFileWatcher_JSONLChangeDetection(t *testing.T) {

View File

@@ -65,11 +65,7 @@ func TestExportUpdatesDatabaseMtime(t *testing.T) {
}
// Update metadata after export (bd-ymj fix)
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Get JSONL mtime
@@ -170,11 +166,7 @@ func TestDaemonExportScenario(t *testing.T) {
}
// Daemon updates metadata after export (bd-ymj fix)
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// THIS IS THE FIX: daemon now calls TouchDatabaseFile after export
@@ -249,11 +241,7 @@ func TestMultipleExportCycles(t *testing.T) {
}
// Update metadata after export (bd-ymj fix)
mockLogger := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf(format, args...)
},
}
mockLogger := newTestLogger()
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
// Apply fix