WIP: Extract daemon runtime into internal/daemonrunner
- Created config.go with Config struct - Created daemon.go with Daemon struct and Start/Stop methods - Created logger.go for logging setup - Created process.go for lock/PID management - Created fingerprint.go for database validation - Created flock_unix.go/flock_windows.go for platform-specific locking - Created git.go for git operations Still TODO: - Implement runGlobalDaemon, startRPCServer, runSyncLoop - Create sync.go, rpc.go, jsonl.go, validation.go - Update cmd/bd/daemon.go to use daemonrunner Part of bd-5f26
This commit is contained in:
26
internal/daemonrunner/config.go
Normal file
26
internal/daemonrunner/config.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package daemonrunner
|
||||
|
||||
import "time"
|
||||
|
||||
// Config holds all configuration for the daemon.
// The zero value is not usable; callers populate the fields relevant to
// their daemon mode (local vs. global) before passing Config to New.
type Config struct {
	// Sync behavior
	Interval   time.Duration // interval between sync passes (logged by Start)
	AutoCommit bool          // whether the daemon commits changes automatically
	AutoPush   bool          // whether the daemon pushes after committing

	// Scope
	Global bool // true: machine-wide daemon; false: per-workspace daemon

	// Paths
	LogFile  string // rotating daemon log file
	PIDFile  string // plain-text PID file (see process.go)
	DBPath   string // Local daemon only
	BeadsDir string // Local daemon: .beads dir, Global daemon: ~/.beads

	// RPC
	SocketPath string // socket the RPC server listens on (bd.sock in BeadsDir)

	// Workspace
	WorkspacePath string // Only for local daemon: parent of .beads directory
}
|
||||
293
internal/daemonrunner/daemon.go
Normal file
293
internal/daemonrunner/daemon.go
Normal file
@@ -0,0 +1,293 @@
|
||||
package daemonrunner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads"
|
||||
"github.com/steveyegge/beads/internal/daemon"
|
||||
"github.com/steveyegge/beads/internal/rpc"
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
// Daemon represents a running background daemon.
// Fields are populated incrementally by Start; before Start returns (or
// while it is running) some of them may still be nil.
type Daemon struct {
	cfg    Config              // immutable-after-Start configuration
	log    *logger             // logging facade writing to logF
	logF   *lumberjack.Logger  // rotating log sink (closed on Start exit)
	store  storage.Storage     // open database handle (local daemon)
	server *rpc.Server         // RPC server, set once startRPCServer succeeds
	lock   io.Closer           // held daemon.lock; closing releases the lock
	cancel context.CancelFunc  // cancels the daemon context (used by Stop)

	// Version is the daemon's build version
	Version string
}
|
||||
|
||||
// New creates a new Daemon instance
|
||||
func New(cfg Config, version string) *Daemon {
|
||||
return &Daemon{
|
||||
cfg: cfg,
|
||||
Version: version,
|
||||
}
|
||||
}
|
||||
|
||||
// Start runs the daemon main loop. It performs the full startup sequence
// in order — logger, database discovery (local mode), exclusive lock,
// database open and validation, RPC server, registry registration — and
// then blocks in the sync loop until the daemon stops or a fatal error
// occurs. The deferred cleanups (log close, lock release, PID-file
// removal, store close, context cancel, registry unregister) run in
// reverse order when Start returns.
func (d *Daemon) Start() error {
	// Setup logger first so every later step can report errors.
	d.logF, d.log = d.setupLogger()
	defer func() { _ = d.logF.Close() }()

	// Determine database path for local daemon (global daemons get their
	// paths from the caller-provided Config).
	if !d.cfg.Global {
		if err := d.determineDatabasePath(); err != nil {
			return err
		}
	}

	// Acquire daemon lock; a second daemon for the same .beads dir exits here.
	lock, err := d.setupLock()
	if err != nil {
		return err
	}
	d.lock = lock
	defer func() { _ = d.lock.Close() }()
	defer func() { _ = os.Remove(d.cfg.PIDFile) }()

	d.log.log("Daemon started (interval: %v, auto-commit: %v, auto-push: %v)",
		d.cfg.Interval, d.cfg.AutoCommit, d.cfg.AutoPush)

	// Handle global daemon differently: it has its own loop and skips the
	// per-workspace database validation below.
	if d.cfg.Global {
		return d.runGlobalDaemon()
	}

	// Validate single canonical database
	if err := d.validateSingleDatabase(); err != nil {
		return err
	}

	d.log.log("Using database: %s", d.cfg.DBPath)

	// Clear any previous daemon-error file on successful startup
	errFile := filepath.Join(d.cfg.BeadsDir, "daemon-error")
	if err := os.Remove(errFile); err != nil && !os.IsNotExist(err) {
		d.log.log("Warning: could not remove daemon-error file: %v", err)
	}

	// Open database
	store, err := sqlite.New(d.cfg.DBPath)
	if err != nil {
		d.log.log("Error: cannot open database: %v", err)
		return fmt.Errorf("cannot open database: %w", err)
	}
	d.store = store
	defer func() { _ = d.store.Close() }()
	d.log.log("Database opened: %s", d.cfg.DBPath)

	// Validate database fingerprint; BEADS_IGNORE_REPO_MISMATCH=1 turns a
	// mismatch into a warning instead of a fatal error.
	if err := d.validateDatabaseFingerprint(); err != nil {
		if os.Getenv("BEADS_IGNORE_REPO_MISMATCH") != "1" {
			d.log.log("Error: %v", err)
			return err
		}
		d.log.log("Warning: repository mismatch ignored (BEADS_IGNORE_REPO_MISMATCH=1)")
	}

	// Validate schema version
	if err := d.validateSchemaVersion(); err != nil {
		return err
	}

	// Start RPC server; the context governs both the server and sync loop.
	ctx, cancel := context.WithCancel(context.Background())
	d.cancel = cancel
	defer cancel()

	server, serverErrChan, err := d.startRPCServer(ctx)
	if err != nil {
		return err
	}
	d.server = server

	// Register in global registry; registration failure is non-fatal, and
	// we only unregister if we registered.
	if err := d.registerDaemon(); err != nil {
		d.log.log("Warning: failed to register daemon: %v", err)
	} else {
		defer d.unregisterDaemon()
	}

	// Run sync loops (blocks until shutdown or fatal error).
	return d.runSyncLoop(ctx, serverErrChan)
}
|
||||
|
||||
// TODO: Implement these methods by extracting from cmd/bd/daemon.go
|
||||
func (d *Daemon) runGlobalDaemon() error {
|
||||
// TODO: Extract from runGlobalDaemon in cmd/bd/daemon.go
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Daemon) startRPCServer(ctx context.Context) (*rpc.Server, chan error, error) {
|
||||
// TODO: Extract from startRPCServer in cmd/bd/daemon.go
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (d *Daemon) runSyncLoop(ctx context.Context, serverErrChan chan error) error {
|
||||
// TODO: Extract from runDaemonLoop in cmd/bd/daemon.go
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down the daemon
|
||||
func (d *Daemon) Stop() error {
|
||||
if d.cancel != nil {
|
||||
d.cancel()
|
||||
}
|
||||
if d.server != nil {
|
||||
return d.server.Stop()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Daemon) determineDatabasePath() error {
|
||||
if d.cfg.DBPath != "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Use public API to find database
|
||||
foundDB := beads.FindDatabasePath()
|
||||
if foundDB == "" {
|
||||
d.log.log("Error: no beads database found")
|
||||
d.log.log("Hint: run 'bd init' to create a database or set BEADS_DB environment variable")
|
||||
return fmt.Errorf("no beads database found")
|
||||
}
|
||||
|
||||
d.cfg.DBPath = foundDB
|
||||
d.cfg.BeadsDir = filepath.Dir(foundDB)
|
||||
d.cfg.WorkspacePath = filepath.Dir(d.cfg.BeadsDir)
|
||||
d.cfg.SocketPath = filepath.Join(d.cfg.BeadsDir, "bd.sock")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Daemon) validateSingleDatabase() error {
|
||||
// Check for multiple .db files (ambiguity error)
|
||||
matches, err := filepath.Glob(filepath.Join(d.cfg.BeadsDir, "*.db"))
|
||||
if err == nil && len(matches) > 1 {
|
||||
// Filter out backup files
|
||||
var validDBs []string
|
||||
for _, match := range matches {
|
||||
baseName := filepath.Base(match)
|
||||
if !strings.Contains(baseName, ".backup") && baseName != "vc.db" {
|
||||
validDBs = append(validDBs, match)
|
||||
}
|
||||
}
|
||||
if len(validDBs) > 1 {
|
||||
errMsg := fmt.Sprintf("Error: Multiple database files found in %s:\n", d.cfg.BeadsDir)
|
||||
for _, db := range validDBs {
|
||||
errMsg += fmt.Sprintf(" - %s\n", filepath.Base(db))
|
||||
}
|
||||
errMsg += fmt.Sprintf("\nBeads requires a single canonical database: %s\n", beads.CanonicalDatabaseName)
|
||||
errMsg += "Run 'bd init' to migrate legacy databases or manually remove old databases\n"
|
||||
errMsg += "Or run 'bd doctor' for more diagnostics"
|
||||
|
||||
d.log.log(errMsg)
|
||||
|
||||
// Write error to file so user can see it without checking logs
|
||||
errFile := filepath.Join(d.cfg.BeadsDir, "daemon-error")
|
||||
_ = os.WriteFile(errFile, []byte(errMsg), 0644)
|
||||
|
||||
return fmt.Errorf("multiple database files found")
|
||||
}
|
||||
}
|
||||
|
||||
// Validate using canonical name
|
||||
dbBaseName := filepath.Base(d.cfg.DBPath)
|
||||
if dbBaseName != beads.CanonicalDatabaseName {
|
||||
d.log.log("Error: Non-canonical database name: %s", dbBaseName)
|
||||
d.log.log("Expected: %s", beads.CanonicalDatabaseName)
|
||||
d.log.log("")
|
||||
d.log.log("Run 'bd init' to migrate to canonical name")
|
||||
return fmt.Errorf("non-canonical database name: %s", dbBaseName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateSchemaVersion compares the database's stored bd_version
// metadata against this daemon's build version. A mismatch is fatal
// unless BEADS_IGNORE_VERSION_MISMATCH=1; a missing version is
// backfilled with the current one.
func (d *Daemon) validateSchemaVersion() error {
	ctx := context.Background()
	dbVersion, err := d.store.GetMetadata(ctx, "bd_version")
	// NOTE(review): "key not found" is detected by exact error-string
	// match, which is fragile — if the storage layer exposes a sentinel
	// error, errors.Is would be safer. TODO confirm with storage package.
	if err != nil && err.Error() != "metadata key not found: bd_version" {
		d.log.log("Error: failed to read database version: %v", err)
		return fmt.Errorf("failed to read database version: %w", err)
	}

	if dbVersion != "" && dbVersion != d.Version {
		d.log.log("Error: Database schema version mismatch")
		d.log.log(" Database version: %s", dbVersion)
		d.log.log(" Daemon version: %s", d.Version)
		d.log.log("")
		d.log.log("The database was created with a different version of bd.")
		d.log.log("This may cause compatibility issues.")
		d.log.log("")
		d.log.log("Options:")
		d.log.log(" 1. Run 'bd migrate' to update the database to the current version")
		d.log.log(" 2. Upgrade/downgrade bd to match database version: %s", dbVersion)
		d.log.log(" 3. Set BEADS_IGNORE_VERSION_MISMATCH=1 to proceed anyway (not recommended)")
		d.log.log("")

		// Escape hatch: proceed despite the mismatch when explicitly asked.
		if os.Getenv("BEADS_IGNORE_VERSION_MISMATCH") != "1" {
			return fmt.Errorf("database version mismatch")
		}
		d.log.log("Warning: Proceeding despite version mismatch (BEADS_IGNORE_VERSION_MISMATCH=1)")
	} else if dbVersion == "" {
		// Old database without version metadata - set it now
		d.log.log("Warning: Database missing version metadata, setting to %s", d.Version)
		if err := d.store.SetMetadata(ctx, "bd_version", d.Version); err != nil {
			d.log.log("Error: failed to set database version: %v", err)
			return fmt.Errorf("failed to set database version: %w", err)
		}
	}

	return nil
}
|
||||
|
||||
func (d *Daemon) registerDaemon() error {
|
||||
registry, err := daemon.NewRegistry()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
entry := daemon.RegistryEntry{
|
||||
WorkspacePath: d.cfg.WorkspacePath,
|
||||
SocketPath: d.cfg.SocketPath,
|
||||
DatabasePath: d.cfg.DBPath,
|
||||
PID: os.Getpid(),
|
||||
Version: d.Version,
|
||||
StartedAt: time.Now(),
|
||||
}
|
||||
|
||||
if err := registry.Register(entry); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.log.log("Registered in global registry")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Daemon) unregisterDaemon() {
|
||||
registry, err := daemon.NewRegistry()
|
||||
if err != nil {
|
||||
d.log.log("Warning: failed to create registry for unregister: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := registry.Unregister(d.cfg.WorkspacePath, os.Getpid()); err != nil {
|
||||
d.log.log("Warning: failed to unregister daemon: %v", err)
|
||||
}
|
||||
}
|
||||
72
internal/daemonrunner/fingerprint.go
Normal file
72
internal/daemonrunner/fingerprint.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package daemonrunner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/steveyegge/beads"
|
||||
)
|
||||
|
||||
func (d *Daemon) validateDatabaseFingerprint() error {
|
||||
ctx := context.Background()
|
||||
|
||||
// Get stored repo ID
|
||||
storedRepoID, err := d.store.GetMetadata(ctx, "repo_id")
|
||||
if err != nil && err.Error() != "metadata key not found: repo_id" {
|
||||
return fmt.Errorf("failed to read repo_id: %w", err)
|
||||
}
|
||||
|
||||
// If no repo_id, this is a legacy database
|
||||
if storedRepoID == "" {
|
||||
return fmt.Errorf(`
|
||||
LEGACY DATABASE DETECTED!
|
||||
|
||||
This database was created before version 0.17.5 and lacks a repository fingerprint.
|
||||
To continue using this database, you must explicitly set its repository ID:
|
||||
|
||||
bd migrate --update-repo-id
|
||||
|
||||
This ensures the database is bound to this repository and prevents accidental
|
||||
database sharing between different repositories.
|
||||
|
||||
If this is a fresh clone, run:
|
||||
rm -rf .beads && bd init
|
||||
|
||||
Note: Auto-claiming legacy databases is intentionally disabled to prevent
|
||||
silent corruption when databases are copied between repositories.
|
||||
`)
|
||||
}
|
||||
|
||||
// Validate repo ID matches current repository
|
||||
currentRepoID, err := beads.ComputeRepoID()
|
||||
if err != nil {
|
||||
d.log.log("Warning: could not compute current repository ID: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if storedRepoID != currentRepoID {
|
||||
return fmt.Errorf(`
|
||||
DATABASE MISMATCH DETECTED!
|
||||
|
||||
This database belongs to a different repository:
|
||||
Database repo ID: %s
|
||||
Current repo ID: %s
|
||||
|
||||
This usually means:
|
||||
1. You copied a .beads directory from another repo (don't do this!)
|
||||
2. Git remote URL changed (run 'bd migrate --update-repo-id')
|
||||
3. Database corruption
|
||||
4. bd was upgraded and URL canonicalization changed
|
||||
|
||||
Solutions:
|
||||
- If remote URL changed: bd migrate --update-repo-id
|
||||
- If bd was upgraded: bd migrate --update-repo-id
|
||||
- If wrong database: rm -rf .beads && bd init
|
||||
- If correct database: BEADS_IGNORE_REPO_MISMATCH=1 bd daemon
|
||||
(Warning: This can cause data corruption across clones!)
|
||||
`, storedRepoID[:8], currentRepoID[:8])
|
||||
}
|
||||
|
||||
d.log.log("Repository fingerprint validated: %s", currentRepoID[:8])
|
||||
return nil
|
||||
}
|
||||
18
internal/daemonrunner/flock_unix.go
Normal file
18
internal/daemonrunner/flock_unix.go
Normal file
@@ -0,0 +1,18 @@
|
||||
//go:build unix
|
||||
|
||||
package daemonrunner
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// flockExclusive acquires an exclusive non-blocking lock on the file
// using flock(2) (LOCK_EX | LOCK_NB). If another process already holds
// the lock, flock fails with EWOULDBLOCK and we translate that into
// ErrDaemonLocked so callers can distinguish "already running" from
// genuine I/O errors. The lock is released when the file is closed.
func flockExclusive(f *os.File) error {
	err := unix.Flock(int(f.Fd()), unix.LOCK_EX|unix.LOCK_NB)
	if err == unix.EWOULDBLOCK {
		return ErrDaemonLocked
	}
	return err
}
|
||||
35
internal/daemonrunner/flock_windows.go
Normal file
35
internal/daemonrunner/flock_windows.go
Normal file
@@ -0,0 +1,35 @@
|
||||
//go:build windows
|
||||
|
||||
package daemonrunner
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// flockExclusive acquires an exclusive non-blocking lock on the file using LockFileEx.
// A contended lock fails immediately (LOCKFILE_FAIL_IMMEDIATELY) with
// ERROR_LOCK_VIOLATION, which we translate into ErrDaemonLocked.
// The lock covers the whole file and is released when the handle closes.
func flockExclusive(f *os.File) error {
	// LOCKFILE_EXCLUSIVE_LOCK (2) | LOCKFILE_FAIL_IMMEDIATELY (1) = 3
	const flags = windows.LOCKFILE_EXCLUSIVE_LOCK | windows.LOCKFILE_FAIL_IMMEDIATELY

	// Create overlapped structure for the entire file (zero offset).
	ol := &windows.Overlapped{}

	// Lock entire file (0xFFFFFFFF, 0xFFFFFFFF = maximum range)
	err := windows.LockFileEx(
		windows.Handle(f.Fd()),
		flags,
		0,          // reserved
		0xFFFFFFFF, // number of bytes to lock (low)
		0xFFFFFFFF, // number of bytes to lock (high)
		ol,
	)

	// EWOULDBLOCK is checked for parity with the unix implementation;
	// LockFileEx itself reports contention as ERROR_LOCK_VIOLATION.
	if err == windows.ERROR_LOCK_VIOLATION || err == syscall.EWOULDBLOCK {
		return ErrDaemonLocked
	}

	return err
}
|
||||
92
internal/daemonrunner/git.go
Normal file
92
internal/daemonrunner/git.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package daemonrunner
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// isGitRepo reports whether the current working directory is inside a
// git repository, i.e. `git rev-parse --git-dir` exits successfully.
func isGitRepo() bool {
	return exec.Command("git", "rev-parse", "--git-dir").Run() == nil
}
|
||||
|
||||
// gitHasUpstream reports whether the current branch has an upstream
// configured, i.e. `git rev-parse @{u}` resolves successfully.
func gitHasUpstream() bool {
	return exec.Command("git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}").Run() == nil
}
|
||||
|
||||
// gitHasChanges reports whether filePath has uncommitted changes:
// `git status --porcelain <file>` prints nothing for a clean file.
func gitHasChanges(ctx context.Context, filePath string) (bool, error) {
	out, err := exec.CommandContext(ctx, "git", "status", "--porcelain", filePath).Output()
	if err != nil {
		return false, fmt.Errorf("git status failed: %w", err)
	}
	dirty := strings.TrimSpace(string(out)) != ""
	return dirty, nil
}
|
||||
|
||||
// gitCommit stages filePath and commits it. When message is empty, a
// timestamped default of the form "bd sync: 2006-01-02 15:04:05" is used.
func gitCommit(ctx context.Context, filePath string, message string) error {
	// Stage the file
	if err := exec.CommandContext(ctx, "git", "add", filePath).Run(); err != nil {
		return fmt.Errorf("git add failed: %w", err)
	}

	// Generate message if not provided
	if message == "" {
		message = fmt.Sprintf("bd sync: %s", time.Now().Format("2006-01-02 15:04:05"))
	}

	// Commit; include git's own output in the error for diagnosis.
	out, err := exec.CommandContext(ctx, "git", "commit", "-m", message).CombinedOutput()
	if err != nil {
		return fmt.Errorf("git commit failed: %w\n%s", err, out)
	}
	return nil
}
|
||||
|
||||
// gitPull pulls the current branch from its configured remote
// (branch.<name>.remote), defaulting to "origin" when none is set.
func gitPull(ctx context.Context) error {
	// Get current branch name
	branchOut, err := exec.CommandContext(ctx, "git", "rev-parse", "--abbrev-ref", "HEAD").Output()
	if err != nil {
		return fmt.Errorf("failed to get current branch: %w", err)
	}
	branch := strings.TrimSpace(string(branchOut))

	// Get remote name for current branch (usually "origin"); fall back to
	// "origin" when no remote is configured for the branch.
	remote := "origin"
	cfgKey := fmt.Sprintf("branch.%s.remote", branch)
	if remoteOut, cfgErr := exec.CommandContext(ctx, "git", "config", "--get", cfgKey).Output(); cfgErr == nil {
		remote = strings.TrimSpace(string(remoteOut))
	}

	// Pull with explicit remote and branch
	out, err := exec.CommandContext(ctx, "git", "pull", remote, branch).CombinedOutput()
	if err != nil {
		return fmt.Errorf("git pull failed: %w\n%s", err, out)
	}
	return nil
}
|
||||
|
||||
// gitPush pushes the current branch to its upstream via a plain
// `git push`, including git's output in any error.
func gitPush(ctx context.Context) error {
	out, err := exec.CommandContext(ctx, "git", "push").CombinedOutput()
	if err != nil {
		return fmt.Errorf("git push failed: %w\n%s", err, out)
	}
	return nil
}
|
||||
59
internal/daemonrunner/logger.go
Normal file
59
internal/daemonrunner/logger.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package daemonrunner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
// logger is a minimal logging facade: all daemon logging funnels through
// a single printf-style sink function.
type logger struct {
	logFunc func(string, ...interface{}) // sink; installed by setupLogger
}

// log formats and writes one log line via the configured sink.
// NOTE(review): calling log on a logger with a nil logFunc would panic;
// setupLogger always sets it, but confirm no other construction path exists.
func (l *logger) log(format string, args ...interface{}) {
	l.logFunc(format, args...)
}
|
||||
|
||||
func (d *Daemon) setupLogger() (*lumberjack.Logger, *logger) {
|
||||
maxSizeMB := getEnvInt("BEADS_DAEMON_LOG_MAX_SIZE", 10)
|
||||
maxBackups := getEnvInt("BEADS_DAEMON_LOG_MAX_BACKUPS", 3)
|
||||
maxAgeDays := getEnvInt("BEADS_DAEMON_LOG_MAX_AGE", 7)
|
||||
compress := getEnvBool("BEADS_DAEMON_LOG_COMPRESS", true)
|
||||
|
||||
logF := &lumberjack.Logger{
|
||||
Filename: d.cfg.LogFile,
|
||||
MaxSize: maxSizeMB,
|
||||
MaxBackups: maxBackups,
|
||||
MaxAge: maxAgeDays,
|
||||
Compress: compress,
|
||||
}
|
||||
|
||||
log := &logger{
|
||||
logFunc: func(format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
timestamp := time.Now().Format("2006-01-02 15:04:05")
|
||||
_, _ = fmt.Fprintf(logF, "[%s] %s\n", timestamp, msg)
|
||||
},
|
||||
}
|
||||
|
||||
return logF, log
|
||||
}
|
||||
|
||||
func getEnvInt(key string, defaultValue int) int {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
if parsed, err := strconv.Atoi(val); err == nil {
|
||||
return parsed
|
||||
}
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
func getEnvBool(key string, defaultValue bool) bool {
|
||||
if val := os.Getenv(key); val != "" {
|
||||
return val == "true" || val == "1"
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
108
internal/daemonrunner/process.go
Normal file
108
internal/daemonrunner/process.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package daemonrunner
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrDaemonLocked is returned when another daemon process already holds
// the exclusive lock on daemon.lock for this .beads directory.
var ErrDaemonLocked = errors.New("daemon lock already held by another process")
|
||||
|
||||
// DaemonLockInfo represents the metadata stored in the daemon.lock file.
// It is informational (JSON written by acquireDaemonLock for humans and
// tooling); mutual exclusion comes from the OS file lock, not this data.
type DaemonLockInfo struct {
	PID       int       `json:"pid"`        // process ID of the lock holder
	Database  string    `json:"database"`   // database path the daemon serves
	Version   string    `json:"version"`    // daemon build version
	StartedAt time.Time `json:"started_at"` // UTC start time
}
|
||||
|
||||
// DaemonLock represents a held lock on the daemon.lock file.
// The lock is held for as long as file stays open; Close releases it.
type DaemonLock struct {
	file *os.File // open, flocked handle; nil once Close has run
	path string   // path to the daemon.lock file
}
|
||||
|
||||
// Close releases the daemon lock
|
||||
func (l *DaemonLock) Close() error {
|
||||
if l.file == nil {
|
||||
return nil
|
||||
}
|
||||
err := l.file.Close()
|
||||
l.file = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Daemon) setupLock() (io.Closer, error) {
|
||||
lock, err := acquireDaemonLock(d.cfg.BeadsDir, d.cfg.DBPath, d.Version)
|
||||
if err != nil {
|
||||
if err == ErrDaemonLocked {
|
||||
d.log.log("Daemon already running (lock held), exiting")
|
||||
} else {
|
||||
d.log.log("Error acquiring daemon lock: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Ensure PID file matches our PID
|
||||
myPID := os.Getpid()
|
||||
pidFile := d.cfg.PIDFile
|
||||
// #nosec G304 - controlled path from config
|
||||
if data, err := os.ReadFile(pidFile); err == nil {
|
||||
var filePID int
|
||||
if _, err := fmt.Sscanf(string(data), "%d", &filePID); err == nil && filePID != myPID {
|
||||
d.log.log("PID file has wrong PID (expected %d, got %d), overwriting", myPID, filePID)
|
||||
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
|
||||
}
|
||||
} else {
|
||||
d.log.log("PID file missing after lock acquisition, creating")
|
||||
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", myPID)), 0600)
|
||||
}
|
||||
|
||||
return lock, nil
|
||||
}
|
||||
|
||||
// acquireDaemonLock attempts to acquire an exclusive lock on daemon.lock
|
||||
func acquireDaemonLock(beadsDir string, dbPath string, version string) (*DaemonLock, error) {
|
||||
lockPath := filepath.Join(beadsDir, "daemon.lock")
|
||||
|
||||
// Open or create the lock file
|
||||
// #nosec G304 - controlled path from config
|
||||
f, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0600)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot open lock file: %w", err)
|
||||
}
|
||||
|
||||
// Try to acquire exclusive non-blocking lock
|
||||
if err := flockExclusive(f); err != nil {
|
||||
_ = f.Close()
|
||||
if err == ErrDaemonLocked {
|
||||
return nil, ErrDaemonLocked
|
||||
}
|
||||
return nil, fmt.Errorf("cannot lock file: %w", err)
|
||||
}
|
||||
|
||||
// Write JSON metadata to the lock file
|
||||
lockInfo := DaemonLockInfo{
|
||||
PID: os.Getpid(),
|
||||
Database: dbPath,
|
||||
Version: version,
|
||||
StartedAt: time.Now().UTC(),
|
||||
}
|
||||
|
||||
_ = f.Truncate(0)
|
||||
_, _ = f.Seek(0, 0)
|
||||
encoder := json.NewEncoder(f)
|
||||
encoder.SetIndent("", " ")
|
||||
_ = encoder.Encode(lockInfo)
|
||||
_ = f.Sync()
|
||||
|
||||
// Also write PID file for Windows compatibility
|
||||
pidFile := filepath.Join(beadsDir, "daemon.pid")
|
||||
_ = os.WriteFile(pidFile, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0600)
|
||||
|
||||
return &DaemonLock{file: f, path: lockPath}, nil
|
||||
}
|
||||
Reference in New Issue
Block a user