package rpc

import (
	"encoding/json"
	"fmt"
	"net"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/steveyegge/beads/internal/storage"
)

// ServerVersion is the version of this RPC server.
// This should match the bd CLI version for proper compatibility checks.
// It's set dynamically by daemon.go from cmd/bd/version.go before starting the server.
var ServerVersion = "0.0.0" // Placeholder; overridden by daemon startup

const (
	statusUnhealthy = "unhealthy"
)

// Server represents the RPC server that runs in the daemon.
type Server struct {
	socketPath    string
	workspacePath string          // Absolute path to workspace root
	dbPath        string          // Absolute path to database file
	storage       storage.Storage // Default storage (for backward compat)
	listener      net.Listener
	mu            sync.RWMutex
	shutdown      bool
	shutdownChan  chan struct{}
	stopOnce      sync.Once
	doneChan      chan struct{} // closed when Start() cleanup is complete

	// Health and metrics
	startTime        time.Time
	lastActivityTime atomic.Value // time.Time - last request timestamp
	metrics          *Metrics

	// Connection limiting
	maxConns      int
	activeConns   int32 // atomic counter
	connSemaphore chan struct{}

	// Request timeout
	requestTimeout time.Duration

	// Ready channel signals when server is listening
	readyChan chan struct{}

	// Auto-import single-flight guard
	importInProgress atomic.Bool

	// Mutation events for event-driven daemon
	mutationChan  chan MutationEvent
	droppedEvents atomic.Int64 // Counter for dropped mutation events

	// Recent mutations buffer for polling (circular buffer, max 100 events)
	recentMutations   []MutationEvent
	recentMutationsMu sync.RWMutex
	maxMutationBuffer int
}

// Mutation event types
const (
	MutationCreate  = "create"
	MutationUpdate  = "update"
	MutationDelete  = "delete"
	MutationComment = "comment"
)

// MutationEvent represents a database mutation for event-driven sync.
type MutationEvent struct {
	Type      string // One of: MutationCreate, MutationUpdate, MutationDelete, MutationComment
	IssueID   string // e.g., "bd-42"
	Timestamp time.Time
}
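
// With encoding/json's defaults (the struct declares no tags), a marshaled
// event looks roughly like this; the timestamp value is illustrative and
// would carry full RFC 3339 precision in practice:
//
//	{"Type":"update","IssueID":"bd-42","Timestamp":"2025-11-06T18:13:16Z"}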

// NewServer creates a new RPC server.
func NewServer(socketPath string, store storage.Storage, workspacePath string, dbPath string) *Server {
	// Parse config from env vars
	maxConns := 100 // default
	if env := os.Getenv("BEADS_DAEMON_MAX_CONNS"); env != "" {
		var conns int
		if _, err := fmt.Sscanf(env, "%d", &conns); err == nil && conns > 0 {
			maxConns = conns
		}
	}

	requestTimeout := 30 * time.Second // default
	if env := os.Getenv("BEADS_DAEMON_REQUEST_TIMEOUT"); env != "" {
		if timeout, err := time.ParseDuration(env); err == nil && timeout > 0 {
			requestTimeout = timeout
		}
	}

	mutationBufferSize := 512 // default (increased from 100 for better burst handling)
	if env := os.Getenv("BEADS_MUTATION_BUFFER"); env != "" {
		var bufSize int
		if _, err := fmt.Sscanf(env, "%d", &bufSize); err == nil && bufSize > 0 {
			mutationBufferSize = bufSize
		}
	}

	s := &Server{
		socketPath:        socketPath,
		workspacePath:     workspacePath,
		dbPath:            dbPath,
		storage:           store,
		shutdownChan:      make(chan struct{}),
		doneChan:          make(chan struct{}),
		startTime:         time.Now(),
		metrics:           NewMetrics(),
		maxConns:          maxConns,
		connSemaphore:     make(chan struct{}, maxConns),
		requestTimeout:    requestTimeout,
		readyChan:         make(chan struct{}),
		mutationChan:      make(chan MutationEvent, mutationBufferSize), // Configurable buffer
		recentMutations:   make([]MutationEvent, 0, 100),
		maxMutationBuffer: 100,
	}
	s.lastActivityTime.Store(time.Now())
	return s
}
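
// The overrides above are read once at construction time; these values are
// illustrative, not recommendations:
//
//	BEADS_DAEMON_MAX_CONNS=250        (integer, parsed via fmt.Sscanf)
//	BEADS_DAEMON_REQUEST_TIMEOUT=45s  (any time.ParseDuration format, e.g. "45s", "2m")
//	BEADS_MUTATION_BUFFER=1024        (capacity of mutationChan)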

// emitMutation sends a mutation event to the daemon's event-driven loop.
// Non-blocking: drops event if channel is full (sync will happen eventually).
// Also stores in recent mutations buffer for polling.
func (s *Server) emitMutation(eventType, issueID string) {
	event := MutationEvent{
		Type:      eventType,
		IssueID:   issueID,
		Timestamp: time.Now(),
	}

	// Send to mutation channel for daemon
	select {
	case s.mutationChan <- event:
		// Event sent successfully
	default:
		// Channel full, increment dropped events counter
		s.droppedEvents.Add(1)
	}

	// Store in recent mutations buffer for polling
	s.recentMutationsMu.Lock()
	s.recentMutations = append(s.recentMutations, event)
	// Keep buffer size limited (circular buffer behavior)
	if len(s.recentMutations) > s.maxMutationBuffer {
		s.recentMutations = s.recentMutations[1:]
	}
	s.recentMutationsMu.Unlock()
}
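
// A typical call site (hypothetical; the real ones live elsewhere in this
// package, since emitMutation is unexported) fires and forgets after a
// successful write:
//
//	s.emitMutation(MutationUpdate, "bd-42")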

// MutationChan returns the mutation event channel for the daemon to consume.
func (s *Server) MutationChan() <-chan MutationEvent {
	return s.mutationChan
}
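
// Sketch of a daemon-side consumer loop (the real consumer presumably lives
// in daemon.go, outside this file; the stop channel is an assumption):
//
//	for {
//		select {
//		case ev := <-srv.MutationChan():
//			_ = ev // e.g. debounce, then schedule an export/sync
//		case <-stop:
//			return
//		}
//	}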

// ResetDroppedEventsCount resets the dropped events counter and returns the previous value.
func (s *Server) ResetDroppedEventsCount() int64 {
	return s.droppedEvents.Swap(0)
}

// GetRecentMutations returns buffered mutations newer than the given Unix
// millisecond timestamp. Only the last maxMutationBuffer events are retained,
// so a caller that polls too slowly can miss evicted events.
func (s *Server) GetRecentMutations(sinceMillis int64) []MutationEvent {
	s.recentMutationsMu.RLock()
	defer s.recentMutationsMu.RUnlock()

	var result []MutationEvent
	for _, m := range s.recentMutations {
		if m.Timestamp.UnixMilli() > sinceMillis {
			result = append(result, m)
		}
	}
	return result
}

// handleGetMutations handles the get_mutations RPC operation.
func (s *Server) handleGetMutations(req *Request) Response {
	var args GetMutationsArgs
	if err := json.Unmarshal(req.Args, &args); err != nil {
		return Response{
			Success: false,
			Error:   fmt.Sprintf("invalid arguments: %v", err),
		}
	}

	mutations := s.GetRecentMutations(args.Since)
	data, _ := json.Marshal(mutations)

	return Response{
		Success: true,
		Data:    data,
	}
}
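
// A polling client tracks the newest Timestamp it has seen (as UnixMilli) and
// sends it back as the Since argument on the next get_mutations call. The
// Request and GetMutationsArgs types are defined elsewhere in this package,
// so the exact wire field names below are an assumption:
//
//	{"op":"get_mutations","args":{"since":1762452796000}}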