refactor(cmd): replace map[string]interface{} with typed JSON response structs (bd-u2sc.1)

Added typed response structs for JSON output in CLI commands:

compact.go:
- CompactDryRunResponse, CompactSuccessResponse
- CompactNoCandidatesResponse, CompactBatchSuccessResponse
- CompactStatsResponse, CompactTierStats
- CompactApplyResponse, TombstonePrunedInfo

cleanup.go:
- CleanupEmptyResponse

daemons.go:
- DaemonStopResponse, DaemonRestartResponse
- DaemonLogsResponse, DaemonKillallEmptyResponse
- DaemonHealthResponse, DaemonHealthReport

daemon_lifecycle.go:
- DaemonStatusResponse

Benefits:
- Compile-time type checking for JSON output
- IDE autocompletion for response fields
- Self-documenting API structure
- Easier refactoring

Note: RPC argument maps and storage update maps are left untyped for now,
since converting them would require interface changes to internal APIs.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-22 15:48:36 -08:00
parent e67712dcd4
commit 4c38075520
4 changed files with 247 additions and 122 deletions

View File

@@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"os"
"time"
@@ -11,6 +10,14 @@ import (
"github.com/steveyegge/beads/internal/ui"
)
// CleanupEmptyResponse is returned when there are no closed issues to delete.
type CleanupEmptyResponse struct {
	DeletedCount int    `json:"deleted_count"`    // always 0 for this response
	Message      string `json:"message"`          // human-readable explanation, e.g. "No closed issues to delete"
	Filter       string `json:"filter,omitempty"` // set to "older than N days" when --older-than was given
	Wisp         bool   `json:"wisp,omitempty"`   // true when the command was restricted to wisp issues
}
// Hard delete mode: bypass tombstone TTL safety, use --older-than days directly
// TODO: Consider consolidating into 'bd doctor --fix' for simpler maintenance UX
@@ -146,18 +153,17 @@ SEE ALSO:
if len(closedIssues) == 0 {
if jsonOutput {
result := map[string]interface{}{
"deleted_count": 0,
"message": "No closed issues to delete",
result := CleanupEmptyResponse{
DeletedCount: 0,
Message: "No closed issues to delete",
}
if olderThanDays > 0 {
result["filter"] = fmt.Sprintf("older than %d days", olderThanDays)
result.Filter = fmt.Sprintf("older than %d days", olderThanDays)
}
if wispOnly {
result["wisp"] = true
result.Wisp = true
}
output, _ := json.MarshalIndent(result, "", " ")
fmt.Println(string(output))
outputJSON(result)
} else {
msg := "No closed issues to delete"
if wispOnly && olderThanDays > 0 {

View File

@@ -34,6 +34,81 @@ var (
compactLimit int
)
// JSON response types for compact command output

// CompactDryRunResponse is returned for --dry-run mode.
// It is shared between the single-issue path (IssueID/OriginalSize set)
// and the batch path (CandidateCount/TotalSizeBytes set); the unused
// fields are omitted from the JSON output via omitempty.
type CompactDryRunResponse struct {
	DryRun             bool   `json:"dry_run"`                    // always true for this response
	Tier               int    `json:"tier"`                       // compaction tier being previewed
	IssueID            string `json:"issue_id,omitempty"`         // single-issue mode only
	OriginalSize       int    `json:"original_size,omitempty"`    // single-issue mode: current size in bytes
	CandidateCount     int    `json:"candidate_count,omitempty"`  // batch mode: number of eligible issues
	TotalSizeBytes     int    `json:"total_size_bytes,omitempty"` // batch mode: combined size of candidates
	EstimatedReduction string `json:"estimated_reduction"`        // fixed estimate string, currently "70-80%"
}
// CompactSuccessResponse is returned for successful single-issue compaction.
type CompactSuccessResponse struct {
	Success       bool    `json:"success"`        // always true for this response
	Tier          int     `json:"tier"`           // tier that was applied
	IssueID       string  `json:"issue_id"`       // issue that was compacted
	OriginalSize  int     `json:"original_size"`  // size in bytes before compaction
	CompactedSize int     `json:"compacted_size"` // size in bytes after compaction
	SavedBytes    int     `json:"saved_bytes"`    // original_size - compacted_size
	ReductionPct  float64 `json:"reduction_pct"`  // saved_bytes as a percentage of original_size
	ElapsedMs     int64   `json:"elapsed_ms"`     // wall-clock duration of the operation
}
// CompactNoCandidatesResponse is returned when no candidates are found.
type CompactNoCandidatesResponse struct {
	Success bool   `json:"success"` // true: finding nothing to do is not an error
	Count   int    `json:"count"`   // always 0 for this response
	Message string `json:"message"` // e.g. "No eligible candidates"
}
// CompactBatchSuccessResponse is returned for successful batch compaction.
type CompactBatchSuccessResponse struct {
	Success      bool  `json:"success"`       // always true for this response
	Tier         int   `json:"tier"`          // tier that was applied to the batch
	Total        int   `json:"total"`         // number of issues attempted
	Succeeded    int   `json:"succeeded"`     // issues compacted successfully
	Failed       int   `json:"failed"`        // issues that failed to compact
	SavedBytes   int   `json:"saved_bytes"`   // total bytes reclaimed across the batch
	OriginalSize int   `json:"original_size"` // combined pre-compaction size of the batch
	ElapsedMs    int64 `json:"elapsed_ms"`    // wall-clock duration of the whole batch
}
// CompactTierStats holds statistics for a single compaction tier.
type CompactTierStats struct {
	Candidates int `json:"candidates"` // number of issues eligible for this tier
	TotalSize  int `json:"total_size"` // combined size in bytes of those candidates
}
// CompactStatsResponse is returned for --stats mode, reporting per-tier
// candidate counts and sizes without performing any compaction.
type CompactStatsResponse struct {
	Tier1 CompactTierStats `json:"tier1"` // tier-1 candidate statistics
	Tier2 CompactTierStats `json:"tier2"` // tier-2 candidate statistics
}
// TombstonePrunedInfo holds info about tombstones pruned as a side effect
// of --apply (see bd-okh).
type TombstonePrunedInfo struct {
	Count   int `json:"count"`    // number of tombstones removed
	TTLDays int `json:"ttl_days"` // TTL threshold (in days) used for pruning
}
// CompactApplyResponse is returned for --apply mode.
type CompactApplyResponse struct {
	Success       bool    `json:"success"`        // always true for this response
	IssueID       string  `json:"issue_id"`       // issue that was compacted
	Tier          int     `json:"tier"`           // tier that was applied
	OriginalSize  int     `json:"original_size"`  // size in bytes before compaction
	CompactedSize int     `json:"compacted_size"` // size in bytes after compaction
	SavedBytes    int     `json:"saved_bytes"`    // original_size - compacted_size
	ReductionPct  float64 `json:"reduction_pct"`  // percentage reduction achieved
	ElapsedMs     int64   `json:"elapsed_ms"`     // wall-clock duration of the operation
	// TombstonesPruned is non-nil only when tombstone pruning actually
	// removed entries (pruned count > 0); otherwise omitted from JSON.
	TombstonesPruned *TombstonePrunedInfo `json:"tombstones_pruned,omitempty"`
}
// TODO: Consider consolidating into 'bd doctor --fix' for simpler maintenance UX
var compactCmd = &cobra.Command{
Use: "compact",
@@ -248,14 +323,13 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
if compactDryRun {
if jsonOutput {
output := map[string]interface{}{
"dry_run": true,
"tier": compactTier,
"issue_id": issueID,
"original_size": originalSize,
"estimated_reduction": "70-80%",
}
outputJSON(output)
outputJSON(CompactDryRunResponse{
DryRun: true,
Tier: compactTier,
IssueID: issueID,
OriginalSize: originalSize,
EstimatedReduction: "70-80%",
})
return
}
@@ -290,17 +364,16 @@ func runCompactSingle(ctx context.Context, compactor *compact.Compactor, store *
elapsed := time.Since(start)
if jsonOutput {
output := map[string]interface{}{
"success": true,
"tier": compactTier,
"issue_id": issueID,
"original_size": originalSize,
"compacted_size": compactedSize,
"saved_bytes": savingBytes,
"reduction_pct": float64(savingBytes) / float64(originalSize) * 100,
"elapsed_ms": elapsed.Milliseconds(),
}
outputJSON(output)
outputJSON(CompactSuccessResponse{
Success: true,
Tier: compactTier,
IssueID: issueID,
OriginalSize: originalSize,
CompactedSize: compactedSize,
SavedBytes: savingBytes,
ReductionPct: float64(savingBytes) / float64(originalSize) * 100,
ElapsedMs: elapsed.Milliseconds(),
})
return
}
@@ -348,10 +421,10 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
if len(candidates) == 0 {
if jsonOutput {
outputJSON(map[string]interface{}{
"success": true,
"count": 0,
"message": "No eligible candidates",
outputJSON(CompactNoCandidatesResponse{
Success: true,
Count: 0,
Message: "No eligible candidates",
})
return
}
@@ -370,14 +443,13 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
}
if jsonOutput {
output := map[string]interface{}{
"dry_run": true,
"tier": compactTier,
"candidate_count": len(candidates),
"total_size_bytes": totalSize,
"estimated_reduction": "70-80%",
}
outputJSON(output)
outputJSON(CompactDryRunResponse{
DryRun: true,
Tier: compactTier,
CandidateCount: len(candidates),
TotalSizeBytes: totalSize,
EstimatedReduction: "70-80%",
})
return
}
@@ -420,17 +492,16 @@ func runCompactAll(ctx context.Context, compactor *compact.Compactor, store *sql
elapsed := time.Since(start)
if jsonOutput {
output := map[string]interface{}{
"success": true,
"tier": compactTier,
"total": len(results),
"succeeded": successCount,
"failed": failCount,
"saved_bytes": totalSaved,
"original_size": totalOriginal,
"elapsed_ms": elapsed.Milliseconds(),
}
outputJSON(output)
outputJSON(CompactBatchSuccessResponse{
Success: true,
Tier: compactTier,
Total: len(results),
Succeeded: successCount,
Failed: failCount,
SavedBytes: totalSaved,
OriginalSize: totalOriginal,
ElapsedMs: elapsed.Milliseconds(),
})
return
}
@@ -480,17 +551,16 @@ func runCompactStats(ctx context.Context, store *sqlite.SQLiteStorage) {
}
if jsonOutput {
output := map[string]interface{}{
"tier1": map[string]interface{}{
"candidates": len(tier1),
"total_size": tier1Size,
outputJSON(CompactStatsResponse{
Tier1: CompactTierStats{
Candidates: len(tier1),
TotalSize: tier1Size,
},
"tier2": map[string]interface{}{
"candidates": len(tier2),
"total_size": tier2Size,
Tier2: CompactTierStats{
Candidates: len(tier2),
TotalSize: tier2Size,
},
}
outputJSON(output)
})
return
}
@@ -892,24 +962,24 @@ func runCompactApply(ctx context.Context, store *sqlite.SQLiteStorage) {
}
if jsonOutput {
output := map[string]interface{}{
"success": true,
"issue_id": compactID,
"tier": compactTier,
"original_size": originalSize,
"compacted_size": compactedSize,
"saved_bytes": savingBytes,
"reduction_pct": reductionPct,
"elapsed_ms": elapsed.Milliseconds(),
response := CompactApplyResponse{
Success: true,
IssueID: compactID,
Tier: compactTier,
OriginalSize: originalSize,
CompactedSize: compactedSize,
SavedBytes: savingBytes,
ReductionPct: reductionPct,
ElapsedMs: elapsed.Milliseconds(),
}
// Include tombstone pruning results (bd-okh)
if tombstonePruneResult != nil && tombstonePruneResult.PrunedCount > 0 {
output["tombstones_pruned"] = map[string]interface{}{
"count": tombstonePruneResult.PrunedCount,
"ttl_days": tombstonePruneResult.TTLDays,
response.TombstonesPruned = &TombstonePrunedInfo{
Count: tombstonePruneResult.PrunedCount,
TTLDays: tombstonePruneResult.TTLDays,
}
}
outputJSON(output)
outputJSON(response)
return
}

View File

@@ -14,6 +14,19 @@ import (
"github.com/steveyegge/beads/internal/rpc"
)
// DaemonStatusResponse is returned for a daemon status check.
// When the daemon is not running, only Running (false) is emitted and
// every other field is dropped via omitempty. The config fields
// (AutoCommit through DaemonMode) are populated only when an RPC status
// query to the daemon succeeds.
type DaemonStatusResponse struct {
	Running      bool   `json:"running"`                 // whether a daemon process was found
	PID          int    `json:"pid,omitempty"`           // daemon process ID
	Started      string `json:"started,omitempty"`       // start time, empty if unknown
	LogPath      string `json:"log_path,omitempty"`      // path to the daemon log file, empty if unknown
	AutoCommit   bool   `json:"auto_commit,omitempty"`   // from RPC status
	AutoPush     bool   `json:"auto_push,omitempty"`     // from RPC status
	LocalMode    bool   `json:"local_mode,omitempty"`    // from RPC status
	SyncInterval string `json:"sync_interval,omitempty"` // from RPC status
	DaemonMode   string `json:"daemon_mode,omitempty"`   // from RPC status
}
// isDaemonRunning checks if the daemon is currently running
func isDaemonRunning(pidFile string) (bool, int) {
beadsDir := filepath.Dir(pidFile)
@@ -67,23 +80,19 @@ func showDaemonStatus(pidFile string) {
}
if jsonOutput {
status := map[string]interface{}{
"running": true,
"pid": pid,
}
if started != "" {
status["started"] = started
}
if logPath != "" {
status["log_path"] = logPath
status := DaemonStatusResponse{
Running: true,
PID: pid,
Started: started,
LogPath: logPath,
}
// Add config from RPC status if available
if rpcStatus != nil {
status["auto_commit"] = rpcStatus.AutoCommit
status["auto_push"] = rpcStatus.AutoPush
status["local_mode"] = rpcStatus.LocalMode
status["sync_interval"] = rpcStatus.SyncInterval
status["daemon_mode"] = rpcStatus.DaemonMode
status.AutoCommit = rpcStatus.AutoCommit
status.AutoPush = rpcStatus.AutoPush
status.LocalMode = rpcStatus.LocalMode
status.SyncInterval = rpcStatus.SyncInterval
status.DaemonMode = rpcStatus.DaemonMode
}
outputJSON(status)
return
@@ -108,7 +117,7 @@ func showDaemonStatus(pidFile string) {
}
} else {
if jsonOutput {
outputJSON(map[string]interface{}{"running": false})
outputJSON(DaemonStatusResponse{Running: false})
return
}
fmt.Println("Daemon is not running")

View File

@@ -1,4 +1,5 @@
package main
import (
"bufio"
"encoding/json"
@@ -10,9 +11,59 @@ import (
"strings"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/daemon"
)
// JSON response types for daemons commands

// DaemonStopResponse is returned when a daemon is stopped.
type DaemonStopResponse struct {
	Workspace string `json:"workspace"` // workspace path the daemon was serving
	PID       int    `json:"pid"`       // process ID of the stopped daemon
	Stopped   bool   `json:"stopped"`   // always true for this response
}
// DaemonRestartResponse is returned when a daemon is restarted.
type DaemonRestartResponse struct {
	Workspace string `json:"workspace"` // workspace path of the restarted daemon
	Action    string `json:"action"`    // currently always "restarted"
}
// DaemonLogsResponse is returned for daemon logs in JSON mode.
type DaemonLogsResponse struct {
	Workspace string `json:"workspace"` // workspace path of the daemon
	LogPath   string `json:"log_path"`  // path of the log file that was read
	Content   string `json:"content"`   // log file contents as a single string
}
// DaemonKillallEmptyResponse is returned by killall when no daemons are
// running; both counters are always zero.
type DaemonKillallEmptyResponse struct {
	Stopped int `json:"stopped"` // always 0 for this response
	Failed  int `json:"failed"`  // always 0 for this response
}
// DaemonHealthReport is a single daemon health report entry.
// Was previously a function-local type inside the health command; hoisted
// to package level as part of the typed-JSON refactor.
type DaemonHealthReport struct {
	Workspace       string `json:"workspace"`                  // workspace path the daemon serves
	SocketPath      string `json:"socket_path"`                // RPC socket path used to probe it
	PID             int    `json:"pid,omitempty"`              // daemon process ID, if known
	Version         string `json:"version,omitempty"`          // daemon's reported version, if reachable
	Status          string `json:"status"`                     // health classification (healthy/stale/mismatched/unresponsive — confirm against assignment sites)
	Issue           string `json:"issue,omitempty"`            // detail about the problem when not healthy
	VersionMismatch bool   `json:"version_mismatch,omitempty"` // true when daemon version differs from the CLI's
}
// DaemonHealthResponse is returned for a daemon health check, summarizing
// counts across all discovered daemons plus the per-daemon reports.
type DaemonHealthResponse struct {
	Total        int                  `json:"total"`        // total daemons discovered (len(Daemons))
	Healthy      int                  `json:"healthy"`      // daemons responding normally
	Stale        int                  `json:"stale"`        // daemons with stale sockets
	Mismatched   int                  `json:"mismatched"`   // daemons whose version differs from the CLI
	Unresponsive int                  `json:"unresponsive"` // daemons that failed to respond
	Daemons      []DaemonHealthReport `json:"daemons"`      // individual per-daemon reports
}
var daemonsCmd = &cobra.Command{
Use: "daemons",
GroupID: "sync",
@@ -154,10 +205,10 @@ Sends shutdown command via RPC, with SIGTERM fallback if RPC fails.`,
os.Exit(1)
}
if jsonOutput {
outputJSON(map[string]interface{}{
"workspace": targetDaemon.WorkspacePath,
"pid": targetDaemon.PID,
"stopped": true,
outputJSON(DaemonStopResponse{
Workspace: targetDaemon.WorkspacePath,
PID: targetDaemon.PID,
Stopped: true,
})
} else {
fmt.Printf("Stopped daemon for %s (PID %d)\n", targetDaemon.WorkspacePath, targetDaemon.PID)
@@ -268,9 +319,9 @@ Stops the daemon gracefully, then starts a new one.`,
}
}()
if jsonOutput {
outputJSON(map[string]interface{}{
"workspace": workspace,
"action": "restarted",
outputJSON(DaemonRestartResponse{
Workspace: workspace,
Action: "restarted",
})
} else {
fmt.Printf("Successfully restarted daemon for workspace: %s\n", workspace)
@@ -333,10 +384,10 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
outputJSON(map[string]string{"error": err.Error()})
os.Exit(1)
}
outputJSON(map[string]interface{}{
"workspace": targetDaemon.WorkspacePath,
"log_path": logPath,
"content": string(content),
outputJSON(DaemonLogsResponse{
Workspace: targetDaemon.WorkspacePath,
LogPath: logPath,
Content: string(content),
})
return
}
@@ -430,9 +481,9 @@ Uses escalating shutdown strategy: RPC (2s) → SIGTERM (3s) → SIGKILL (1s).`,
}
if len(aliveDaemons) == 0 {
if jsonOutput {
outputJSON(map[string]interface{}{
"stopped": 0,
"failed": 0,
outputJSON(DaemonKillallEmptyResponse{
Stopped: 0,
Failed: 0,
})
} else {
fmt.Println("No running daemons found")
@@ -472,23 +523,14 @@ stale sockets, version mismatches, and unresponsive daemons.`,
fmt.Fprintf(os.Stderr, "Error discovering daemons: %v\n", err)
os.Exit(1)
}
type healthReport struct {
Workspace string `json:"workspace"`
SocketPath string `json:"socket_path"`
PID int `json:"pid,omitempty"`
Version string `json:"version,omitempty"`
Status string `json:"status"`
Issue string `json:"issue,omitempty"`
VersionMismatch bool `json:"version_mismatch,omitempty"`
}
var reports []healthReport
var reports []DaemonHealthReport
healthyCount := 0
staleCount := 0
mismatchCount := 0
unresponsiveCount := 0
currentVersion := Version
for _, d := range daemons {
report := healthReport{
report := DaemonHealthReport{
Workspace: d.WorkspacePath,
SocketPath: d.SocketPath,
PID: d.PID,
@@ -510,16 +552,14 @@ stale sockets, version mismatches, and unresponsive daemons.`,
reports = append(reports, report)
}
if jsonOutput {
output := map[string]interface{}{
"total": len(reports),
"healthy": healthyCount,
"stale": staleCount,
"mismatched": mismatchCount,
"unresponsive": unresponsiveCount,
"daemons": reports,
}
data, _ := json.MarshalIndent(output, "", " ")
fmt.Println(string(data))
outputJSON(DaemonHealthResponse{
Total: len(reports),
Healthy: healthyCount,
Stale: staleCount,
Mismatched: mismatchCount,
Unresponsive: unresponsiveCount,
Daemons: reports,
})
return
}
// Human-readable output