Fix --json flag shadowing issue causing test failures

Fixed TestHashIDs_IdenticalContentDedup test failure by removing duplicate
--json flag definitions that were shadowing the global persistent flag.

Root cause: Commands had both a persistent --json flag (main.go) and local
--json flags (in individual command files). The local flags shadowed the
persistent flag, preventing the jsonOutput variable from being set correctly.

Changes:
- Removed 31 duplicate --json flag definitions from 15 command files
- All commands now use the single persistent --json flag from main.go
- Commands now correctly output JSON when the --json flag is specified

Test results:
- TestHashIDs_IdenticalContentDedup: Now passes (was failing)
- TestHashIDs_MultiCloneConverge: Passes without JSON parsing warnings
- All other tests: Pass with no regressions

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-11-02 18:52:44 -08:00
parent edf1f71fa7
commit e5f1e4b971
15 changed files with 7 additions and 588 deletions

View File

@@ -272,6 +272,6 @@ func init() {
createCmd.Flags().String("external-ref", "", "External reference (e.g., 'gh-9', 'jira-ABC')")
createCmd.Flags().StringSlice("deps", []string{}, "Dependencies in format 'type:id' or 'id' (e.g., 'discovered-from:bd-20,blocks:bd-15' or 'bd-20')")
createCmd.Flags().Bool("force", false, "Force creation even if prefix doesn't match database prefix")
createCmd.Flags().Bool("json", false, "Output JSON format")
// Note: --json flag is defined as a persistent flag in main.go, not here
rootCmd.AddCommand(createCmd)
}

View File

@@ -1,5 +1,4 @@
package main
import (
"bufio"
"encoding/json"
@@ -11,16 +10,13 @@ import (
"strings"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/daemon"
)
var daemonsCmd = &cobra.Command{
Use: "daemons",
Short: "Manage multiple bd daemons",
Long: `Manage bd daemon processes across all repositories and worktrees.
Subcommands:
list - Show all running daemons
health - Check health of all daemons
@@ -29,7 +25,6 @@ Subcommands:
killall - Stop all running daemons
restart - Restart a specific daemon (not yet implemented)`,
}
var daemonsListCmd = &cobra.Command{
Use: "list",
Short: "List all running bd daemons",
@@ -38,14 +33,12 @@ uptime, last activity, and exclusive lock status.`,
Run: func(cmd *cobra.Command, args []string) {
searchRoots, _ := cmd.Flags().GetStringSlice("search")
// Use global jsonOutput set by PersistentPreRun
// Discover daemons
daemons, err := daemon.DiscoverDaemons(searchRoots)
if err != nil {
fmt.Fprintf(os.Stderr, "Error discovering daemons: %v\n", err)
os.Exit(1)
}
// Auto-cleanup stale sockets (unless --no-cleanup flag is set)
noCleanup, _ := cmd.Flags().GetBool("no-cleanup")
if !noCleanup {
@@ -56,7 +49,6 @@ uptime, last activity, and exclusive lock status.`,
fmt.Fprintf(os.Stderr, "Cleaned up %d stale socket(s)\n", cleaned)
}
}
// Filter to only alive daemons
var aliveDaemons []daemon.DaemonInfo
for _, d := range daemons {
@@ -64,50 +56,40 @@ uptime, last activity, and exclusive lock status.`,
aliveDaemons = append(aliveDaemons, d)
}
}
if jsonOutput {
data, _ := json.MarshalIndent(aliveDaemons, "", " ")
fmt.Println(string(data))
return
}
// Human-readable table output
if len(aliveDaemons) == 0 {
fmt.Println("No running daemons found")
return
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintln(w, "WORKSPACE\tPID\tVERSION\tUPTIME\tLAST ACTIVITY\tLOCK")
for _, d := range aliveDaemons {
workspace := d.WorkspacePath
if workspace == "" {
workspace = "(unknown)"
}
uptime := formatDaemonDuration(d.UptimeSeconds)
lastActivity := "(unknown)"
if d.LastActivityTime != "" {
if t, err := time.Parse(time.RFC3339, d.LastActivityTime); err == nil {
lastActivity = formatDaemonRelativeTime(t)
}
}
lock := "-"
if d.ExclusiveLockActive {
lock = fmt.Sprintf("🔒 %s", d.ExclusiveLockHolder)
}
_, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n",
workspace, d.PID, d.Version, uptime, lastActivity, lock)
}
_ = w.Flush()
},
}
func formatDaemonDuration(seconds float64) string {
d := time.Duration(seconds * float64(time.Second))
if d < time.Minute {
@@ -119,7 +101,6 @@ func formatDaemonDuration(seconds float64) string {
}
return fmt.Sprintf("%.1fd", d.Hours()/24)
}
func formatDaemonRelativeTime(t time.Time) string {
d := time.Since(t)
if d < time.Minute {
@@ -131,7 +112,6 @@ func formatDaemonRelativeTime(t time.Time) string {
}
return fmt.Sprintf("%.1fd ago", d.Hours()/24)
}
var daemonsStopCmd = &cobra.Command{
Use: "stop <workspace-path|pid>",
Short: "Stop a specific bd daemon",
@@ -141,14 +121,12 @@ Sends shutdown command via RPC, with SIGTERM fallback if RPC fails.`,
Run: func(cmd *cobra.Command, args []string) {
target := args[0]
// Use global jsonOutput set by PersistentPreRun
// Discover all daemons
daemons, err := daemon.DiscoverDaemons(nil)
if err != nil {
fmt.Fprintf(os.Stderr, "Error discovering daemons: %v\n", err)
os.Exit(1)
}
// Find matching daemon by workspace path or PID
var targetDaemon *daemon.DaemonInfo
for _, d := range daemons {
@@ -157,7 +135,6 @@ Sends shutdown command via RPC, with SIGTERM fallback if RPC fails.`,
break
}
}
if targetDaemon == nil {
if jsonOutput {
outputJSON(map[string]string{"error": "daemon not found"})
@@ -166,7 +143,6 @@ Sends shutdown command via RPC, with SIGTERM fallback if RPC fails.`,
}
os.Exit(1)
}
// Stop the daemon
if err := daemon.StopDaemon(*targetDaemon); err != nil {
if jsonOutput {
@@ -176,7 +152,6 @@ Sends shutdown command via RPC, with SIGTERM fallback if RPC fails.`,
}
os.Exit(1)
}
if jsonOutput {
outputJSON(map[string]interface{}{
"workspace": targetDaemon.WorkspacePath,
@@ -188,7 +163,6 @@ Sends shutdown command via RPC, with SIGTERM fallback if RPC fails.`,
}
},
}
var daemonsRestartCmd = &cobra.Command{
Use: "restart <workspace-path|pid>",
Short: "Restart a specific bd daemon",
@@ -199,14 +173,12 @@ Stops the daemon gracefully, then starts a new one.`,
target := args[0]
searchRoots, _ := cmd.Flags().GetStringSlice("search")
// Use global jsonOutput set by PersistentPreRun
// Discover daemons
daemons, err := daemon.DiscoverDaemons(searchRoots)
if err != nil {
fmt.Fprintf(os.Stderr, "Error discovering daemons: %v\n", err)
os.Exit(1)
}
// Find the target daemon
var targetDaemon *daemon.DaemonInfo
for _, d := range daemons {
@@ -215,7 +187,6 @@ Stops the daemon gracefully, then starts a new one.`,
break
}
}
if targetDaemon == nil {
if jsonOutput {
outputJSON(map[string]string{"error": "daemon not found"})
@@ -224,9 +195,7 @@ Stops the daemon gracefully, then starts a new one.`,
}
os.Exit(1)
}
workspace := targetDaemon.WorkspacePath
// Stop the daemon
if !jsonOutput {
fmt.Printf("Stopping daemon for workspace: %s (PID %d)\n", workspace, targetDaemon.PID)
@@ -239,15 +208,12 @@ Stops the daemon gracefully, then starts a new one.`,
}
os.Exit(1)
}
// Wait a moment for cleanup
time.Sleep(500 * time.Millisecond)
// Start a new daemon by executing 'bd daemon' in the workspace directory
if !jsonOutput {
fmt.Printf("Starting new daemon for workspace: %s\n", workspace)
}
exe, err := os.Executable()
if err != nil {
if jsonOutput {
@@ -257,17 +223,14 @@ Stops the daemon gracefully, then starts a new one.`,
}
os.Exit(1)
}
// Check if workspace-local bd binary exists (preferred)
localBd := filepath.Join(workspace, "bd")
_, localErr := os.Stat(localBd)
bdPath := exe
if localErr == nil {
// Use local bd binary if it exists
bdPath = localBd
}
// Use bd daemon command with proper working directory
// The daemon will fork itself into the background
daemonCmd := &exec.Cmd{
@@ -276,7 +239,6 @@ Stops the daemon gracefully, then starts a new one.`,
Dir: workspace,
Env: os.Environ(),
}
if err := daemonCmd.Start(); err != nil {
if jsonOutput {
outputJSON(map[string]string{"error": fmt.Sprintf("failed to start daemon: %v", err)})
@@ -285,10 +247,8 @@ Stops the daemon gracefully, then starts a new one.`,
}
os.Exit(1)
}
// Don't wait for daemon to exit (it will fork and continue in background)
go func() { _ = daemonCmd.Wait() }()
if jsonOutput {
outputJSON(map[string]interface{}{
"workspace": workspace,
@@ -299,7 +259,6 @@ Stops the daemon gracefully, then starts a new one.`,
}
},
}
var daemonsLogsCmd = &cobra.Command{
Use: "logs <workspace-path|pid>",
Short: "View logs for a specific bd daemon",
@@ -311,7 +270,6 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
// Use global jsonOutput set by PersistentPreRun
follow, _ := cmd.Flags().GetBool("follow")
lines, _ := cmd.Flags().GetInt("lines")
// Discover all daemons
daemons, err := daemon.DiscoverDaemons(nil)
if err != nil {
@@ -322,7 +280,6 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
}
os.Exit(1)
}
// Find matching daemon by workspace path or PID
var targetDaemon *daemon.DaemonInfo
for _, d := range daemons {
@@ -331,7 +288,6 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
break
}
}
if targetDaemon == nil {
if jsonOutput {
outputJSON(map[string]string{"error": "daemon not found"})
@@ -340,10 +296,8 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
}
os.Exit(1)
}
// Determine log file path
logPath := filepath.Join(filepath.Dir(targetDaemon.SocketPath), "daemon.log")
// Check if log file exists
if _, err := os.Stat(logPath); err != nil {
if jsonOutput {
@@ -353,7 +307,6 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
}
os.Exit(1)
}
if jsonOutput {
// JSON mode: read entire file
// #nosec G304 - controlled path from daemon discovery
@@ -369,7 +322,6 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
})
return
}
// Human-readable mode
if follow {
tailFollow(logPath)
@@ -381,7 +333,6 @@ Supports tail mode (last N lines) and follow mode (like tail -f).`,
}
},
}
func tailLines(filePath string, n int) error {
// #nosec G304 - controlled path from daemon discovery
file, err := os.Open(filePath)
@@ -389,7 +340,6 @@ func tailLines(filePath string, n int) error {
return err
}
defer file.Close()
// Read all lines
var lines []string
scanner := bufio.NewScanner(file)
@@ -399,7 +349,6 @@ func tailLines(filePath string, n int) error {
if err := scanner.Err(); err != nil {
return err
}
// Print last N lines
start := 0
if len(lines) > n {
@@ -408,10 +357,8 @@ func tailLines(filePath string, n int) error {
for i := start; i < len(lines); i++ {
fmt.Println(lines[i])
}
return nil
}
func tailFollow(filePath string) {
// #nosec G304 - controlled path from daemon discovery
file, err := os.Open(filePath)
@@ -420,10 +367,8 @@ func tailFollow(filePath string) {
os.Exit(1)
}
defer file.Close()
// Seek to end
_, _ = file.Seek(0, io.SeekEnd)
reader := bufio.NewReader(file)
for {
line, err := reader.ReadString('\n')
@@ -439,7 +384,6 @@ func tailFollow(filePath string) {
fmt.Print(strings.TrimRight(line, "\n\r") + "\n")
}
}
var daemonsKillallCmd = &cobra.Command{
Use: "killall",
Short: "Stop all running bd daemons",
@@ -449,7 +393,6 @@ Uses escalating shutdown strategy: RPC (2s) → SIGTERM (3s) → SIGKILL (1s).`,
searchRoots, _ := cmd.Flags().GetStringSlice("search")
// Use global jsonOutput set by PersistentPreRun
force, _ := cmd.Flags().GetBool("force")
// Discover all daemons
daemons, err := daemon.DiscoverDaemons(searchRoots)
if err != nil {
@@ -460,7 +403,6 @@ Uses escalating shutdown strategy: RPC (2s) → SIGTERM (3s) → SIGKILL (1s).`,
}
os.Exit(1)
}
// Filter to alive daemons only
var aliveDaemons []daemon.DaemonInfo
for _, d := range daemons {
@@ -468,7 +410,6 @@ Uses escalating shutdown strategy: RPC (2s) → SIGTERM (3s) → SIGKILL (1s).`,
aliveDaemons = append(aliveDaemons, d)
}
}
if len(aliveDaemons) == 0 {
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -480,10 +421,8 @@ Uses escalating shutdown strategy: RPC (2s) → SIGTERM (3s) → SIGKILL (1s).`,
}
return
}
// Kill all daemons
results := daemon.KillAllDaemons(aliveDaemons, force)
if jsonOutput {
outputJSON(results)
} else {
@@ -496,13 +435,11 @@ Uses escalating shutdown strategy: RPC (2s) → SIGTERM (3s) → SIGKILL (1s).`,
}
}
}
if results.Failed > 0 {
os.Exit(1)
}
},
}
var daemonsHealthCmd = &cobra.Command{
Use: "health",
Short: "Check health of all bd daemons",
@@ -511,14 +448,12 @@ stale sockets, version mismatches, and unresponsive daemons.`,
Run: func(cmd *cobra.Command, args []string) {
searchRoots, _ := cmd.Flags().GetStringSlice("search")
// Use global jsonOutput set by PersistentPreRun
// Discover daemons
daemons, err := daemon.DiscoverDaemons(searchRoots)
if err != nil {
fmt.Fprintf(os.Stderr, "Error discovering daemons: %v\n", err)
os.Exit(1)
}
type healthReport struct {
Workspace string `json:"workspace"`
SocketPath string `json:"socket_path"`
@@ -528,15 +463,12 @@ stale sockets, version mismatches, and unresponsive daemons.`,
Issue string `json:"issue,omitempty"`
VersionMismatch bool `json:"version_mismatch,omitempty"`
}
var reports []healthReport
healthyCount := 0
staleCount := 0
mismatchCount := 0
unresponsiveCount := 0
currentVersion := Version
for _, d := range daemons {
report := healthReport{
Workspace: d.WorkspacePath,
@@ -544,7 +476,6 @@ stale sockets, version mismatches, and unresponsive daemons.`,
PID: d.PID,
Version: d.Version,
}
if !d.Alive {
report.Status = "stale"
report.Issue = d.Error
@@ -558,10 +489,8 @@ stale sockets, version mismatches, and unresponsive daemons.`,
report.Status = "healthy"
healthyCount++
}
reports = append(reports, report)
}
if jsonOutput {
output := map[string]interface{}{
"total": len(reports),
@@ -575,61 +504,49 @@ stale sockets, version mismatches, and unresponsive daemons.`,
fmt.Println(string(data))
return
}
// Human-readable output
if len(reports) == 0 {
fmt.Println("No daemons found")
return
}
fmt.Printf("Health Check Summary:\n")
fmt.Printf(" Total: %d\n", len(reports))
fmt.Printf(" Healthy: %d\n", healthyCount)
fmt.Printf(" Stale: %d\n", staleCount)
fmt.Printf(" Mismatched: %d\n", mismatchCount)
fmt.Printf(" Unresponsive: %d\n\n", unresponsiveCount)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
_, _ = fmt.Fprintln(w, "WORKSPACE\tPID\tVERSION\tSTATUS\tISSUE")
for _, r := range reports {
workspace := r.Workspace
if workspace == "" {
workspace = "(unknown)"
}
pidStr := "-"
if r.PID != 0 {
pidStr = fmt.Sprintf("%d", r.PID)
}
version := r.Version
if version == "" {
version = "-"
}
status := r.Status
issue := r.Issue
if issue == "" {
issue = "-"
}
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
workspace, pidStr, version, status, issue)
}
_ = w.Flush()
// Exit with error if there are any issues
if staleCount > 0 || mismatchCount > 0 || unresponsiveCount > 0 {
os.Exit(1)
}
},
}
func init() {
rootCmd.AddCommand(daemonsCmd)
// Add subcommands
daemonsCmd.AddCommand(daemonsListCmd)
daemonsCmd.AddCommand(daemonsHealthCmd)
@@ -637,30 +554,18 @@ func init() {
daemonsCmd.AddCommand(daemonsLogsCmd)
daemonsCmd.AddCommand(daemonsKillallCmd)
daemonsCmd.AddCommand(daemonsRestartCmd)
// Flags for list command
daemonsListCmd.Flags().StringSlice("search", nil, "Directories to search for daemons (default: home, /tmp, cwd)")
daemonsListCmd.Flags().Bool("json", false, "Output in JSON format")
daemonsListCmd.Flags().Bool("no-cleanup", false, "Skip auto-cleanup of stale sockets")
// Flags for health command
daemonsHealthCmd.Flags().StringSlice("search", nil, "Directories to search for daemons (default: home, /tmp, cwd)")
daemonsHealthCmd.Flags().Bool("json", false, "Output in JSON format")
// Flags for stop command
daemonsStopCmd.Flags().Bool("json", false, "Output in JSON format")
// Flags for logs command
daemonsLogsCmd.Flags().BoolP("follow", "f", false, "Follow log output (like tail -f)")
daemonsLogsCmd.Flags().IntP("lines", "n", 50, "Number of lines to show from end of log")
daemonsLogsCmd.Flags().Bool("json", false, "Output in JSON format")
// Flags for killall command
daemonsKillallCmd.Flags().StringSlice("search", nil, "Directories to search for daemons (default: home, /tmp, cwd)")
daemonsKillallCmd.Flags().Bool("json", false, "Output in JSON format")
daemonsKillallCmd.Flags().Bool("force", false, "Use SIGKILL immediately if graceful shutdown fails")
// Flags for restart command
daemonsRestartCmd.Flags().StringSlice("search", nil, "Directories to search for daemons (default: home, /tmp, cwd)")
daemonsRestartCmd.Flags().Bool("json", false, "Output in JSON format")
}

View File

@@ -1,5 +1,4 @@
package main
import (
"bufio"
"context"
@@ -8,44 +7,32 @@ import (
"os"
"regexp"
"strings"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
var deleteCmd = &cobra.Command{
Use: "delete <issue-id> [issue-id...]",
Short: "Delete one or more issues and clean up references",
Long: `Delete one or more issues and clean up all references to them.
This command will:
1. Remove all dependency links (any type, both directions) involving the issues
2. Update text references to "[deleted:ID]" in directly connected issues
3. Delete the issues from the database
This is a destructive operation that cannot be undone. Use with caution.
BATCH DELETION:
Delete multiple issues at once:
bd delete bd-1 bd-2 bd-3 --force
Delete from file (one ID per line):
bd delete --from-file deletions.txt --force
Preview before deleting:
bd delete --from-file deletions.txt --dry-run
DEPENDENCY HANDLING:
Default: Fails if any issue has dependents not in deletion set
bd delete bd-1 bd-2
Cascade: Recursively delete all dependents
bd delete bd-1 --cascade --force
Force: Delete and orphan dependents
bd delete bd-1 --force`,
Args: cobra.MinimumNArgs(0),
@@ -55,11 +42,9 @@ Force: Delete and orphan dependents
dryRun, _ := cmd.Flags().GetBool("dry-run")
cascade, _ := cmd.Flags().GetBool("cascade")
// Use global jsonOutput set by PersistentPreRun
// Collect issue IDs from args and/or file
issueIDs := make([]string, 0, len(args))
issueIDs = append(issueIDs, args...)
if fromFile != "" {
fileIDs, err := readIssueIDsFromFile(fromFile)
if err != nil {
@@ -68,25 +53,20 @@ Force: Delete and orphan dependents
}
issueIDs = append(issueIDs, fileIDs...)
}
if len(issueIDs) == 0 {
fmt.Fprintf(os.Stderr, "Error: no issue IDs provided\n")
_ = cmd.Usage()
os.Exit(1)
}
// Remove duplicates
issueIDs = uniqueStrings(issueIDs)
// Handle batch deletion
if len(issueIDs) > 1 {
deleteBatch(cmd, issueIDs, force, dryRun, cascade, jsonOutput)
return
}
// Single issue deletion (legacy behavior)
issueID := issueIDs[0]
// Ensure we have a direct store when daemon lacks delete support
if daemonClient != nil {
if err := ensureDirectMode("daemon does not support delete command"); err != nil {
@@ -99,9 +79,7 @@ Force: Delete and orphan dependents
os.Exit(1)
}
}
ctx := context.Background()
// Get the issue to be deleted
issue, err := store.GetIssue(ctx, issueID)
if err != nil {
@@ -112,10 +90,8 @@ Force: Delete and orphan dependents
fmt.Fprintf(os.Stderr, "Error: issue %s not found\n", issueID)
os.Exit(1)
}
// Find all connected issues (dependencies in both directions)
connectedIssues := make(map[string]*types.Issue)
// Get dependencies (issues this one depends on)
deps, err := store.GetDependencies(ctx, issueID)
if err != nil {
@@ -125,7 +101,6 @@ Force: Delete and orphan dependents
for _, dep := range deps {
connectedIssues[dep.ID] = dep
}
// Get dependents (issues that depend on this one)
dependents, err := store.GetDependents(ctx, issueID)
if err != nil {
@@ -135,29 +110,24 @@ Force: Delete and orphan dependents
for _, dependent := range dependents {
connectedIssues[dependent.ID] = dependent
}
// Get dependency records (outgoing) to count how many we'll remove
depRecords, err := store.GetDependencyRecords(ctx, issueID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting dependency records: %v\n", err)
os.Exit(1)
}
// Build the regex pattern for matching issue IDs (handles hyphenated IDs properly)
// Pattern: (^|non-word-char)(issueID)($|non-word-char) where word-char includes hyphen
idPattern := `(^|[^A-Za-z0-9_-])(` + regexp.QuoteMeta(issueID) + `)($|[^A-Za-z0-9_-])`
re := regexp.MustCompile(idPattern)
replacementText := `$1[deleted:` + issueID + `]$3`
// Preview mode
if !force {
red := color.New(color.FgRed).SprintFunc()
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s\n", red("⚠️ DELETE PREVIEW"))
fmt.Printf("\nIssue to delete:\n")
fmt.Printf(" %s: %s\n", issueID, issue.Title)
totalDeps := len(depRecords) + len(dependents)
if totalDeps > 0 {
fmt.Printf("\nDependency links to remove: %d\n", totalDeps)
@@ -168,7 +138,6 @@ Force: Delete and orphan dependents
fmt.Printf(" %s → %s (inbound)\n", dep.ID, issueID)
}
}
if len(connectedIssues) > 0 {
fmt.Printf("\nConnected issues where text references will be updated:\n")
issuesWithRefs := 0
@@ -178,7 +147,6 @@ Force: Delete and orphan dependents
(connIssue.Notes != "" && re.MatchString(connIssue.Notes)) ||
(connIssue.Design != "" && re.MatchString(connIssue.Design)) ||
(connIssue.AcceptanceCriteria != "" && re.MatchString(connIssue.AcceptanceCriteria))
if hasRefs {
fmt.Printf(" %s: %s\n", id, connIssue.Title)
issuesWithRefs++
@@ -188,43 +156,35 @@ Force: Delete and orphan dependents
fmt.Printf(" (none have text references)\n")
}
}
fmt.Printf("\n%s\n", yellow("This operation cannot be undone!"))
fmt.Printf("To proceed, run: %s\n\n", yellow("bd delete "+issueID+" --force"))
return
}
// Actually delete
// 1. Update text references in connected issues (all text fields)
updatedIssueCount := 0
for id, connIssue := range connectedIssues {
updates := make(map[string]interface{})
// Replace in description
if re.MatchString(connIssue.Description) {
newDesc := re.ReplaceAllString(connIssue.Description, replacementText)
updates["description"] = newDesc
}
// Replace in notes
if connIssue.Notes != "" && re.MatchString(connIssue.Notes) {
newNotes := re.ReplaceAllString(connIssue.Notes, replacementText)
updates["notes"] = newNotes
}
// Replace in design
if connIssue.Design != "" && re.MatchString(connIssue.Design) {
newDesign := re.ReplaceAllString(connIssue.Design, replacementText)
updates["design"] = newDesign
}
// Replace in acceptance_criteria
if connIssue.AcceptanceCriteria != "" && re.MatchString(connIssue.AcceptanceCriteria) {
newAC := re.ReplaceAllString(connIssue.AcceptanceCriteria, replacementText)
updates["acceptance_criteria"] = newAC
}
if len(updates) > 0 {
if err := store.UpdateIssue(ctx, id, updates, actor); err != nil {
fmt.Fprintf(os.Stderr, "Warning: Failed to update references in %s: %v\n", id, err)
@@ -233,7 +193,6 @@ Force: Delete and orphan dependents
}
}
}
// 2. Remove all dependency links (outgoing)
outgoingRemoved := 0
for _, dep := range depRecords {
@@ -244,7 +203,6 @@ Force: Delete and orphan dependents
outgoingRemoved++
}
}
// 3. Remove inbound dependency links (issues that depend on this one)
inboundRemoved := 0
for _, dep := range dependents {
@@ -255,21 +213,17 @@ Force: Delete and orphan dependents
inboundRemoved++
}
}
// 4. Delete the issue itself from database
if err := deleteIssue(ctx, issueID); err != nil {
fmt.Fprintf(os.Stderr, "Error deleting issue: %v\n", err)
os.Exit(1)
}
// 5. Remove from JSONL (auto-flush can't see deletions)
if err := removeIssueFromJSONL(issueID); err != nil {
fmt.Fprintf(os.Stderr, "Warning: Failed to remove from JSONL: %v\n", err)
}
// Schedule auto-flush to update neighbors
markDirtyAndScheduleFlush()
totalDepsRemoved := outgoingRemoved + inboundRemoved
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -285,7 +239,6 @@ Force: Delete and orphan dependents
}
},
}
// deleteIssue removes an issue from the database
// Note: This is a direct database operation since Storage interface doesn't have Delete
func deleteIssue(ctx context.Context, issueID string) error {
@@ -294,14 +247,11 @@ func deleteIssue(ctx context.Context, issueID string) error {
type deleter interface {
DeleteIssue(ctx context.Context, id string) error
}
if d, ok := store.(deleter); ok {
return d.DeleteIssue(ctx, issueID)
}
return fmt.Errorf("delete operation not supported by this storage backend")
}
// removeIssueFromJSONL removes a deleted issue from the JSONL file
// Auto-flush cannot see deletions because the dirty_issues row is deleted with the issue
func removeIssueFromJSONL(issueID string) error {
@@ -309,7 +259,6 @@ func removeIssueFromJSONL(issueID string) error {
if path == "" {
return nil // No JSONL file yet
}
// Read all issues except the deleted one
// #nosec G304 - controlled path from config
f, err := os.Open(path)
@@ -319,7 +268,6 @@ func removeIssueFromJSONL(issueID string) error {
}
return fmt.Errorf("failed to open JSONL: %w", err)
}
var issues []*types.Issue
scanner := bufio.NewScanner(f)
for scanner.Scan() {
@@ -340,11 +288,9 @@ func removeIssueFromJSONL(issueID string) error {
_ = f.Close()
return fmt.Errorf("failed to read JSONL: %w", err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("failed to close JSONL: %w", err)
}
// Write to temp file atomically
temp := fmt.Sprintf("%s.tmp.%d", path, os.Getpid())
// #nosec G304 - controlled path from config
@@ -352,7 +298,6 @@ func removeIssueFromJSONL(issueID string) error {
if err != nil {
return fmt.Errorf("failed to create temp file: %w", err)
}
enc := json.NewEncoder(out)
for _, iss := range issues {
if err := enc.Encode(iss); err != nil {
@@ -361,21 +306,17 @@ func removeIssueFromJSONL(issueID string) error {
return fmt.Errorf("failed to write issue: %w", err)
}
}
if err := out.Close(); err != nil {
_ = os.Remove(temp)
return fmt.Errorf("failed to close temp file: %w", err)
}
// Atomic rename
if err := os.Rename(temp, path); err != nil {
_ = os.Remove(temp)
return fmt.Errorf("failed to rename temp file: %w", err)
}
return nil
}
// deleteBatch handles deletion of multiple issues
//nolint:unparam // cmd parameter required for potential future use
func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, cascade bool, jsonOutput bool) {
@@ -391,16 +332,13 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
os.Exit(1)
}
}
ctx := context.Background()
// Type assert to SQLite storage
d, ok := store.(*sqlite.SQLiteStorage)
if !ok {
fmt.Fprintf(os.Stderr, "Error: batch delete not supported by this storage backend\n")
os.Exit(1)
}
// Verify all issues exist
issues := make(map[string]*types.Issue)
notFound := []string{}
@@ -416,12 +354,10 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
issues[id] = issue
}
}
if len(notFound) > 0 {
fmt.Fprintf(os.Stderr, "Error: issues not found: %s\n", strings.Join(notFound, ", "))
os.Exit(1)
}
// Dry-run or preview mode
if dryRun || !force {
result, err := d.DeleteIssues(ctx, issueIDs, cascade, false, true)
@@ -430,7 +366,6 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
showDeletionPreview(issueIDs, issues, cascade, err)
os.Exit(1)
}
showDeletionPreview(issueIDs, issues, cascade, nil)
fmt.Printf("\nWould delete: %d issues\n", result.DeletedCount)
fmt.Printf("Would remove: %d dependencies, %d labels, %d events\n",
@@ -438,7 +373,6 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
if len(result.OrphanedIssues) > 0 {
fmt.Printf("Would orphan: %d issues\n", len(result.OrphanedIssues))
}
if dryRun {
fmt.Printf("\n(Dry-run mode - no changes made)\n")
} else {
@@ -454,14 +388,12 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
}
return
}
// Pre-collect connected issues before deletion (so we can update their text references)
connectedIssues := make(map[string]*types.Issue)
idSet := make(map[string]bool)
for _, id := range issueIDs {
idSet[id] = true
}
for _, id := range issueIDs {
// Get dependencies (issues this one depends on)
deps, err := store.GetDependencies(ctx, id)
@@ -472,7 +404,6 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
}
}
}
// Get dependents (issues that depend on this one)
dependents, err := store.GetDependents(ctx, id)
if err == nil {
@@ -483,27 +414,22 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
}
}
}
// Actually delete
result, err := d.DeleteIssues(ctx, issueIDs, cascade, force, false)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Update text references in connected issues (using pre-collected issues)
updatedCount := updateTextReferencesInIssues(ctx, issueIDs, connectedIssues)
// Remove from JSONL
for _, id := range issueIDs {
if err := removeIssueFromJSONL(id); err != nil {
fmt.Fprintf(os.Stderr, "Warning: Failed to remove %s from JSONL: %v\n", id, err)
}
}
// Schedule auto-flush
markDirtyAndScheduleFlush()
// Output results
if jsonOutput {
outputJSON(map[string]interface{}{
@@ -529,12 +455,10 @@ func deleteBatch(_ *cobra.Command, issueIDs []string, force bool, dryRun bool, c
}
}
}
// showDeletionPreview shows what would be deleted
func showDeletionPreview(issueIDs []string, issues map[string]*types.Issue, cascade bool, depError error) {
red := color.New(color.FgRed).SprintFunc()
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s\n", red("⚠️ DELETE PREVIEW"))
fmt.Printf("\nIssues to delete (%d):\n", len(issueIDs))
for _, id := range issueIDs {
@@ -542,30 +466,24 @@ func showDeletionPreview(issueIDs []string, issues map[string]*types.Issue, casc
fmt.Printf(" %s: %s\n", id, issue.Title)
}
}
if cascade {
fmt.Printf("\n%s Cascade mode enabled - will also delete all dependent issues\n", yellow("⚠"))
}
if depError != nil {
fmt.Printf("\n%s\n", red(depError.Error()))
}
}
// updateTextReferencesInIssues updates text references to deleted issues in pre-collected connected issues
func updateTextReferencesInIssues(ctx context.Context, deletedIDs []string, connectedIssues map[string]*types.Issue) int {
updatedCount := 0
// For each deleted issue, update references in all connected issues
for _, id := range deletedIDs {
// Build regex pattern
idPattern := `(^|[^A-Za-z0-9_-])(` + regexp.QuoteMeta(id) + `)($|[^A-Za-z0-9_-])`
re := regexp.MustCompile(idPattern)
replacementText := `$1[deleted:` + id + `]$3`
for connID, connIssue := range connectedIssues {
updates := make(map[string]interface{})
if re.MatchString(connIssue.Description) {
updates["description"] = re.ReplaceAllString(connIssue.Description, replacementText)
}
@@ -578,7 +496,6 @@ func updateTextReferencesInIssues(ctx context.Context, deletedIDs []string, conn
if connIssue.AcceptanceCriteria != "" && re.MatchString(connIssue.AcceptanceCriteria) {
updates["acceptance_criteria"] = re.ReplaceAllString(connIssue.AcceptanceCriteria, replacementText)
}
if len(updates) > 0 {
if err := store.UpdateIssue(ctx, connID, updates, actor); err == nil {
updatedCount++
@@ -599,10 +516,8 @@ func updateTextReferencesInIssues(ctx context.Context, deletedIDs []string, conn
}
}
}
return updatedCount
}
// readIssueIDsFromFile reads issue IDs from a file (one per line)
func readIssueIDsFromFile(filename string) ([]string, error) {
// #nosec G304 - user-provided file path is intentional
@@ -611,7 +526,6 @@ func readIssueIDsFromFile(filename string) ([]string, error) {
return nil, err
}
defer func() { _ = f.Close() }()
var ids []string
scanner := bufio.NewScanner(f)
for scanner.Scan() {
@@ -622,14 +536,11 @@ func readIssueIDsFromFile(filename string) ([]string, error) {
}
ids = append(ids, line)
}
if err := scanner.Err(); err != nil {
return nil, err
}
return ids, nil
}
// uniqueStrings removes duplicates from a slice of strings
func uniqueStrings(slice []string) []string {
seen := make(map[string]bool)
@@ -642,12 +553,10 @@ func uniqueStrings(slice []string) []string {
}
return result
}
func init() {
deleteCmd.Flags().BoolP("force", "f", false, "Actually delete (without this flag, shows preview)")
deleteCmd.Flags().String("from-file", "", "Read issue IDs from file (one per line)")
deleteCmd.Flags().Bool("dry-run", false, "Preview what would be deleted without making changes")
deleteCmd.Flags().Bool("cascade", false, "Recursively delete all dependent issues")
deleteCmd.Flags().Bool("json", false, "Output JSON format")
rootCmd.AddCommand(deleteCmd)
}

View File

@@ -444,17 +444,17 @@ func getStatusEmoji(status types.Status) string {
func init() {
depAddCmd.Flags().StringP("type", "t", "blocks", "Dependency type (blocks|related|parent-child|discovered-from)")
depAddCmd.Flags().Bool("json", false, "Output JSON format")
// Note: --json flag is defined as a persistent flag in main.go, not here
depRemoveCmd.Flags().Bool("json", false, "Output JSON format")
// Note: --json flag is defined as a persistent flag in main.go, not here
depTreeCmd.Flags().Bool("show-all-paths", false, "Show all paths to nodes (no deduplication for diamond dependencies)")
depTreeCmd.Flags().IntP("max-depth", "d", 50, "Maximum tree depth to display (safety limit)")
depTreeCmd.Flags().Bool("reverse", false, "Show dependent tree (what was discovered from this) instead of dependency tree (what blocks this)")
depTreeCmd.Flags().String("format", "", "Output format: 'mermaid' for Mermaid.js flowchart")
depTreeCmd.Flags().Bool("json", false, "Output JSON format")
// Note: --json flag is defined as a persistent flag in main.go, not here
depCyclesCmd.Flags().Bool("json", false, "Output JSON format")
// Note: --json flag is defined as a persistent flag in main.go, not here
depCmd.AddCommand(depAddCmd)
depCmd.AddCommand(depRemoveCmd)

View File

@@ -1,29 +1,23 @@
package main
import (
"context"
"fmt"
"os"
"regexp"
"strings"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/types"
)
var duplicatesCmd = &cobra.Command{
Use: "duplicates",
Short: "Find and optionally merge duplicate issues",
Long: `Find issues with identical content (title, description, design, acceptance criteria).
Groups issues by content hash and reports duplicates with suggested merge targets.
The merge target is chosen by:
1. Reference count (most referenced issue wins)
2. Lexicographically smallest ID if reference counts are equal
Only groups issues with matching status (open with open, closed with closed).
Example:
bd duplicates # Show all duplicate groups
bd duplicates --auto-merge # Automatically merge all duplicates
@@ -35,20 +29,16 @@ Example:
fmt.Fprintf(os.Stderr, "Use: bd --no-daemon duplicates\n")
os.Exit(1)
}
autoMerge, _ := cmd.Flags().GetBool("auto-merge")
dryRun, _ := cmd.Flags().GetBool("dry-run")
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
// Get all issues
allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching issues: %v\n", err)
os.Exit(1)
}
// Filter out closed issues - they're done, no point detecting duplicates
openIssues := make([]*types.Issue, 0, len(allIssues))
for _, issue := range allIssues {
@@ -56,10 +46,8 @@ Example:
openIssues = append(openIssues, issue)
}
}
// Find duplicates (only among open issues)
duplicateGroups := findDuplicateGroups(openIssues)
if len(duplicateGroups) == 0 {
if !jsonOutput {
fmt.Println("No duplicates found!")
@@ -71,14 +59,11 @@ Example:
}
return
}
// Count references for each issue
refCounts := countReferences(allIssues)
// Prepare output
var mergeCommands []string
var mergeResults []map[string]interface{}
for _, group := range duplicateGroups {
target := chooseMergeTarget(group, refCounts)
sources := make([]string, 0, len(group)-1)
@@ -87,7 +72,6 @@ Example:
sources = append(sources, issue.ID)
}
}
if autoMerge || dryRun {
// Perform merge (unless dry-run)
if !dryRun {
@@ -96,7 +80,6 @@ Example:
fmt.Fprintf(os.Stderr, "Error merging %s into %s: %v\n", strings.Join(sources, ", "), target.ID, err)
continue
}
if jsonOutput {
mergeResults = append(mergeResults, map[string]interface{}{
"target_id": target.ID,
@@ -109,7 +92,6 @@ Example:
})
}
}
cmd := fmt.Sprintf("bd merge %s --into %s", strings.Join(sources, " "), target.ID)
mergeCommands = append(mergeCommands, cmd)
} else {
@@ -117,12 +99,10 @@ Example:
mergeCommands = append(mergeCommands, cmd)
}
}
// Mark dirty if we performed merges
if autoMerge && !dryRun && len(mergeCommands) > 0 {
markDirtyAndScheduleFlush()
}
// Output results
if jsonOutput {
output := map[string]interface{}{
@@ -140,13 +120,10 @@ Example:
yellow := color.New(color.FgYellow).SprintFunc()
cyan := color.New(color.FgCyan).SprintFunc()
green := color.New(color.FgGreen).SprintFunc()
fmt.Printf("%s Found %d duplicate group(s):\n\n", yellow("🔍"), len(duplicateGroups))
for i, group := range duplicateGroups {
target := chooseMergeTarget(group, refCounts)
fmt.Printf("%s Group %d: %s\n", cyan("━━"), i+1, group[0].Title)
for _, issue := range group {
refs := refCounts[issue.ID]
marker := " "
@@ -156,7 +133,6 @@ Example:
fmt.Printf("%s%s (%s, P%d, %d references)\n",
marker, issue.ID, issue.Status, issue.Priority, refs)
}
sources := make([]string, 0, len(group)-1)
for _, issue := range group {
if issue.ID != target.ID {
@@ -166,7 +142,6 @@ Example:
fmt.Printf(" %s bd merge %s --into %s\n\n",
cyan("Suggested:"), strings.Join(sources, " "), target.ID)
}
if autoMerge {
if dryRun {
fmt.Printf("%s Dry run - would execute %d merge(s)\n", yellow("⚠"), len(mergeCommands))
@@ -179,14 +154,11 @@ Example:
}
},
}
// init registers the duplicates command and its flags with the root command.
//
// The --json flag is intentionally NOT defined here: it is a persistent
// flag on the root command (main.go), and a local duplicate would shadow
// it (the command body reads the global jsonOutput set by PersistentPreRun).
func init() {
	duplicatesCmd.Flags().Bool("auto-merge", false, "Automatically merge all duplicates")
	duplicatesCmd.Flags().Bool("dry-run", false, "Show what would be merged without making changes")
	// Note: --json flag is defined as a persistent flag in main.go, not here
	rootCmd.AddCommand(duplicatesCmd)
}
// contentKey represents the fields we use to identify duplicate issues
type contentKey struct {
title string
@@ -195,11 +167,9 @@ type contentKey struct {
acceptanceCriteria string
status string // Only group issues with same status
}
// findDuplicateGroups groups issues by content hash
func findDuplicateGroups(issues []*types.Issue) [][]*types.Issue {
groups := make(map[contentKey][]*types.Issue)
for _, issue := range issues {
key := contentKey{
title: issue.Title,
@@ -208,10 +178,8 @@ func findDuplicateGroups(issues []*types.Issue) [][]*types.Issue {
acceptanceCriteria: issue.AcceptanceCriteria,
status: string(issue.Status),
}
groups[key] = append(groups[key], issue)
}
// Filter to only groups with duplicates
var duplicates [][]*types.Issue
for _, group := range groups {
@@ -219,15 +187,12 @@ func findDuplicateGroups(issues []*types.Issue) [][]*types.Issue {
duplicates = append(duplicates, group)
}
}
return duplicates
}
// countReferences counts how many times each issue is referenced in text fields
func countReferences(issues []*types.Issue) map[string]int {
counts := make(map[string]int)
idPattern := regexp.MustCompile(`\b[a-zA-Z][-a-zA-Z0-9]*-\d+\b`)
for _, issue := range issues {
// Search in all text fields
textFields := []string{
@@ -236,7 +201,6 @@ func countReferences(issues []*types.Issue) map[string]int {
issue.AcceptanceCriteria,
issue.Notes,
}
for _, text := range textFields {
matches := idPattern.FindAllString(text, -1)
for _, match := range matches {
@@ -244,20 +208,16 @@ func countReferences(issues []*types.Issue) map[string]int {
}
}
}
return counts
}
// chooseMergeTarget selects the best issue to merge into
// Priority: highest reference count, then lexicographically smallest ID
func chooseMergeTarget(group []*types.Issue, refCounts map[string]int) *types.Issue {
if len(group) == 0 {
return nil
}
target := group[0]
targetRefs := refCounts[target.ID]
for _, issue := range group[1:] {
issueRefs := refCounts[issue.ID]
if issueRefs > targetRefs || (issueRefs == targetRefs && issue.ID < target.ID) {
@@ -265,18 +225,14 @@ func chooseMergeTarget(group []*types.Issue, refCounts map[string]int) *types.Is
targetRefs = issueRefs
}
}
return target
}
// formatDuplicateGroupsJSON formats duplicate groups for JSON output
func formatDuplicateGroupsJSON(groups [][]*types.Issue, refCounts map[string]int) []map[string]interface{} {
var result []map[string]interface{}
for _, group := range groups {
target := chooseMergeTarget(group, refCounts)
issues := make([]map[string]interface{}, len(group))
for i, issue := range group {
issues[i] = map[string]interface{}{
"id": issue.ID,
@@ -287,14 +243,12 @@ func formatDuplicateGroupsJSON(groups [][]*types.Issue, refCounts map[string]int
"is_merge_target": issue.ID == target.ID,
}
}
sources := make([]string, 0, len(group)-1)
for _, issue := range group {
if issue.ID != target.ID {
sources = append(sources, issue.ID)
}
}
result = append(result, map[string]interface{}{
"title": group[0].Title,
"issues": issues,
@@ -303,6 +257,5 @@ func formatDuplicateGroupsJSON(groups [][]*types.Issue, refCounts map[string]int
"suggested_merge_cmd": fmt.Sprintf("bd merge %s --into %s", strings.Join(sources, " "), target.ID),
})
}
return result
}

View File

@@ -1,32 +1,26 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
)
var epicCmd = &cobra.Command{
Use: "epic",
Short: "Epic management commands",
}
var epicStatusCmd = &cobra.Command{
Use: "status",
Short: "Show epic completion status",
Run: func(cmd *cobra.Command, args []string) {
eligibleOnly, _ := cmd.Flags().GetBool("eligible-only")
// Use global jsonOutput set by PersistentPreRun
var epics []*types.EpicStatus
var err error
if daemonClient != nil {
resp, err := daemonClient.EpicStatus(&rpc.EpicStatusArgs{
EligibleOnly: eligibleOnly,
@@ -50,7 +44,6 @@ var epicStatusCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error getting epic status: %v\n", err)
os.Exit(1)
}
if eligibleOnly {
filtered := []*types.EpicStatus{}
for _, epic := range epics {
@@ -61,7 +54,6 @@ var epicStatusCmd = &cobra.Command{
epics = filtered
}
}
if jsonOutput {
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
@@ -71,25 +63,21 @@ var epicStatusCmd = &cobra.Command{
}
return
}
// Human-readable output
if len(epics) == 0 {
fmt.Println("No open epics found")
return
}
cyan := color.New(color.FgCyan).SprintFunc()
yellow := color.New(color.FgYellow).SprintFunc()
green := color.New(color.FgGreen).SprintFunc()
bold := color.New(color.Bold).SprintFunc()
for _, epicStatus := range epics {
epic := epicStatus.Epic
percentage := 0
if epicStatus.TotalChildren > 0 {
percentage = (epicStatus.ClosedChildren * 100) / epicStatus.TotalChildren
}
statusIcon := ""
if epicStatus.EligibleForClose {
statusIcon = green("✓")
@@ -98,7 +86,6 @@ var epicStatusCmd = &cobra.Command{
} else {
statusIcon = "○"
}
fmt.Printf("%s %s %s\n", statusIcon, cyan(epic.ID), bold(epic.Title))
fmt.Printf(" Progress: %d/%d children closed (%d%%)\n",
epicStatus.ClosedChildren, epicStatus.TotalChildren, percentage)
@@ -109,16 +96,13 @@ var epicStatusCmd = &cobra.Command{
}
},
}
var closeEligibleEpicsCmd = &cobra.Command{
Use: "close-eligible",
Short: "Close epics where all children are complete",
Run: func(cmd *cobra.Command, args []string) {
dryRun, _ := cmd.Flags().GetBool("dry-run")
// Use global jsonOutput set by PersistentPreRun
var eligibleEpics []*types.EpicStatus
if daemonClient != nil {
resp, err := daemonClient.EpicStatus(&rpc.EpicStatusArgs{
EligibleOnly: true,
@@ -148,7 +132,6 @@ var closeEligibleEpicsCmd = &cobra.Command{
}
}
}
if len(eligibleEpics) == 0 {
if !jsonOutput {
fmt.Println("No epics eligible for closure")
@@ -157,7 +140,6 @@ var closeEligibleEpicsCmd = &cobra.Command{
}
return
}
if dryRun {
if jsonOutput {
enc := json.NewEncoder(os.Stdout)
@@ -174,7 +156,6 @@ var closeEligibleEpicsCmd = &cobra.Command{
}
return
}
// Actually close the epics
closedIDs := []string{}
for _, epicStatus := range eligibleEpics {
@@ -203,7 +184,6 @@ var closeEligibleEpicsCmd = &cobra.Command{
}
closedIDs = append(closedIDs, epicStatus.Epic.ID)
}
if jsonOutput {
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
@@ -222,16 +202,10 @@ var closeEligibleEpicsCmd = &cobra.Command{
}
},
}
// init wires the epic subcommands (status, close-eligible) to the epic
// command and registers it with the root command.
//
// The --json flag is intentionally NOT defined here: it is a persistent
// flag on the root command (main.go). The two local definitions that used
// to live here shadowed the persistent flag, so jsonOutput was never set.
func init() {
	epicCmd.AddCommand(epicStatusCmd)
	epicCmd.AddCommand(closeEligibleEpicsCmd)
	epicStatusCmd.Flags().Bool("eligible-only", false, "Show only epics eligible for closure")
	closeEligibleEpicsCmd.Flags().Bool("dry-run", false, "Preview what would be closed without making changes")
	// Note: --json flag is defined as a persistent flag in main.go, not here
	rootCmd.AddCommand(epicCmd)
}

View File

@@ -1,6 +1,5 @@
// Package main implements the bd CLI label management commands.
package main
import (
"context"
"encoding/json"
@@ -8,25 +7,21 @@ import (
"os"
"sort"
"strings"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/utils"
)
var labelCmd = &cobra.Command{
Use: "label",
Short: "Manage issue labels",
}
// Helper function to process label operations for multiple issues
func processBatchLabelOperation(issueIDs []string, label string, operation string, jsonOut bool,
daemonFunc func(string, string) error, storeFunc func(context.Context, string, string, string) error) {
ctx := context.Background()
results := []map[string]interface{}{}
for _, issueID := range issueIDs {
var err error
if daemonClient != nil {
@@ -34,12 +29,10 @@ func processBatchLabelOperation(issueIDs []string, label string, operation strin
} else {
err = storeFunc(ctx, issueID, label, actor)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error %s label %s %s: %v\n", operation, operation, issueID, err)
continue
}
if jsonOut {
results = append(results, map[string]interface{}{
"status": operation,
@@ -57,22 +50,18 @@ func processBatchLabelOperation(issueIDs []string, label string, operation strin
fmt.Printf("%s %s label '%s' %s %s\n", green("✓"), verb, label, prep, issueID)
}
}
if len(issueIDs) > 0 && daemonClient == nil {
markDirtyAndScheduleFlush()
}
if jsonOut && len(results) > 0 {
outputJSON(results)
}
}
// parseLabelArgs splits the positional CLI arguments for a label command:
// the final argument is the label, and every argument before it is an
// issue ID. Callers guarantee at least one argument via cobra arg checks.
func parseLabelArgs(args []string) (issueIDs []string, label string) {
	last := len(args) - 1
	return args[:last], args[last]
}
//nolint:dupl // labelAddCmd and labelRemoveCmd are similar but serve different operations
var labelAddCmd = &cobra.Command{
Use: "add [issue-id...] [label]",
@@ -81,14 +70,12 @@ var labelAddCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
issueIDs, label := parseLabelArgs(args)
// Resolve partial IDs
ctx := context.Background()
resolvedIDs := make([]string, 0, len(issueIDs))
for _, id := range issueIDs {
var fullID string
var err error
if daemonClient != nil {
resolveArgs := &rpc.ResolveIDArgs{ID: id}
resp, err := daemonClient.ResolveID(resolveArgs)
@@ -107,7 +94,6 @@ var labelAddCmd = &cobra.Command{
resolvedIDs = append(resolvedIDs, fullID)
}
issueIDs = resolvedIDs
processBatchLabelOperation(issueIDs, label, "added", jsonOutput,
func(issueID, lbl string) error {
_, err := daemonClient.AddLabel(&rpc.LabelAddArgs{ID: issueID, Label: lbl})
@@ -118,7 +104,6 @@ var labelAddCmd = &cobra.Command{
})
},
}
//nolint:dupl // labelRemoveCmd and labelAddCmd are similar but serve different operations
var labelRemoveCmd = &cobra.Command{
Use: "remove [issue-id...] [label]",
@@ -127,14 +112,12 @@ var labelRemoveCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
issueIDs, label := parseLabelArgs(args)
// Resolve partial IDs
ctx := context.Background()
resolvedIDs := make([]string, 0, len(issueIDs))
for _, id := range issueIDs {
var fullID string
var err error
if daemonClient != nil {
resolveArgs := &rpc.ResolveIDArgs{ID: id}
resp, err := daemonClient.ResolveID(resolveArgs)
@@ -153,7 +136,6 @@ var labelRemoveCmd = &cobra.Command{
resolvedIDs = append(resolvedIDs, fullID)
}
issueIDs = resolvedIDs
processBatchLabelOperation(issueIDs, label, "removed", jsonOutput,
func(issueID, lbl string) error {
_, err := daemonClient.RemoveLabel(&rpc.LabelRemoveArgs{ID: issueID, Label: lbl})
@@ -164,7 +146,6 @@ var labelRemoveCmd = &cobra.Command{
})
},
}
var labelListCmd = &cobra.Command{
Use: "list [issue-id]",
Short: "List labels for an issue",
@@ -172,7 +153,6 @@ var labelListCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
// Resolve partial ID first
var issueID string
if daemonClient != nil {
@@ -191,9 +171,7 @@ var labelListCmd = &cobra.Command{
os.Exit(1)
}
}
var labels []string
// Use daemon if available
if daemonClient != nil {
resp, err := daemonClient.Show(&rpc.ShowArgs{ID: issueID})
@@ -201,7 +179,6 @@ var labelListCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
@@ -217,7 +194,6 @@ var labelListCmd = &cobra.Command{
os.Exit(1)
}
}
if jsonOutput {
// Always output array, even if empty
if labels == nil {
@@ -226,12 +202,10 @@ var labelListCmd = &cobra.Command{
outputJSON(labels)
return
}
if len(labels) == 0 {
fmt.Printf("\n%s has no labels\n", issueID)
return
}
cyan := color.New(color.FgCyan).SprintFunc()
fmt.Printf("\n%s Labels for %s:\n", cyan("🏷"), issueID)
for _, label := range labels {
@@ -240,17 +214,14 @@ var labelListCmd = &cobra.Command{
fmt.Println()
},
}
var labelListAllCmd = &cobra.Command{
Use: "list-all",
Short: "List all unique labels in the database",
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
var issues []*types.Issue
var err error
// Use daemon if available
if daemonClient != nil {
resp, err := daemonClient.List(&rpc.ListArgs{})
@@ -258,7 +229,6 @@ var labelListAllCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if err := json.Unmarshal(resp.Data, &issues); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
@@ -271,7 +241,6 @@ var labelListAllCmd = &cobra.Command{
os.Exit(1)
}
}
// Collect unique labels with counts
labelCounts := make(map[string]int)
for _, issue := range issues {
@@ -292,7 +261,6 @@ var labelListAllCmd = &cobra.Command{
}
}
}
if len(labelCounts) == 0 {
if jsonOutput {
outputJSON([]string{})
@@ -301,14 +269,12 @@ var labelListAllCmd = &cobra.Command{
}
return
}
// Sort labels alphabetically
labels := make([]string, 0, len(labelCounts))
for label := range labelCounts {
labels = append(labels, label)
}
sort.Strings(labels)
if jsonOutput {
// Output as array of {label, count} objects
type labelInfo struct {
@@ -325,10 +291,8 @@ var labelListAllCmd = &cobra.Command{
outputJSON(result)
return
}
cyan := color.New(color.FgCyan).SprintFunc()
fmt.Printf("\n%s All labels (%d unique):\n", cyan("🏷"), len(labels))
// Find longest label for alignment
maxLen := 0
for _, label := range labels {
@@ -336,7 +300,6 @@ var labelListAllCmd = &cobra.Command{
maxLen = len(label)
}
}
for _, label := range labels {
padding := strings.Repeat(" ", maxLen-len(label))
fmt.Printf(" %s%s (%d issues)\n", label, padding, labelCounts[label])
@@ -344,13 +307,7 @@ var labelListAllCmd = &cobra.Command{
fmt.Println()
},
}
func init() {
labelAddCmd.Flags().Bool("json", false, "Output JSON format")
labelRemoveCmd.Flags().Bool("json", false, "Output JSON format")
labelListCmd.Flags().Bool("json", false, "Output JSON format")
labelListAllCmd.Flags().Bool("json", false, "Output JSON format")
labelCmd.AddCommand(labelAddCmd)
labelCmd.AddCommand(labelRemoveCmd)
labelCmd.AddCommand(labelListCmd)

View File

@@ -240,7 +240,7 @@ func init() {
listCmd.Flags().IntP("limit", "n", 0, "Limit results")
listCmd.Flags().String("format", "", "Output format: 'digraph' (for golang.org/x/tools/cmd/digraph), 'dot' (Graphviz), or Go template")
listCmd.Flags().Bool("all", false, "Show all issues (default behavior; flag provided for CLI familiarity)")
listCmd.Flags().Bool("json", false, "Output JSON format")
// Note: --json flag is defined as a persistent flag in main.go, not here
rootCmd.AddCommand(listCmd)
}

View File

@@ -1,5 +1,4 @@
package main
import (
"context"
"fmt"
@@ -7,23 +6,19 @@ import (
"regexp"
"strings"
"time"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/types"
)
var mergeCmd = &cobra.Command{
Use: "merge [source-id...] --into [target-id]",
Short: "Merge duplicate issues into a single issue",
Long: `Merge one or more source issues into a target issue.
This command is idempotent and safe to retry after partial failures:
1. Validates all issues exist and no self-merge
2. Migrates all dependencies from sources to target (skips if already exist)
3. Updates text references in all issue descriptions/notes
4. Closes source issues with reason 'Merged into bd-X' (skips if already closed)
Example:
bd merge bd-42 bd-43 --into bd-41
bd merge bd-10 bd-11 bd-12 --into bd-10 --dry-run`,
@@ -34,26 +29,21 @@ Example:
fmt.Fprintf(os.Stderr, "Error: merge command not yet supported in daemon mode (see bd-190)\n")
os.Exit(1)
}
targetID, _ := cmd.Flags().GetString("into")
if targetID == "" {
fmt.Fprintf(os.Stderr, "Error: --into flag is required\n")
os.Exit(1)
}
sourceIDs := args
dryRun, _ := cmd.Flags().GetBool("dry-run")
// Use global jsonOutput set by PersistentPreRun
// Validate merge operation
if err := validateMerge(targetID, sourceIDs); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Direct mode
ctx := context.Background()
if dryRun {
if !jsonOutput {
fmt.Println("Dry run - validation passed, no changes made")
@@ -61,17 +51,14 @@ Example:
}
return
}
// Perform merge
result, err := performMerge(ctx, targetID, sourceIDs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error performing merge: %v\n", err)
os.Exit(1)
}
// Schedule auto-flush
markDirtyAndScheduleFlush()
if jsonOutput {
output := map[string]interface{}{
"target_id": targetID,
@@ -93,18 +80,14 @@ Example:
}
},
}
// init registers the merge command and its flags with the root command.
//
// The --json flag is intentionally NOT defined here: it is a persistent
// flag on the root command (main.go); a local duplicate would shadow it
// and break the global jsonOutput handling used by the Run function.
func init() {
	mergeCmd.Flags().String("into", "", "Target issue ID to merge into (required)")
	mergeCmd.Flags().Bool("dry-run", false, "Validate without making changes")
	// Note: --json flag is defined as a persistent flag in main.go, not here
	rootCmd.AddCommand(mergeCmd)
}
// validateMerge checks that merge operation is valid
func validateMerge(targetID string, sourceIDs []string) error {
ctx := context.Background()
// Check target exists
target, err := store.GetIssue(ctx, targetID)
if err != nil {
@@ -113,13 +96,11 @@ func validateMerge(targetID string, sourceIDs []string) error {
if target == nil {
return fmt.Errorf("target issue not found: %s", targetID)
}
// Check all sources exist and validate no self-merge
for _, sourceID := range sourceIDs {
if sourceID == targetID {
return fmt.Errorf("cannot merge issue into itself: %s", sourceID)
}
source, err := store.GetIssue(ctx, sourceID)
if err != nil {
return fmt.Errorf("source issue not found: %s", sourceID)
@@ -128,10 +109,8 @@ func validateMerge(targetID string, sourceIDs []string) error {
return fmt.Errorf("source issue not found: %s", sourceID)
}
}
return nil
}
// mergeResult tracks the results of a merge operation for reporting
type mergeResult struct {
depsAdded int
@@ -140,12 +119,10 @@ type mergeResult struct {
issuesClosed int
issuesSkipped int
}
// performMerge executes the merge operation
// TODO(bd-202): Add transaction support for atomicity
func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*mergeResult, error) {
result := &mergeResult{}
// Step 1: Migrate dependencies from source issues to target
for _, sourceID := range sourceIDs {
// Get all dependencies where source is the dependent (source depends on X)
@@ -153,7 +130,6 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
if err != nil {
return nil, fmt.Errorf("failed to get dependencies for %s: %w", sourceID, err)
}
// Migrate each dependency to target
for _, dep := range deps {
// Skip if target already has this dependency
@@ -161,7 +137,6 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
if err != nil {
return nil, fmt.Errorf("failed to check target dependencies: %w", err)
}
alreadyExists := false
for _, existing := range existingDeps {
if existing.DependsOnID == dep.DependsOnID && existing.Type == dep.Type {
@@ -169,7 +144,6 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
break
}
}
if alreadyExists || dep.DependsOnID == targetID {
result.depsSkipped++
} else {
@@ -187,13 +161,11 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
result.depsAdded++
}
}
// Get all dependencies where source is the dependency (X depends on source)
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get all dependencies: %w", err)
}
for issueID, depList := range allDeps {
for _, dep := range depList {
if dep.DependsOnID == sourceID {
@@ -204,7 +176,6 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
return nil, fmt.Errorf("failed to remove dependency %s -> %s: %w", issueID, sourceID, err)
}
}
// Add new dependency to target (if not self-reference)
if issueID != targetID {
newDep := &types.Dependency{
@@ -228,14 +199,12 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
}
}
}
// Step 2: Update text references in all issues
refCount, err := updateMergeTextReferences(ctx, sourceIDs, targetID)
if err != nil {
return nil, fmt.Errorf("failed to update text references: %w", err)
}
result.textRefCount = refCount
// Step 3: Close source issues (idempotent - skip if already closed)
for _, sourceID := range sourceIDs {
issue, err := store.GetIssue(ctx, sourceID)
@@ -245,7 +214,6 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
if issue == nil {
return nil, fmt.Errorf("source issue not found: %s", sourceID)
}
if issue.Status == types.StatusClosed {
// Already closed - skip
result.issuesSkipped++
@@ -257,10 +225,8 @@ func performMerge(ctx context.Context, targetID string, sourceIDs []string) (*me
result.issuesClosed++
}
}
return result, nil
}
// updateMergeTextReferences updates text references from source IDs to target ID
// Returns the count of text references updated
func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID string) (int, error) {
@@ -269,7 +235,6 @@ func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID
if err != nil {
return 0, fmt.Errorf("failed to get all issues: %w", err)
}
updatedCount := 0
for _, issue := range allIssues {
// Skip source issues (they're being closed anyway)
@@ -283,16 +248,13 @@ func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID
if isSource {
continue
}
updates := make(map[string]interface{})
// Check each source ID for references
for _, sourceID := range sourceIDs {
// Build regex pattern to match issue IDs with word boundaries
idPattern := `(^|[^A-Za-z0-9_-])(` + regexp.QuoteMeta(sourceID) + `)($|[^A-Za-z0-9_-])`
re := regexp.MustCompile(idPattern)
replacementText := `$1` + targetID + `$3`
// Update description
if issue.Description != "" && re.MatchString(issue.Description) {
if _, exists := updates["description"]; !exists {
@@ -302,7 +264,6 @@ func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID
updates["description"] = re.ReplaceAllString(desc, replacementText)
}
}
// Update notes
if issue.Notes != "" && re.MatchString(issue.Notes) {
if _, exists := updates["notes"]; !exists {
@@ -312,7 +273,6 @@ func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID
updates["notes"] = re.ReplaceAllString(notes, replacementText)
}
}
// Update design
if issue.Design != "" && re.MatchString(issue.Design) {
if _, exists := updates["design"]; !exists {
@@ -322,7 +282,6 @@ func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID
updates["design"] = re.ReplaceAllString(design, replacementText)
}
}
// Update acceptance criteria
if issue.AcceptanceCriteria != "" && re.MatchString(issue.AcceptanceCriteria) {
if _, exists := updates["acceptance_criteria"]; !exists {
@@ -333,7 +292,6 @@ func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID
}
}
}
// Apply updates if any
if len(updates) > 0 {
if err := store.UpdateIssue(ctx, issue.ID, updates, actor); err != nil {
@@ -342,6 +300,5 @@ func updateMergeTextReferences(ctx context.Context, sourceIDs []string, targetID
updatedCount++
}
}
return updatedCount, nil
}

View File

@@ -1,18 +1,15 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
var readyCmd = &cobra.Command{
Use: "ready",
Short: "Show ready work (no blockers, open or in-progress)",
@@ -21,7 +18,6 @@ var readyCmd = &cobra.Command{
assignee, _ := cmd.Flags().GetString("assignee")
sortPolicy, _ := cmd.Flags().GetString("sort")
// Use global jsonOutput set by PersistentPreRun (respects config.yaml + env vars)
filter := types.WorkFilter{
// Leave Status empty to get both 'open' and 'in_progress' (bd-165)
Limit: limit,
@@ -35,13 +31,11 @@ var readyCmd = &cobra.Command{
if assignee != "" {
filter.Assignee = &assignee
}
// Validate sort policy
if !filter.SortPolicy.IsValid() {
fmt.Fprintf(os.Stderr, "Error: invalid sort policy '%s'. Valid values: hybrid, priority, oldest\n", sortPolicy)
os.Exit(1)
}
// If daemon is running, use RPC
if daemonClient != nil {
readyArgs := &rpc.ReadyArgs{
@@ -53,19 +47,16 @@ var readyCmd = &cobra.Command{
priority, _ := cmd.Flags().GetInt("priority")
readyArgs.Priority = &priority
}
resp, err := daemonClient.Ready(readyArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
var issues []*types.Issue
if err := json.Unmarshal(resp.Data, &issues); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
if jsonOutput {
if issues == nil {
issues = []*types.Issue{}
@@ -73,17 +64,14 @@ var readyCmd = &cobra.Command{
outputJSON(issues)
return
}
if len(issues) == 0 {
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s No ready work found (all issues have blocking dependencies)\n\n",
yellow("✨"))
return
}
cyan := color.New(color.FgCyan).SprintFunc()
fmt.Printf("\n%s Ready work (%d issues with no blockers):\n\n", cyan("📋"), len(issues))
for i, issue := range issues {
fmt.Printf("%d. [P%d] %s: %s\n", i+1, issue.Priority, issue.ID, issue.Title)
if issue.EstimatedMinutes != nil {
@@ -96,7 +84,6 @@ var readyCmd = &cobra.Command{
fmt.Println()
return
}
// Direct mode
ctx := context.Background()
issues, err := store.GetReadyWork(ctx, filter)
@@ -104,7 +91,6 @@ var readyCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// If no ready work found, check if git has issues and auto-import
if len(issues) == 0 {
if checkAndAutoImport(ctx, store) {
@@ -116,7 +102,6 @@ var readyCmd = &cobra.Command{
}
}
}
if jsonOutput {
// Always output array, even if empty
if issues == nil {
@@ -125,17 +110,14 @@ var readyCmd = &cobra.Command{
outputJSON(issues)
return
}
if len(issues) == 0 {
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s No ready work found (all issues have blocking dependencies)\n\n",
yellow("✨"))
return
}
cyan := color.New(color.FgCyan).SprintFunc()
fmt.Printf("\n%s Ready work (%d issues with no blockers):\n\n", cyan("📋"), len(issues))
for i, issue := range issues {
fmt.Printf("%d. [P%d] %s: %s\n", i+1, issue.Priority, issue.ID, issue.Title)
if issue.EstimatedMinutes != nil {
@@ -148,13 +130,11 @@ var readyCmd = &cobra.Command{
fmt.Println()
},
}
var blockedCmd = &cobra.Command{
Use: "blocked",
Short: "Show blocked issues",
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun (respects config.yaml + env vars)
// If daemon is running but doesn't support this command, use direct storage
if daemonClient != nil && store == nil {
var err error
@@ -165,14 +145,12 @@ var blockedCmd = &cobra.Command{
}
defer func() { _ = store.Close() }()
}
ctx := context.Background()
blocked, err := store.GetBlockedIssues(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if jsonOutput {
// Always output array, even if empty
if blocked == nil {
@@ -181,16 +159,13 @@ var blockedCmd = &cobra.Command{
outputJSON(blocked)
return
}
if len(blocked) == 0 {
green := color.New(color.FgGreen).SprintFunc()
fmt.Printf("\n%s No blocked issues\n\n", green("✨"))
return
}
red := color.New(color.FgRed).SprintFunc()
fmt.Printf("\n%s Blocked issues (%d):\n\n", red("🚫"), len(blocked))
for _, issue := range blocked {
fmt.Printf("[P%d] %s: %s\n", issue.Priority, issue.ID, issue.Title)
blockedBy := issue.BlockedBy
@@ -203,13 +178,11 @@ var blockedCmd = &cobra.Command{
}
},
}
var statsCmd = &cobra.Command{
Use: "stats",
Short: "Show statistics",
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun (respects config.yaml + env vars)
// If daemon is running, use RPC
if daemonClient != nil {
resp, err := daemonClient.Stats()
@@ -217,22 +190,18 @@ var statsCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
var stats types.Statistics
if err := json.Unmarshal(resp.Data, &stats); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
if jsonOutput {
outputJSON(stats)
return
}
cyan := color.New(color.FgCyan).SprintFunc()
green := color.New(color.FgGreen).SprintFunc()
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s Beads Statistics:\n\n", cyan("📊"))
fmt.Printf("Total Issues: %d\n", stats.TotalIssues)
fmt.Printf("Open: %s\n", green(fmt.Sprintf("%d", stats.OpenIssues)))
@@ -246,7 +215,6 @@ var statsCmd = &cobra.Command{
fmt.Println()
return
}
// Direct mode
ctx := context.Background()
stats, err := store.GetStatistics(ctx)
@@ -254,7 +222,6 @@ var statsCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// If no issues found, check if git has issues and auto-import
if stats.TotalIssues == 0 {
if checkAndAutoImport(ctx, store) {
@@ -266,16 +233,13 @@ var statsCmd = &cobra.Command{
}
}
}
if jsonOutput {
outputJSON(stats)
return
}
cyan := color.New(color.FgCyan).SprintFunc()
green := color.New(color.FgGreen).SprintFunc()
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s Beads Statistics:\n\n", cyan("📊"))
fmt.Printf("Total Issues: %d\n", stats.TotalIssues)
fmt.Printf("Open: %s\n", green(fmt.Sprintf("%d", stats.OpenIssues)))
@@ -292,17 +256,11 @@ var statsCmd = &cobra.Command{
fmt.Println()
},
}
func init() {
readyCmd.Flags().IntP("limit", "n", 10, "Maximum issues to show")
readyCmd.Flags().IntP("priority", "p", 0, "Filter by priority")
readyCmd.Flags().StringP("assignee", "a", "", "Filter by assignee")
readyCmd.Flags().StringP("sort", "s", "hybrid", "Sort policy: hybrid (default), priority, oldest")
readyCmd.Flags().Bool("json", false, "Output JSON format")
statsCmd.Flags().Bool("json", false, "Output JSON format")
blockedCmd.Flags().Bool("json", false, "Output JSON format")
rootCmd.AddCommand(readyCmd)
rootCmd.AddCommand(blockedCmd)
rootCmd.AddCommand(statsCmd)

View File

@@ -1,31 +1,25 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/utils"
)
var reopenCmd = &cobra.Command{
Use: "reopen [id...]",
Short: "Reopen one or more closed issues",
Long: `Reopen closed issues by setting status to 'open' and clearing the closed_at timestamp.
This is more explicit than 'bd update --status open' and emits a Reopened event.`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
reason, _ := cmd.Flags().GetString("reason")
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {
@@ -46,9 +40,7 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
os.Exit(1)
}
}
reopenedIssues := []*types.Issue{}
// If daemon is running, use RPC
if daemonClient != nil {
for _, id := range resolvedIDs {
@@ -57,18 +49,15 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
ID: id,
Status: &openStatus,
}
resp, err := daemonClient.Update(updateArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error reopening %s: %v\n", id, err)
continue
}
// TODO: Add reason as a comment once RPC supports AddComment
if reason != "" {
fmt.Fprintf(os.Stderr, "Warning: reason not supported in daemon mode yet\n")
}
if jsonOutput {
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err == nil {
@@ -83,26 +72,22 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
fmt.Printf("%s Reopened %s%s\n", blue("↻"), id, reasonMsg)
}
}
if jsonOutput && len(reopenedIssues) > 0 {
outputJSON(reopenedIssues)
}
return
}
// Fall back to direct storage access
if store == nil {
fmt.Fprintln(os.Stderr, "Error: database not initialized")
os.Exit(1)
}
for _, id := range args {
fullID, err := utils.ResolvePartialID(ctx, store, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
continue
}
// UpdateIssue automatically clears closed_at when status changes from closed
updates := map[string]interface{}{
"status": string(types.StatusOpen),
@@ -111,14 +96,12 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
fmt.Fprintf(os.Stderr, "Error reopening %s: %v\n", fullID, err)
continue
}
// Add reason as a comment if provided
if reason != "" {
if err := store.AddComment(ctx, fullID, actor, reason); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to add comment to %s: %v\n", fullID, err)
}
}
if jsonOutput {
issue, _ := store.GetIssue(ctx, fullID)
if issue != nil {
@@ -133,20 +116,16 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
fmt.Printf("%s Reopened %s%s\n", blue("↻"), fullID, reasonMsg)
}
}
// Schedule auto-flush if any issues were reopened
if len(args) > 0 {
markDirtyAndScheduleFlush()
}
if jsonOutput && len(reopenedIssues) > 0 {
outputJSON(reopenedIssues)
}
},
}
func init() {
reopenCmd.Flags().StringP("reason", "r", "", "Reason for reopening")
reopenCmd.Flags().Bool("json", false, "Output JSON format")
rootCmd.AddCommand(reopenCmd)
}

View File

@@ -1,5 +1,4 @@
package main
import (
"context"
"encoding/json"
@@ -7,7 +6,6 @@ import (
"os"
"os/exec"
"strings"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
@@ -15,7 +13,6 @@ import (
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/utils"
)
var showCmd = &cobra.Command{
Use: "show [id...]",
Short: "Show issue details",
@@ -23,7 +20,6 @@ var showCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {
@@ -46,7 +42,6 @@ var showCmd = &cobra.Command{
os.Exit(1)
}
}
// If daemon is running, use RPC
if daemonClient != nil {
allDetails := []interface{}{}
@@ -57,7 +52,6 @@ var showCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error fetching %s: %v\n", id, err)
continue
}
if jsonOutput {
type IssueDetails struct {
types.Issue
@@ -78,7 +72,6 @@ var showCmd = &cobra.Command{
if idx > 0 {
fmt.Println("\n" + strings.Repeat("─", 60))
}
// Parse response and use existing formatting code
type IssueDetails struct {
types.Issue
@@ -92,9 +85,7 @@ var showCmd = &cobra.Command{
os.Exit(1)
}
issue := &details.Issue
cyan := color.New(color.FgCyan).SprintFunc()
// Format output (same as direct mode below)
tierEmoji := ""
statusSuffix := ""
@@ -106,7 +97,6 @@ var showCmd = &cobra.Command{
tierEmoji = " 📦"
statusSuffix = " (compacted L2)"
}
fmt.Printf("\n%s: %s%s\n", cyan(issue.ID), issue.Title, tierEmoji)
fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix)
fmt.Printf("Priority: P%d\n", issue.Priority)
@@ -119,7 +109,6 @@ var showCmd = &cobra.Command{
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
// Show compaction status
if issue.CompactionLevel > 0 {
fmt.Println()
@@ -142,7 +131,6 @@ var showCmd = &cobra.Command{
}
fmt.Printf("%s Compacted: %s (Tier %d)\n", tierEmoji2, compactedDate, issue.CompactionLevel)
}
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
}
@@ -155,35 +143,29 @@ var showCmd = &cobra.Command{
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
}
if len(details.Labels) > 0 {
fmt.Printf("\nLabels: %v\n", details.Labels)
}
if len(details.Dependencies) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(details.Dependencies))
for _, dep := range details.Dependencies {
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
}
if len(details.Dependents) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(details.Dependents))
for _, dep := range details.Dependents {
fmt.Printf(" ← %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
}
fmt.Println()
}
}
if jsonOutput && len(allDetails) > 0 {
outputJSON(allDetails)
}
return
}
// Direct mode
allDetails := []interface{}{}
for idx, id := range resolvedIDs {
@@ -196,7 +178,6 @@ var showCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Issue %s not found\n", id)
continue
}
if jsonOutput {
// Include labels, dependencies (with metadata), dependents (with metadata), and comments in JSON output
type IssueDetails struct {
@@ -208,7 +189,6 @@ var showCmd = &cobra.Command{
}
details := &IssueDetails{Issue: issue}
details.Labels, _ = store.GetLabels(ctx, issue.ID)
// Get dependencies with metadata (type, created_at, created_by)
if sqliteStore, ok := store.(*sqlite.SQLiteStorage); ok {
details.Dependencies, _ = sqliteStore.GetDependenciesWithMetadata(ctx, issue.ID)
@@ -224,18 +204,14 @@ var showCmd = &cobra.Command{
details.Dependents = append(details.Dependents, &types.IssueWithDependencyMetadata{Issue: *dependent})
}
}
details.Comments, _ = store.GetIssueComments(ctx, issue.ID)
allDetails = append(allDetails, details)
continue
}
if idx > 0 {
fmt.Println("\n" + strings.Repeat("─", 60))
}
cyan := color.New(color.FgCyan).SprintFunc()
// Add compaction emoji to title line
tierEmoji := ""
statusSuffix := ""
@@ -247,7 +223,6 @@ var showCmd = &cobra.Command{
tierEmoji = " 📦"
statusSuffix = " (compacted L2)"
}
fmt.Printf("\n%s: %s%s\n", cyan(issue.ID), issue.Title, tierEmoji)
fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix)
fmt.Printf("Priority: P%d\n", issue.Priority)
@@ -260,7 +235,6 @@ var showCmd = &cobra.Command{
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
// Show compaction status footer
if issue.CompactionLevel > 0 {
tierEmoji := "🗜️"
@@ -268,7 +242,6 @@ var showCmd = &cobra.Command{
tierEmoji = "📦"
}
tierName := fmt.Sprintf("Tier %d", issue.CompactionLevel)
fmt.Println()
if issue.OriginalSize > 0 {
currentSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
@@ -285,7 +258,6 @@ var showCmd = &cobra.Command{
}
fmt.Printf("%s Compacted: %s (%s)\n", tierEmoji, compactedDate, tierName)
}
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
}
@@ -298,13 +270,11 @@ var showCmd = &cobra.Command{
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
}
// Show labels
labels, _ := store.GetLabels(ctx, issue.ID)
if len(labels) > 0 {
fmt.Printf("\nLabels: %v\n", labels)
}
// Show dependencies
deps, _ := store.GetDependencies(ctx, issue.ID)
if len(deps) > 0 {
@@ -313,7 +283,6 @@ var showCmd = &cobra.Command{
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
}
// Show dependents
dependents, _ := store.GetDependents(ctx, issue.ID)
if len(dependents) > 0 {
@@ -322,7 +291,6 @@ var showCmd = &cobra.Command{
fmt.Printf(" ← %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
}
// Show comments
comments, _ := store.GetIssueComments(ctx, issue.ID)
if len(comments) > 0 {
@@ -331,16 +299,13 @@ var showCmd = &cobra.Command{
fmt.Printf(" [%s at %s]\n %s\n\n", comment.Author, comment.CreatedAt.Format("2006-01-02 15:04"), comment.Text)
}
}
fmt.Println()
}
if jsonOutput && len(allDetails) > 0 {
outputJSON(allDetails)
}
},
}
var updateCmd = &cobra.Command{
Use: "update [id...]",
Short: "Update one or more issues",
@@ -348,7 +313,6 @@ var updateCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
// Use global jsonOutput set by PersistentPreRun
updates := make(map[string]interface{})
if cmd.Flags().Changed("status") {
status, _ := cmd.Flags().GetString("status")
updates["status"] = status
@@ -390,14 +354,11 @@ var updateCmd = &cobra.Command{
externalRef, _ := cmd.Flags().GetString("external-ref")
updates["external_ref"] = externalRef
}
if len(updates) == 0 {
fmt.Println("No updates specified")
return
}
ctx := context.Background()
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {
@@ -418,13 +379,11 @@ var updateCmd = &cobra.Command{
os.Exit(1)
}
}
// If daemon is running, use RPC
if daemonClient != nil {
updatedIssues := []*types.Issue{}
for _, id := range resolvedIDs {
updateArgs := &rpc.UpdateArgs{ID: id}
// Map updates to RPC args
if status, ok := updates["status"].(string); ok {
updateArgs.Status = &status
@@ -450,13 +409,11 @@ var updateCmd = &cobra.Command{
if acceptanceCriteria, ok := updates["acceptance_criteria"].(string); ok {
updateArgs.AcceptanceCriteria = &acceptanceCriteria
}
resp, err := daemonClient.Update(updateArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error updating %s: %v\n", id, err)
continue
}
if jsonOutput {
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err == nil {
@@ -467,13 +424,11 @@ var updateCmd = &cobra.Command{
fmt.Printf("%s Updated issue: %s\n", green("✓"), id)
}
}
if jsonOutput && len(updatedIssues) > 0 {
outputJSON(updatedIssues)
}
return
}
// Direct mode
updatedIssues := []*types.Issue{}
for _, id := range resolvedIDs {
@@ -481,7 +436,6 @@ var updateCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error updating %s: %v\n", id, err)
continue
}
if jsonOutput {
issue, _ := store.GetIssue(ctx, id)
if issue != nil {
@@ -492,25 +446,20 @@ var updateCmd = &cobra.Command{
fmt.Printf("%s Updated issue: %s\n", green("✓"), id)
}
}
// Schedule auto-flush if any issues were updated
if len(args) > 0 {
markDirtyAndScheduleFlush()
}
if jsonOutput && len(updatedIssues) > 0 {
outputJSON(updatedIssues)
}
},
}
var editCmd = &cobra.Command{
Use: "edit [id]",
Short: "Edit an issue field in $EDITOR",
Long: `Edit an issue field using your configured $EDITOR.
By default, edits the description. Use flags to edit other fields.
Examples:
bd edit bd-42 # Edit description
bd edit bd-42 --title # Edit title
@@ -521,7 +470,6 @@ Examples:
Run: func(cmd *cobra.Command, args []string) {
id := args[0]
ctx := context.Background()
// Resolve partial ID if in direct mode
if daemonClient == nil {
fullID, err := utils.ResolvePartialID(ctx, store, id)
@@ -531,7 +479,6 @@ Examples:
}
id = fullID
}
// Determine which field to edit
fieldToEdit := "description"
if cmd.Flags().Changed("title") {
@@ -543,7 +490,6 @@ Examples:
} else if cmd.Flags().Changed("acceptance") {
fieldToEdit = "acceptance_criteria"
}
// Get the editor from environment
editor := os.Getenv("EDITOR")
if editor == "" {
@@ -562,11 +508,9 @@ Examples:
fmt.Fprintf(os.Stderr, "Error: No editor found. Set $EDITOR or $VISUAL environment variable.\n")
os.Exit(1)
}
// Get the current issue
var issue *types.Issue
var err error
if daemonClient != nil {
// Daemon mode
showArgs := &rpc.ShowArgs{ID: id}
@@ -575,7 +519,6 @@ Examples:
fmt.Fprintf(os.Stderr, "Error fetching issue %s: %v\n", id, err)
os.Exit(1)
}
issue = &types.Issue{}
if err := json.Unmarshal(resp.Data, issue); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing issue data: %v\n", err)
@@ -593,7 +536,6 @@ Examples:
os.Exit(1)
}
}
// Get the current field value
var currentValue string
switch fieldToEdit {
@@ -608,7 +550,6 @@ Examples:
case "acceptance_criteria":
currentValue = issue.AcceptanceCriteria
}
// Create a temporary file with the current value
tmpFile, err := os.CreateTemp("", fmt.Sprintf("bd-edit-%s-*.txt", fieldToEdit))
if err != nil {
@@ -617,7 +558,6 @@ Examples:
}
tmpPath := tmpFile.Name()
defer func() { _ = os.Remove(tmpPath) }()
// Write current value to temp file
if _, err := tmpFile.WriteString(currentValue); err != nil {
_ = tmpFile.Close() // nolint:gosec // G104: Error already handled above
@@ -625,18 +565,15 @@ Examples:
os.Exit(1)
}
_ = tmpFile.Close() // nolint:gosec // G104: Defer close errors are non-critical
// Open the editor
editorCmd := exec.Command(editor, tmpPath)
editorCmd.Stdin = os.Stdin
editorCmd.Stdout = os.Stdout
editorCmd.Stderr = os.Stderr
if err := editorCmd.Run(); err != nil {
fmt.Fprintf(os.Stderr, "Error running editor: %v\n", err)
os.Exit(1)
}
// Read the edited content
// nolint:gosec // G304: tmpPath is securely created temp file
editedContent, err := os.ReadFile(tmpPath)
@@ -644,30 +581,24 @@ Examples:
fmt.Fprintf(os.Stderr, "Error reading edited file: %v\n", err)
os.Exit(1)
}
newValue := string(editedContent)
// Check if the value changed
if newValue == currentValue {
fmt.Println("No changes made")
return
}
// Validate title if editing title
if fieldToEdit == "title" && strings.TrimSpace(newValue) == "" {
fmt.Fprintf(os.Stderr, "Error: title cannot be empty\n")
os.Exit(1)
}
// Update the issue
updates := map[string]interface{}{
fieldToEdit: newValue,
}
if daemonClient != nil {
// Daemon mode
updateArgs := &rpc.UpdateArgs{ID: id}
switch fieldToEdit {
case "title":
updateArgs.Title = &newValue
@@ -680,7 +611,6 @@ Examples:
case "acceptance_criteria":
updateArgs.AcceptanceCriteria = &newValue
}
_, err := daemonClient.Update(updateArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error updating issue: %v\n", err)
@@ -694,13 +624,11 @@ Examples:
}
markDirtyAndScheduleFlush()
}
green := color.New(color.FgGreen).SprintFunc()
fieldName := strings.ReplaceAll(fieldToEdit, "_", " ")
fmt.Printf("%s Updated %s for issue: %s\n", green("✓"), fieldName, id)
},
}
var closeCmd = &cobra.Command{
Use: "close [id...]",
Short: "Close one or more issues",
@@ -711,9 +639,7 @@ var closeCmd = &cobra.Command{
reason = "Closed"
}
// Use global jsonOutput set by PersistentPreRun
ctx := context.Background()
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {
@@ -734,7 +660,6 @@ var closeCmd = &cobra.Command{
os.Exit(1)
}
}
// If daemon is running, use RPC
if daemonClient != nil {
closedIssues := []*types.Issue{}
@@ -748,7 +673,6 @@ var closeCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
continue
}
if jsonOutput {
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err == nil {
@@ -759,13 +683,11 @@ var closeCmd = &cobra.Command{
fmt.Printf("%s Closed %s: %s\n", green("✓"), id, reason)
}
}
if jsonOutput && len(closedIssues) > 0 {
outputJSON(closedIssues)
}
return
}
// Direct mode
closedIssues := []*types.Issue{}
for _, id := range resolvedIDs {
@@ -783,22 +705,17 @@ var closeCmd = &cobra.Command{
fmt.Printf("%s Closed %s: %s\n", green("✓"), id, reason)
}
}
// Schedule auto-flush if any issues were closed
if len(args) > 0 {
markDirtyAndScheduleFlush()
}
if jsonOutput && len(closedIssues) > 0 {
outputJSON(closedIssues)
}
},
}
func init() {
showCmd.Flags().Bool("json", false, "Output JSON format")
rootCmd.AddCommand(showCmd)
updateCmd.Flags().StringP("status", "s", "", "New status")
updateCmd.Flags().IntP("priority", "p", 0, "New priority")
updateCmd.Flags().String("title", "", "New title")
@@ -810,17 +727,13 @@ func init() {
updateCmd.Flags().String("acceptance-criteria", "", "DEPRECATED: use --acceptance")
_ = updateCmd.Flags().MarkHidden("acceptance-criteria")
updateCmd.Flags().String("external-ref", "", "External reference (e.g., 'gh-9', 'jira-ABC')")
updateCmd.Flags().Bool("json", false, "Output JSON format")
rootCmd.AddCommand(updateCmd)
editCmd.Flags().Bool("title", false, "Edit the title")
editCmd.Flags().Bool("description", false, "Edit the description (default)")
editCmd.Flags().Bool("design", false, "Edit the design notes")
editCmd.Flags().Bool("notes", false, "Edit the notes")
editCmd.Flags().Bool("acceptance", false, "Edit the acceptance criteria")
rootCmd.AddCommand(editCmd)
closeCmd.Flags().StringP("reason", "r", "", "Reason for closing")
closeCmd.Flags().Bool("json", false, "Output JSON format")
rootCmd.AddCommand(closeCmd)
}

View File

@@ -1,23 +1,19 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"time"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/types"
)
var staleCmd = &cobra.Command{
Use: "stale",
Short: "Show stale issues (not updated recently)",
Long: `Show issues that haven't been updated recently and may need attention.
This helps identify:
- In-progress issues with no recent activity (may be abandoned)
- Open issues that have been forgotten
@@ -27,19 +23,16 @@ This helps identify:
status, _ := cmd.Flags().GetString("status")
limit, _ := cmd.Flags().GetInt("limit")
// Use global jsonOutput set by PersistentPreRun
// Validate status if provided
if status != "" && status != "open" && status != "in_progress" && status != "blocked" {
fmt.Fprintf(os.Stderr, "Error: invalid status '%s'. Valid values: open, in_progress, blocked\n", status)
os.Exit(1)
}
filter := types.StaleFilter{
Days: days,
Status: status,
Limit: limit,
}
// If daemon is running, use RPC
if daemonClient != nil {
staleArgs := &rpc.StaleArgs{
@@ -47,19 +40,16 @@ This helps identify:
Status: status,
Limit: limit,
}
resp, err := daemonClient.Stale(staleArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
var issues []*types.Issue
if err := json.Unmarshal(resp.Data, &issues); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1)
}
if jsonOutput {
if issues == nil {
issues = []*types.Issue{}
@@ -67,11 +57,9 @@ This helps identify:
outputJSON(issues)
return
}
displayStaleIssues(issues, days)
return
}
// Direct mode
ctx := context.Background()
issues, err := store.GetStaleIssues(ctx, filter)
@@ -79,7 +67,6 @@ This helps identify:
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
if jsonOutput {
if issues == nil {
issues = []*types.Issue{}
@@ -87,21 +74,17 @@ This helps identify:
outputJSON(issues)
return
}
displayStaleIssues(issues, days)
},
}
func displayStaleIssues(issues []*types.Issue, days int) {
if len(issues) == 0 {
green := color.New(color.FgGreen).SprintFunc()
fmt.Printf("\n%s No stale issues found (all active)\n\n", green("✨"))
return
}
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Printf("\n%s Stale issues (%d not updated in %d+ days):\n\n", yellow("⏰"), len(issues), days)
now := time.Now()
for i, issue := range issues {
daysStale := int(now.Sub(issue.UpdatedAt).Hours() / 24)
@@ -113,12 +96,9 @@ func displayStaleIssues(issues []*types.Issue, days int) {
fmt.Println()
}
}
func init() {
staleCmd.Flags().IntP("days", "d", 30, "Issues not updated in this many days")
staleCmd.Flags().StringP("status", "s", "", "Filter by status (open|in_progress|blocked)")
staleCmd.Flags().IntP("limit", "n", 50, "Maximum issues to show")
staleCmd.Flags().Bool("json", false, "Output JSON format")
rootCmd.AddCommand(staleCmd)
}

View File

@@ -259,6 +259,6 @@ func getAssignedStatus(assignee string) *StatusSummary {
func init() {
statusCmd.Flags().Bool("all", false, "Show all issues (default behavior)")
statusCmd.Flags().Bool("assigned", false, "Show issues assigned to current user")
statusCmd.Flags().Bool("json", false, "Output in JSON format")
// Note: --json flag is defined as a persistent flag in main.go, not here
rootCmd.AddCommand(statusCmd)
}

View File

@@ -1,16 +1,13 @@
package main
import (
"context"
"fmt"
"os"
"strings"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/types"
)
var validateCmd = &cobra.Command{
Use: "validate",
Short: "Run comprehensive database health checks",
@@ -19,7 +16,6 @@ var validateCmd = &cobra.Command{
- Duplicate issues (identical content)
- Test pollution (leaked test issues)
- Git merge conflicts in JSONL
Example:
bd validate # Run all checks
bd validate --fix-all # Auto-fix all issues
@@ -33,13 +29,10 @@ Example:
fmt.Fprintf(os.Stderr, "Use: bd --no-daemon validate\n")
os.Exit(1)
}
fixAll, _ := cmd.Flags().GetBool("fix-all")
checksFlag, _ := cmd.Flags().GetString("checks")
jsonOut, _ := cmd.Flags().GetBool("json")
ctx := context.Background()
// Parse and normalize checks
checks, err := parseChecks(checksFlag)
if err != nil {
@@ -47,7 +40,6 @@ Example:
fmt.Fprintf(os.Stderr, "Valid checks: orphans, duplicates, pollution, conflicts\n")
os.Exit(2)
}
// Fetch all issues once for checks that need them
var allIssues []*types.Issue
needsIssues := false
@@ -64,12 +56,10 @@ Example:
os.Exit(1)
}
}
results := validationResults{
checks: make(map[string]checkResult),
checkOrder: checks,
}
// Run each check
for _, check := range checks {
switch check {
@@ -83,50 +73,41 @@ Example:
results.checks["conflicts"] = validateGitConflicts(ctx, fixAll)
}
}
// Output results
if jsonOut {
outputJSON(results.toJSON())
} else {
results.print(fixAll)
}
// Exit with error code if issues found or errors occurred
if results.hasFailures() {
os.Exit(1)
}
},
}
// parseChecks normalizes and validates check names
func parseChecks(checksFlag string) ([]string, error) {
defaultChecks := []string{"orphans", "duplicates", "pollution", "conflicts"}
if checksFlag == "" {
return defaultChecks, nil
}
// Map of synonyms to canonical names
synonyms := map[string]string{
"dupes": "duplicates",
"git-conflicts": "conflicts",
}
var result []string
seen := make(map[string]bool)
parts := strings.Split(checksFlag, ",")
for _, part := range parts {
check := strings.ToLower(strings.TrimSpace(part))
if check == "" {
continue
}
// Map synonyms
if canonical, ok := synonyms[check]; ok {
check = canonical
}
// Validate
valid := false
for _, validCheck := range defaultChecks {
@@ -138,17 +119,14 @@ func parseChecks(checksFlag string) ([]string, error) {
if !valid {
return nil, fmt.Errorf("unknown check: %s", part)
}
// Deduplicate
if !seen[check] {
seen[check] = true
result = append(result, check)
}
}
return result, nil
}
type checkResult struct {
name string
issueCount int
@@ -156,12 +134,10 @@ type checkResult struct {
err error
suggestions []string
}
type validationResults struct {
checks map[string]checkResult
checkOrder []string
}
func (r *validationResults) hasFailures() bool {
for _, result := range r.checks {
if result.err != nil {
@@ -173,23 +149,19 @@ func (r *validationResults) hasFailures() bool {
}
return false
}
func (r *validationResults) toJSON() map[string]interface{} {
output := map[string]interface{}{
"checks": map[string]interface{}{},
}
totalIssues := 0
totalFixed := 0
hasErrors := false
for name, result := range r.checks {
var errorStr interface{}
if result.err != nil {
errorStr = result.err.Error()
hasErrors = true
}
output["checks"].(map[string]interface{})[name] = map[string]interface{}{
"issue_count": result.issueCount,
"fixed_count": result.fixedCount,
@@ -200,31 +172,24 @@ func (r *validationResults) toJSON() map[string]interface{} {
totalIssues += result.issueCount
totalFixed += result.fixedCount
}
output["total_issues"] = totalIssues
output["total_fixed"] = totalFixed
output["healthy"] = !hasErrors && (totalIssues == 0 || totalIssues == totalFixed)
return output
}
func (r *validationResults) print(_ bool) {
green := color.New(color.FgGreen).SprintFunc()
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
fmt.Println("\nValidation Results:")
fmt.Println("===================")
totalIssues := 0
totalFixed := 0
// Print in deterministic order
for _, name := range r.checkOrder {
result := r.checks[name]
prefix := "✓"
colorFunc := green
if result.err != nil {
prefix = "✗"
colorFunc = red
@@ -240,13 +205,10 @@ func (r *validationResults) print(_ bool) {
} else {
fmt.Printf("%s %s: OK\n", colorFunc(prefix), result.name)
}
totalIssues += result.issueCount
totalFixed += result.fixedCount
}
fmt.Println()
if totalIssues == 0 {
fmt.Printf("%s Database is healthy!\n", green("✓"))
} else if totalFixed == totalIssues {
@@ -258,7 +220,6 @@ func (r *validationResults) print(_ bool) {
fmt.Printf(" (fixed %d, %d remaining)", totalFixed, remaining)
}
fmt.Println()
// Print suggestions
fmt.Println("\nRecommendations:")
for _, result := range r.checks {
@@ -268,23 +229,19 @@ func (r *validationResults) print(_ bool) {
}
}
}
func validateOrphanedDeps(ctx context.Context, allIssues []*types.Issue, fix bool) checkResult {
result := checkResult{name: "orphaned dependencies"}
// Build ID existence map
existingIDs := make(map[string]bool)
for _, issue := range allIssues {
existingIDs[issue.ID] = true
}
// Find orphaned dependencies
type orphanedDep struct {
issueID string
orphanedID string
}
var orphaned []orphanedDep
for _, issue := range allIssues {
for _, dep := range issue.Dependencies {
if !existingIDs[dep.DependsOnID] {
@@ -295,16 +252,13 @@ func validateOrphanedDeps(ctx context.Context, allIssues []*types.Issue, fix boo
}
}
}
result.issueCount = len(orphaned)
if fix && len(orphaned) > 0 {
// Group by issue
orphansByIssue := make(map[string][]string)
for _, o := range orphaned {
orphansByIssue[o.issueID] = append(orphansByIssue[o.issueID], o.orphanedID)
}
// Fix each issue
for issueID, orphanedIDs := range orphansByIssue {
for _, orphanedID := range orphanedIDs {
@@ -313,30 +267,23 @@ func validateOrphanedDeps(ctx context.Context, allIssues []*types.Issue, fix boo
}
}
}
if result.fixedCount > 0 {
markDirtyAndScheduleFlush()
}
}
if result.issueCount > result.fixedCount {
result.suggestions = append(result.suggestions, "Run 'bd repair-deps --fix' to remove orphaned dependencies")
}
return result
}
func validateDuplicates(_ context.Context, allIssues []*types.Issue, fix bool) checkResult {
result := checkResult{name: "duplicates"}
// Find duplicates
duplicateGroups := findDuplicateGroups(allIssues)
// Count total duplicate issues (excluding one canonical per group)
for _, group := range duplicateGroups {
result.issueCount += len(group) - 1
}
if fix && len(duplicateGroups) > 0 {
// Note: Auto-merge is complex and requires user review
// We don't auto-fix duplicates, just report them
@@ -346,17 +293,13 @@ func validateDuplicates(_ context.Context, allIssues []*types.Issue, fix bool) c
result.suggestions = append(result.suggestions,
fmt.Sprintf("Run 'bd duplicates' to review %d duplicate groups", len(duplicateGroups)))
}
return result
}
func validatePollution(_ context.Context, allIssues []*types.Issue, fix bool) checkResult {
result := checkResult{name: "test pollution"}
// Detect pollution
polluted := detectTestPollution(allIssues)
result.issueCount = len(polluted)
if fix && len(polluted) > 0 {
// Note: Deleting issues is destructive, we just suggest it
result.suggestions = append(result.suggestions,
@@ -365,13 +308,10 @@ func validatePollution(_ context.Context, allIssues []*types.Issue, fix bool) ch
result.suggestions = append(result.suggestions,
fmt.Sprintf("Run 'bd detect-pollution' to review %d potential test issues", len(polluted)))
}
return result
}
func validateGitConflicts(_ context.Context, fix bool) checkResult {
result := checkResult{name: "git conflicts"}
// Check JSONL file for conflict markers
jsonlPath := findJSONLPath()
// nolint:gosec // G304: jsonlPath is validated JSONL file from findJSONLPath
@@ -384,7 +324,6 @@ func validateGitConflicts(_ context.Context, fix bool) checkResult {
result.err = fmt.Errorf("failed to read JSONL: %w", err)
return result
}
// Look for git conflict markers
lines := strings.Split(string(data), "\n")
var conflictLines []int
@@ -396,7 +335,6 @@ func validateGitConflicts(_ context.Context, fix bool) checkResult {
conflictLines = append(conflictLines, i+1)
}
}
if len(conflictLines) > 0 {
result.issueCount = 1 // One conflict situation
result.suggestions = append(result.suggestions,
@@ -410,19 +348,15 @@ func validateGitConflicts(_ context.Context, fix bool) checkResult {
result.suggestions = append(result.suggestions,
"For advanced field-level merging: https://github.com/neongreen/mono/tree/main/beads-merge")
}
// Can't auto-fix git conflicts
if fix && result.issueCount > 0 {
result.suggestions = append(result.suggestions,
"Note: Git conflicts cannot be auto-fixed with --fix-all")
}
return result
}
func init() {
validateCmd.Flags().Bool("fix-all", false, "Auto-fix all fixable issues")
validateCmd.Flags().String("checks", "", "Comma-separated list of checks (orphans,duplicates,pollution,conflicts)")
validateCmd.Flags().Bool("json", false, "Output in JSON format")
rootCmd.AddCommand(validateCmd)
}