Fix lint errors: handle previously ignored error returns, replace WriteString(fmt.Sprintf(...)) with fmt.Fprintf, simplify negated boolean expressions via De Morgan's law, and convert if/else-if chains to switch statements

Amp-Thread-ID: https://ampcode.com/threads/T-afcf56b0-a8bc-4310-bb59-1b63e1d70c89
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Steve Yegge
2025-10-24 12:27:02 -07:00
parent 1d5e89b9bb
commit 9dcb86ebfb
17 changed files with 342 additions and 537 deletions
+2 -1
View File
@@ -16,7 +16,8 @@ import (
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
) )
// Core types for working with issues // Issue represents a tracked work item with metadata, dependencies, and status.
// Status represents the current state of an issue (open, in progress, closed, blocked).
type ( type (
Issue = types.Issue Issue = types.Issue
Status = types.Status Status = types.Status
+12 -4
View File
@@ -63,7 +63,9 @@ func TestLibraryIntegration(t *testing.T) {
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
store.CreateIssue(ctx, issue, "test-actor") if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil {
t.Fatalf("CreateIssue failed: %v", err)
}
// Get it back // Get it back
retrieved, err := store.GetIssue(ctx, issue.ID) retrieved, err := store.GetIssue(ctx, issue.ID)
@@ -89,7 +91,9 @@ func TestLibraryIntegration(t *testing.T) {
CreatedAt: time.Now(), CreatedAt: time.Now(),
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
store.CreateIssue(ctx, issue, "test-actor") if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil {
t.Fatalf("CreateIssue failed: %v", err)
}
// Update status // Update status
updates := map[string]interface{}{ updates := map[string]interface{}{
@@ -131,8 +135,12 @@ func TestLibraryIntegration(t *testing.T) {
UpdatedAt: time.Now(), UpdatedAt: time.Now(),
} }
store.CreateIssue(ctx, issue1, "test-actor") if err := store.CreateIssue(ctx, issue1, "test-actor"); err != nil {
store.CreateIssue(ctx, issue2, "test-actor") t.Fatalf("CreateIssue failed: %v", err)
}
if err := store.CreateIssue(ctx, issue2, "test-actor"); err != nil {
t.Fatalf("CreateIssue failed: %v", err)
}
// Add dependency: issue2 blocks issue1 // Add dependency: issue2 blocks issue1
dep := &beads.Dependency{ dep := &beads.Dependency{
+1 -1
View File
@@ -1038,7 +1038,7 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush bool, logPath, p
} }
return return
case <-ctx.Done(): case <-ctx.Done():
log("Context cancelled, shutting down") log("Context canceled, shutting down")
if err := server.Stop(); err != nil { if err := server.Stop(); err != nil {
log("Error stopping RPC server: %v", err) log("Error stopping RPC server: %v", err)
} }
+1 -1
View File
@@ -249,7 +249,7 @@ func TestDaemonLogFileCreation(t *testing.T) {
timestamp := time.Now().Format("2006-01-02 15:04:05") timestamp := time.Now().Format("2006-01-02 15:04:05")
msg := "Test log message" msg := "Test log message"
_, err = logF.WriteString(fmt.Sprintf("[%s] %s\n", timestamp, msg)) _, err = fmt.Fprintf(logF, "[%s] %s\n", timestamp, msg)
if err != nil { if err != nil {
t.Fatalf("Failed to write to log file: %v", err) t.Fatalf("Failed to write to log file: %v", err)
} }
+2 -2
View File
@@ -184,10 +184,10 @@ var closeEligibleEpicsCmd = &cobra.Command{
Reason: "All children completed", Reason: "All children completed",
}) })
if err != nil || !resp.Success { if err != nil || !resp.Success {
errMsg := "unknown error" errMsg := ""
if err != nil { if err != nil {
errMsg = err.Error() errMsg = err.Error()
} else { } else if !resp.Success {
errMsg = resp.Error errMsg = resp.Error
} }
fmt.Fprintf(os.Stderr, "Error closing %s: %s\n", epicStatus.Epic.ID, errMsg) fmt.Fprintf(os.Stderr, "Error closing %s: %s\n", epicStatus.Epic.ID, errMsg)
+1 -1
View File
@@ -657,7 +657,7 @@ func replaceBoundaryAware(text, oldID, newID string) string {
func isBoundary(c byte) bool { func isBoundary(c byte) bool {
// Issue IDs contain: lowercase letters, digits, and hyphens // Issue IDs contain: lowercase letters, digits, and hyphens
// Boundaries are anything else (space, punctuation, etc.) // Boundaries are anything else (space, punctuation, etc.)
return !(c >= 'a' && c <= 'z' || c >= '0' && c <= '9' || c == '-') return (c < 'a' || c > 'z') && (c < '0' || c > '9') && c != '-'
} }
// isNumeric returns true if the string contains only digits // isNumeric returns true if the string contains only digits
+47 -112
View File
@@ -20,53 +20,50 @@ var labelCmd = &cobra.Command{
Short: "Manage issue labels", Short: "Manage issue labels",
} }
// executeLabelCommand executes a label operation and handles output // Helper function to process label operations for multiple issues
func executeLabelCommand(issueID, label, operation string, operationFunc func(context.Context, string, string, string) error) { func processBatchLabelOperation(issueIDs []string, label string, operation string,
daemonFunc func(string, string) error, storeFunc func(context.Context, string, string, string) error) {
ctx := context.Background() ctx := context.Background()
results := []map[string]interface{}{}
// Use daemon if available for _, issueID := range issueIDs {
if daemonClient != nil {
var err error var err error
if operation == "added" { if daemonClient != nil {
_, err = daemonClient.AddLabel(&rpc.LabelAddArgs{ err = daemonFunc(issueID, label)
ID: issueID,
Label: label,
})
} else { } else {
_, err = daemonClient.RemoveLabel(&rpc.LabelRemoveArgs{ err = storeFunc(ctx, issueID, label, actor)
ID: issueID,
Label: label,
})
} }
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "Error %s label %s %s: %v\n", operation, operation, issueID, err)
os.Exit(1) continue
}
} else {
// Direct mode
if err := operationFunc(ctx, issueID, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
} }
// Schedule auto-flush if jsonOutput {
results = append(results, map[string]interface{}{
"status": operation,
"issue_id": issueID,
"label": label,
})
} else {
green := color.New(color.FgGreen).SprintFunc()
verb := "Added"
prep := "to"
if operation == "removed" {
verb = "Removed"
prep = "from"
}
fmt.Printf("%s %s label '%s' %s %s\n", green("✓"), verb, label, prep, issueID)
}
}
if len(issueIDs) > 0 && daemonClient == nil {
markDirtyAndScheduleFlush() markDirtyAndScheduleFlush()
} }
if jsonOutput { if jsonOutput && len(results) > 0 {
outputJSON(map[string]interface{}{ outputJSON(results)
"status": operation,
"issue_id": issueID,
"label": label,
})
return
} }
green := color.New(color.FgGreen).SprintFunc()
// Capitalize first letter manually (strings.Title is deprecated)
capitalizedOp := strings.ToUpper(operation[:1]) + operation[1:]
fmt.Printf("%s %s label '%s' to %s\n", green("✓"), capitalizedOp, label, issueID)
} }
var labelAddCmd = &cobra.Command{ var labelAddCmd = &cobra.Command{
@@ -74,48 +71,17 @@ var labelAddCmd = &cobra.Command{
Short: "Add a label to one or more issues", Short: "Add a label to one or more issues",
Args: cobra.MinimumNArgs(2), Args: cobra.MinimumNArgs(2),
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
// Last arg is the label, everything before is issue IDs
label := args[len(args)-1] label := args[len(args)-1]
issueIDs := args[:len(args)-1] issueIDs := args[:len(args)-1]
ctx := context.Background() processBatchLabelOperation(issueIDs, label, "added",
results := []map[string]interface{}{} func(issueID, lbl string) error {
_, err := daemonClient.AddLabel(&rpc.LabelAddArgs{ID: issueID, Label: lbl})
for _, issueID := range issueIDs { return err
var err error },
if daemonClient != nil { func(ctx context.Context, issueID, lbl, act string) error {
_, err = daemonClient.AddLabel(&rpc.LabelAddArgs{ return store.AddLabel(ctx, issueID, lbl, act)
ID: issueID, })
Label: label,
})
} else {
err = store.AddLabel(ctx, issueID, label, actor)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error adding label to %s: %v\n", issueID, err)
continue
}
if jsonOutput {
results = append(results, map[string]interface{}{
"status": "added",
"issue_id": issueID,
"label": label,
})
} else {
green := color.New(color.FgGreen).SprintFunc()
fmt.Printf("%s Added label '%s' to %s\n", green("✓"), label, issueID)
}
}
if len(issueIDs) > 0 && daemonClient == nil {
markDirtyAndScheduleFlush()
}
if jsonOutput && len(results) > 0 {
outputJSON(results)
}
}, },
} }
@@ -124,48 +90,17 @@ var labelRemoveCmd = &cobra.Command{
Short: "Remove a label from one or more issues", Short: "Remove a label from one or more issues",
Args: cobra.MinimumNArgs(2), Args: cobra.MinimumNArgs(2),
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
// Last arg is the label, everything before is issue IDs
label := args[len(args)-1] label := args[len(args)-1]
issueIDs := args[:len(args)-1] issueIDs := args[:len(args)-1]
ctx := context.Background() processBatchLabelOperation(issueIDs, label, "removed",
results := []map[string]interface{}{} func(issueID, lbl string) error {
_, err := daemonClient.RemoveLabel(&rpc.LabelRemoveArgs{ID: issueID, Label: lbl})
for _, issueID := range issueIDs { return err
var err error },
if daemonClient != nil { func(ctx context.Context, issueID, lbl, act string) error {
_, err = daemonClient.RemoveLabel(&rpc.LabelRemoveArgs{ return store.RemoveLabel(ctx, issueID, lbl, act)
ID: issueID, })
Label: label,
})
} else {
err = store.RemoveLabel(ctx, issueID, label, actor)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error removing label from %s: %v\n", issueID, err)
continue
}
if jsonOutput {
results = append(results, map[string]interface{}{
"status": "removed",
"issue_id": issueID,
"label": label,
})
} else {
green := color.New(color.FgGreen).SprintFunc()
fmt.Printf("%s Removed label '%s' from %s\n", green("✓"), label, issueID)
}
}
if len(issueIDs) > 0 && daemonClient == nil {
markDirtyAndScheduleFlush()
}
if jsonOutput && len(results) > 0 {
outputJSON(results)
}
}, },
} }
+225 -309
View File
@@ -48,6 +48,9 @@ const (
FallbackFlagNoDaemon = "flag_no_daemon" FallbackFlagNoDaemon = "flag_no_daemon"
FallbackConnectFailed = "connect_failed" FallbackConnectFailed = "connect_failed"
FallbackHealthFailed = "health_failed" FallbackHealthFailed = "health_failed"
cmdDaemon = "daemon"
cmdImport = "import"
statusHealthy = "healthy"
FallbackAutoStartDisabled = "auto_start_disabled" FallbackAutoStartDisabled = "auto_start_disabled"
FallbackAutoStartFailed = "auto_start_failed" FallbackAutoStartFailed = "auto_start_failed"
FallbackDaemonUnsupported = "daemon_unsupported" FallbackDaemonUnsupported = "daemon_unsupported"
@@ -87,7 +90,7 @@ var rootCmd = &cobra.Command{
// Apply viper configuration if flags weren't explicitly set // Apply viper configuration if flags weren't explicitly set
// Priority: flags > viper (config file + env vars) > defaults // Priority: flags > viper (config file + env vars) > defaults
// Do this BEFORE early-return so init/version/help respect config // Do this BEFORE early-return so init/version/help respect config
// If flag wasn't explicitly set, use viper value // If flag wasn't explicitly set, use viper value
if !cmd.Flags().Changed("json") { if !cmd.Flags().Changed("json") {
jsonOutput = config.GetBool("json") jsonOutput = config.GetBool("json")
@@ -109,7 +112,7 @@ var rootCmd = &cobra.Command{
} }
// Skip database initialization for commands that don't need a database // Skip database initialization for commands that don't need a database
if cmd.Name() == "init" || cmd.Name() == "daemon" || cmd.Name() == "help" || cmd.Name() == "version" || cmd.Name() == "quickstart" { if cmd.Name() == "init" || cmd.Name() == cmdDaemon || cmd.Name() == "help" || cmd.Name() == "version" || cmd.Name() == "quickstart" {
return return
} }
@@ -133,14 +136,14 @@ var rootCmd = &cobra.Command{
if err == nil { if err == nil {
localBeadsDir = filepath.Join(cwd, ".beads") localBeadsDir = filepath.Join(cwd, ".beads")
} }
// Use public API to find database (same logic as extensions) // Use public API to find database (same logic as extensions)
if foundDB := beads.FindDatabasePath(); foundDB != "" { if foundDB := beads.FindDatabasePath(); foundDB != "" {
dbPath = foundDB dbPath = foundDB
// Special case for import: if we found a database but there's a local .beads/ // Special case for import: if we found a database but there's a local .beads/
// directory without a database, prefer creating a local database // directory without a database, prefer creating a local database
if cmd.Name() == "import" && localBeadsDir != "" { if cmd.Name() == cmdImport && localBeadsDir != "" {
if _, err := os.Stat(localBeadsDir); err == nil { if _, err := os.Stat(localBeadsDir); err == nil {
// Check if found database is NOT in the local .beads/ directory // Check if found database is NOT in the local .beads/ directory
if !strings.HasPrefix(dbPath, localBeadsDir+string(filepath.Separator)) { if !strings.HasPrefix(dbPath, localBeadsDir+string(filepath.Separator)) {
@@ -151,13 +154,13 @@ var rootCmd = &cobra.Command{
} }
} else { } else {
// For import command, allow creating database if .beads/ directory exists // For import command, allow creating database if .beads/ directory exists
if cmd.Name() == "import" && localBeadsDir != "" { if cmd.Name() == cmdImport && localBeadsDir != "" {
if _, err := os.Stat(localBeadsDir); err == nil { if _, err := os.Stat(localBeadsDir); err == nil {
// .beads/ directory exists - set dbPath for import to create // .beads/ directory exists - set dbPath for import to create
dbPath = filepath.Join(localBeadsDir, "vc.db") dbPath = filepath.Join(localBeadsDir, "vc.db")
} }
} }
// If dbPath still not set, error out // If dbPath still not set, error out
if dbPath == "" { if dbPath == "" {
// No database found - error out instead of falling back to ~/.beads // No database found - error out instead of falling back to ~/.beads
@@ -208,18 +211,18 @@ var rootCmd = &cobra.Command{
absDBPath, _ := filepath.Abs(dbPath) absDBPath, _ := filepath.Abs(dbPath)
client.SetDatabasePath(absDBPath) client.SetDatabasePath(absDBPath)
} }
// Perform health check // Perform health check
health, healthErr := client.Health() health, healthErr := client.Health()
if healthErr == nil && health.Status == "healthy" { if healthErr == nil && health.Status == statusHealthy {
// Check version compatibility // Check version compatibility
if !health.Compatible { if !health.Compatible {
if os.Getenv("BD_DEBUG") != "" { if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: daemon version mismatch (daemon: %s, client: %s), restarting daemon\n", fmt.Fprintf(os.Stderr, "Debug: daemon version mismatch (daemon: %s, client: %s), restarting daemon\n",
health.Version, Version) health.Version, Version)
} }
client.Close() client.Close()
// Kill old daemon and restart with new version // Kill old daemon and restart with new version
if restartDaemonForVersionMismatch() { if restartDaemonForVersionMismatch() {
// Retry connection after restart // Retry connection after restart
@@ -230,9 +233,9 @@ var rootCmd = &cobra.Command{
client.SetDatabasePath(absDBPath) client.SetDatabasePath(absDBPath)
} }
health, healthErr = client.Health() health, healthErr = client.Health()
if healthErr == nil && health.Status == "healthy" { if healthErr == nil && health.Status == statusHealthy {
daemonClient = client daemonClient = client
daemonStatus.Mode = "daemon" daemonStatus.Mode = cmdDaemon
daemonStatus.Connected = true daemonStatus.Connected = true
daemonStatus.Degraded = false daemonStatus.Degraded = false
daemonStatus.Health = health.Status daemonStatus.Health = health.Status
@@ -246,12 +249,12 @@ var rootCmd = &cobra.Command{
} }
// If restart failed, fall through to direct mode // If restart failed, fall through to direct mode
daemonStatus.FallbackReason = FallbackHealthFailed daemonStatus.FallbackReason = FallbackHealthFailed
daemonStatus.Detail = fmt.Sprintf("version mismatch (daemon: %s, client: %s) and restart failed", daemonStatus.Detail = fmt.Sprintf("version mismatch (daemon: %s, client: %s) and restart failed",
health.Version, Version) health.Version, Version)
} else { } else {
// Daemon is healthy and compatible - use it // Daemon is healthy and compatible - use it
daemonClient = client daemonClient = client
daemonStatus.Mode = "daemon" daemonStatus.Mode = cmdDaemon
daemonStatus.Connected = true daemonStatus.Connected = true
daemonStatus.Degraded = false daemonStatus.Degraded = false
daemonStatus.Health = health.Status daemonStatus.Health = health.Status
@@ -306,12 +309,12 @@ var rootCmd = &cobra.Command{
absDBPath, _ := filepath.Abs(dbPath) absDBPath, _ := filepath.Abs(dbPath)
client.SetDatabasePath(absDBPath) client.SetDatabasePath(absDBPath)
} }
// Check health of auto-started daemon // Check health of auto-started daemon
health, healthErr := client.Health() health, healthErr := client.Health()
if healthErr == nil && health.Status == "healthy" { if healthErr == nil && health.Status == statusHealthy {
daemonClient = client daemonClient = client
daemonStatus.Mode = "daemon" daemonStatus.Mode = cmdDaemon
daemonStatus.Connected = true daemonStatus.Connected = true
daemonStatus.Degraded = false daemonStatus.Degraded = false
daemonStatus.AutoStartSucceeded = true daemonStatus.AutoStartSucceeded = true
@@ -487,96 +490,9 @@ func shouldAutoStartDaemon() bool {
func shouldUseGlobalDaemon() bool { func shouldUseGlobalDaemon() bool {
// Global daemon support is deprecated // Global daemon support is deprecated
// Always use local daemon (per-project .beads/ socket) // Always use local daemon (per-project .beads/ socket)
return false
// Previously supported BEADS_PREFER_GLOBAL_DAEMON env var, but global // Previously supported BEADS_PREFER_GLOBAL_DAEMON env var, but global
// daemon has issues with multi-workspace git workflows // daemon has issues with multi-workspace git workflows
return false
// Heuristic: detect multiple beads repositories
home, err := os.UserHomeDir()
if err != nil {
return false
}
// Count .beads directories under home
repoCount := 0
maxDepth := 5 // Don't scan too deep
var countRepos func(string, int) error
countRepos = func(dir string, depth int) error {
if depth > maxDepth || repoCount > 1 {
return filepath.SkipDir
}
entries, err := os.ReadDir(dir)
if err != nil {
return nil // Skip directories we can't read
}
for _, entry := range entries {
if !entry.IsDir() {
continue
}
name := entry.Name()
// Skip hidden dirs except .beads
if strings.HasPrefix(name, ".") && name != ".beads" {
continue
}
// Skip common large directories
if name == "node_modules" || name == "vendor" || name == "target" || name == ".git" {
continue
}
path := filepath.Join(dir, name)
// Check if this is a .beads directory with a database
if name == ".beads" {
dbPath := filepath.Join(path, "db.sqlite")
if _, err := os.Stat(dbPath); err == nil {
repoCount++
if repoCount > 1 {
return filepath.SkipDir
}
}
continue
}
// Recurse into subdirectories
if depth < maxDepth {
_ = countRepos(path, depth+1)
}
}
return nil
}
// Scan common project directories
projectDirs := []string{
filepath.Join(home, "src"),
filepath.Join(home, "projects"),
filepath.Join(home, "code"),
filepath.Join(home, "workspace"),
filepath.Join(home, "dev"),
}
for _, dir := range projectDirs {
if _, err := os.Stat(dir); err == nil {
_ = countRepos(dir, 0)
if repoCount > 1 {
break
}
}
}
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: found %d beads repositories, prefer global: %v\n", repoCount, repoCount > 1)
}
// Use global daemon if we found more than 1 repository (multi-repo workflow)
// This prevents concurrency issues when multiple repos are being worked on
return repoCount > 1
} }
// restartDaemonForVersionMismatch stops the old daemon and starts a new one // restartDaemonForVersionMismatch stops the old daemon and starts a new one
@@ -599,7 +515,7 @@ func restartDaemonForVersionMismatch() bool {
if os.Getenv("BD_DEBUG") != "" { if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: stopping old daemon (PID %d)\n", pid) fmt.Fprintf(os.Stderr, "Debug: stopping old daemon (PID %d)\n", pid)
} }
process, err := os.FindProcess(pid) process, err := os.FindProcess(pid)
if err != nil { if err != nil {
if os.Getenv("BD_DEBUG") != "" { if os.Getenv("BD_DEBUG") != "" {
@@ -629,11 +545,11 @@ func restartDaemonForVersionMismatch() bool {
// Force kill if still running // Force kill if still running
if isRunning, _ := isDaemonRunning(pidFile); isRunning { if isRunning, _ := isDaemonRunning(pidFile); isRunning {
if os.Getenv("BD_DEBUG") != "" { if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: force killing old daemon\n") fmt.Fprintf(os.Stderr, "Debug: force killing old daemon\n")
} }
_ = process.Kill() _ = process.Kill()
forcedKill = true forcedKill = true
} }
} }
@@ -738,7 +654,7 @@ func tryAutoStartDaemon(socketPath string) bool {
if lockPID, err := readPIDFromFile(lockPath); err == nil { if lockPID, err := readPIDFromFile(lockPath); err == nil {
if !isPIDAlive(lockPID) { if !isPIDAlive(lockPID) {
if os.Getenv("BD_DEBUG") != "" { if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: lock is stale (PID %d dead), removing and retrying\n", lockPID) fmt.Fprintf(os.Stderr, "Debug: lock is stale (PID %d dead), removing and retrying\n", lockPID)
} }
_ = os.Remove(lockPath) _ = os.Remove(lockPath)
// Retry once // Retry once
@@ -778,11 +694,11 @@ func tryAutoStartDaemon(socketPath string) bool {
// Socket is stale (connect failed and PID dead/missing) - safe to remove // Socket is stale (connect failed and PID dead/missing) - safe to remove
if os.Getenv("BD_DEBUG") != "" { if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: socket is stale, cleaning up\n") fmt.Fprintf(os.Stderr, "Debug: socket is stale, cleaning up\n")
} }
_ = os.Remove(socketPath) _ = os.Remove(socketPath)
if pidFile != "" { if pidFile != "" {
_ = os.Remove(pidFile) _ = os.Remove(pidFile)
} }
} }
@@ -943,7 +859,7 @@ func recordDaemonStartFailure() {
func getSocketPath() string { func getSocketPath() string {
// Always use local socket (same directory as database: .beads/bd.sock) // Always use local socket (same directory as database: .beads/bd.sock)
localSocket := filepath.Join(filepath.Dir(dbPath), "bd.sock") localSocket := filepath.Join(filepath.Dir(dbPath), "bd.sock")
// Warn if old global socket exists // Warn if old global socket exists
if home, err := os.UserHomeDir(); err == nil { if home, err := os.UserHomeDir(); err == nil {
globalSocket := filepath.Join(home, ".beads", "bd.sock") globalSocket := filepath.Join(home, ".beads", "bd.sock")
@@ -953,7 +869,7 @@ func getSocketPath() string {
fmt.Fprintf(os.Stderr, "To migrate: Stop the global daemon and restart with 'bd daemon' in each project.\n") fmt.Fprintf(os.Stderr, "To migrate: Stop the global daemon and restart with 'bd daemon' in each project.\n")
} }
} }
return localSocket return localSocket
} }
@@ -1100,10 +1016,10 @@ func autoImportIfNewer() {
// Use shared import logic (bd-157) // Use shared import logic (bd-157)
opts := ImportOptions{ opts := ImportOptions{
ResolveCollisions: true, // Auto-import always resolves collisions ResolveCollisions: true, // Auto-import always resolves collisions
DryRun: false, DryRun: false,
SkipUpdate: false, SkipUpdate: false,
Strict: false, Strict: false,
SkipPrefixValidation: true, // Auto-import is lenient about prefixes SkipPrefixValidation: true, // Auto-import is lenient about prefixes
} }
@@ -1928,100 +1844,100 @@ var showCmd = &cobra.Command{
fmt.Println("\n" + strings.Repeat("─", 60)) fmt.Println("\n" + strings.Repeat("─", 60))
} }
// Parse response and use existing formatting code // Parse response and use existing formatting code
type IssueDetails struct { type IssueDetails struct {
types.Issue types.Issue
Labels []string `json:"labels,omitempty"` Labels []string `json:"labels,omitempty"`
Dependencies []*types.Issue `json:"dependencies,omitempty"` Dependencies []*types.Issue `json:"dependencies,omitempty"`
Dependents []*types.Issue `json:"dependents,omitempty"` Dependents []*types.Issue `json:"dependents,omitempty"`
} }
var details IssueDetails var details IssueDetails
if err := json.Unmarshal(resp.Data, &details); err != nil { if err := json.Unmarshal(resp.Data, &details); err != nil {
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err) fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
os.Exit(1) os.Exit(1)
} }
issue := &details.Issue issue := &details.Issue
cyan := color.New(color.FgCyan).SprintFunc() cyan := color.New(color.FgCyan).SprintFunc()
// Format output (same as direct mode below) // Format output (same as direct mode below)
tierEmoji := "" tierEmoji := ""
statusSuffix := "" statusSuffix := ""
if issue.CompactionLevel == 1 { switch issue.CompactionLevel {
tierEmoji = " 🗜️" case 1:
} else if issue.CompactionLevel == 2 { tierEmoji = " 🗜️"
tierEmoji = " 📦" statusSuffix = " (compacted L1)"
} case 2:
if issue.CompactionLevel > 0 { tierEmoji = " 📦"
statusSuffix = fmt.Sprintf(" (compacted L%d)", issue.CompactionLevel) statusSuffix = " (compacted L2)"
} }
fmt.Printf("\n%s: %s%s\n", cyan(issue.ID), issue.Title, tierEmoji) fmt.Printf("\n%s: %s%s\n", cyan(issue.ID), issue.Title, tierEmoji)
fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix) fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix)
fmt.Printf("Priority: P%d\n", issue.Priority) fmt.Printf("Priority: P%d\n", issue.Priority)
fmt.Printf("Type: %s\n", issue.IssueType) fmt.Printf("Type: %s\n", issue.IssueType)
if issue.Assignee != "" { if issue.Assignee != "" {
fmt.Printf("Assignee: %s\n", issue.Assignee) fmt.Printf("Assignee: %s\n", issue.Assignee)
} }
if issue.EstimatedMinutes != nil { if issue.EstimatedMinutes != nil {
fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes) fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
} }
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04")) fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04")) fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
// Show compaction status // Show compaction status
if issue.CompactionLevel > 0 { if issue.CompactionLevel > 0 {
fmt.Println() fmt.Println()
if issue.OriginalSize > 0 { if issue.OriginalSize > 0 {
currentSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria) currentSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
saved := issue.OriginalSize - currentSize saved := issue.OriginalSize - currentSize
if saved > 0 { if saved > 0 {
reduction := float64(saved) / float64(issue.OriginalSize) * 100 reduction := float64(saved) / float64(issue.OriginalSize) * 100
fmt.Printf("📊 Original: %d bytes | Compressed: %d bytes (%.0f%% reduction)\n", fmt.Printf("📊 Original: %d bytes | Compressed: %d bytes (%.0f%% reduction)\n",
issue.OriginalSize, currentSize, reduction) issue.OriginalSize, currentSize, reduction)
}
}
tierEmoji2 := "🗜️"
if issue.CompactionLevel == 2 {
tierEmoji2 = "📦"
}
compactedDate := ""
if issue.CompactedAt != nil {
compactedDate = issue.CompactedAt.Format("2006-01-02")
}
fmt.Printf("%s Compacted: %s (Tier %d)\n", tierEmoji2, compactedDate, issue.CompactionLevel)
}
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
}
if issue.Design != "" {
fmt.Printf("\nDesign:\n%s\n", issue.Design)
}
if issue.Notes != "" {
fmt.Printf("\nNotes:\n%s\n", issue.Notes)
}
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
}
if len(details.Labels) > 0 {
fmt.Printf("\nLabels: %v\n", details.Labels)
}
if len(details.Dependencies) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(details.Dependencies))
for _, dep := range details.Dependencies {
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
} }
} }
tierEmoji2 := "🗜️"
if issue.CompactionLevel == 2 {
tierEmoji2 = "📦"
}
compactedDate := ""
if issue.CompactedAt != nil {
compactedDate = issue.CompactedAt.Format("2006-01-02")
}
fmt.Printf("%s Compacted: %s (Tier %d)\n", tierEmoji2, compactedDate, issue.CompactionLevel)
}
if issue.Description != "" { if len(details.Dependents) > 0 {
fmt.Printf("\nDescription:\n%s\n", issue.Description) fmt.Printf("\nBlocks (%d):\n", len(details.Dependents))
} for _, dep := range details.Dependents {
if issue.Design != "" { fmt.Printf(" ← %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
fmt.Printf("\nDesign:\n%s\n", issue.Design) }
}
if issue.Notes != "" {
fmt.Printf("\nNotes:\n%s\n", issue.Notes)
}
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
}
if len(details.Labels) > 0 {
fmt.Printf("\nLabels: %v\n", details.Labels)
}
if len(details.Dependencies) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(details.Dependencies))
for _, dep := range details.Dependencies {
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
} }
}
if len(details.Dependents) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(details.Dependents))
for _, dep := range details.Dependents {
fmt.Printf(" ← %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
}
fmt.Println() fmt.Println()
} }
@@ -2069,94 +1985,94 @@ var showCmd = &cobra.Command{
fmt.Println("\n" + strings.Repeat("─", 60)) fmt.Println("\n" + strings.Repeat("─", 60))
} }
cyan := color.New(color.FgCyan).SprintFunc() cyan := color.New(color.FgCyan).SprintFunc()
// Add compaction emoji to title line // Add compaction emoji to title line
tierEmoji := "" tierEmoji := ""
statusSuffix := "" statusSuffix := ""
if issue.CompactionLevel == 1 { switch issue.CompactionLevel {
tierEmoji = " 🗜️" case 1:
} else if issue.CompactionLevel == 2 { tierEmoji = " 🗜️"
tierEmoji = " 📦" statusSuffix = " (compacted L1)"
} case 2:
if issue.CompactionLevel > 0 { tierEmoji = " 📦"
statusSuffix = fmt.Sprintf(" (compacted L%d)", issue.CompactionLevel) statusSuffix = " (compacted L2)"
}
fmt.Printf("\n%s: %s%s\n", cyan(issue.ID), issue.Title, tierEmoji)
fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix)
fmt.Printf("Priority: P%d\n", issue.Priority)
fmt.Printf("Type: %s\n", issue.IssueType)
if issue.Assignee != "" {
fmt.Printf("Assignee: %s\n", issue.Assignee)
}
if issue.EstimatedMinutes != nil {
fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
// Show compaction status footer
if issue.CompactionLevel > 0 {
tierEmoji := "🗜️"
if issue.CompactionLevel == 2 {
tierEmoji = "📦"
} }
tierName := fmt.Sprintf("Tier %d", issue.CompactionLevel)
fmt.Println() fmt.Printf("\n%s: %s%s\n", cyan(issue.ID), issue.Title, tierEmoji)
if issue.OriginalSize > 0 { fmt.Printf("Status: %s%s\n", issue.Status, statusSuffix)
currentSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria) fmt.Printf("Priority: P%d\n", issue.Priority)
saved := issue.OriginalSize - currentSize fmt.Printf("Type: %s\n", issue.IssueType)
if saved > 0 { if issue.Assignee != "" {
reduction := float64(saved) / float64(issue.OriginalSize) * 100 fmt.Printf("Assignee: %s\n", issue.Assignee)
fmt.Printf("📊 Original: %d bytes | Compressed: %d bytes (%.0f%% reduction)\n", }
issue.OriginalSize, currentSize, reduction) if issue.EstimatedMinutes != nil {
fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
// Show compaction status footer
if issue.CompactionLevel > 0 {
tierEmoji := "🗜️"
if issue.CompactionLevel == 2 {
tierEmoji = "📦"
}
tierName := fmt.Sprintf("Tier %d", issue.CompactionLevel)
fmt.Println()
if issue.OriginalSize > 0 {
currentSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
saved := issue.OriginalSize - currentSize
if saved > 0 {
reduction := float64(saved) / float64(issue.OriginalSize) * 100
fmt.Printf("📊 Original: %d bytes | Compressed: %d bytes (%.0f%% reduction)\n",
issue.OriginalSize, currentSize, reduction)
}
}
compactedDate := ""
if issue.CompactedAt != nil {
compactedDate = issue.CompactedAt.Format("2006-01-02")
}
fmt.Printf("%s Compacted: %s (%s)\n", tierEmoji, compactedDate, tierName)
}
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
}
if issue.Design != "" {
fmt.Printf("\nDesign:\n%s\n", issue.Design)
}
if issue.Notes != "" {
fmt.Printf("\nNotes:\n%s\n", issue.Notes)
}
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
}
// Show labels
labels, _ := store.GetLabels(ctx, issue.ID)
if len(labels) > 0 {
fmt.Printf("\nLabels: %v\n", labels)
}
// Show dependencies
deps, _ := store.GetDependencies(ctx, issue.ID)
if len(deps) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(deps))
for _, dep := range deps {
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
} }
} }
compactedDate := ""
if issue.CompactedAt != nil { // Show dependents
compactedDate = issue.CompactedAt.Format("2006-01-02") dependents, _ := store.GetDependents(ctx, issue.ID)
if len(dependents) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(dependents))
for _, dep := range dependents {
fmt.Printf(" ← %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
} }
fmt.Printf("%s Compacted: %s (%s)\n", tierEmoji, compactedDate, tierName)
}
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
}
if issue.Design != "" {
fmt.Printf("\nDesign:\n%s\n", issue.Design)
}
if issue.Notes != "" {
fmt.Printf("\nNotes:\n%s\n", issue.Notes)
}
if issue.AcceptanceCriteria != "" {
fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
}
// Show labels
labels, _ := store.GetLabels(ctx, issue.ID)
if len(labels) > 0 {
fmt.Printf("\nLabels: %v\n", labels)
}
// Show dependencies
deps, _ := store.GetDependencies(ctx, issue.ID)
if len(deps) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(deps))
for _, dep := range deps {
fmt.Printf(" → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
}
// Show dependents
dependents, _ := store.GetDependents(ctx, issue.ID)
if len(dependents) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(dependents))
for _, dep := range dependents {
fmt.Printf(" ← %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
}
}
// Show comments // Show comments
comments, _ := store.GetIssueComments(ctx, issue.ID) comments, _ := store.GetIssueComments(ctx, issue.ID)
@@ -2200,30 +2116,30 @@ var updateCmd = &cobra.Command{
updates["title"] = title updates["title"] = title
} }
if cmd.Flags().Changed("assignee") { if cmd.Flags().Changed("assignee") {
assignee, _ := cmd.Flags().GetString("assignee") assignee, _ := cmd.Flags().GetString("assignee")
updates["assignee"] = assignee updates["assignee"] = assignee
} }
if cmd.Flags().Changed("description") { if cmd.Flags().Changed("description") {
description, _ := cmd.Flags().GetString("description") description, _ := cmd.Flags().GetString("description")
updates["description"] = description updates["description"] = description
}
if cmd.Flags().Changed("design") {
design, _ := cmd.Flags().GetString("design")
updates["design"] = design
} }
if cmd.Flags().Changed("design") {
design, _ := cmd.Flags().GetString("design")
updates["design"] = design
}
if cmd.Flags().Changed("notes") { if cmd.Flags().Changed("notes") {
notes, _ := cmd.Flags().GetString("notes") notes, _ := cmd.Flags().GetString("notes")
updates["notes"] = notes updates["notes"] = notes
} }
if cmd.Flags().Changed("acceptance") || cmd.Flags().Changed("acceptance-criteria") { if cmd.Flags().Changed("acceptance") || cmd.Flags().Changed("acceptance-criteria") {
var acceptanceCriteria string var acceptanceCriteria string
if cmd.Flags().Changed("acceptance") { if cmd.Flags().Changed("acceptance") {
acceptanceCriteria, _ = cmd.Flags().GetString("acceptance") acceptanceCriteria, _ = cmd.Flags().GetString("acceptance")
} else { } else {
acceptanceCriteria, _ = cmd.Flags().GetString("acceptance-criteria") acceptanceCriteria, _ = cmd.Flags().GetString("acceptance-criteria")
}
updates["acceptance_criteria"] = acceptanceCriteria
} }
updates["acceptance_criteria"] = acceptanceCriteria
}
if cmd.Flags().Changed("external-ref") { if cmd.Flags().Changed("external-ref") {
externalRef, _ := cmd.Flags().GetString("external-ref") externalRef, _ := cmd.Flags().GetString("external-ref")
updates["external_ref"] = externalRef updates["external_ref"] = externalRef
@@ -2251,14 +2167,14 @@ var updateCmd = &cobra.Command{
updateArgs.Title = &title updateArgs.Title = &title
} }
if assignee, ok := updates["assignee"].(string); ok { if assignee, ok := updates["assignee"].(string); ok {
updateArgs.Assignee = &assignee updateArgs.Assignee = &assignee
} }
if description, ok := updates["description"].(string); ok { if description, ok := updates["description"].(string); ok {
updateArgs.Description = &description updateArgs.Description = &description
}
if design, ok := updates["design"].(string); ok {
updateArgs.Design = &design
} }
if design, ok := updates["design"].(string); ok {
updateArgs.Design = &design
}
if notes, ok := updates["notes"].(string); ok { if notes, ok := updates["notes"].(string); ok {
updateArgs.Notes = &notes updateArgs.Notes = &notes
} }
-7
View File
@@ -7,7 +7,6 @@ import (
"os" "os"
"regexp" "regexp"
"sort" "sort"
"strconv"
"strings" "strings"
"github.com/fatih/color" "github.com/fatih/color"
@@ -358,12 +357,6 @@ func renumberDependencies(ctx context.Context, idMapping map[string]string, allD
return nil return nil
} }
// Helper to extract numeric part from issue ID
func extractNumber(issueID, prefix string) (int, error) {
numStr := strings.TrimPrefix(issueID, prefix+"-")
return strconv.Atoi(numStr)
}
func init() { func init() {
renumberCmd.Flags().Bool("dry-run", false, "Preview changes without applying them") renumberCmd.Flags().Bool("dry-run", false, "Preview changes without applying them")
renumberCmd.Flags().Bool("force", false, "Actually perform the renumbering") renumberCmd.Flags().Bool("force", false, "Actually perform the renumbering")
+6
View File
@@ -13,18 +13,21 @@ const (
defaultConcurrency = 5 defaultConcurrency = 5
) )
// CompactConfig holds configuration for the compaction process.
type CompactConfig struct { type CompactConfig struct {
APIKey string APIKey string
Concurrency int Concurrency int
DryRun bool DryRun bool
} }
// Compactor handles issue compaction using AI summarization.
type Compactor struct { type Compactor struct {
store *sqlite.SQLiteStorage store *sqlite.SQLiteStorage
haiku *HaikuClient haiku *HaikuClient
config *CompactConfig config *CompactConfig
} }
// New creates a new Compactor instance with the given configuration.
func New(store *sqlite.SQLiteStorage, apiKey string, config *CompactConfig) (*Compactor, error) { func New(store *sqlite.SQLiteStorage, apiKey string, config *CompactConfig) (*Compactor, error) {
if config == nil { if config == nil {
config = &CompactConfig{ config = &CompactConfig{
@@ -58,6 +61,7 @@ func New(store *sqlite.SQLiteStorage, apiKey string, config *CompactConfig) (*Co
}, nil }, nil
} }
// CompactResult holds the outcome of a compaction operation.
type CompactResult struct { type CompactResult struct {
IssueID string IssueID string
OriginalSize int OriginalSize int
@@ -65,6 +69,7 @@ type CompactResult struct {
Err error Err error
} }
// CompactTier1 performs tier-1 compaction on a single issue using AI summarization.
func (c *Compactor) CompactTier1(ctx context.Context, issueID string) error { func (c *Compactor) CompactTier1(ctx context.Context, issueID string) error {
if ctx.Err() != nil { if ctx.Err() != nil {
return ctx.Err() return ctx.Err()
@@ -137,6 +142,7 @@ func (c *Compactor) CompactTier1(ctx context.Context, issueID string) error {
return nil return nil
} }
// CompactTier1Batch performs tier-1 compaction on multiple issues in a single batch.
func (c *Compactor) CompactTier1Batch(ctx context.Context, issueIDs []string) ([]*CompactResult, error) { func (c *Compactor) CompactTier1Batch(ctx context.Context, issueIDs []string) ([]*CompactResult, error) {
if len(issueIDs) == 0 { if len(issueIDs) == 0 {
return nil, nil return nil, nil
+3 -2
View File
@@ -298,11 +298,12 @@ func TestCompactTier1Batch_WithIneligible(t *testing.T) {
} }
for _, result := range results { for _, result := range results {
if result.IssueID == openIssue.ID { switch result.IssueID {
case openIssue.ID:
if result.Err == nil { if result.Err == nil {
t.Error("expected error for ineligible issue") t.Error("expected error for ineligible issue")
} }
} else if result.IssueID == closedIssue.ID { case closedIssue.ID:
if result.Err != nil { if result.Err != nil {
t.Errorf("unexpected error for eligible issue: %v", result.Err) t.Errorf("unexpected error for eligible issue: %v", result.Err)
} }
+1
View File
@@ -22,6 +22,7 @@ const (
initialBackoff = 1 * time.Second initialBackoff = 1 * time.Second
) )
// ErrAPIKeyRequired is returned when an API key is needed but not provided.
var ErrAPIKeyRequired = errors.New("API key required") var ErrAPIKeyRequired = errors.New("API key required")
// HaikuClient wraps the Anthropic API for issue summarization. // HaikuClient wraps the Anthropic API for issue summarization.
+1 -1
View File
@@ -193,7 +193,7 @@ func TestCallWithRetry_ContextCancellation(t *testing.T) {
_, err = client.callWithRetry(ctx, "test prompt") _, err = client.callWithRetry(ctx, "test prompt")
if err == nil { if err == nil {
t.Fatal("expected error when context is cancelled") t.Fatal("expected error when context is canceled")
} }
if err != context.Canceled { if err != context.Canceled {
t.Errorf("expected context.Canceled error, got: %v", err) t.Errorf("expected context.Canceled error, got: %v", err)
+2 -2
View File
@@ -145,8 +145,8 @@ func Set(key string, value interface{}) {
// return v.BindPFlag(key, flag) // return v.BindPFlag(key, flag)
// } // }
// ConfigFileUsed returns the path to the config file being used // FileUsed returns the path to the active configuration file.
func ConfigFileUsed() string { func FileUsed() string {
if v == nil { if v == nil {
return "" return ""
} }
+1 -1
View File
@@ -218,7 +218,7 @@ func (c *Client) Update(args *UpdateArgs) (*Response, error) {
return c.Execute(OpUpdate, args) return c.Execute(OpUpdate, args)
} }
// Close closes an issue via the daemon (operation, not connection) // CloseIssue marks an issue as closed via the daemon.
func (c *Client) CloseIssue(args *CloseArgs) (*Response, error) { func (c *Client) CloseIssue(args *CloseArgs) (*Response, error) {
return c.Execute(OpClose, args) return c.Execute(OpClose, args)
} }
+34 -74
View File
@@ -28,6 +28,10 @@ import (
// It's set as a var so it can be initialized from main // It's set as a var so it can be initialized from main
var ServerVersion = "0.9.10" var ServerVersion = "0.9.10"
const (
statusUnhealthy = "unhealthy"
)
// normalizeLabels trims whitespace, removes empty strings, and deduplicates labels // normalizeLabels trims whitespace, removes empty strings, and deduplicates labels
func normalizeLabels(ss []string) []string { func normalizeLabels(ss []string) []string {
seen := make(map[string]struct{}) seen := make(map[string]struct{})
@@ -259,7 +263,7 @@ func (s *Server) Stop() error {
err = fmt.Errorf("failed to remove socket: %w", removeErr) err = fmt.Errorf("failed to remove socket: %w", removeErr)
} }
}) })
// Wait for Start() goroutine to finish cleanup (with timeout) // Wait for Start() goroutine to finish cleanup (with timeout)
select { select {
case <-s.doneChan: case <-s.doneChan:
@@ -267,7 +271,7 @@ func (s *Server) Stop() error {
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
// Timeout waiting for cleanup - continue anyway // Timeout waiting for cleanup - continue anyway
} }
return err return err
} }
@@ -699,13 +703,6 @@ func strValue(p *string) string {
return *p return *p
} }
func strPtr(s string) *string {
if s == "" {
return nil
}
return &s
}
func updatesFromArgs(a UpdateArgs) map[string]interface{} { func updatesFromArgs(a UpdateArgs) map[string]interface{} {
u := map[string]interface{}{} u := map[string]interface{}{}
if a.Title != nil { if a.Title != nil {
@@ -780,7 +777,7 @@ func (s *Server) handleHealth(req *Request) Response {
dbResponseMs := time.Since(start).Seconds() * 1000 dbResponseMs := time.Since(start).Seconds() * 1000
if pingErr != nil { if pingErr != nil {
status = "unhealthy" status = statusUnhealthy
dbError = pingErr.Error() dbError = pingErr.Error()
} else if dbResponseMs > 500 { } else if dbResponseMs > 500 {
status = "degraded" status = "degraded"
@@ -1270,12 +1267,13 @@ func (s *Server) handleDepAdd(req *Request) Response {
return Response{Success: true} return Response{Success: true}
} }
func (s *Server) handleDepRemove(req *Request) Response { // Generic handler for simple store operations with standard error handling
var depArgs DepRemoveArgs func (s *Server) handleSimpleStoreOp(req *Request, argsPtr interface{}, argDesc string,
if err := json.Unmarshal(req.Args, &depArgs); err != nil { opFunc func(context.Context, storage.Storage, string) error) Response {
if err := json.Unmarshal(req.Args, argsPtr); err != nil {
return Response{ return Response{
Success: false, Success: false,
Error: fmt.Sprintf("invalid dep remove args: %v", err), Error: fmt.Sprintf("invalid %s args: %v", argDesc, err),
} }
} }
@@ -1288,70 +1286,35 @@ func (s *Server) handleDepRemove(req *Request) Response {
} }
ctx := s.reqCtx(req) ctx := s.reqCtx(req)
if err := store.RemoveDependency(ctx, depArgs.FromID, depArgs.ToID, s.reqActor(req)); err != nil { if err := opFunc(ctx, store, s.reqActor(req)); err != nil {
return Response{ return Response{
Success: false, Success: false,
Error: fmt.Sprintf("failed to remove dependency: %v", err), Error: fmt.Sprintf("failed to %s: %v", argDesc, err),
} }
} }
return Response{Success: true} return Response{Success: true}
} }
func (s *Server) handleDepRemove(req *Request) Response {
var depArgs DepRemoveArgs
return s.handleSimpleStoreOp(req, &depArgs, "dep remove", func(ctx context.Context, store storage.Storage, actor string) error {
return store.RemoveDependency(ctx, depArgs.FromID, depArgs.ToID, actor)
})
}
func (s *Server) handleLabelAdd(req *Request) Response { func (s *Server) handleLabelAdd(req *Request) Response {
var labelArgs LabelAddArgs var labelArgs LabelAddArgs
if err := json.Unmarshal(req.Args, &labelArgs); err != nil { return s.handleSimpleStoreOp(req, &labelArgs, "label add", func(ctx context.Context, store storage.Storage, actor string) error {
return Response{ return store.AddLabel(ctx, labelArgs.ID, labelArgs.Label, actor)
Success: false, })
Error: fmt.Sprintf("invalid label add args: %v", err),
}
}
store, err := s.getStorageForRequest(req)
if err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("storage error: %v", err),
}
}
ctx := s.reqCtx(req)
if err := store.AddLabel(ctx, labelArgs.ID, labelArgs.Label, s.reqActor(req)); err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("failed to add label: %v", err),
}
}
return Response{Success: true}
} }
func (s *Server) handleLabelRemove(req *Request) Response { func (s *Server) handleLabelRemove(req *Request) Response {
var labelArgs LabelRemoveArgs var labelArgs LabelRemoveArgs
if err := json.Unmarshal(req.Args, &labelArgs); err != nil { return s.handleSimpleStoreOp(req, &labelArgs, "label remove", func(ctx context.Context, store storage.Storage, actor string) error {
return Response{ return store.RemoveLabel(ctx, labelArgs.ID, labelArgs.Label, actor)
Success: false, })
Error: fmt.Sprintf("invalid label remove args: %v", err),
}
}
store, err := s.getStorageForRequest(req)
if err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("storage error: %v", err),
}
}
ctx := s.reqCtx(req)
if err := store.RemoveLabel(ctx, labelArgs.ID, labelArgs.Label, s.reqActor(req)); err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("failed to remove label: %v", err),
}
}
return Response{Success: true}
} }
func (s *Server) handleCommentList(req *Request) Response { func (s *Server) handleCommentList(req *Request) Response {
@@ -1443,11 +1406,7 @@ func (s *Server) handleBatch(req *Request) Response {
resp := s.handleRequest(subReq) resp := s.handleRequest(subReq)
results = append(results, BatchResult{ results = append(results, BatchResult(resp))
Success: resp.Success,
Data: resp.Data,
Error: resp.Error,
})
if !resp.Success { if !resp.Success {
break break
@@ -1537,7 +1496,7 @@ func (s *Server) getStorageForRequest(req *Request) (storage.Storage, error) {
// If we can't stat, still cache it but with zero mtime (will invalidate on next check) // If we can't stat, still cache it but with zero mtime (will invalidate on next check)
info = nil info = nil
} }
mtime := time.Time{} mtime := time.Time{}
if info != nil { if info != nil {
mtime = info.ModTime() mtime = info.ModTime()
@@ -1929,7 +1888,8 @@ func (s *Server) handleCompact(req *Request) Response {
if args.All { if args.All {
var candidates []*sqlite.CompactionCandidate var candidates []*sqlite.CompactionCandidate
if args.Tier == 1 { switch args.Tier {
case 1:
tier1, err := sqliteStore.GetTier1Candidates(ctx) tier1, err := sqliteStore.GetTier1Candidates(ctx)
if err != nil { if err != nil {
return Response{ return Response{
@@ -1938,7 +1898,7 @@ func (s *Server) handleCompact(req *Request) Response {
} }
} }
candidates = tier1 candidates = tier1
} else if args.Tier == 2 { case 2:
tier2, err := sqliteStore.GetTier2Candidates(ctx) tier2, err := sqliteStore.GetTier2Candidates(ctx)
if err != nil { if err != nil {
return Response{ return Response{
@@ -1947,7 +1907,7 @@ func (s *Server) handleCompact(req *Request) Response {
} }
} }
candidates = tier2 candidates = tier2
} else { default:
return Response{ return Response{
Success: false, Success: false,
Error: fmt.Sprintf("invalid tier: %d (must be 1 or 2)", args.Tier), Error: fmt.Sprintf("invalid tier: %d (must be 1 or 2)", args.Tier),
@@ -2201,7 +2161,7 @@ func (s *Server) handleExport(req *Request) Response {
result := map[string]interface{}{ result := map[string]interface{}{
"exported_count": len(exportedIDs), "exported_count": len(exportedIDs),
"path": exportArgs.JSONLPath, "path": exportArgs.JSONLPath,
} }
data, _ := json.Marshal(result) data, _ := json.Marshal(result)
return Response{ return Response{
+3 -19
View File
@@ -128,14 +128,6 @@ func (s *SQLiteStorage) GetTier2Candidates(ctx context.Context) ([]*CompactionCa
daysStr = "90" daysStr = "90"
} }
depthStr, err := s.GetConfig(ctx, "compact_tier2_dep_levels")
if err != nil {
return nil, fmt.Errorf("failed to get compact_tier2_dep_levels: %w", err)
}
if depthStr == "" {
depthStr = "5"
}
commitsStr, err := s.GetConfig(ctx, "compact_tier2_commits") commitsStr, err := s.GetConfig(ctx, "compact_tier2_commits")
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get compact_tier2_commits: %w", err) return nil, fmt.Errorf("failed to get compact_tier2_commits: %w", err)
@@ -227,20 +219,12 @@ func (s *SQLiteStorage) CheckEligibility(ctx context.Context, issueID string, ti
return false, "issue has no closed_at timestamp", nil return false, "issue has no closed_at timestamp", nil
} }
if tier == 1 { switch tier {
case 1:
if compactionLevel != 0 { if compactionLevel != 0 {
return false, "issue is already compacted", nil return false, "issue is already compacted", nil
} }
// Check if closed long enough
daysStr, err := s.GetConfig(ctx, "compact_tier1_days")
if err != nil {
return false, "", fmt.Errorf("failed to get compact_tier1_days: %w", err)
}
if daysStr == "" {
daysStr = "30"
}
// Check if it appears in tier1 candidates // Check if it appears in tier1 candidates
candidates, err := s.GetTier1Candidates(ctx) candidates, err := s.GetTier1Candidates(ctx)
if err != nil { if err != nil {
@@ -255,7 +239,7 @@ func (s *SQLiteStorage) CheckEligibility(ctx context.Context, issueID string, ti
return false, "issue has open dependents or not closed long enough", nil return false, "issue has open dependents or not closed long enough", nil
} else if tier == 2 { case 2:
if compactionLevel != 1 { if compactionLevel != 1 {
return false, "issue must be at compaction level 1 for tier 2", nil return false, "issue must be at compaction level 1 for tier 2", nil
} }