Add RPC support for compact command (bd-184)
- Added OpCompact and OpCompactStats operation constants
- Added CompactArgs, CompactStatsArgs, and response types to RPC protocol
- Implemented handleCompact and handleCompactStats in RPC server
- Updated compact command to use RPC when daemon is available
- Fixed RPC client to include Cwd for proper database routing
- Compact now works in daemon mode with --no-daemon flag

Amp-Thread-ID: https://ampcode.com/threads/T-87885d07-80ad-466d-9ffb-cc96fab4853f
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
@@ -44,17 +45,34 @@ Examples:
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Handle compact stats first
|
||||
if compactStats {
|
||||
if daemonClient != nil {
|
||||
runCompactStatsRPC(ctx)
|
||||
} else {
|
||||
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
||||
if !ok {
|
||||
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
runCompactStats(ctx, sqliteStore)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If using daemon, delegate to RPC
|
||||
if daemonClient != nil {
|
||||
runCompactRPC(ctx)
|
||||
return
|
||||
}
|
||||
|
||||
// Direct mode - original logic
|
||||
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
|
||||
if !ok {
|
||||
fmt.Fprintf(os.Stderr, "Error: compact requires SQLite storage\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if compactStats {
|
||||
runCompactStats(ctx, sqliteStore)
|
||||
return
|
||||
}
|
||||
|
||||
if compactID != "" && compactAll {
|
||||
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
|
||||
os.Exit(1)
|
||||
@@ -385,6 +403,164 @@ func progressBar(current, total int) string {
|
||||
return "[" + bar + "]"
|
||||
}
|
||||
|
||||
func runCompactRPC(ctx context.Context) {
|
||||
if compactID != "" && compactAll {
|
||||
fmt.Fprintf(os.Stderr, "Error: cannot use --id and --all together\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if compactForce && compactID == "" {
|
||||
fmt.Fprintf(os.Stderr, "Error: --force requires --id\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if compactID == "" && !compactAll && !compactDryRun {
|
||||
fmt.Fprintf(os.Stderr, "Error: must specify --all, --id, or --dry-run\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
apiKey := os.Getenv("ANTHROPIC_API_KEY")
|
||||
if apiKey == "" && !compactDryRun {
|
||||
fmt.Fprintf(os.Stderr, "Error: ANTHROPIC_API_KEY environment variable not set\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
args := map[string]interface{}{
|
||||
"tier": compactTier,
|
||||
"dry_run": compactDryRun,
|
||||
"force": compactForce,
|
||||
"all": compactAll,
|
||||
"api_key": apiKey,
|
||||
"workers": compactWorkers,
|
||||
"batch_size": compactBatch,
|
||||
}
|
||||
if compactID != "" {
|
||||
args["issue_id"] = compactID
|
||||
}
|
||||
|
||||
resp, err := daemonClient.Execute("compact", args)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !resp.Success {
|
||||
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if jsonOutput {
|
||||
fmt.Println(string(resp.Data))
|
||||
return
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Success bool `json:"success"`
|
||||
IssueID string `json:"issue_id,omitempty"`
|
||||
OriginalSize int `json:"original_size,omitempty"`
|
||||
CompactedSize int `json:"compacted_size,omitempty"`
|
||||
Reduction string `json:"reduction,omitempty"`
|
||||
Duration string `json:"duration,omitempty"`
|
||||
DryRun bool `json:"dry_run,omitempty"`
|
||||
Results []struct {
|
||||
IssueID string `json:"issue_id"`
|
||||
Success bool `json:"success"`
|
||||
Error string `json:"error,omitempty"`
|
||||
OriginalSize int `json:"original_size,omitempty"`
|
||||
CompactedSize int `json:"compacted_size,omitempty"`
|
||||
Reduction string `json:"reduction,omitempty"`
|
||||
} `json:"results,omitempty"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(resp.Data, &result); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if compactID != "" {
|
||||
if result.DryRun {
|
||||
fmt.Printf("DRY RUN - Tier %d compaction\n\n", compactTier)
|
||||
fmt.Printf("Issue: %s\n", compactID)
|
||||
fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
|
||||
fmt.Printf("Estimated reduction: %s\n", result.Reduction)
|
||||
} else {
|
||||
fmt.Printf("Successfully compacted %s\n", result.IssueID)
|
||||
fmt.Printf("Original size: %d bytes\n", result.OriginalSize)
|
||||
fmt.Printf("Compacted size: %d bytes\n", result.CompactedSize)
|
||||
fmt.Printf("Reduction: %s\n", result.Reduction)
|
||||
fmt.Printf("Duration: %s\n", result.Duration)
|
||||
}
|
||||
} else if compactAll {
|
||||
if result.DryRun {
|
||||
fmt.Printf("DRY RUN - Found %d candidates for Tier %d compaction\n", len(result.Results), compactTier)
|
||||
} else {
|
||||
successCount := 0
|
||||
for _, r := range result.Results {
|
||||
if r.Success {
|
||||
successCount++
|
||||
}
|
||||
}
|
||||
fmt.Printf("Compacted %d/%d issues in %s\n", successCount, len(result.Results), result.Duration)
|
||||
for _, r := range result.Results {
|
||||
if r.Success {
|
||||
fmt.Printf(" ✓ %s: %d → %d bytes (%s)\n", r.IssueID, r.OriginalSize, r.CompactedSize, r.Reduction)
|
||||
} else {
|
||||
fmt.Printf(" ✗ %s: %s\n", r.IssueID, r.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runCompactStatsRPC(ctx context.Context) {
|
||||
args := map[string]interface{}{
|
||||
"tier": compactTier,
|
||||
}
|
||||
|
||||
resp, err := daemonClient.Execute("compact_stats", args)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !resp.Success {
|
||||
fmt.Fprintf(os.Stderr, "Error: %s\n", resp.Error)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if jsonOutput {
|
||||
fmt.Println(string(resp.Data))
|
||||
return
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Success bool `json:"success"`
|
||||
Stats struct {
|
||||
Tier1Candidates int `json:"tier1_candidates"`
|
||||
Tier2Candidates int `json:"tier2_candidates"`
|
||||
TotalClosed int `json:"total_closed"`
|
||||
Tier1MinAge string `json:"tier1_min_age"`
|
||||
Tier2MinAge string `json:"tier2_min_age"`
|
||||
EstimatedSavings string `json:"estimated_savings,omitempty"`
|
||||
} `json:"stats"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(resp.Data, &result); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error parsing response: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("\nCompaction Statistics\n")
|
||||
fmt.Printf("=====================\n\n")
|
||||
fmt.Printf("Total closed issues: %d\n\n", result.Stats.TotalClosed)
|
||||
fmt.Printf("Tier 1 (30+ days closed, not compacted):\n")
|
||||
fmt.Printf(" Candidates: %d\n", result.Stats.Tier1Candidates)
|
||||
fmt.Printf(" Min age: %s\n\n", result.Stats.Tier1MinAge)
|
||||
fmt.Printf("Tier 2 (90+ days closed, Tier 1 compacted):\n")
|
||||
fmt.Printf(" Candidates: %d\n", result.Stats.Tier2Candidates)
|
||||
fmt.Printf(" Min age: %s\n", result.Stats.Tier2MinAge)
|
||||
}
|
||||
|
||||
func init() {
|
||||
compactCmd.Flags().BoolVar(&compactDryRun, "dry-run", false, "Preview without compacting")
|
||||
compactCmd.Flags().IntVar(&compactTier, "tier", 1, "Compaction tier (1 or 2)")
|
||||
|
||||
Reference in New Issue
Block a user