Fix lint errors: handle errors, use fmt.Fprintf, apply De Morgan's law, use switch statements

Amp-Thread-ID: https://ampcode.com/threads/T-afcf56b0-a8bc-4310-bb59-1b63e1d70c89
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Steve Yegge
2025-10-24 12:27:02 -07:00
parent 1d5e89b9bb
commit 9dcb86ebfb
17 changed files with 342 additions and 537 deletions
+6
View File
@@ -13,18 +13,21 @@ const (
defaultConcurrency = 5
)
// CompactConfig holds configuration for the compaction process.
type CompactConfig struct {
	APIKey      string // API key for the AI summarization client; required unless supplied to New separately
	Concurrency int    // number of issues processed in parallel (defaultConcurrency when unset — see New)
	DryRun      bool   // NOTE(review): presumably reports without persisting changes — confirm against usage
}
// Compactor handles issue compaction using AI summarization.
type Compactor struct {
	store  *sqlite.SQLiteStorage // backing issue store being compacted
	haiku  *HaikuClient          // Anthropic API wrapper used to produce summaries
	config *CompactConfig        // effective configuration; New substitutes defaults when nil
}
// New creates a new Compactor instance with the given configuration.
func New(store *sqlite.SQLiteStorage, apiKey string, config *CompactConfig) (*Compactor, error) {
if config == nil {
config = &CompactConfig{
@@ -58,6 +61,7 @@ func New(store *sqlite.SQLiteStorage, apiKey string, config *CompactConfig) (*Co
}, nil
}
// CompactResult holds the outcome of a compaction operation.
type CompactResult struct {
IssueID string
OriginalSize int
@@ -65,6 +69,7 @@ type CompactResult struct {
Err error
}
// CompactTier1 performs tier-1 compaction on a single issue using AI summarization.
func (c *Compactor) CompactTier1(ctx context.Context, issueID string) error {
if ctx.Err() != nil {
return ctx.Err()
@@ -137,6 +142,7 @@ func (c *Compactor) CompactTier1(ctx context.Context, issueID string) error {
return nil
}
// CompactTier1Batch performs tier-1 compaction on multiple issues in a single batch.
func (c *Compactor) CompactTier1Batch(ctx context.Context, issueIDs []string) ([]*CompactResult, error) {
if len(issueIDs) == 0 {
return nil, nil
+3 -2
View File
@@ -298,11 +298,12 @@ func TestCompactTier1Batch_WithIneligible(t *testing.T) {
}
for _, result := range results {
if result.IssueID == openIssue.ID {
switch result.IssueID {
case openIssue.ID:
if result.Err == nil {
t.Error("expected error for ineligible issue")
}
} else if result.IssueID == closedIssue.ID {
case closedIssue.ID:
if result.Err != nil {
t.Errorf("unexpected error for eligible issue: %v", result.Err)
}
+1
View File
@@ -22,6 +22,7 @@ const (
initialBackoff = 1 * time.Second
)
// ErrAPIKeyRequired is returned when an API key is needed but not provided.
var ErrAPIKeyRequired = errors.New("API key required")
// HaikuClient wraps the Anthropic API for issue summarization.
+1 -1
View File
@@ -193,7 +193,7 @@ func TestCallWithRetry_ContextCancellation(t *testing.T) {
_, err = client.callWithRetry(ctx, "test prompt")
if err == nil {
t.Fatal("expected error when context is cancelled")
t.Fatal("expected error when context is canceled")
}
if err != context.Canceled {
t.Errorf("expected context.Canceled error, got: %v", err)
+2 -2
View File
@@ -145,8 +145,8 @@ func Set(key string, value interface{}) {
// return v.BindPFlag(key, flag)
// }
// ConfigFileUsed returns the path to the config file being used
func ConfigFileUsed() string {
// FileUsed returns the path to the active configuration file.
func FileUsed() string {
if v == nil {
return ""
}
+1 -1
View File
@@ -218,7 +218,7 @@ func (c *Client) Update(args *UpdateArgs) (*Response, error) {
return c.Execute(OpUpdate, args)
}
// CloseIssue marks an issue as closed via the daemon. This is the
// issue-state operation (OpClose), not a connection close.
func (c *Client) CloseIssue(args *CloseArgs) (*Response, error) {
	return c.Execute(OpClose, args)
}
+34 -74
View File
@@ -28,6 +28,10 @@ import (
// It's set as a var so it can be initialized from main
var ServerVersion = "0.9.10"
const (
statusUnhealthy = "unhealthy"
)
// normalizeLabels trims whitespace, removes empty strings, and deduplicates labels
func normalizeLabels(ss []string) []string {
seen := make(map[string]struct{})
@@ -259,7 +263,7 @@ func (s *Server) Stop() error {
err = fmt.Errorf("failed to remove socket: %w", removeErr)
}
})
// Wait for Start() goroutine to finish cleanup (with timeout)
select {
case <-s.doneChan:
@@ -267,7 +271,7 @@ func (s *Server) Stop() error {
case <-time.After(5 * time.Second):
// Timeout waiting for cleanup - continue anyway
}
return err
}
@@ -699,13 +703,6 @@ func strValue(p *string) string {
return *p
}
// strPtr converts a string to an optional *string: the empty string
// maps to nil, any other value to a pointer holding that value.
func strPtr(s string) *string {
	if len(s) == 0 {
		return nil
	}
	v := s
	return &v
}
func updatesFromArgs(a UpdateArgs) map[string]interface{} {
u := map[string]interface{}{}
if a.Title != nil {
@@ -780,7 +777,7 @@ func (s *Server) handleHealth(req *Request) Response {
dbResponseMs := time.Since(start).Seconds() * 1000
if pingErr != nil {
status = "unhealthy"
status = statusUnhealthy
dbError = pingErr.Error()
} else if dbResponseMs > 500 {
status = "degraded"
@@ -1270,12 +1267,13 @@ func (s *Server) handleDepAdd(req *Request) Response {
return Response{Success: true}
}
func (s *Server) handleDepRemove(req *Request) Response {
var depArgs DepRemoveArgs
if err := json.Unmarshal(req.Args, &depArgs); err != nil {
// Generic handler for simple store operations with standard error handling
func (s *Server) handleSimpleStoreOp(req *Request, argsPtr interface{}, argDesc string,
opFunc func(context.Context, storage.Storage, string) error) Response {
if err := json.Unmarshal(req.Args, argsPtr); err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("invalid dep remove args: %v", err),
Error: fmt.Sprintf("invalid %s args: %v", argDesc, err),
}
}
@@ -1288,70 +1286,35 @@ func (s *Server) handleDepRemove(req *Request) Response {
}
ctx := s.reqCtx(req)
if err := store.RemoveDependency(ctx, depArgs.FromID, depArgs.ToID, s.reqActor(req)); err != nil {
if err := opFunc(ctx, store, s.reqActor(req)); err != nil {
return Response{
Success: false,
Error: fmt.Sprintf("failed to remove dependency: %v", err),
Error: fmt.Sprintf("failed to %s: %v", argDesc, err),
}
}
return Response{Success: true}
}
// handleDepRemove removes a dependency edge between two issues.
// Argument decoding and error-response formatting are delegated to
// handleSimpleStoreOp; this handler only supplies the store operation.
func (s *Server) handleDepRemove(req *Request) Response {
	var depArgs DepRemoveArgs
	// depArgs is populated by handleSimpleStoreOp (via json.Unmarshal)
	// before the closure runs, so FromID/ToID are read lazily here.
	return s.handleSimpleStoreOp(req, &depArgs, "dep remove", func(ctx context.Context, store storage.Storage, actor string) error {
		return store.RemoveDependency(ctx, depArgs.FromID, depArgs.ToID, actor)
	})
}
// handleLabelAdd adds a label to an issue. Argument decoding and
// error-response formatting are delegated to handleSimpleStoreOp;
// this handler only supplies the AddLabel store operation.
func (s *Server) handleLabelAdd(req *Request) Response {
	var labelArgs LabelAddArgs
	// labelArgs is populated by handleSimpleStoreOp (via json.Unmarshal)
	// before the closure runs, so ID/Label are read lazily here.
	return s.handleSimpleStoreOp(req, &labelArgs, "label add", func(ctx context.Context, store storage.Storage, actor string) error {
		return store.AddLabel(ctx, labelArgs.ID, labelArgs.Label, actor)
	})
}
// handleLabelRemove removes a label from an issue. Argument decoding
// and error-response formatting are delegated to handleSimpleStoreOp;
// this handler only supplies the RemoveLabel store operation.
func (s *Server) handleLabelRemove(req *Request) Response {
	var labelArgs LabelRemoveArgs
	// labelArgs is populated by handleSimpleStoreOp (via json.Unmarshal)
	// before the closure runs, so ID/Label are read lazily here.
	return s.handleSimpleStoreOp(req, &labelArgs, "label remove", func(ctx context.Context, store storage.Storage, actor string) error {
		return store.RemoveLabel(ctx, labelArgs.ID, labelArgs.Label, actor)
	})
}
func (s *Server) handleCommentList(req *Request) Response {
@@ -1443,11 +1406,7 @@ func (s *Server) handleBatch(req *Request) Response {
resp := s.handleRequest(subReq)
results = append(results, BatchResult{
Success: resp.Success,
Data: resp.Data,
Error: resp.Error,
})
results = append(results, BatchResult(resp))
if !resp.Success {
break
@@ -1537,7 +1496,7 @@ func (s *Server) getStorageForRequest(req *Request) (storage.Storage, error) {
// If we can't stat, still cache it but with zero mtime (will invalidate on next check)
info = nil
}
mtime := time.Time{}
if info != nil {
mtime = info.ModTime()
@@ -1929,7 +1888,8 @@ func (s *Server) handleCompact(req *Request) Response {
if args.All {
var candidates []*sqlite.CompactionCandidate
if args.Tier == 1 {
switch args.Tier {
case 1:
tier1, err := sqliteStore.GetTier1Candidates(ctx)
if err != nil {
return Response{
@@ -1938,7 +1898,7 @@ func (s *Server) handleCompact(req *Request) Response {
}
}
candidates = tier1
} else if args.Tier == 2 {
case 2:
tier2, err := sqliteStore.GetTier2Candidates(ctx)
if err != nil {
return Response{
@@ -1947,7 +1907,7 @@ func (s *Server) handleCompact(req *Request) Response {
}
}
candidates = tier2
} else {
default:
return Response{
Success: false,
Error: fmt.Sprintf("invalid tier: %d (must be 1 or 2)", args.Tier),
@@ -2201,7 +2161,7 @@ func (s *Server) handleExport(req *Request) Response {
result := map[string]interface{}{
"exported_count": len(exportedIDs),
"path": exportArgs.JSONLPath,
"path": exportArgs.JSONLPath,
}
data, _ := json.Marshal(result)
return Response{
+3 -19
View File
@@ -128,14 +128,6 @@ func (s *SQLiteStorage) GetTier2Candidates(ctx context.Context) ([]*CompactionCa
daysStr = "90"
}
depthStr, err := s.GetConfig(ctx, "compact_tier2_dep_levels")
if err != nil {
return nil, fmt.Errorf("failed to get compact_tier2_dep_levels: %w", err)
}
if depthStr == "" {
depthStr = "5"
}
commitsStr, err := s.GetConfig(ctx, "compact_tier2_commits")
if err != nil {
return nil, fmt.Errorf("failed to get compact_tier2_commits: %w", err)
@@ -227,20 +219,12 @@ func (s *SQLiteStorage) CheckEligibility(ctx context.Context, issueID string, ti
return false, "issue has no closed_at timestamp", nil
}
if tier == 1 {
switch tier {
case 1:
if compactionLevel != 0 {
return false, "issue is already compacted", nil
}
// Check if closed long enough
daysStr, err := s.GetConfig(ctx, "compact_tier1_days")
if err != nil {
return false, "", fmt.Errorf("failed to get compact_tier1_days: %w", err)
}
if daysStr == "" {
daysStr = "30"
}
// Check if it appears in tier1 candidates
candidates, err := s.GetTier1Candidates(ctx)
if err != nil {
@@ -255,7 +239,7 @@ func (s *SQLiteStorage) CheckEligibility(ctx context.Context, issueID string, ti
return false, "issue has open dependents or not closed long enough", nil
} else if tier == 2 {
case 2:
if compactionLevel != 1 {
return false, "issue must be at compaction level 1 for tier 2", nil
}