fix: Address all errcheck and misspell linter errors

This commit is contained in:
Steve Yegge
2025-11-01 23:56:03 -07:00
parent a2361f85e7
commit 2b086951c4
15 changed files with 61 additions and 60 deletions

View File

@@ -61,6 +61,7 @@
{"id":"bd-4d80b7b1","content_hash":"0cad3e22d722ff045a29f218962fb00bd8265a1cfc82c5b70f29ffe1a40e4088","title":"Investigate and upgrade to modernc.org/sqlite 1.39.1+","description":"We had to pin modernc.org/sqlite to v1.38.2 due to a FOREIGN KEY constraint regression in v1.39.1 (SQLite 3.50.4).\n\n**Issue:** [deleted:bd-cb64c226.2], GH #144\n\n**Symptom:** CloseIssue fails with \"FOREIGN KEY constraint failed (787)\" when called via MCP/daemon, but works fine via CLI.\n\n**Root Cause:** Unknown - likely stricter FK enforcement in SQLite 3.50.4 or modernc.org wrapper changes.\n\n**Workaround:** Pinned to v1.38.2 (SQLite 3.49.x)\n\n**TODO:**\n1. Monitor modernc.org/sqlite releases for fixes\n2. Check SQLite 3.50.5+ changelogs for FK-related fixes\n3. Investigate why daemon mode fails but CLI succeeds (connection reuse? transaction isolation?)\n4. Consider filing upstream issue with reproducible test case\n5. Upgrade when safe","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-24T11:49:12.836292-07:00","updated_at":"2025-10-30T17:12:58.211344-07:00"}
{"id":"bd-4e21b5ad","content_hash":"8029d0c5b14261648d3d17d8bc26413183962eab2875772cd2585db92c0104a6","title":"Add test case for symmetric collision (both clones create same ID simultaneously)","description":"TestTwoCloneCollision demonstrates the problem, but we need a simpler unit test for the collision resolver itself.\n\nTest should verify:\n- Two issues with same ID, different content\n- Content hash determines winner deterministically \n- Result is same regardless of which clone imports first\n- No title swapping occurs\n\nThis can be a simpler test than the full integration test.","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-29T17:46:10.046999-07:00","updated_at":"2025-10-31T12:00:43.196705-07:00","closed_at":"2025-10-31T12:00:43.196705-07:00"}
{"id":"bd-4f582ec8","content_hash":"02e00868aecbd17486f988a5927a68a07bc309978b33568361559a182eadb2cc","title":"Test auto-start in fred","description":"","status":"closed","priority":3,"issue_type":"task","created_at":"2025-10-30T17:46:16.668088-07:00","updated_at":"2025-10-31T12:00:43.185723-07:00","closed_at":"2025-10-31T12:00:43.185723-07:00"}
{"id":"bd-4ff2","content_hash":"f7477feb5757bb9663e792f4b740028a786a4ec5f0ec38c131aa4dda0dc7697b","title":"Fix CI failures before 0.21.3 release","description":"CI is failing on multiple jobs:\n1. Nix flake: Tests fail due to missing git in build environment\n2. Windows tests: Need to check what's failing\n3. Linux tests: Need to check what's failing\n4. Linter errors: Many unchecked errors need fixing\n\nNeed to fix before tagging v0.21.3 release.","status":"open","priority":0,"issue_type":"bug","created_at":"2025-11-01T23:52:09.244763-07:00","updated_at":"2025-11-01T23:52:09.244763-07:00"}
{"id":"bd-5314bddf","content_hash":"bbaf3bd26766fb78465900c455661a3608ab1d1485cb964d12229badf138753a","title":"bd detect-pollution - Test pollution detector","description":"Detect test issues that leaked into production DB.\n\nPattern matching for:\n- Titles starting with 'test', 'benchmark', 'sample'\n- Sequential numbering (test-1, test-2)\n- Generic descriptions\n- Created in rapid succession\n\nOptional AI scoring for confidence.\n\nFiles: cmd/bd/detect_pollution.go (new)","status":"open","priority":1,"issue_type":"task","created_at":"2025-10-28T14:48:17.466906-07:00","updated_at":"2025-10-30T17:12:58.219307-07:00"}
{"id":"bd-5599","content_hash":"c48839a6f7f5ca4083ced2f0f47cd250046146032555a14864ac3469a42bb76b","title":"Fix TestListCommand duplicate dependency constraint violation","description":"","status":"closed","priority":2,"issue_type":"bug","created_at":"2025-10-31T21:27:05.557548-07:00","updated_at":"2025-10-31T21:27:11.429018-07:00","closed_at":"2025-10-31T21:27:11.429018-07:00"}
{"id":"bd-581b80b3","content_hash":"04c4d952852ae2673e551d9776698c52b0189754ac5f9ca295bed464a5b86a43","title":"bd find-duplicates - AI-powered duplicate detection","description":"Find semantically duplicate issues.\n\nApproaches:\n1. Mechanical: Exact title/description matching\n2. Embeddings: Cosine similarity (cheap, scalable)\n3. AI: LLM-based semantic comparison (expensive, accurate)\n\nUses embeddings by default for \u003e100 issues.\n\nFiles: cmd/bd/find_duplicates.go (new)","status":"open","priority":1,"issue_type":"task","created_at":"2025-10-29T20:49:49.126801-07:00","updated_at":"2025-10-30T17:12:58.218673-07:00"}

View File

@@ -56,7 +56,7 @@ func runEventDrivenLoop(
defer fallbackTicker.Stop()
} else {
watcher.Start(ctx, log)
-defer watcher.Close()
+defer func() { _ = watcher.Close() }()
}
// Handle mutation events from RPC server
@@ -124,10 +124,10 @@ func runEventDrivenLoop(
return
case <-ctx.Done():
-log.log("Context canceled, shutting down")
-if watcher != nil {
-watcher.Close()
-}
+log.log("Context canceled, shutting down")
+if watcher != nil {
+_ = watcher.Close()
+}
if err := server.Stop(); err != nil {
log.log("Error stopping server: %v", err)
}
@@ -137,7 +137,7 @@ func runEventDrivenLoop(
log.log("RPC server failed: %v", err)
cancel()
if watcher != nil {
-watcher.Close()
+_ = watcher.Close()
}
if stopErr := server.Stop(); stopErr != nil {
log.log("Error stopping server: %v", stopErr)

View File

@@ -87,7 +87,7 @@ func NewFileWatcher(jsonlPath string, onChanged func()) (*FileWatcher, error) {
// File doesn't exist yet - rely on parent dir watch
fmt.Fprintf(os.Stderr, "Info: JSONL file %s doesn't exist yet, watching parent directory\n", jsonlPath)
} else {
-watcher.Close()
+_ = watcher.Close()
if fallbackDisabled {
return nil, fmt.Errorf("failed to watch JSONL and BEADS_WATCHER_FALLBACK is disabled: %w", err)
}
@@ -148,8 +148,8 @@ func (fw *FileWatcher) Start(ctx context.Context, log daemonLogger) {
// Handle JSONL removal/rename (e.g., git checkout)
if event.Name == fw.jsonlPath && (event.Op&fsnotify.Remove != 0 || event.Op&fsnotify.Rename != 0) {
-log.log("JSONL removed/renamed, re-establishing watch")
-fw.watcher.Remove(fw.jsonlPath)
+log.log("JSONL removed/renamed, re-establishing watch")
+_ = fw.watcher.Remove(fw.jsonlPath)
// Retry with exponential backoff
fw.reEstablishWatch(ctx, log)
continue

View File

@@ -77,7 +77,7 @@ uptime, last activity, and exclusive lock status.`,
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
-fmt.Fprintln(w, "WORKSPACE\tPID\tVERSION\tUPTIME\tLAST ACTIVITY\tLOCK")
+_, _ = fmt.Fprintln(w, "WORKSPACE\tPID\tVERSION\tUPTIME\tLAST ACTIVITY\tLOCK")
for _, d := range aliveDaemons {
workspace := d.WorkspacePath
@@ -99,11 +99,11 @@ uptime, last activity, and exclusive lock status.`,
lock = fmt.Sprintf("🔒 %s", d.ExclusiveLockHolder)
}
-fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n",
-workspace, d.PID, d.Version, uptime, lastActivity, lock)
-}
+_, _ = fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n",
+workspace, d.PID, d.Version, uptime, lastActivity, lock)
+}
-w.Flush()
+_ = w.Flush()
},
}
@@ -323,7 +323,7 @@ func tailFollow(filePath string) {
defer file.Close()
// Seek to end
-file.Seek(0, io.SeekEnd)
+_, _ = file.Seek(0, io.SeekEnd)
reader := bufio.NewReader(file)
for {
@@ -491,7 +491,7 @@ stale sockets, version mismatches, and unresponsive daemons.`,
fmt.Printf(" Unresponsive: %d\n\n", unresponsiveCount)
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
-fmt.Fprintln(w, "WORKSPACE\tPID\tVERSION\tSTATUS\tISSUE")
+_, _ = fmt.Fprintln(w, "WORKSPACE\tPID\tVERSION\tSTATUS\tISSUE")
for _, r := range reports {
workspace := r.Workspace
@@ -515,11 +515,11 @@ stale sockets, version mismatches, and unresponsive daemons.`,
issue = "-"
}
-fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
-workspace, pidStr, version, status, issue)
-}
+_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n",
+workspace, pidStr, version, status, issue)
+}
-w.Flush()
+_ = w.Flush()
// Exit with error if there are any issues
if staleCount > 0 || mismatchCount > 0 || unresponsiveCount > 0 {

View File

@@ -130,9 +130,9 @@ Example:
if !yes {
fmt.Printf("\nDelete %d test issues? [y/N] ", len(polluted))
var response string
-fmt.Scanln(&response)
+_, _ = fmt.Scanln(&response)
if strings.ToLower(response) != "y" {
-fmt.Println("Cancelled.")
+fmt.Println("Canceled.")
return
}
}

View File

@@ -438,10 +438,10 @@ func compareVersions(v1, v2 string) int {
// Get part value or default to 0 if part doesn't exist
if i < len(parts1) {
-fmt.Sscanf(parts1[i], "%d", &p1)
+_, _ = fmt.Sscanf(parts1[i], "%d", &p1)
}
if i < len(parts2) {
-fmt.Sscanf(parts2[i], "%d", &p2)
+_, _ = fmt.Sscanf(parts2[i], "%d", &p2)
}
if p1 < p2 {
@@ -474,7 +474,7 @@ func fetchLatestGitHubRelease() (string, error) {
if err != nil {
return "", err
}
-defer resp.Body.Close()
+defer func() { _ = resp.Body.Close() }()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("github api returned status %d", resp.StatusCode)

View File

@@ -256,9 +256,9 @@ This command:
}
fmt.Print("\nRemove these files? [y/N] ")
var response string
-fmt.Scanln(&response)
+_, _ = fmt.Scanln(&response)
if strings.ToLower(response) != "y" && strings.ToLower(response) != "yes" {
-fmt.Println("Cleanup cancelled")
+fmt.Println("Cleanup canceled")
return
}
}
@@ -302,8 +302,8 @@ This command:
ctx := context.Background()
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
if err != nil {
-store.Close()
-if jsonOutput {
+_ = store.Close()
+if jsonOutput {
outputJSON(map[string]interface{}{
"error": "hash_migration_failed",
"message": err.Error(),
@@ -319,7 +319,7 @@ This command:
if !dryRun {
backupPath := strings.TrimSuffix(targetPath, ".db") + ".backup-pre-hash-" + time.Now().Format("20060102-150405") + ".db"
if err := copyFile(targetPath, backupPath); err != nil {
-store.Close()
+_ = store.Close()
if jsonOutput {
outputJSON(map[string]interface{}{
"error": "backup_failed",
@@ -333,10 +333,10 @@ This command:
if !jsonOutput {
color.Green("✓ Created backup: %s\n", filepath.Base(backupPath))
}
-}
-mapping, err := migrateToHashIDs(ctx, store, issues, dryRun)
-store.Close()
+}
+mapping, err := migrateToHashIDs(ctx, store, issues, dryRun)
+_ = store.Close()
if err != nil {
if jsonOutput {
@@ -355,10 +355,10 @@ This command:
fmt.Printf("\nWould migrate %d issues to hash-based IDs\n", len(mapping))
} else {
color.Green("✓ Migrated %d issues to hash-based IDs\n", len(mapping))
-}
-}
-} else {
-store.Close()
+}
+}
+} else {
+_ = store.Close()
if !jsonOutput {
fmt.Println("Database already uses hash-based IDs")
}
@@ -504,7 +504,7 @@ func handleUpdateRepoID(dryRun bool, autoYes bool) {
}
os.Exit(1)
}
-defer store.Close()
+defer func() { _ = store.Close() }()
// Get old repo ID
ctx := context.Background()
@@ -549,9 +549,9 @@ func handleUpdateRepoID(dryRun bool, autoYes bool) {
fmt.Printf("New repo ID: %s\n\n", newRepoID[:8])
fmt.Printf("Continue? [y/N] ")
var response string
-fmt.Scanln(&response)
+_, _ = fmt.Scanln(&response)
if strings.ToLower(response) != "y" && strings.ToLower(response) != "yes" {
-fmt.Println("Cancelled")
+fmt.Println("Canceled")
return
}
}

View File

@@ -85,7 +85,7 @@ Use --dry-run to preview changes before applying.`,
}
os.Exit(1)
}
-defer store.Close()
+defer func() { _ = store.Close() }()
// Get all issues using SearchIssues with empty query and no filters
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})

View File

@@ -125,7 +125,7 @@ Interactive mode with --interactive prompts for each orphan.`,
for _, o := range orphans {
fmt.Printf("Remove dependency %s → %s (%s)? [y/N]: ", o.issueID, o.dependsOnID, o.depType)
var response string
-fmt.Scanln(&response)
+_, _ = fmt.Scanln(&response)
if response == "y" || response == "Y" {
// Use direct SQL to remove orphaned dependencies
// RemoveDependency tries to mark the depends_on issue as dirty, which fails for orphans

View File

@@ -600,7 +600,7 @@ Examples:
os.Exit(1)
}
tmpPath := tmpFile.Name()
-defer os.Remove(tmpPath)
+defer func() { _ = os.Remove(tmpPath) }()
// Write current value to temp file
if _, err := tmpFile.WriteString(currentValue); err != nil {

View File

@@ -163,7 +163,7 @@ func discoverDaemon(socketPath string) DaemonInfo {
}
return daemon
}
-defer client.Close()
+defer func() { _ = client.Close() }()
// Get status
status, err := client.Status()
@@ -264,7 +264,7 @@ func StopDaemon(daemon DaemonInfo) error {
// Try graceful shutdown via RPC first
client, err := rpc.TryConnectWithTimeout(daemon.SocketPath, 500*time.Millisecond)
if err == nil && client != nil {
-defer client.Close()
+defer func() { _ = client.Close() }()
if err := client.Shutdown(); err == nil {
// Wait a bit for daemon to shut down
time.Sleep(200 * time.Millisecond)
@@ -334,7 +334,7 @@ func stopDaemonWithTimeout(daemon DaemonInfo) error {
// Try RPC shutdown first (2 second timeout)
client, err := rpc.TryConnectWithTimeout(daemon.SocketPath, 2*time.Second)
if err == nil && client != nil {
-defer client.Close()
+defer func() { _ = client.Close() }()
if err := client.Shutdown(); err == nil {
// Wait and verify process died
time.Sleep(500 * time.Millisecond)

View File

@@ -20,7 +20,7 @@ func newTestStore(t *testing.T, dbPath string) *sqlite.SQLiteStorage {
// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
ctx := context.Background()
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
-store.Close()
+_ = store.Close()
t.Fatalf("Failed to set issue_prefix: %v", err)
}

View File

@@ -126,17 +126,17 @@ func compareIssues(existing, incoming *types.Issue) []string {
// hashIssueContent creates a deterministic hash of issue content (excluding ID and timestamps)
func hashIssueContent(issue *types.Issue) string {
h := sha256.New()
-fmt.Fprintf(h, "title:%s\n", issue.Title)
-fmt.Fprintf(h, "description:%s\n", issue.Description)
-fmt.Fprintf(h, "status:%s\n", issue.Status)
-fmt.Fprintf(h, "priority:%d\n", issue.Priority)
-fmt.Fprintf(h, "type:%s\n", issue.IssueType)
-fmt.Fprintf(h, "assignee:%s\n", issue.Assignee)
-fmt.Fprintf(h, "design:%s\n", issue.Design)
-fmt.Fprintf(h, "acceptance:%s\n", issue.AcceptanceCriteria)
-fmt.Fprintf(h, "notes:%s\n", issue.Notes)
+_, _ = fmt.Fprintf(h, "title:%s\n", issue.Title)
+_, _ = fmt.Fprintf(h, "description:%s\n", issue.Description)
+_, _ = fmt.Fprintf(h, "status:%s\n", issue.Status)
+_, _ = fmt.Fprintf(h, "priority:%d\n", issue.Priority)
+_, _ = fmt.Fprintf(h, "type:%s\n", issue.IssueType)
+_, _ = fmt.Fprintf(h, "assignee:%s\n", issue.Assignee)
+_, _ = fmt.Fprintf(h, "design:%s\n", issue.Design)
+_, _ = fmt.Fprintf(h, "acceptance:%s\n", issue.AcceptanceCriteria)
+_, _ = fmt.Fprintf(h, "notes:%s\n", issue.Notes)
 if issue.ExternalRef != nil {
-fmt.Fprintf(h, "external_ref:%s\n", *issue.ExternalRef)
+_, _ = fmt.Fprintf(h, "external_ref:%s\n", *issue.ExternalRef)
}
return fmt.Sprintf("%x", h.Sum(nil))
}

View File

@@ -18,7 +18,7 @@ func newTestStore(t *testing.T, dbPath string) *SQLiteStorage {
// CRITICAL (bd-166): Set issue_prefix to prevent "database not initialized" errors
ctx := context.Background()
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
-store.Close()
+_ = store.Close()
t.Fatalf("Failed to set issue_prefix: %v", err)
}

View File

@@ -21,6 +21,6 @@ func ExtractIssueNumber(issueID string) int {
return 0
}
var num int
-fmt.Sscanf(issueID[idx+1:], "%d", &num)
+_, _ = fmt.Sscanf(issueID[idx+1:], "%d", &num)
return num
}