Merge branch 'main' into fix-monitor

Amp-Thread-ID: https://ampcode.com/threads/T-7bbd9558-2eb4-483a-bf7b-c61ea9c22092
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Steve Yegge
2025-11-20 12:34:30 -05:00
12 changed files with 612 additions and 176 deletions

File diff suppressed because one or more lines are too long

View File

@@ -6,6 +6,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
"time" "time"
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
@@ -27,6 +28,7 @@ type FileWatcher struct {
lastHeadModTime time.Time lastHeadModTime time.Time
lastHeadExists bool lastHeadExists bool
cancel context.CancelFunc cancel context.CancelFunc
wg sync.WaitGroup // Track goroutines for graceful shutdown (bd-jo38)
} }
// NewFileWatcher creates a file watcher for the given JSONL path. // NewFileWatcher creates a file watcher for the given JSONL path.
@@ -120,7 +122,9 @@ func (fw *FileWatcher) Start(ctx context.Context, log daemonLogger) {
return return
} }
fw.wg.Add(1)
go func() { go func() {
defer fw.wg.Done()
jsonlBase := filepath.Base(fw.jsonlPath) jsonlBase := filepath.Base(fw.jsonlPath)
for { for {
@@ -212,7 +216,9 @@ func (fw *FileWatcher) reEstablishWatch(ctx context.Context, log daemonLogger) {
func (fw *FileWatcher) startPolling(ctx context.Context, log daemonLogger) { func (fw *FileWatcher) startPolling(ctx context.Context, log daemonLogger) {
log.log("Starting polling mode with %v interval", fw.pollInterval) log.log("Starting polling mode with %v interval", fw.pollInterval)
ticker := time.NewTicker(fw.pollInterval) ticker := time.NewTicker(fw.pollInterval)
fw.wg.Add(1)
go func() { go func() {
defer fw.wg.Done()
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
@@ -297,6 +303,8 @@ func (fw *FileWatcher) Close() error {
if fw.cancel != nil { if fw.cancel != nil {
fw.cancel() fw.cancel()
} }
// Wait for goroutines to finish before cleanup (bd-jo38)
fw.wg.Wait()
fw.debouncer.Cancel() fw.debouncer.Cancel()
if fw.watcher != nil { if fw.watcher != nil {
return fw.watcher.Close() return fw.watcher.Close()

View File

@@ -248,7 +248,24 @@ Stops the daemon gracefully, then starts a new one.`,
os.Exit(1) os.Exit(1)
} }
// Don't wait for daemon to exit (it will fork and continue in background) // Don't wait for daemon to exit (it will fork and continue in background)
go func() { _ = daemonCmd.Wait() }() // Use timeout to prevent goroutine leak if daemon never completes (bd-zqmb)
go func() {
done := make(chan struct{})
go func() {
_ = daemonCmd.Wait()
close(done)
}()
select {
case <-done:
// Daemon exited normally (forked successfully)
case <-time.After(10 * time.Second):
// Timeout - daemon should have forked by now
if daemonCmd.Process != nil {
_ = daemonCmd.Process.Kill()
}
}
}()
if jsonOutput { if jsonOutput {
outputJSON(map[string]interface{}{ outputJSON(map[string]interface{}{
"workspace": workspace, "workspace": workspace,

View File

@@ -419,16 +419,8 @@ func checkIDFormat(path string) doctorCheck {
} }
defer func() { _ = db.Close() }() // Intentionally ignore close error defer func() { _ = db.Close() }() // Intentionally ignore close error
// Get first issue to check ID format // Get sample of issues to check ID format (up to 10 for pattern analysis)
var issueID string rows, err := db.Query("SELECT id FROM issues ORDER BY created_at LIMIT 10")
err = db.QueryRow("SELECT id FROM issues ORDER BY created_at LIMIT 1").Scan(&issueID)
if err == sql.ErrNoRows {
return doctorCheck{
Name: "Issue IDs",
Status: statusOK,
Message: "No issues yet (will use hash-based IDs)",
}
}
if err != nil { if err != nil {
return doctorCheck{ return doctorCheck{
Name: "Issue IDs", Name: "Issue IDs",
@@ -436,9 +428,26 @@ func checkIDFormat(path string) doctorCheck {
Message: "Unable to query issues", Message: "Unable to query issues",
} }
} }
defer rows.Close()
// Detect ID format var issueIDs []string
if isHashID(issueID) { for rows.Next() {
var id string
if err := rows.Scan(&id); err == nil {
issueIDs = append(issueIDs, id)
}
}
if len(issueIDs) == 0 {
return doctorCheck{
Name: "Issue IDs",
Status: statusOK,
Message: "No issues yet (will use hash-based IDs)",
}
}
// Detect ID format using robust heuristic
if detectHashBasedIDs(db, issueIDs) {
return doctorCheck{ return doctorCheck{
Name: "Issue IDs", Name: "Issue IDs",
Status: statusOK, Status: statusOK,
@@ -522,6 +531,110 @@ func getDatabaseVersionFromPath(dbPath string) string {
return "unknown" return "unknown"
} }
// detectHashBasedIDs uses multiple heuristics to determine if the database uses hash-based IDs.
// This is more robust than checking a single ID's format, since base36 hash IDs can be all-numeric.
//
// Heuristics, roughly strongest first:
//  1. Presence of the child_counters table (created only for hash-ID databases).
//  2. Any sample ID containing a base36 letter (sequential IDs are purely numeric).
//  3. Pattern analysis of all-numeric suffixes: adaptive lengths, leading zeros,
//     or non-sequential ordering (sampleIDs come in ORDER BY created_at).
//
// Returns false when no heuristic fires — the conservative default, so callers
// recommend migration rather than miss a sequential-ID database.
func detectHashBasedIDs(db *sql.DB, sampleIDs []string) bool {
	// Heuristic 1: check for child_counters table (added for hash ID support).
	var tableName string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='table' AND name='child_counters'
	`).Scan(&tableName)
	if err == nil {
		// child_counters table exists - this is a strong indicator of hash IDs.
		return true
	}

	// Heuristic 2: check if any sample ID clearly contains letters (a-z).
	// Hash IDs use base36 (0-9, a-z); sequential IDs are purely numeric.
	for _, id := range sampleIDs {
		if isHashID(id) {
			return true
		}
	}

	// Heuristic 3 needs at least two IDs to say anything about length
	// variation or ordering.
	if len(sampleIDs) < 2 {
		return false
	}

	// Extract suffixes (part after "prefix-") for analysis.
	var suffixes []string
	for _, id := range sampleIDs {
		parts := strings.SplitN(id, "-", 2)
		if len(parts) == 2 {
			// Strip hierarchical suffix like .1 or .1.2.
			baseSuffix := strings.Split(parts[1], ".")[0]
			suffixes = append(suffixes, baseSuffix)
		}
	}
	if len(suffixes) < 2 {
		return false
	}

	// 3a: Check for variable lengths (strong indicator of adaptive hash IDs).
	// Sequential IDs also vary in length (1, 10, 100), but a sample of up to
	// ten consecutive-by-creation IDs typically spans only 1-2 length buckets;
	// 3+ distinct lengths indicates random, adaptive-length hash IDs.
	lengths := make(map[int]int) // length -> count
	for _, s := range suffixes {
		lengths[len(s)]++
	}
	if len(lengths) >= 3 {
		return true
	}

	// 3b: Check for leading zeros. Sequential IDs are never zero-padded
	// (bd-1, bd-2, bd-10); zero-padded suffixes (bd-0088, bd-02a4) indicate
	// hash IDs.
	for _, s := range suffixes {
		if len(s) > 1 && s[0] == '0' {
			return true
		}
	}

	// 3c: Strictly parse every suffix as a decimal number. Note: the previous
	// implementation used fmt.Sscanf(s, "%d", ...), which silently accepts
	// partial matches ("12abc" -> 12) plus sign/space prefixes; an explicit
	// digit scan rejects those outright.
	nums := make([]int, 0, len(suffixes))
	for _, s := range suffixes {
		n, ok := parseAllDigits(s)
		if !ok {
			// Non-numeric suffix that carried no letters (heuristic 2 would
			// have caught letters) — cannot classify by ordering; fall through
			// to the conservative default.
			return false
		}
		nums = append(nums, n)
	}

	// 3d: Check for non-sequential ordering. The sample is ordered by
	// created_at, so sequential IDs should be increasing with modest gaps
	// (deleted issues); decreases or large jumps indicate random hash values
	// (e.g. 88, 13452, 676).
	for i := 1; i < len(nums); i++ {
		diff := nums[i] - nums[i-1]
		if diff < 0 || diff > 100 {
			return true // non-sequential pattern => hash IDs
		}
	}

	// If we can't determine for sure, default to assuming sequential IDs.
	// This is conservative - better to recommend migration than miss sequential IDs.
	return false
}

// parseAllDigits parses s as a non-negative base-10 integer, requiring every
// character to be an ASCII digit. Returns (0, false) for an empty string, any
// non-digit character, or a string too long to fit an int (18 digits always
// fit in int64; real issue counters never get close).
func parseAllDigits(s string) (int, bool) {
	if s == "" || len(s) > 18 {
		return 0, false
	}
	n := 0
	for _, c := range s {
		if c < '0' || c > '9' {
			return 0, false
		}
		n = n*10 + int(c-'0')
	}
	return n, true
}
// Note: isHashID is defined in migrate_hash_ids.go to avoid duplication // Note: isHashID is defined in migrate_hash_ids.go to avoid duplication
// compareVersions compares two semantic version strings. // compareVersions compares two semantic version strings.

View File

@@ -1,7 +1,9 @@
package main package main
import ( import (
"database/sql"
"encoding/json" "encoding/json"
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@@ -98,6 +100,250 @@ func TestDoctorJSONOutput(t *testing.T) {
// Note: isHashID is tested in migrate_hash_ids_test.go // Note: isHashID is tested in migrate_hash_ids_test.go
// TestDetectHashBasedIDs exercises detectHashBasedIDs against a real SQLite
// database, one subtest per heuristic: letter-bearing base36 IDs, variable
// suffix lengths, leading zeros, non-sequential numeric ordering, the
// child_counters table marker, hierarchical child IDs, and single-ID edge
// cases. Requires the sqlite3 driver registered elsewhere in the package.
func TestDetectHashBasedIDs(t *testing.T) {
	tests := []struct {
		name      string   // subtest name
		sampleIDs []string // issue IDs inserted into the fixture database
		hasTable  bool     // whether to create the child_counters table (heuristic 1)
		expected  bool     // expected detectHashBasedIDs result
	}{
		{
			name:      "hash IDs with letters",
			sampleIDs: []string{"bd-a3f8e9", "bd-b2c4d6"},
			hasTable:  false,
			expected:  true,
		},
		{
			name:      "hash IDs with mixed alphanumeric",
			sampleIDs: []string{"bd-0134cc5a", "bd-abc123"},
			hasTable:  false,
			expected:  true,
		},
		{
			name:      "hash IDs all numeric with variable length",
			sampleIDs: []string{"bd-0088", "bd-0134cc5a", "bd-02a4"},
			hasTable:  false,
			expected:  true, // Variable length indicates hash IDs
		},
		{
			name:      "hash IDs with leading zeros",
			sampleIDs: []string{"bd-0088", "bd-02a4", "bd-05a1"},
			hasTable:  false,
			expected:  true, // Leading zeros indicate hash IDs
		},
		{
			name:      "hash IDs all numeric non-sequential",
			sampleIDs: []string{"bd-0088", "bd-2312", "bd-0458"},
			hasTable:  false,
			expected:  true, // Non-sequential pattern
		},
		{
			name:      "sequential IDs",
			sampleIDs: []string{"bd-1", "bd-2", "bd-3", "bd-4"},
			hasTable:  false,
			expected:  false, // Sequential pattern
		},
		{
			name:      "sequential IDs with gaps",
			sampleIDs: []string{"bd-1", "bd-5", "bd-10", "bd-15"},
			hasTable:  false,
			expected:  false, // Still sequential pattern (small gaps allowed)
		},
		{
			name:      "database with child_counters table",
			sampleIDs: []string{"bd-1", "bd-2"},
			hasTable:  true,
			expected:  true, // child_counters table indicates hash IDs
		},
		{
			name:      "hash IDs with hierarchical children",
			sampleIDs: []string{"bd-a3f8e9.1", "bd-a3f8e9.2", "bd-b2c4d6"},
			hasTable:  false,
			expected:  true, // Base IDs have letters
		},
		{
			name:      "edge case: single ID with letters",
			sampleIDs: []string{"bd-abc"},
			hasTable:  false,
			expected:  true,
		},
		{
			name:      "edge case: single sequential ID",
			sampleIDs: []string{"bd-1"},
			hasTable:  false,
			expected:  false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create temporary database (cleaned up automatically by t.TempDir).
			tmpDir := t.TempDir()
			dbPath := filepath.Join(tmpDir, "test.db")
			// Open database and create schema
			db, err := sql.Open("sqlite3", dbPath)
			if err != nil {
				t.Fatalf("Failed to open database: %v", err)
			}
			defer db.Close()
			// Create issues table — minimal schema: only id and created_at are
			// read by the code under test.
			_, err = db.Exec(`
				CREATE TABLE IF NOT EXISTS issues (
					id TEXT PRIMARY KEY,
					title TEXT,
					created_at TIMESTAMP
				)
			`)
			if err != nil {
				t.Fatalf("Failed to create issues table: %v", err)
			}
			// Create child_counters table if test requires it (triggers
			// heuristic 1 regardless of ID shapes).
			if tt.hasTable {
				_, err = db.Exec(`
					CREATE TABLE IF NOT EXISTS child_counters (
						parent_id TEXT PRIMARY KEY,
						last_child INTEGER NOT NULL DEFAULT 0
					)
				`)
				if err != nil {
					t.Fatalf("Failed to create child_counters table: %v", err)
				}
			}
			// Insert sample issues
			for _, id := range tt.sampleIDs {
				_, err = db.Exec("INSERT INTO issues (id, title, created_at) VALUES (?, ?, datetime('now'))",
					id, "Test issue")
				if err != nil {
					t.Fatalf("Failed to insert issue %s: %v", id, err)
				}
			}
			// Test detection
			result := detectHashBasedIDs(db, tt.sampleIDs)
			if result != tt.expected {
				t.Errorf("detectHashBasedIDs() = %v, want %v", result, tt.expected)
			}
		})
	}
}
// TestCheckIDFormat is an end-to-end test of the checkIDFormat doctor check:
// it builds a workspace directory with a .beads/beads.db SQLite database,
// seeds it with issue IDs, and asserts the check's status (OK for hash-based
// IDs, warning with a Fix suggestion for sequential IDs).
func TestCheckIDFormat(t *testing.T) {
	tests := []struct {
		name           string   // subtest name
		issueIDs       []string // issue IDs seeded into the database
		createTable    bool     // create child_counters table
		expectedStatus string   // expected doctorCheck.Status
	}{
		{
			name:           "hash IDs with letters",
			issueIDs:       []string{"bd-a3f8e9", "bd-b2c4d6", "bd-xyz123"},
			createTable:    false,
			expectedStatus: statusOK,
		},
		{
			name:           "hash IDs all numeric with leading zeros",
			issueIDs:       []string{"bd-0088", "bd-02a4", "bd-05a1", "bd-0458"},
			createTable:    false,
			expectedStatus: statusOK,
		},
		{
			name:           "hash IDs with child_counters table",
			issueIDs:       []string{"bd-123", "bd-456"},
			createTable:    true,
			expectedStatus: statusOK,
		},
		{
			name:           "sequential IDs",
			issueIDs:       []string{"bd-1", "bd-2", "bd-3", "bd-4"},
			createTable:    false,
			expectedStatus: statusWarning,
		},
		{
			name:           "mixed: mostly hash IDs",
			issueIDs:       []string{"bd-0088", "bd-0134cc5a", "bd-02a4"},
			createTable:    false,
			expectedStatus: statusOK, // Variable length = hash IDs
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create temporary workspace with the .beads directory layout that
			// checkIDFormat expects.
			tmpDir := t.TempDir()
			beadsDir := filepath.Join(tmpDir, ".beads")
			if err := os.Mkdir(beadsDir, 0750); err != nil {
				t.Fatal(err)
			}
			// Create database
			dbPath := filepath.Join(beadsDir, "beads.db")
			db, err := sql.Open("sqlite3", dbPath)
			if err != nil {
				t.Fatalf("Failed to open database: %v", err)
			}
			defer db.Close()
			// Create schema
			_, err = db.Exec(`
				CREATE TABLE IF NOT EXISTS issues (
					id TEXT PRIMARY KEY,
					title TEXT NOT NULL,
					created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
				)
			`)
			if err != nil {
				t.Fatalf("Failed to create issues table: %v", err)
			}
			if tt.createTable {
				_, err = db.Exec(`
					CREATE TABLE IF NOT EXISTS child_counters (
						parent_id TEXT PRIMARY KEY,
						last_child INTEGER NOT NULL DEFAULT 0
					)
				`)
				if err != nil {
					t.Fatalf("Failed to create child_counters table: %v", err)
				}
			}
			// Insert test issues. Each row gets a distinct created_at offset
			// (+i seconds) so the ORDER BY created_at sample preserves the
			// slice order above.
			for i, id := range tt.issueIDs {
				_, err = db.Exec(
					"INSERT INTO issues (id, title, created_at) VALUES (?, ?, datetime('now', ?||' seconds'))",
					id, "Test issue "+id, fmt.Sprintf("+%d", i))
				if err != nil {
					t.Fatalf("Failed to insert issue %s: %v", id, err)
				}
			}
			// Close explicitly before the check opens its own connection to
			// the same file (the deferred Close then becomes a harmless no-op).
			db.Close()
			// Run check
			check := checkIDFormat(tmpDir)
			if check.Status != tt.expectedStatus {
				t.Errorf("Expected status %s, got %s (message: %s)", tt.expectedStatus, check.Status, check.Message)
			}
			if tt.expectedStatus == statusOK && check.Status == statusOK {
				if !strings.Contains(check.Message, "hash-based") {
					t.Errorf("Expected hash-based message, got: %s", check.Message)
				}
			}
			if tt.expectedStatus == statusWarning && check.Status == statusWarning {
				if check.Fix == "" {
					t.Error("Expected fix message for sequential IDs")
				}
			}
		})
	}
}
func TestCheckInstallation(t *testing.T) { func TestCheckInstallation(t *testing.T) {
// Test with missing .beads directory // Test with missing .beads directory
tmpDir := t.TempDir() tmpDir := t.TempDir()

View File

@@ -251,13 +251,33 @@ func TestIsHashID(t *testing.T) {
id string id string
expected bool expected bool
}{ }{
// Sequential IDs (numeric only, short)
{"bd-1", false}, {"bd-1", false},
{"bd-123", false}, {"bd-123", false},
{"bd-9999", false},
// Hash IDs with letters
{"bd-a3f8e9a2", true}, {"bd-a3f8e9a2", true},
{"bd-abc123", true}, {"bd-abc123", true},
{"bd-123abc", true}, {"bd-123abc", true},
{"bd-a3f8e9a2.1", true}, {"bd-a3f8e9a2.1", true},
{"bd-a3f8e9a2.1.2", true}, {"bd-a3f8e9a2.1.2", true},
// Hash IDs that are numeric but 5+ characters (likely hash)
{"bd-12345", true},
{"bd-0088", false}, // 4 chars, all numeric - ambiguous, defaults to false
{"bd-00880", true}, // 5+ chars, likely hash
// Base36 hash IDs with letters
{"bd-5n3", true},
{"bd-65w", true},
{"bd-jmx", true},
{"bd-4rt", true},
// Edge cases
{"bd-", false}, // Empty suffix
{"invalid", false}, // No dash
{"bd-0", false}, // Single digit
} }
for _, tt := range tests { for _, tt := range tests {

View File

@@ -34,7 +34,12 @@ var showCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error resolving ID %s: %v\n", id, err) fmt.Fprintf(os.Stderr, "Error resolving ID %s: %v\n", id, err)
os.Exit(1) os.Exit(1)
} }
resolvedIDs = append(resolvedIDs, string(resp.Data)) var resolvedID string
if err := json.Unmarshal(resp.Data, &resolvedID); err != nil {
fmt.Fprintf(os.Stderr, "Error unmarshaling resolved ID: %v\n", err)
os.Exit(1)
}
resolvedIDs = append(resolvedIDs, resolvedID)
} }
} else { } else {
// In direct mode, resolve via storage // In direct mode, resolve via storage
@@ -392,7 +397,12 @@ var updateCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error resolving ID %s: %v\n", id, err) fmt.Fprintf(os.Stderr, "Error resolving ID %s: %v\n", id, err)
os.Exit(1) os.Exit(1)
} }
resolvedIDs = append(resolvedIDs, string(resp.Data)) var resolvedID string
if err := json.Unmarshal(resp.Data, &resolvedID); err != nil {
fmt.Fprintf(os.Stderr, "Error unmarshaling resolved ID: %v\n", err)
os.Exit(1)
}
resolvedIDs = append(resolvedIDs, resolvedID)
} }
} else { } else {
var err error var err error
@@ -711,7 +721,12 @@ var closeCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error resolving ID %s: %v\n", id, err) fmt.Fprintf(os.Stderr, "Error resolving ID %s: %v\n", id, err)
os.Exit(1) os.Exit(1)
} }
resolvedIDs = append(resolvedIDs, string(resp.Data)) var resolvedID string
if err := json.Unmarshal(resp.Data, &resolvedID); err != nil {
fmt.Fprintf(os.Stderr, "Error unmarshaling resolved ID: %v\n", err)
os.Exit(1)
}
resolvedIDs = append(resolvedIDs, resolvedID)
} }
} else { } else {
var err error var err error

2
go.mod
View File

@@ -5,7 +5,7 @@ go 1.24.0
toolchain go1.24.2 toolchain go1.24.2
require ( require (
github.com/anthropics/anthropic-sdk-go v1.17.0 github.com/anthropics/anthropic-sdk-go v1.18.0
github.com/fatih/color v1.18.0 github.com/fatih/color v1.18.0
github.com/fsnotify/fsnotify v1.9.0 github.com/fsnotify/fsnotify v1.9.0
github.com/google/go-cmp v0.7.0 github.com/google/go-cmp v0.7.0

4
go.sum
View File

@@ -1,5 +1,5 @@
github.com/anthropics/anthropic-sdk-go v1.17.0 h1:BwK8ApcmaAUkvZTiQE0yi3R9XneEFskDIjLTmOAFZxQ= github.com/anthropics/anthropic-sdk-go v1.18.0 h1:jfxRA7AqZoCm83nHO/OVQp8xuwjUKtBziEdMbfmofHU=
github.com/anthropics/anthropic-sdk-go v1.17.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE= github.com/anthropics/anthropic-sdk-go v1.18.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

View File

@@ -210,7 +210,10 @@ func (s *Server) handleConnection(conn net.Conn) {
Success: false, Success: false,
Error: fmt.Sprintf("invalid request: %v", err), Error: fmt.Sprintf("invalid request: %v", err),
} }
s.writeResponse(writer, resp) if err := s.writeResponse(writer, resp); err != nil {
// Connection broken, stop handling this connection
return
}
continue continue
} }
@@ -220,15 +223,32 @@ func (s *Server) handleConnection(conn net.Conn) {
} }
resp := s.handleRequest(&req) resp := s.handleRequest(&req)
s.writeResponse(writer, resp) if err := s.writeResponse(writer, resp); err != nil {
// Connection broken, stop handling this connection
return
}
} }
} }
func (s *Server) writeResponse(writer *bufio.Writer, resp Response) { func (s *Server) writeResponse(writer *bufio.Writer, resp Response) error {
data, _ := json.Marshal(resp) data, err := json.Marshal(resp)
_, _ = writer.Write(data) if err != nil {
_ = writer.WriteByte('\n') return fmt.Errorf("failed to marshal response: %w", err)
_ = writer.Flush() }
if _, err := writer.Write(data); err != nil {
return fmt.Errorf("failed to write response: %w", err)
}
if err := writer.WriteByte('\n'); err != nil {
return fmt.Errorf("failed to write newline: %w", err)
}
if err := writer.Flush(); err != nil {
return fmt.Errorf("failed to flush response: %w", err)
}
return nil
} }
func (s *Server) handleShutdown(_ *Request) Response { func (s *Server) handleShutdown(_ *Request) Response {

View File

@@ -8,6 +8,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"sync/atomic" "sync/atomic"
"time" "time"
@@ -113,6 +114,15 @@ func New(path string) (*SQLiteStorage, error) {
if isInMemory { if isInMemory {
db.SetMaxOpenConns(1) db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1) db.SetMaxIdleConns(1)
} else {
// For file-based databases in daemon mode, limit connection pool to prevent
// connection exhaustion under concurrent load. SQLite WAL mode supports
// 1 writer + unlimited readers, but we limit to prevent goroutine pile-up
// on write lock contention (bd-qhws).
maxConns := runtime.NumCPU() + 1 // 1 writer + N readers
db.SetMaxOpenConns(maxConns)
db.SetMaxIdleConns(2)
db.SetConnMaxLifetime(0) // SQLite doesn't need connection recycling
} }
// For file-based databases, enable WAL mode once after opening the connection. // For file-based databases, enable WAL mode once after opening the connection.
@@ -1134,11 +1144,11 @@ func (s *SQLiteStorage) findAllDependentsRecursive(ctx context.Context, tx *sql.
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer rows.Close()
for rows.Next() { for rows.Next() {
var depID string var depID string
if err := rows.Scan(&depID); err != nil { if err := rows.Scan(&depID); err != nil {
_ = rows.Close()
return nil, err return nil, err
} }
if !result[depID] { if !result[depID] {
@@ -1147,10 +1157,8 @@ func (s *SQLiteStorage) findAllDependentsRecursive(ctx context.Context, tx *sql.
} }
} }
if err := rows.Err(); err != nil { if err := rows.Err(); err != nil {
_ = rows.Close()
return nil, err return nil, err
} }
_ = rows.Close()
} }
return result, nil return result, nil

View File

@@ -37,6 +37,14 @@ func ParseIssueID(input string, prefix string) string {
// - No issue found matching the ID // - No issue found matching the ID
// - Multiple issues match (ambiguous prefix) // - Multiple issues match (ambiguous prefix)
func ResolvePartialID(ctx context.Context, store storage.Storage, input string) (string, error) { func ResolvePartialID(ctx context.Context, store storage.Storage, input string) (string, error) {
// Fast path: if the user typed an exact ID that exists, return it as-is.
// This preserves behavior where issue IDs may not match the configured
// issue_prefix (e.g. cross-repo IDs like "ao-izl"), while still allowing
// prefix-based and hash-based resolution for other inputs.
if issue, err := store.GetIssue(ctx, input); err == nil && issue != nil {
return input, nil
}
// Get the configured prefix // Get the configured prefix
prefix, err := store.GetConfig(ctx, "issue_prefix") prefix, err := store.GetConfig(ctx, "issue_prefix")
if err != nil || prefix == "" { if err != nil || prefix == "" {
@@ -63,7 +71,7 @@ func ResolvePartialID(ctx context.Context, store storage.Storage, input string)
normalizedID = prefixWithHyphen + input normalizedID = prefixWithHyphen + input
} }
// First try exact match // First try exact match on normalized ID
issue, err := store.GetIssue(ctx, normalizedID) issue, err := store.GetIssue(ctx, normalizedID)
if err == nil && issue != nil { if err == nil && issue != nil {
return normalizedID, nil return normalizedID, nil