Fix error handling consistency in auto-import and fallback paths (#47)
* Fix error handling consistency in auto-import and fallback paths

  - Add error checking/warnings for auto-import CRUD operations (UpdateIssue, CreateIssue, AddDependency)
  - Add error checking/warnings for auto-import label operations (AddLabel, RemoveLabel)
  - Add warning when import hash storage fails (prevents unnecessary re-imports)
  - Add proper error handling for UserHomeDir with fallback to current directory

  These changes make auto-import error handling consistent with manual operations and prevent silent failures that could confuse users or cause data inconsistencies.

* Remove invalid version property from golangci-lint config

* Fix linter errors: errcheck, unused, goconst, and misspell

  - Fix unchecked error returns in ROLLBACK statements
  - Fix unchecked type assertion for status field
  - Extract LIMIT SQL constant to reduce duplication
  - Fix spelling: cancelled -> canceled
  - Remove unused ensureCounterInitialized function
  - Remove unused parameter in parallel test goroutine
This commit is contained in:
@@ -1,8 +1,6 @@
|
||||
# golangci-lint configuration for beads
|
||||
# See https://golangci-lint.run/usage/configuration/
|
||||
|
||||
version: 2
|
||||
|
||||
run:
|
||||
timeout: 5m
|
||||
tests: true
|
||||
|
||||
@@ -66,7 +66,12 @@ var rootCmd = &cobra.Command{
|
||||
dbPath = foundDB
|
||||
} else {
|
||||
// Fallback to default location (will be created by init command)
|
||||
home, _ := os.UserHomeDir()
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: could not determine home directory: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "Using current directory for default database\n")
|
||||
home = "."
|
||||
}
|
||||
dbPath = filepath.Join(home, ".beads", "default.db")
|
||||
}
|
||||
}
|
||||
@@ -345,7 +350,9 @@ func autoImportIfNewer() {
|
||||
updates["closed_at"] = nil
|
||||
}
|
||||
|
||||
_ = store.UpdateIssue(ctx, issue.ID, updates, "auto-import")
|
||||
if err := store.UpdateIssue(ctx, issue.ID, updates, "auto-import"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: auto-import failed to update %s: %v\n", issue.ID, err)
|
||||
}
|
||||
} else {
|
||||
// Create new issue - enforce invariant before creation
|
||||
if issue.Status == "closed" {
|
||||
@@ -356,7 +363,9 @@ func autoImportIfNewer() {
|
||||
} else {
|
||||
issue.ClosedAt = nil
|
||||
}
|
||||
_ = store.CreateIssue(ctx, issue, "auto-import")
|
||||
if err := store.CreateIssue(ctx, issue, "auto-import"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: auto-import failed to create %s: %v\n", issue.ID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -388,7 +397,9 @@ func autoImportIfNewer() {
|
||||
}
|
||||
|
||||
if !exists {
|
||||
_ = store.AddDependency(ctx, dep, "auto-import")
|
||||
if err := store.AddDependency(ctx, dep, "auto-import"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: auto-import failed to add dependency %s -> %s: %v\n", issue.ID, dep.DependsOnID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -423,20 +434,27 @@ func autoImportIfNewer() {
|
||||
// Add missing labels
|
||||
for _, label := range issue.Labels {
|
||||
if !existingLabelMap[label] {
|
||||
_ = store.AddLabel(ctx, issue.ID, label, "auto-import")
|
||||
if err := store.AddLabel(ctx, issue.ID, label, "auto-import"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: auto-import failed to add label %s to %s: %v\n", label, issue.ID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove labels not in imported data
|
||||
for _, label := range existingLabels {
|
||||
if !importedLabelMap[label] {
|
||||
_ = store.RemoveLabel(ctx, issue.ID, label, "auto-import")
|
||||
if err := store.RemoveLabel(ctx, issue.ID, label, "auto-import"); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: auto-import failed to remove label %s from %s: %v\n", label, issue.ID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store new hash after successful import
|
||||
_ = store.SetMetadata(ctx, "last_import_hash", currentHash)
|
||||
if err := store.SetMetadata(ctx, "last_import_hash", currentHash); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to store import hash: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "Auto-import may re-run unnecessarily. Run 'bd export' to fix.\n")
|
||||
}
|
||||
}
|
||||
|
||||
// checkVersionMismatch checks if the binary version matches the database version
|
||||
@@ -1015,9 +1033,19 @@ var showCmd = &cobra.Command{
|
||||
Dependents []*types.Issue `json:"dependents,omitempty"`
|
||||
}
|
||||
details := &IssueDetails{Issue: issue}
|
||||
details.Labels, _ = store.GetLabels(ctx, issue.ID)
|
||||
details.Dependencies, _ = store.GetDependencies(ctx, issue.ID)
|
||||
details.Dependents, _ = store.GetDependents(ctx, issue.ID)
|
||||
var err error
|
||||
details.Labels, err = store.GetLabels(ctx, issue.ID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to fetch labels: %v\n", err)
|
||||
}
|
||||
details.Dependencies, err = store.GetDependencies(ctx, issue.ID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to fetch dependencies: %v\n", err)
|
||||
}
|
||||
details.Dependents, err = store.GetDependents(ctx, issue.ID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to fetch dependents: %v\n", err)
|
||||
}
|
||||
outputJSON(details)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
const limitClause = " LIMIT ?"
|
||||
|
||||
// AddComment adds a comment to an issue
|
||||
func (s *SQLiteStorage) AddComment(ctx context.Context, issueID, actor, comment string) error {
|
||||
tx, err := s.db.BeginTx(ctx, nil)
|
||||
@@ -52,7 +54,7 @@ func (s *SQLiteStorage) GetEvents(ctx context.Context, issueID string, limit int
|
||||
args := []interface{}{issueID}
|
||||
limitSQL := ""
|
||||
if limit > 0 {
|
||||
limitSQL = " LIMIT ?"
|
||||
limitSQL = limitClause
|
||||
args = append(args, limit)
|
||||
}
|
||||
|
||||
|
||||
@@ -418,42 +418,6 @@ func (s *SQLiteStorage) getNextIDForPrefix(ctx context.Context, prefix string) (
|
||||
return nextID, nil
|
||||
}
|
||||
|
||||
// ensureCounterInitialized checks if a counter exists for the given prefix,
|
||||
// and initializes it from existing issues if needed. This is lazy initialization
|
||||
// to avoid scanning the entire issues table on every CreateIssue call.
|
||||
func (s *SQLiteStorage) ensureCounterInitialized(ctx context.Context, prefix string) error {
|
||||
// Check if counter already exists for this prefix
|
||||
var exists int
|
||||
err := s.db.QueryRowContext(ctx,
|
||||
`SELECT 1 FROM issue_counters WHERE prefix = ?`, prefix).Scan(&exists)
|
||||
|
||||
if err == nil {
|
||||
// Counter exists, we're good
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != sql.ErrNoRows {
|
||||
// Unexpected error
|
||||
return fmt.Errorf("failed to check counter existence: %w", err)
|
||||
}
|
||||
|
||||
// Counter doesn't exist, initialize it from existing issues with this prefix
|
||||
_, err = s.db.ExecContext(ctx, `
|
||||
INSERT INTO issue_counters (prefix, last_id)
|
||||
SELECT ?, COALESCE(MAX(CAST(substr(id, LENGTH(?) + 2) AS INTEGER)), 0)
|
||||
FROM issues
|
||||
WHERE id LIKE ? || '-%'
|
||||
AND substr(id, LENGTH(?) + 2) GLOB '[0-9]*'
|
||||
ON CONFLICT(prefix) DO UPDATE SET
|
||||
last_id = MAX(last_id, excluded.last_id)
|
||||
`, prefix, prefix, prefix, prefix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize counter for prefix %s: %w", prefix, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncAllCounters synchronizes all ID counters based on existing issues in the database
|
||||
// This scans all issues and updates counters to prevent ID collisions with auto-generated IDs
|
||||
func (s *SQLiteStorage) SyncAllCounters(ctx context.Context) error {
|
||||
@@ -508,11 +472,11 @@ func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, act
|
||||
}
|
||||
|
||||
// Track commit state for defer cleanup
|
||||
// Use context.Background() for ROLLBACK to ensure cleanup happens even if ctx is cancelled
|
||||
// Use context.Background() for ROLLBACK to ensure cleanup happens even if ctx is canceled
|
||||
committed := false
|
||||
defer func() {
|
||||
if !committed {
|
||||
conn.ExecContext(context.Background(), "ROLLBACK")
|
||||
_, _ = conn.ExecContext(context.Background(), "ROLLBACK")
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -955,7 +919,10 @@ func (s *SQLiteStorage) UpdateIssue(ctx context.Context, id string, updates map[
|
||||
|
||||
// Auto-manage closed_at when status changes (enforce invariant)
|
||||
if statusVal, ok := updates["status"]; ok {
|
||||
newStatus := statusVal.(string)
|
||||
newStatus, ok := statusVal.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("status must be a string")
|
||||
}
|
||||
if newStatus == string(types.StatusClosed) {
|
||||
// Changing to closed: ensure closed_at is set
|
||||
if _, hasClosedAt := updates["closed_at"]; !hasClosedAt {
|
||||
|
||||
@@ -989,7 +989,7 @@ func TestParallelIssueCreation(t *testing.T) {
|
||||
ids := make(chan string, numIssues)
|
||||
|
||||
for i := 0; i < numIssues; i++ {
|
||||
go func(num int) {
|
||||
go func() {
|
||||
issue := &types.Issue{
|
||||
Title: "Parallel test issue",
|
||||
Status: types.StatusOpen,
|
||||
@@ -1003,7 +1003,7 @@ func TestParallelIssueCreation(t *testing.T) {
|
||||
}
|
||||
ids <- issue.ID
|
||||
errors <- nil
|
||||
}(i)
|
||||
}()
|
||||
}
|
||||
|
||||
// Collect results
|
||||
|
||||
Reference in New Issue
Block a user