Merge main into PR #160 - combine reverse mode with substring bugfix

Amp-Thread-ID: https://ampcode.com/threads/T-b2413b0e-2720-45b1-9b3d-acaa7d4cf9b4
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Steve Yegge
2025-10-27 13:05:41 -07:00
16 changed files with 2842 additions and 488 deletions

View File

@@ -78,6 +78,15 @@
{"id":"bd-169","title":"Add test for CreateIssue with missing issue_prefix","description":"Add explicit test case that verifies CreateIssue fails correctly when issue_prefix config is missing.\n\n**Test:**\n```go\nfunc TestCreateIssue_MissingPrefix(t *testing.T) {\n store, cleanup := setupTestDB(t)\n defer cleanup()\n \n ctx := context.Background()\n \n // Clear the issue_prefix config\n err := store.SetConfig(ctx, \"issue_prefix\", \"\")\n require.NoError(t, err)\n \n // Attempt to create issue should fail\n issue := \u0026types.Issue{\n Title: \"Test issue\",\n Status: types.StatusOpen,\n Priority: 1,\n IssueType: types.TypeTask,\n }\n \n err = store.CreateIssue(ctx, issue, \"test\")\n require.Error(t, err)\n assert.Contains(t, err.Error(), \"database not initialized\")\n assert.Contains(t, err.Error(), \"issue_prefix config is missing\")\n}\n```\n\nThis ensures the fix for bd-166 doesn't regress.","status":"open","priority":3,"issue_type":"task","created_at":"2025-10-26T21:54:36.63521-07:00","updated_at":"2025-10-26T21:54:36.63521-07:00","dependencies":[{"issue_id":"bd-169","depends_on_id":"bd-166","type":"discovered-from","created_at":"2025-10-26T21:54:41.995525-07:00","created_by":"daemon"}]}
{"id":"bd-17","title":"Update EXTENDING.md with UnderlyingDB() usage and best practices","description":"EXTENDING.md currently shows how to use direct sql.Open() to access the database, but doesn't mention the new UnderlyingDB() method that's the recommended way for extensions.\n\n**Update needed:**\n1. Add section showing UnderlyingDB() usage:\n ```go\n store, err := beads.NewSQLiteStorage(dbPath)\n db := store.UnderlyingDB()\n // Create extension tables using db\n ```\n\n2. Document when to use UnderlyingDB() vs direct sql.Open():\n - Use UnderlyingDB() when you want to share the storage connection\n - Use sql.Open() when you need independent connection management\n\n3. Add safety warnings (cross-reference from UnderlyingDB() docs):\n - Don't close the DB\n - Don't modify pool settings\n - Keep transactions short\n\n4. Update the VC example to show UnderlyingDB() pattern\n\n5. Explain beads.Storage.UnderlyingDB() in the API section","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-22T17:07:56.820056-07:00","updated_at":"2025-10-25T23:15:33.478579-07:00","closed_at":"2025-10-22T19:41:19.895847-07:00","dependencies":[{"issue_id":"bd-17","depends_on_id":"bd-10","type":"discovered-from","created_at":"2025-10-24T13:17:40.32522-07:00","created_by":"renumber"}]}
{"id":"bd-170","title":"Clean up beads-* duplicate issues and review corrupt backup for missing issues","description":"## Current State\n- Database has 3 duplicate beads-* issues (beads-2, beads-3, beads-4) that should be deleted\n- Have corrupt backup: `.beads/beads.db.corrupt-backup` (4.4MB) from bd-166 corruption incident\n- Current clean DB has 172 issues (155 closed, 14 open, 3 in_progress)\n\n## Tasks\n1. **Delete beads-* duplicates** - these are corrupted duplicates from bd-166\n ```bash\n sqlite3 .beads/beads.db \"DELETE FROM issues WHERE id LIKE 'beads-%';\"\n ```\n\n2. **Review corrupt backup for missing issues**\n - Open corrupt backup: `sqlite3 .beads/beads.db.corrupt-backup`\n - Compare issue counts: backup had ~338 issues (165 bd- + 173 beads- duplicates)\n - Check if any ~8 issues exist in backup that are NOT in current DB\n - Cherry-pick any legitimate issues that were lost during cleanup\n\n3. **Verification**\n - Compare issue IDs between corrupt backup and current DB\n - Identify any missing issues worth recovering\n - Document findings\n\n## Why P0\nThis blocks clean database state and may contain lost work from the corruption incident.","status":"closed","priority":0,"issue_type":"task","created_at":"2025-10-26T22:30:00.126524-07:00","updated_at":"2025-10-26T22:30:35.01995-07:00","closed_at":"2025-10-26T22:30:35.01995-07:00"}
{"id":"bd-171","title":"Address gosec security warnings (102 issues)","description":"Security linter warnings: file permissions (0755 should be 0750), G304 file inclusion via variable, G204 subprocess launches. Many are false positives but should be reviewed.","design":"Review each gosec warning. Add exclusions for legitimate cases to .golangci.yml. Fix real security issues (overly permissive file modes).","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-26T23:54:06.320626-07:00","updated_at":"2025-10-26T23:54:06.320626-07:00"}
{"id":"bd-172","title":"Add optional post-merge git hook example for bd sync","description":"Create example git hook that auto-runs bd sync after git pull/merge.\n\nAdd to examples/git-hooks/:\n- post-merge hook that checks if .beads/issues.jsonl changed\n- If changed: run `bd sync` automatically\n- Make it optional/documented (not auto-installed)\n\nBenefits:\n- Zero-friction sync after git pull\n- Complements auto-detection as belt-and-suspenders\n\nNote: post-merge hook already exists for pre-commit/post-merge. Extend it to support sync.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-26T23:54:06.321545-07:00","updated_at":"2025-10-26T23:54:06.321545-07:00"}
{"id":"bd-173","title":"Address gosec security warnings (102 issues)","description":"Security linter warnings: file permissions (0755 should be 0750), G304 file inclusion via variable, G204 subprocess launches. Many are false positives but should be reviewed.","design":"Review each gosec warning. Add exclusions for legitimate cases to .golangci.yml. Fix real security issues (overly permissive file modes).","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-26T23:55:58.5422-07:00","updated_at":"2025-10-26T23:55:58.5422-07:00"}
{"id":"bd-174","title":"Add optional post-merge git hook example for bd sync","description":"Create example git hook that auto-runs bd sync after git pull/merge.\n\nAdd to examples/git-hooks/:\n- post-merge hook that checks if .beads/issues.jsonl changed\n- If changed: run `bd sync` automatically\n- Make it optional/documented (not auto-installed)\n\nBenefits:\n- Zero-friction sync after git pull\n- Complements auto-detection as belt-and-suspenders\n\nNote: post-merge hook already exists for pre-commit/post-merge. Extend it to support sync.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-26T23:55:58.542616-07:00","updated_at":"2025-10-26T23:55:58.542616-07:00"}
{"id":"bd-175","title":"Add test coverage for internal/storage/memory backend","description":"","design":"Create internal/storage/memory/memory_test.go with test coverage similar to internal/storage/sqlite tests.\n\nTest areas:\n1. Basic CRUD: CreateIssue, GetIssue, UpdateIssue, DeleteIssue\n2. Bulk operations: CreateIssues, ListIssues with filters\n3. Dependencies: AddDependency, GetDependencies, RemoveDependency\n4. Labels: AddLabel, RemoveLabel, ListLabels\n5. Comments: AddComment, GetComments\n6. ID generation: Prefix handling, counter management\n7. LoadFromIssues: Proper initialization from JSONL data\n8. Thread safety: Concurrent operations with go test -race","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-27T10:45:33.145874-07:00","updated_at":"2025-10-27T11:26:02.515421-07:00","closed_at":"2025-10-27T11:26:02.515421-07:00"}
{"id":"bd-176","title":"Document distinction between corruption prevention and collision resolution","description":"Clarify that the hash/fingerprint/collision architecture solves logical consistency (wrong prefixes, ID collisions) but NOT physical SQLite corruption. --no-db mode is still needed for multi-process/container scenarios.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-27T10:45:46.872233-07:00","updated_at":"2025-10-27T11:27:15.6189-07:00","closed_at":"2025-10-27T11:27:15.6189-07:00"}
{"id":"bd-177","title":"Add prefix validation in SQLite mode to fail fast on mismatches","description":"The new hash/collision architecture prevents logical consistency issues, but doesn't prevent wrong-prefix bugs. Add validation to reject writes with mismatched prefixes.","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-27T10:45:46.87772-07:00","updated_at":"2025-10-27T11:28:52.800581-07:00","closed_at":"2025-10-27T11:28:52.800581-07:00"}
{"id":"bd-178","title":"Address gosec security warnings (102 issues)","description":"Security linter warnings: file permissions (0755 should be 0750), G304 file inclusion via variable, G204 subprocess launches. Many are false positives but should be reviewed.","design":"Review each gosec warning. Add exclusions for legitimate cases to .golangci.yml. Fix real security issues (overly permissive file modes).","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-27T12:51:52.033528-07:00","updated_at":"2025-10-27T12:51:52.033528-07:00"}
{"id":"bd-179","title":"Add optional post-merge git hook example for bd sync","description":"Create example git hook that auto-runs bd sync after git pull/merge.\n\nAdd to examples/git-hooks/:\n- post-merge hook that checks if .beads/issues.jsonl changed\n- If changed: run `bd sync` automatically\n- Make it optional/documented (not auto-installed)\n\nBenefits:\n- Zero-friction sync after git pull\n- Complements auto-detection as belt-and-suspenders\n\nNote: post-merge hook already exists for pre-commit/post-merge. Extend it to support sync.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-27T12:51:52.034442-07:00","updated_at":"2025-10-27T12:51:52.034442-07:00"}
{"id":"bd-18","title":"Consider adding UnderlyingConn(ctx) for safer scoped DB access","description":"Currently UnderlyingDB() returns *sql.DB which is correct for most uses, but for extension migrations/DDL, a scoped connection might be safer.\n\n**Proposal:** Add optional UnderlyingConn(ctx) (*sql.Conn, error) method that:\n- Returns a scoped connection via s.db.Conn(ctx)\n- Encourages lifetime-bounded usage\n- Reduces temptation to tune global pool settings\n- Better for one-time DDL operations like CREATE TABLE\n\n**Implementation:**\n```go\n// UnderlyingConn returns a single connection from the pool for scoped use\n// Useful for migrations and DDL. Close the connection when done.\nfunc (s *SQLiteStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {\n return s.db.Conn(ctx)\n}\n```\n\n**Benefits:**\n- Safer for migrations (explicit scope)\n- Complements UnderlyingDB() for different use cases\n- Low implementation cost\n\n**Trade-off:** Adds another method to maintain, but Oracle considers this balanced compromise between safety and flexibility.\n\n**Decision:** This is optional - evaluate based on VC's actual usage patterns.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-22T17:07:56.832638-07:00","updated_at":"2025-10-25T23:15:33.479496-07:00","closed_at":"2025-10-22T22:02:18.479512-07:00","dependencies":[{"issue_id":"bd-18","depends_on_id":"bd-10","type":"related","created_at":"2025-10-24T13:17:40.325463-07:00","created_by":"renumber"}]}
{"id":"bd-19","title":"MCP close tool method signature error - takes 1 positional argument but 2 were given","description":"The close approval routing fix in beads-mcp v0.11.0 works correctly and successfully routes update(status=\"closed\") calls to close() tool. However, the close() tool has a Python method signature bug that prevents execution.\n\nImpact: All MCP-based close operations are broken. Workaround: Use bd CLI directly.\n\nError: BdDaemonClient.close() takes 1 positional argument but 2 were given\n\nRoot cause: BdDaemonClient.close() only accepts self, but MCP tool passes issue_id and reason.\n\nAdditional issue: CLI close has FOREIGN KEY constraint error when recording reason parameter.\n\nSee GitHub issue #107 for full details.","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-10-22T17:25:34.67056-07:00","updated_at":"2025-10-25T23:15:33.480292-07:00","closed_at":"2025-10-22T17:36:55.463445-07:00"}
{"id":"bd-2","title":"Improve error handling in dependency removal during remapping","description":"In updateDependencyReferences(), RemoveDependency errors are caught and ignored with continue (line 392). Comment says 'if dependency doesn't exist' but this catches ALL errors including real failures. Should check error type with errors.Is(err, ErrDependencyNotFound) and only ignore not-found errors, returning other errors properly.","status":"closed","priority":3,"issue_type":"bug","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-25T23:15:33.462194-07:00","closed_at":"2025-10-18T09:41:18.209717-07:00"}

47
.beads/config.yaml Normal file
View File

@@ -0,0 +1,47 @@
# Beads Configuration File
# This file configures default behavior for all bd commands in this repository
# All settings can also be set via environment variables (BD_* prefix)
# or overridden with command-line flags
# Issue prefix for this repository (used by bd init)
# If not set, bd init will auto-detect from directory name
# Example: issue-prefix: "myproject" creates issues like "myproject-1", "myproject-2", etc.
# issue-prefix: ""
# Use no-db mode: load from JSONL, no SQLite, write back after each command
# When true, bd will use .beads/issues.jsonl as the source of truth
# instead of SQLite database
no-db: false
# Disable daemon for RPC communication (forces direct database access)
# no-daemon: false
# Disable auto-flush of database to JSONL after mutations
# no-auto-flush: false
# Disable auto-import from JSONL when it's newer than database
# no-auto-import: false
# Enable JSON output by default
# json: false
# Default actor for audit trails (overridden by BD_ACTOR or --actor)
# actor: ""
# Path to database (overridden by BEADS_DB or --db)
# db: ""
# Auto-start daemon if not running (can also use BEADS_AUTO_START_DAEMON)
# auto-start-daemon: true
# Debounce interval for auto-flush (can also use BEADS_FLUSH_DEBOUNCE)
# flush-debounce: "5s"
# Integration settings (access with 'bd config get/set')
# These are stored in the database, not in this file:
# - jira.url
# - jira.project
# - linear.url
# - linear.api-key
# - github.org
# - github.repo

3
.gitignore vendored
View File

@@ -53,3 +53,6 @@ result
# GoReleaser build artifacts
dist/
# Git worktrees
.worktrees/

31
FAQ.md
View File

@@ -361,6 +361,37 @@ bd create "Fix bug" -p 1
See [ADVANCED.md#git-worktrees](ADVANCED.md#git-worktrees) for details.
### What's the difference between SQLite corruption and ID collisions?
bd handles two distinct types of integrity issues:
**1. Logical Consistency (Collision Resolution)**
The hash/fingerprint/collision architecture prevents:
- **ID collisions**: Same ID assigned to different issues (e.g., from parallel workers or branch merges)
- **Wrong prefix bugs**: Issues created with incorrect prefix due to config mismatch
- **Merge conflicts**: Branch divergence creating conflicting JSONL content
**Solution**: `bd import --resolve-collisions` automatically remaps colliding IDs and updates all references.
**2. Physical SQLite Corruption**
SQLite database file corruption can occur from:
- **Disk/hardware failures**: Power loss, disk errors, filesystem corruption
- **Concurrent writes**: Multiple processes writing to the same database file simultaneously
- **Container scenarios**: Shared database volumes with multiple containers
**Solution**: Reimport from JSONL (which survives in git history):
```bash
for f in .beads/*.db; do mv "$f" "$f.backup"; done
bd init
bd import -i .beads/issues.jsonl
```
**Key Difference**: Collision resolution fixes logical issues in the data. Physical corruption requires restoring from the JSONL source of truth.
**When to use in-memory mode (`--no-db`)**: For multi-process/container scenarios where SQLite's file locking isn't sufficient. The in-memory backend loads from JSONL at startup and writes back after each command, avoiding shared database state entirely.
## Getting Help
### Where can I get more help?

View File

@@ -92,18 +92,29 @@ bd import -i .beads/issues.jsonl
### Database corruption
If you suspect database corruption:
**Important**: Distinguish between **logical consistency issues** (ID collisions, wrong prefixes) and **physical SQLite corruption**.
For **physical database corruption** (disk failures, power loss, filesystem errors):
```bash
# Check database integrity
sqlite3 .beads/*.db "PRAGMA integrity_check;"
# If corrupted, reimport from JSONL
# If corrupted, reimport from JSONL (source of truth in git)
for f in .beads/*.db; do mv "$f" "$f.backup"; done
bd init
bd import -i .beads/issues.jsonl
```
For **logical consistency issues** (ID collisions from branch merges, parallel workers):
```bash
# This is NOT corruption - use collision resolution instead
bd import -i .beads/issues.jsonl --resolve-collisions
```
See [FAQ](FAQ.md#whats-the-difference-between-sqlite-corruption-and-id-collisions) for the distinction.
### Multiple databases detected warning
If you see a warning about multiple `.beads` databases in the directory hierarchy:

View File

@@ -10,6 +10,7 @@ import (
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/storage/sqlite"
)
@@ -18,11 +19,19 @@ var initCmd = &cobra.Command{
Use: "init",
Short: "Initialize bd in the current directory",
Long: `Initialize bd in the current directory by creating a .beads/ directory
and database file. Optionally specify a custom issue prefix.`,
and database file. Optionally specify a custom issue prefix.
With --no-db: creates .beads/ directory and issues.jsonl file instead of SQLite database.`,
Run: func(cmd *cobra.Command, _ []string) {
prefix, _ := cmd.Flags().GetString("prefix")
quiet, _ := cmd.Flags().GetBool("quiet")
// Initialize config (PersistentPreRun doesn't run for init command)
if err := config.Initialize(); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to initialize config: %v\n", err)
// Non-fatal - continue with defaults
}
// Check BEADS_DB environment variable if --db flag not set
// (PersistentPreRun doesn't run for init command)
if dbPath == "" {
@@ -31,6 +40,12 @@ and database file. Optionally specify a custom issue prefix.`,
}
}
// Determine prefix with precedence: flag > config > auto-detect
if prefix == "" {
// Try to get from config file
prefix = config.GetString("issue-prefix")
}
if prefix == "" {
// Auto-detect from directory name
cwd, err := os.Getwd()
@@ -88,6 +103,31 @@ and database file. Optionally specify a custom issue prefix.`,
os.Exit(1)
}
// Handle --no-db mode: create issues.jsonl file instead of database
if noDb {
// Create empty issues.jsonl file
jsonlPath := filepath.Join(localBeadsDir, "issues.jsonl")
if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
if err := os.WriteFile(jsonlPath, []byte{}, 0644); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to create issues.jsonl: %v\n", err)
os.Exit(1)
}
}
if !quiet {
green := color.New(color.FgGreen).SprintFunc()
cyan := color.New(color.FgCyan).SprintFunc()
fmt.Printf("\n%s bd initialized successfully in --no-db mode!\n\n", green("✓"))
fmt.Printf(" Mode: %s\n", cyan("no-db (JSONL-only)"))
fmt.Printf(" Issues file: %s\n", cyan(jsonlPath))
fmt.Printf(" Issue prefix: %s\n", cyan(prefix))
fmt.Printf(" Issues will be named: %s\n\n", cyan(prefix+"-1, "+prefix+"-2, ..."))
fmt.Printf("Run %s to get started.\n\n", cyan("bd --no-db quickstart"))
}
return
}
// Create .gitignore in .beads directory
gitignorePath := filepath.Join(localBeadsDir, ".gitignore")
gitignoreContent := `# SQLite databases

View File

@@ -2,6 +2,7 @@ package main
import (
"bufio"
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
@@ -19,10 +20,10 @@ import (
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyegge/beads"
"github.com/steveyegge/beads/internal/autoimport"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/memory"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"golang.org/x/mod/semver"
@@ -104,6 +105,9 @@ var rootCmd = &cobra.Command{
if !cmd.Flags().Changed("no-auto-import") {
noAutoImport = config.GetBool("no-auto-import")
}
if !cmd.Flags().Changed("no-db") {
noDb = config.GetBool("no-db")
}
if !cmd.Flags().Changed("db") && dbPath == "" {
dbPath = config.GetString("db")
}
@@ -123,15 +127,34 @@ var rootCmd = &cobra.Command{
noAutoImport = true
}
// Sync RPC client version with CLI version
rpc.ClientVersion = Version
// Set auto-flush based on flag (invert no-auto-flush)
autoFlushEnabled = !noAutoFlush
// Set auto-import based on flag (invert no-auto-import)
autoImportEnabled = !noAutoImport
// Handle --no-db mode: load from JSONL, use in-memory storage
if noDb {
if err := initializeNoDbMode(); err != nil {
fmt.Fprintf(os.Stderr, "Error initializing --no-db mode: %v\n", err)
os.Exit(1)
}
// Set actor for audit trail
if actor == "" {
if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
actor = bdActor
} else if user := os.Getenv("USER"); user != "" {
actor = user
} else {
actor = "unknown"
}
}
// Skip daemon and SQLite initialization - we're in memory mode
return
}
// Initialize database path
if dbPath == "" {
cwd, err := os.Getwd()
@@ -147,36 +170,22 @@ var rootCmd = &cobra.Command{
// Special case for import: if we found a database but there's a local .beads/
// directory without a database, prefer creating a local database
if cmd.Name() == cmdImport && localBeadsDir != "" {
if _, err := os.Stat(localBeadsDir); err == nil {
// Check if found database is NOT in the local .beads/ directory
if !strings.HasPrefix(dbPath, localBeadsDir+string(filepath.Separator)) {
// Look for existing .db file in local .beads/ directory
matches, _ := filepath.Glob(filepath.Join(localBeadsDir, "*.db"))
if len(matches) > 0 {
dbPath = matches[0]
} else {
// No database exists yet - will be created by import
// Use generic name that will be renamed after prefix detection
dbPath = filepath.Join(localBeadsDir, "bd.db")
if _, err := os.Stat(localBeadsDir); err == nil {
// Check if found database is NOT in the local .beads/ directory
if !strings.HasPrefix(dbPath, localBeadsDir+string(filepath.Separator)) {
// Use local .beads/vc.db instead for import
dbPath = filepath.Join(localBeadsDir, "vc.db")
}
}
}
}
} else {
// For import command, allow creating database if .beads/ directory exists
if cmd.Name() == cmdImport && localBeadsDir != "" {
if _, err := os.Stat(localBeadsDir); err == nil {
// Look for existing .db file in local .beads/ directory
matches, _ := filepath.Glob(filepath.Join(localBeadsDir, "*.db"))
if len(matches) > 0 {
dbPath = matches[0]
} else {
// For import command, allow creating database if .beads/ directory exists
if cmd.Name() == cmdImport && localBeadsDir != "" {
if _, err := os.Stat(localBeadsDir); err == nil {
// .beads/ directory exists - set dbPath for import to create
// Use generic name that will be renamed after prefix detection
dbPath = filepath.Join(localBeadsDir, "bd.db")
dbPath = filepath.Join(localBeadsDir, "vc.db")
}
}
}
// If dbPath still not set, error out
if dbPath == "" {
@@ -269,30 +278,18 @@ var rootCmd = &cobra.Command{
daemonStatus.Detail = fmt.Sprintf("version mismatch (daemon: %s, client: %s) and restart failed",
health.Version, Version)
} else {
// Daemon is healthy and compatible - validate database path
beadsDir := filepath.Dir(dbPath)
if err := validateDaemonLock(beadsDir, dbPath); err != nil {
_ = client.Close()
daemonStatus.FallbackReason = FallbackHealthFailed
daemonStatus.Detail = fmt.Sprintf("daemon lock validation failed: %v", err)
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: daemon lock validation failed: %v\n", err)
}
// Fall through to direct mode
} else {
// Daemon is healthy, compatible, and validated - use it
daemonClient = client
daemonStatus.Mode = cmdDaemon
daemonStatus.Connected = true
daemonStatus.Degraded = false
daemonStatus.Health = health.Status
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: connected to daemon at %s (health: %s)\n", socketPath, health.Status)
}
// Warn if using daemon with git worktrees
warnWorktreeDaemon(dbPath)
return // Skip direct storage initialization
// Daemon is healthy and compatible - use it
daemonClient = client
daemonStatus.Mode = cmdDaemon
daemonStatus.Connected = true
daemonStatus.Degraded = false
daemonStatus.Health = health.Status
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: connected to daemon at %s (health: %s)\n", socketPath, health.Status)
}
// Warn if using daemon with git worktrees
warnWorktreeDaemon(dbPath)
return // Skip direct storage initialization
}
} else {
// Health check failed or daemon unhealthy
@@ -436,6 +433,26 @@ var rootCmd = &cobra.Command{
}
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
// Handle --no-db mode: write memory storage back to JSONL
if noDb {
if store != nil {
cwd, err := os.Getwd()
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to get current directory: %v\n", err)
os.Exit(1)
}
beadsDir := filepath.Join(cwd, ".beads")
if memStore, ok := store.(*memory.MemoryStorage); ok {
if err := writeIssuesToJSONL(memStore, beadsDir); err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to write JSONL: %v\n", err)
os.Exit(1)
}
}
}
return
}
// Close daemon client if we're using it
if daemonClient != nil {
_ = daemonClient.Close()
@@ -474,12 +491,12 @@ var rootCmd = &cobra.Command{
// getDebounceDuration returns the auto-flush debounce duration
// Configurable via config file or BEADS_FLUSH_DEBOUNCE env var (e.g., "500ms", "10s")
// Defaults to 30 seconds if not set or invalid (provides batching window)
// Defaults to 5 seconds if not set or invalid
// getDebounceDuration returns the auto-flush debounce duration.
// Configurable via the config file or the BEADS_FLUSH_DEBOUNCE env var
// (e.g. "500ms", "10s"); falls back to 5 seconds when unset or unparsable.
func getDebounceDuration() time.Duration {
	duration := config.GetDuration("flush-debounce")
	if duration == 0 {
		// GetDuration returns 0 both when the key is absent and when
		// parsing failed — use the documented 5-second default.
		return 5 * time.Second
	}
	return duration
}
@@ -601,7 +618,7 @@ func restartDaemonForVersionMismatch() bool {
}
args := []string{"daemon"}
cmd := exec.Command(exe, args...) // #nosec G204 - bd daemon command from trusted binary
cmd := exec.Command(exe, args...)
cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1")
// Set working directory to database directory so daemon finds correct DB
@@ -696,7 +713,6 @@ func isDaemonHealthy(socketPath string) bool {
}
func acquireStartLock(lockPath, socketPath string) bool {
// #nosec G304 - controlled path from config
lockFile, err := os.OpenFile(lockPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600)
if err != nil {
debugLog("another process is starting daemon, waiting for readiness")
@@ -777,7 +793,7 @@ func startDaemonProcess(socketPath string, isGlobal bool) bool {
args = append(args, "--global")
}
cmd := exec.Command(binPath, args...) // #nosec G204 - bd daemon command from trusted binary
cmd := exec.Command(binPath, args...)
setupDaemonIO(cmd)
if !isGlobal && dbPath != "" {
@@ -825,7 +841,6 @@ func getPIDFileForSocket(socketPath string) string {
// readPIDFromFile reads a PID from a file
func readPIDFromFile(path string) (int, error) {
// #nosec G304 - controlled path from config
data, err := os.ReadFile(path)
if err != nil {
return 0, err
@@ -881,7 +896,7 @@ func canRetryDaemonStart() bool {
}
// Exponential backoff: 5s, 10s, 20s, 40s, 80s, 120s (capped at 120s)
backoff := time.Duration(5*(1<<uint(daemonStartFailures-1))) * time.Second // #nosec G115 - controlled value, no overflow risk
backoff := time.Duration(5*(1<<uint(daemonStartFailures-1))) * time.Second
if backoff > 120*time.Second {
backoff = 120 * time.Second
}
@@ -945,7 +960,7 @@ func findJSONLPath() string {
// Ensure the directory exists (important for new databases)
// This is the only difference from the public API - we create the directory
dbDir := filepath.Dir(dbPath)
if err := os.MkdirAll(dbDir, 0750); err != nil {
if err := os.MkdirAll(dbDir, 0755); err != nil {
// If we can't create the directory, return discovered path anyway
// (the subsequent write will fail with a clearer error)
return jsonlPath
@@ -958,38 +973,183 @@ func findJSONLPath() string {
// Fixes bd-84: Hash-based comparison is git-proof (mtime comparison fails after git pull)
// Fixes bd-228: Now uses collision detection to prevent silently overwriting local changes
func autoImportIfNewer() {
ctx := context.Background()
notify := autoimport.NewStderrNotifier(os.Getenv("BD_DEBUG") != "")
importFunc := func(ctx context.Context, issues []*types.Issue) (created, updated int, idMapping map[string]string, err error) {
opts := ImportOptions{
ResolveCollisions: true,
DryRun: false,
SkipUpdate: false,
Strict: false,
SkipPrefixValidation: true,
// Find JSONL path
jsonlPath := findJSONLPath()
// Read JSONL file
jsonlData, err := os.ReadFile(jsonlPath)
if err != nil {
// JSONL doesn't exist or can't be accessed, skip import
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: auto-import skipped, JSONL not found: %v\n", err)
}
result, err := importIssuesCore(ctx, dbPath, store, issues, opts)
if err != nil {
return 0, 0, nil, err
}
return result.Created, result.Updated, result.IDMapping, nil
return
}
onChanged := func(needsFullExport bool) {
if needsFullExport {
// Compute current JSONL hash
hasher := sha256.New()
hasher.Write(jsonlData)
currentHash := hex.EncodeToString(hasher.Sum(nil))
// Get last import hash from DB metadata
ctx := context.Background()
lastHash, err := store.GetMetadata(ctx, "last_import_hash")
if err != nil {
// Metadata error - treat as first import rather than skipping (bd-663)
// This allows auto-import to recover from corrupt/missing metadata
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: metadata read failed (%v), treating as first import\n", err)
}
lastHash = ""
}
// Compare hashes
if currentHash == lastHash {
// Content unchanged, skip import
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: auto-import skipped, JSONL unchanged (hash match)\n")
}
return
}
if os.Getenv("BD_DEBUG") != "" {
fmt.Fprintf(os.Stderr, "Debug: auto-import triggered (hash changed)\n")
}
// Check for Git merge conflict markers (bd-270)
// Only match if they appear as standalone lines (not embedded in JSON strings)
lines := bytes.Split(jsonlData, []byte("\n"))
for _, line := range lines {
trimmed := bytes.TrimSpace(line)
if bytes.HasPrefix(trimmed, []byte("<<<<<<< ")) ||
bytes.Equal(trimmed, []byte("=======")) ||
bytes.HasPrefix(trimmed, []byte(">>>>>>> ")) {
fmt.Fprintf(os.Stderr, "\n❌ Git merge conflict detected in %s\n\n", jsonlPath)
fmt.Fprintf(os.Stderr, "The JSONL file contains unresolved merge conflict markers.\n")
fmt.Fprintf(os.Stderr, "This prevents auto-import from loading your issues.\n\n")
fmt.Fprintf(os.Stderr, "To resolve:\n")
fmt.Fprintf(os.Stderr, " 1. Resolve the merge conflict in your Git client, OR\n")
fmt.Fprintf(os.Stderr, " 2. Export from database to regenerate clean JSONL:\n")
fmt.Fprintf(os.Stderr, " bd export -o %s\n\n", jsonlPath)
fmt.Fprintf(os.Stderr, "After resolving, commit the fixed JSONL file.\n")
return
}
}
// Content changed - parse all issues
scanner := bufio.NewScanner(bytes.NewReader(jsonlData))
scanner.Buffer(make([]byte, 0, 1024), 2*1024*1024) // 2MB buffer for large JSON lines
var allIssues []*types.Issue
lineNo := 0
for scanner.Scan() {
lineNo++
line := scanner.Text()
if line == "" {
continue
}
var issue types.Issue
if err := json.Unmarshal([]byte(line), &issue); err != nil {
// Parse error, skip this import
snippet := line
if len(snippet) > 80 {
snippet = snippet[:80] + "..."
}
fmt.Fprintf(os.Stderr, "Auto-import skipped: parse error at line %d: %v\nSnippet: %s\n", lineNo, err, snippet)
return
}
// Fix closed_at invariant: closed issues must have closed_at timestamp
if issue.Status == types.StatusClosed && issue.ClosedAt == nil {
now := time.Now()
issue.ClosedAt = &now
}
allIssues = append(allIssues, &issue)
}
if err := scanner.Err(); err != nil {
fmt.Fprintf(os.Stderr, "Auto-import skipped: scanner error: %v\n", err)
return
}
// Use shared import logic (bd-157)
opts := ImportOptions{
ResolveCollisions: true, // Auto-import always resolves collisions
DryRun: false,
SkipUpdate: false,
Strict: false,
SkipPrefixValidation: true, // Auto-import is lenient about prefixes
}
result, err := importIssuesCore(ctx, dbPath, store, allIssues, opts)
if err != nil {
fmt.Fprintf(os.Stderr, "Auto-import failed: %v\n", err)
return
}
// Show collision remapping notification if any occurred
if len(result.IDMapping) > 0 {
// Build title lookup map to avoid O(n^2) search
titleByID := make(map[string]string)
for _, issue := range allIssues {
titleByID[issue.ID] = issue.Title
}
// Sort remappings by old ID for consistent output
type mapping struct {
oldID string
newID string
}
mappings := make([]mapping, 0, len(result.IDMapping))
for oldID, newID := range result.IDMapping {
mappings = append(mappings, mapping{oldID, newID})
}
sort.Slice(mappings, func(i, j int) bool {
return mappings[i].oldID < mappings[j].oldID
})
maxShow := 10
numRemapped := len(mappings)
if numRemapped < maxShow {
maxShow = numRemapped
}
fmt.Fprintf(os.Stderr, "\nAuto-import: remapped %d colliding issue(s) to new IDs:\n", numRemapped)
for i := 0; i < maxShow; i++ {
m := mappings[i]
title := titleByID[m.oldID]
fmt.Fprintf(os.Stderr, " %s → %s (%s)\n", m.oldID, m.newID, title)
}
if numRemapped > maxShow {
fmt.Fprintf(os.Stderr, " ... and %d more\n", numRemapped-maxShow)
}
fmt.Fprintf(os.Stderr, "\n")
}
// Schedule export to sync JSONL after successful import
changed := (result.Created + result.Updated + len(result.IDMapping)) > 0
if changed {
if len(result.IDMapping) > 0 {
// Remappings may affect many issues, do a full export
markDirtyAndScheduleFullExport()
} else {
// Regular import, incremental export is fine
markDirtyAndScheduleFlush()
}
}
// Store new hash after successful import
if err := store.SetMetadata(ctx, "last_import_hash", currentHash); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_hash after import: %v\n", err)
fmt.Fprintf(os.Stderr, "This may cause auto-import to retry the same import on next operation.\n")
}
if err := autoimport.AutoImportIfNewer(ctx, store, dbPath, notify, importFunc, onChanged); err != nil {
// Error already logged by notifier
return
// Store import timestamp (bd-159: for staleness detection)
importTime := time.Now().Format(time.RFC3339)
if err := store.SetMetadata(ctx, "last_import_time", importTime); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_time after import: %v\n", err)
}
}
@@ -1034,8 +1194,7 @@ func checkVersionMismatch() {
} else if cmp > 0 {
// Binary is newer than database
fmt.Fprintf(os.Stderr, "%s\n", yellow("⚠️ Your binary appears NEWER than the database."))
fmt.Fprintf(os.Stderr, "%s\n", yellow("⚠️ Run 'bd migrate' to check for and migrate old database files."))
fmt.Fprintf(os.Stderr, "%s\n\n", yellow("⚠️ The current database version will be updated automatically."))
fmt.Fprintf(os.Stderr, "%s\n\n", yellow("⚠️ The database will be upgraded automatically."))
// Update stored version to current
_ = store.SetMetadata(ctx, "bd_version", Version)
}
@@ -1125,6 +1284,71 @@ func clearAutoFlushState() {
lastFlushError = nil
}
// writeJSONLAtomic writes issues to a JSONL file atomically, using the
// temp-file-then-rename pattern. It is the shared implementation behind both
// flushToJSONL (SQLite mode) and writeIssuesToJSONL (--no-db mode).
//
// Steps: sort issues by ID, write them to <path>.tmp.<pid>, close, rename
// over the target, then set 0644 permissions. The PID suffix prevents
// collisions between concurrent bd processes (bd-306). On any failure the
// temp file is removed via the deferred cleanup. Note the sort mutates the
// caller's slice order.
func writeJSONLAtomic(jsonlPath string, issues []*types.Issue) error {
	// Deterministic output: order by issue ID.
	sort.Slice(issues, func(a, b int) bool {
		return issues[a].ID < issues[b].ID
	})

	tmpName := fmt.Sprintf("%s.tmp.%d", jsonlPath, os.Getpid())
	tmp, err := os.Create(tmpName)
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	// Cleanup fires only on failure paths; success disarms it below.
	defer func() {
		if tmp != nil {
			_ = tmp.Close()
			_ = os.Remove(tmpName)
		}
	}()

	enc := json.NewEncoder(tmp)
	for _, iss := range issues {
		if err := enc.Encode(iss); err != nil {
			return fmt.Errorf("failed to encode issue %s: %w", iss.ID, err)
		}
	}

	if err := tmp.Close(); err != nil {
		return fmt.Errorf("failed to close temp file: %w", err)
	}
	tmp = nil // Disarm deferred cleanup: the temp file is complete.

	// Atomic swap into place.
	if err := os.Rename(tmpName, jsonlPath); err != nil {
		_ = os.Remove(tmpName)
		return fmt.Errorf("failed to rename file: %w", err)
	}

	// Best-effort: normalize permissions to rw-r--r--. The data is already
	// durable at this point, so a failure here is only surfaced in debug mode.
	if err := os.Chmod(jsonlPath, 0644); err != nil {
		if os.Getenv("BD_DEBUG") != "" {
			fmt.Fprintf(os.Stderr, "Debug: failed to set file permissions: %v\n", err)
		}
	}
	return nil
}
// flushToJSONL exports dirty issues to JSONL using incremental updates
// flushToJSONL exports dirty database changes to the JSONL file. Uses incremental
// export by default (only exports modified issues), or full export for ID-changing
@@ -1239,7 +1463,6 @@ func flushToJSONL() {
// Read existing JSONL into a map (skip for full export - we'll rebuild from scratch)
issueMap := make(map[string]*types.Issue)
if !fullExport {
// #nosec G304 - controlled path from config
if existingFile, err := os.Open(jsonlPath); err == nil {
scanner := bufio.NewScanner(existingFile)
lineNum := 0
@@ -1286,45 +1509,15 @@ func flushToJSONL() {
issueMap[issueID] = issue
}
// Convert map to sorted slice
// Convert map to slice (will be sorted by writeJSONLAtomic)
issues := make([]*types.Issue, 0, len(issueMap))
for _, issue := range issueMap {
issues = append(issues, issue)
}
sort.Slice(issues, func(i, j int) bool {
return issues[i].ID < issues[j].ID
})
// Write to temp file first, then rename (atomic)
// Use PID in filename to avoid collisions between concurrent bd commands (bd-306)
tempPath := fmt.Sprintf("%s.tmp.%d", jsonlPath, os.Getpid())
// #nosec G304 - controlled path from config
f, err := os.Create(tempPath)
if err != nil {
recordFailure(fmt.Errorf("failed to create temp file: %w", err))
return
}
encoder := json.NewEncoder(f)
for _, issue := range issues {
if err := encoder.Encode(issue); err != nil {
_ = f.Close()
_ = os.Remove(tempPath)
recordFailure(fmt.Errorf("failed to encode issue %s: %w", issue.ID, err))
return
}
}
if err := f.Close(); err != nil {
_ = os.Remove(tempPath)
recordFailure(fmt.Errorf("failed to close temp file: %w", err))
return
}
// Atomic rename
if err := os.Rename(tempPath, jsonlPath); err != nil {
_ = os.Remove(tempPath)
recordFailure(fmt.Errorf("failed to rename file: %w", err))
// Write atomically using common helper
if err := writeJSONLAtomic(jsonlPath, issues); err != nil {
recordFailure(err)
return
}
@@ -1335,7 +1528,6 @@ func flushToJSONL() {
}
// Store hash of exported JSONL (fixes bd-84: enables hash-based auto-import)
// #nosec G304 - controlled path from config
jsonlData, err := os.ReadFile(jsonlPath)
if err == nil {
hasher := sha256.New()
@@ -1354,6 +1546,7 @@ var (
noAutoFlush bool
noAutoImport bool
sandboxMode bool
noDb bool // Use --no-db mode: load from JSONL, write back after each command
)
func init() {
@@ -1369,6 +1562,7 @@ func init() {
rootCmd.PersistentFlags().BoolVar(&noAutoFlush, "no-auto-flush", false, "Disable automatic JSONL sync after CRUD operations")
rootCmd.PersistentFlags().BoolVar(&noAutoImport, "no-auto-import", false, "Disable automatic JSONL import when newer than DB")
rootCmd.PersistentFlags().BoolVar(&sandboxMode, "sandbox", false, "Sandbox mode: disables daemon and auto-sync (equivalent to --no-daemon --no-auto-flush --no-auto-import)")
rootCmd.PersistentFlags().BoolVar(&noDb, "no-db", false, "Use no-db mode: load from JSONL, no SQLite, write back after each command")
}
// createIssuesFromMarkdown parses a markdown file and creates multiple issues
@@ -1718,129 +1912,15 @@ func init() {
rootCmd.AddCommand(createCmd)
}
// resolveIssueID looks up an issue by ID with a bare-number fallback: when
// the given ID misses and contains no hyphen, it is retried as
// "<issue_prefix>-<id>" using the configured prefix. The returned string is
// the ID that was ultimately tried, so callers can report it; the returned
// issue may be nil when nothing matched.
func resolveIssueID(ctx context.Context, id string) (*types.Issue, string, error) {
	issue, err := store.GetIssue(ctx, id)
	switch {
	case err != nil:
		return nil, id, err
	case issue != nil:
		return issue, id, nil
	}

	// A hyphen means the caller already supplied a full ID — no fallback.
	if strings.Contains(id, "-") {
		return nil, id, nil
	}

	// Bare number: expand with the configured prefix, when one exists.
	prefix, cfgErr := store.GetConfig(ctx, "issue_prefix")
	if cfgErr != nil || prefix == "" {
		return nil, id, nil
	}

	fullID := prefix + "-" + id
	issue, err = store.GetIssue(ctx, fullID)
	if err != nil {
		return nil, fullID, err
	}
	// issue may still be nil here; callers check before use.
	return issue, fullID, nil
}
var showCmd = &cobra.Command{
Use: "show [id...]",
Short: "Show issue details",
Long: `Show detailed information for one or more issues.
Examples:
bd show bd-42 # Show single issue
bd show bd-1 bd-2 bd-3 # Show multiple issues
bd show --all-issues # Show all issues (may be expensive)
bd show --priority 0 --priority 1 # Show all P0 and P1 issues
bd show -p 0 -p 1 # Short form`,
Args: func(cmd *cobra.Command, args []string) error {
allIssues, _ := cmd.Flags().GetBool("all-issues")
priorities, _ := cmd.Flags().GetIntSlice("priority")
if !allIssues && len(priorities) == 0 && len(args) == 0 {
return fmt.Errorf("requires at least 1 issue ID, or use --all-issues, or --priority flag")
}
return nil
},
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
allIssues, _ := cmd.Flags().GetBool("all-issues")
priorities, _ := cmd.Flags().GetIntSlice("priority")
// Build list of issue IDs to show
var issueIDs []string
// If --all-issues or --priority is used, fetch matching issues
if allIssues || len(priorities) > 0 {
ctx := context.Background()
if daemonClient != nil {
// Daemon mode - not yet supported
fmt.Fprintf(os.Stderr, "Error: --all-issues and --priority not yet supported in daemon mode\n")
fmt.Fprintf(os.Stderr, "Use --no-daemon flag or specify issue IDs directly\n")
os.Exit(1)
} else {
// Direct mode - fetch all issues
filter := types.IssueFilter{}
issues, err := store.SearchIssues(ctx, "", filter)
if err != nil {
fmt.Fprintf(os.Stderr, "Error searching issues: %v\n", err)
os.Exit(1)
}
// Filter by priority if specified
if len(priorities) > 0 {
priorityMap := make(map[int]bool)
for _, p := range priorities {
priorityMap[p] = true
}
filtered := make([]*types.Issue, 0)
for _, issue := range issues {
if priorityMap[issue.Priority] {
filtered = append(filtered, issue)
}
}
issues = filtered
}
// Extract IDs
for _, issue := range issues {
issueIDs = append(issueIDs, issue.ID)
}
// Warn if showing many issues
if len(issueIDs) > 20 && !jsonOutput {
yellow := color.New(color.FgYellow).SprintFunc()
fmt.Fprintf(os.Stderr, "%s Showing %d issues (this may take a while)\n\n", yellow("⚠"), len(issueIDs))
}
}
} else {
// Use provided IDs
issueIDs = args
}
// Sort issue IDs for consistent ordering when showing multiple issues
if len(issueIDs) > 1 {
sort.Strings(issueIDs)
}
// If daemon is running, use RPC
if daemonClient != nil {
allDetails := []interface{}{}
for idx, id := range issueIDs {
for idx, id := range args {
showArgs := &rpc.ShowArgs{ID: id}
resp, err := daemonClient.Show(showArgs)
if err != nil {
@@ -1977,16 +2057,16 @@ Examples:
// Direct mode
ctx := context.Background()
allDetails := []interface{}{}
for idx, id := range issueIDs {
issue, resolvedID, err := resolveIssueID(ctx, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching %s: %v\n", id, err)
continue
}
if issue == nil {
fmt.Fprintf(os.Stderr, "Issue %s not found\n", resolvedID)
continue
}
for idx, id := range args {
issue, err := store.GetIssue(ctx, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error fetching %s: %v\n", id, err)
continue
}
if issue == nil {
fmt.Fprintf(os.Stderr, "Issue %s not found\n", id)
continue
}
if jsonOutput {
// Include labels, dependencies, and comments in JSON output
@@ -2118,8 +2198,6 @@ Examples:
}
func init() {
showCmd.Flags().Bool("all-issues", false, "Show all issues (WARNING: may be expensive for large databases)")
showCmd.Flags().IntSliceP("priority", "p", []int{}, "Show issues with specified priority (can be used multiple times, e.g., -p 0 -p 1)")
rootCmd.AddCommand(showCmd)
}
@@ -2278,202 +2356,6 @@ func init() {
rootCmd.AddCommand(updateCmd)
}
// editCmd edits a single field of an issue in the user's $EDITOR and saves
// the result via daemon RPC (when a daemon is running) or direct storage.
var editCmd = &cobra.Command{
	Use:   "edit [id]",
	Short: "Edit an issue field in $EDITOR",
	Long: `Edit an issue field using your configured $EDITOR.
By default, edits the description. Use flags to edit other fields.
Examples:
bd edit bd-42 # Edit description
bd edit bd-42 --title # Edit title
bd edit bd-42 --design # Edit design notes
bd edit bd-42 --notes # Edit notes
bd edit bd-42 --acceptance # Edit acceptance criteria`,
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		id := args[0]
		ctx := context.Background()
		// Determine which field to edit; description is the default, and the
		// first field flag that was set wins.
		fieldToEdit := "description"
		if cmd.Flags().Changed("title") {
			fieldToEdit = "title"
		} else if cmd.Flags().Changed("design") {
			fieldToEdit = "design"
		} else if cmd.Flags().Changed("notes") {
			fieldToEdit = "notes"
		} else if cmd.Flags().Changed("acceptance") {
			fieldToEdit = "acceptance_criteria"
		}
		// Get the editor from environment: $EDITOR first, then $VISUAL.
		editor := os.Getenv("EDITOR")
		if editor == "" {
			editor = os.Getenv("VISUAL")
		}
		if editor == "" {
			// Try common defaults found on PATH
			for _, defaultEditor := range []string{"vim", "vi", "nano", "emacs"} {
				if _, err := exec.LookPath(defaultEditor); err == nil {
					editor = defaultEditor
					break
				}
			}
		}
		if editor == "" {
			fmt.Fprintf(os.Stderr, "Error: No editor found. Set $EDITOR or $VISUAL environment variable.\n")
			os.Exit(1)
		}
		// Get the current issue (RPC in daemon mode, store in direct mode).
		var issue *types.Issue
		var err error
		if daemonClient != nil {
			// Daemon mode
			showArgs := &rpc.ShowArgs{ID: id}
			resp, err := daemonClient.Show(showArgs)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error fetching issue %s: %v\n", id, err)
				os.Exit(1)
			}
			issue = &types.Issue{}
			if err := json.Unmarshal(resp.Data, issue); err != nil {
				fmt.Fprintf(os.Stderr, "Error parsing issue data: %v\n", err)
				os.Exit(1)
			}
		} else {
			// Direct mode
			issue, err = store.GetIssue(ctx, id)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error fetching issue %s: %v\n", id, err)
				os.Exit(1)
			}
			if issue == nil {
				fmt.Fprintf(os.Stderr, "Issue %s not found\n", id)
				os.Exit(1)
			}
		}
		// Get the current field value for the selected field.
		var currentValue string
		switch fieldToEdit {
		case "title":
			currentValue = issue.Title
		case "description":
			currentValue = issue.Description
		case "design":
			currentValue = issue.Design
		case "notes":
			currentValue = issue.Notes
		case "acceptance_criteria":
			currentValue = issue.AcceptanceCriteria
		}
		// Create a temporary file seeded with the current value for the
		// editor session.
		tmpFile, err := os.CreateTemp("", fmt.Sprintf("bd-edit-%s-*.txt", fieldToEdit))
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error creating temp file: %v\n", err)
			os.Exit(1)
		}
		tmpPath := tmpFile.Name()
		defer os.Remove(tmpPath)
		// Write current value to temp file
		if _, err := tmpFile.WriteString(currentValue); err != nil {
			tmpFile.Close()
			fmt.Fprintf(os.Stderr, "Error writing to temp file: %v\n", err)
			os.Exit(1)
		}
		tmpFile.Close()
		// Open the editor attached to the user's terminal so interactive
		// editors (vim, nano) work.
		editorCmd := exec.Command(editor, tmpPath) // #nosec G204 - user-provided editor command is intentional
		editorCmd.Stdin = os.Stdin
		editorCmd.Stdout = os.Stdout
		editorCmd.Stderr = os.Stderr
		if err := editorCmd.Run(); err != nil {
			fmt.Fprintf(os.Stderr, "Error running editor: %v\n", err)
			os.Exit(1)
		}
		// Read the edited content back.
		// #nosec G304 - controlled temp file path
		editedContent, err := os.ReadFile(tmpPath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error reading edited file: %v\n", err)
			os.Exit(1)
		}
		newValue := string(editedContent)
		// Check if the value changed; skip the write entirely when it didn't.
		if newValue == currentValue {
			fmt.Println("No changes made")
			return
		}
		// Validate title if editing title: titles must be non-blank.
		if fieldToEdit == "title" && strings.TrimSpace(newValue) == "" {
			fmt.Fprintf(os.Stderr, "Error: title cannot be empty\n")
			os.Exit(1)
		}
		// Update the issue (updates map is used by the direct-mode path).
		updates := map[string]interface{}{
			fieldToEdit: newValue,
		}
		if daemonClient != nil {
			// Daemon mode: RPC args carry one optional pointer per field.
			updateArgs := &rpc.UpdateArgs{ID: id}
			switch fieldToEdit {
			case "title":
				updateArgs.Title = &newValue
			case "description":
				updateArgs.Description = &newValue
			case "design":
				updateArgs.Design = &newValue
			case "notes":
				updateArgs.Notes = &newValue
			case "acceptance_criteria":
				updateArgs.AcceptanceCriteria = &newValue
			}
			_, err := daemonClient.Update(updateArgs)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error updating issue: %v\n", err)
				os.Exit(1)
			}
		} else {
			// Direct mode
			if err := store.UpdateIssue(ctx, id, updates, actor); err != nil {
				fmt.Fprintf(os.Stderr, "Error updating issue: %v\n", err)
				os.Exit(1)
			}
			// Keep the JSONL mirror in sync after a direct-mode write.
			markDirtyAndScheduleFlush()
		}
		green := color.New(color.FgGreen).SprintFunc()
		fieldName := strings.ReplaceAll(fieldToEdit, "_", " ")
		fmt.Printf("%s Updated %s for issue: %s\n", green("✓"), fieldName, id)
	},
}
func init() {
	// Field-selection flags; exactly one is honored per run
	// (description is the default when none is given).
	flags := editCmd.Flags()
	flags.Bool("title", false, "Edit the title")
	flags.Bool("description", false, "Edit the description (default)")
	flags.Bool("design", false, "Edit the design notes")
	flags.Bool("notes", false, "Edit the notes")
	flags.Bool("acceptance", false, "Edit the acceptance criteria")
	rootCmd.AddCommand(editCmd)
}
var closeCmd = &cobra.Command{
Use: "close [id...]",
Short: "Close one or more issues",
@@ -2551,14 +2433,6 @@ func init() {
}
func main() {
// Handle --version flag (in addition to 'version' subcommand)
for _, arg := range os.Args[1:] {
if arg == "--version" || arg == "-v" {
fmt.Printf("bd version %s (%s)\n", Version, Build)
return
}
}
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}

View File

@@ -560,6 +560,9 @@ func TestAutoFlushErrorHandling(t *testing.T) {
t.Skip("chmod-based read-only directory behavior is not reliable on Windows")
}
// Note: We create issues.jsonl as a directory to force os.Create() to fail,
// which works even when running as root (unlike chmod-based approaches)
// Create temp directory for test database
tmpDir, err := os.MkdirTemp("", "bd-test-error-*")
if err != nil {
@@ -601,16 +604,34 @@ func TestAutoFlushErrorHandling(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
// Create a read-only directory to force flush failure
readOnlyDir := filepath.Join(tmpDir, "readonly")
if err := os.MkdirAll(readOnlyDir, 0555); err != nil {
t.Fatalf("Failed to create read-only dir: %v", err)
// Mark issue as dirty so flushToJSONL will try to export it
if err := testStore.MarkIssueDirty(ctx, issue.ID); err != nil {
t.Fatalf("Failed to mark issue dirty: %v", err)
}
defer os.Chmod(readOnlyDir, 0755) // Restore permissions for cleanup
// Set dbPath to point to read-only directory
// Create a directory where the JSONL file should be, to force write failure
// os.Create() will fail when trying to create a file with a path that's already a directory
failDir := filepath.Join(tmpDir, "faildir")
if err := os.MkdirAll(failDir, 0755); err != nil {
t.Fatalf("Failed to create fail dir: %v", err)
}
// Create issues.jsonl as a directory (not a file) to force Create() to fail
jsonlAsDir := filepath.Join(failDir, "issues.jsonl")
if err := os.MkdirAll(jsonlAsDir, 0755); err != nil {
t.Fatalf("Failed to create issues.jsonl as directory: %v", err)
}
// Set dbPath to point to faildir
originalDBPath := dbPath
dbPath = filepath.Join(readOnlyDir, "test.db")
dbPath = filepath.Join(failDir, "test.db")
// Verify issue is actually marked as dirty
dirtyIDs, err := testStore.GetDirtyIssues(ctx)
if err != nil {
t.Fatalf("Failed to get dirty issues: %v", err)
}
t.Logf("Dirty issues before flush: %v", dirtyIDs)
// Reset failure counter
flushMutex.Lock()
@@ -619,6 +640,9 @@ func TestAutoFlushErrorHandling(t *testing.T) {
isDirty = true
flushMutex.Unlock()
t.Logf("dbPath set to: %s", dbPath)
t.Logf("Expected JSONL path (which is a directory): %s", filepath.Join(failDir, "issues.jsonl"))
// Attempt flush (should fail)
flushToJSONL()

200
cmd/bd/nodb.go Normal file
View File

@@ -0,0 +1,200 @@
package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/storage/memory"
"github.com/steveyegge/beads/internal/types"
)
// initializeNoDbMode wires the global store to an in-memory backend seeded
// from .beads/issues.jsonl in the current directory. It is invoked when the
// --no-db flag is set. Returns an error when no .beads directory exists or
// the JSONL file cannot be loaded.
func initializeNoDbMode() error {
	cwd, err := os.Getwd()
	if err != nil {
		return fmt.Errorf("failed to get current directory: %w", err)
	}

	beadsDir := filepath.Join(cwd, ".beads")
	if _, statErr := os.Stat(beadsDir); os.IsNotExist(statErr) {
		return fmt.Errorf("no .beads directory found (hint: run 'bd init' first)")
	}

	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	memStore := memory.New(jsonlPath)
	debug := os.Getenv("BD_DEBUG") != ""

	// Seed from the JSONL file when present; otherwise start empty.
	if _, statErr := os.Stat(jsonlPath); statErr == nil {
		issues, loadErr := loadIssuesFromJSONL(jsonlPath)
		if loadErr != nil {
			return fmt.Errorf("failed to load issues from %s: %w", jsonlPath, loadErr)
		}
		if err := memStore.LoadFromIssues(issues); err != nil {
			return fmt.Errorf("failed to load issues into memory: %w", err)
		}
		if debug {
			fmt.Fprintf(os.Stderr, "Debug: loaded %d issues from %s\n", len(issues), jsonlPath)
		}
	} else if debug {
		fmt.Fprintf(os.Stderr, "Debug: no existing %s, starting with empty database\n", jsonlPath)
	}

	// Determine the issue prefix and record it in the store's config.
	prefix, err := detectPrefix(beadsDir, memStore)
	if err != nil {
		return fmt.Errorf("failed to detect prefix: %w", err)
	}
	if err := memStore.SetConfig(context.Background(), "issue_prefix", prefix); err != nil {
		return fmt.Errorf("failed to set prefix: %w", err)
	}
	if debug {
		fmt.Fprintf(os.Stderr, "Debug: using prefix '%s'\n", prefix)
	}

	// Publish the memory backend as the global store.
	store = memStore
	return nil
}
// loadIssuesFromJSONL reads all issues from a JSONL file (one JSON object per
// line). Blank lines are skipped; a malformed line aborts the load with an
// error naming the offending line number. The scanner error, if any, is
// returned after the loop.
func loadIssuesFromJSONL(path string) ([]*types.Issue, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	var issues []*types.Issue
	scanner := bufio.NewScanner(file)
	// Match the auto-import path: allow JSON lines up to 2MB. The default
	// bufio.Scanner token limit (64KB) would make loading fail on issues
	// with large descriptions/notes.
	scanner.Buffer(make([]byte, 0, 1024), 2*1024*1024)
	lineNum := 0
	for scanner.Scan() {
		lineNum++
		line := scanner.Text()

		// Skip empty lines
		if strings.TrimSpace(line) == "" {
			continue
		}

		var issue types.Issue
		if err := json.Unmarshal([]byte(line), &issue); err != nil {
			return nil, fmt.Errorf("line %d: %w", lineNum, err)
		}
		issues = append(issues, &issue)
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return issues, nil
}
// detectPrefix decides which issue prefix to use in --no-db mode.
// Order of preference:
//  1. issue-prefix from config.yaml, when set
//  2. the prefix shared by every existing issue ID
//  3. a sanitized form of the current directory name
//
// Returns an error when existing issues carry mixed prefixes, since
// auto-detection would then be ambiguous.
func detectPrefix(beadsDir string, memStore *memory.MemoryStorage) (string, error) {
	// Explicit configuration always wins.
	if configured := config.GetString("issue-prefix"); configured != "" {
		return configured, nil
	}

	// Infer from existing issues when they all agree on one prefix.
	if issues := memStore.GetAllIssues(); len(issues) > 0 {
		candidate := extractIssuePrefix(issues[0].ID)
		for _, iss := range issues {
			if extractIssuePrefix(iss.ID) != candidate {
				return "", fmt.Errorf("issues have mixed prefixes, please set issue-prefix in .beads/config.yaml")
			}
		}
		if candidate != "" {
			return candidate, nil
		}
		// All IDs lack a hyphen (empty prefix): fall through to directory name.
	}

	// Fall back to the current directory name, lowercased and stripped of
	// anything outside [a-z0-9-].
	cwd, err := os.Getwd()
	if err != nil {
		return "bd", nil // Ultimate fallback
	}
	sanitized := strings.Map(func(r rune) rune {
		switch {
		case (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-':
			return r
		case r >= 'A' && r <= 'Z':
			return r + ('a' - 'A') // Convert to lowercase
		default:
			return -1 // Remove character
		}
	}, filepath.Base(cwd))
	if sanitized == "" {
		sanitized = "bd"
	}
	return sanitized, nil
}
// extractIssuePrefix returns the part of an issue ID before the first hyphen,
// e.g. "bd-123" -> "bd". IDs without a hyphen yield the empty string.
func extractIssuePrefix(issueID string) string {
	if i := strings.Index(issueID, "-"); i >= 0 {
		return issueID[:i]
	}
	return ""
}
// writeIssuesToJSONL persists every issue held in the memory store to
// <beadsDir>/issues.jsonl via the shared atomic-write helper (temp file +
// rename + permissions).
func writeIssuesToJSONL(memStore *memory.MemoryStorage, beadsDir string) error {
	target := filepath.Join(beadsDir, "issues.jsonl")
	all := memStore.GetAllIssues()

	if err := writeJSONLAtomic(target, all); err != nil {
		return err
	}
	if os.Getenv("BD_DEBUG") != "" {
		fmt.Fprintf(os.Stderr, "Debug: wrote %d issues to %s\n", len(all), target)
	}
	return nil
}

View File

@@ -71,8 +71,10 @@ func Initialize() error {
v.SetDefault("no-daemon", false)
v.SetDefault("no-auto-flush", false)
v.SetDefault("no-auto-import", false)
v.SetDefault("no-db", false)
v.SetDefault("db", "")
v.SetDefault("actor", "")
v.SetDefault("issue-prefix", "")
// Additional environment variables (not prefixed with BD_)
// These are bound explicitly for backward compatibility

View File

@@ -0,0 +1,911 @@
// Package memory implements the storage interface using in-memory data structures.
// This is designed for --no-db mode where the database is loaded from JSONL at startup
// and written back to JSONL after each command.
package memory
import (
"context"
"database/sql"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/steveyegge/beads/internal/types"
)
// MemoryStorage implements the Storage interface using in-memory data
// structures. Built for --no-db mode: state is loaded from JSONL at startup
// and written back after each command. All map access goes through mu.
type MemoryStorage struct {
	mu sync.RWMutex // Protects all maps

	// Core data
	issues       map[string]*types.Issue        // ID -> Issue
	dependencies map[string][]*types.Dependency // IssueID -> Dependencies
	labels       map[string][]string            // IssueID -> Labels
	events       map[string][]*types.Event      // IssueID -> Events (audit trail)
	comments     map[string][]*types.Comment    // IssueID -> Comments
	config       map[string]string              // Config key-value pairs (e.g. issue_prefix)
	metadata     map[string]string              // Metadata key-value pairs
	counters     map[string]int                 // Prefix -> highest numeric ID seen (for ID generation)

	// For tracking
	dirty     map[string]bool // IssueIDs that have been modified since load
	jsonlPath string          // Path to source JSONL file (for reference)
	closed    bool            // presumably set by a Close method — not visible in this chunk, confirm
}
// New creates a new in-memory storage backend associated with the given JSONL
// path. Every internal map is pre-initialized so the store is immediately
// usable (writing to a nil map would panic).
func New(jsonlPath string) *MemoryStorage {
	m := &MemoryStorage{jsonlPath: jsonlPath}
	m.issues = make(map[string]*types.Issue)
	m.dependencies = make(map[string][]*types.Dependency)
	m.labels = make(map[string][]string)
	m.events = make(map[string][]*types.Event)
	m.comments = make(map[string][]*types.Comment)
	m.config = make(map[string]string)
	m.metadata = make(map[string]string)
	m.counters = make(map[string]int)
	m.dirty = make(map[string]bool)
	return m
}
// LoadFromIssues bulk-populates the store from a slice of issues (used when
// seeding from JSONL at startup). Dependencies, labels, and comments embedded
// in each issue are indexed into their own maps, and the per-prefix ID
// counter is advanced to the highest number seen so future issue creation
// never reuses an existing ID. Nil entries in the slice are ignored.
func (m *MemoryStorage) LoadFromIssues(issues []*types.Issue) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	for _, iss := range issues {
		if iss == nil {
			continue
		}
		m.issues[iss.ID] = iss

		if len(iss.Dependencies) > 0 {
			m.dependencies[iss.ID] = iss.Dependencies
		}
		if len(iss.Labels) > 0 {
			m.labels[iss.ID] = iss.Labels
		}
		if len(iss.Comments) > 0 {
			m.comments[iss.ID] = iss.Comments
		}

		// Keep the counter at the highest numeric suffix seen per prefix.
		if prefix, num := extractPrefixAndNumber(iss.ID); prefix != "" && num > 0 && num > m.counters[prefix] {
			m.counters[prefix] = num
		}
	}
	return nil
}
// GetAllIssues returns all issues in memory, sorted by ID (for export to
// JSONL). Each element is a shallow copy of the stored struct with its
// dependencies, labels, and comments attached from the side maps.
func (m *MemoryStorage) GetAllIssues() []*types.Issue {
	m.mu.RLock()
	defer m.mu.RUnlock()

	issues := make([]*types.Issue, 0, len(m.issues))
	for _, issue := range m.issues {
		// Shallow copy: protects the stored struct header from caller
		// mutation, but slice/pointer fields still alias the stored data
		// (this is not a deep copy).
		issueCopy := *issue
		// Attach dependencies
		if deps, ok := m.dependencies[issue.ID]; ok {
			issueCopy.Dependencies = deps
		}
		// Attach labels
		if labels, ok := m.labels[issue.ID]; ok {
			issueCopy.Labels = labels
		}
		// Attach comments
		if comments, ok := m.comments[issue.ID]; ok {
			issueCopy.Comments = comments
		}
		issues = append(issues, &issueCopy)
	}
	// Sort by ID for consistent output
	sort.Slice(issues, func(i, j int) bool {
		return issues[i].ID < issues[j].ID
	})
	return issues
}
// extractPrefixAndNumber splits an issue ID such as "bd-123" into its prefix
// and numeric suffix ("bd", 123). It returns ("", 0) when the ID has no
// hyphen, an empty numeric part, or any non-digit character after the first
// hyphen. Unlike the previous fmt.Sscanf-based parse, trailing garbage
// ("bd-123abc") and signs ("bd--5") are rejected, so malformed IDs can never
// feed bogus values into the per-prefix counters.
func extractPrefixAndNumber(id string) (string, int) {
	parts := strings.SplitN(id, "-", 2)
	if len(parts) != 2 || parts[1] == "" {
		return "", 0
	}
	num := 0
	for _, r := range parts[1] {
		if r < '0' || r > '9' {
			// Non-digit in the numeric part: treat the whole ID as unparseable.
			return "", 0
		}
		num = num*10 + int(r-'0')
	}
	return parts[0], num
}
// CreateIssue creates a new issue, assigning an ID from the per-prefix
// counter when the caller did not supply one, stamping timestamps, marking
// the issue dirty, and recording a "created" event.
//
// NOTE(review): the issue pointer is stored as-is, so caller mutations after
// CreateIssue returns will leak into the store — confirm callers don't reuse
// the struct. Also, when a caller supplies an explicit ID, the counter is
// not advanced, and a closed-at-creation issue gets no ClosedAt — confirm
// both are handled upstream.
func (m *MemoryStorage) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Validate before any state changes.
	if err := issue.Validate(); err != nil {
		return fmt.Errorf("validation failed: %w", err)
	}

	// Set timestamps (overwrites any caller-supplied values).
	now := time.Now()
	issue.CreatedAt = now
	issue.UpdatedAt = now

	// Generate ID if not set
	if issue.ID == "" {
		prefix := m.config["issue_prefix"]
		if prefix == "" {
			prefix = "bd" // Default fallback
		}
		// Get next ID
		m.counters[prefix]++
		issue.ID = fmt.Sprintf("%s-%d", prefix, m.counters[prefix])
	}

	// Check for duplicate
	if _, exists := m.issues[issue.ID]; exists {
		return fmt.Errorf("issue %s already exists", issue.ID)
	}

	// Store issue and flag it for the next JSONL flush.
	m.issues[issue.ID] = issue
	m.dirty[issue.ID] = true

	// Record event for the audit trail.
	event := &types.Event{
		IssueID:   issue.ID,
		EventType: types.EventCreated,
		Actor:     actor,
		CreatedAt: now,
	}
	m.events[issue.ID] = append(m.events[issue.ID], event)
	return nil
}
// CreateIssues creates multiple issues in one locked operation. All issues
// are validated first; then timestamps are stamped, missing IDs are assigned
// from the per-prefix counter, and collisions are rejected both against the
// store and within the batch; finally every issue is committed with a
// "created" event.
func (m *MemoryStorage) CreateIssues(ctx context.Context, issues []*types.Issue, actor string) error {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Phase 1: validate everything before touching any state.
	for i, iss := range issues {
		if err := iss.Validate(); err != nil {
			return fmt.Errorf("validation failed for issue %d: %w", i, err)
		}
	}

	now := time.Now()
	prefix := m.config["issue_prefix"]
	if prefix == "" {
		prefix = "bd"
	}

	// Phase 2: stamp timestamps, assign IDs where missing, and reject
	// duplicates — both against existing issues and within this batch.
	seen := make(map[string]bool, len(issues))
	for _, iss := range issues {
		iss.CreatedAt = now
		iss.UpdatedAt = now
		if iss.ID == "" {
			m.counters[prefix]++
			iss.ID = fmt.Sprintf("%s-%d", prefix, m.counters[prefix])
		}
		if _, exists := m.issues[iss.ID]; exists {
			return fmt.Errorf("issue %s already exists", iss.ID)
		}
		if seen[iss.ID] {
			return fmt.Errorf("duplicate ID within batch: %s", iss.ID)
		}
		seen[iss.ID] = true
	}

	// Phase 3: commit every issue, mark it dirty, and log a created event.
	for _, iss := range issues {
		m.issues[iss.ID] = iss
		m.dirty[iss.ID] = true
		m.events[iss.ID] = append(m.events[iss.ID], &types.Event{
			IssueID:   iss.ID,
			EventType: types.EventCreated,
			Actor:     actor,
			CreatedAt: now,
		})
	}
	return nil
}
// GetIssue retrieves an issue by ID. Returns (nil, nil) when the ID is not
// present — "not found" is not an error; callers check for a nil issue.
func (m *MemoryStorage) GetIssue(ctx context.Context, id string) (*types.Issue, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	issue, exists := m.issues[id]
	if !exists {
		return nil, nil
	}
	// Shallow copy so callers can't mutate the stored struct header;
	// slice/pointer fields still alias the stored data.
	issueCopy := *issue
	// Attach dependencies
	if deps, ok := m.dependencies[id]; ok {
		issueCopy.Dependencies = deps
	}
	// Attach labels
	if labels, ok := m.labels[id]; ok {
		issueCopy.Labels = labels
	}
	// NOTE(review): unlike GetAllIssues, comments are not attached here —
	// confirm whether callers are expected to fetch comments separately.
	return &issueCopy, nil
}
// UpdateIssue updates fields on an issue
//
// Recognized keys: title, description, design, acceptance_criteria, notes,
// status, priority, issue_type, assignee, external_ref. Unknown keys and
// values of unexpected dynamic types are silently ignored. UpdatedAt is
// always refreshed, even when no recognized key was supplied, and a single
// event is recorded (EventClosed when status transitions to closed,
// EventUpdated otherwise).
//
// NOTE(review): "priority" only matches a Go int — a float64 (e.g. from
// decoded JSON) is silently dropped; confirm callers always pass int.
func (m *MemoryStorage) UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	issue, exists := m.issues[id]
	if !exists {
		return fmt.Errorf("issue %s not found", id)
	}
	now := time.Now()
	issue.UpdatedAt = now
	// Apply updates
	for key, value := range updates {
		switch key {
		case "title":
			if v, ok := value.(string); ok {
				issue.Title = v
			}
		case "description":
			if v, ok := value.(string); ok {
				issue.Description = v
			}
		case "design":
			if v, ok := value.(string); ok {
				issue.Design = v
			}
		case "acceptance_criteria":
			if v, ok := value.(string); ok {
				issue.AcceptanceCriteria = v
			}
		case "notes":
			if v, ok := value.(string); ok {
				issue.Notes = v
			}
		case "status":
			if v, ok := value.(string); ok {
				oldStatus := issue.Status
				issue.Status = types.Status(v)
				// Manage closed_at: set on open->closed, clear on closed->open;
				// a closed->closed update leaves the original timestamp intact.
				if issue.Status == types.StatusClosed && oldStatus != types.StatusClosed {
					issue.ClosedAt = &now
				} else if issue.Status != types.StatusClosed && oldStatus == types.StatusClosed {
					issue.ClosedAt = nil
				}
			}
		case "priority":
			if v, ok := value.(int); ok {
				issue.Priority = v
			}
		case "issue_type":
			if v, ok := value.(string); ok {
				issue.IssueType = types.IssueType(v)
			}
		case "assignee":
			// Explicit nil clears the assignee; a non-string non-nil is ignored.
			if v, ok := value.(string); ok {
				issue.Assignee = v
			} else if value == nil {
				issue.Assignee = ""
			}
		case "external_ref":
			// Explicit nil clears the reference; a non-string non-nil is ignored.
			if v, ok := value.(string); ok {
				issue.ExternalRef = &v
			} else if value == nil {
				issue.ExternalRef = nil
			}
		}
	}
	m.dirty[id] = true
	// Record event
	eventType := types.EventUpdated
	if status, hasStatus := updates["status"]; hasStatus {
		if status == string(types.StatusClosed) {
			eventType = types.EventClosed
		}
	}
	event := &types.Event{
		IssueID:   id,
		EventType: eventType,
		Actor:     actor,
		CreatedAt: now,
	}
	m.events[id] = append(m.events[id], event)
	return nil
}
// CloseIssue closes an issue with a reason
//
// Delegates to UpdateIssue with status=closed, which sets ClosedAt and
// records an EventClosed event.
// NOTE(review): the reason argument is not recorded anywhere in this
// backend — confirm whether parity with the SQL backend requires it.
func (m *MemoryStorage) CloseIssue(ctx context.Context, id string, reason string, actor string) error {
	return m.UpdateIssue(ctx, id, map[string]interface{}{
		"status": string(types.StatusClosed),
	}, actor)
}
// SearchIssues finds issues matching query and filters.
//
// The query is matched case-insensitively against title, description, and
// ID. Filter fields (Status, Priority, IssueType, Assignee, Labels, IDs)
// are ANDed together; Labels requires ALL listed labels to be present.
// Results are shallow copies (with dependencies/labels attached), sorted by
// ascending priority and then by newest CreatedAt, and truncated to
// filter.Limit when it is positive.
func (m *MemoryStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	// Lowercase the query once up front. The previous code re-assigned
	// query = strings.ToLower(query) inside the loop, redoing loop-invariant
	// work on every iteration (harmless only because ToLower is idempotent).
	query = strings.ToLower(query)
	var results []*types.Issue
	for _, issue := range m.issues {
		// Apply filters
		if filter.Status != nil && issue.Status != *filter.Status {
			continue
		}
		if filter.Priority != nil && issue.Priority != *filter.Priority {
			continue
		}
		if filter.IssueType != nil && issue.IssueType != *filter.IssueType {
			continue
		}
		if filter.Assignee != nil && issue.Assignee != *filter.Assignee {
			continue
		}
		// Query search (title, description, or ID)
		if query != "" {
			if !strings.Contains(strings.ToLower(issue.Title), query) &&
				!strings.Contains(strings.ToLower(issue.Description), query) &&
				!strings.Contains(strings.ToLower(issue.ID), query) {
				continue
			}
		}
		// Label filtering: must have ALL specified labels
		if len(filter.Labels) > 0 {
			issueLabels := m.labels[issue.ID]
			hasAllLabels := true
			for _, reqLabel := range filter.Labels {
				found := false
				for _, label := range issueLabels {
					if label == reqLabel {
						found = true
						break
					}
				}
				if !found {
					hasAllLabels = false
					break
				}
			}
			if !hasAllLabels {
				continue
			}
		}
		// ID filtering: when IDs are given, the issue must be one of them.
		if len(filter.IDs) > 0 {
			found := false
			for _, filterID := range filter.IDs {
				if issue.ID == filterID {
					found = true
					break
				}
			}
			if !found {
				continue
			}
		}
		// Copy issue and attach metadata so callers cannot mutate stored state.
		issueCopy := *issue
		if deps, ok := m.dependencies[issue.ID]; ok {
			issueCopy.Dependencies = deps
		}
		if labels, ok := m.labels[issue.ID]; ok {
			issueCopy.Labels = labels
		}
		results = append(results, &issueCopy)
	}
	// Sort by priority (ascending), then newest-first by created_at.
	sort.Slice(results, func(i, j int) bool {
		if results[i].Priority != results[j].Priority {
			return results[i].Priority < results[j].Priority
		}
		return results[i].CreatedAt.After(results[j].CreatedAt)
	})
	// Apply limit
	if filter.Limit > 0 && len(results) > filter.Limit {
		results = results[:filter.Limit]
	}
	return results, nil
}
// AddDependency adds a dependency edge between two existing issues.
//
// Fails when either endpoint is missing or when an identical edge (same
// target and same type) is already recorded. Marks the source issue dirty
// on success.
func (m *MemoryStorage) AddDependency(ctx context.Context, dep *types.Dependency, actor string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Both endpoints must exist before an edge can be recorded.
	for _, id := range []string{dep.IssueID, dep.DependsOnID} {
		if _, ok := m.issues[id]; !ok {
			return fmt.Errorf("issue %s not found", id)
		}
	}
	// Reject exact duplicates (same target and same dependency type).
	for _, existing := range m.dependencies[dep.IssueID] {
		if existing.DependsOnID == dep.DependsOnID && existing.Type == dep.Type {
			return fmt.Errorf("dependency already exists")
		}
	}
	m.dependencies[dep.IssueID] = append(m.dependencies[dep.IssueID], dep)
	m.dirty[dep.IssueID] = true
	return nil
}
// RemoveDependency removes every edge from issueID to dependsOnID,
// regardless of dependency type. The issue is marked dirty even when no
// matching edge existed (same as before).
func (m *MemoryStorage) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	kept := make([]*types.Dependency, 0)
	for _, edge := range m.dependencies[issueID] {
		if edge.DependsOnID == dependsOnID {
			continue // drop this edge
		}
		kept = append(kept, edge)
	}
	m.dependencies[issueID] = kept
	m.dirty[issueID] = true
	return nil
}
// GetDependencies returns shallow copies of the issues that issueID
// depends on. Edges pointing at unknown issue IDs are silently skipped.
func (m *MemoryStorage) GetDependencies(ctx context.Context, issueID string) ([]*types.Issue, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	var out []*types.Issue
	for _, edge := range m.dependencies[issueID] {
		target, ok := m.issues[edge.DependsOnID]
		if !ok {
			continue
		}
		cp := *target
		out = append(out, &cp)
	}
	return out, nil
}
// GetDependents gets issues that depend on this issue (reverse lookup).
//
// Returns shallow copies so callers cannot mutate internal state — the
// previous implementation leaked raw internal pointers here, inconsistent
// with GetIssue, GetDependencies, and GetIssuesByLabel, which all copy.
func (m *MemoryStorage) GetDependents(ctx context.Context, issueID string) ([]*types.Issue, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	var results []*types.Issue
	for id, deps := range m.dependencies {
		for _, dep := range deps {
			if dep.DependsOnID == issueID {
				if issue, exists := m.issues[id]; exists {
					issueCopy := *issue
					results = append(results, &issueCopy)
				}
				// One match per dependent is enough; skip its remaining edges.
				break
			}
		}
	}
	return results, nil
}
// GetDependencyRecords gets dependency records for an issue
//
// Returns the internal slice directly (not a copy); callers must treat the
// result as read-only.
func (m *MemoryStorage) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.dependencies[issueID], nil
}
// GetAllDependencyRecords returns a snapshot of the whole dependency map.
//
// Only the map itself is copied; the per-issue slices (and the records they
// point to) are shared with internal state and must be treated as read-only.
func (m *MemoryStorage) GetAllDependencyRecords(ctx context.Context) (map[string][]*types.Dependency, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	snapshot := make(map[string][]*types.Dependency)
	for issueID, edges := range m.dependencies {
		snapshot[issueID] = edges
	}
	return snapshot, nil
}
// GetDependencyTree gets the dependency tree for an issue.
//
// Simplified implementation: only direct dependencies are returned, each as
// a depth-1 node; maxDepth and showAllPaths are ignored.
func (m *MemoryStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool) ([]*types.TreeNode, error) {
	deps, err := m.GetDependencies(ctx, issueID)
	if err != nil {
		return nil, err
	}
	var nodes []*types.TreeNode
	for _, dep := range deps {
		// Project the issue's display fields onto a depth-1 tree node.
		n := &types.TreeNode{Depth: 1}
		n.ID = dep.ID
		n.Title = dep.Title
		n.Description = dep.Description
		n.Status = dep.Status
		n.Priority = dep.Priority
		n.IssueType = dep.IssueType
		nodes = append(nodes, n)
	}
	return nodes, nil
}
// DetectCycles detects dependency cycles
//
// Simplified stub: always reports no cycles, regardless of the actual
// dependency graph.
func (m *MemoryStorage) DetectCycles(ctx context.Context) ([][]*types.Issue, error) {
	// Simplified - return empty (no cycles detected)
	return nil, nil
}
// Label methods.
// AddLabel attaches a label to an existing issue. Adding a label that is
// already present is a silent no-op (and does not mark the issue dirty).
func (m *MemoryStorage) AddLabel(ctx context.Context, issueID, label, actor string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if _, ok := m.issues[issueID]; !ok {
		return fmt.Errorf("issue %s not found", issueID)
	}
	for _, existing := range m.labels[issueID] {
		if existing == label {
			// Already present: succeed without duplicating.
			return nil
		}
	}
	m.labels[issueID] = append(m.labels[issueID], label)
	m.dirty[issueID] = true
	return nil
}
// RemoveLabel detaches a label from an issue. Removing a label that is not
// present still succeeds and still marks the issue dirty.
func (m *MemoryStorage) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	kept := make([]string, 0)
	for _, existing := range m.labels[issueID] {
		if existing != label {
			kept = append(kept, existing)
		}
	}
	m.labels[issueID] = kept
	m.dirty[issueID] = true
	return nil
}
// GetLabels returns the labels attached to an issue.
// Returns the internal slice directly (not a copy); callers must treat it
// as read-only.
func (m *MemoryStorage) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.labels[issueID], nil
}
// GetIssuesByLabel returns shallow copies of every issue carrying the
// given label. Label entries for unknown issue IDs are skipped.
func (m *MemoryStorage) GetIssuesByLabel(ctx context.Context, label string) ([]*types.Issue, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	var matches []*types.Issue
	for issueID, labelSet := range m.labels {
		for _, candidate := range labelSet {
			if candidate != label {
				continue
			}
			if stored, ok := m.issues[issueID]; ok {
				cp := *stored
				matches = append(matches, &cp)
			}
			break
		}
	}
	return matches, nil
}
// Stub implementations for other required methods
// GetReadyWork returns all open issues.
// Simplified: unlike the SQL backend, blocking dependencies are NOT
// checked (every open issue is returned) and the filter argument is
// ignored entirely.
func (m *MemoryStorage) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
	return m.SearchIssues(ctx, "", types.IssueFilter{
		Status: func() *types.Status { s := types.StatusOpen; return &s }(),
	})
}
// GetBlockedIssues is a stub: always reports no blocked issues.
func (m *MemoryStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
	return nil, nil
}
// GetEpicsEligibleForClosure is a stub: always reports no eligible epics.
func (m *MemoryStorage) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error) {
	return nil, nil
}
// AddComment is a no-op stub: the comment is silently discarded.
// NOTE(review): AddIssueComment below does persist comments — confirm this
// legacy entry point is intentionally inert.
func (m *MemoryStorage) AddComment(ctx context.Context, issueID, actor, comment string) error {
	return nil
}
// GetEvents returns the recorded event history for an issue, oldest first.
// When limit > 0 only the most recent limit events are returned. The
// returned slice aliases internal storage; treat it as read-only.
func (m *MemoryStorage) GetEvents(ctx context.Context, issueID string, limit int) ([]*types.Event, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	history := m.events[issueID]
	if limit > 0 && limit < len(history) {
		// Keep only the tail: the limit most recent events.
		history = history[len(history)-limit:]
	}
	return history, nil
}
// AddIssueComment appends a comment to an issue and returns the stored
// record. Comment IDs are 1-based positions within the issue's list.
// NOTE(review): the issue's existence is not verified — commenting on an
// unknown ID silently creates a comment list for it; confirm intended.
func (m *MemoryStorage) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	nextID := int64(len(m.comments[issueID]) + 1)
	c := &types.Comment{
		ID:        nextID,
		IssueID:   issueID,
		Author:    author,
		Text:      text,
		CreatedAt: time.Now(),
	}
	m.comments[issueID] = append(m.comments[issueID], c)
	m.dirty[issueID] = true
	return c, nil
}
// GetIssueComments returns all comments on an issue, in insertion order.
// Returns the internal slice directly; callers must treat it as read-only.
func (m *MemoryStorage) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.comments[issueID], nil
}
// GetStatistics tallies the stored issues by status.
// Issues whose status is none of open/in_progress/blocked/closed still
// count toward TotalIssues but no per-status bucket.
func (m *MemoryStorage) GetStatistics(ctx context.Context) (*types.Statistics, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	stats := &types.Statistics{
		TotalIssues: len(m.issues),
	}
	for _, issue := range m.issues {
		switch issue.Status {
		case types.StatusOpen:
			stats.OpenIssues++
		case types.StatusInProgress:
			stats.InProgressIssues++
		case types.StatusBlocked:
			stats.BlockedIssues++
		case types.StatusClosed:
			stats.ClosedIssues++
		}
	}
	return stats, nil
}
// Dirty tracking.
// GetDirtyIssues returns the IDs of issues modified since the last clear.
// Order is unspecified (map iteration); returns nil when nothing is dirty.
func (m *MemoryStorage) GetDirtyIssues(ctx context.Context) ([]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if len(m.dirty) == 0 {
		return nil, nil
	}
	ids := make([]string, 0, len(m.dirty))
	for id := range m.dirty {
		ids = append(ids, id)
	}
	return ids, nil
}
// ClearDirtyIssues resets the entire dirty set (typically after an export).
func (m *MemoryStorage) ClearDirtyIssues(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.dirty = make(map[string]bool)
	return nil
}
// ClearDirtyIssuesByID removes only the given IDs from the dirty set.
// IDs that are not dirty are ignored.
func (m *MemoryStorage) ClearDirtyIssuesByID(ctx context.Context, issueIDs []string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	for _, id := range issueIDs {
		delete(m.dirty, id)
	}
	return nil
}
// Config
// SetConfig stores a config key/value pair, overwriting any existing value.
func (m *MemoryStorage) SetConfig(ctx context.Context, key, value string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.config[key] = value
	return nil
}
// GetConfig returns the value for a config key, or "" when the key is
// absent (missing keys are not an error).
func (m *MemoryStorage) GetConfig(ctx context.Context, key string) (string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.config[key], nil
}
// DeleteConfig removes a config key; deleting an absent key is a no-op.
func (m *MemoryStorage) DeleteConfig(ctx context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.config, key)
	return nil
}
// GetAllConfig returns a snapshot copy of the entire config map, so the
// caller can read or mutate it without affecting internal state.
func (m *MemoryStorage) GetAllConfig(ctx context.Context) (map[string]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	snapshot := make(map[string]string, len(m.config))
	for key, value := range m.config {
		snapshot[key] = value
	}
	return snapshot, nil
}
// Metadata
// SetMetadata stores a metadata key/value pair, overwriting any existing value.
func (m *MemoryStorage) SetMetadata(ctx context.Context, key, value string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.metadata[key] = value
	return nil
}
// GetMetadata returns the value for a metadata key, or "" when absent.
func (m *MemoryStorage) GetMetadata(ctx context.Context, key string) (string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.metadata[key], nil
}
// Prefix rename operations (unsupported or no-ops for memory storage).
// UpdateIssueID always fails: renaming an issue ID is not supported in
// --no-db mode (unlike the comment above suggests, this one is an error,
// not a no-op).
func (m *MemoryStorage) UpdateIssueID(ctx context.Context, oldID, newID string, issue *types.Issue, actor string) error {
	return fmt.Errorf("UpdateIssueID not supported in --no-db mode")
}
// RenameDependencyPrefix is a no-op for memory storage.
func (m *MemoryStorage) RenameDependencyPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	return nil
}
// RenameCounterPrefix is a no-op for memory storage.
func (m *MemoryStorage) RenameCounterPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	return nil
}
// Lifecycle
// Close marks the storage as closed. Data is kept in memory and no visible
// method consults the closed flag, so operations after Close still succeed.
func (m *MemoryStorage) Close() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.closed = true
	return nil
}
// Path returns the JSONL file path this storage was created with
// (may be empty for a purely in-memory instance).
func (m *MemoryStorage) Path() string {
	return m.jsonlPath
}
// UnderlyingDB returns nil for memory storage (no SQL database).
// Callers must nil-check the result before use.
func (m *MemoryStorage) UnderlyingDB() *sql.DB {
	return nil
}
// UnderlyingConn returns an error for memory storage (no SQL database).
func (m *MemoryStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {
	return nil, fmt.Errorf("UnderlyingConn not available in memory storage")
}
// SyncAllCounters rebuilds every per-prefix ID counter from the issues
// currently in memory, so the next generated ID cannot collide with an
// existing one.
func (m *MemoryStorage) SyncAllCounters(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Recompute counters from scratch: each prefix gets the highest
	// numeric suffix seen among stored issue IDs.
	fresh := make(map[string]int)
	for _, issue := range m.issues {
		prefix, num := extractPrefixAndNumber(issue.ID)
		if prefix == "" || num <= 0 {
			continue // unparseable ID; does not influence counters
		}
		if num > fresh[prefix] {
			fresh[prefix] = num
		}
	}
	m.counters = fresh
	return nil
}
// MarkIssueDirty marks an issue as dirty for export
//
// The issue's existence is not checked; marking an unknown ID simply adds
// it to the dirty set.
func (m *MemoryStorage) MarkIssueDirty(ctx context.Context, issueID string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.dirty[issueID] = true
	return nil
}

View File

@@ -0,0 +1,915 @@
package memory
import (
"context"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// setupTestMemory creates a MemoryStorage with the "bd" issue prefix
// configured, failing the test on any setup error.
func setupTestMemory(t *testing.T) *MemoryStorage {
	t.Helper()
	store := New("")
	ctx := context.Background()
	// Set issue_prefix config
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue_prefix: %v", err)
	}
	return store
}
// TestCreateIssue verifies that CreateIssue assigns an ID and timestamps.
func TestCreateIssue(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	issue := &types.Issue{
		Title: "Test issue",
		Description: "Test description",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	err := store.CreateIssue(ctx, issue, "test-user")
	if err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	if issue.ID == "" {
		t.Error("Issue ID should be set")
	}
	if !issue.CreatedAt.After(time.Time{}) {
		t.Error("CreatedAt should be set")
	}
	if !issue.UpdatedAt.After(time.Time{}) {
		t.Error("UpdatedAt should be set")
	}
}
// TestCreateIssueValidation table-tests CreateIssue's validation: missing
// title, out-of-range priority, and unknown status must all be rejected.
func TestCreateIssueValidation(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	tests := []struct {
		name string
		issue *types.Issue
		wantErr bool
	}{
		{
			name: "valid issue",
			issue: &types.Issue{
				Title: "Valid",
				Status: types.StatusOpen,
				Priority: 2,
				IssueType: types.TypeTask,
			},
			wantErr: false,
		},
		{
			name: "missing title",
			issue: &types.Issue{
				Status: types.StatusOpen,
				Priority: 2,
				IssueType: types.TypeTask,
			},
			wantErr: true,
		},
		{
			name: "invalid priority",
			issue: &types.Issue{
				Title: "Test",
				Status: types.StatusOpen,
				Priority: 10,
				IssueType: types.TypeTask,
			},
			wantErr: true,
		},
		{
			name: "invalid status",
			issue: &types.Issue{
				Title: "Test",
				Status: "invalid",
				Priority: 2,
				IssueType: types.TypeTask,
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := store.CreateIssue(ctx, tt.issue, "test-user")
			if (err != nil) != tt.wantErr {
				t.Errorf("CreateIssue() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestGetIssue verifies that a created issue round-trips through GetIssue
// with its text and assignee fields intact.
func TestGetIssue(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	original := &types.Issue{
		Title: "Test issue",
		Description: "Description",
		Design: "Design notes",
		AcceptanceCriteria: "Acceptance",
		Notes: "Notes",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeFeature,
		Assignee: "alice",
	}
	err := store.CreateIssue(ctx, original, "test-user")
	if err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	// Retrieve the issue
	retrieved, err := store.GetIssue(ctx, original.ID)
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if retrieved == nil {
		t.Fatal("GetIssue returned nil")
	}
	if retrieved.ID != original.ID {
		t.Errorf("ID mismatch: got %v, want %v", retrieved.ID, original.ID)
	}
	if retrieved.Title != original.Title {
		t.Errorf("Title mismatch: got %v, want %v", retrieved.Title, original.Title)
	}
	if retrieved.Description != original.Description {
		t.Errorf("Description mismatch: got %v, want %v", retrieved.Description, original.Description)
	}
	if retrieved.Assignee != original.Assignee {
		t.Errorf("Assignee mismatch: got %v, want %v", retrieved.Assignee, original.Assignee)
	}
}
// TestGetIssueNotFound verifies the (nil, nil) contract for unknown IDs.
func TestGetIssueNotFound(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	issue, err := store.GetIssue(ctx, "bd-999")
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if issue != nil {
		t.Errorf("Expected nil for non-existent issue, got %v", issue)
	}
}
// TestCreateIssues table-tests batch creation: empty batch, single and
// multiple issues, a validation failure mid-batch, and an in-batch
// duplicate ID. Each case runs against a fresh store.
func TestCreateIssues(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	tests := []struct {
		name string
		issues []*types.Issue
		wantErr bool
	}{
		{
			name: "empty batch",
			issues: []*types.Issue{},
			wantErr: false,
		},
		{
			name: "single issue",
			issues: []*types.Issue{
				{Title: "Single issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
			},
			wantErr: false,
		},
		{
			name: "multiple issues",
			issues: []*types.Issue{
				{Title: "Issue 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
				{Title: "Issue 2", Status: types.StatusInProgress, Priority: 2, IssueType: types.TypeBug},
				{Title: "Issue 3", Status: types.StatusOpen, Priority: 3, IssueType: types.TypeFeature},
			},
			wantErr: false,
		},
		{
			name: "validation error - missing title",
			issues: []*types.Issue{
				{Title: "Valid issue", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
				{Title: "", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
			},
			wantErr: true,
		},
		{
			name: "duplicate ID within batch error",
			issues: []*types.Issue{
				{ID: "dup-1", Title: "First", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
				{ID: "dup-1", Title: "Second", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create fresh storage for each test
			testStore := setupTestMemory(t)
			defer testStore.Close()
			err := testStore.CreateIssues(ctx, tt.issues, "test-user")
			if (err != nil) != tt.wantErr {
				t.Errorf("CreateIssues() error = %v, wantErr %v", err, tt.wantErr)
			}
			if !tt.wantErr && len(tt.issues) > 0 {
				// Verify all issues got IDs
				for i, issue := range tt.issues {
					if issue.ID == "" {
						t.Errorf("issue %d: ID should be set", i)
					}
					if !issue.CreatedAt.After(time.Time{}) {
						t.Errorf("issue %d: CreatedAt should be set", i)
					}
				}
			}
		})
	}
}
// TestUpdateIssue verifies that title, priority, and status updates are
// applied and visible through GetIssue.
func TestUpdateIssue(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title: "Original",
		Status: types.StatusOpen,
		Priority: 2,
		IssueType: types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	// Update it
	updates := map[string]interface{}{
		"title": "Updated",
		"priority": 1,
		"status": string(types.StatusInProgress),
	}
	if err := store.UpdateIssue(ctx, issue.ID, updates, "test-user"); err != nil {
		t.Fatalf("UpdateIssue failed: %v", err)
	}
	// Retrieve and verify
	updated, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if updated.Title != "Updated" {
		t.Errorf("Title not updated: got %v", updated.Title)
	}
	if updated.Priority != 1 {
		t.Errorf("Priority not updated: got %v", updated.Priority)
	}
	if updated.Status != types.StatusInProgress {
		t.Errorf("Status not updated: got %v", updated.Status)
	}
}
// TestCloseIssue verifies that closing sets both Status and ClosedAt.
func TestCloseIssue(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title: "Test",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	// Close it
	if err := store.CloseIssue(ctx, issue.ID, "Completed", "test-user"); err != nil {
		t.Fatalf("CloseIssue failed: %v", err)
	}
	// Verify
	closed, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if closed.Status != types.StatusClosed {
		t.Errorf("Status should be closed, got %v", closed.Status)
	}
	if closed.ClosedAt == nil {
		t.Error("ClosedAt should be set")
	}
}
// TestSearchIssues table-tests text search plus status/priority/type
// filters over three fixture issues.
func TestSearchIssues(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create test issues
	issues := []*types.Issue{
		{Title: "Bug fix", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeBug},
		{Title: "New feature", Status: types.StatusInProgress, Priority: 2, IssueType: types.TypeFeature},
		{Title: "Task", Status: types.StatusOpen, Priority: 3, IssueType: types.TypeTask},
	}
	for _, issue := range issues {
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue failed: %v", err)
		}
	}
	tests := []struct {
		name string
		query string
		filter types.IssueFilter
		wantSize int
	}{
		{
			name: "all issues",
			query: "",
			filter: types.IssueFilter{},
			wantSize: 3,
		},
		{
			name: "search by title",
			query: "feature",
			filter: types.IssueFilter{},
			wantSize: 1,
		},
		{
			name: "filter by status",
			query: "",
			filter: types.IssueFilter{Status: func() *types.Status { s := types.StatusOpen; return &s }()},
			wantSize: 2,
		},
		{
			name: "filter by priority",
			query: "",
			filter: types.IssueFilter{Priority: func() *int { p := 1; return &p }()},
			wantSize: 1,
		},
		{
			// NOTE(review): the closure's local `t` shadows the outer
			// *testing.T — harmless here, but rename to avoid confusion.
			name: "filter by type",
			query: "",
			filter: types.IssueFilter{IssueType: func() *types.IssueType { t := types.TypeBug; return &t }()},
			wantSize: 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			results, err := store.SearchIssues(ctx, tt.query, tt.filter)
			if err != nil {
				t.Fatalf("SearchIssues failed: %v", err)
			}
			if len(results) != tt.wantSize {
				t.Errorf("Expected %d results, got %d", tt.wantSize, len(results))
			}
		})
	}
}
// TestDependencies exercises the full dependency lifecycle: add an edge,
// read it back from both directions, then remove it.
func TestDependencies(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create two issues
	issue1 := &types.Issue{
		Title: "Issue 1",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	issue2 := &types.Issue{
		Title: "Issue 2",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue1, "test-user"); err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	if err := store.CreateIssue(ctx, issue2, "test-user"); err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	// Add dependency
	dep := &types.Dependency{
		IssueID: issue1.ID,
		DependsOnID: issue2.ID,
		Type: types.DepBlocks,
	}
	if err := store.AddDependency(ctx, dep, "test-user"); err != nil {
		t.Fatalf("AddDependency failed: %v", err)
	}
	// Get dependencies
	deps, err := store.GetDependencies(ctx, issue1.ID)
	if err != nil {
		t.Fatalf("GetDependencies failed: %v", err)
	}
	if len(deps) != 1 {
		t.Errorf("Expected 1 dependency, got %d", len(deps))
	}
	if deps[0].ID != issue2.ID {
		t.Errorf("Dependency mismatch: got %v", deps[0].ID)
	}
	// Get dependents
	dependents, err := store.GetDependents(ctx, issue2.ID)
	if err != nil {
		t.Fatalf("GetDependents failed: %v", err)
	}
	if len(dependents) != 1 {
		t.Errorf("Expected 1 dependent, got %d", len(dependents))
	}
	// Remove dependency
	if err := store.RemoveDependency(ctx, issue1.ID, issue2.ID, "test-user"); err != nil {
		t.Fatalf("RemoveDependency failed: %v", err)
	}
	// Verify removed
	deps, err = store.GetDependencies(ctx, issue1.ID)
	if err != nil {
		t.Fatalf("GetDependencies failed: %v", err)
	}
	if len(deps) != 0 {
		t.Errorf("Expected 0 dependencies after removal, got %d", len(deps))
	}
}
// TestLabels verifies add, list, and remove of labels on a single issue.
func TestLabels(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title: "Test",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	// Add labels
	if err := store.AddLabel(ctx, issue.ID, "bug", "test-user"); err != nil {
		t.Fatalf("AddLabel failed: %v", err)
	}
	if err := store.AddLabel(ctx, issue.ID, "critical", "test-user"); err != nil {
		t.Fatalf("AddLabel failed: %v", err)
	}
	// Get labels
	labels, err := store.GetLabels(ctx, issue.ID)
	if err != nil {
		t.Fatalf("GetLabels failed: %v", err)
	}
	if len(labels) != 2 {
		t.Errorf("Expected 2 labels, got %d", len(labels))
	}
	// Remove label
	if err := store.RemoveLabel(ctx, issue.ID, "bug", "test-user"); err != nil {
		t.Fatalf("RemoveLabel failed: %v", err)
	}
	// Verify
	labels, err = store.GetLabels(ctx, issue.ID)
	if err != nil {
		t.Fatalf("GetLabels failed: %v", err)
	}
	if len(labels) != 1 {
		t.Errorf("Expected 1 label after removal, got %d", len(labels))
	}
}
// TestComments verifies AddIssueComment stores a comment retrievable via
// GetIssueComments with its text intact.
func TestComments(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title: "Test",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	// Add comment
	comment, err := store.AddIssueComment(ctx, issue.ID, "alice", "First comment")
	if err != nil {
		t.Fatalf("AddIssueComment failed: %v", err)
	}
	if comment == nil {
		t.Fatal("Comment should not be nil")
	}
	// Get comments
	comments, err := store.GetIssueComments(ctx, issue.ID)
	if err != nil {
		t.Fatalf("GetIssueComments failed: %v", err)
	}
	if len(comments) != 1 {
		t.Errorf("Expected 1 comment, got %d", len(comments))
	}
	if comments[0].Text != "First comment" {
		t.Errorf("Comment text mismatch: got %v", comments[0].Text)
	}
}
// TestLoadFromIssues verifies bulk loading: issues, their labels and
// dependencies are visible afterwards, and the "bd" counter is advanced to
// the highest loaded ID.
func TestLoadFromIssues(t *testing.T) {
	store := New("")
	defer store.Close()
	issues := []*types.Issue{
		{
			ID: "bd-1",
			Title: "Issue 1",
			Status: types.StatusOpen,
			Priority: 1,
			IssueType: types.TypeTask,
			Labels: []string{"bug", "critical"},
			Dependencies: []*types.Dependency{{IssueID: "bd-1", DependsOnID: "bd-2", Type: types.DepBlocks}},
		},
		{
			ID: "bd-2",
			Title: "Issue 2",
			Status: types.StatusOpen,
			Priority: 1,
			IssueType: types.TypeTask,
		},
	}
	if err := store.LoadFromIssues(issues); err != nil {
		t.Fatalf("LoadFromIssues failed: %v", err)
	}
	// Verify issues loaded
	ctx := context.Background()
	loaded, err := store.GetIssue(ctx, "bd-1")
	if err != nil {
		t.Fatalf("GetIssue failed: %v", err)
	}
	if loaded == nil {
		t.Fatal("Issue should be loaded")
	}
	if loaded.Title != "Issue 1" {
		t.Errorf("Title mismatch: got %v", loaded.Title)
	}
	// Verify labels loaded
	if len(loaded.Labels) != 2 {
		t.Errorf("Expected 2 labels, got %d", len(loaded.Labels))
	}
	// Verify dependencies loaded
	if len(loaded.Dependencies) != 1 {
		t.Errorf("Expected 1 dependency, got %d", len(loaded.Dependencies))
	}
	// Verify counter updated
	if store.counters["bd"] != 2 {
		t.Errorf("Expected counter bd=2, got %d", store.counters["bd"])
	}
}
// TestGetAllIssues verifies GetAllIssues returns every issue sorted by ID.
func TestGetAllIssues(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create issues
	for i := 1; i <= 3; i++ {
		issue := &types.Issue{
			Title: "Issue",
			Status: types.StatusOpen,
			Priority: 1,
			IssueType: types.TypeTask,
		}
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue failed: %v", err)
		}
	}
	// Get all
	all := store.GetAllIssues()
	if len(all) != 3 {
		t.Errorf("Expected 3 issues, got %d", len(all))
	}
	// Verify sorted by ID
	for i := 1; i < len(all); i++ {
		if all[i-1].ID >= all[i].ID {
			t.Error("Issues should be sorted by ID")
		}
	}
}
// TestDirtyTracking verifies that creation marks an issue dirty and that
// ClearDirtyIssues empties the dirty set.
func TestDirtyTracking(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title: "Test",
		Status: types.StatusOpen,
		Priority: 1,
		IssueType: types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
		t.Fatalf("CreateIssue failed: %v", err)
	}
	// Should be dirty
	dirty, err := store.GetDirtyIssues(ctx)
	if err != nil {
		t.Fatalf("GetDirtyIssues failed: %v", err)
	}
	if len(dirty) != 1 {
		t.Errorf("Expected 1 dirty issue, got %d", len(dirty))
	}
	// Clear dirty
	if err := store.ClearDirtyIssues(ctx); err != nil {
		t.Fatalf("ClearDirtyIssues failed: %v", err)
	}
	dirty, err = store.GetDirtyIssues(ctx)
	if err != nil {
		t.Fatalf("GetDirtyIssues failed: %v", err)
	}
	if len(dirty) != 0 {
		t.Errorf("Expected 0 dirty issues after clear, got %d", len(dirty))
	}
}
// TestStatistics verifies GetStatistics buckets issues by status. The
// "closed" fixture is created then explicitly closed via CloseIssue so its
// closed state goes through the normal transition path.
func TestStatistics(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Create issues with different statuses
	issues := []*types.Issue{
		{Title: "Open 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
		{Title: "Open 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
		{Title: "In Progress", Status: types.StatusInProgress, Priority: 1, IssueType: types.TypeTask},
		{Title: "Closed", Status: types.StatusClosed, Priority: 1, IssueType: types.TypeTask, ClosedAt: func() *time.Time { t := time.Now(); return &t }()},
	}
	for _, issue := range issues {
		if err := store.CreateIssue(ctx, issue, "test-user"); err != nil {
			t.Fatalf("CreateIssue failed: %v", err)
		}
		// Close the one marked as closed
		if issue.Status == types.StatusClosed {
			if err := store.CloseIssue(ctx, issue.ID, "Done", "test-user"); err != nil {
				t.Fatalf("CloseIssue failed: %v", err)
			}
		}
	}
	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics failed: %v", err)
	}
	if stats.TotalIssues != 4 {
		t.Errorf("Expected 4 total issues, got %d", stats.TotalIssues)
	}
	if stats.OpenIssues != 2 {
		t.Errorf("Expected 2 open issues, got %d", stats.OpenIssues)
	}
	if stats.InProgressIssues != 1 {
		t.Errorf("Expected 1 in-progress issue, got %d", stats.InProgressIssues)
	}
	if stats.ClosedIssues != 1 {
		t.Errorf("Expected 1 closed issue, got %d", stats.ClosedIssues)
	}
}
// TestConfigOperations verifies set, get, get-all, and delete of config
// keys; deleted keys must read back as "".
func TestConfigOperations(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Set config
	if err := store.SetConfig(ctx, "test_key", "test_value"); err != nil {
		t.Fatalf("SetConfig failed: %v", err)
	}
	// Get config
	value, err := store.GetConfig(ctx, "test_key")
	if err != nil {
		t.Fatalf("GetConfig failed: %v", err)
	}
	if value != "test_value" {
		t.Errorf("Expected test_value, got %v", value)
	}
	// Get all config
	allConfig, err := store.GetAllConfig(ctx)
	if err != nil {
		t.Fatalf("GetAllConfig failed: %v", err)
	}
	if len(allConfig) < 1 {
		t.Error("Expected at least 1 config entry")
	}
	// Delete config
	if err := store.DeleteConfig(ctx, "test_key"); err != nil {
		t.Fatalf("DeleteConfig failed: %v", err)
	}
	value, err = store.GetConfig(ctx, "test_key")
	if err != nil {
		t.Fatalf("GetConfig failed: %v", err)
	}
	if value != "" {
		t.Errorf("Expected empty value after delete, got %v", value)
	}
}
// TestMetadataOperations verifies a metadata key round-trips.
func TestMetadataOperations(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	// Set metadata
	if err := store.SetMetadata(ctx, "hash", "abc123"); err != nil {
		t.Fatalf("SetMetadata failed: %v", err)
	}
	// Get metadata
	value, err := store.GetMetadata(ctx, "hash")
	if err != nil {
		t.Fatalf("GetMetadata failed: %v", err)
	}
	if value != "abc123" {
		t.Errorf("Expected abc123, got %v", value)
	}
}
// TestSyncAllCounters checks that SyncAllCounters rebuilds the per-prefix
// ID counters from the loaded issues, repairing a deliberately corrupted
// counter value.
func TestSyncAllCounters(t *testing.T) {
	store := New("")
	defer store.Close()

	ctx := context.Background()

	// Seed issues under two distinct prefixes ("bd" and "custom").
	seed := []*types.Issue{
		{ID: "bd-5", Title: "Test 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
		{ID: "bd-10", Title: "Test 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
		{ID: "custom-3", Title: "Test 3", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
	}
	if err := store.LoadFromIssues(seed); err != nil {
		t.Fatalf("LoadFromIssues failed: %v", err)
	}

	// Corrupt the "bd" counter so the sync has something to repair.
	store.counters["bd"] = 1

	if err := store.SyncAllCounters(ctx); err != nil {
		t.Fatalf("SyncAllCounters failed: %v", err)
	}

	// Each counter should now equal the highest numeric suffix seen
	// for its prefix (bd-10 → 10, custom-3 → 3).
	if got := store.counters["bd"]; got != 10 {
		t.Errorf("Expected bd counter to be 10, got %d", got)
	}
	if got := store.counters["custom"]; got != 3 {
		t.Errorf("Expected custom counter to be 3, got %d", got)
	}
}
// TestThreadSafety runs CreateIssue concurrently from several goroutines
// and verifies that every create succeeds and is counted exactly once.
//
// Fix: the original version discarded the error returned by CreateIssue
// inside each goroutine, so a failing create would only surface later as
// a confusing issue-count mismatch. Each goroutine now reports its result
// over a channel and failures are attributed directly.
func TestThreadSafety(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()

	ctx := context.Background()
	const numGoroutines = 10

	// Buffered so no goroutine blocks on send after the test fails early.
	errs := make(chan error, numGoroutines)

	// Run concurrent creates.
	for i := 0; i < numGoroutines; i++ {
		go func() {
			issue := &types.Issue{
				Title:     "Concurrent",
				Status:    types.StatusOpen,
				Priority:  1,
				IssueType: types.TypeTask,
			}
			errs <- store.CreateIssue(ctx, issue, "test-user")
		}()
	}

	// Wait for all goroutines and surface any create errors.
	// t.Errorf (unlike t.Fatalf) is safe to reach from results gathered
	// on the main test goroutine.
	for i := 0; i < numGoroutines; i++ {
		if err := <-errs; err != nil {
			t.Errorf("CreateIssue failed: %v", err)
		}
	}

	// Verify all issues were created.
	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics failed: %v", err)
	}
	if stats.TotalIssues != numGoroutines {
		t.Errorf("Expected %d issues, got %d", numGoroutines, stats.TotalIssues)
	}
}
// TestClose verifies the store's closed flag starts false and flips to
// true once Close returns successfully.
func TestClose(t *testing.T) {
	store := setupTestMemory(t)

	// A freshly constructed store must be open.
	if store.closed {
		t.Error("Store should not be closed initially")
	}

	// Closing must succeed and leave the flag set.
	if err := store.Close(); err != nil {
		t.Fatalf("Close failed: %v", err)
	}
	if !store.closed {
		t.Error("Store should be closed")
	}
}

View File

@@ -501,7 +501,10 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
JOIN dependencies d ON i.id = d.issue_id
JOIN tree t ON d.depends_on_id = t.id
WHERE t.depth < ?
AND t.path NOT LIKE '%' || i.id || '%'
AND t.path != i.id
AND t.path NOT LIKE i.id || '→%'
AND t.path NOT LIKE '%→' || i.id || '→%'
AND t.path NOT LIKE '%→' || i.id
)
SELECT id, title, status, priority, description, design,
acceptance_criteria, notes, issue_type, assignee,
@@ -539,7 +542,10 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
JOIN dependencies d ON i.id = d.depends_on_id
JOIN tree t ON d.issue_id = t.id
WHERE t.depth < ?
AND t.path NOT LIKE '%' || i.id || '%'
AND t.path != i.id
AND t.path NOT LIKE i.id || '→%'
AND t.path NOT LIKE '%→' || i.id || '→%'
AND t.path NOT LIKE '%→' || i.id
)
SELECT id, title, status, priority, description, design,
acceptance_criteria, notes, issue_type, assignee,

View File

@@ -800,3 +800,106 @@ func TestGetDependencyTree_Reverse(t *testing.T) {
t.Errorf("Expected depth 2 for %s in reverse tree, got %d", issue3.ID, depthMap[issue3.ID])
}
}
// TestGetDependencyTree_SubstringBug guards against the path-matching bug
// where one issue ID being a substring of another (bd-1 inside bd-10)
// caused nodes to be wrongly excluded from the dependency tree.
func TestGetDependencyTree_SubstringBug(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()

	ctx := context.Background()

	// Create 10 issues so we have both bd-1 and bd-10 (substring issue)
	// The bug: when traversing from bd-10, bd-1 gets incorrectly excluded
	// because "bd-10" contains "bd-1" as a substring
	issues := make([]*types.Issue, 10)
	for i := range issues {
		issues[i] = &types.Issue{
			Title:     fmt.Sprintf("Issue %d", i+1),
			Status:    types.StatusOpen,
			Priority:  1,
			IssueType: types.TypeTask,
		}
		if err := store.CreateIssue(ctx, issues[i], "test-user"); err != nil {
			t.Fatalf("CreateIssue failed: %v", err)
		}
	}

	// Build the blocking chain bd-10 → bd-9 → bd-8 → bd-2 → bd-1, which
	// exercises the substring case (bd-1 must still appear under bd-10).
	links := []struct {
		from, to int
		label    string
	}{
		{9, 8, "bd-10→bd-9"},
		{8, 7, "bd-9→bd-8"},
		{7, 1, "bd-8→bd-2"},
		{1, 0, "bd-2→bd-1"},
	}
	for _, l := range links {
		dep := &types.Dependency{
			IssueID:     issues[l.from].ID,
			DependsOnID: issues[l.to].ID,
			Type:        types.DepBlocks,
		}
		if err := store.AddDependency(ctx, dep, "test-user"); err != nil {
			t.Fatalf("AddDependency %s failed: %v", l.label, err)
		}
	}

	// Fetch the tree rooted at bd-10.
	tree, err := store.GetDependencyTree(ctx, issues[9].ID, 10, false, false)
	if err != nil {
		t.Fatalf("GetDependencyTree failed: %v", err)
	}

	// Index membership and depth by issue ID in a single pass.
	inTree := make(map[string]bool, len(tree))
	depthOf := make(map[string]int, len(tree))
	for _, node := range tree {
		inTree[node.ID] = true
		depthOf[node.ID] = node.Depth
	}

	// Every member of the chain must be present. The KEY assertion is
	// bd-1: with the substring bug it goes missing because "bd-10"
	// contains "bd-1".
	for _, idx := range []int{9, 8, 7, 1, 0} { // bd-10, bd-9, bd-8, bd-2, bd-1
		if !inTree[issues[idx].ID] {
			t.Errorf("Expected %s in dependency tree, but it was missing (substring bug)", issues[idx].ID)
		}
	}

	// Exactly the five chained issues should be in the tree.
	if len(tree) != 5 {
		t.Errorf("Expected 5 nodes in tree, got %d. Missing nodes indicate substring bug.", len(tree))
	}

	// Depths follow the chain: bd-10(0) → bd-9(1) → bd-8(2) → bd-2(3) → bd-1(4).
	if depthOf[issues[9].ID] != 0 {
		t.Errorf("Expected bd-10 at depth 0, got %d", depthOf[issues[9].ID])
	}
	if depthOf[issues[0].ID] != 4 {
		t.Errorf("Expected bd-1 at depth 4, got %d", depthOf[issues[0].ID])
	}
}

View File

@@ -0,0 +1,165 @@
package sqlite
import (
	"context"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/steveyegge/beads/internal/types"
)
// TestPrefixValidation verifies that CreateIssue rejects explicit IDs
// whose prefix does not match the configured issue_prefix, and that an
// empty ID triggers auto-generation with the configured prefix.
//
// Fix: the generated-ID check used issue.ID[:5], which panics if the ID
// is ever shorter than five bytes; strings.HasPrefix is safe for any
// length and reads clearer.
func TestPrefixValidation(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-prefix-test-*")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := New(dbPath)
	if err != nil {
		t.Fatalf("failed to create storage: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// Set prefix to "test"
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}

	tests := []struct {
		name    string
		issueID string
		wantErr bool
	}{
		{
			name:    "valid prefix - matches",
			issueID: "test-123",
			wantErr: false,
		},
		{
			name:    "invalid prefix - wrong prefix",
			issueID: "bd-456",
			wantErr: true,
		},
		{
			name:    "invalid prefix - no dash",
			issueID: "test123",
			wantErr: true,
		},
		{
			name:    "invalid prefix - empty",
			issueID: "",
			wantErr: false, // Empty ID triggers auto-generation
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			issue := &types.Issue{
				ID:        tt.issueID,
				Title:     "Test issue",
				Status:    types.StatusOpen,
				Priority:  1,
				IssueType: types.TypeTask,
			}
			err := store.CreateIssue(ctx, issue, "test-user")
			if (err != nil) != tt.wantErr {
				t.Errorf("CreateIssue() error = %v, wantErr %v", err, tt.wantErr)
			}
			// If we expected success and the ID was empty, verify it was
			// generated with the correct prefix. HasPrefix avoids the
			// panic that issue.ID[:5] would cause on a short ID.
			if err == nil && tt.issueID == "" {
				if issue.ID == "" {
					t.Error("ID should be generated")
				}
				if !strings.HasPrefix(issue.ID, "test-") {
					t.Errorf("Generated ID should have prefix 'test-', got %s", issue.ID)
				}
			}
		})
	}
}
// TestPrefixValidationBatch verifies that CreateIssues applies the same
// prefix rules across a batch: all-valid batches succeed, any invalid
// prefix fails the whole batch, and empty IDs are auto-generated with
// the configured prefix.
//
// Fix: the post-success check used issue.ID[:6], which panics if an ID
// is shorter than six bytes (e.g. on an unexpected empty ID);
// strings.HasPrefix is length-safe and reports the failure instead.
func TestPrefixValidationBatch(t *testing.T) {
	tmpDir, err := os.MkdirTemp("", "beads-prefix-batch-test-*")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	dbPath := filepath.Join(tmpDir, "test.db")
	store, err := New(dbPath)
	if err != nil {
		t.Fatalf("failed to create storage: %v", err)
	}
	defer store.Close()

	ctx := context.Background()

	// Set prefix to "batch"
	if err := store.SetConfig(ctx, "issue_prefix", "batch"); err != nil {
		t.Fatalf("failed to set prefix: %v", err)
	}

	tests := []struct {
		name    string
		issues  []*types.Issue
		wantErr bool
	}{
		{
			name: "all valid prefixes",
			issues: []*types.Issue{
				{ID: "batch-1", Title: "Test 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
				{ID: "batch-2", Title: "Test 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
			},
			wantErr: false,
		},
		{
			name: "one invalid prefix in batch",
			issues: []*types.Issue{
				{ID: "batch-10", Title: "Test 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
				{ID: "wrong-20", Title: "Test 2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
			},
			wantErr: true,
		},
		{
			name: "mixed auto-generated and explicit",
			issues: []*types.Issue{
				{ID: "batch-100", Title: "Explicit ID", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
				{ID: "", Title: "Auto ID", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
			},
			wantErr: false,
		},
		{
			name: "mixed with invalid prefix",
			issues: []*types.Issue{
				{ID: "", Title: "Auto ID", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
				{ID: "invalid-500", Title: "Wrong prefix", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := store.CreateIssues(ctx, tt.issues, "test-user")
			if (err != nil) != tt.wantErr {
				t.Errorf("CreateIssues() error = %v, wantErr %v", err, tt.wantErr)
			}
			// For successful batches, verify all IDs (explicit or
			// generated) carry the configured prefix. HasPrefix avoids
			// the panic issue.ID[:6] would cause on a short ID.
			if err == nil {
				for i, issue := range tt.issues {
					if !strings.HasPrefix(issue.ID, "batch-") {
						t.Errorf("Issue %d: ID should have prefix 'batch-', got %s", i, issue.ID)
					}
				}
			}
		})
	}
}

View File

@@ -617,19 +617,19 @@ func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, act
}
}()
// Get prefix from config (needed for both ID generation and validation)
var prefix string
err = conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix)
if err == sql.ErrNoRows || prefix == "" {
// CRITICAL: Reject operation if issue_prefix config is missing (bd-166)
// This prevents duplicate issues with wrong prefix
return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)")
} else if err != nil {
return fmt.Errorf("failed to get config: %w", err)
}
// Generate ID if not set (inside transaction to prevent race conditions)
if issue.ID == "" {
// Get prefix from config
var prefix string
err := conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix)
if err == sql.ErrNoRows || prefix == "" {
// CRITICAL: Reject operation if issue_prefix config is missing (bd-166)
// This prevents duplicate issues with wrong prefix
return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)")
} else if err != nil {
return fmt.Errorf("failed to get config: %w", err)
}
// Atomically initialize counter (if needed) and get next ID (within transaction)
// This ensures the counter starts from the max existing ID, not 1
// CRITICAL: We rely on BEGIN IMMEDIATE above to serialize this operation across processes
@@ -665,6 +665,13 @@ func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, act
}
issue.ID = fmt.Sprintf("%s-%d", prefix, nextID)
} else {
// Validate that explicitly provided ID matches the configured prefix (bd-177)
// This prevents wrong-prefix bugs when IDs are manually specified
expectedPrefix := prefix + "-"
if !strings.HasPrefix(issue.ID, expectedPrefix) {
return fmt.Errorf("issue ID '%s' does not match configured prefix '%s'", issue.ID, prefix)
}
}
// Insert issue
@@ -743,19 +750,7 @@ func validateBatchIssues(issues []*types.Issue) error {
// generateBatchIDs generates IDs for all issues that need them atomically
func generateBatchIDs(ctx context.Context, conn *sql.Conn, issues []*types.Issue, dbPath string) error {
// Count how many issues need IDs
needIDCount := 0
for _, issue := range issues {
if issue.ID == "" {
needIDCount++
}
}
if needIDCount == 0 {
return nil
}
// Get prefix from config
// Get prefix from config (needed for both generation and validation)
var prefix string
err := conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix)
if err == sql.ErrNoRows || prefix == "" {
@@ -765,6 +760,24 @@ func generateBatchIDs(ctx context.Context, conn *sql.Conn, issues []*types.Issue
return fmt.Errorf("failed to get config: %w", err)
}
// Count how many issues need IDs and validate explicitly provided IDs
needIDCount := 0
expectedPrefix := prefix + "-"
for _, issue := range issues {
if issue.ID == "" {
needIDCount++
} else {
// Validate that explicitly provided ID matches the configured prefix (bd-177)
if !strings.HasPrefix(issue.ID, expectedPrefix) {
return fmt.Errorf("issue ID '%s' does not match configured prefix '%s'", issue.ID, prefix)
}
}
}
if needIDCount == 0 {
return nil
}
// Atomically reserve ID range
var nextID int
err = conn.QueryRowContext(ctx, `