bd sync: 2025-12-23 13:49:07
This commit is contained in:
File diff suppressed because one or more lines are too long
239
cmd/bd/doctor.go
239
cmd/bd/doctor.go
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -52,7 +53,6 @@ var (
|
||||
doctorInteractive bool // bd-3xl: per-fix confirmation mode
|
||||
doctorDryRun bool // bd-a5z: preview fixes without applying
|
||||
doctorOutput string // bd-9cc: export diagnostics to file
|
||||
doctorVerbose bool // bd-4qfb: show all checks including passed
|
||||
perfMode bool
|
||||
checkHealthMode bool
|
||||
)
|
||||
@@ -422,10 +422,6 @@ func applyFixList(path string, fixes []doctorCheck) {
|
||||
// No auto-fix: compaction requires agent review
|
||||
fmt.Printf(" ⚠ Run 'bd compact --analyze' to review candidates\n")
|
||||
continue
|
||||
case "Large Database":
|
||||
// No auto-fix: pruning deletes data, must be user-controlled
|
||||
fmt.Printf(" ⚠ Run 'bd cleanup --older-than 90' to prune old closed issues\n")
|
||||
continue
|
||||
default:
|
||||
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
|
||||
fmt.Printf(" Manual fix: %s\n", check.Fix)
|
||||
@@ -821,12 +817,6 @@ func runDiagnostics(path string) doctorResult {
|
||||
result.Checks = append(result.Checks, compactionCheck)
|
||||
// Info only, not a warning - compaction requires human review
|
||||
|
||||
// Check 29: Database size (pruning suggestion)
|
||||
// Note: This check has no auto-fix - pruning is destructive and user-controlled
|
||||
sizeCheck := convertDoctorCheck(doctor.CheckDatabaseSize(path))
|
||||
result.Checks = append(result.Checks, sizeCheck)
|
||||
// Don't fail overall check for size warning, just inform
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -868,118 +858,136 @@ func exportDiagnostics(result doctorResult, outputPath string) error {
|
||||
}
|
||||
|
||||
func printDiagnostics(result doctorResult) {
|
||||
// Count checks by status and collect into categories
|
||||
var passCount, warnCount, failCount int
|
||||
var errors, warnings []doctorCheck
|
||||
passedByCategory := make(map[string][]doctorCheck)
|
||||
|
||||
for _, check := range result.Checks {
|
||||
switch check.Status {
|
||||
case statusOK:
|
||||
passCount++
|
||||
cat := check.Category
|
||||
if cat == "" {
|
||||
cat = "Other"
|
||||
}
|
||||
passedByCategory[cat] = append(passedByCategory[cat], check)
|
||||
case statusWarning:
|
||||
warnCount++
|
||||
warnings = append(warnings, check)
|
||||
case statusError:
|
||||
failCount++
|
||||
errors = append(errors, check)
|
||||
}
|
||||
}
|
||||
|
||||
// Print header with version and summary at TOP
|
||||
// Print header with version
|
||||
fmt.Printf("\nbd doctor v%s\n\n", result.CLIVersion)
|
||||
fmt.Printf("Summary: %d checks passed, %d warnings, %d errors\n", passCount, warnCount, failCount)
|
||||
|
||||
// Print errors section (always shown if any)
|
||||
if failCount > 0 {
|
||||
fmt.Println()
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
fmt.Printf("%s Errors (%d)\n", ui.RenderFailIcon(), failCount)
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
fmt.Println()
|
||||
// Group checks by category
|
||||
checksByCategory := make(map[string][]doctorCheck)
|
||||
for _, check := range result.Checks {
|
||||
cat := check.Category
|
||||
if cat == "" {
|
||||
cat = "Other"
|
||||
}
|
||||
checksByCategory[cat] = append(checksByCategory[cat], check)
|
||||
}
|
||||
|
||||
for _, check := range errors {
|
||||
fmt.Printf("[%s] %s\n", check.Name, check.Message)
|
||||
// Track counts
|
||||
var passCount, warnCount, failCount int
|
||||
var warnings []doctorCheck
|
||||
|
||||
// Print checks by category in defined order
|
||||
for _, category := range doctor.CategoryOrder {
|
||||
checks, exists := checksByCategory[category]
|
||||
if !exists || len(checks) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Print category header
|
||||
fmt.Println(ui.RenderCategory(category))
|
||||
|
||||
// Print each check in this category
|
||||
for _, check := range checks {
|
||||
// Determine status icon
|
||||
var statusIcon string
|
||||
switch check.Status {
|
||||
case statusOK:
|
||||
statusIcon = ui.RenderPassIcon()
|
||||
passCount++
|
||||
case statusWarning:
|
||||
statusIcon = ui.RenderWarnIcon()
|
||||
warnCount++
|
||||
warnings = append(warnings, check)
|
||||
case statusError:
|
||||
statusIcon = ui.RenderFailIcon()
|
||||
failCount++
|
||||
warnings = append(warnings, check)
|
||||
}
|
||||
|
||||
// Print check line: icon + name + message
|
||||
fmt.Printf(" %s %s", statusIcon, check.Name)
|
||||
if check.Message != "" {
|
||||
fmt.Printf("%s", ui.RenderMuted(" "+check.Message))
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Print detail if present (indented)
|
||||
if check.Detail != "" {
|
||||
fmt.Printf(" %s\n", check.Detail)
|
||||
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), ui.RenderMuted(check.Detail))
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Print any checks without a category
|
||||
if otherChecks, exists := checksByCategory["Other"]; exists && len(otherChecks) > 0 {
|
||||
fmt.Println(ui.RenderCategory("Other"))
|
||||
for _, check := range otherChecks {
|
||||
var statusIcon string
|
||||
switch check.Status {
|
||||
case statusOK:
|
||||
statusIcon = ui.RenderPassIcon()
|
||||
passCount++
|
||||
case statusWarning:
|
||||
statusIcon = ui.RenderWarnIcon()
|
||||
warnCount++
|
||||
warnings = append(warnings, check)
|
||||
case statusError:
|
||||
statusIcon = ui.RenderFailIcon()
|
||||
failCount++
|
||||
warnings = append(warnings, check)
|
||||
}
|
||||
fmt.Printf(" %s %s", statusIcon, check.Name)
|
||||
if check.Message != "" {
|
||||
fmt.Printf("%s", ui.RenderMuted(" "+check.Message))
|
||||
}
|
||||
fmt.Println()
|
||||
if check.Detail != "" {
|
||||
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), ui.RenderMuted(check.Detail))
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Print summary line
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
summary := fmt.Sprintf("%s %d passed %s %d warnings %s %d failed",
|
||||
ui.RenderPassIcon(), passCount,
|
||||
ui.RenderWarnIcon(), warnCount,
|
||||
ui.RenderFailIcon(), failCount,
|
||||
)
|
||||
fmt.Println(summary)
|
||||
|
||||
// Print warnings/errors section with fixes
|
||||
if len(warnings) > 0 {
|
||||
fmt.Println()
|
||||
fmt.Println(ui.RenderWarn(ui.IconWarn + " WARNINGS"))
|
||||
|
||||
// Sort by severity: errors first, then warnings
|
||||
slices.SortStableFunc(warnings, func(a, b doctorCheck) int {
|
||||
// Errors (statusError) come before warnings (statusWarning)
|
||||
if a.Status == statusError && b.Status != statusError {
|
||||
return -1
|
||||
}
|
||||
if a.Status != statusError && b.Status == statusError {
|
||||
return 1
|
||||
}
|
||||
return 0 // maintain original order within same severity
|
||||
})
|
||||
|
||||
for i, check := range warnings {
|
||||
// Show numbered items with icon and color based on status
|
||||
// Errors get entire line in red, warnings just the number in yellow
|
||||
line := fmt.Sprintf("%s: %s", check.Name, check.Message)
|
||||
if check.Status == statusError {
|
||||
fmt.Printf(" %s %s %s\n", ui.RenderFailIcon(), ui.RenderFail(fmt.Sprintf("%d.", i+1)), ui.RenderFail(line))
|
||||
} else {
|
||||
fmt.Printf(" %s %s %s\n", ui.RenderWarnIcon(), ui.RenderWarn(fmt.Sprintf("%d.", i+1)), line)
|
||||
}
|
||||
if check.Fix != "" {
|
||||
fmt.Printf(" Fix: %s\n", check.Fix)
|
||||
fmt.Printf(" %s%s\n", ui.MutedStyle.Render(ui.TreeLast), check.Fix)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
// Print warnings section (always shown if any)
|
||||
if warnCount > 0 {
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
fmt.Printf("%s Warnings (%d)\n", ui.RenderWarnIcon(), warnCount)
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
fmt.Println()
|
||||
|
||||
for _, check := range warnings {
|
||||
fmt.Printf("[%s] %s\n", check.Name, check.Message)
|
||||
if check.Detail != "" {
|
||||
fmt.Printf(" %s\n", check.Detail)
|
||||
}
|
||||
if check.Fix != "" {
|
||||
fmt.Printf(" Fix: %s\n", check.Fix)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
// Print passed section
|
||||
if passCount > 0 {
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
if doctorVerbose {
|
||||
// Verbose mode: show all passed checks grouped by category
|
||||
fmt.Printf("%s Passed (%d)\n", ui.RenderPassIcon(), passCount)
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
fmt.Println()
|
||||
|
||||
for _, category := range doctor.CategoryOrder {
|
||||
checks, exists := passedByCategory[category]
|
||||
if !exists || len(checks) == 0 {
|
||||
continue
|
||||
}
|
||||
fmt.Printf(" %s\n", category)
|
||||
for _, check := range checks {
|
||||
fmt.Printf(" %s %s", ui.RenderPassIcon(), check.Name)
|
||||
if check.Message != "" {
|
||||
fmt.Printf(" %s", ui.RenderMuted(check.Message))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Print "Other" category if exists
|
||||
if otherChecks, exists := passedByCategory["Other"]; exists && len(otherChecks) > 0 {
|
||||
fmt.Printf(" %s\n", "Other")
|
||||
for _, check := range otherChecks {
|
||||
fmt.Printf(" %s %s", ui.RenderPassIcon(), check.Name)
|
||||
if check.Message != "" {
|
||||
fmt.Printf(" %s", ui.RenderMuted(check.Message))
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
} else {
|
||||
// Default mode: collapsed summary
|
||||
fmt.Printf("%s Passed (%d) %s\n", ui.RenderPassIcon(), passCount, ui.RenderMuted("[use --verbose to show details]"))
|
||||
fmt.Println(ui.RenderSeparator())
|
||||
}
|
||||
}
|
||||
|
||||
// Final status message
|
||||
if failCount == 0 && warnCount == 0 {
|
||||
} else {
|
||||
fmt.Println()
|
||||
fmt.Printf("%s\n", ui.RenderPass("✓ All checks passed"))
|
||||
}
|
||||
@@ -990,5 +998,4 @@ func init() {
|
||||
doctorCmd.Flags().BoolVar(&perfMode, "perf", false, "Run performance diagnostics and generate CPU profile")
|
||||
doctorCmd.Flags().BoolVar(&checkHealthMode, "check-health", false, "Quick health check for git hooks (silent on success)")
|
||||
doctorCmd.Flags().StringVarP(&doctorOutput, "output", "o", "", "Export diagnostics to JSON file (bd-9cc)")
|
||||
doctorCmd.Flags().BoolVarP(&doctorVerbose, "verbose", "v", false, "Show all checks including passed (bd-4qfb)")
|
||||
}
|
||||
|
||||
@@ -620,92 +620,3 @@ func isNoDbModeConfigured(beadsDir string) bool {
|
||||
|
||||
return cfg.NoDb
|
||||
}
|
||||
|
||||
// CheckDatabaseSize warns when the database has accumulated many closed issues.
|
||||
// This is purely informational - pruning is NEVER auto-fixed because it
|
||||
// permanently deletes data. Users must explicitly run 'bd cleanup' to prune.
|
||||
//
|
||||
// Config: doctor.suggest_pruning_issue_count (default: 5000, 0 = disabled)
|
||||
//
|
||||
// DESIGN NOTE: This check intentionally has NO auto-fix. Unlike other doctor
|
||||
// checks that fix configuration or sync issues, pruning is destructive and
|
||||
// irreversible. The user must make an explicit decision to delete their
|
||||
// closed issue history. We only provide guidance, never action.
|
||||
func CheckDatabaseSize(path string) DoctorCheck {
|
||||
beadsDir := filepath.Join(path, ".beads")
|
||||
|
||||
// Get database path
|
||||
var dbPath string
|
||||
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
|
||||
dbPath = cfg.DatabasePath(beadsDir)
|
||||
} else {
|
||||
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
|
||||
}
|
||||
|
||||
// If no database, skip this check
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (no database)",
|
||||
}
|
||||
}
|
||||
|
||||
// Read threshold from config (default 5000, 0 = disabled)
|
||||
threshold := 5000
|
||||
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (unable to open database)",
|
||||
}
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Check for custom threshold in config table
|
||||
var thresholdStr string
|
||||
err = db.QueryRow("SELECT value FROM config WHERE key = ?", "doctor.suggest_pruning_issue_count").Scan(&thresholdStr)
|
||||
if err == nil {
|
||||
if _, err := fmt.Sscanf(thresholdStr, "%d", &threshold); err != nil {
|
||||
threshold = 5000 // Reset to default on parse error
|
||||
}
|
||||
}
|
||||
|
||||
// If disabled, return OK
|
||||
if threshold == 0 {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "Check disabled (threshold = 0)",
|
||||
}
|
||||
}
|
||||
|
||||
// Count closed issues
|
||||
var closedCount int
|
||||
err = db.QueryRow("SELECT COUNT(*) FROM issues WHERE status = 'closed'").Scan(&closedCount)
|
||||
if err != nil {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: "N/A (unable to count issues)",
|
||||
}
|
||||
}
|
||||
|
||||
// Check against threshold
|
||||
if closedCount > threshold {
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusWarning,
|
||||
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
|
||||
Detail: "Large number of closed issues may impact performance",
|
||||
Fix: "Consider running 'bd cleanup --older-than 90' to prune old closed issues",
|
||||
}
|
||||
}
|
||||
|
||||
return DoctorCheck{
|
||||
Name: "Large Database",
|
||||
Status: StatusOK,
|
||||
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -145,8 +145,6 @@ func CheckSyncBranchHookCompatibility(path string) DoctorCheck {
|
||||
Status: StatusWarning,
|
||||
Message: "Pre-push hook is not a bd hook",
|
||||
Detail: "Cannot verify sync-branch compatibility with custom hooks",
|
||||
Fix: "Either run 'bd hooks install --force' to use bd hooks,\n" +
|
||||
" or ensure your custom hook skips validation when pushing to sync-branch",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -188,7 +188,7 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
|
||||
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
|
||||
" Only one JSONL file should be used per repository.",
|
||||
Fix: "Determine which file is current and remove the others:\n" +
|
||||
" 1. Check .beads/metadata.json for 'jsonl_export' setting\n" +
|
||||
" 1. Check 'bd stats' to see which file is being used\n" +
|
||||
" 2. Verify with 'git log .beads/*.jsonl' to see commit history\n" +
|
||||
" 3. Remove the unused file(s): git rm .beads/<unused>.jsonl\n" +
|
||||
" 4. Commit the change",
|
||||
|
||||
104
cmd/bd/search.go
104
cmd/bd/search.go
@@ -26,14 +26,9 @@ Examples:
|
||||
bd search "database" --label backend --limit 10
|
||||
bd search --query "performance" --assignee alice
|
||||
bd search "bd-5q" # Search by partial ID
|
||||
bd search "security" --priority 1 # Exact priority match
|
||||
bd search "security" --priority-min 0 --priority-max 2 # Priority range
|
||||
bd search "security" --priority-min 0 --priority-max 2
|
||||
bd search "bug" --created-after 2025-01-01
|
||||
bd search "refactor" --updated-after 2025-01-01 --priority-min 1
|
||||
bd search "bug" --desc-contains "authentication" # Search in description
|
||||
bd search "" --empty-description # Issues without description
|
||||
bd search "" --no-assignee # Unassigned issues
|
||||
bd search "" --no-labels # Issues without labels
|
||||
bd search "bug" --sort priority
|
||||
bd search "task" --sort created --reverse`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
@@ -46,31 +41,9 @@ Examples:
|
||||
query = queryFlag
|
||||
}
|
||||
|
||||
// Check if any filter flags are set (allows empty query with filters)
|
||||
hasFilters := cmd.Flags().Changed("status") ||
|
||||
cmd.Flags().Changed("priority") ||
|
||||
cmd.Flags().Changed("assignee") ||
|
||||
cmd.Flags().Changed("type") ||
|
||||
cmd.Flags().Changed("label") ||
|
||||
cmd.Flags().Changed("label-any") ||
|
||||
cmd.Flags().Changed("created-after") ||
|
||||
cmd.Flags().Changed("created-before") ||
|
||||
cmd.Flags().Changed("updated-after") ||
|
||||
cmd.Flags().Changed("updated-before") ||
|
||||
cmd.Flags().Changed("closed-after") ||
|
||||
cmd.Flags().Changed("closed-before") ||
|
||||
cmd.Flags().Changed("priority-min") ||
|
||||
cmd.Flags().Changed("priority-max") ||
|
||||
cmd.Flags().Changed("title-contains") ||
|
||||
cmd.Flags().Changed("desc-contains") ||
|
||||
cmd.Flags().Changed("notes-contains") ||
|
||||
cmd.Flags().Changed("empty-description") ||
|
||||
cmd.Flags().Changed("no-assignee") ||
|
||||
cmd.Flags().Changed("no-labels")
|
||||
|
||||
// If no query and no filters provided, show help
|
||||
if query == "" && !hasFilters {
|
||||
fmt.Fprintf(os.Stderr, "Error: search query or filter is required\n")
|
||||
// If no query provided, show help
|
||||
if query == "" {
|
||||
fmt.Fprintf(os.Stderr, "Error: search query is required\n")
|
||||
if err := cmd.Help(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error displaying help: %v\n", err)
|
||||
}
|
||||
@@ -88,11 +61,6 @@ Examples:
|
||||
sortBy, _ := cmd.Flags().GetString("sort")
|
||||
reverse, _ := cmd.Flags().GetBool("reverse")
|
||||
|
||||
// Pattern matching flags
|
||||
titleContains, _ := cmd.Flags().GetString("title-contains")
|
||||
descContains, _ := cmd.Flags().GetString("desc-contains")
|
||||
notesContains, _ := cmd.Flags().GetString("notes-contains")
|
||||
|
||||
// Date range flags
|
||||
createdAfter, _ := cmd.Flags().GetString("created-after")
|
||||
createdBefore, _ := cmd.Flags().GetString("created-before")
|
||||
@@ -101,11 +69,6 @@ Examples:
|
||||
closedAfter, _ := cmd.Flags().GetString("closed-after")
|
||||
closedBefore, _ := cmd.Flags().GetString("closed-before")
|
||||
|
||||
// Empty/null check flags
|
||||
emptyDesc, _ := cmd.Flags().GetBool("empty-description")
|
||||
noAssignee, _ := cmd.Flags().GetBool("no-assignee")
|
||||
noLabels, _ := cmd.Flags().GetBool("no-labels")
|
||||
|
||||
// Priority range flags
|
||||
priorityMinStr, _ := cmd.Flags().GetString("priority-min")
|
||||
priorityMaxStr, _ := cmd.Flags().GetString("priority-max")
|
||||
@@ -141,39 +104,6 @@ Examples:
|
||||
filter.LabelsAny = labelsAny
|
||||
}
|
||||
|
||||
// Exact priority match (use Changed() to properly handle P0)
|
||||
if cmd.Flags().Changed("priority") {
|
||||
priorityStr, _ := cmd.Flags().GetString("priority")
|
||||
priority, err := validation.ValidatePriority(priorityStr)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
filter.Priority = &priority
|
||||
}
|
||||
|
||||
// Pattern matching
|
||||
if titleContains != "" {
|
||||
filter.TitleContains = titleContains
|
||||
}
|
||||
if descContains != "" {
|
||||
filter.DescriptionContains = descContains
|
||||
}
|
||||
if notesContains != "" {
|
||||
filter.NotesContains = notesContains
|
||||
}
|
||||
|
||||
// Empty/null checks
|
||||
if emptyDesc {
|
||||
filter.EmptyDescription = true
|
||||
}
|
||||
if noAssignee {
|
||||
filter.NoAssignee = true
|
||||
}
|
||||
if noLabels {
|
||||
filter.NoLabels = true
|
||||
}
|
||||
|
||||
// Date ranges
|
||||
if createdAfter != "" {
|
||||
t, err := parseTimeFlag(createdAfter)
|
||||
@@ -270,21 +200,6 @@ Examples:
|
||||
listArgs.LabelsAny = labelsAny
|
||||
}
|
||||
|
||||
// Exact priority match
|
||||
if filter.Priority != nil {
|
||||
listArgs.Priority = filter.Priority
|
||||
}
|
||||
|
||||
// Pattern matching
|
||||
listArgs.TitleContains = titleContains
|
||||
listArgs.DescriptionContains = descContains
|
||||
listArgs.NotesContains = notesContains
|
||||
|
||||
// Empty/null checks
|
||||
listArgs.EmptyDescription = filter.EmptyDescription
|
||||
listArgs.NoAssignee = filter.NoAssignee
|
||||
listArgs.NoLabels = filter.NoLabels
|
||||
|
||||
// Date ranges
|
||||
if filter.CreatedAfter != nil {
|
||||
listArgs.CreatedAfter = filter.CreatedAfter.Format(time.RFC3339)
|
||||
@@ -457,7 +372,6 @@ func outputSearchResults(issues []*types.Issue, query string, longFormat bool) {
|
||||
func init() {
|
||||
searchCmd.Flags().String("query", "", "Search query (alternative to positional argument)")
|
||||
searchCmd.Flags().StringP("status", "s", "", "Filter by status (open, in_progress, blocked, deferred, closed)")
|
||||
registerPriorityFlag(searchCmd, "")
|
||||
searchCmd.Flags().StringP("assignee", "a", "", "Filter by assignee")
|
||||
searchCmd.Flags().StringP("type", "t", "", "Filter by type (bug, feature, task, epic, chore, merge-request, molecule, gate)")
|
||||
searchCmd.Flags().StringSliceP("label", "l", []string{}, "Filter by labels (AND: must have ALL)")
|
||||
@@ -467,11 +381,6 @@ func init() {
|
||||
searchCmd.Flags().String("sort", "", "Sort by field: priority, created, updated, closed, status, id, title, type, assignee")
|
||||
searchCmd.Flags().BoolP("reverse", "r", false, "Reverse sort order")
|
||||
|
||||
// Pattern matching flags
|
||||
searchCmd.Flags().String("title-contains", "", "Filter by title substring (case-insensitive)")
|
||||
searchCmd.Flags().String("desc-contains", "", "Filter by description substring (case-insensitive)")
|
||||
searchCmd.Flags().String("notes-contains", "", "Filter by notes substring (case-insensitive)")
|
||||
|
||||
// Date range flags
|
||||
searchCmd.Flags().String("created-after", "", "Filter issues created after date (YYYY-MM-DD or RFC3339)")
|
||||
searchCmd.Flags().String("created-before", "", "Filter issues created before date (YYYY-MM-DD or RFC3339)")
|
||||
@@ -480,11 +389,6 @@ func init() {
|
||||
searchCmd.Flags().String("closed-after", "", "Filter issues closed after date (YYYY-MM-DD or RFC3339)")
|
||||
searchCmd.Flags().String("closed-before", "", "Filter issues closed before date (YYYY-MM-DD or RFC3339)")
|
||||
|
||||
// Empty/null check flags
|
||||
searchCmd.Flags().Bool("empty-description", false, "Filter issues with empty or missing description")
|
||||
searchCmd.Flags().Bool("no-assignee", false, "Filter issues with no assignee")
|
||||
searchCmd.Flags().Bool("no-labels", false, "Filter issues with no labels")
|
||||
|
||||
// Priority range flags
|
||||
searchCmd.Flags().String("priority-min", "", "Filter by minimum priority (inclusive, 0-4 or P0-P4)")
|
||||
searchCmd.Flags().String("priority-max", "", "Filter by maximum priority (inclusive, 0-4 or P0-P4)")
|
||||
|
||||
@@ -972,10 +972,6 @@ var closeCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
CheckReadonly("close")
|
||||
reason, _ := cmd.Flags().GetString("reason")
|
||||
// Check --resolution alias if --reason not provided
|
||||
if reason == "" {
|
||||
reason, _ = cmd.Flags().GetString("resolution")
|
||||
}
|
||||
if reason == "" {
|
||||
reason = "Closed"
|
||||
}
|
||||
@@ -1057,8 +1053,6 @@ var closeCmd = &cobra.Command{
|
||||
if hookRunner != nil {
|
||||
hookRunner.Run(hooks.EventClose, &issue)
|
||||
}
|
||||
// Run config-based close hooks (bd-g4b4)
|
||||
hooks.RunConfigCloseHooks(ctx, &issue)
|
||||
if jsonOutput {
|
||||
closedIssues = append(closedIssues, &issue)
|
||||
}
|
||||
@@ -1111,12 +1105,8 @@ var closeCmd = &cobra.Command{
|
||||
|
||||
// Run close hook (bd-kwro.8)
|
||||
closedIssue, _ := store.GetIssue(ctx, id)
|
||||
if closedIssue != nil {
|
||||
if hookRunner != nil {
|
||||
hookRunner.Run(hooks.EventClose, closedIssue)
|
||||
}
|
||||
// Run config-based close hooks (bd-g4b4)
|
||||
hooks.RunConfigCloseHooks(ctx, closedIssue)
|
||||
if closedIssue != nil && hookRunner != nil {
|
||||
hookRunner.Run(hooks.EventClose, closedIssue)
|
||||
}
|
||||
|
||||
if jsonOutput {
|
||||
@@ -1421,8 +1411,6 @@ func init() {
|
||||
rootCmd.AddCommand(editCmd)
|
||||
|
||||
closeCmd.Flags().StringP("reason", "r", "", "Reason for closing")
|
||||
closeCmd.Flags().String("resolution", "", "Alias for --reason (Jira CLI convention)")
|
||||
_ = closeCmd.Flags().MarkHidden("resolution") // Hidden alias for agent/CLI ergonomics
|
||||
closeCmd.Flags().Bool("json", false, "Output JSON format")
|
||||
closeCmd.Flags().BoolP("force", "f", false, "Force close pinned issues")
|
||||
closeCmd.Flags().Bool("continue", false, "Auto-advance to next step in molecule")
|
||||
|
||||
16
cmd/bd/testdata/close_resolution_alias.txt
vendored
16
cmd/bd/testdata/close_resolution_alias.txt
vendored
@@ -1,16 +0,0 @@
|
||||
# Test bd close --resolution alias (GH#721)
|
||||
# Jira CLI convention: --resolution instead of --reason
|
||||
bd init --prefix test
|
||||
|
||||
# Create issue
|
||||
bd create 'Issue to close with resolution'
|
||||
cp stdout issue.txt
|
||||
exec sh -c 'grep -oE "test-[a-z0-9]+" issue.txt > issue_id.txt'
|
||||
|
||||
# Close using --resolution alias
|
||||
exec sh -c 'bd close $(cat issue_id.txt) --resolution "Fixed via resolution alias"'
|
||||
stdout 'Closed test-'
|
||||
|
||||
# Verify close_reason is set correctly
|
||||
exec sh -c 'bd show $(cat issue_id.txt) --json'
|
||||
stdout 'Fixed via resolution alias'
|
||||
@@ -104,73 +104,6 @@ external_projects:
|
||||
gastown: /path/to/gastown
|
||||
```
|
||||
|
||||
### Hooks Configuration
|
||||
|
||||
bd supports config-based hooks for automation and notifications. Currently, close hooks are implemented.
|
||||
|
||||
#### Close Hooks
|
||||
|
||||
Close hooks run after an issue is successfully closed via `bd close`. They execute synchronously but failures are logged as warnings and don't block the close operation.
|
||||
|
||||
**Configuration:**
|
||||
|
||||
```yaml
|
||||
# .beads/config.yaml
|
||||
hooks:
|
||||
on_close:
|
||||
- name: show-next
|
||||
command: bd ready --limit 1
|
||||
- name: context-check
|
||||
command: echo "Issue $BEAD_ID closed. Check context if nearing limit."
|
||||
- command: notify-team.sh # name is optional
|
||||
```
|
||||
|
||||
**Environment Variables:**
|
||||
|
||||
Hook commands receive issue data via environment variables:
|
||||
|
||||
| Variable | Description |
|
||||
|----------|-------------|
|
||||
| `BEAD_ID` | Issue ID (e.g., `bd-abc1`) |
|
||||
| `BEAD_TITLE` | Issue title |
|
||||
| `BEAD_TYPE` | Issue type (`task`, `bug`, `feature`, etc.) |
|
||||
| `BEAD_PRIORITY` | Priority (0-4) |
|
||||
| `BEAD_CLOSE_REASON` | Close reason if provided |
|
||||
|
||||
**Example Use Cases:**
|
||||
|
||||
1. **Show next work item:**
|
||||
```yaml
|
||||
hooks:
|
||||
on_close:
|
||||
- name: next-task
|
||||
command: bd ready --limit 1
|
||||
```
|
||||
|
||||
2. **Context check reminder:**
|
||||
```yaml
|
||||
hooks:
|
||||
on_close:
|
||||
- name: context-check
|
||||
command: |
|
||||
echo "Issue $BEAD_ID ($BEAD_TITLE) closed."
|
||||
echo "Priority was P$BEAD_PRIORITY. Reason: $BEAD_CLOSE_REASON"
|
||||
```
|
||||
|
||||
3. **Integration with external tools:**
|
||||
```yaml
|
||||
hooks:
|
||||
on_close:
|
||||
- name: slack-notify
|
||||
command: curl -X POST "$SLACK_WEBHOOK" -d "{\"text\":\"Closed: $BEAD_ID - $BEAD_TITLE\"}"
|
||||
```
|
||||
|
||||
**Notes:**
|
||||
- Hooks have a 10-second timeout
|
||||
- Hook failures log warnings but don't fail the close operation
|
||||
- Commands run via `sh -c`, so shell features like pipes and redirects work
|
||||
- Both script-based hooks (`.beads/hooks/on_close`) and config-based hooks run
|
||||
|
||||
### Why Two Systems?
|
||||
|
||||
**Tool settings (Viper)** are user preferences:
|
||||
|
||||
@@ -1427,237 +1427,6 @@ func TestIsWispDatabase(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindDatabaseInBeadsDir tests the database discovery within a .beads directory
|
||||
func TestFindDatabaseInBeadsDir(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
files []string
|
||||
configJSON string
|
||||
expectDB string
|
||||
warnOnIssues bool
|
||||
}{
|
||||
{
|
||||
name: "canonical beads.db only",
|
||||
files: []string{"beads.db"},
|
||||
expectDB: "beads.db",
|
||||
},
|
||||
{
|
||||
name: "legacy bd.db only",
|
||||
files: []string{"bd.db"},
|
||||
expectDB: "bd.db",
|
||||
},
|
||||
{
|
||||
name: "prefers beads.db over other db files",
|
||||
files: []string{"custom.db", "beads.db", "other.db"},
|
||||
expectDB: "beads.db",
|
||||
},
|
||||
{
|
||||
name: "skips backup files",
|
||||
files: []string{"beads.backup.db", "real.db"},
|
||||
expectDB: "real.db",
|
||||
},
|
||||
{
|
||||
name: "skips vc.db",
|
||||
files: []string{"vc.db", "beads.db"},
|
||||
expectDB: "beads.db",
|
||||
},
|
||||
{
|
||||
name: "no db files returns empty",
|
||||
files: []string{"readme.txt", "config.yaml"},
|
||||
expectDB: "",
|
||||
},
|
||||
{
|
||||
name: "only backup files returns empty",
|
||||
files: []string{"beads.backup.db", "vc.db"},
|
||||
expectDB: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tmpDir, err := os.MkdirTemp("", "beads-findindir-test-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Create test files
|
||||
for _, file := range tt.files {
|
||||
path := filepath.Join(tmpDir, file)
|
||||
if err := os.WriteFile(path, []byte{}, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Write config.json if specified
|
||||
if tt.configJSON != "" {
|
||||
configPath := filepath.Join(tmpDir, "config.json")
|
||||
if err := os.WriteFile(configPath, []byte(tt.configJSON), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
result := findDatabaseInBeadsDir(tmpDir, tt.warnOnIssues)
|
||||
|
||||
if tt.expectDB == "" {
|
||||
if result != "" {
|
||||
t.Errorf("findDatabaseInBeadsDir() = %q, want empty string", result)
|
||||
}
|
||||
} else {
|
||||
expected := filepath.Join(tmpDir, tt.expectDB)
|
||||
if result != expected {
|
||||
t.Errorf("findDatabaseInBeadsDir() = %q, want %q", result, expected)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindAllDatabases tests the multi-database discovery
|
||||
func TestFindAllDatabases(t *testing.T) {
|
||||
// Save original state
|
||||
originalEnv := os.Getenv("BEADS_DIR")
|
||||
defer func() {
|
||||
if originalEnv != "" {
|
||||
os.Setenv("BEADS_DIR", originalEnv)
|
||||
} else {
|
||||
os.Unsetenv("BEADS_DIR")
|
||||
}
|
||||
}()
|
||||
os.Unsetenv("BEADS_DIR")
|
||||
|
||||
// Create temp directory structure
|
||||
tmpDir, err := os.MkdirTemp("", "beads-findall-test-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Create .beads directory with database
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dbPath := filepath.Join(beadsDir, "beads.db")
|
||||
if err := os.WriteFile(dbPath, []byte{}, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create subdirectory and change to it
|
||||
subDir := filepath.Join(tmpDir, "sub", "nested")
|
||||
if err := os.MkdirAll(subDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Chdir(subDir)
|
||||
|
||||
// FindAllDatabases should find the parent .beads
|
||||
result := FindAllDatabases()
|
||||
|
||||
if len(result) == 0 {
|
||||
t.Error("FindAllDatabases() returned empty slice, expected at least one database")
|
||||
} else {
|
||||
// Verify the path matches
|
||||
resultResolved, _ := filepath.EvalSymlinks(result[0].Path)
|
||||
dbPathResolved, _ := filepath.EvalSymlinks(dbPath)
|
||||
if resultResolved != dbPathResolved {
|
||||
t.Errorf("FindAllDatabases()[0].Path = %q, want %q", result[0].Path, dbPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindAllDatabases_NoDatabase tests FindAllDatabases when no database exists
|
||||
func TestFindAllDatabases_NoDatabase(t *testing.T) {
|
||||
// Save original state
|
||||
originalEnv := os.Getenv("BEADS_DIR")
|
||||
defer func() {
|
||||
if originalEnv != "" {
|
||||
os.Setenv("BEADS_DIR", originalEnv)
|
||||
} else {
|
||||
os.Unsetenv("BEADS_DIR")
|
||||
}
|
||||
}()
|
||||
os.Unsetenv("BEADS_DIR")
|
||||
|
||||
// Create temp directory without .beads
|
||||
tmpDir, err := os.MkdirTemp("", "beads-findall-nodb-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
// FindAllDatabases should return empty slice (not nil)
|
||||
result := FindAllDatabases()
|
||||
|
||||
if result == nil {
|
||||
t.Error("FindAllDatabases() returned nil, expected empty slice")
|
||||
}
|
||||
if len(result) != 0 {
|
||||
t.Errorf("FindAllDatabases() returned %d databases, expected 0", len(result))
|
||||
}
|
||||
}
|
||||
|
||||
// TestFindAllDatabases_StopsAtFirst tests that FindAllDatabases stops at first .beads found
|
||||
func TestFindAllDatabases_StopsAtFirst(t *testing.T) {
|
||||
// Save original state
|
||||
originalEnv := os.Getenv("BEADS_DIR")
|
||||
defer func() {
|
||||
if originalEnv != "" {
|
||||
os.Setenv("BEADS_DIR", originalEnv)
|
||||
} else {
|
||||
os.Unsetenv("BEADS_DIR")
|
||||
}
|
||||
}()
|
||||
os.Unsetenv("BEADS_DIR")
|
||||
|
||||
// Create temp directory structure with nested .beads dirs
|
||||
tmpDir, err := os.MkdirTemp("", "beads-findall-nested-*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Create parent .beads
|
||||
parentBeadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(parentBeadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(parentBeadsDir, "beads.db"), []byte{}, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create child project with its own .beads
|
||||
childDir := filepath.Join(tmpDir, "child")
|
||||
childBeadsDir := filepath.Join(childDir, ".beads")
|
||||
if err := os.MkdirAll(childBeadsDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
childDBPath := filepath.Join(childBeadsDir, "beads.db")
|
||||
if err := os.WriteFile(childDBPath, []byte{}, 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Change to child directory
|
||||
t.Chdir(childDir)
|
||||
|
||||
// FindAllDatabases should return only the child's database (stops at first)
|
||||
result := FindAllDatabases()
|
||||
|
||||
if len(result) != 1 {
|
||||
t.Errorf("FindAllDatabases() returned %d databases, expected 1 (should stop at first)", len(result))
|
||||
}
|
||||
|
||||
if len(result) > 0 {
|
||||
resultResolved, _ := filepath.EvalSymlinks(result[0].Path)
|
||||
childDBResolved, _ := filepath.EvalSymlinks(childDBPath)
|
||||
if resultResolved != childDBResolved {
|
||||
t.Errorf("FindAllDatabases() found %q, expected child database %q", result[0].Path, childDBPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestEnsureWispGitignore tests that EnsureWispGitignore correctly
|
||||
// adds the wisp directory to .gitignore
|
||||
func TestEnsureWispGitignore(t *testing.T) {
|
||||
|
||||
@@ -1,507 +0,0 @@
|
||||
package beads
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestCanonicalizeGitURL tests URL normalization for various git URL formats
|
||||
func TestCanonicalizeGitURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
// HTTPS URLs
|
||||
{
|
||||
name: "https basic",
|
||||
input: "https://github.com/user/repo",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "https with .git suffix",
|
||||
input: "https://github.com/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "https with trailing slash",
|
||||
input: "https://github.com/user/repo/",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "https uppercase host",
|
||||
input: "https://GitHub.COM/User/Repo.git",
|
||||
expected: "github.com/User/Repo",
|
||||
},
|
||||
{
|
||||
name: "https with port 443",
|
||||
input: "https://github.com:443/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "https with custom port",
|
||||
input: "https://gitlab.company.com:8443/user/repo.git",
|
||||
expected: "gitlab.company.com:8443/user/repo",
|
||||
},
|
||||
|
||||
// SSH URLs (protocol style)
|
||||
{
|
||||
name: "ssh protocol basic",
|
||||
input: "ssh://git@github.com/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "ssh with port 22",
|
||||
input: "ssh://git@github.com:22/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "ssh with custom port",
|
||||
input: "ssh://git@gitlab.company.com:2222/user/repo.git",
|
||||
expected: "gitlab.company.com:2222/user/repo",
|
||||
},
|
||||
|
||||
// SCP-style URLs (git@host:path)
|
||||
{
|
||||
name: "scp style basic",
|
||||
input: "git@github.com:user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "scp style without .git",
|
||||
input: "git@github.com:user/repo",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "scp style uppercase host",
|
||||
input: "git@GITHUB.COM:User/Repo.git",
|
||||
expected: "github.com/User/Repo",
|
||||
},
|
||||
{
|
||||
name: "scp style with trailing slash",
|
||||
input: "git@github.com:user/repo/",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "scp style deep path",
|
||||
input: "git@gitlab.com:org/team/project/repo.git",
|
||||
expected: "gitlab.com/org/team/project/repo",
|
||||
},
|
||||
|
||||
// HTTP URLs (less common but valid)
|
||||
{
|
||||
name: "http basic",
|
||||
input: "http://github.com/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "http with port 80",
|
||||
input: "http://github.com:80/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
|
||||
// Git protocol
|
||||
{
|
||||
name: "git protocol",
|
||||
input: "git://github.com/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
|
||||
// Whitespace handling
|
||||
{
|
||||
name: "with leading whitespace",
|
||||
input: " https://github.com/user/repo.git",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "with trailing whitespace",
|
||||
input: "https://github.com/user/repo.git ",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
{
|
||||
name: "with newline",
|
||||
input: "https://github.com/user/repo.git\n",
|
||||
expected: "github.com/user/repo",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := canonicalizeGitURL(tt.input)
|
||||
if err != nil {
|
||||
t.Fatalf("canonicalizeGitURL(%q) error = %v", tt.input, err)
|
||||
}
|
||||
if result != tt.expected {
|
||||
t.Errorf("canonicalizeGitURL(%q) = %q, want %q", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCanonicalizeGitURL_LocalPath tests that local paths are handled
|
||||
func TestCanonicalizeGitURL_LocalPath(t *testing.T) {
|
||||
// Create a temp directory to use as a "local path"
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Local absolute path
|
||||
result, err := canonicalizeGitURL(tmpDir)
|
||||
if err != nil {
|
||||
t.Fatalf("canonicalizeGitURL(%q) error = %v", tmpDir, err)
|
||||
}
|
||||
|
||||
// Should return a forward-slash path
|
||||
if strings.Contains(result, "\\") {
|
||||
t.Errorf("canonicalizeGitURL(%q) = %q, should use forward slashes", tmpDir, result)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCanonicalizeGitURL_WindowsPath tests Windows path detection
|
||||
func TestCanonicalizeGitURL_WindowsPath(t *testing.T) {
|
||||
// This tests the Windows path detection logic (C:/)
|
||||
// The function should NOT treat "C:/foo/bar" as an scp-style URL
|
||||
tests := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
// These are NOT scp-style URLs - they're Windows paths
|
||||
{"C:/Users/test/repo", "C:/Users/test/repo"},
|
||||
{"D:/projects/myrepo", "D:/projects/myrepo"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
result, err := canonicalizeGitURL(tt.input)
|
||||
if err != nil {
|
||||
t.Fatalf("canonicalizeGitURL(%q) error = %v", tt.input, err)
|
||||
}
|
||||
// Should preserve the Windows path structure (forward slashes)
|
||||
if !strings.Contains(result, "/") {
|
||||
t.Errorf("canonicalizeGitURL(%q) = %q, expected path with slashes", tt.input, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeRepoID_WithRemote tests ComputeRepoID when remote.origin.url exists
|
||||
func TestComputeRepoID_WithRemote(t *testing.T) {
|
||||
// Create temporary directory for test repo
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Skipf("git not available: %v", err)
|
||||
}
|
||||
|
||||
// Configure git user
|
||||
cmd = exec.Command("git", "config", "user.email", "test@example.com")
|
||||
cmd.Dir = tmpDir
|
||||
_ = cmd.Run()
|
||||
cmd = exec.Command("git", "config", "user.name", "Test User")
|
||||
cmd.Dir = tmpDir
|
||||
_ = cmd.Run()
|
||||
|
||||
// Set remote.origin.url
|
||||
cmd = exec.Command("git", "remote", "add", "origin", "https://github.com/user/test-repo.git")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("git remote add failed: %v", err)
|
||||
}
|
||||
|
||||
// Change to repo dir
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
// ComputeRepoID should return a consistent hash
|
||||
result1, err := ComputeRepoID()
|
||||
if err != nil {
|
||||
t.Fatalf("ComputeRepoID() error = %v", err)
|
||||
}
|
||||
|
||||
// Should be a 32-character hex string (16 bytes)
|
||||
if len(result1) != 32 {
|
||||
t.Errorf("ComputeRepoID() = %q, expected 32 character hex string", result1)
|
||||
}
|
||||
|
||||
// Should be consistent across calls
|
||||
result2, err := ComputeRepoID()
|
||||
if err != nil {
|
||||
t.Fatalf("ComputeRepoID() second call error = %v", err)
|
||||
}
|
||||
if result1 != result2 {
|
||||
t.Errorf("ComputeRepoID() not consistent: %q vs %q", result1, result2)
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeRepoID_NoRemote tests ComputeRepoID when no remote exists
|
||||
func TestComputeRepoID_NoRemote(t *testing.T) {
|
||||
// Create temporary directory for test repo
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize git repo (no remote)
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Skipf("git not available: %v", err)
|
||||
}
|
||||
|
||||
// Change to repo dir
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
// ComputeRepoID should fall back to using the local path
|
||||
result, err := ComputeRepoID()
|
||||
if err != nil {
|
||||
t.Fatalf("ComputeRepoID() error = %v", err)
|
||||
}
|
||||
|
||||
// Should still return a 32-character hex string
|
||||
if len(result) != 32 {
|
||||
t.Errorf("ComputeRepoID() = %q, expected 32 character hex string", result)
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeRepoID_NotGitRepo tests ComputeRepoID when not in a git repo
|
||||
func TestComputeRepoID_NotGitRepo(t *testing.T) {
|
||||
// Create temporary directory that is NOT a git repo
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
// ComputeRepoID should return an error
|
||||
_, err := ComputeRepoID()
|
||||
if err == nil {
|
||||
t.Error("ComputeRepoID() expected error for non-git directory, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "not a git repository") {
|
||||
t.Errorf("ComputeRepoID() error = %q, expected 'not a git repository'", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// TestComputeRepoID_DifferentRemotesSameCanonical tests that different URL formats
|
||||
// for the same repo produce the same ID
|
||||
func TestComputeRepoID_DifferentRemotesSameCanonical(t *testing.T) {
|
||||
remotes := []string{
|
||||
"https://github.com/user/repo.git",
|
||||
"git@github.com:user/repo.git",
|
||||
"ssh://git@github.com/user/repo.git",
|
||||
}
|
||||
|
||||
var ids []string
|
||||
|
||||
for _, remote := range remotes {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Skipf("git not available: %v", err)
|
||||
}
|
||||
|
||||
// Set remote
|
||||
cmd = exec.Command("git", "remote", "add", "origin", remote)
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("git remote add failed for %q: %v", remote, err)
|
||||
}
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
id, err := ComputeRepoID()
|
||||
if err != nil {
|
||||
t.Fatalf("ComputeRepoID() for remote %q error = %v", remote, err)
|
||||
}
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
// All IDs should be the same since they point to the same canonical repo
|
||||
for i := 1; i < len(ids); i++ {
|
||||
if ids[i] != ids[0] {
|
||||
t.Errorf("ComputeRepoID() produced different IDs for same repo:\n remote[0]=%q id=%s\n remote[%d]=%q id=%s",
|
||||
remotes[0], ids[0], i, remotes[i], ids[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetCloneID_Basic tests GetCloneID returns a consistent ID
|
||||
func TestGetCloneID_Basic(t *testing.T) {
|
||||
// Create temporary directory for test repo
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Skipf("git not available: %v", err)
|
||||
}
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
// GetCloneID should return a consistent hash
|
||||
result1, err := GetCloneID()
|
||||
if err != nil {
|
||||
t.Fatalf("GetCloneID() error = %v", err)
|
||||
}
|
||||
|
||||
// Should be a 16-character hex string (8 bytes)
|
||||
if len(result1) != 16 {
|
||||
t.Errorf("GetCloneID() = %q, expected 16 character hex string", result1)
|
||||
}
|
||||
|
||||
// Should be consistent across calls
|
||||
result2, err := GetCloneID()
|
||||
if err != nil {
|
||||
t.Fatalf("GetCloneID() second call error = %v", err)
|
||||
}
|
||||
if result1 != result2 {
|
||||
t.Errorf("GetCloneID() not consistent: %q vs %q", result1, result2)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetCloneID_DifferentDirs tests GetCloneID produces different IDs for different clones
|
||||
func TestGetCloneID_DifferentDirs(t *testing.T) {
|
||||
ids := make(map[string]string)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Skipf("git not available: %v", err)
|
||||
}
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
id, err := GetCloneID()
|
||||
if err != nil {
|
||||
t.Fatalf("GetCloneID() error = %v", err)
|
||||
}
|
||||
|
||||
// Each clone should have a unique ID
|
||||
if prev, exists := ids[id]; exists {
|
||||
t.Errorf("GetCloneID() produced duplicate ID %q for dirs %q and %q", id, prev, tmpDir)
|
||||
}
|
||||
ids[id] = tmpDir
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetCloneID_NotGitRepo tests GetCloneID when not in a git repo
|
||||
func TestGetCloneID_NotGitRepo(t *testing.T) {
|
||||
// Create temporary directory that is NOT a git repo
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
// GetCloneID should return an error
|
||||
_, err := GetCloneID()
|
||||
if err == nil {
|
||||
t.Error("GetCloneID() expected error for non-git directory, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "not a git repository") {
|
||||
t.Errorf("GetCloneID() error = %q, expected 'not a git repository'", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetCloneID_IncludesHostname tests that GetCloneID includes hostname
|
||||
// to differentiate the same path on different machines
|
||||
func TestGetCloneID_IncludesHostname(t *testing.T) {
|
||||
// This test verifies the concept - we can't actually test different hostnames
|
||||
// but we can verify that the same path produces the same ID on this machine
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Skipf("git not available: %v", err)
|
||||
}
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
|
||||
hostname, _ := os.Hostname()
|
||||
id, err := GetCloneID()
|
||||
if err != nil {
|
||||
t.Fatalf("GetCloneID() error = %v", err)
|
||||
}
|
||||
|
||||
// Just verify we got a valid ID - we can't test different hostnames
|
||||
// but the implementation includes hostname in the hash
|
||||
if len(id) != 16 {
|
||||
t.Errorf("GetCloneID() = %q, expected 16 character hex string (hostname=%s)", id, hostname)
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetCloneID_Worktree tests GetCloneID in a worktree
|
||||
func TestGetCloneID_Worktree(t *testing.T) {
|
||||
// Create temporary directory for test
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize main git repo
|
||||
mainRepoDir := filepath.Join(tmpDir, "main-repo")
|
||||
if err := os.MkdirAll(mainRepoDir, 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = mainRepoDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Skipf("git not available: %v", err)
|
||||
}
|
||||
|
||||
// Configure git user
|
||||
cmd = exec.Command("git", "config", "user.email", "test@example.com")
|
||||
cmd.Dir = mainRepoDir
|
||||
_ = cmd.Run()
|
||||
cmd = exec.Command("git", "config", "user.name", "Test User")
|
||||
cmd.Dir = mainRepoDir
|
||||
_ = cmd.Run()
|
||||
|
||||
// Create initial commit (required for worktree)
|
||||
dummyFile := filepath.Join(mainRepoDir, "README.md")
|
||||
if err := os.WriteFile(dummyFile, []byte("# Test\n"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cmd = exec.Command("git", "add", "README.md")
|
||||
cmd.Dir = mainRepoDir
|
||||
_ = cmd.Run()
|
||||
cmd = exec.Command("git", "commit", "-m", "Initial commit")
|
||||
cmd.Dir = mainRepoDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("git commit failed: %v", err)
|
||||
}
|
||||
|
||||
// Create a worktree
|
||||
worktreeDir := filepath.Join(tmpDir, "worktree")
|
||||
cmd = exec.Command("git", "worktree", "add", worktreeDir, "HEAD")
|
||||
cmd.Dir = mainRepoDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("git worktree add failed: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
cmd := exec.Command("git", "worktree", "remove", worktreeDir)
|
||||
cmd.Dir = mainRepoDir
|
||||
_ = cmd.Run()
|
||||
}()
|
||||
|
||||
// Get IDs from both locations
|
||||
t.Chdir(mainRepoDir)
|
||||
mainID, err := GetCloneID()
|
||||
if err != nil {
|
||||
t.Fatalf("GetCloneID() in main repo error = %v", err)
|
||||
}
|
||||
|
||||
t.Chdir(worktreeDir)
|
||||
worktreeID, err := GetCloneID()
|
||||
if err != nil {
|
||||
t.Fatalf("GetCloneID() in worktree error = %v", err)
|
||||
}
|
||||
|
||||
// Worktree should have a DIFFERENT ID than main repo
|
||||
// because they're different paths (different clones conceptually)
|
||||
if mainID == worktreeID {
|
||||
t.Errorf("GetCloneID() returned same ID for main repo and worktree - should be different")
|
||||
}
|
||||
}
|
||||
@@ -1,732 +0,0 @@
|
||||
package compact
|
||||
|
||||
import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"net/http/httptest"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/anthropics/anthropic-sdk-go/option"
	"github.com/steveyegge/beads/internal/storage/sqlite"
	"github.com/steveyegge/beads/internal/types"
)
|
||||
|
||||
// setupTestStore creates a test SQLite store for unit tests
|
||||
func setupTestStore(t *testing.T) *sqlite.SQLiteStorage {
|
||||
t.Helper()
|
||||
|
||||
tmpDB := t.TempDir() + "/test.db"
|
||||
store, err := sqlite.New(context.Background(), tmpDB)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create storage: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
// Set issue_prefix to prevent "database not initialized" errors
|
||||
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
|
||||
t.Fatalf("failed to set issue_prefix: %v", err)
|
||||
}
|
||||
// Use 7 days minimum for Tier 1 compaction
|
||||
if err := store.SetConfig(ctx, "compact_tier1_days", "7"); err != nil {
|
||||
t.Fatalf("failed to set config: %v", err)
|
||||
}
|
||||
if err := store.SetConfig(ctx, "compact_tier1_dep_levels", "2"); err != nil {
|
||||
t.Fatalf("failed to set config: %v", err)
|
||||
}
|
||||
|
||||
return store
|
||||
}
|
||||
|
||||
// createTestIssue creates a closed issue eligible for compaction
|
||||
func createTestIssue(t *testing.T, store *sqlite.SQLiteStorage, id string) *types.Issue {
|
||||
t.Helper()
|
||||
|
||||
ctx := context.Background()
|
||||
prefix, _ := store.GetConfig(ctx, "issue_prefix")
|
||||
if prefix == "" {
|
||||
prefix = "bd"
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
// Issue closed 8 days ago (beyond 7-day threshold for Tier 1)
|
||||
closedAt := now.Add(-8 * 24 * time.Hour)
|
||||
issue := &types.Issue{
|
||||
ID: id,
|
||||
Title: "Test Issue",
|
||||
Description: `Implemented a comprehensive authentication system for the application.
|
||||
|
||||
The system includes JWT token generation, refresh token handling, password hashing with bcrypt,
|
||||
rate limiting on login attempts, and session management.`,
|
||||
Design: `Authentication Flow:
|
||||
1. User submits credentials
|
||||
2. Server validates against database
|
||||
3. On success, generate JWT with user claims`,
|
||||
Notes: "Performance considerations and testing strategy notes.",
|
||||
AcceptanceCriteria: "- Users can register\n- Users can login\n- Protected endpoints work",
|
||||
Status: types.StatusClosed,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
CreatedAt: now.Add(-48 * time.Hour),
|
||||
UpdatedAt: now.Add(-24 * time.Hour),
|
||||
ClosedAt: &closedAt,
|
||||
}
|
||||
|
||||
if err := store.CreateIssue(ctx, issue, prefix); err != nil {
|
||||
t.Fatalf("failed to create issue: %v", err)
|
||||
}
|
||||
|
||||
return issue
|
||||
}
|
||||
|
||||
func TestNew_WithConfig(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
config := &Config{
|
||||
Concurrency: 10,
|
||||
DryRun: true,
|
||||
}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
if c.config.Concurrency != 10 {
|
||||
t.Errorf("expected concurrency 10, got %d", c.config.Concurrency)
|
||||
}
|
||||
if !c.config.DryRun {
|
||||
t.Error("expected DryRun to be true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew_DefaultConcurrency(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
c, err := New(store, "", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
if c.config.Concurrency != defaultConcurrency {
|
||||
t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, c.config.Concurrency)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew_ZeroConcurrency(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
config := &Config{
|
||||
Concurrency: 0,
|
||||
DryRun: true,
|
||||
}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
// Zero concurrency should be replaced with default
|
||||
if c.config.Concurrency != defaultConcurrency {
|
||||
t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, c.config.Concurrency)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew_NegativeConcurrency(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
config := &Config{
|
||||
Concurrency: -5,
|
||||
DryRun: true,
|
||||
}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
// Negative concurrency should be replaced with default
|
||||
if c.config.Concurrency != defaultConcurrency {
|
||||
t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, c.config.Concurrency)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew_WithAPIKey(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
// Clear env var to test explicit key
|
||||
t.Setenv("ANTHROPIC_API_KEY", "")
|
||||
|
||||
config := &Config{
|
||||
DryRun: true, // DryRun so we don't actually need a valid key
|
||||
}
|
||||
c, err := New(store, "test-api-key", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
if c.config.APIKey != "test-api-key" {
|
||||
t.Errorf("expected api key 'test-api-key', got '%s'", c.config.APIKey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew_NoAPIKeyFallsToDryRun(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
// Clear env var
|
||||
t.Setenv("ANTHROPIC_API_KEY", "")
|
||||
|
||||
config := &Config{
|
||||
DryRun: false, // Try to create real client
|
||||
}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
// Should fall back to DryRun when no API key
|
||||
if !c.config.DryRun {
|
||||
t.Error("expected DryRun to be true when no API key provided")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew_AuditSettings(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
t.Setenv("ANTHROPIC_API_KEY", "test-key")
|
||||
|
||||
config := &Config{
|
||||
AuditEnabled: true,
|
||||
Actor: "test-actor",
|
||||
}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
if c.haiku == nil {
|
||||
t.Fatal("expected haiku client to be created")
|
||||
}
|
||||
if !c.haiku.auditEnabled {
|
||||
t.Error("expected auditEnabled to be true")
|
||||
}
|
||||
if c.haiku.auditActor != "test-actor" {
|
||||
t.Errorf("expected auditActor 'test-actor', got '%s'", c.haiku.auditActor)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1_DryRun(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
issue := createTestIssue(t, store, "bd-1")
|
||||
|
||||
config := &Config{DryRun: true}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
err = c.CompactTier1(ctx, issue.ID)
|
||||
if err == nil {
|
||||
t.Fatal("expected dry-run error, got nil")
|
||||
}
|
||||
if !strings.HasPrefix(err.Error(), "dry-run:") {
|
||||
t.Errorf("expected dry-run error prefix, got: %v", err)
|
||||
}
|
||||
|
||||
// Verify issue was not modified
|
||||
afterIssue, err := store.GetIssue(ctx, issue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get issue: %v", err)
|
||||
}
|
||||
if afterIssue.Description != issue.Description {
|
||||
t.Error("dry-run should not modify issue")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1_IneligibleOpenIssue(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
prefix, _ := store.GetConfig(ctx, "issue_prefix")
|
||||
if prefix == "" {
|
||||
prefix = "bd"
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
issue := &types.Issue{
|
||||
ID: "bd-open",
|
||||
Title: "Open Issue",
|
||||
Description: "Should not be compacted",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, issue, prefix); err != nil {
|
||||
t.Fatalf("failed to create issue: %v", err)
|
||||
}
|
||||
|
||||
config := &Config{DryRun: true}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
err = c.CompactTier1(ctx, issue.ID)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for ineligible issue, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "not eligible") {
|
||||
t.Errorf("expected 'not eligible' error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1_NonexistentIssue(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
config := &Config{DryRun: true}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
err = c.CompactTier1(ctx, "bd-nonexistent")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent issue")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1_ContextCanceled(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
issue := createTestIssue(t, store, "bd-cancel")
|
||||
|
||||
config := &Config{DryRun: true}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel() // Cancel immediately
|
||||
|
||||
err = c.CompactTier1(ctx, issue.ID)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for canceled context")
|
||||
}
|
||||
if err != context.Canceled {
|
||||
t.Errorf("expected context.Canceled, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1Batch_EmptyList(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
config := &Config{DryRun: true}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
results, err := c.CompactTier1Batch(ctx, []string{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if results != nil {
|
||||
t.Errorf("expected nil results for empty list, got: %v", results)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1Batch_DryRun(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
issue1 := createTestIssue(t, store, "bd-batch-1")
|
||||
issue2 := createTestIssue(t, store, "bd-batch-2")
|
||||
|
||||
config := &Config{DryRun: true, Concurrency: 2}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to batch compact: %v", err)
|
||||
}
|
||||
|
||||
if len(results) != 2 {
|
||||
t.Fatalf("expected 2 results, got %d", len(results))
|
||||
}
|
||||
|
||||
for _, result := range results {
|
||||
if result.Err != nil {
|
||||
t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
|
||||
}
|
||||
if result.OriginalSize == 0 {
|
||||
t.Errorf("expected non-zero original size for %s", result.IssueID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1Batch_MixedEligibility(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
closedIssue := createTestIssue(t, store, "bd-closed")
|
||||
|
||||
ctx := context.Background()
|
||||
prefix, _ := store.GetConfig(ctx, "issue_prefix")
|
||||
if prefix == "" {
|
||||
prefix = "bd"
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
openIssue := &types.Issue{
|
||||
ID: "bd-open",
|
||||
Title: "Open Issue",
|
||||
Description: "Should not be compacted",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, openIssue, prefix); err != nil {
|
||||
t.Fatalf("failed to create issue: %v", err)
|
||||
}
|
||||
|
||||
config := &Config{DryRun: true, Concurrency: 2}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
results, err := c.CompactTier1Batch(ctx, []string{closedIssue.ID, openIssue.ID})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to batch compact: %v", err)
|
||||
}
|
||||
|
||||
if len(results) != 2 {
|
||||
t.Fatalf("expected 2 results, got %d", len(results))
|
||||
}
|
||||
|
||||
var foundClosed, foundOpen bool
|
||||
for _, result := range results {
|
||||
switch result.IssueID {
|
||||
case openIssue.ID:
|
||||
foundOpen = true
|
||||
if result.Err == nil {
|
||||
t.Error("expected error for ineligible issue")
|
||||
}
|
||||
case closedIssue.ID:
|
||||
foundClosed = true
|
||||
if result.Err != nil {
|
||||
t.Errorf("unexpected error for eligible issue: %v", result.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !foundClosed || !foundOpen {
|
||||
t.Error("missing expected results")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1Batch_NonexistentIssue(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
closedIssue := createTestIssue(t, store, "bd-closed")
|
||||
|
||||
config := &Config{DryRun: true, Concurrency: 2}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
results, err := c.CompactTier1Batch(ctx, []string{closedIssue.ID, "bd-nonexistent"})
|
||||
if err != nil {
|
||||
t.Fatalf("batch operation failed: %v", err)
|
||||
}
|
||||
|
||||
if len(results) != 2 {
|
||||
t.Fatalf("expected 2 results, got %d", len(results))
|
||||
}
|
||||
|
||||
var successCount, errorCount int
|
||||
for _, r := range results {
|
||||
if r.Err == nil {
|
||||
successCount++
|
||||
} else {
|
||||
errorCount++
|
||||
}
|
||||
}
|
||||
|
||||
if successCount != 1 {
|
||||
t.Errorf("expected 1 success, got %d", successCount)
|
||||
}
|
||||
if errorCount != 1 {
|
||||
t.Errorf("expected 1 error, got %d", errorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1_WithMockAPI(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
issue := createTestIssue(t, store, "bd-mock-api")
|
||||
|
||||
// Create mock server that returns a short summary
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"id": "msg_test123",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-3-5-haiku-20241022",
|
||||
"content": []map[string]interface{}{
|
||||
{
|
||||
"type": "text",
|
||||
"text": "**Summary:** Short summary.\n\n**Key Decisions:** None.\n\n**Resolution:** Done.",
|
||||
},
|
||||
},
|
||||
})
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
t.Setenv("ANTHROPIC_API_KEY", "test-key")
|
||||
|
||||
// Create compactor with mock API
|
||||
config := &Config{Concurrency: 1}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
// Replace the haiku client with one pointing to mock server
|
||||
c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock haiku client: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
err = c.CompactTier1(ctx, issue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// Verify issue was updated
|
||||
afterIssue, err := store.GetIssue(ctx, issue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get issue: %v", err)
|
||||
}
|
||||
|
||||
if afterIssue.Description == issue.Description {
|
||||
t.Error("description should have been updated")
|
||||
}
|
||||
if afterIssue.Design != "" {
|
||||
t.Error("design should be cleared")
|
||||
}
|
||||
if afterIssue.Notes != "" {
|
||||
t.Error("notes should be cleared")
|
||||
}
|
||||
if afterIssue.AcceptanceCriteria != "" {
|
||||
t.Error("acceptance criteria should be cleared")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1_SummaryNotShorter(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
// Create issue with very short content
|
||||
ctx := context.Background()
|
||||
prefix, _ := store.GetConfig(ctx, "issue_prefix")
|
||||
if prefix == "" {
|
||||
prefix = "bd"
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
closedAt := now.Add(-8 * 24 * time.Hour)
|
||||
issue := &types.Issue{
|
||||
ID: "bd-short",
|
||||
Title: "Short",
|
||||
Description: "X", // Very short description
|
||||
Status: types.StatusClosed,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
CreatedAt: now.Add(-48 * time.Hour),
|
||||
UpdatedAt: now.Add(-24 * time.Hour),
|
||||
ClosedAt: &closedAt,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, issue, prefix); err != nil {
|
||||
t.Fatalf("failed to create issue: %v", err)
|
||||
}
|
||||
|
||||
// Create mock server that returns a longer summary
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"id": "msg_test123",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-3-5-haiku-20241022",
|
||||
"content": []map[string]interface{}{
|
||||
{
|
||||
"type": "text",
|
||||
"text": "**Summary:** This is a much longer summary that exceeds the original content length.\n\n**Key Decisions:** Multiple decisions.\n\n**Resolution:** Complete.",
|
||||
},
|
||||
},
|
||||
})
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
t.Setenv("ANTHROPIC_API_KEY", "test-key")
|
||||
|
||||
config := &Config{Concurrency: 1}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock haiku client: %v", err)
|
||||
}
|
||||
|
||||
err = c.CompactTier1(ctx, issue.ID)
|
||||
if err == nil {
|
||||
t.Fatal("expected error when summary is longer")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "would increase size") {
|
||||
t.Errorf("expected 'would increase size' error, got: %v", err)
|
||||
}
|
||||
|
||||
// Verify issue was NOT modified (kept original)
|
||||
afterIssue, err := store.GetIssue(ctx, issue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get issue: %v", err)
|
||||
}
|
||||
if afterIssue.Description != issue.Description {
|
||||
t.Error("description should not have been modified when summary is longer")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1Batch_WithMockAPI(t *testing.T) {
|
||||
store := setupTestStore(t)
|
||||
defer store.Close()
|
||||
|
||||
issue1 := createTestIssue(t, store, "bd-batch-mock-1")
|
||||
issue2 := createTestIssue(t, store, "bd-batch-mock-2")
|
||||
|
||||
// Create mock server
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"id": "msg_test123",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-3-5-haiku-20241022",
|
||||
"content": []map[string]interface{}{
|
||||
{
|
||||
"type": "text",
|
||||
"text": "**Summary:** Compacted.\n\n**Key Decisions:** None.\n\n**Resolution:** Done.",
|
||||
},
|
||||
},
|
||||
})
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
t.Setenv("ANTHROPIC_API_KEY", "test-key")
|
||||
|
||||
config := &Config{Concurrency: 2}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
c.haiku, err = NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create mock haiku client: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to batch compact: %v", err)
|
||||
}
|
||||
|
||||
if len(results) != 2 {
|
||||
t.Fatalf("expected 2 results, got %d", len(results))
|
||||
}
|
||||
|
||||
for _, result := range results {
|
||||
if result.Err != nil {
|
||||
t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
|
||||
}
|
||||
if result.CompactedSize == 0 {
|
||||
t.Errorf("expected non-zero compacted size for %s", result.IssueID)
|
||||
}
|
||||
if result.CompactedSize >= result.OriginalSize {
|
||||
t.Errorf("expected size reduction for %s: %d → %d", result.IssueID, result.OriginalSize, result.CompactedSize)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResult_Fields(t *testing.T) {
|
||||
r := &Result{
|
||||
IssueID: "bd-1",
|
||||
OriginalSize: 100,
|
||||
CompactedSize: 50,
|
||||
Err: nil,
|
||||
}
|
||||
|
||||
if r.IssueID != "bd-1" {
|
||||
t.Errorf("expected IssueID 'bd-1', got '%s'", r.IssueID)
|
||||
}
|
||||
if r.OriginalSize != 100 {
|
||||
t.Errorf("expected OriginalSize 100, got %d", r.OriginalSize)
|
||||
}
|
||||
if r.CompactedSize != 50 {
|
||||
t.Errorf("expected CompactedSize 50, got %d", r.CompactedSize)
|
||||
}
|
||||
if r.Err != nil {
|
||||
t.Errorf("expected nil Err, got %v", r.Err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfig_Fields(t *testing.T) {
|
||||
c := &Config{
|
||||
APIKey: "test-key",
|
||||
Concurrency: 10,
|
||||
DryRun: true,
|
||||
AuditEnabled: true,
|
||||
Actor: "test-actor",
|
||||
}
|
||||
|
||||
if c.APIKey != "test-key" {
|
||||
t.Errorf("expected APIKey 'test-key', got '%s'", c.APIKey)
|
||||
}
|
||||
if c.Concurrency != 10 {
|
||||
t.Errorf("expected Concurrency 10, got %d", c.Concurrency)
|
||||
}
|
||||
if !c.DryRun {
|
||||
t.Error("expected DryRun true")
|
||||
}
|
||||
if !c.AuditEnabled {
|
||||
t.Error("expected AuditEnabled true")
|
||||
}
|
||||
if c.Actor != "test-actor" {
|
||||
t.Errorf("expected Actor 'test-actor', got '%s'", c.Actor)
|
||||
}
|
||||
}
|
||||
@@ -1,171 +0,0 @@
|
||||
package compact
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetCurrentCommitHash_InGitRepo(t *testing.T) {
|
||||
// This test runs in the actual beads repo, so it should return a valid hash
|
||||
hash := GetCurrentCommitHash()
|
||||
|
||||
// Should be a 40-character hex string
|
||||
if len(hash) != 40 {
|
||||
t.Errorf("expected 40-char hash, got %d chars: %s", len(hash), hash)
|
||||
}
|
||||
|
||||
// Should be valid hex
|
||||
matched, err := regexp.MatchString("^[0-9a-f]{40}$", hash)
|
||||
if err != nil {
|
||||
t.Fatalf("regex error: %v", err)
|
||||
}
|
||||
if !matched {
|
||||
t.Errorf("expected hex hash, got: %s", hash)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCurrentCommitHash_NotInGitRepo(t *testing.T) {
|
||||
// Save current directory
|
||||
originalDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get cwd: %v", err)
|
||||
}
|
||||
|
||||
// Create a temporary directory that is NOT a git repo
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Change to the temp directory
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("failed to chdir to temp dir: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
// Restore original directory
|
||||
if err := os.Chdir(originalDir); err != nil {
|
||||
t.Fatalf("failed to restore cwd: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Should return empty string when not in a git repo
|
||||
hash := GetCurrentCommitHash()
|
||||
if hash != "" {
|
||||
t.Errorf("expected empty string outside git repo, got: %s", hash)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCurrentCommitHash_NewGitRepo(t *testing.T) {
|
||||
// Save current directory
|
||||
originalDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get cwd: %v", err)
|
||||
}
|
||||
|
||||
// Create a temporary directory
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize a new git repo
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
}
|
||||
|
||||
// Configure git user for the commit
|
||||
cmd = exec.Command("git", "config", "user.email", "test@test.com")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to set git email: %v", err)
|
||||
}
|
||||
|
||||
cmd = exec.Command("git", "config", "user.name", "Test User")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to set git name: %v", err)
|
||||
}
|
||||
|
||||
// Create a file and commit it
|
||||
testFile := filepath.Join(tmpDir, "test.txt")
|
||||
if err := os.WriteFile(testFile, []byte("test"), 0644); err != nil {
|
||||
t.Fatalf("failed to write test file: %v", err)
|
||||
}
|
||||
|
||||
cmd = exec.Command("git", "add", ".")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to git add: %v", err)
|
||||
}
|
||||
|
||||
cmd = exec.Command("git", "commit", "-m", "test commit")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to git commit: %v", err)
|
||||
}
|
||||
|
||||
// Change to the new git repo
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("failed to chdir to git repo: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
// Restore original directory
|
||||
if err := os.Chdir(originalDir); err != nil {
|
||||
t.Fatalf("failed to restore cwd: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Should return a valid hash
|
||||
hash := GetCurrentCommitHash()
|
||||
if len(hash) != 40 {
|
||||
t.Errorf("expected 40-char hash, got %d chars: %s", len(hash), hash)
|
||||
}
|
||||
|
||||
// Verify it matches git rev-parse output
|
||||
cmd = exec.Command("git", "rev-parse", "HEAD")
|
||||
cmd.Dir = tmpDir
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run git rev-parse: %v", err)
|
||||
}
|
||||
|
||||
expected := string(out)
|
||||
expected = expected[:len(expected)-1] // trim newline
|
||||
if hash != expected {
|
||||
t.Errorf("hash mismatch: got %s, expected %s", hash, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCurrentCommitHash_EmptyGitRepo(t *testing.T) {
|
||||
// Save current directory
|
||||
originalDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get cwd: %v", err)
|
||||
}
|
||||
|
||||
// Create a temporary directory
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Initialize a new git repo but don't commit anything
|
||||
cmd := exec.Command("git", "init")
|
||||
cmd.Dir = tmpDir
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatalf("failed to init git repo: %v", err)
|
||||
}
|
||||
|
||||
// Change to the empty git repo
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("failed to chdir to git repo: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
// Restore original directory
|
||||
if err := os.Chdir(originalDir); err != nil {
|
||||
t.Fatalf("failed to restore cwd: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Should return empty string for repo with no commits
|
||||
hash := GetCurrentCommitHash()
|
||||
if hash != "" {
|
||||
t.Errorf("expected empty string for empty git repo, got: %s", hash)
|
||||
}
|
||||
}
|
||||
@@ -38,7 +38,7 @@ type HaikuClient struct {
|
||||
}
|
||||
|
||||
// NewHaikuClient creates a new Haiku API client. Env var ANTHROPIC_API_KEY takes precedence over explicit apiKey.
|
||||
func NewHaikuClient(apiKey string, opts ...option.RequestOption) (*HaikuClient, error) {
|
||||
func NewHaikuClient(apiKey string) (*HaikuClient, error) {
|
||||
envKey := os.Getenv("ANTHROPIC_API_KEY")
|
||||
if envKey != "" {
|
||||
apiKey = envKey
|
||||
@@ -47,10 +47,7 @@ func NewHaikuClient(apiKey string, opts ...option.RequestOption) (*HaikuClient,
|
||||
return nil, fmt.Errorf("%w: set ANTHROPIC_API_KEY environment variable or provide via config", ErrAPIKeyRequired)
|
||||
}
|
||||
|
||||
// Build options: API key first, then any additional options (for testing)
|
||||
allOpts := []option.RequestOption{option.WithAPIKey(apiKey)}
|
||||
allOpts = append(allOpts, opts...)
|
||||
client := anthropic.NewClient(allOpts...)
|
||||
client := anthropic.NewClient(option.WithAPIKey(apiKey))
|
||||
|
||||
tier1Tmpl, err := template.New("tier1").Parse(tier1PromptTemplate)
|
||||
if err != nil {
|
||||
|
||||
@@ -2,18 +2,11 @@ package compact
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/anthropics/anthropic-sdk-go"
|
||||
"github.com/anthropics/anthropic-sdk-go/option"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
@@ -196,399 +189,3 @@ func TestIsRetryable(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// mockTimeoutError implements net.Error for timeout testing
|
||||
type mockTimeoutError struct {
|
||||
timeout bool
|
||||
}
|
||||
|
||||
func (e *mockTimeoutError) Error() string { return "mock timeout error" }
|
||||
func (e *mockTimeoutError) Timeout() bool { return e.timeout }
|
||||
func (e *mockTimeoutError) Temporary() bool { return false }
|
||||
|
||||
func TestIsRetryable_NetworkTimeout(t *testing.T) {
|
||||
// Network timeout should be retryable
|
||||
timeoutErr := &mockTimeoutError{timeout: true}
|
||||
if !isRetryable(timeoutErr) {
|
||||
t.Error("network timeout error should be retryable")
|
||||
}
|
||||
|
||||
// Non-timeout network error should not be retryable
|
||||
nonTimeoutErr := &mockTimeoutError{timeout: false}
|
||||
if isRetryable(nonTimeoutErr) {
|
||||
t.Error("non-timeout network error should not be retryable")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsRetryable_APIErrors(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
statusCode int
|
||||
expected bool
|
||||
}{
|
||||
{"rate limit 429", 429, true},
|
||||
{"server error 500", 500, true},
|
||||
{"server error 502", 502, true},
|
||||
{"server error 503", 503, true},
|
||||
{"bad request 400", 400, false},
|
||||
{"unauthorized 401", 401, false},
|
||||
{"forbidden 403", 403, false},
|
||||
{"not found 404", 404, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
apiErr := &anthropic.Error{StatusCode: tt.statusCode}
|
||||
got := isRetryable(apiErr)
|
||||
if got != tt.expected {
|
||||
t.Errorf("isRetryable(API error %d) = %v, want %v", tt.statusCode, got, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// createMockAnthropicServer creates a mock server that returns Anthropic API responses
|
||||
func createMockAnthropicServer(handler http.HandlerFunc) *httptest.Server {
|
||||
return httptest.NewServer(handler)
|
||||
}
|
||||
|
||||
// mockAnthropicResponse creates a valid Anthropic Messages API response
|
||||
func mockAnthropicResponse(text string) map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"id": "msg_test123",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-3-5-haiku-20241022",
|
||||
"stop_reason": "end_turn",
|
||||
"stop_sequence": nil,
|
||||
"usage": map[string]int{
|
||||
"input_tokens": 100,
|
||||
"output_tokens": 50,
|
||||
},
|
||||
"content": []map[string]interface{}{
|
||||
{
|
||||
"type": "text",
|
||||
"text": text,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummarizeTier1_MockAPI(t *testing.T) {
|
||||
// Create mock server that returns a valid summary
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Verify request method and path
|
||||
if r.Method != "POST" {
|
||||
t.Errorf("expected POST, got %s", r.Method)
|
||||
}
|
||||
if !strings.HasSuffix(r.URL.Path, "/messages") {
|
||||
t.Errorf("expected /messages path, got %s", r.URL.Path)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
resp := mockAnthropicResponse("**Summary:** Fixed auth bug.\n\n**Key Decisions:** Used OAuth.\n\n**Resolution:** Complete.")
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{
|
||||
ID: "bd-1",
|
||||
Title: "Fix authentication bug",
|
||||
Description: "OAuth login was broken",
|
||||
Status: types.StatusClosed,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
result, err := client.SummarizeTier1(ctx, issue)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(result, "**Summary:**") {
|
||||
t.Error("result should contain Summary section")
|
||||
}
|
||||
if !strings.Contains(result, "Fixed auth bug") {
|
||||
t.Error("result should contain summary text")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummarizeTier1_APIError(t *testing.T) {
|
||||
// Create mock server that returns an error
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"type": "error",
|
||||
"error": map[string]interface{}{
|
||||
"type": "invalid_request_error",
|
||||
"message": "Invalid API key",
|
||||
},
|
||||
})
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{
|
||||
ID: "bd-1",
|
||||
Title: "Test",
|
||||
Description: "Test",
|
||||
Status: types.StatusClosed,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = client.SummarizeTier1(ctx, issue)
|
||||
if err == nil {
|
||||
t.Fatal("expected error from API")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "non-retryable") {
|
||||
t.Errorf("expected non-retryable error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallWithRetry_RetriesOn429(t *testing.T) {
|
||||
var attempts int32
|
||||
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
attempt := atomic.AddInt32(&attempts, 1)
|
||||
if attempt <= 2 {
|
||||
// First two attempts return 429
|
||||
w.WriteHeader(http.StatusTooManyRequests)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"type": "error",
|
||||
"error": map[string]interface{}{
|
||||
"type": "rate_limit_error",
|
||||
"message": "Rate limited",
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
// Third attempt succeeds
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(mockAnthropicResponse("Success after retries"))
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
// Disable SDK's internal retries to test our retry logic only
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
// Use short backoff for testing
|
||||
client.initialBackoff = 10 * time.Millisecond
|
||||
|
||||
ctx := context.Background()
|
||||
result, err := client.callWithRetry(ctx, "test prompt")
|
||||
if err != nil {
|
||||
t.Fatalf("expected success after retries, got: %v", err)
|
||||
}
|
||||
if result != "Success after retries" {
|
||||
t.Errorf("expected 'Success after retries', got: %s", result)
|
||||
}
|
||||
if attempts != 3 {
|
||||
t.Errorf("expected 3 attempts, got: %d", attempts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallWithRetry_RetriesOn500(t *testing.T) {
|
||||
var attempts int32
|
||||
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
attempt := atomic.AddInt32(&attempts, 1)
|
||||
if attempt == 1 {
|
||||
// First attempt returns 500
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"type": "error",
|
||||
"error": map[string]interface{}{
|
||||
"type": "api_error",
|
||||
"message": "Internal server error",
|
||||
},
|
||||
})
|
||||
return
|
||||
}
|
||||
// Second attempt succeeds
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(mockAnthropicResponse("Recovered from 500"))
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
// Disable SDK's internal retries to test our retry logic only
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
client.initialBackoff = 10 * time.Millisecond
|
||||
|
||||
ctx := context.Background()
|
||||
result, err := client.callWithRetry(ctx, "test prompt")
|
||||
if err != nil {
|
||||
t.Fatalf("expected success after retry, got: %v", err)
|
||||
}
|
||||
if result != "Recovered from 500" {
|
||||
t.Errorf("expected 'Recovered from 500', got: %s", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallWithRetry_ExhaustsRetries(t *testing.T) {
|
||||
var attempts int32
|
||||
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
atomic.AddInt32(&attempts, 1)
|
||||
// Always return 429
|
||||
w.WriteHeader(http.StatusTooManyRequests)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"type": "error",
|
||||
"error": map[string]interface{}{
|
||||
"type": "rate_limit_error",
|
||||
"message": "Rate limited",
|
||||
},
|
||||
})
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
// Disable SDK's internal retries to test our retry logic only
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL), option.WithMaxRetries(0))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
client.initialBackoff = 1 * time.Millisecond
|
||||
client.maxRetries = 2
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = client.callWithRetry(ctx, "test prompt")
|
||||
if err == nil {
|
||||
t.Fatal("expected error after exhausting retries")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "failed after") {
|
||||
t.Errorf("expected 'failed after' error, got: %v", err)
|
||||
}
|
||||
// Initial attempt + 2 retries = 3 total
|
||||
if attempts != 3 {
|
||||
t.Errorf("expected 3 attempts, got: %d", attempts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallWithRetry_NoRetryOn400(t *testing.T) {
|
||||
var attempts int32
|
||||
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
atomic.AddInt32(&attempts, 1)
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"type": "error",
|
||||
"error": map[string]interface{}{
|
||||
"type": "invalid_request_error",
|
||||
"message": "Bad request",
|
||||
},
|
||||
})
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
client.initialBackoff = 10 * time.Millisecond
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = client.callWithRetry(ctx, "test prompt")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for bad request")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "non-retryable") {
|
||||
t.Errorf("expected non-retryable error, got: %v", err)
|
||||
}
|
||||
if attempts != 1 {
|
||||
t.Errorf("expected only 1 attempt for non-retryable error, got: %d", attempts)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallWithRetry_ContextTimeout(t *testing.T) {
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Delay longer than context timeout
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(mockAnthropicResponse("too late"))
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
_, err = client.callWithRetry(ctx, "test prompt")
|
||||
if err == nil {
|
||||
t.Fatal("expected timeout error")
|
||||
}
|
||||
if !errors.Is(err, context.DeadlineExceeded) {
|
||||
t.Errorf("expected context.DeadlineExceeded, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCallWithRetry_EmptyContent(t *testing.T) {
|
||||
server := createMockAnthropicServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
// Return response with empty content array
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"id": "msg_test123",
|
||||
"type": "message",
|
||||
"role": "assistant",
|
||||
"model": "claude-3-5-haiku-20241022",
|
||||
"content": []map[string]interface{}{},
|
||||
})
|
||||
})
|
||||
defer server.Close()
|
||||
|
||||
client, err := NewHaikuClient("test-key", option.WithBaseURL(server.URL))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create client: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
_, err = client.callWithRetry(ctx, "test prompt")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for empty content")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "no content blocks") {
|
||||
t.Errorf("expected 'no content blocks' error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytesWriter(t *testing.T) {
|
||||
w := &bytesWriter{}
|
||||
|
||||
n, err := w.Write([]byte("hello"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if n != 5 {
|
||||
t.Errorf("expected n=5, got %d", n)
|
||||
}
|
||||
|
||||
n, err = w.Write([]byte(" world"))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if n != 6 {
|
||||
t.Errorf("expected n=6, got %d", n)
|
||||
}
|
||||
|
||||
if string(w.buf) != "hello world" {
|
||||
t.Errorf("expected 'hello world', got '%s'", string(w.buf))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify net.Error interface is properly satisfied for test mocks
|
||||
var _ net.Error = (*mockTimeoutError)(nil)
|
||||
|
||||
@@ -306,43 +306,6 @@ func ResolveExternalProjectPath(projectName string) string {
|
||||
return path
|
||||
}
|
||||
|
||||
// HookEntry represents a single config-based hook
|
||||
type HookEntry struct {
|
||||
Command string `yaml:"command" mapstructure:"command"` // Shell command to run
|
||||
Name string `yaml:"name" mapstructure:"name"` // Optional display name
|
||||
}
|
||||
|
||||
// GetCloseHooks returns the on_close hooks from config
|
||||
func GetCloseHooks() []HookEntry {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
var hooks []HookEntry
|
||||
raw := v.Get("hooks.on_close")
|
||||
if raw == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle slice of maps (from YAML parsing)
|
||||
if rawSlice, ok := raw.([]interface{}); ok {
|
||||
for _, item := range rawSlice {
|
||||
if m, ok := item.(map[string]interface{}); ok {
|
||||
entry := HookEntry{}
|
||||
if cmd, ok := m["command"].(string); ok {
|
||||
entry.Command = cmd
|
||||
}
|
||||
if name, ok := m["name"].(string); ok {
|
||||
entry.Name = name
|
||||
}
|
||||
if entry.Command != "" {
|
||||
hooks = append(hooks, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return hooks
|
||||
}
|
||||
|
||||
// GetIdentity resolves the user's identity for messaging.
|
||||
// Priority chain:
|
||||
// 1. flagValue (if non-empty, from --identity flag)
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
// Package hooks provides a hook system for extensibility.
|
||||
// This file implements config-based hooks defined in .beads/config.yaml.
|
||||
|
||||
package hooks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/config"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// RunConfigCloseHooks executes all on_close hooks from config.yaml.
|
||||
// Hook commands receive issue data via environment variables:
|
||||
// - BEAD_ID: Issue ID (e.g., bd-abc1)
|
||||
// - BEAD_TITLE: Issue title
|
||||
// - BEAD_TYPE: Issue type (task, bug, feature, etc.)
|
||||
// - BEAD_PRIORITY: Priority (0-4)
|
||||
// - BEAD_CLOSE_REASON: Close reason if provided
|
||||
//
|
||||
// Hooks run synchronously but failures are logged as warnings and don't
|
||||
// block the close operation.
|
||||
func RunConfigCloseHooks(ctx context.Context, issue *types.Issue) {
|
||||
hooks := config.GetCloseHooks()
|
||||
if len(hooks) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Build environment variables for hooks
|
||||
env := append(os.Environ(),
|
||||
"BEAD_ID="+issue.ID,
|
||||
"BEAD_TITLE="+issue.Title,
|
||||
"BEAD_TYPE="+string(issue.IssueType),
|
||||
"BEAD_PRIORITY="+strconv.Itoa(issue.Priority),
|
||||
"BEAD_CLOSE_REASON="+issue.CloseReason,
|
||||
)
|
||||
|
||||
timeout := 10 * time.Second
|
||||
|
||||
for _, hook := range hooks {
|
||||
hookCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||
|
||||
// #nosec G204 -- command comes from user's config file
|
||||
cmd := exec.CommandContext(hookCtx, "sh", "-c", hook.Command)
|
||||
cmd.Env = env
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
err := cmd.Run()
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
// Log warning but don't fail the close
|
||||
name := hook.Name
|
||||
if name == "" {
|
||||
name = hook.Command
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Warning: close hook %q failed: %v\n", name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,271 +0,0 @@
|
||||
package hooks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/config"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
func TestRunConfigCloseHooks_NoHooks(t *testing.T) {
|
||||
// Create a temp dir without any config
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create .beads dir: %v", err)
|
||||
}
|
||||
|
||||
// Change to the temp dir and initialize config
|
||||
oldWd, _ := os.Getwd()
|
||||
defer func() { _ = os.Chdir(oldWd) }()
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("Failed to chdir: %v", err)
|
||||
}
|
||||
|
||||
// Re-initialize config
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to initialize config: %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{ID: "bd-test", Title: "Test Issue"}
|
||||
ctx := context.Background()
|
||||
|
||||
// Should not panic with no hooks
|
||||
RunConfigCloseHooks(ctx, issue)
|
||||
}
|
||||
|
||||
func TestRunConfigCloseHooks_ExecutesCommand(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create .beads dir: %v", err)
|
||||
}
|
||||
|
||||
outputFile := filepath.Join(tmpDir, "hook_output.txt")
|
||||
|
||||
// Create config.yaml with a close hook
|
||||
configContent := `hooks:
|
||||
on_close:
|
||||
- name: test-hook
|
||||
command: echo "$BEAD_ID $BEAD_TITLE" > ` + outputFile + `
|
||||
`
|
||||
configPath := filepath.Join(beadsDir, "config.yaml")
|
||||
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
|
||||
t.Fatalf("Failed to write config: %v", err)
|
||||
}
|
||||
|
||||
// Change to the temp dir and initialize config
|
||||
oldWd, _ := os.Getwd()
|
||||
defer func() { _ = os.Chdir(oldWd) }()
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("Failed to chdir: %v", err)
|
||||
}
|
||||
|
||||
// Re-initialize config
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to initialize config: %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{
|
||||
ID: "bd-abc1",
|
||||
Title: "Test Issue",
|
||||
IssueType: types.TypeBug,
|
||||
Priority: 1,
|
||||
CloseReason: "Fixed",
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
RunConfigCloseHooks(ctx, issue)
|
||||
|
||||
// Wait for hook to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify output
|
||||
output, err := os.ReadFile(outputFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read output file: %v", err)
|
||||
}
|
||||
|
||||
expected := "bd-abc1 Test Issue"
|
||||
if !strings.Contains(string(output), expected) {
|
||||
t.Errorf("Hook output = %q, want to contain %q", string(output), expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigCloseHooks_EnvVars(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create .beads dir: %v", err)
|
||||
}
|
||||
|
||||
outputFile := filepath.Join(tmpDir, "env_output.txt")
|
||||
|
||||
// Create config.yaml with a close hook that outputs all env vars
|
||||
configContent := `hooks:
|
||||
on_close:
|
||||
- name: env-check
|
||||
command: echo "ID=$BEAD_ID TYPE=$BEAD_TYPE PRIORITY=$BEAD_PRIORITY REASON=$BEAD_CLOSE_REASON" > ` + outputFile + `
|
||||
`
|
||||
configPath := filepath.Join(beadsDir, "config.yaml")
|
||||
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
|
||||
t.Fatalf("Failed to write config: %v", err)
|
||||
}
|
||||
|
||||
// Change to the temp dir and initialize config
|
||||
oldWd, _ := os.Getwd()
|
||||
defer func() { _ = os.Chdir(oldWd) }()
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("Failed to chdir: %v", err)
|
||||
}
|
||||
|
||||
// Re-initialize config
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to initialize config: %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{
|
||||
ID: "bd-xyz9",
|
||||
Title: "Bug Fix",
|
||||
IssueType: types.TypeFeature,
|
||||
Priority: 2,
|
||||
CloseReason: "Completed",
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
RunConfigCloseHooks(ctx, issue)
|
||||
|
||||
// Wait for hook to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify output contains all env vars
|
||||
output, err := os.ReadFile(outputFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read output file: %v", err)
|
||||
}
|
||||
|
||||
outputStr := string(output)
|
||||
checks := []string{
|
||||
"ID=bd-xyz9",
|
||||
"TYPE=feature",
|
||||
"PRIORITY=2",
|
||||
"REASON=Completed",
|
||||
}
|
||||
|
||||
for _, check := range checks {
|
||||
if !strings.Contains(outputStr, check) {
|
||||
t.Errorf("Hook output = %q, want to contain %q", outputStr, check)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunConfigCloseHooks_HookFailure(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create .beads dir: %v", err)
|
||||
}
|
||||
|
||||
successFile := filepath.Join(tmpDir, "success.txt")
|
||||
|
||||
// Create config.yaml with a failing hook followed by a succeeding one
|
||||
configContent := `hooks:
|
||||
on_close:
|
||||
- name: failing-hook
|
||||
command: exit 1
|
||||
- name: success-hook
|
||||
command: echo "success" > ` + successFile + `
|
||||
`
|
||||
configPath := filepath.Join(beadsDir, "config.yaml")
|
||||
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
|
||||
t.Fatalf("Failed to write config: %v", err)
|
||||
}
|
||||
|
||||
// Change to the temp dir and initialize config
|
||||
oldWd, _ := os.Getwd()
|
||||
defer func() { _ = os.Chdir(oldWd) }()
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("Failed to chdir: %v", err)
|
||||
}
|
||||
|
||||
// Re-initialize config
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to initialize config: %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{ID: "bd-test", Title: "Test"}
|
||||
ctx := context.Background()
|
||||
|
||||
// Should not panic even with failing hook
|
||||
RunConfigCloseHooks(ctx, issue)
|
||||
|
||||
// Wait for hooks to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify second hook still ran
|
||||
output, err := os.ReadFile(successFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Second hook should have run despite first failing: %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(output), "success") {
|
||||
t.Error("Second hook did not produce expected output")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCloseHooks(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
beadsDir := filepath.Join(tmpDir, ".beads")
|
||||
if err := os.MkdirAll(beadsDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create .beads dir: %v", err)
|
||||
}
|
||||
|
||||
// Create config.yaml with multiple hooks
|
||||
configContent := `hooks:
|
||||
on_close:
|
||||
- name: first-hook
|
||||
command: echo first
|
||||
- name: second-hook
|
||||
command: echo second
|
||||
- command: echo unnamed
|
||||
`
|
||||
configPath := filepath.Join(beadsDir, "config.yaml")
|
||||
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
|
||||
t.Fatalf("Failed to write config: %v", err)
|
||||
}
|
||||
|
||||
// Change to the temp dir and initialize config
|
||||
oldWd, _ := os.Getwd()
|
||||
defer func() { _ = os.Chdir(oldWd) }()
|
||||
if err := os.Chdir(tmpDir); err != nil {
|
||||
t.Fatalf("Failed to chdir: %v", err)
|
||||
}
|
||||
|
||||
// Re-initialize config
|
||||
if err := config.Initialize(); err != nil {
|
||||
t.Fatalf("Failed to initialize config: %v", err)
|
||||
}
|
||||
|
||||
hooks := config.GetCloseHooks()
|
||||
|
||||
if len(hooks) != 3 {
|
||||
t.Fatalf("Expected 3 hooks, got %d", len(hooks))
|
||||
}
|
||||
|
||||
if hooks[0].Name != "first-hook" || hooks[0].Command != "echo first" {
|
||||
t.Errorf("First hook = %+v, want name=first-hook, command=echo first", hooks[0])
|
||||
}
|
||||
|
||||
if hooks[1].Name != "second-hook" || hooks[1].Command != "echo second" {
|
||||
t.Errorf("Second hook = %+v, want name=second-hook, command=echo second", hooks[1])
|
||||
}
|
||||
|
||||
if hooks[2].Name != "" || hooks[2].Command != "echo unnamed" {
|
||||
t.Errorf("Third hook = %+v, want name='', command=echo unnamed", hooks[2])
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,464 +0,0 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// CreateTombstone converts an existing issue to a tombstone record.
|
||||
// This is a soft-delete that preserves the issue in the database with status="tombstone".
|
||||
// The issue will still appear in exports but be excluded from normal queries.
|
||||
// Dependencies must be removed separately before calling this method.
|
||||
func (s *SQLiteStorage) CreateTombstone(ctx context.Context, id string, actor string, reason string) error {
|
||||
// Get the issue to preserve its original type
|
||||
issue, err := s.GetIssue(ctx, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get issue: %w", err)
|
||||
}
|
||||
if issue == nil {
|
||||
return fmt.Errorf("issue not found: %s", id)
|
||||
}
|
||||
|
||||
tx, err := s.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
|
||||
now := time.Now()
|
||||
originalType := string(issue.IssueType)
|
||||
|
||||
// Convert issue to tombstone
|
||||
// Note: closed_at must be set to NULL because of CHECK constraint:
|
||||
// (status = 'closed') = (closed_at IS NOT NULL)
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
UPDATE issues
|
||||
SET status = ?,
|
||||
closed_at = NULL,
|
||||
deleted_at = ?,
|
||||
deleted_by = ?,
|
||||
delete_reason = ?,
|
||||
original_type = ?,
|
||||
updated_at = ?
|
||||
WHERE id = ?
|
||||
`, types.StatusTombstone, now, actor, reason, originalType, now, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tombstone: %w", err)
|
||||
}
|
||||
|
||||
// Record tombstone creation event
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
INSERT INTO events (issue_id, event_type, actor, comment)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, id, "deleted", actor, reason)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to record tombstone event: %w", err)
|
||||
}
|
||||
|
||||
// Mark issue as dirty for incremental export
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
INSERT INTO dirty_issues (issue_id, marked_at)
|
||||
VALUES (?, ?)
|
||||
ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
|
||||
`, id, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to mark issue dirty: %w", err)
|
||||
}
|
||||
|
||||
// Invalidate blocked issues cache since status changed (bd-5qim)
|
||||
// Tombstone issues don't block others, so this affects blocking calculations
|
||||
if err := s.invalidateBlockedCache(ctx, tx); err != nil {
|
||||
return fmt.Errorf("failed to invalidate blocked cache: %w", err)
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return wrapDBError("commit tombstone transaction", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteIssue permanently removes an issue from the database
|
||||
func (s *SQLiteStorage) DeleteIssue(ctx context.Context, id string) error {
|
||||
tx, err := s.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
|
||||
// Delete dependencies (both directions)
|
||||
_, err = tx.ExecContext(ctx, `DELETE FROM dependencies WHERE issue_id = ? OR depends_on_id = ?`, id, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete dependencies: %w", err)
|
||||
}
|
||||
|
||||
// Delete events
|
||||
_, err = tx.ExecContext(ctx, `DELETE FROM events WHERE issue_id = ?`, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete events: %w", err)
|
||||
}
|
||||
|
||||
// Delete comments (no FK cascade on this table) (bd-687g)
|
||||
_, err = tx.ExecContext(ctx, `DELETE FROM comments WHERE issue_id = ?`, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete comments: %w", err)
|
||||
}
|
||||
|
||||
// Delete from dirty_issues
|
||||
_, err = tx.ExecContext(ctx, `DELETE FROM dirty_issues WHERE issue_id = ?`, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete dirty marker: %w", err)
|
||||
}
|
||||
|
||||
// Delete the issue itself
|
||||
result, err := tx.ExecContext(ctx, `DELETE FROM issues WHERE id = ?`, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete issue: %w", err)
|
||||
}
|
||||
|
||||
rowsAffected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check rows affected: %w", err)
|
||||
}
|
||||
if rowsAffected == 0 {
|
||||
return fmt.Errorf("issue not found: %s", id)
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return wrapDBError("commit delete transaction", err)
|
||||
}
|
||||
|
||||
// REMOVED (bd-c7af): Counter sync after deletion - no longer needed with hash IDs
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteIssuesResult contains statistics about a batch deletion operation
|
||||
type DeleteIssuesResult struct {
|
||||
DeletedCount int
|
||||
DependenciesCount int
|
||||
LabelsCount int
|
||||
EventsCount int
|
||||
OrphanedIssues []string
|
||||
}
|
||||
|
||||
// DeleteIssues deletes multiple issues in a single transaction
|
||||
// If cascade is true, recursively deletes dependents
|
||||
// If cascade is false but force is true, deletes issues and orphans their dependents
|
||||
// If cascade and force are both false, returns an error if any issue has dependents
|
||||
// If dryRun is true, only computes statistics without deleting
|
||||
func (s *SQLiteStorage) DeleteIssues(ctx context.Context, ids []string, cascade bool, force bool, dryRun bool) (*DeleteIssuesResult, error) {
|
||||
if len(ids) == 0 {
|
||||
return &DeleteIssuesResult{}, nil
|
||||
}
|
||||
|
||||
tx, err := s.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
|
||||
idSet := buildIDSet(ids)
|
||||
result := &DeleteIssuesResult{}
|
||||
|
||||
expandedIDs, err := s.resolveDeleteSet(ctx, tx, ids, idSet, cascade, force, result)
|
||||
if err != nil {
|
||||
return nil, wrapDBError("resolve delete set", err)
|
||||
}
|
||||
|
||||
inClause, args := buildSQLInClause(expandedIDs)
|
||||
if err := s.populateDeleteStats(ctx, tx, inClause, args, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
if err := s.executeDelete(ctx, tx, inClause, args, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
// REMOVED (bd-c7af): Counter sync after deletion - no longer needed with hash IDs
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func buildIDSet(ids []string) map[string]bool {
|
||||
idSet := make(map[string]bool, len(ids))
|
||||
for _, id := range ids {
|
||||
idSet[id] = true
|
||||
}
|
||||
return idSet
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) resolveDeleteSet(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, cascade bool, force bool, result *DeleteIssuesResult) ([]string, error) {
|
||||
if cascade {
|
||||
return s.expandWithDependents(ctx, tx, ids, idSet)
|
||||
}
|
||||
if !force {
|
||||
return ids, s.validateNoDependents(ctx, tx, ids, idSet, result)
|
||||
}
|
||||
return ids, s.trackOrphanedIssues(ctx, tx, ids, idSet, result)
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) expandWithDependents(ctx context.Context, tx *sql.Tx, ids []string, _ map[string]bool) ([]string, error) {
|
||||
allToDelete, err := s.findAllDependentsRecursive(ctx, tx, ids)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find dependents: %w", err)
|
||||
}
|
||||
expandedIDs := make([]string, 0, len(allToDelete))
|
||||
for id := range allToDelete {
|
||||
expandedIDs = append(expandedIDs, id)
|
||||
}
|
||||
return expandedIDs, nil
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) validateNoDependents(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, result *DeleteIssuesResult) error {
|
||||
for _, id := range ids {
|
||||
if err := s.checkSingleIssueValidation(ctx, tx, id, idSet, result); err != nil {
|
||||
return wrapDBError("check dependents", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) checkSingleIssueValidation(ctx context.Context, tx *sql.Tx, id string, idSet map[string]bool, result *DeleteIssuesResult) error {
|
||||
var depCount int
|
||||
err := tx.QueryRowContext(ctx,
|
||||
`SELECT COUNT(*) FROM dependencies WHERE depends_on_id = ?`, id).Scan(&depCount)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check dependents for %s: %w", id, err)
|
||||
}
|
||||
if depCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
rows, err := tx.QueryContext(ctx,
|
||||
`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get dependents for %s: %w", id, err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
|
||||
hasExternal := false
|
||||
for rows.Next() {
|
||||
var depID string
|
||||
if err := rows.Scan(&depID); err != nil {
|
||||
return fmt.Errorf("failed to scan dependent: %w", err)
|
||||
}
|
||||
if !idSet[depID] {
|
||||
hasExternal = true
|
||||
result.OrphanedIssues = append(result.OrphanedIssues, depID)
|
||||
}
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("failed to iterate dependents for %s: %w", id, err)
|
||||
}
|
||||
|
||||
if hasExternal {
|
||||
return fmt.Errorf("issue %s has dependents not in deletion set; use --cascade to delete them or --force to orphan them", id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) trackOrphanedIssues(ctx context.Context, tx *sql.Tx, ids []string, idSet map[string]bool, result *DeleteIssuesResult) error {
|
||||
orphanSet := make(map[string]bool)
|
||||
for _, id := range ids {
|
||||
if err := s.collectOrphansForID(ctx, tx, id, idSet, orphanSet); err != nil {
|
||||
return wrapDBError("collect orphans", err)
|
||||
}
|
||||
}
|
||||
for orphanID := range orphanSet {
|
||||
result.OrphanedIssues = append(result.OrphanedIssues, orphanID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) collectOrphansForID(ctx context.Context, tx *sql.Tx, id string, idSet map[string]bool, orphanSet map[string]bool) error {
|
||||
rows, err := tx.QueryContext(ctx,
|
||||
`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get dependents for %s: %w", id, err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
|
||||
for rows.Next() {
|
||||
var depID string
|
||||
if err := rows.Scan(&depID); err != nil {
|
||||
return fmt.Errorf("failed to scan dependent: %w", err)
|
||||
}
|
||||
if !idSet[depID] {
|
||||
orphanSet[depID] = true
|
||||
}
|
||||
}
|
||||
return rows.Err()
|
||||
}
|
||||
|
||||
func buildSQLInClause(ids []string) (string, []interface{}) {
|
||||
placeholders := make([]string, len(ids))
|
||||
args := make([]interface{}, len(ids))
|
||||
for i, id := range ids {
|
||||
placeholders[i] = "?"
|
||||
args[i] = id
|
||||
}
|
||||
return strings.Join(placeholders, ","), args
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) populateDeleteStats(ctx context.Context, tx *sql.Tx, inClause string, args []interface{}, result *DeleteIssuesResult) error {
|
||||
counts := []struct {
|
||||
query string
|
||||
dest *int
|
||||
}{
|
||||
{fmt.Sprintf(`SELECT COUNT(*) FROM dependencies WHERE issue_id IN (%s) OR depends_on_id IN (%s)`, inClause, inClause), &result.DependenciesCount},
|
||||
{fmt.Sprintf(`SELECT COUNT(*) FROM labels WHERE issue_id IN (%s)`, inClause), &result.LabelsCount},
|
||||
{fmt.Sprintf(`SELECT COUNT(*) FROM events WHERE issue_id IN (%s)`, inClause), &result.EventsCount},
|
||||
}
|
||||
|
||||
for _, c := range counts {
|
||||
queryArgs := args
|
||||
if c.dest == &result.DependenciesCount {
|
||||
queryArgs = append(args, args...)
|
||||
}
|
||||
if err := tx.QueryRowContext(ctx, c.query, queryArgs...).Scan(c.dest); err != nil {
|
||||
return fmt.Errorf("failed to count: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
result.DeletedCount = len(args)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SQLiteStorage) executeDelete(ctx context.Context, tx *sql.Tx, inClause string, args []interface{}, result *DeleteIssuesResult) error {
|
||||
// Note: This method now creates tombstones instead of hard-deleting (bd-3b4)
|
||||
// Only dependencies are deleted - issues are converted to tombstones
|
||||
|
||||
// 1. Delete dependencies - tombstones don't block other issues
|
||||
_, err := tx.ExecContext(ctx,
|
||||
fmt.Sprintf(`DELETE FROM dependencies WHERE issue_id IN (%s) OR depends_on_id IN (%s)`, inClause, inClause),
|
||||
append(args, args...)...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete dependencies: %w", err)
|
||||
}
|
||||
|
||||
// 2. Get issue types before converting to tombstones (need for original_type)
|
||||
issueTypes := make(map[string]string)
|
||||
rows, err := tx.QueryContext(ctx,
|
||||
fmt.Sprintf(`SELECT id, issue_type FROM issues WHERE id IN (%s)`, inClause),
|
||||
args...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get issue types: %w", err)
|
||||
}
|
||||
for rows.Next() {
|
||||
var id, issueType string
|
||||
if err := rows.Scan(&id, &issueType); err != nil {
|
||||
_ = rows.Close() // #nosec G104 - error handling not critical in error path
|
||||
return fmt.Errorf("failed to scan issue type: %w", err)
|
||||
}
|
||||
issueTypes[id] = issueType
|
||||
}
|
||||
_ = rows.Close()
|
||||
|
||||
// 3. Convert issues to tombstones (only for issues that exist)
|
||||
// Note: closed_at must be set to NULL because of CHECK constraint:
|
||||
// (status = 'closed') = (closed_at IS NOT NULL)
|
||||
now := time.Now()
|
||||
deletedCount := 0
|
||||
for id, originalType := range issueTypes {
|
||||
execResult, err := tx.ExecContext(ctx, `
|
||||
UPDATE issues
|
||||
SET status = ?,
|
||||
closed_at = NULL,
|
||||
deleted_at = ?,
|
||||
deleted_by = ?,
|
||||
delete_reason = ?,
|
||||
original_type = ?,
|
||||
updated_at = ?
|
||||
WHERE id = ?
|
||||
`, types.StatusTombstone, now, "batch delete", "batch delete", originalType, now, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tombstone for %s: %w", id, err)
|
||||
}
|
||||
|
||||
rowsAffected, _ := execResult.RowsAffected()
|
||||
if rowsAffected == 0 {
|
||||
continue // Issue doesn't exist, skip
|
||||
}
|
||||
deletedCount++
|
||||
|
||||
// Record tombstone creation event
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
INSERT INTO events (issue_id, event_type, actor, comment)
|
||||
VALUES (?, ?, ?, ?)
|
||||
`, id, "deleted", "batch delete", "batch delete")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to record tombstone event for %s: %w", id, err)
|
||||
}
|
||||
|
||||
// Mark issue as dirty for incremental export
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
INSERT INTO dirty_issues (issue_id, marked_at)
|
||||
VALUES (?, ?)
|
||||
ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
|
||||
`, id, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to mark issue dirty for %s: %w", id, err)
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Invalidate blocked issues cache since statuses changed (bd-5qim)
|
||||
if err := s.invalidateBlockedCache(ctx, tx); err != nil {
|
||||
return fmt.Errorf("failed to invalidate blocked cache: %w", err)
|
||||
}
|
||||
|
||||
result.DeletedCount = deletedCount
|
||||
return nil
|
||||
}
|
||||
|
||||
// findAllDependentsRecursive finds all issues that depend on the given issues, recursively
|
||||
func (s *SQLiteStorage) findAllDependentsRecursive(ctx context.Context, tx *sql.Tx, ids []string) (map[string]bool, error) {
|
||||
result := make(map[string]bool)
|
||||
for _, id := range ids {
|
||||
result[id] = true
|
||||
}
|
||||
|
||||
toProcess := make([]string, len(ids))
|
||||
copy(toProcess, ids)
|
||||
|
||||
for len(toProcess) > 0 {
|
||||
current := toProcess[0]
|
||||
toProcess = toProcess[1:]
|
||||
|
||||
rows, err := tx.QueryContext(ctx,
|
||||
`SELECT issue_id FROM dependencies WHERE depends_on_id = ?`, current)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var depID string
|
||||
if err := rows.Scan(&depID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !result[depID] {
|
||||
result[depID] = true
|
||||
toProcess = append(toProcess, depID)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// parseNullableTimeString parses a nullable time string from database TEXT columns.
|
||||
// The ncruces/go-sqlite3 driver only auto-converts TEXT→time.Time for columns declared
|
||||
// as DATETIME/DATE/TIME/TIMESTAMP. For TEXT columns (like deleted_at), we must parse manually.
|
||||
// Supports RFC3339, RFC3339Nano, and SQLite's native format.
|
||||
func parseNullableTimeString(ns sql.NullString) *time.Time {
|
||||
if !ns.Valid || ns.String == "" {
|
||||
return nil
|
||||
}
|
||||
// Try RFC3339Nano first (more precise), then RFC3339, then SQLite format
|
||||
for _, layout := range []string{time.RFC3339Nano, time.RFC3339, "2006-01-02 15:04:05"} {
|
||||
if t, err := time.Parse(layout, ns.String); err == nil {
|
||||
return &t
|
||||
}
|
||||
}
|
||||
return nil // Unparseable - shouldn't happen with valid data
|
||||
}
|
||||
|
||||
// parseJSONStringArray parses a JSON string array from database TEXT column.
|
||||
// Returns empty slice if the string is empty or invalid JSON.
|
||||
func parseJSONStringArray(s string) []string {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
var result []string
|
||||
if err := json.Unmarshal([]byte(s), &result); err != nil {
|
||||
return nil // Invalid JSON - shouldn't happen with valid data
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// formatJSONStringArray formats a string slice as JSON for database storage.
|
||||
// Returns empty string if the slice is nil or empty.
|
||||
func formatJSONStringArray(arr []string) string {
|
||||
if len(arr) == 0 {
|
||||
return ""
|
||||
}
|
||||
data, err := json.Marshal(arr)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(data)
|
||||
}
|
||||
@@ -1,149 +0,0 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// UpdateIssueID updates an issue ID and all its text fields in a single transaction
|
||||
func (s *SQLiteStorage) UpdateIssueID(ctx context.Context, oldID, newID string, issue *types.Issue, actor string) error {
|
||||
// Get exclusive connection to ensure PRAGMA applies
|
||||
conn, err := s.db.Conn(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get connection: %w", err)
|
||||
}
|
||||
defer func() { _ = conn.Close() }()
|
||||
|
||||
// Disable foreign keys on this specific connection
|
||||
_, err = conn.ExecContext(ctx, `PRAGMA foreign_keys = OFF`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to disable foreign keys: %w", err)
|
||||
}
|
||||
|
||||
tx, err := conn.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
|
||||
result, err := tx.ExecContext(ctx, `
|
||||
UPDATE issues
|
||||
SET id = ?, title = ?, description = ?, design = ?, acceptance_criteria = ?, notes = ?, updated_at = ?
|
||||
WHERE id = ?
|
||||
`, newID, issue.Title, issue.Description, issue.Design, issue.AcceptanceCriteria, issue.Notes, time.Now(), oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update issue ID: %w", err)
|
||||
}
|
||||
|
||||
rows, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get rows affected: %w", err)
|
||||
}
|
||||
if rows == 0 {
|
||||
return fmt.Errorf("issue not found: %s", oldID)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `UPDATE dependencies SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update issue_id in dependencies: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `UPDATE dependencies SET depends_on_id = ? WHERE depends_on_id = ?`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update depends_on_id in dependencies: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `UPDATE events SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update events: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `UPDATE labels SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update labels: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `UPDATE comments SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update comments: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
UPDATE dirty_issues SET issue_id = ? WHERE issue_id = ?
|
||||
`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update dirty_issues: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `UPDATE issue_snapshots SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update issue_snapshots: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `UPDATE compaction_snapshots SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update compaction_snapshots: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
INSERT INTO dirty_issues (issue_id, marked_at)
|
||||
VALUES (?, ?)
|
||||
ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
|
||||
`, newID, time.Now())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to mark issue dirty: %w", err)
|
||||
}
|
||||
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
INSERT INTO events (issue_id, event_type, actor, old_value, new_value)
|
||||
VALUES (?, 'renamed', ?, ?, ?)
|
||||
`, newID, actor, oldID, newID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to record rename event: %w", err)
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// RenameDependencyPrefix updates the prefix in all dependency records
|
||||
// GH#630: This was previously a no-op, causing dependencies to break after rename-prefix
|
||||
func (s *SQLiteStorage) RenameDependencyPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
|
||||
// Update issue_id column
|
||||
_, err := s.db.ExecContext(ctx, `
|
||||
UPDATE dependencies
|
||||
SET issue_id = ? || substr(issue_id, length(?) + 1)
|
||||
WHERE issue_id LIKE ? || '%'
|
||||
`, newPrefix, oldPrefix, oldPrefix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update issue_id in dependencies: %w", err)
|
||||
}
|
||||
|
||||
// Update depends_on_id column
|
||||
_, err = s.db.ExecContext(ctx, `
|
||||
UPDATE dependencies
|
||||
SET depends_on_id = ? || substr(depends_on_id, length(?) + 1)
|
||||
WHERE depends_on_id LIKE ? || '%'
|
||||
`, newPrefix, oldPrefix, oldPrefix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update depends_on_id in dependencies: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RenameCounterPrefix is a no-op with hash-based IDs (bd-8e05)
|
||||
// Kept for backward compatibility with rename-prefix command
|
||||
func (s *SQLiteStorage) RenameCounterPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
|
||||
// Hash-based IDs don't use counters, so nothing to update
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetCounter is a no-op with hash-based IDs (bd-8e05)
|
||||
// Kept for backward compatibility
|
||||
func (s *SQLiteStorage) ResetCounter(ctx context.Context, prefix string) error {
|
||||
// Hash-based IDs don't use counters, so nothing to reset
|
||||
return nil
|
||||
}
|
||||
@@ -1,429 +0,0 @@
|
||||
package sqlite
|
||||
|
||||
import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/steveyegge/beads/internal/types"
)
|
||||
|
||||
// GetCloseReason retrieves the close reason from the most recent closed event for an issue
|
||||
func (s *SQLiteStorage) GetCloseReason(ctx context.Context, issueID string) (string, error) {
|
||||
var comment sql.NullString
|
||||
err := s.db.QueryRowContext(ctx, `
|
||||
SELECT comment FROM events
|
||||
WHERE issue_id = ? AND event_type = ?
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 1
|
||||
`, issueID, types.EventClosed).Scan(&comment)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return "", nil
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get close reason: %w", err)
|
||||
}
|
||||
if comment.Valid {
|
||||
return comment.String, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// GetCloseReasonsForIssues retrieves close reasons for multiple issues in a single query
|
||||
func (s *SQLiteStorage) GetCloseReasonsForIssues(ctx context.Context, issueIDs []string) (map[string]string, error) {
|
||||
result := make(map[string]string)
|
||||
if len(issueIDs) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Build placeholders for IN clause
|
||||
placeholders := make([]string, len(issueIDs))
|
||||
args := make([]interface{}, len(issueIDs)+1)
|
||||
args[0] = types.EventClosed
|
||||
for i, id := range issueIDs {
|
||||
placeholders[i] = "?"
|
||||
args[i+1] = id
|
||||
}
|
||||
|
||||
// Use a subquery to get the most recent closed event for each issue
|
||||
// #nosec G201 - safe SQL with controlled formatting
|
||||
query := fmt.Sprintf(`
|
||||
SELECT e.issue_id, e.comment
|
||||
FROM events e
|
||||
INNER JOIN (
|
||||
SELECT issue_id, MAX(created_at) as max_created_at
|
||||
FROM events
|
||||
WHERE event_type = ? AND issue_id IN (%s)
|
||||
GROUP BY issue_id
|
||||
) latest ON e.issue_id = latest.issue_id AND e.created_at = latest.max_created_at
|
||||
WHERE e.event_type = ?
|
||||
`, strings.Join(placeholders, ", "))
|
||||
|
||||
// Append event_type again for the outer WHERE clause
|
||||
args = append(args, types.EventClosed)
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get close reasons: %w", err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID string
|
||||
var comment sql.NullString
|
||||
if err := rows.Scan(&issueID, &comment); err != nil {
|
||||
return nil, fmt.Errorf("failed to scan close reason: %w", err)
|
||||
}
|
||||
if comment.Valid && comment.String != "" {
|
||||
result[issueID] = comment.String
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetIssueByExternalRef retrieves an issue by external reference
|
||||
func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef string) (*types.Issue, error) {
|
||||
var issue types.Issue
|
||||
var closedAt sql.NullTime
|
||||
var estimatedMinutes sql.NullInt64
|
||||
var assignee sql.NullString
|
||||
var externalRefCol sql.NullString
|
||||
var compactedAt sql.NullTime
|
||||
var originalSize sql.NullInt64
|
||||
var contentHash sql.NullString
|
||||
var compactedAtCommit sql.NullString
|
||||
var sourceRepo sql.NullString
|
||||
var closeReason sql.NullString
|
||||
var deletedAt sql.NullString // TEXT column, not DATETIME - must parse manually
|
||||
var deletedBy sql.NullString
|
||||
var deleteReason sql.NullString
|
||||
var originalType sql.NullString
|
||||
// Messaging fields (bd-kwro)
|
||||
var sender sql.NullString
|
||||
var wisp sql.NullInt64
|
||||
// Pinned field (bd-7h5)
|
||||
var pinned sql.NullInt64
|
||||
// Template field (beads-1ra)
|
||||
var isTemplate sql.NullInt64
|
||||
// Gate fields (bd-udsi)
|
||||
var awaitType sql.NullString
|
||||
var awaitID sql.NullString
|
||||
var timeoutNs sql.NullInt64
|
||||
var waiters sql.NullString
|
||||
|
||||
err := s.db.QueryRowContext(ctx, `
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters
|
||||
FROM issues
|
||||
WHERE external_ref = ?
|
||||
`, externalRef).Scan(
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRefCol,
|
||||
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
&awaitType, &awaitID, &timeoutNs, &waiters,
|
||||
)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get issue by external_ref: %w", err)
|
||||
}
|
||||
|
||||
if contentHash.Valid {
|
||||
issue.ContentHash = contentHash.String
|
||||
}
|
||||
if closedAt.Valid {
|
||||
issue.ClosedAt = &closedAt.Time
|
||||
}
|
||||
if estimatedMinutes.Valid {
|
||||
mins := int(estimatedMinutes.Int64)
|
||||
issue.EstimatedMinutes = &mins
|
||||
}
|
||||
if assignee.Valid {
|
||||
issue.Assignee = assignee.String
|
||||
}
|
||||
if externalRefCol.Valid {
|
||||
issue.ExternalRef = &externalRefCol.String
|
||||
}
|
||||
if compactedAt.Valid {
|
||||
issue.CompactedAt = &compactedAt.Time
|
||||
}
|
||||
if compactedAtCommit.Valid {
|
||||
issue.CompactedAtCommit = &compactedAtCommit.String
|
||||
}
|
||||
if originalSize.Valid {
|
||||
issue.OriginalSize = int(originalSize.Int64)
|
||||
}
|
||||
if sourceRepo.Valid {
|
||||
issue.SourceRepo = sourceRepo.String
|
||||
}
|
||||
if closeReason.Valid {
|
||||
issue.CloseReason = closeReason.String
|
||||
}
|
||||
issue.DeletedAt = parseNullableTimeString(deletedAt)
|
||||
if deletedBy.Valid {
|
||||
issue.DeletedBy = deletedBy.String
|
||||
}
|
||||
if deleteReason.Valid {
|
||||
issue.DeleteReason = deleteReason.String
|
||||
}
|
||||
if originalType.Valid {
|
||||
issue.OriginalType = originalType.String
|
||||
}
|
||||
// Messaging fields (bd-kwro)
|
||||
if sender.Valid {
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Wisp = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
issue.Pinned = true
|
||||
}
|
||||
// Template field (beads-1ra)
|
||||
if isTemplate.Valid && isTemplate.Int64 != 0 {
|
||||
issue.IsTemplate = true
|
||||
}
|
||||
// Gate fields (bd-udsi)
|
||||
if awaitType.Valid {
|
||||
issue.AwaitType = awaitType.String
|
||||
}
|
||||
if awaitID.Valid {
|
||||
issue.AwaitID = awaitID.String
|
||||
}
|
||||
if timeoutNs.Valid {
|
||||
issue.Timeout = time.Duration(timeoutNs.Int64)
|
||||
}
|
||||
if waiters.Valid && waiters.String != "" {
|
||||
issue.Waiters = parseJSONStringArray(waiters.String)
|
||||
}
|
||||
|
||||
// Fetch labels for this issue
|
||||
labels, err := s.GetLabels(ctx, issue.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get labels: %w", err)
|
||||
}
|
||||
issue.Labels = labels
|
||||
|
||||
return &issue, nil
|
||||
}
|
||||
|
||||
// SearchIssues finds issues matching query and filters
//
// query, when non-empty, is substring-matched (via SQL LIKE) against title,
// description, and id. All filter criteria are ANDed together. Tombstone
// issues are excluded by default unless filter.Status is set or
// filter.IncludeTombstones is true. Results are ordered by priority
// ascending, then newest-first, optionally limited by filter.Limit.
func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
	// Check for external database file modifications (daemon mode)
	s.checkFreshness()

	// Hold read lock during database operations to prevent reconnect() from
	// closing the connection mid-query (GH#607 race condition fix)
	s.reconnectMu.RLock()
	defer s.reconnectMu.RUnlock()

	// Each active filter appends one WHERE clause plus its bound args; the
	// clauses are ANDed together at the end.
	whereClauses := []string{}
	args := []interface{}{}

	// Free-text query: substring match across title, description, and id.
	if query != "" {
		whereClauses = append(whereClauses, "(title LIKE ? OR description LIKE ? OR id LIKE ?)")
		pattern := "%" + query + "%"
		args = append(args, pattern, pattern, pattern)
	}

	if filter.TitleSearch != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		pattern := "%" + filter.TitleSearch + "%"
		args = append(args, pattern)
	}

	// Pattern matching
	if filter.TitleContains != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		args = append(args, "%"+filter.TitleContains+"%")
	}
	if filter.DescriptionContains != "" {
		whereClauses = append(whereClauses, "description LIKE ?")
		args = append(args, "%"+filter.DescriptionContains+"%")
	}
	if filter.NotesContains != "" {
		whereClauses = append(whereClauses, "notes LIKE ?")
		args = append(args, "%"+filter.NotesContains+"%")
	}

	if filter.Status != nil {
		whereClauses = append(whereClauses, "status = ?")
		args = append(args, *filter.Status)
	} else if !filter.IncludeTombstones {
		// Exclude tombstones by default unless explicitly filtering for them (bd-1bu)
		whereClauses = append(whereClauses, "status != ?")
		args = append(args, types.StatusTombstone)
	}

	if filter.Priority != nil {
		whereClauses = append(whereClauses, "priority = ?")
		args = append(args, *filter.Priority)
	}

	// Priority ranges (inclusive at both ends)
	if filter.PriorityMin != nil {
		whereClauses = append(whereClauses, "priority >= ?")
		args = append(args, *filter.PriorityMin)
	}
	if filter.PriorityMax != nil {
		whereClauses = append(whereClauses, "priority <= ?")
		args = append(args, *filter.PriorityMax)
	}

	if filter.IssueType != nil {
		whereClauses = append(whereClauses, "issue_type = ?")
		args = append(args, *filter.IssueType)
	}

	if filter.Assignee != nil {
		whereClauses = append(whereClauses, "assignee = ?")
		args = append(args, *filter.Assignee)
	}

	// Date ranges — bound values are formatted as RFC3339 text and compared
	// lexically by SQLite; assumes stored timestamps sort correctly under
	// that comparison (TODO confirm against schema).
	if filter.CreatedAfter != nil {
		whereClauses = append(whereClauses, "created_at > ?")
		args = append(args, filter.CreatedAfter.Format(time.RFC3339))
	}
	if filter.CreatedBefore != nil {
		whereClauses = append(whereClauses, "created_at < ?")
		args = append(args, filter.CreatedBefore.Format(time.RFC3339))
	}
	if filter.UpdatedAfter != nil {
		whereClauses = append(whereClauses, "updated_at > ?")
		args = append(args, filter.UpdatedAfter.Format(time.RFC3339))
	}
	if filter.UpdatedBefore != nil {
		whereClauses = append(whereClauses, "updated_at < ?")
		args = append(args, filter.UpdatedBefore.Format(time.RFC3339))
	}
	if filter.ClosedAfter != nil {
		whereClauses = append(whereClauses, "closed_at > ?")
		args = append(args, filter.ClosedAfter.Format(time.RFC3339))
	}
	if filter.ClosedBefore != nil {
		whereClauses = append(whereClauses, "closed_at < ?")
		args = append(args, filter.ClosedBefore.Format(time.RFC3339))
	}

	// Empty/null checks
	if filter.EmptyDescription {
		whereClauses = append(whereClauses, "(description IS NULL OR description = '')")
	}
	if filter.NoAssignee {
		whereClauses = append(whereClauses, "(assignee IS NULL OR assignee = '')")
	}
	if filter.NoLabels {
		whereClauses = append(whereClauses, "id NOT IN (SELECT DISTINCT issue_id FROM labels)")
	}

	// Label filtering: issue must have ALL specified labels
	// (one IN-subquery clause per label, ANDed together).
	if len(filter.Labels) > 0 {
		for _, label := range filter.Labels {
			whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM labels WHERE label = ?)")
			args = append(args, label)
		}
	}

	// Label filtering (OR): issue must have AT LEAST ONE of these labels
	if len(filter.LabelsAny) > 0 {
		placeholders := make([]string, len(filter.LabelsAny))
		for i, label := range filter.LabelsAny {
			placeholders[i] = "?"
			args = append(args, label)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (SELECT issue_id FROM labels WHERE label IN (%s))", strings.Join(placeholders, ", ")))
	}

	// ID filtering: match specific issue IDs
	if len(filter.IDs) > 0 {
		placeholders := make([]string, len(filter.IDs))
		for i, id := range filter.IDs {
			placeholders[i] = "?"
			args = append(args, id)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (%s)", strings.Join(placeholders, ", ")))
	}

	// Wisp filtering (bd-kwro.9)
	if filter.Wisp != nil {
		if *filter.Wisp {
			whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
		} else {
			whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
		}
	}

	// Pinned filtering (bd-7h5) — NULL is treated as not pinned.
	if filter.Pinned != nil {
		if *filter.Pinned {
			whereClauses = append(whereClauses, "pinned = 1")
		} else {
			whereClauses = append(whereClauses, "(pinned = 0 OR pinned IS NULL)")
		}
	}

	// Template filtering (beads-1ra) — NULL is treated as not a template.
	if filter.IsTemplate != nil {
		if *filter.IsTemplate {
			whereClauses = append(whereClauses, "is_template = 1")
		} else {
			whereClauses = append(whereClauses, "(is_template = 0 OR is_template IS NULL)")
		}
	}

	// Parent filtering (bd-yqhh): filter children by parent issue
	if filter.ParentID != nil {
		whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM dependencies WHERE type = 'parent-child' AND depends_on_id = ?)")
		args = append(args, *filter.ParentID)
	}

	whereSQL := ""
	if len(whereClauses) > 0 {
		whereSQL = "WHERE " + strings.Join(whereClauses, " AND ")
	}

	// Limit is bound as a parameter; it must be appended to args last, after
	// every WHERE-clause arg above.
	limitSQL := ""
	if filter.Limit > 0 {
		limitSQL = " LIMIT ?"
		args = append(args, filter.Limit)
	}

	// #nosec G201 - safe SQL with controlled formatting
	querySQL := fmt.Sprintf(`
		SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
		       status, priority, issue_type, assignee, estimated_minutes,
		       created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
		       deleted_at, deleted_by, delete_reason, original_type,
		       sender, ephemeral, pinned, is_template,
		       await_type, await_id, timeout_ns, waiters
		FROM issues
		%s
		ORDER BY priority ASC, created_at DESC
		%s
	`, whereSQL, limitSQL)

	rows, err := s.db.QueryContext(ctx, querySQL, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to search issues: %w", err)
	}
	defer func() { _ = rows.Close() }()

	// scanIssues consumes the rows and builds the result slice.
	return s.scanIssues(ctx, rows)
}
|
||||
@@ -1,109 +0,0 @@
|
||||
# Beads Skill for Claude Code
|
||||
|
||||
A comprehensive skill for using [beads](https://github.com/steveyegge/beads) (bd) issue tracking with Claude Code.
|
||||
|
||||
## What This Skill Does
|
||||
|
||||
This skill teaches Claude Code how to use bd effectively for:
|
||||
- **Multi-session work tracking** - Persistent memory across conversation compactions
|
||||
- **Dependency management** - Graph-based issue relationships
|
||||
- **Session handoff** - Writing notes that survive context resets
|
||||
- **Molecules and wisps** (v0.34.0+) - Reusable work templates and ephemeral workflows
|
||||
|
||||
## Installation
|
||||
|
||||
Copy the `beads/` directory to your Claude Code skills location:
|
||||
|
||||
```bash
|
||||
# Global installation
|
||||
cp -r beads ~/.claude/skills/
|
||||
|
||||
# Or project-local
|
||||
cp -r beads .claude/skills/
|
||||
```
|
||||
|
||||
## When Claude Uses This Skill
|
||||
|
||||
The skill activates when conversations involve:
|
||||
- "multi-session", "complex dependencies", "resume after weeks"
|
||||
- "project memory", "persistent context", "side quest tracking"
|
||||
- Work that spans multiple days or compaction cycles
|
||||
- Tasks too complex for simple TodoWrite lists
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
beads/
|
||||
├── SKILL.md # Main skill file (Claude reads this first)
|
||||
├── README.md # This file (for humans)
|
||||
└── references/ # Detailed documentation (loaded on demand)
|
||||
├── BOUNDARIES.md # When to use bd vs TodoWrite
|
||||
├── CLI_BOOTSTRAP_ADMIN.md # CLI command reference
|
||||
├── DEPENDENCIES.md # Dependency semantics (A blocks B vs B blocks A)
|
||||
├── INTEGRATION_PATTERNS.md # TodoWrite and other tool integration
|
||||
├── ISSUE_CREATION.md # When and how to create issues
|
||||
├── MOLECULES.md # Protos, mols, wisps (v0.34.0+)
|
||||
├── PATTERNS.md # Common usage patterns
|
||||
├── RESUMABILITY.md # Writing notes for post-compaction recovery
|
||||
├── STATIC_DATA.md # Using bd for reference databases
|
||||
├── TROUBLESHOOTING.md # Common issues and fixes
|
||||
└── WORKFLOWS.md # Step-by-step workflow guides
|
||||
```
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### bd vs TodoWrite
|
||||
|
||||
| Use bd when... | Use TodoWrite when... |
|
||||
|----------------|----------------------|
|
||||
| Work spans multiple sessions | Single-session tasks |
|
||||
| Complex dependencies exist | Linear step-by-step work |
|
||||
| Need to resume after weeks | Just need a quick checklist |
|
||||
| Knowledge work with fuzzy boundaries | Clear, immediate tasks |
|
||||
|
||||
### The Dependency Direction Trap
|
||||
|
||||
`bd dep add A B` means **"A depends on B"** (B must complete before A can start).
|
||||
|
||||
```bash
|
||||
# Want: "Setup must complete before Implementation"
|
||||
bd dep add implementation setup # ✓ CORRECT
|
||||
# NOT: bd dep add setup implementation # ✗ WRONG
|
||||
```
|
||||
|
||||
### Surviving Compaction
|
||||
|
||||
When Claude's context gets compacted, conversation history is lost but bd state survives. Write notes as if explaining to a future Claude with zero context:
|
||||
|
||||
```bash
|
||||
bd update issue-123 --notes "COMPLETED: JWT auth with RS256
|
||||
KEY DECISION: RS256 over HS256 for key rotation
|
||||
IN PROGRESS: Password reset flow
|
||||
NEXT: Implement rate limiting"
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
- [bd CLI](https://github.com/steveyegge/beads) installed (`brew install steveyegge/beads/bd`)
|
||||
- A git repository (bd requires git for sync)
|
||||
- Initialized database (`bd init` in project root)
|
||||
|
||||
## Version Compatibility
|
||||
|
||||
- **v0.34.0+**: Full support including molecules, wisps, and cross-project dependencies
|
||||
- **v0.15.0+**: Core functionality (dependencies, notes, status tracking)
|
||||
- **Earlier versions**: Basic functionality but some features may be missing
|
||||
|
||||
## Contributing
|
||||
|
||||
This skill is maintained at [github.com/steveyegge/beads](https://github.com/steveyegge/beads) in the `skills/beads/` directory.
|
||||
|
||||
Issues and PRs welcome for:
|
||||
- Documentation improvements
|
||||
- New workflow patterns
|
||||
- Bug fixes in examples
|
||||
- Additional troubleshooting scenarios
|
||||
|
||||
## License
|
||||
|
||||
MIT (same as beads)
|
||||
@@ -1,407 +0,0 @@
|
||||
# Integration Patterns with Other Skills
|
||||
|
||||
How bd-issue-tracking integrates with TodoWrite, writing-plans, and other skills for optimal workflow.
|
||||
|
||||
## Contents
|
||||
|
||||
- [TodoWrite Integration](#todowrite-integration) - Temporal layering pattern
|
||||
- [writing-plans Integration](#writing-plans-integration) - Detailed implementation plans
|
||||
- [Cross-Skill Workflows](#cross-skill-workflows) - Using multiple skills together
|
||||
- [Decision Framework](#decision-framework) - When to use which tool
|
||||
|
||||
---
|
||||
|
||||
## TodoWrite Integration
|
||||
|
||||
**Both tools complement each other at different timescales:**
|
||||
|
||||
### Temporal Layering Pattern
|
||||
|
||||
**TodoWrite** (short-term working memory - this hour):
|
||||
- Tactical execution: "Review Section 3", "Expand Q&A answers"
|
||||
- Marked completed as you go
|
||||
- Present/future tense ("Review", "Expand", "Create")
|
||||
- Ephemeral: Disappears when session ends
|
||||
|
||||
**Beads** (long-term episodic memory - this week/month):
|
||||
- Strategic objectives: "Continue work on strategic planning document"
|
||||
- Key decisions and outcomes in notes field
|
||||
- Past tense in notes ("COMPLETED", "Discovered", "Blocked by")
|
||||
- Persistent: Survives compaction and session boundaries
|
||||
|
||||
**Key insight**: TodoWrite = working copy for the current hour. Beads = project journal for the current month.
|
||||
|
||||
### The Handoff Pattern
|
||||
|
||||
1. **Session start**: Read bead → Create TodoWrite items for immediate actions
|
||||
2. **During work**: Mark TodoWrite items completed as you go
|
||||
3. **Reach milestone**: Update bead notes with outcomes + context
|
||||
4. **Session end**: TodoWrite disappears, bead survives with enriched notes
|
||||
|
||||
**After compaction**: TodoWrite is gone forever, but bead notes reconstruct what happened.
|
||||
|
||||
### Example: TodoWrite tracks execution, Beads capture meaning
|
||||
|
||||
**TodoWrite (ephemeral execution view):**
|
||||
```
|
||||
[completed] Implement login endpoint
|
||||
[in_progress] Add password hashing with bcrypt
|
||||
[pending] Create session middleware
|
||||
```
|
||||
|
||||
**Corresponding bead notes (persistent context):**
|
||||
```bash
|
||||
bd update issue-123 --notes "COMPLETED: Login endpoint with bcrypt password
|
||||
hashing (12 rounds). KEY DECISION: Using JWT tokens (not sessions) for stateless
|
||||
auth - simplifies horizontal scaling. IN PROGRESS: Session middleware implementation.
|
||||
NEXT: Need user input on token expiry time (1hr vs 24hr trade-off)."
|
||||
```
|
||||
|
||||
**What's different**:
|
||||
- TodoWrite: Task names (what to do)
|
||||
- Beads: Outcomes and decisions (what was learned, why it matters)
|
||||
|
||||
**Don't duplicate**: TodoWrite tracks execution, Beads captures meaning and context.
|
||||
|
||||
### When to Update Each Tool
|
||||
|
||||
**Update TodoWrite** (frequently):
|
||||
- Mark task completed as you finish each one
|
||||
- Add new tasks as you break down work
|
||||
- Update in_progress when switching tasks
|
||||
|
||||
**Update Beads** (at milestones):
|
||||
- Completed a significant piece of work
|
||||
- Made a key decision that needs documentation
|
||||
- Hit a blocker that pauses progress
|
||||
- About to ask user for input
|
||||
- Session token usage > 70%
|
||||
- End of session
|
||||
|
||||
**Pattern**: TodoWrite changes every few minutes. Beads updates every hour or at natural breakpoints.
|
||||
|
||||
### Full Workflow Example
|
||||
|
||||
**Scenario**: Implement OAuth authentication (multi-session work)
|
||||
|
||||
**Session 1 - Planning**:
|
||||
```bash
|
||||
# Create bd issue
|
||||
bd create "Implement OAuth authentication" -t feature -p 0 --design "
|
||||
JWT tokens with refresh rotation.
|
||||
See BOUNDARIES.md for bd vs TodoWrite decision.
|
||||
"
|
||||
|
||||
# Mark in_progress
|
||||
bd update oauth-1 --status in_progress
|
||||
|
||||
# Create TodoWrite for today's work
|
||||
TodoWrite:
|
||||
- [ ] Research OAuth 2.0 refresh token flow
|
||||
- [ ] Design token schema
|
||||
- [ ] Set up test environment
|
||||
```
|
||||
|
||||
**End of Session 1**:
|
||||
```bash
|
||||
# Update bd with outcomes
|
||||
bd update oauth-1 --notes "COMPLETED: Researched OAuth2 refresh flow. Decided on 7-day refresh tokens.
|
||||
KEY DECISION: RS256 over HS256 (enables key rotation per security review).
|
||||
IN PROGRESS: Need to set up test OAuth provider.
|
||||
NEXT: Configure test provider, then implement token endpoint."
|
||||
|
||||
# TodoWrite disappears when session ends
|
||||
```
|
||||
|
||||
**Session 2 - Implementation** (after compaction):
|
||||
```bash
|
||||
# Read bd to reconstruct context
|
||||
bd show oauth-1
|
||||
# See: COMPLETED research, NEXT is configure test provider
|
||||
|
||||
# Create fresh TodoWrite from NEXT
|
||||
TodoWrite:
|
||||
- [ ] Configure test OAuth provider
|
||||
- [ ] Implement token endpoint
|
||||
- [ ] Add basic tests
|
||||
|
||||
# Work proceeds...
|
||||
|
||||
# Update bd at milestone
|
||||
bd update oauth-1 --notes "COMPLETED: Test provider configured, token endpoint implemented.
|
||||
TESTS: 5 passing (token generation, validation, expiry).
|
||||
IN PROGRESS: Adding refresh token rotation.
|
||||
NEXT: Implement rotation, add rate limiting, security review."
|
||||
```
|
||||
|
||||
**For complete decision criteria and boundaries, see:** [BOUNDARIES.md](BOUNDARIES.md)
|
||||
|
||||
---
|
||||
|
||||
## writing-plans Integration
|
||||
|
||||
**For complex multi-step features**, the design field in bd issues can link to detailed implementation plans that break work into bite-sized RED-GREEN-REFACTOR steps.
|
||||
|
||||
### When to Create Detailed Plans
|
||||
|
||||
**Use detailed plans for:**
|
||||
- Complex features with multiple components
|
||||
- Multi-session work requiring systematic breakdown
|
||||
- Features where TDD discipline adds value (core logic, critical paths)
|
||||
- Work that benefits from explicit task sequencing
|
||||
|
||||
**Skip detailed plans for:**
|
||||
- Simple features (single function, straightforward logic)
|
||||
- Exploratory work (API testing, pattern discovery)
|
||||
- Infrastructure setup (configuration, wiring)
|
||||
|
||||
**The test:** If you can implement it in one session without a checklist, skip the detailed plan.
|
||||
|
||||
### Using the writing-plans Skill
|
||||
|
||||
When design field needs detailed breakdown, reference the **writing-plans** skill:
|
||||
|
||||
**Pattern:**
|
||||
```bash
|
||||
# Create issue with high-level design
|
||||
bd create "Implement OAuth token refresh" --design "
|
||||
Add JWT refresh token flow with rotation.
|
||||
See docs/plans/2025-10-23-oauth-refresh-design.md for detailed plan.
|
||||
"
|
||||
|
||||
# Then use writing-plans skill to create detailed plan
|
||||
# The skill creates: docs/plans/YYYY-MM-DD-<feature-name>.md
|
||||
```
|
||||
|
||||
**Detailed plan structure** (from writing-plans):
|
||||
- Bite-sized tasks (2-5 minutes each)
|
||||
- Explicit RED-GREEN-REFACTOR steps per task
|
||||
- Exact file paths and complete code
|
||||
- Verification commands with expected output
|
||||
- Frequent commit points
|
||||
|
||||
**Example task from detailed plan:**
|
||||
````markdown
### Task 1: Token Refresh Endpoint

**Files:**
- Create: `src/auth/refresh.py`
- Test: `tests/auth/test_refresh.py`

**Step 1: Write failing test**
```python
def test_refresh_token_returns_new_access_token():
    refresh_token = create_valid_refresh_token()
    response = refresh_endpoint(refresh_token)
    assert response.status == 200
    assert response.access_token is not None
```

**Step 2: Run test to verify it fails**
Run: `pytest tests/auth/test_refresh.py::test_refresh_token_returns_new_access_token -v`
Expected: FAIL with "refresh_endpoint not defined"

**Step 3: Implement minimal code**
[... exact implementation ...]

**Step 4: Verify test passes**
[... verification ...]

**Step 5: Commit**
```bash
git add tests/auth/test_refresh.py src/auth/refresh.py
git commit -m "feat: add token refresh endpoint"
```
````
|
||||
|
||||
### Integration with bd Workflow
|
||||
|
||||
**Three-layer structure**:
|
||||
1. **bd issue**: Strategic objective + high-level design
|
||||
2. **Detailed plan** (writing-plans): Step-by-step execution guide
|
||||
3. **TodoWrite**: Current task within the plan
|
||||
|
||||
**During planning phase:**
|
||||
1. Create bd issue with high-level design
|
||||
2. If complex: Use writing-plans skill to create detailed plan
|
||||
3. Link plan in design field: `See docs/plans/YYYY-MM-DD-<topic>.md`
|
||||
|
||||
**During execution phase:**
|
||||
1. Open detailed plan (if exists)
|
||||
2. Use TodoWrite to track current task within plan
|
||||
3. Update bd notes at milestones, not per-task
|
||||
4. Close bd issue when all plan tasks complete
|
||||
|
||||
**Don't duplicate:** Detailed plan = execution steps. BD notes = outcomes and decisions.
|
||||
|
||||
**Example bd notes after using detailed plan:**
|
||||
```bash
|
||||
bd update oauth-5 --notes "COMPLETED: Token refresh endpoint (5 tasks from plan: endpoint + rotation + tests)
|
||||
KEY DECISION: 7-day refresh tokens (vs 30-day) - reduces risk of token theft
|
||||
TESTS: All 12 tests passing (auth, rotation, expiry, error handling)"
|
||||
```
|
||||
|
||||
### When NOT to Use Detailed Plans
|
||||
|
||||
**Red flags:**
|
||||
- Feature is simple enough to implement in one pass
|
||||
- Work is exploratory (discovering patterns, testing APIs)
|
||||
- Infrastructure work (OAuth setup, MCP configuration)
|
||||
- Would spend more time planning than implementing
|
||||
|
||||
**Rule of thumb:** Use detailed plans when systematic breakdown prevents mistakes, not for ceremony.
|
||||
|
||||
**Pattern summary**:
|
||||
- **Simple feature**: bd issue only
|
||||
- **Complex feature**: bd issue + TodoWrite
|
||||
- **Very complex feature**: bd issue + writing-plans + TodoWrite
|
||||
|
||||
---
|
||||
|
||||
## Cross-Skill Workflows
|
||||
|
||||
### Pattern: Research Document with Strategic Planning
|
||||
|
||||
**Scenario**: User asks "Help me write a strategic planning document for Q4"
|
||||
|
||||
**Tools used**: bd-issue-tracking + developing-strategic-documents skill
|
||||
|
||||
**Workflow**:
|
||||
1. Create bd issue for tracking:
|
||||
```bash
|
||||
bd create "Q4 strategic planning document" -t task -p 0
|
||||
bd update strat-1 --status in_progress
|
||||
```
|
||||
|
||||
2. Use developing-strategic-documents skill for research and writing
|
||||
|
||||
3. Update bd notes at milestones:
|
||||
```bash
|
||||
bd update strat-1 --notes "COMPLETED: Research phase (reviewed 5 competitor docs, 3 internal reports)
|
||||
KEY DECISION: Focus on market expansion over cost optimization per exec input
|
||||
IN PROGRESS: Drafting recommendations section
|
||||
NEXT: Get exec review of draft recommendations before finalizing"
|
||||
```
|
||||
|
||||
4. TodoWrite tracks immediate writing tasks:
|
||||
```
|
||||
- [ ] Draft recommendation 1: Market expansion
|
||||
- [ ] Add supporting data from research
|
||||
- [ ] Create budget estimates
|
||||
```
|
||||
|
||||
**Why this works**: bd preserves context across sessions (document might take days), skill provides writing framework, TodoWrite tracks current work.
|
||||
|
||||
### Pattern: Multi-File Refactoring
|
||||
|
||||
**Scenario**: Refactor authentication system across 8 files
|
||||
|
||||
**Tools used**: bd-issue-tracking + systematic-debugging (if issues found)
|
||||
|
||||
**Workflow**:
|
||||
1. Create epic and subtasks:
|
||||
```bash
|
||||
bd create "Refactor auth system to use JWT" -t epic -p 0
|
||||
bd create "Update login endpoint" -t task
|
||||
bd create "Update token validation" -t task
|
||||
bd create "Update middleware" -t task
|
||||
bd create "Update tests" -t task
|
||||
|
||||
# Link hierarchy
|
||||
bd dep add auth-epic login-1 --type parent-child
|
||||
bd dep add auth-epic validation-2 --type parent-child
|
||||
bd dep add auth-epic middleware-3 --type parent-child
|
||||
bd dep add auth-epic tests-4 --type parent-child
|
||||
|
||||
# Add ordering
|
||||
bd dep add validation-2 login-1 # validation depends on login
|
||||
bd dep add middleware-3 validation-2 # middleware depends on validation
|
||||
bd dep add tests-4 middleware-3 # tests depend on middleware
|
||||
```
|
||||
|
||||
2. Work through subtasks in order, using TodoWrite for each:
|
||||
```
|
||||
Current: login-1
|
||||
TodoWrite:
|
||||
- [ ] Update login route signature
|
||||
- [ ] Add JWT generation
|
||||
- [ ] Update tests
|
||||
- [ ] Verify backward compatibility
|
||||
```
|
||||
|
||||
3. Update bd notes as each completes:
|
||||
```bash
|
||||
bd close login-1 --reason "Updated to JWT. Tests passing. Backward compatible with session auth."
|
||||
```
|
||||
|
||||
4. If issues discovered, use systematic-debugging skill + create blocker issues
|
||||
|
||||
**Why this works**: bd tracks dependencies and progress across files, TodoWrite focuses on current file, skills provide specialized frameworks when needed.
|
||||
|
||||
---
|
||||
|
||||
## Decision Framework
|
||||
|
||||
### Which Tool for Which Purpose?
|
||||
|
||||
| Need | Tool | Why |
|
||||
|------|------|-----|
|
||||
| Track today's execution | TodoWrite | Lightweight, shows current progress |
|
||||
| Preserve context across sessions | bd | Survives compaction, persistent memory |
|
||||
| Detailed implementation steps | writing-plans | RED-GREEN-REFACTOR breakdown |
|
||||
| Research document structure | developing-strategic-documents | Domain-specific framework |
|
||||
| Debug complex issue | systematic-debugging | Structured debugging protocol |
|
||||
|
||||
### Decision Tree
|
||||
|
||||
```
|
||||
Is this work done in this session?
|
||||
├─ Yes → Use TodoWrite only
|
||||
└─ No → Use bd
|
||||
        ├─ Simple feature → bd issue + TodoWrite
|
||||
        └─ Complex feature → bd issue + writing-plans + TodoWrite
|
||||
|
||||
Will conversation history get compacted?
|
||||
├─ Likely → Use bd (context survives)
|
||||
└─ Unlikely → TodoWrite is sufficient
|
||||
|
||||
Does work have dependencies or blockers?
|
||||
├─ Yes → Use bd (tracks relationships)
|
||||
└─ No → TodoWrite is sufficient
|
||||
|
||||
Is this specialized domain work?
|
||||
├─ Research/writing → developing-strategic-documents
|
||||
├─ Complex debugging → systematic-debugging
|
||||
├─ Detailed implementation → writing-plans
|
||||
└─ General tracking → bd + TodoWrite
|
||||
```
|
||||
|
||||
### Integration Anti-Patterns
|
||||
|
||||
**Don't**:
|
||||
- Duplicate TodoWrite tasks into bd notes (different purposes)
|
||||
- Create bd issues for single-session linear work (use TodoWrite)
|
||||
- Put detailed implementation steps in bd notes (use writing-plans)
|
||||
- Update bd after every TodoWrite task (update at milestones)
|
||||
- Use writing-plans for exploratory work (defeats the purpose)
|
||||
|
||||
**Do**:
|
||||
- Update bd when changing tools or reaching milestones
|
||||
- Use TodoWrite as "working copy" of bd's NEXT section
|
||||
- Link between tools (bd design field → writing-plans file path)
|
||||
- Choose the right level of formality for the work complexity
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
**Key principle**: Each tool operates at a different timescale and level of detail.
|
||||
|
||||
- **TodoWrite**: Minutes to hours (current execution)
|
||||
- **bd**: Hours to weeks (persistent context)
|
||||
- **writing-plans**: Days to weeks (detailed breakdown)
|
||||
- **Other skills**: As needed (domain frameworks)
|
||||
|
||||
**Integration pattern**: Use the lightest tool sufficient for the task, add heavier tools only when complexity demands it.
|
||||
|
||||
**For complete boundaries and decision criteria, see:** [BOUNDARIES.md](BOUNDARIES.md)
|
||||
@@ -1,354 +0,0 @@
|
||||
# Molecules and Wisps Reference
|
||||
|
||||
This reference covers bd's molecular chemistry system for reusable work templates and ephemeral workflows.
|
||||
|
||||
## The Chemistry Metaphor
|
||||
|
||||
bd v0.34.0 introduces a chemistry-inspired workflow system:
|
||||
|
||||
| Phase | Name | Storage | Synced? | Use Case |
|
||||
|-------|------|---------|---------|----------|
|
||||
| **Solid** | Proto | `.beads/` | Yes | Reusable template (epic with `template` label) |
|
||||
| **Liquid** | Mol | `.beads/` | Yes | Persistent instance (real issues from template) |
|
||||
| **Vapor** | Wisp | `.beads-wisp/` | No | Ephemeral instance (operational work, no audit trail) |
|
||||
|
||||
**Phase transitions:**
|
||||
- `spawn` / `pour`: Solid (proto) → Liquid (mol)
|
||||
- `wisp create`: Solid (proto) → Vapor (wisp)
|
||||
- `squash`: Vapor (wisp) → Digest (permanent summary)
|
||||
- `burn`: Vapor (wisp) → Nothing (deleted, no trace)
|
||||
- `distill`: Liquid (ad-hoc epic) → Solid (proto)
|
||||
|
||||
## When to Use Molecules
|
||||
|
||||
### Use Protos/Mols When:
|
||||
- **Repeatable patterns** - Same workflow structure used multiple times (releases, reviews, onboarding)
|
||||
- **Team knowledge capture** - Encoding tribal knowledge as executable templates
|
||||
- **Audit trail matters** - Work that needs to be tracked and reviewed later
|
||||
- **Cross-session persistence** - Work spanning multiple days/sessions
|
||||
|
||||
### Use Wisps When:
|
||||
- **Operational loops** - Patrol cycles, health checks, routine monitoring
|
||||
- **One-shot orchestration** - Temporary coordination that shouldn't clutter history
|
||||
- **Diagnostic runs** - Debugging workflows with no archival value
|
||||
- **High-frequency ephemeral work** - Would create noise in permanent database
|
||||
|
||||
**Key insight:** Wisps prevent database bloat from routine operations while still providing structure during execution.
|
||||
|
||||
---
|
||||
|
||||
## Proto Management
|
||||
|
||||
### Creating a Proto
|
||||
|
||||
Protos are epics with the `template` label. Create manually or distill from existing work:
|
||||
|
||||
```bash
|
||||
# Manual creation
|
||||
bd create "Release Workflow" --type epic --label template
|
||||
bd create "Run tests for {{component}}" --type task
|
||||
bd dep add task-id epic-id --type parent-child
|
||||
|
||||
# Distill from ad-hoc work (extracts template from existing epic)
|
||||
bd mol distill bd-abc123 --as "Release Workflow" --var version=1.0.0
|
||||
```
|
||||
|
||||
**Proto naming convention:** Use `mol-` prefix for clarity (e.g., `mol-release`, `mol-patrol`).
|
||||
|
||||
### Listing Protos
|
||||
|
||||
```bash
|
||||
bd mol catalog # List all protos
|
||||
bd mol catalog --json # Machine-readable
|
||||
```
|
||||
|
||||
### Viewing Proto Structure
|
||||
|
||||
```bash
|
||||
bd mol show mol-release # Show template structure and variables
|
||||
bd mol show mol-release --json # Machine-readable
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Spawning Molecules
|
||||
|
||||
### Basic Spawn (Creates Wisp by Default)
|
||||
|
||||
```bash
|
||||
bd mol spawn mol-patrol # Creates wisp (ephemeral)
|
||||
bd mol spawn mol-feature --pour # Creates mol (persistent)
|
||||
bd mol spawn mol-release --var version=2.0 # With variable substitution
|
||||
```
|
||||
|
||||
**Chemistry shortcuts:**
|
||||
```bash
|
||||
bd pour mol-feature # Shortcut for spawn --pour
|
||||
bd wisp create mol-patrol # Explicit wisp creation
|
||||
```
|
||||
|
||||
### Spawn with Immediate Execution
|
||||
|
||||
```bash
|
||||
bd mol run mol-release --var version=2.0
|
||||
```
|
||||
|
||||
`bd mol run` does three things:
|
||||
1. Spawns the molecule (persistent)
|
||||
2. Assigns root issue to caller
|
||||
3. Pins root issue for session recovery
|
||||
|
||||
**Use `mol run` when:** Starting durable work that should survive crashes. The pin ensures `bd ready` shows the work after restart.
|
||||
|
||||
### Spawn with Attachments
|
||||
|
||||
Attach additional protos in a single command:
|
||||
|
||||
```bash
|
||||
bd mol spawn mol-feature --attach mol-testing --var name=auth
|
||||
# Spawns mol-feature, then spawns mol-testing and bonds them
|
||||
```
|
||||
|
||||
**Attach types:**
|
||||
- `sequential` (default) - Attached runs after primary completes
|
||||
- `parallel` - Attached runs alongside primary
|
||||
- `conditional` - Attached runs only if primary fails
|
||||
|
||||
```bash
|
||||
bd mol spawn mol-deploy --attach mol-rollback --attach-type conditional
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Bonding Molecules
|
||||
|
||||
### Bond Types
|
||||
|
||||
```bash
|
||||
bd mol bond A B # Sequential: B runs after A
|
||||
bd mol bond A B --type parallel # Parallel: B runs alongside A
|
||||
bd mol bond A B --type conditional # Conditional: B runs if A fails
|
||||
```
|
||||
|
||||
### Operand Combinations
|
||||
|
||||
| A | B | Result |
|
||||
|---|---|--------|
|
||||
| proto | proto | Compound proto (reusable template) |
|
||||
| proto | mol | Spawn proto, attach to molecule |
|
||||
| mol | proto | Spawn proto, attach to molecule |
|
||||
| mol | mol | Join into compound molecule |
|
||||
|
||||
### Phase Control in Bonds
|
||||
|
||||
By default, spawned protos inherit target's phase. Override with flags:
|
||||
|
||||
```bash
|
||||
# Found bug during wisp patrol? Persist it:
|
||||
bd mol bond mol-critical-bug wisp-patrol --pour
|
||||
|
||||
# Need ephemeral diagnostic on persistent feature?
|
||||
bd mol bond mol-temp-check bd-feature --wisp
|
||||
```
|
||||
|
||||
### Custom Compound Names
|
||||
|
||||
```bash
|
||||
bd mol bond mol-feature mol-deploy --as "Feature with Deploy"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Wisp Lifecycle
|
||||
|
||||
### Creating Wisps
|
||||
|
||||
```bash
|
||||
bd wisp create mol-patrol # From proto
|
||||
bd mol spawn mol-patrol # Same (spawn defaults to wisp)
|
||||
bd mol spawn mol-check --var target=db # With variables
|
||||
```
|
||||
|
||||
### Listing Wisps
|
||||
|
||||
```bash
|
||||
bd wisp list # List all wisps
|
||||
bd wisp list --json # Machine-readable
|
||||
```
|
||||
|
||||
### Ending Wisps
|
||||
|
||||
**Option 1: Squash (compress to digest)**
|
||||
```bash
|
||||
bd mol squash wisp-abc123 # Auto-generate summary
|
||||
bd mol squash wisp-abc123 --summary "Completed patrol" # Agent-provided summary
|
||||
bd mol squash wisp-abc123 --keep-children # Keep children, just create digest
|
||||
bd mol squash wisp-abc123 --dry-run # Preview
|
||||
```
|
||||
|
||||
Squash creates a permanent digest issue summarizing the wisp's work, then deletes the wisp children.
|
||||
|
||||
**Option 2: Burn (delete without trace)**
|
||||
```bash
|
||||
bd mol burn wisp-abc123 # Delete wisp, no digest
|
||||
```
|
||||
|
||||
Use burn for routine work with no archival value.
|
||||
|
||||
### Garbage Collection
|
||||
|
||||
```bash
|
||||
bd wisp gc # Clean up orphaned wisps
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Distilling Protos
|
||||
|
||||
Extract a reusable template from ad-hoc work:
|
||||
|
||||
```bash
|
||||
bd mol distill bd-o5xe --as "Release Workflow"
|
||||
bd mol distill bd-abc --var feature_name=auth-refactor --var version=1.0.0
|
||||
```
|
||||
|
||||
**What distill does:**
|
||||
1. Loads existing epic and all children
|
||||
2. Clones structure as new proto (adds `template` label)
|
||||
3. Replaces concrete values with `{{variable}}` placeholders
|
||||
|
||||
**Variable syntax (both work):**
|
||||
```bash
|
||||
--var branch=feature-auth # variable=value (recommended)
|
||||
--var feature-auth=branch # value=variable (auto-detected)
|
||||
```
|
||||
|
||||
**Use cases:**
|
||||
- Team develops good workflow organically, wants to reuse it
|
||||
- Capture tribal knowledge as executable templates
|
||||
- Create starting point for similar future work
|
||||
|
||||
---
|
||||
|
||||
## Cross-Project Dependencies
|
||||
|
||||
### Concept
|
||||
|
||||
Projects can depend on capabilities shipped by other projects:
|
||||
|
||||
```bash
|
||||
# Project A ships a capability
|
||||
bd ship auth-api # Marks capability as available
|
||||
|
||||
# Project B depends on it
|
||||
bd dep add bd-123 external:project-a:auth-api
|
||||
```
|
||||
|
||||
### Shipping Capabilities
|
||||
|
||||
```bash
|
||||
bd ship <capability> # Ship capability (requires closed issue)
|
||||
bd ship <capability> --force # Ship even if issue not closed
|
||||
bd ship <capability> --dry-run # Preview
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
1. Find issue with `export:<capability>` label
|
||||
2. Validate issue is closed
|
||||
3. Add `provides:<capability>` label
|
||||
|
||||
### Depending on External Capabilities
|
||||
|
||||
```bash
|
||||
bd dep add <issue> external:<project>:<capability>
|
||||
```
|
||||
|
||||
The dependency is satisfied when the external project has a closed issue with `provides:<capability>` label.
|
||||
|
||||
**`bd ready` respects external deps:** Issues blocked by unsatisfied external dependencies won't appear in ready list.
|
||||
|
||||
---
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Pattern: Weekly Review Proto
|
||||
|
||||
```bash
|
||||
# Create proto
|
||||
bd create "Weekly Review" --type epic --label template
|
||||
bd create "Review open issues" --type task
|
||||
bd create "Update priorities" --type task
|
||||
bd create "Archive stale work" --type task
|
||||
# Link as children...
|
||||
|
||||
# Use each week
|
||||
bd mol spawn mol-weekly-review --pour
|
||||
```
|
||||
|
||||
### Pattern: Ephemeral Patrol Cycle
|
||||
|
||||
```bash
|
||||
# Patrol proto exists
|
||||
bd wisp create mol-patrol
|
||||
|
||||
# Execute patrol work...
|
||||
|
||||
# End patrol
|
||||
bd mol squash wisp-abc123 --summary "Patrol complete: 3 issues found, 2 resolved"
|
||||
```
|
||||
|
||||
### Pattern: Feature with Rollback
|
||||
|
||||
```bash
|
||||
bd mol spawn mol-deploy --attach mol-rollback --attach-type conditional
|
||||
# If deploy fails, rollback automatically becomes unblocked
|
||||
```
|
||||
|
||||
### Pattern: Capture Tribal Knowledge
|
||||
|
||||
```bash
|
||||
# After completing a good workflow organically
|
||||
bd mol distill bd-release-epic --as "Release Process" --var version=X.Y.Z
|
||||
# Now team can: bd mol spawn mol-release-process --var version=2.0.0
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## CLI Quick Reference
|
||||
|
||||
| Command | Purpose |
|
||||
|---------|---------|
|
||||
| `bd mol catalog` | List available protos |
|
||||
| `bd mol show <id>` | Show proto/mol structure |
|
||||
| `bd mol spawn <proto>` | Create wisp from proto (default) |
|
||||
| `bd mol spawn <proto> --pour` | Create persistent mol from proto |
|
||||
| `bd mol run <proto>` | Spawn + assign + pin (durable execution) |
|
||||
| `bd mol bond <A> <B>` | Combine protos or molecules |
|
||||
| `bd mol distill <epic>` | Extract proto from ad-hoc work |
|
||||
| `bd mol squash <wisp>` | Compress wisp children to digest |
|
||||
| `bd mol burn <wisp>` | Delete wisp without trace |
|
||||
| `bd pour <proto>` | Shortcut for `spawn --pour` |
|
||||
| `bd wisp create <proto>` | Create ephemeral wisp |
|
||||
| `bd wisp list` | List all wisps |
|
||||
| `bd wisp gc` | Garbage collect orphaned wisps |
|
||||
| `bd ship <capability>` | Publish capability for cross-project deps |
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**"Proto not found"**
|
||||
- Check `bd mol catalog` for available protos
|
||||
- Protos need `template` label on the epic
|
||||
|
||||
**"Variable not substituted"**
|
||||
- Use `--var key=value` syntax
|
||||
- Check proto for `{{key}}` placeholders with `bd mol show`
|
||||
|
||||
**"Wisp commands fail"**
|
||||
- Wisps stored in `.beads-wisp/` (separate from `.beads/`)
|
||||
- Check `bd wisp list` for active wisps
|
||||
|
||||
**"External dependency not satisfied"**
|
||||
- Target project must have closed issue with `provides:<capability>` label
|
||||
- Use `bd ship <capability>` in target project first
|
||||
@@ -1,341 +0,0 @@
|
||||
# Common Usage Patterns
|
||||
|
||||
Practical patterns for using bd effectively across different scenarios.
|
||||
|
||||
## Contents
|
||||
|
||||
- [Knowledge Work Session](#knowledge-work-session) - Resume long-running research or writing tasks
|
||||
- [Side Quest Handling](#side-quest-handling) - Capture discovered work without losing context
|
||||
- [Multi-Session Project Resume](#multi-session-project-resume) - Pick up work after time away
|
||||
- [Status Transitions](#status-transitions) - When to change issue status
|
||||
- [Compaction Recovery](#compaction-recovery) - Resume after conversation history is lost
|
||||
- [Issue Closure](#issue-closure) - Documenting completion properly
|
||||
|
||||
---
|
||||
|
||||
## Knowledge Work Session
|
||||
|
||||
**Scenario**: User asks "Help me write a proposal for expanding the analytics platform"
|
||||
|
||||
**What you see**:
|
||||
```bash
|
||||
$ bd ready
|
||||
# Returns: bd-42 "Research analytics platform expansion proposal" (in_progress)
|
||||
|
||||
$ bd show bd-42
|
||||
Notes: "COMPLETED: Reviewed current stack (Mixpanel, Amplitude)
|
||||
IN PROGRESS: Drafting cost-benefit analysis section
|
||||
NEXT: Need user input on budget constraints before finalizing recommendations"
|
||||
```
|
||||
|
||||
**What you do**:
|
||||
1. Read notes to understand current state
|
||||
2. Create TodoWrite for immediate work:
|
||||
```
|
||||
- [ ] Draft cost-benefit analysis
|
||||
- [ ] Ask user about budget constraints
|
||||
- [ ] Finalize recommendations
|
||||
```
|
||||
3. Work on tasks, mark TodoWrite items completed
|
||||
4. At milestone, update bd notes:
|
||||
```bash
|
||||
bd update bd-42 --notes "COMPLETED: Cost-benefit analysis drafted.
|
||||
KEY DECISION: User confirmed $50k budget cap - ruled out enterprise options.
|
||||
IN PROGRESS: Finalizing recommendations (Posthog + custom ETL).
|
||||
NEXT: Get user review of draft before closing issue."
|
||||
```
|
||||
|
||||
**Outcome**: TodoWrite disappears at session end, but bd notes preserve context for next session.
|
||||
|
||||
**Key insight**: Notes field captures the "why" and context, TodoWrite tracks the "doing" right now.
|
||||
|
||||
---
|
||||
|
||||
## Side Quest Handling
|
||||
|
||||
**Scenario**: During main task, discover a problem that needs attention.
|
||||
|
||||
**Pattern**:
|
||||
1. Create issue immediately: `bd create "Found: inventory system needs refactoring"`
|
||||
2. Link provenance: `bd dep add main-task new-issue --type discovered-from`
|
||||
3. Assess urgency: blocker or can defer?
|
||||
4. **If blocker**:
|
||||
- `bd update main-task --status blocked`
|
||||
- `bd update new-issue --status in_progress`
|
||||
- Work on the blocker
|
||||
5. **If deferrable**:
|
||||
- Note in new issue's design field
|
||||
- Continue main task
|
||||
- New issue persists for later
|
||||
|
||||
**Why this works**: Captures context immediately (before forgetting), preserves relationship to main work, allows flexible prioritization.
|
||||
|
||||
**Example (with MCP):**
|
||||
|
||||
Working on "Implement checkout flow" (checkout-1), discover payment validation security hole:
|
||||
|
||||
1. Create bug issue: `mcp__plugin_beads_beads__create` with `{title: "Fix: payment validation bypasses card expiry check", type: "bug", priority: 0}`
|
||||
2. Link discovery: `mcp__plugin_beads_beads__dep` with `{from_issue: "checkout-1", to_issue: "payment-bug-2", type: "discovered-from"}`
|
||||
3. Block current work: `mcp__plugin_beads_beads__update` with `{issue_id: "checkout-1", status: "blocked", notes: "Blocked by payment-bug-2: security hole in validation"}`
|
||||
4. Start new work: `mcp__plugin_beads_beads__update` with `{issue_id: "payment-bug-2", status: "in_progress"}`
|
||||
|
||||
(CLI: `bd create "Fix: payment validation..." -t bug -p 0` then `bd dep add` and `bd update` commands)
|
||||
|
||||
---
|
||||
|
||||
## Multi-Session Project Resume
|
||||
|
||||
**Scenario**: Starting work after days or weeks away from a project.
|
||||
|
||||
**Pattern (with MCP)**:
|
||||
1. **Check what's ready**: Use `mcp__plugin_beads_beads__ready` to see available work
|
||||
2. **Check what's stuck**: Use `mcp__plugin_beads_beads__blocked` to understand blockers
|
||||
3. **Check recent progress**: Use `mcp__plugin_beads_beads__list` with `status:"closed"` to see completions
|
||||
4. **Read detailed context**: Use `mcp__plugin_beads_beads__show` for the issue you'll work on
|
||||
5. **Update status**: Use `mcp__plugin_beads_beads__update` with `status:"in_progress"`
|
||||
6. **Begin work**: Create TodoWrite from notes field's NEXT section
|
||||
|
||||
(CLI: `bd ready`, `bd blocked`, `bd list --status closed`, `bd show <id>`, `bd update <id> --status in_progress`)
|
||||
|
||||
**Example**:
|
||||
```bash
|
||||
$ bd ready
|
||||
Ready to work on (3):
|
||||
auth-5: "Add OAuth refresh token rotation" (priority: 0)
|
||||
api-12: "Document REST API endpoints" (priority: 1)
|
||||
test-8: "Add integration tests for payment flow" (priority: 2)
|
||||
|
||||
$ bd show auth-5
|
||||
Title: Add OAuth refresh token rotation
|
||||
Status: open
|
||||
Priority: 0 (critical)
|
||||
|
||||
Notes:
|
||||
COMPLETED: Basic JWT auth working
|
||||
IN PROGRESS: Need to add token refresh
|
||||
NEXT: Implement rotation per OWASP guidelines (7-day refresh tokens)
|
||||
BLOCKER: None - ready to proceed
|
||||
|
||||
$ bd update auth-5 --status in_progress
|
||||
# Now create TodoWrite based on NEXT section
|
||||
```
|
||||
|
||||
**For complete session start workflow with checklist, see:** [WORKFLOWS.md](WORKFLOWS.md#session-start)
|
||||
|
||||
---
|
||||
|
||||
## Status Transitions
|
||||
|
||||
Understanding when to change issue status.
|
||||
|
||||
### Status Lifecycle
|
||||
|
||||
```
|
||||
open → in_progress → closed
|
||||
  ↓         ↓
|
||||
blocked   blocked
|
||||
```
|
||||
|
||||
### When to Use Each Status
|
||||
|
||||
**open** (default):
|
||||
- Issue created but not started
|
||||
- Waiting for dependencies to clear
|
||||
- Planned work not yet begun
|
||||
- **Command**: Issues start as `open` by default
|
||||
|
||||
**in_progress**:
|
||||
- Actively working on this issue right now
|
||||
- Has been read and understood
|
||||
- Making commits or changes related to this
|
||||
- **Command**: `bd update issue-id --status in_progress`
|
||||
- **When**: Start of work session on this issue
|
||||
|
||||
**blocked**:
|
||||
- Cannot proceed due to external blocker
|
||||
- Waiting for user input/decision
|
||||
- Dependency not completed
|
||||
- Technical blocker discovered
|
||||
- **Command**: `bd update issue-id --status blocked`
|
||||
- **When**: Hit a blocker, capture what blocks you in notes
|
||||
- **Note**: Document blocker in notes field: "BLOCKER: Waiting for API key from ops team"
|
||||
|
||||
**closed**:
|
||||
- Work completed and verified
|
||||
- Tests passing
|
||||
- Acceptance criteria met
|
||||
- **Command**: `bd close issue-id --reason "Implemented with tests passing"`
|
||||
- **When**: All work done, ready to move on
|
||||
- **Note**: Issues remain in database, just marked complete
|
||||
|
||||
### Transition Examples
|
||||
|
||||
**Starting work**:
|
||||
```bash
|
||||
bd ready # See what's available
|
||||
bd update auth-5 --status in_progress
|
||||
# Begin working
|
||||
```
|
||||
|
||||
**Hit a blocker**:
|
||||
```bash
|
||||
bd update auth-5 --status blocked --notes "BLOCKER: Need OAuth client ID from product team. Emailed Jane on 2025-10-23."
|
||||
# Switch to different issue or create new work
|
||||
```
|
||||
|
||||
**Unblocking**:
|
||||
```bash
|
||||
# Once blocker resolved
|
||||
bd update auth-5 --status in_progress --notes "UNBLOCKED: Received OAuth credentials. Resuming implementation."
|
||||
```
|
||||
|
||||
**Completing**:
|
||||
```bash
|
||||
bd close auth-5 --reason "Implemented OAuth refresh with 7-day rotation. Tests passing. PR #42 merged."
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Compaction Recovery
|
||||
|
||||
**Scenario**: Conversation history has been compacted. You need to resume work with zero conversation context.
|
||||
|
||||
**What survives compaction**:
|
||||
- All bd issues and notes
|
||||
- Complete work history
|
||||
- Dependencies and relationships
|
||||
|
||||
**What's lost**:
|
||||
- Conversation history
|
||||
- TodoWrite lists
|
||||
- Recent discussion
|
||||
|
||||
### Recovery Pattern
|
||||
|
||||
1. **Check in-progress work**:
|
||||
```bash
|
||||
bd list --status in_progress
|
||||
```
|
||||
|
||||
2. **Read notes for context**:
|
||||
```bash
|
||||
bd show issue-id
|
||||
# Read notes field - should explain current state
|
||||
```
|
||||
|
||||
3. **Reconstruct TodoWrite from notes**:
|
||||
- COMPLETED section: Done, skip
|
||||
- IN PROGRESS section: Current state
|
||||
- NEXT section: **This becomes your TodoWrite list**
|
||||
|
||||
4. **Report to user**:
|
||||
```
|
||||
"From bd notes: [summary of COMPLETED]. Currently [IN PROGRESS].
|
||||
Next steps: [from NEXT]. Should I continue with that?"
|
||||
```
|
||||
|
||||
### Example Recovery
|
||||
|
||||
**bd show returns**:
|
||||
```
|
||||
Issue: bd-42 "OAuth refresh token implementation"
|
||||
Status: in_progress
|
||||
Notes:
|
||||
COMPLETED: Basic JWT validation working (RS256, 1hr access tokens)
|
||||
KEY DECISION: 7-day refresh tokens per security review
|
||||
IN PROGRESS: Implementing token rotation endpoint
|
||||
NEXT: Add rate limiting (5 refresh attempts per 15min), then write tests
|
||||
BLOCKER: None
|
||||
```
|
||||
|
||||
**Recovery actions**:
|
||||
1. Read notes, understand context
|
||||
2. Create TodoWrite:
|
||||
```
|
||||
- [ ] Implement rate limiting on refresh endpoint
|
||||
- [ ] Write tests for token rotation
|
||||
- [ ] Verify security guidelines met
|
||||
```
|
||||
3. Report: "From notes: JWT validation is done with 7-day refresh tokens. Currently implementing rotation endpoint. Next: add rate limiting and tests. Should I continue?"
|
||||
4. Resume work based on user response
|
||||
|
||||
**For complete compaction survival workflow, see:** [WORKFLOWS.md](WORKFLOWS.md#compaction-survival)
|
||||
|
||||
---
|
||||
|
||||
## Issue Closure
|
||||
|
||||
**Scenario**: Work is complete. How to close properly?
|
||||
|
||||
### Closure Checklist
|
||||
|
||||
Before closing, verify:
|
||||
- [ ] **Acceptance criteria met**: All items checked off
|
||||
- [ ] **Tests passing**: If applicable
|
||||
- [ ] **Documentation updated**: If needed
|
||||
- [ ] **Follow-up work filed**: New issues created for discovered work
|
||||
- [ ] **Key decisions documented**: In notes field
|
||||
|
||||
### Closure Pattern
|
||||
|
||||
**Minimal closure** (simple tasks):
|
||||
```bash
|
||||
bd close task-123 --reason "Implemented feature X"
|
||||
```
|
||||
|
||||
**Detailed closure** (complex work):
|
||||
```bash
|
||||
# Update notes with final state
|
||||
bd update task-123 --notes "COMPLETED: OAuth refresh with 7-day rotation
|
||||
KEY DECISION: RS256 over HS256 per security review
|
||||
TESTS: 12 tests passing (auth, rotation, expiry, errors)
|
||||
FOLLOW-UP: Filed perf-99 for token cleanup job"
|
||||
|
||||
# Close with summary
|
||||
bd close task-123 --reason "Implemented OAuth refresh token rotation with rate limiting. All security guidelines met. Tests passing."
|
||||
```
|
||||
|
||||
### Documenting Resolution (Outcome vs Design)
|
||||
|
||||
For issues where the outcome differed from initial design, use `--notes` to document what actually happened:
|
||||
|
||||
```bash
|
||||
# Initial design was hypothesis - document actual outcome in notes
|
||||
bd update bug-456 --notes "RESOLUTION: Not a bug - behavior is correct per OAuth spec. Documentation was unclear. Filed docs-789 to clarify auth flow in user guide."
|
||||
|
||||
bd close bug-456 --reason "Resolved: documentation issue, not bug"
|
||||
```
|
||||
|
||||
**Pattern**: Design field = initial approach. Notes field = what actually happened (prefix with RESOLUTION: for clarity).
|
||||
|
||||
### Discovering Follow-up Work
|
||||
|
||||
When closing reveals new work:
|
||||
|
||||
```bash
|
||||
# While closing auth feature, realize performance needs work
|
||||
bd create "Optimize token lookup query" -t task -p 2
|
||||
|
||||
# Link the provenance
|
||||
bd dep add auth-5 perf-99 --type discovered-from
|
||||
|
||||
# Now close original
|
||||
bd close auth-5 --reason "OAuth refresh implemented. Discovered perf optimization needed (filed perf-99)."
|
||||
```
|
||||
|
||||
**Why link with discovered-from**: Preserves the context of how you found the new work. Future you will appreciate knowing it came from the auth implementation.
|
||||
|
||||
---
|
||||
|
||||
## Pattern Summary
|
||||
|
||||
| Pattern | When to Use | Key Command | Preserves |
|
||||
|---------|-------------|-------------|-----------|
|
||||
| **Knowledge Work** | Long-running research, writing | `bd update --notes` | Context across sessions |
|
||||
| **Side Quest** | Discovered during other work | `bd dep add --type discovered-from` | Relationship to original |
|
||||
| **Multi-Session Resume** | Returning after time away | `bd ready`, `bd show` | Full project state |
|
||||
| **Status Transitions** | Tracking work state | `bd update --status` | Current state |
|
||||
| **Compaction Recovery** | History lost | Read notes field | All context in notes |
|
||||
| **Issue Closure** | Completing work | `bd close --reason` | Decisions and outcomes |
|
||||
|
||||
**For detailed workflows with step-by-step checklists, see:** [WORKFLOWS.md](WORKFLOWS.md)
|
||||
---
|
||||
# Troubleshooting Guide
|
||||
|
||||
Common issues encountered when using bd and how to resolve them.
|
||||
|
||||
## Interface-Specific Troubleshooting
|
||||
|
||||
**MCP tools (local environment):**
|
||||
- MCP tools require bd daemon running
|
||||
- Check daemon status: `bd daemon --status` (CLI)
|
||||
- If MCP tools fail, verify daemon is running and restart if needed
|
||||
- MCP tools automatically use daemon mode (no --no-daemon option)
|
||||
|
||||
**CLI (web environment or local):**
|
||||
- CLI can use daemon mode (default) or direct mode (--no-daemon)
|
||||
- Direct mode has 3-5 second sync delay
|
||||
- Web environment: Install via `npm install -g @beads/cli`
|
||||
- Web environment: Initialize via `bd init <prefix>` before first use
|
||||
|
||||
**Most issues below apply to both interfaces** - the underlying database and daemon behavior is the same.
|
||||
|
||||
## Contents
|
||||
|
||||
- [Dependencies Not Persisting](#dependencies-not-persisting)
|
||||
- [Status Updates Not Visible](#status-updates-not-visible)
|
||||
- [Daemon Won't Start](#daemon-wont-start)
|
||||
- [Database Errors on Cloud Storage](#database-errors-on-cloud-storage)
|
||||
- [JSONL File Not Created](#jsonl-file-not-created)
|
||||
- [Version Requirements](#version-requirements)
|
||||
|
||||
---
|
||||
|
||||
## Dependencies Not Persisting
|
||||
|
||||
### Symptom
|
||||
```bash
|
||||
bd dep add issue-2 issue-1 --type blocks
|
||||
# Reports: ✓ Added dependency
|
||||
bd show issue-2
|
||||
# Shows: No dependencies listed
|
||||
```
|
||||
|
||||
### Root Cause (Fixed in v0.15.0+)
|
||||
This was a **bug in bd** (GitHub issue #101) where the daemon ignored dependencies during issue creation. **Fixed in bd v0.15.0** (Oct 21, 2025).
|
||||
|
||||
### Resolution
|
||||
|
||||
**1. Check your bd version:**
|
||||
```bash
|
||||
bd version
|
||||
```
|
||||
|
||||
**2. If version < 0.15.0, update bd:**
|
||||
```bash
|
||||
# Via Homebrew (macOS/Linux)
|
||||
brew upgrade bd
|
||||
|
||||
# Via go install
|
||||
go install github.com/steveyegge/beads/cmd/bd@latest
|
||||
|
||||
# Via package manager
|
||||
# See https://github.com/steveyegge/beads#installing
|
||||
```
|
||||
|
||||
**3. Restart daemon after upgrade:**
|
||||
```bash
|
||||
pkill -f "bd daemon" # Kill old daemon
|
||||
bd daemon # Start new daemon with fix
|
||||
```
|
||||
|
||||
**4. Test dependency creation:**
|
||||
```bash
|
||||
bd create "Test A" -t task
|
||||
bd create "Test B" -t task
|
||||
bd dep add <B-id> <A-id> --type blocks
|
||||
bd show <B-id>
|
||||
# Should show: "Depends on (1): → <A-id>"
|
||||
```
|
||||
|
||||
### Still Not Working?
|
||||
|
||||
If dependencies still don't persist after updating:
|
||||
|
||||
1. **Check daemon is running:**
|
||||
```bash
|
||||
ps aux | grep "bd daemon"
|
||||
```
|
||||
|
||||
2. **Try without --no-daemon flag:**
|
||||
```bash
|
||||
# Instead of: bd --no-daemon dep add ...
|
||||
# Use: bd dep add ... (let daemon handle it)
|
||||
```
|
||||
|
||||
3. **Check JSONL file:**
|
||||
```bash
|
||||
cat .beads/issues.jsonl | jq '.dependencies'
|
||||
# Should show dependency array
|
||||
```
|
||||
|
||||
4. **Report to beads GitHub** with:
|
||||
- `bd version` output
|
||||
- Operating system
|
||||
- Reproducible test case
|
||||
|
||||
---
|
||||
|
||||
## Status Updates Not Visible
|
||||
|
||||
### Symptom
|
||||
```bash
|
||||
bd --no-daemon update issue-1 --status in_progress
|
||||
# Reports: ✓ Updated issue: issue-1
|
||||
bd show issue-1
|
||||
# Shows: Status: open (not in_progress!)
|
||||
```
|
||||
|
||||
### Root Cause
|
||||
This is **expected behavior**, not a bug. Understanding it requires a quick look at bd's architecture:
|
||||
|
||||
**BD Architecture:**
|
||||
- **JSONL files** (`.beads/issues.jsonl`): Human-readable export format
|
||||
- **SQLite database** (`.beads/*.db`): Source of truth for queries
|
||||
- **Daemon**: Syncs JSONL ↔ SQLite (direct writes are typically picked up within a few seconds; a full periodic sync runs every 5 minutes by default — confirm exact behavior against your bd version)
|
||||
|
||||
**What `--no-daemon` actually does:**
|
||||
- **Writes**: Go directly to JSONL file
|
||||
- **Reads**: Still come from SQLite database
|
||||
- **Sync delay**: Daemon imports JSONL → SQLite periodically
|
||||
|
||||
### Resolution
|
||||
|
||||
**Option 1: Use daemon mode (recommended)**
|
||||
```bash
|
||||
# Don't use --no-daemon for CRUD operations
|
||||
bd update issue-1 --status in_progress
|
||||
bd show issue-1
|
||||
# ✓ Status reflects immediately
|
||||
```
|
||||
|
||||
**Option 2: Wait for sync (if using --no-daemon)**
|
||||
```bash
|
||||
bd --no-daemon update issue-1 --status in_progress
|
||||
# Wait 3-5 seconds for daemon to sync
|
||||
sleep 5
|
||||
bd show issue-1
|
||||
# ✓ Status should reflect now
|
||||
```
|
||||
|
||||
**Option 3: Manual sync trigger**
|
||||
```bash
|
||||
bd --no-daemon update issue-1 --status in_progress
|
||||
# Trigger sync by exporting/importing
|
||||
bd export > /dev/null 2>&1 # Forces sync
|
||||
bd show issue-1
|
||||
```
|
||||
|
||||
### When to Use `--no-daemon`
|
||||
|
||||
**Use --no-daemon for:**
|
||||
- Batch import scripts (performance)
|
||||
- CI/CD environments (no persistent daemon)
|
||||
- Testing/debugging
|
||||
|
||||
**Don't use --no-daemon for:**
|
||||
- Interactive development
|
||||
- Real-time status checks
|
||||
- When you need immediate query results
|
||||
|
||||
---
|
||||
|
||||
## Daemon Won't Start
|
||||
|
||||
### Symptom
|
||||
```bash
|
||||
bd daemon
|
||||
# Error: not in a git repository
|
||||
# Hint: run 'git init' to initialize a repository
|
||||
```
|
||||
|
||||
### Root Cause
|
||||
bd daemon requires a **git repository** because it uses git for:
|
||||
- Syncing issues to git remote (optional)
|
||||
- Version control of `.beads/*.jsonl` files
|
||||
- Commit history of issue changes
|
||||
|
||||
### Resolution
|
||||
|
||||
**Initialize git repository:**
|
||||
```bash
|
||||
# In your project directory
|
||||
git init
|
||||
bd daemon
|
||||
# ✓ Daemon should start now
|
||||
```
|
||||
|
||||
**Prevent git remote operations:**
|
||||
```bash
|
||||
# If you don't want daemon to pull from remote
|
||||
bd daemon --global=false
|
||||
```
|
||||
|
||||
**Flags:**
|
||||
- `--global=false`: Don't sync with git remote
|
||||
- `--interval=10m`: Custom sync interval (default: 5m)
|
||||
- `--auto-commit=true`: Auto-commit JSONL changes
|
||||
|
||||
---
|
||||
|
||||
## Database Errors on Cloud Storage
|
||||
|
||||
### Symptom
|
||||
```bash
|
||||
# In directory: /Users/name/Google Drive/...
|
||||
bd init myproject
|
||||
# Error: disk I/O error (522)
|
||||
# OR: Error: database is locked
|
||||
```
|
||||
|
||||
### Root Cause
|
||||
**SQLite incompatibility with cloud sync filesystems.**
|
||||
|
||||
Cloud services (Google Drive, Dropbox, OneDrive, iCloud) don't support:
|
||||
- POSIX file locking (required by SQLite)
|
||||
- Consistent file handles across sync operations
|
||||
- Atomic write operations
|
||||
|
||||
This is a **known SQLite limitation**, not a bd bug.
|
||||
|
||||
### Resolution
|
||||
|
||||
**Move bd database to local filesystem:**
|
||||
|
||||
```bash
|
||||
# Wrong location (cloud sync)
|
||||
~/Google Drive/My Work/project/.beads/ # ✗ Will fail
|
||||
|
||||
# Correct location (local disk)
|
||||
~/Repos/project/.beads/ # ✓ Works reliably
|
||||
~/Projects/project/.beads/ # ✓ Works reliably
|
||||
```
|
||||
|
||||
**Migration steps:**
|
||||
|
||||
1. **Move project to local disk:**
|
||||
```bash
|
||||
mv ~/Google\ Drive/project ~/Repos/project
|
||||
cd ~/Repos/project
|
||||
```
|
||||
|
||||
2. **Re-initialize bd (if needed):**
|
||||
```bash
|
||||
bd init myproject
|
||||
```
|
||||
|
||||
3. **Import existing issues (if you had JSONL export):**
|
||||
```bash
|
||||
bd import < issues-backup.jsonl
|
||||
```
|
||||
|
||||
**Alternative: Use global `~/.beads/` database**
|
||||
|
||||
If you must keep work on cloud storage:
|
||||
```bash
|
||||
# Don't initialize bd in cloud-synced directory
|
||||
# Use global database instead
|
||||
cd ~/Google\ Drive/project
|
||||
bd create "My task"
|
||||
# Uses ~/.beads/default.db (on local disk)
|
||||
```
|
||||
|
||||
**Workaround limitations:**
|
||||
- No per-project database isolation
|
||||
- All projects share same issue prefix
|
||||
- Manual tracking of which issues belong to which project
|
||||
|
||||
**Recommendation:** Keep code/projects on local disk, sync final deliverables to cloud.
|
||||
|
||||
---
|
||||
|
||||
## JSONL File Not Created
|
||||
|
||||
### Symptom
|
||||
```bash
|
||||
bd init myproject
|
||||
bd --no-daemon create "Test" -t task
|
||||
ls .beads/
|
||||
# Only shows: .gitignore, myproject.db
|
||||
# Missing: issues.jsonl
|
||||
```
|
||||
|
||||
### Root Cause
|
||||
**JSONL initialization coupling.** The `issues.jsonl` file is created by the daemon on its first startup, not by `bd init`.
|
||||
|
||||
### Resolution
|
||||
|
||||
**Start daemon once to initialize JSONL:**
|
||||
```bash
|
||||
bd daemon --global=false &
|
||||
# Wait for initialization
|
||||
sleep 2
|
||||
|
||||
# Now JSONL file exists
|
||||
ls .beads/issues.jsonl
|
||||
# ✓ File created
|
||||
|
||||
# Subsequent --no-daemon operations work
|
||||
bd --no-daemon create "Task 1" -t task
|
||||
cat .beads/issues.jsonl
|
||||
# ✓ Shows task data
|
||||
```
|
||||
|
||||
**Why this matters:**
|
||||
- Daemon owns the JSONL export format
|
||||
- First daemon run creates empty JSONL skeleton
|
||||
- `--no-daemon` operations assume JSONL exists
|
||||
|
||||
**Pattern for batch scripts:**
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Batch import script
|
||||
|
||||
bd init myproject
|
||||
bd daemon --global=false & # Start daemon
|
||||
sleep 3 # Wait for initialization
|
||||
|
||||
# Now safe to use --no-daemon for performance
|
||||
for item in "${items[@]}"; do
|
||||
bd --no-daemon create "$item" -t feature
|
||||
done
|
||||
|
||||
# Daemon syncs JSONL → SQLite in background
|
||||
sleep 5 # Wait for final sync
|
||||
|
||||
# Query results
|
||||
bd stats
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Version Requirements
|
||||
|
||||
### Minimum Version for Dependency Persistence
|
||||
|
||||
**Issue:** Dependencies created but don't appear in `bd show` or dependency tree.
|
||||
|
||||
**Fix:** Upgrade to **bd v0.15.0+** (released Oct 2025)
|
||||
|
||||
**Check version:**
|
||||
```bash
|
||||
bd version
|
||||
# Should show: bd version 0.15.0 or higher
|
||||
```
|
||||
|
||||
**If using MCP plugin:**
|
||||
```bash
|
||||
# Update Claude Code beads plugin
|
||||
claude plugin update beads
|
||||
```
|
||||
|
||||
### Breaking Changes
|
||||
|
||||
**v0.15.0:**
|
||||
- MCP parameter names changed from `from_id/to_id` to `issue_id/depends_on_id`
|
||||
- Dependency creation now persists correctly in daemon mode
|
||||
|
||||
**v0.14.0:**
|
||||
- Daemon architecture changes
|
||||
- Auto-sync JSONL behavior introduced
|
||||
|
||||
---
|
||||
|
||||
## MCP-Specific Issues
|
||||
|
||||
### Dependencies Created Backwards
|
||||
|
||||
**Symptom:**
|
||||
When using MCP tools, dependencies can end up reversed from what was intended.
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
# Want: "task-2 depends on task-1" (task-1 blocks task-2)
|
||||
beads_add_dependency(issue_id="task-1", depends_on_id="task-2")
|
||||
# Wrong! This makes task-1 depend on task-2
|
||||
```
|
||||
|
||||
**Root Cause:**
|
||||
Parameter confusion between old (`from_id/to_id`) and new (`issue_id/depends_on_id`) names.
|
||||
|
||||
**Resolution:**
|
||||
|
||||
**Correct MCP usage (bd v0.15.0+):**
|
||||
```python
|
||||
# Correct: task-2 depends on task-1
|
||||
beads_add_dependency(
|
||||
issue_id="task-2", # Issue that has dependency
|
||||
depends_on_id="task-1", # Issue that must complete first
|
||||
dep_type="blocks"
|
||||
)
|
||||
```
|
||||
|
||||
**Mnemonic:**
|
||||
- `issue_id`: The issue that **waits**
|
||||
- `depends_on_id`: The issue that **must finish first**
|
||||
|
||||
**Equivalent CLI:**
|
||||
```bash
|
||||
bd dep add task-2 task-1 --type blocks
|
||||
# Meaning: task-2 depends on task-1
|
||||
```
|
||||
|
||||
**Verify dependency direction:**
|
||||
```bash
|
||||
bd show task-2
|
||||
# Should show: "Depends on: task-1"
|
||||
# Not the other way around
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Getting Help
|
||||
|
||||
### Debug Checklist
|
||||
|
||||
Before reporting issues, collect this information:
|
||||
|
||||
```bash
|
||||
# 1. Version
|
||||
bd version
|
||||
|
||||
# 2. Daemon status
|
||||
ps aux | grep "bd daemon"
|
||||
|
||||
# 3. Database location
|
||||
echo $PWD/.beads/*.db
|
||||
ls -la .beads/
|
||||
|
||||
# 4. Git status
|
||||
git status
|
||||
git log --oneline -1
|
||||
|
||||
# 5. JSONL contents (for dependency issues)
|
||||
cat .beads/issues.jsonl | jq '.' | head -50
|
||||
```
|
||||
|
||||
### Report to beads GitHub
|
||||
|
||||
If problems persist:
|
||||
|
||||
1. **Check existing issues:** https://github.com/steveyegge/beads/issues
|
||||
2. **Create new issue** with:
|
||||
- bd version (`bd version`)
|
||||
- Operating system
|
||||
- Debug checklist output (above)
|
||||
- Minimal reproducible example
|
||||
- Expected vs actual behavior
|
||||
|
||||
### Claude Code Skill Issues
|
||||
|
||||
If the **bd-issue-tracking skill** provides incorrect guidance:
|
||||
|
||||
1. **Check skill version:**
|
||||
```bash
|
||||
ls -la ~/.claude/skills/bd-issue-tracking/
|
||||
head -20 ~/.claude/skills/bd-issue-tracking/SKILL.md
|
||||
```
|
||||
|
||||
2. **Report via Claude Code feedback** or user's GitHub
|
||||
|
||||
---
|
||||
|
||||
## Quick Reference: Common Fixes
|
||||
|
||||
| Problem | Quick Fix |
|
||||
|---------|-----------|
|
||||
| Dependencies not saving | Upgrade to bd v0.15.0+ |
|
||||
| Status updates lag | Use daemon mode (not `--no-daemon`) |
|
||||
| Daemon won't start | Run `git init` first |
|
||||
| Database errors on Google Drive | Move to local filesystem |
|
||||
| JSONL file missing | Start daemon once: `bd daemon &` |
|
||||
| Dependencies backwards (MCP) | Update to v0.15.0+, use `issue_id/depends_on_id` correctly |
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [CLI Reference](CLI_REFERENCE.md) - Complete command documentation
|
||||
- [Dependencies Guide](DEPENDENCIES.md) - Understanding dependency types
|
||||
- [Workflows](WORKFLOWS.md) - Step-by-step workflow guides
|
||||
- [beads GitHub](https://github.com/steveyegge/beads) - Official documentation
|
||||
Reference in New Issue
Block a user