refactor: remove unused bd pin/unpin/hook commands (bd-x0zl)
Analysis found these commands are dead code: - gt never calls `bd pin` - uses `bd update --status=pinned` instead - Beads.Pin() wrapper exists but is never called - bd hook functionality duplicated by gt mol status - Code comment says "pinned field is cosmetic for bd hook visibility" Removed: - cmd/bd/pin.go - cmd/bd/unpin.go - cmd/bd/hook.go 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -48,10 +48,10 @@ func TestMain(m *testing.M) {
|
||||
fmt.Fprintf(os.Stderr, "Failed to build bd binary: %v\n%s\n", err, out)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
||||
// Optimize git for tests
|
||||
os.Setenv("GIT_CONFIG_NOSYSTEM", "1")
|
||||
|
||||
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
@@ -85,35 +85,35 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
|
||||
}
|
||||
t.Parallel()
|
||||
tmpDir := testutil.TempDirInMemory(t)
|
||||
|
||||
|
||||
bdPath := getBDPath()
|
||||
if _, err := os.Stat(bdPath); err != nil {
|
||||
t.Fatalf("bd binary not found at %s", bdPath)
|
||||
}
|
||||
|
||||
|
||||
// Setup remote and 3 clones
|
||||
remoteDir := setupBareRepo(t, tmpDir)
|
||||
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
|
||||
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
|
||||
cloneC := setupClone(t, tmpDir, remoteDir, "C", bdPath)
|
||||
|
||||
|
||||
// Each clone creates unique issue (different content = different hash ID)
|
||||
createIssueInClone(t, cloneA, "Issue from clone A")
|
||||
createIssueInClone(t, cloneB, "Issue from clone B")
|
||||
createIssueInClone(t, cloneC, "Issue from clone C")
|
||||
|
||||
|
||||
// Sync all clones once (hash IDs prevent collisions, don't need multiple rounds)
|
||||
for _, clone := range []string{cloneA, cloneB, cloneC} {
|
||||
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
|
||||
}
|
||||
|
||||
|
||||
// Verify all clones have all 3 issues
|
||||
expectedTitles := map[string]bool{
|
||||
"Issue from clone A": true,
|
||||
"Issue from clone B": true,
|
||||
"Issue from clone C": true,
|
||||
}
|
||||
|
||||
|
||||
allConverged := true
|
||||
for name, dir := range map[string]string{"A": cloneA, "B": cloneB, "C": cloneC} {
|
||||
titles := getTitlesFromClone(t, dir)
|
||||
@@ -122,7 +122,7 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
|
||||
allConverged = false
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if allConverged {
|
||||
t.Log("✓ All 3 clones converged with hash-based IDs")
|
||||
} else {
|
||||
@@ -138,26 +138,26 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
|
||||
}
|
||||
t.Parallel()
|
||||
tmpDir := testutil.TempDirInMemory(t)
|
||||
|
||||
|
||||
bdPath := getBDPath()
|
||||
if _, err := os.Stat(bdPath); err != nil {
|
||||
t.Fatalf("bd binary not found at %s", bdPath)
|
||||
}
|
||||
|
||||
|
||||
// Setup remote and 2 clones
|
||||
remoteDir := setupBareRepo(t, tmpDir)
|
||||
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
|
||||
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
|
||||
|
||||
|
||||
// Both clones create identical issue (same content = same hash ID)
|
||||
createIssueInClone(t, cloneA, "Identical issue")
|
||||
createIssueInClone(t, cloneB, "Identical issue")
|
||||
|
||||
|
||||
// Sync both clones once (hash IDs handle dedup automatically)
|
||||
for _, clone := range []string{cloneA, cloneB} {
|
||||
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
|
||||
}
|
||||
|
||||
|
||||
// Verify both clones have exactly 1 issue (deduplication worked)
|
||||
for name, dir := range map[string]string{"A": cloneA, "B": cloneB} {
|
||||
titles := getTitlesFromClone(t, dir)
|
||||
@@ -168,7 +168,7 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
|
||||
t.Errorf("Clone %s missing expected issue: %v", name, sortedKeys(titles))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
t.Log("✓ Identical content deduplicated correctly with hash-based IDs")
|
||||
}
|
||||
|
||||
@@ -177,36 +177,36 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
|
||||
func setupBareRepo(t *testing.T, tmpDir string) string {
|
||||
t.Helper()
|
||||
remoteDir := filepath.Join(tmpDir, "remote.git")
|
||||
runCmd(t, tmpDir, "git", "init", "--bare", remoteDir)
|
||||
|
||||
runCmd(t, tmpDir, "git", "init", "--bare", "-b", "master", remoteDir)
|
||||
|
||||
tempClone := filepath.Join(tmpDir, "temp-init")
|
||||
runCmd(t, tmpDir, "git", "clone", remoteDir, tempClone)
|
||||
runCmd(t, tempClone, "git", "commit", "--allow-empty", "-m", "Initial commit")
|
||||
runCmd(t, tempClone, "git", "push", "origin", "master")
|
||||
|
||||
|
||||
return remoteDir
|
||||
}
|
||||
|
||||
func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
|
||||
t.Helper()
|
||||
cloneDir := filepath.Join(tmpDir, "clone-"+strings.ToLower(name))
|
||||
|
||||
|
||||
// Use shallow, shared clones for speed
|
||||
runCmd(t, tmpDir, "git", "clone", "--shared", "--depth=1", "--no-tags", remoteDir, cloneDir)
|
||||
|
||||
|
||||
// Disable hooks to avoid overhead
|
||||
emptyHooks := filepath.Join(cloneDir, ".empty-hooks")
|
||||
os.MkdirAll(emptyHooks, 0755)
|
||||
runCmd(t, cloneDir, "git", "config", "core.hooksPath", emptyHooks)
|
||||
|
||||
|
||||
// Speed configs
|
||||
runCmd(t, cloneDir, "git", "config", "gc.auto", "0")
|
||||
runCmd(t, cloneDir, "git", "config", "core.fsync", "false")
|
||||
runCmd(t, cloneDir, "git", "config", "commit.gpgSign", "false")
|
||||
|
||||
|
||||
bdCmd := getBDCommand()
|
||||
copyFile(t, bdPath, filepath.Join(cloneDir, filepath.Base(bdCmd)))
|
||||
|
||||
|
||||
if name == "A" {
|
||||
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
|
||||
runCmd(t, cloneDir, "git", "add", ".beads")
|
||||
@@ -216,7 +216,7 @@ func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
|
||||
runCmd(t, cloneDir, "git", "pull", "origin", "master")
|
||||
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
|
||||
}
|
||||
|
||||
|
||||
return cloneDir
|
||||
}
|
||||
|
||||
@@ -231,13 +231,13 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
|
||||
"BEADS_NO_DAEMON": "1",
|
||||
"BD_NO_AUTO_IMPORT": "1",
|
||||
}, getBDCommand(), "list", "--json")
|
||||
|
||||
|
||||
jsonStart := strings.Index(listJSON, "[")
|
||||
if jsonStart == -1 {
|
||||
return make(map[string]bool)
|
||||
}
|
||||
listJSON = listJSON[jsonStart:]
|
||||
|
||||
|
||||
var issues []struct {
|
||||
Title string `json:"title"`
|
||||
}
|
||||
@@ -245,7 +245,7 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
|
||||
t.Logf("Failed to parse JSON: %v", err)
|
||||
return make(map[string]bool)
|
||||
}
|
||||
|
||||
|
||||
titles := make(map[string]bool)
|
||||
for _, issue := range issues {
|
||||
titles[issue.Title] = true
|
||||
@@ -280,7 +280,7 @@ func installGitHooks(t *testing.T, repoDir string) {
|
||||
hooksDir := filepath.Join(repoDir, ".git", "hooks")
|
||||
// Ensure POSIX-style path for sh scripts (even on Windows)
|
||||
bdCmd := strings.ReplaceAll(getBDCommand(), "\\", "/")
|
||||
|
||||
|
||||
preCommit := fmt.Sprintf(`#!/bin/sh
|
||||
%s --no-daemon export -o .beads/issues.jsonl >/dev/null 2>&1 || true
|
||||
git add .beads/issues.jsonl >/dev/null 2>&1 || true
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/beads/internal/git"
|
||||
)
|
||||
|
||||
func TestFindDatabasePathEnvVar(t *testing.T) {
|
||||
@@ -821,6 +823,7 @@ func TestFindGitRoot_RegularRepo(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Chdir(subDir)
|
||||
git.ResetCaches() // Reset after chdir for caching tests
|
||||
|
||||
// findGitRoot should return the repo root
|
||||
result := findGitRoot()
|
||||
@@ -897,6 +900,7 @@ func TestFindGitRoot_Worktree(t *testing.T) {
|
||||
|
||||
// Change to the worktree directory
|
||||
t.Chdir(worktreeDir)
|
||||
git.ResetCaches() // Reset after chdir for caching tests
|
||||
|
||||
// findGitRoot should return the WORKTREE root, not the main repo root
|
||||
result := findGitRoot()
|
||||
@@ -927,6 +931,7 @@ func TestFindGitRoot_NotGitRepo(t *testing.T) {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
t.Chdir(tmpDir)
|
||||
git.ResetCaches() // Reset after chdir for caching tests
|
||||
|
||||
// findGitRoot should return empty string
|
||||
result := findGitRoot()
|
||||
@@ -1024,6 +1029,7 @@ func TestFindBeadsDir_Worktree(t *testing.T) {
|
||||
|
||||
// Change to worktree
|
||||
t.Chdir(worktreeDir)
|
||||
git.ResetCaches() // Reset after chdir for caching tests
|
||||
|
||||
// FindBeadsDir should prioritize the main repo's .beads for worktrees (bd-de6)
|
||||
result := FindBeadsDir()
|
||||
@@ -1134,6 +1140,7 @@ func TestFindDatabasePath_Worktree(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Chdir(worktreeSubDir)
|
||||
git.ResetCaches() // Reset after chdir for caching tests
|
||||
|
||||
// FindDatabasePath should find the main repo's shared database
|
||||
result := FindDatabasePath()
|
||||
@@ -1241,6 +1248,7 @@ func TestFindDatabasePath_WorktreeNoLocalDB(t *testing.T) {
|
||||
|
||||
// Change to worktree
|
||||
t.Chdir(worktreeDir)
|
||||
git.ResetCaches() // Reset after chdir for caching tests
|
||||
|
||||
// FindDatabasePath should find the main repo's shared database
|
||||
result := FindDatabasePath()
|
||||
|
||||
@@ -212,6 +212,8 @@ func expandLoopWithVars(step *Step, vars map[string]string) ([]*Step, error) {
|
||||
// expandLoopIteration expands a single iteration of a loop.
|
||||
// The iteration index is used to generate unique step IDs.
|
||||
// The iterVars map contains loop variable bindings for this iteration (gt-8tmz.27).
|
||||
//
|
||||
//nolint:unparam // error return kept for API consistency with future error handling
|
||||
func expandLoopIteration(step *Step, iteration int, iterVars map[string]string) ([]*Step, error) {
|
||||
result := make([]*Step, 0, len(step.Loop.Body))
|
||||
|
||||
|
||||
@@ -72,8 +72,11 @@ func ApplyExpansions(steps []*Step, compose *ComposeRules, parser *Parser) ([]*S
|
||||
return nil, fmt.Errorf("expand: %q has no template steps", rule.With)
|
||||
}
|
||||
|
||||
// Merge formula default vars with rule overrides (gt-8tmz.34)
|
||||
vars := mergeVars(expFormula, rule.Vars)
|
||||
|
||||
// Expand the target step (start at depth 0)
|
||||
expandedSteps, err := expandStep(targetStep, expFormula.Template, 0)
|
||||
expandedSteps, err := expandStep(targetStep, expFormula.Template, 0, vars)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("expand %q: %w", rule.Target, err)
|
||||
}
|
||||
@@ -112,6 +115,9 @@ func ApplyExpansions(steps []*Step, compose *ComposeRules, parser *Parser) ([]*S
|
||||
return nil, fmt.Errorf("map: %q has no template steps", rule.With)
|
||||
}
|
||||
|
||||
// Merge formula default vars with rule overrides (gt-8tmz.34)
|
||||
vars := mergeVars(expFormula, rule.Vars)
|
||||
|
||||
// Find all matching steps (including nested children - gt-8tmz.33)
|
||||
// Rebuild stepMap to capture any changes from previous expansions
|
||||
stepMap = buildStepMap(result)
|
||||
@@ -124,7 +130,7 @@ func ApplyExpansions(steps []*Step, compose *ComposeRules, parser *Parser) ([]*S
|
||||
|
||||
// Expand each matching step
|
||||
for _, targetStep := range toExpand {
|
||||
expandedSteps, err := expandStep(targetStep, expFormula.Template, 0)
|
||||
expandedSteps, err := expandStep(targetStep, expFormula.Template, 0, vars)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("map %q -> %q: %w", rule.Select, targetStep.ID, err)
|
||||
}
|
||||
@@ -146,14 +152,45 @@ func ApplyExpansions(steps []*Step, compose *ComposeRules, parser *Parser) ([]*S
|
||||
}
|
||||
}
|
||||
|
||||
// Validate no duplicate step IDs after expansion (gt-8tmz.36)
|
||||
if dups := findDuplicateStepIDs(result); len(dups) > 0 {
|
||||
return nil, fmt.Errorf("duplicate step IDs after expansion: %v", dups)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// findDuplicateStepIDs returns any duplicate step IDs found in the steps slice.
|
||||
// It recursively checks all children.
|
||||
func findDuplicateStepIDs(steps []*Step) []string {
|
||||
seen := make(map[string]int)
|
||||
countStepIDs(steps, seen)
|
||||
|
||||
var dups []string
|
||||
for id, count := range seen {
|
||||
if count > 1 {
|
||||
dups = append(dups, id)
|
||||
}
|
||||
}
|
||||
return dups
|
||||
}
|
||||
|
||||
// countStepIDs counts occurrences of each step ID recursively.
|
||||
func countStepIDs(steps []*Step, counts map[string]int) {
|
||||
for _, step := range steps {
|
||||
counts[step.ID]++
|
||||
if len(step.Children) > 0 {
|
||||
countStepIDs(step.Children, counts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// expandStep expands a target step using the given template.
|
||||
// Returns the expanded steps with placeholders substituted.
|
||||
// The depth parameter tracks recursion depth for children; if it exceeds
|
||||
// DefaultMaxExpansionDepth, an error is returned.
|
||||
func expandStep(target *Step, template []*Step, depth int) ([]*Step, error) {
|
||||
// The vars parameter provides variable values for {varname} substitution.
|
||||
func expandStep(target *Step, template []*Step, depth int, vars map[string]string) ([]*Step, error) {
|
||||
if depth > DefaultMaxExpansionDepth {
|
||||
return nil, fmt.Errorf("expansion depth limit exceeded: max %d levels (currently at %d) - step %q",
|
||||
DefaultMaxExpansionDepth, depth, target.ID)
|
||||
@@ -163,12 +200,12 @@ func expandStep(target *Step, template []*Step, depth int) ([]*Step, error) {
|
||||
|
||||
for _, tmpl := range template {
|
||||
expanded := &Step{
|
||||
ID: substituteTargetPlaceholders(tmpl.ID, target),
|
||||
Title: substituteTargetPlaceholders(tmpl.Title, target),
|
||||
Description: substituteTargetPlaceholders(tmpl.Description, target),
|
||||
ID: substituteVars(substituteTargetPlaceholders(tmpl.ID, target), vars),
|
||||
Title: substituteVars(substituteTargetPlaceholders(tmpl.Title, target), vars),
|
||||
Description: substituteVars(substituteTargetPlaceholders(tmpl.Description, target), vars),
|
||||
Type: tmpl.Type,
|
||||
Priority: tmpl.Priority,
|
||||
Assignee: tmpl.Assignee,
|
||||
Assignee: substituteVars(tmpl.Assignee, vars),
|
||||
SourceFormula: tmpl.SourceFormula, // Preserve source from template (gt-8tmz.18)
|
||||
SourceLocation: tmpl.SourceLocation, // Preserve source location (gt-8tmz.18)
|
||||
}
|
||||
@@ -177,7 +214,7 @@ func expandStep(target *Step, template []*Step, depth int) ([]*Step, error) {
|
||||
if len(tmpl.Labels) > 0 {
|
||||
expanded.Labels = make([]string, len(tmpl.Labels))
|
||||
for i, l := range tmpl.Labels {
|
||||
expanded.Labels[i] = substituteTargetPlaceholders(l, target)
|
||||
expanded.Labels[i] = substituteVars(substituteTargetPlaceholders(l, target), vars)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,20 +222,20 @@ func expandStep(target *Step, template []*Step, depth int) ([]*Step, error) {
|
||||
if len(tmpl.DependsOn) > 0 {
|
||||
expanded.DependsOn = make([]string, len(tmpl.DependsOn))
|
||||
for i, d := range tmpl.DependsOn {
|
||||
expanded.DependsOn[i] = substituteTargetPlaceholders(d, target)
|
||||
expanded.DependsOn[i] = substituteVars(substituteTargetPlaceholders(d, target), vars)
|
||||
}
|
||||
}
|
||||
|
||||
if len(tmpl.Needs) > 0 {
|
||||
expanded.Needs = make([]string, len(tmpl.Needs))
|
||||
for i, n := range tmpl.Needs {
|
||||
expanded.Needs[i] = substituteTargetPlaceholders(n, target)
|
||||
expanded.Needs[i] = substituteVars(substituteTargetPlaceholders(n, target), vars)
|
||||
}
|
||||
}
|
||||
|
||||
// Handle children recursively with depth tracking
|
||||
if len(tmpl.Children) > 0 {
|
||||
children, err := expandStep(target, tmpl.Children, depth+1)
|
||||
children, err := expandStep(target, tmpl.Children, depth+1, vars)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -232,6 +269,26 @@ func substituteTargetPlaceholders(s string, target *Step) string {
|
||||
return s
|
||||
}
|
||||
|
||||
// mergeVars merges formula default vars with rule overrides.
|
||||
// Override values take precedence over defaults.
|
||||
func mergeVars(formula *Formula, overrides map[string]string) map[string]string {
|
||||
result := make(map[string]string)
|
||||
|
||||
// Start with formula defaults
|
||||
for name, def := range formula.Vars {
|
||||
if def.Default != "" {
|
||||
result[name] = def.Default
|
||||
}
|
||||
}
|
||||
|
||||
// Apply overrides (these win)
|
||||
for name, value := range overrides {
|
||||
result[name] = value
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// buildStepMap creates a map of step ID to step (recursive).
|
||||
func buildStepMap(steps []*Step) map[string]*Step {
|
||||
result := make(map[string]*Step)
|
||||
@@ -303,3 +360,82 @@ func UpdateDependenciesForExpansion(steps []*Step, expandedID string, lastExpand
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// ApplyInlineExpansions applies Step.Expand fields to inline expansions (gt-8tmz.35).
|
||||
// Steps with the Expand field set are replaced by the referenced expansion template.
|
||||
// The step's ExpandVars are passed as variable overrides to the expansion.
|
||||
//
|
||||
// This differs from compose.Expand in that the expansion is declared inline on the
|
||||
// step itself rather than in a central compose section.
|
||||
//
|
||||
// Returns a new steps slice with inline expansions applied.
|
||||
// The original steps slice is not modified.
|
||||
func ApplyInlineExpansions(steps []*Step, parser *Parser) ([]*Step, error) {
|
||||
if parser == nil {
|
||||
return steps, nil
|
||||
}
|
||||
|
||||
return applyInlineExpansionsRecursive(steps, parser, 0)
|
||||
}
|
||||
|
||||
// applyInlineExpansionsRecursive handles inline expansions for a slice of steps.
|
||||
// depth tracks recursion to prevent infinite expansion loops.
|
||||
func applyInlineExpansionsRecursive(steps []*Step, parser *Parser, depth int) ([]*Step, error) {
|
||||
if depth > DefaultMaxExpansionDepth {
|
||||
return nil, fmt.Errorf("inline expansion depth limit exceeded: max %d levels", DefaultMaxExpansionDepth)
|
||||
}
|
||||
|
||||
var result []*Step
|
||||
|
||||
for _, step := range steps {
|
||||
// Check if this step has an inline expansion
|
||||
if step.Expand != "" {
|
||||
// Load the expansion formula
|
||||
expFormula, err := parser.LoadByName(step.Expand)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("inline expand on step %q: loading %q: %w", step.ID, step.Expand, err)
|
||||
}
|
||||
|
||||
if expFormula.Type != TypeExpansion {
|
||||
return nil, fmt.Errorf("inline expand on step %q: %q is not an expansion formula (type=%s)",
|
||||
step.ID, step.Expand, expFormula.Type)
|
||||
}
|
||||
|
||||
if len(expFormula.Template) == 0 {
|
||||
return nil, fmt.Errorf("inline expand on step %q: %q has no template steps", step.ID, step.Expand)
|
||||
}
|
||||
|
||||
// Merge formula default vars with step's ExpandVars overrides
|
||||
vars := mergeVars(expFormula, step.ExpandVars)
|
||||
|
||||
// Expand the step using the template (reuse existing expandStep)
|
||||
expandedSteps, err := expandStep(step, expFormula.Template, 0, vars)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("inline expand on step %q: %w", step.ID, err)
|
||||
}
|
||||
|
||||
// Recursively process expanded steps for nested inline expansions
|
||||
processedSteps, err := applyInlineExpansionsRecursive(expandedSteps, parser, depth+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, processedSteps...)
|
||||
} else {
|
||||
// No inline expansion - keep the step, but process children recursively
|
||||
clone := cloneStep(step)
|
||||
|
||||
if len(step.Children) > 0 {
|
||||
processedChildren, err := applyInlineExpansionsRecursive(step.Children, parser, depth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clone.Children = processedChildren
|
||||
}
|
||||
|
||||
result = append(result, clone)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -87,7 +87,7 @@ func TestExpandStep(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
result, err := expandStep(target, template, 0)
|
||||
result, err := expandStep(target, template, 0, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("expandStep failed: %v", err)
|
||||
}
|
||||
@@ -138,7 +138,7 @@ func TestExpandStepDepthLimit(t *testing.T) {
|
||||
|
||||
// With depth 0 start, going to level 6 means 7 levels total (0-6)
|
||||
// DefaultMaxExpansionDepth is 5, so this should fail
|
||||
_, err := expandStep(target, template, 0)
|
||||
_, err := expandStep(target, template, 0, nil)
|
||||
if err == nil {
|
||||
t.Fatal("expected depth limit error, got nil")
|
||||
}
|
||||
@@ -159,7 +159,7 @@ func TestExpandStepDepthLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
shallowTemplate := []*Step{shallowChild}
|
||||
result, err := expandStep(target, shallowTemplate, 0)
|
||||
result, err := expandStep(target, shallowTemplate, 0, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("expected shallow template to succeed, got: %v", err)
|
||||
}
|
||||
@@ -432,3 +432,387 @@ func getChildIDs(steps []*Step) []string {
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
func TestSubstituteVars(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
vars map[string]string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "single var substitution",
|
||||
input: "Deploy to {environment}",
|
||||
vars: map[string]string{"environment": "production"},
|
||||
expected: "Deploy to production",
|
||||
},
|
||||
{
|
||||
name: "multiple var substitution",
|
||||
input: "{component} v{version}",
|
||||
vars: map[string]string{"component": "auth", "version": "2.0"},
|
||||
expected: "auth v2.0",
|
||||
},
|
||||
{
|
||||
name: "unmatched placeholder stays",
|
||||
input: "{known} and {unknown}",
|
||||
vars: map[string]string{"known": "replaced"},
|
||||
expected: "replaced and {unknown}",
|
||||
},
|
||||
{
|
||||
name: "empty vars map",
|
||||
input: "no {change}",
|
||||
vars: nil,
|
||||
expected: "no {change}",
|
||||
},
|
||||
{
|
||||
name: "empty string",
|
||||
input: "",
|
||||
vars: map[string]string{"foo": "bar"},
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := substituteVars(tt.input, tt.vars)
|
||||
if result != tt.expected {
|
||||
t.Errorf("substituteVars(%q, %v) = %q, want %q", tt.input, tt.vars, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMergeVars(t *testing.T) {
|
||||
formula := &Formula{
|
||||
Vars: map[string]*VarDef{
|
||||
"env": {Default: "staging"},
|
||||
"version": {Default: "1.0"},
|
||||
"name": {Required: true}, // No default
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("overrides take precedence", func(t *testing.T) {
|
||||
overrides := map[string]string{"env": "production"}
|
||||
result := mergeVars(formula, overrides)
|
||||
|
||||
if result["env"] != "production" {
|
||||
t.Errorf("env = %q, want 'production'", result["env"])
|
||||
}
|
||||
if result["version"] != "1.0" {
|
||||
t.Errorf("version = %q, want '1.0'", result["version"])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("override adds new var", func(t *testing.T) {
|
||||
overrides := map[string]string{"custom": "value"}
|
||||
result := mergeVars(formula, overrides)
|
||||
|
||||
if result["custom"] != "value" {
|
||||
t.Errorf("custom = %q, want 'value'", result["custom"])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nil overrides uses defaults", func(t *testing.T) {
|
||||
result := mergeVars(formula, nil)
|
||||
|
||||
if result["env"] != "staging" {
|
||||
t.Errorf("env = %q, want 'staging'", result["env"])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplyExpansionsWithVars(t *testing.T) {
|
||||
// Create a temporary directory with an expansion formula that uses vars
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create an expansion formula with variables
|
||||
envExpansion := `{
|
||||
"formula": "env-deploy",
|
||||
"type": "expansion",
|
||||
"version": 1,
|
||||
"vars": {
|
||||
"environment": {"default": "staging"},
|
||||
"replicas": {"default": "1"}
|
||||
},
|
||||
"template": [
|
||||
{"id": "{target}.prepare-{environment}", "title": "Prepare {environment} for {target.title}"},
|
||||
{"id": "{target}.deploy-{environment}", "title": "Deploy to {environment} with {replicas} replicas", "needs": ["{target}.prepare-{environment}"]}
|
||||
]
|
||||
}`
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "env-deploy.formula.json"), []byte(envExpansion), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
parser := NewParser(tmpDir)
|
||||
|
||||
t.Run("expand with var overrides", func(t *testing.T) {
|
||||
steps := []*Step{
|
||||
{ID: "design", Title: "Design"},
|
||||
{ID: "release", Title: "Release v2"},
|
||||
{ID: "test", Title: "Test"},
|
||||
}
|
||||
|
||||
compose := &ComposeRules{
|
||||
Expand: []*ExpandRule{
|
||||
{
|
||||
Target: "release",
|
||||
With: "env-deploy",
|
||||
Vars: map[string]string{"environment": "production", "replicas": "3"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := ApplyExpansions(steps, compose, parser)
|
||||
if err != nil {
|
||||
t.Fatalf("ApplyExpansions failed: %v", err)
|
||||
}
|
||||
|
||||
if len(result) != 4 {
|
||||
t.Fatalf("expected 4 steps, got %d", len(result))
|
||||
}
|
||||
|
||||
// Check expanded step IDs include var substitution
|
||||
expectedIDs := []string{"design", "release.prepare-production", "release.deploy-production", "test"}
|
||||
for i, exp := range expectedIDs {
|
||||
if result[i].ID != exp {
|
||||
t.Errorf("result[%d].ID = %q, want %q", i, result[i].ID, exp)
|
||||
}
|
||||
}
|
||||
|
||||
// Check title includes both target and var substitution
|
||||
if result[2].Title != "Deploy to production with 3 replicas" {
|
||||
t.Errorf("deploy title = %q, want 'Deploy to production with 3 replicas'", result[2].Title)
|
||||
}
|
||||
|
||||
// Check that needs was also substituted correctly
|
||||
if len(result[2].Needs) != 1 || result[2].Needs[0] != "release.prepare-production" {
|
||||
t.Errorf("deploy needs = %v, want [release.prepare-production]", result[2].Needs)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("expand with default vars", func(t *testing.T) {
|
||||
steps := []*Step{
|
||||
{ID: "release", Title: "Release"},
|
||||
}
|
||||
|
||||
compose := &ComposeRules{
|
||||
Expand: []*ExpandRule{
|
||||
{Target: "release", With: "env-deploy"},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := ApplyExpansions(steps, compose, parser)
|
||||
if err != nil {
|
||||
t.Fatalf("ApplyExpansions failed: %v", err)
|
||||
}
|
||||
|
||||
// Check that defaults are used
|
||||
if result[0].ID != "release.prepare-staging" {
|
||||
t.Errorf("result[0].ID = %q, want 'release.prepare-staging'", result[0].ID)
|
||||
}
|
||||
if result[1].Title != "Deploy to staging with 1 replicas" {
|
||||
t.Errorf("deploy title = %q, want 'Deploy to staging with 1 replicas'", result[1].Title)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("map with var overrides", func(t *testing.T) {
|
||||
steps := []*Step{
|
||||
{ID: "deploy.api", Title: "Deploy API"},
|
||||
{ID: "deploy.web", Title: "Deploy Web"},
|
||||
}
|
||||
|
||||
compose := &ComposeRules{
|
||||
Map: []*MapRule{
|
||||
{
|
||||
Select: "deploy.*",
|
||||
With: "env-deploy",
|
||||
Vars: map[string]string{"environment": "prod"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := ApplyExpansions(steps, compose, parser)
|
||||
if err != nil {
|
||||
t.Fatalf("ApplyExpansions failed: %v", err)
|
||||
}
|
||||
|
||||
// Each deploy.* step should expand with prod environment
|
||||
expectedIDs := []string{
|
||||
"deploy.api.prepare-prod", "deploy.api.deploy-prod",
|
||||
"deploy.web.prepare-prod", "deploy.web.deploy-prod",
|
||||
}
|
||||
if len(result) != len(expectedIDs) {
|
||||
t.Fatalf("expected %d steps, got %d", len(expectedIDs), len(result))
|
||||
}
|
||||
for i, exp := range expectedIDs {
|
||||
if result[i].ID != exp {
|
||||
t.Errorf("result[%d].ID = %q, want %q", i, result[i].ID, exp)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplyExpansionsDuplicateIDs(t *testing.T) {
|
||||
// Create a temporary directory with an expansion formula
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create expansion formula that generates "{target}.draft"
|
||||
ruleOfFive := `{
|
||||
"formula": "rule-of-five",
|
||||
"type": "expansion",
|
||||
"version": 1,
|
||||
"template": [
|
||||
{"id": "{target}.draft", "title": "Draft: {target.title}"},
|
||||
{"id": "{target}.refine", "title": "Refine", "needs": ["{target}.draft"]}
|
||||
]
|
||||
}`
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "rule-of-five.formula.json"), []byte(ruleOfFive), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
parser := NewParser(tmpDir)
|
||||
|
||||
// Test: expansion creates duplicate with existing step
|
||||
t.Run("duplicate with existing step", func(t *testing.T) {
|
||||
// "implement.draft" already exists, expansion will try to create it again
|
||||
steps := []*Step{
|
||||
{ID: "design", Title: "Design"},
|
||||
{ID: "implement", Title: "Implement the feature"},
|
||||
{ID: "implement.draft", Title: "Existing draft"}, // Conflicts with expansion
|
||||
{ID: "test", Title: "Test"},
|
||||
}
|
||||
|
||||
compose := &ComposeRules{
|
||||
Expand: []*ExpandRule{
|
||||
{Target: "implement", With: "rule-of-five"},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := ApplyExpansions(steps, compose, parser)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for duplicate step IDs, got nil")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "duplicate step IDs") {
|
||||
t.Errorf("expected duplicate step IDs error, got: %v", err)
|
||||
}
|
||||
if !strings.Contains(err.Error(), "implement.draft") {
|
||||
t.Errorf("expected error to mention 'implement.draft', got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test: map creates duplicates across multiple expansions
|
||||
t.Run("map creates cross-expansion duplicates", func(t *testing.T) {
|
||||
// Create a formula that generates static IDs (not using {target})
|
||||
staticExpansion := `{
|
||||
"formula": "static-ids",
|
||||
"type": "expansion",
|
||||
"version": 1,
|
||||
"template": [
|
||||
{"id": "shared-step", "title": "Shared step"},
|
||||
{"id": "another-shared", "title": "Another shared"}
|
||||
]
|
||||
}`
|
||||
err := os.WriteFile(filepath.Join(tmpDir, "static-ids.formula.json"), []byte(staticExpansion), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
steps := []*Step{
|
||||
{ID: "impl.auth", Title: "Implement auth"},
|
||||
{ID: "impl.api", Title: "Implement API"},
|
||||
}
|
||||
|
||||
compose := &ComposeRules{
|
||||
Map: []*MapRule{
|
||||
{Select: "impl.*", With: "static-ids"},
|
||||
},
|
||||
}
|
||||
|
||||
_, err = ApplyExpansions(steps, compose, parser)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for duplicate step IDs from map, got nil")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "duplicate step IDs") {
|
||||
t.Errorf("expected duplicate step IDs error, got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestFindDuplicateStepIDs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
steps []*Step
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "no duplicates",
|
||||
steps: []*Step{
|
||||
{ID: "a"},
|
||||
{ID: "b"},
|
||||
{ID: "c"},
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
name: "top-level duplicate",
|
||||
steps: []*Step{
|
||||
{ID: "a"},
|
||||
{ID: "b"},
|
||||
{ID: "a"},
|
||||
},
|
||||
expected: []string{"a"},
|
||||
},
|
||||
{
|
||||
name: "nested duplicate",
|
||||
steps: []*Step{
|
||||
{ID: "parent", Children: []*Step{
|
||||
{ID: "child"},
|
||||
}},
|
||||
{ID: "child"}, // Duplicate with nested child
|
||||
},
|
||||
expected: []string{"child"},
|
||||
},
|
||||
{
|
||||
name: "deeply nested duplicate",
|
||||
steps: []*Step{
|
||||
{ID: "root", Children: []*Step{
|
||||
{ID: "level1", Children: []*Step{
|
||||
{ID: "level2"},
|
||||
}},
|
||||
}},
|
||||
{ID: "other", Children: []*Step{
|
||||
{ID: "level2"}, // Duplicate with deeply nested
|
||||
}},
|
||||
},
|
||||
expected: []string{"level2"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dups := findDuplicateStepIDs(tt.steps)
|
||||
|
||||
if len(dups) != len(tt.expected) {
|
||||
t.Fatalf("expected %d duplicates, got %d: %v", len(tt.expected), len(dups), dups)
|
||||
}
|
||||
|
||||
// Check all expected duplicates are found (order may vary)
|
||||
for _, exp := range tt.expected {
|
||||
found := false
|
||||
for _, dup := range dups {
|
||||
if dup == exp {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("expected duplicate %q not found in %v", exp, dups)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,10 +7,16 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
)
|
||||
|
||||
// FormulaExt is the file extension for formula files.
|
||||
const FormulaExt = ".formula.json"
|
||||
// Formula file extensions. TOML is preferred, JSON is legacy fallback.
|
||||
const (
|
||||
FormulaExtTOML = ".formula.toml"
|
||||
FormulaExtJSON = ".formula.json"
|
||||
FormulaExt = FormulaExtJSON // Legacy alias for backwards compatibility
|
||||
)
|
||||
|
||||
// Parser handles loading and resolving formulas.
|
||||
//
|
||||
@@ -68,6 +74,7 @@ func defaultSearchPaths() []string {
|
||||
}
|
||||
|
||||
// ParseFile parses a formula from a file path.
|
||||
// Detects format from extension: .formula.toml or .formula.json
|
||||
func (p *Parser) ParseFile(path string) (*Formula, error) {
|
||||
// Check cache first
|
||||
absPath, err := filepath.Abs(path)
|
||||
@@ -86,7 +93,13 @@ func (p *Parser) ParseFile(path string) (*Formula, error) {
|
||||
return nil, fmt.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
|
||||
formula, err := p.Parse(data)
|
||||
// Detect format from extension
|
||||
var formula *Formula
|
||||
if strings.HasSuffix(path, FormulaExtTOML) {
|
||||
formula, err = p.ParseTOML(data)
|
||||
} else {
|
||||
formula, err = p.Parse(data)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse %s: %w", path, err)
|
||||
}
|
||||
@@ -122,6 +135,24 @@ func (p *Parser) Parse(data []byte) (*Formula, error) {
|
||||
return &formula, nil
|
||||
}
|
||||
|
||||
// ParseTOML parses a formula from TOML bytes.
|
||||
func (p *Parser) ParseTOML(data []byte) (*Formula, error) {
|
||||
var formula Formula
|
||||
if err := toml.Unmarshal(data, &formula); err != nil {
|
||||
return nil, fmt.Errorf("toml: %w", err)
|
||||
}
|
||||
|
||||
// Set defaults
|
||||
if formula.Version == 0 {
|
||||
formula.Version = 1
|
||||
}
|
||||
if formula.Type == "" {
|
||||
formula.Type = TypeWorkflow
|
||||
}
|
||||
|
||||
return &formula, nil
|
||||
}
|
||||
|
||||
// Resolve fully resolves a formula, processing extends and expansions.
|
||||
// Returns a new formula with all inheritance applied.
|
||||
func (p *Parser) Resolve(formula *Formula) (*Formula, error) {
|
||||
@@ -205,18 +236,21 @@ func (p *Parser) Resolve(formula *Formula) (*Formula, error) {
|
||||
}
|
||||
|
||||
// loadFormula loads a formula by name from search paths.
|
||||
// Tries TOML first (.formula.toml), then falls back to JSON (.formula.json).
|
||||
func (p *Parser) loadFormula(name string) (*Formula, error) {
|
||||
// Check cache first
|
||||
if cached, ok := p.cache[name]; ok {
|
||||
return cached, nil
|
||||
}
|
||||
|
||||
// Search for the formula file
|
||||
filename := name + FormulaExt
|
||||
// Search for the formula file - try TOML first, then JSON
|
||||
extensions := []string{FormulaExtTOML, FormulaExtJSON}
|
||||
for _, dir := range p.searchPaths {
|
||||
path := filepath.Join(dir, filename)
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return p.ParseFile(path)
|
||||
for _, ext := range extensions {
|
||||
path := filepath.Join(dir, name+ext)
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return p.ParseFile(path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -249,20 +249,16 @@ type LoopSpec struct {
|
||||
// OnCompleteSpec defines actions triggered when a step completes (gt-8tmz.8).
|
||||
// Used for runtime expansion over step output (the for-each construct).
|
||||
//
|
||||
// Example JSON:
|
||||
// Example YAML:
|
||||
//
|
||||
// {
|
||||
// "id": "survey-workers",
|
||||
// "on_complete": {
|
||||
// "for_each": "output.polecats",
|
||||
// "bond": "mol-polecat-arm",
|
||||
// "vars": {
|
||||
// "polecat_name": "{item.name}",
|
||||
// "rig": "{item.rig}"
|
||||
// },
|
||||
// "parallel": true
|
||||
// }
|
||||
// }
|
||||
// step: survey-workers
|
||||
// on_complete:
|
||||
// for_each: output.polecats
|
||||
// bond: mol-polecat-arm
|
||||
// vars:
|
||||
// polecat_name: "{item.name}"
|
||||
// rig: "{item.rig}"
|
||||
// parallel: true
|
||||
type OnCompleteSpec struct {
|
||||
// ForEach is the path to the iterable collection in step output.
|
||||
// Format: "output.<field>" or "output.<field>.<nested>"
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// GetGitDir returns the actual .git directory path for the current repository.
|
||||
@@ -52,28 +53,53 @@ func GetGitHeadPath() (string, error) {
|
||||
return filepath.Join(gitDir, "HEAD"), nil
|
||||
}
|
||||
|
||||
// isWorktreeOnce ensures we only check worktree status once per process.
|
||||
// This is safe because worktree status cannot change during a single command execution.
|
||||
var (
|
||||
isWorktreeOnce sync.Once
|
||||
isWorktreeResult bool
|
||||
)
|
||||
|
||||
// IsWorktree returns true if the current directory is in a Git worktree.
|
||||
// This is determined by comparing --git-dir and --git-common-dir.
|
||||
// The result is cached after the first call since worktree status doesn't
|
||||
// change during a single command execution.
|
||||
func IsWorktree() bool {
|
||||
isWorktreeOnce.Do(func() {
|
||||
isWorktreeResult = isWorktreeUncached()
|
||||
})
|
||||
return isWorktreeResult
|
||||
}
|
||||
|
||||
// isWorktreeUncached performs the actual worktree check without caching.
|
||||
// Called once by IsWorktree and cached for subsequent calls.
|
||||
func isWorktreeUncached() bool {
|
||||
gitDir := getGitDirNoError("--git-dir")
|
||||
if gitDir == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
commonDir := getGitDirNoError("--git-common-dir")
|
||||
if commonDir == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
absGit, err1 := filepath.Abs(gitDir)
|
||||
absCommon, err2 := filepath.Abs(commonDir)
|
||||
if err1 != nil || err2 != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
return absGit != absCommon
|
||||
}
|
||||
|
||||
// mainRepoRootOnce ensures we only get main repo root once per process.
|
||||
var (
|
||||
mainRepoRootOnce sync.Once
|
||||
mainRepoRootResult string
|
||||
mainRepoRootErr error
|
||||
)
|
||||
|
||||
// GetMainRepoRoot returns the main repository root directory.
|
||||
// When in a worktree, this returns the main repository root.
|
||||
// Otherwise, it returns the regular repository root.
|
||||
@@ -82,7 +108,16 @@ func IsWorktree() bool {
|
||||
// /project/.worktrees/feature/), this correctly returns the main repo
|
||||
// root (/project/) by using git rev-parse --git-common-dir which always
|
||||
// points to the main repo's .git directory. (GH#509)
|
||||
// The result is cached after the first call.
|
||||
func GetMainRepoRoot() (string, error) {
|
||||
mainRepoRootOnce.Do(func() {
|
||||
mainRepoRootResult, mainRepoRootErr = getMainRepoRootUncached()
|
||||
})
|
||||
return mainRepoRootResult, mainRepoRootErr
|
||||
}
|
||||
|
||||
// getMainRepoRootUncached performs the actual main repo root lookup without caching.
|
||||
func getMainRepoRootUncached() (string, error) {
|
||||
// Use --git-common-dir which always returns the main repo's .git directory,
|
||||
// even when running from within a worktree or its subdirectories.
|
||||
// This is the most reliable method for finding the main repo root.
|
||||
@@ -103,13 +138,28 @@ func GetMainRepoRoot() (string, error) {
|
||||
return mainRepoRoot, nil
|
||||
}
|
||||
|
||||
// repoRootOnce ensures we only get repo root once per process.
|
||||
var (
|
||||
repoRootOnce sync.Once
|
||||
repoRootResult string
|
||||
)
|
||||
|
||||
// GetRepoRoot returns the root directory of the current git repository.
|
||||
// Returns empty string if not in a git repository.
|
||||
//
|
||||
// This function is worktree-aware and handles Windows path normalization
|
||||
// (Git on Windows may return paths like /c/Users/... or C:/Users/...).
|
||||
// It also resolves symlinks to get the canonical path.
|
||||
// The result is cached after the first call.
|
||||
func GetRepoRoot() string {
|
||||
repoRootOnce.Do(func() {
|
||||
repoRootResult = getRepoRootUncached()
|
||||
})
|
||||
return repoRootResult
|
||||
}
|
||||
|
||||
// getRepoRootUncached performs the actual repo root lookup without caching.
|
||||
func getRepoRootUncached() string {
|
||||
cmd := exec.Command("git", "rev-parse", "--show-toplevel")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
@@ -156,4 +206,20 @@ func getGitDirNoError(flag string) string {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
// ResetCaches resets all cached git information. This is intended for use
|
||||
// by tests that need to change directory between subtests.
|
||||
// In production, these caches are safe because the working directory
|
||||
// doesn't change during a single command execution.
|
||||
//
|
||||
// WARNING: Not thread-safe. Only call from single-threaded test contexts.
|
||||
func ResetCaches() {
|
||||
isWorktreeOnce = sync.Once{}
|
||||
isWorktreeResult = false
|
||||
mainRepoRootOnce = sync.Once{}
|
||||
mainRepoRootResult = ""
|
||||
mainRepoRootErr = nil
|
||||
repoRootOnce = sync.Once{}
|
||||
repoRootResult = ""
|
||||
}
|
||||
@@ -864,6 +864,7 @@ func TestCountJSONLIssues(t *testing.T) {
|
||||
// TestGetMainRepoRoot tests the GetMainRepoRoot function for various scenarios
|
||||
func TestGetMainRepoRoot(t *testing.T) {
|
||||
t.Run("returns correct root for regular repo", func(t *testing.T) {
|
||||
ResetCaches() // Reset caches from previous subtests
|
||||
repoPath, cleanup := setupTestRepo(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -877,6 +878,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
if err := os.Chdir(repoPath); err != nil {
|
||||
t.Fatalf("Failed to chdir to repo: %v", err)
|
||||
}
|
||||
ResetCaches() // Reset after chdir
|
||||
|
||||
root, err := GetMainRepoRoot()
|
||||
if err != nil {
|
||||
@@ -893,6 +895,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("returns main repo root from worktree", func(t *testing.T) {
|
||||
ResetCaches() // Reset caches from previous subtests
|
||||
repoPath, cleanup := setupTestRepo(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -913,6 +916,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
if err := os.Chdir(worktreePath); err != nil {
|
||||
t.Fatalf("Failed to chdir to worktree: %v", err)
|
||||
}
|
||||
ResetCaches() // Reset after chdir
|
||||
|
||||
root, err := GetMainRepoRoot()
|
||||
if err != nil {
|
||||
@@ -929,6 +933,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("returns main repo root from nested worktree (GH#509)", func(t *testing.T) {
|
||||
ResetCaches() // Reset caches from previous subtests
|
||||
repoPath, cleanup := setupTestRepo(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -950,6 +955,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
if err := os.Chdir(nestedWorktreePath); err != nil {
|
||||
t.Fatalf("Failed to chdir to nested worktree: %v", err)
|
||||
}
|
||||
ResetCaches() // Reset after chdir
|
||||
|
||||
root, err := GetMainRepoRoot()
|
||||
if err != nil {
|
||||
@@ -966,6 +972,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("returns main repo root from subdirectory of nested worktree", func(t *testing.T) {
|
||||
ResetCaches() // Reset caches from previous subtests
|
||||
repoPath, cleanup := setupTestRepo(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -993,6 +1000,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
if err := os.Chdir(subDir); err != nil {
|
||||
t.Fatalf("Failed to chdir to subdir: %v", err)
|
||||
}
|
||||
ResetCaches() // Reset after chdir
|
||||
|
||||
root, err := GetMainRepoRoot()
|
||||
if err != nil {
|
||||
@@ -1012,6 +1020,7 @@ func TestGetMainRepoRoot(t *testing.T) {
|
||||
// TestIsWorktree tests the IsWorktree function
|
||||
func TestIsWorktree(t *testing.T) {
|
||||
t.Run("returns false for regular repo", func(t *testing.T) {
|
||||
ResetCaches() // Reset caches from previous subtests
|
||||
repoPath, cleanup := setupTestRepo(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -1024,6 +1033,7 @@ func TestIsWorktree(t *testing.T) {
|
||||
if err := os.Chdir(repoPath); err != nil {
|
||||
t.Fatalf("Failed to chdir to repo: %v", err)
|
||||
}
|
||||
ResetCaches() // Reset after chdir
|
||||
|
||||
if IsWorktree() {
|
||||
t.Error("IsWorktree() should return false for regular repo")
|
||||
@@ -1031,6 +1041,7 @@ func TestIsWorktree(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("returns true for worktree", func(t *testing.T) {
|
||||
ResetCaches() // Reset caches from previous subtests
|
||||
repoPath, cleanup := setupTestRepo(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -1051,6 +1062,7 @@ func TestIsWorktree(t *testing.T) {
|
||||
t.Fatalf("Failed to chdir to worktree: %v", err)
|
||||
}
|
||||
|
||||
ResetCaches() // Reset after chdir to worktree
|
||||
if !IsWorktree() {
|
||||
t.Error("IsWorktree() should return true for worktree")
|
||||
}
|
||||
|
||||
@@ -336,8 +336,8 @@ func TestRun_Async(t *testing.T) {
|
||||
outputFile := filepath.Join(tmpDir, "async_output.txt")
|
||||
|
||||
// Create a hook that writes to a file
|
||||
hookScript := `#!/bin/sh
|
||||
echo "async" > ` + outputFile
|
||||
hookScript := "#!/bin/sh\n" +
|
||||
"echo \"async\" > \"" + outputFile + "\"\n"
|
||||
if err := os.WriteFile(hookPath, []byte(hookScript), 0755); err != nil {
|
||||
t.Fatalf("Failed to create hook file: %v", err)
|
||||
}
|
||||
@@ -348,15 +348,17 @@ echo "async" > ` + outputFile
|
||||
// Run should return immediately
|
||||
runner.Run(EventClose, issue)
|
||||
|
||||
// Wait for the async hook to complete with retries
|
||||
// Wait for the async hook to complete with retries.
|
||||
// Under high test load the goroutine scheduling + exec can be delayed.
|
||||
var output []byte
|
||||
var err error
|
||||
for i := 0; i < 10; i++ {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
deadline := time.Now().Add(3 * time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
output, err = os.ReadFile(outputFile)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -292,12 +292,14 @@ func IsExpiredTombstone(issue Issue, ttl time.Duration) bool {
|
||||
}
|
||||
|
||||
func merge3Way(base, left, right []Issue) ([]Issue, []string) {
|
||||
return merge3WayWithTTL(base, left, right, DefaultTombstoneTTL)
|
||||
return Merge3WayWithTTL(base, left, right, DefaultTombstoneTTL)
|
||||
}
|
||||
|
||||
// merge3WayWithTTL performs a 3-way merge with configurable tombstone TTL.
|
||||
// Merge3WayWithTTL performs a 3-way merge with configurable tombstone TTL.
|
||||
// This is the core merge function that handles tombstone semantics.
|
||||
func merge3WayWithTTL(base, left, right []Issue, ttl time.Duration) ([]Issue, []string) {
|
||||
// Use this when you need to configure TTL for testing, debugging, or
|
||||
// per-repository configuration. For default TTL behavior, use merge3Way.
|
||||
func Merge3WayWithTTL(base, left, right []Issue, ttl time.Duration) ([]Issue, []string) {
|
||||
// Build maps for quick lookup by IssueKey
|
||||
baseMap := make(map[IssueKey]Issue)
|
||||
for _, issue := range base {
|
||||
|
||||
@@ -1648,7 +1648,7 @@ func TestMerge3WayWithTTL(t *testing.T) {
|
||||
left := []Issue{tombstone}
|
||||
right := []Issue{liveIssue}
|
||||
|
||||
result, _ := merge3WayWithTTL(base, left, right, shortTTL)
|
||||
result, _ := Merge3WayWithTTL(base, left, right, shortTTL)
|
||||
if len(result) != 1 {
|
||||
t.Fatalf("expected 1 issue, got %d", len(result))
|
||||
}
|
||||
@@ -1665,7 +1665,7 @@ func TestMerge3WayWithTTL(t *testing.T) {
|
||||
left := []Issue{tombstone}
|
||||
right := []Issue{liveIssue}
|
||||
|
||||
result, _ := merge3WayWithTTL(base, left, right, longTTL)
|
||||
result, _ := Merge3WayWithTTL(base, left, right, longTTL)
|
||||
if len(result) != 1 {
|
||||
t.Fatalf("expected 1 issue, got %d", len(result))
|
||||
}
|
||||
|
||||
340
internal/routing/routes.go
Normal file
340
internal/routing/routes.go
Normal file
@@ -0,0 +1,340 @@
|
||||
package routing
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
)
|
||||
|
||||
// RoutesFileName is the name of the routes configuration file
|
||||
const RoutesFileName = "routes.jsonl"
|
||||
|
||||
// Route represents a prefix-to-path routing rule
|
||||
type Route struct {
|
||||
Prefix string `json:"prefix"` // Issue ID prefix (e.g., "gt-")
|
||||
Path string `json:"path"` // Relative path to .beads directory
|
||||
}
|
||||
|
||||
// LoadRoutes loads routes from routes.jsonl in the given beads directory.
|
||||
// Returns an empty slice if the file doesn't exist.
|
||||
func LoadRoutes(beadsDir string) ([]Route, error) {
|
||||
routesPath := filepath.Join(beadsDir, RoutesFileName)
|
||||
file, err := os.Open(routesPath) //nolint:gosec // routesPath is constructed from known beadsDir
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil // No routes file is not an error
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var routes []Route
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if line == "" || strings.HasPrefix(line, "#") {
|
||||
continue // Skip empty lines and comments
|
||||
}
|
||||
|
||||
var route Route
|
||||
if err := json.Unmarshal([]byte(line), &route); err != nil {
|
||||
continue // Skip malformed lines
|
||||
}
|
||||
if route.Prefix != "" && route.Path != "" {
|
||||
routes = append(routes, route)
|
||||
}
|
||||
}
|
||||
|
||||
return routes, scanner.Err()
|
||||
}
|
||||
|
||||
// ExtractPrefix extracts the prefix from an issue ID.
|
||||
// For "gt-abc123", returns "gt-".
|
||||
// For "bd-abc123", returns "bd-".
|
||||
// Returns empty string if no prefix found.
|
||||
func ExtractPrefix(id string) string {
|
||||
idx := strings.Index(id, "-")
|
||||
if idx < 0 {
|
||||
return ""
|
||||
}
|
||||
return id[:idx+1] // Include the hyphen
|
||||
}
|
||||
|
||||
// ExtractProjectFromPath extracts the project name from a route path.
|
||||
// For "beads/mayor/rig", returns "beads".
|
||||
// For "gastown/crew/max", returns "gastown".
|
||||
func ExtractProjectFromPath(path string) string {
|
||||
// Get the first component of the path
|
||||
parts := strings.Split(path, "/")
|
||||
if len(parts) > 0 && parts[0] != "" {
|
||||
return parts[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// LookupRigByName finds a route by rig name (first path component).
|
||||
// For example, LookupRigByName("beads", beadsDir) would find the route
|
||||
// with path "beads/mayor/rig" and return it.
|
||||
//
|
||||
// Returns the matching route and true if found, or zero Route and false if not.
|
||||
func LookupRigByName(rigName, beadsDir string) (Route, bool) {
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil || len(routes) == 0 {
|
||||
return Route{}, false
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
project := ExtractProjectFromPath(route.Path)
|
||||
if project == rigName {
|
||||
return route, true
|
||||
}
|
||||
}
|
||||
|
||||
return Route{}, false
|
||||
}
|
||||
|
||||
// LookupRigForgiving finds a route using flexible matching.
|
||||
// Accepts any of these formats and normalizes them:
|
||||
// - "bd-" (exact prefix)
|
||||
// - "bd" (prefix without hyphen, will try "bd-")
|
||||
// - "beads" (rig name)
|
||||
//
|
||||
// This provides good agent UX - meet them where they are.
|
||||
func LookupRigForgiving(input, beadsDir string) (Route, bool) {
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil || len(routes) == 0 {
|
||||
return Route{}, false
|
||||
}
|
||||
|
||||
// Normalize: remove trailing hyphen for comparison
|
||||
normalized := strings.TrimSuffix(input, "-")
|
||||
|
||||
for _, route := range routes {
|
||||
// Try exact prefix match (with or without hyphen)
|
||||
prefixBase := strings.TrimSuffix(route.Prefix, "-")
|
||||
if normalized == prefixBase || input == route.Prefix {
|
||||
return route, true
|
||||
}
|
||||
|
||||
// Try rig name match
|
||||
project := ExtractProjectFromPath(route.Path)
|
||||
if input == project {
|
||||
return route, true
|
||||
}
|
||||
}
|
||||
|
||||
return Route{}, false
|
||||
}
|
||||
|
||||
// ResolveBeadsDirForRig returns the beads directory for a given rig identifier.
|
||||
// This is used by --rig and --prefix flags to create issues in a different rig.
|
||||
//
|
||||
// The input is forgiving - accepts any of:
|
||||
// - "beads", "gastown" (rig names)
|
||||
// - "bd-", "gt-" (exact prefixes)
|
||||
// - "bd", "gt" (prefixes without hyphen)
|
||||
//
|
||||
// Parameters:
|
||||
// - rigOrPrefix: rig name or prefix in any format
|
||||
// - currentBeadsDir: the current .beads directory (used to find routes.jsonl)
|
||||
//
|
||||
// Returns:
|
||||
// - beadsDir: the target .beads directory path
|
||||
// - prefix: the issue prefix for that rig (e.g., "bd-")
|
||||
// - err: error if rig not found or path doesn't exist
|
||||
func ResolveBeadsDirForRig(rigOrPrefix, currentBeadsDir string) (beadsDir string, prefix string, err error) {
|
||||
route, found := LookupRigForgiving(rigOrPrefix, currentBeadsDir)
|
||||
if !found {
|
||||
return "", "", fmt.Errorf("rig or prefix %q not found in routes.jsonl", rigOrPrefix)
|
||||
}
|
||||
|
||||
// Resolve the target beads directory
|
||||
projectRoot := filepath.Dir(currentBeadsDir)
|
||||
targetPath := filepath.Join(projectRoot, route.Path, ".beads")
|
||||
|
||||
// Follow redirect if present
|
||||
targetPath = resolveRedirect(targetPath)
|
||||
|
||||
// Verify the target exists
|
||||
if info, statErr := os.Stat(targetPath); statErr != nil || !info.IsDir() {
|
||||
return "", "", fmt.Errorf("rig %q beads directory not found: %s", rigOrPrefix, targetPath)
|
||||
}
|
||||
|
||||
if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] Rig %q -> prefix %s, path %s\n", rigOrPrefix, route.Prefix, targetPath)
|
||||
}
|
||||
|
||||
return targetPath, route.Prefix, nil
|
||||
}
|
||||
|
||||
// ResolveToExternalRef attempts to convert a foreign issue ID to an external reference
|
||||
// using routes.jsonl for prefix-based routing.
|
||||
//
|
||||
// If the ID's prefix matches a route, returns "external:<project>:<id>".
|
||||
// Otherwise, returns empty string (no route found).
|
||||
//
|
||||
// Example: If routes.jsonl has {"prefix": "bd-", "path": "beads/mayor/rig"}
|
||||
// then ResolveToExternalRef("bd-abc", beadsDir) returns "external:beads:bd-abc"
|
||||
func ResolveToExternalRef(id, beadsDir string) string {
|
||||
routes, err := LoadRoutes(beadsDir)
|
||||
if err != nil || len(routes) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
prefix := ExtractPrefix(id)
|
||||
if prefix == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, route := range routes {
|
||||
if route.Prefix == prefix {
|
||||
project := ExtractProjectFromPath(route.Path)
|
||||
if project != "" {
|
||||
return fmt.Sprintf("external:%s:%s", project, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// ResolveBeadsDirForID determines which beads directory contains the given issue ID.
|
||||
// It first checks the local beads directory, then consults routes.jsonl for prefix-based routing.
|
||||
//
|
||||
// Parameters:
|
||||
// - ctx: context for database operations
|
||||
// - id: the issue ID to look up
|
||||
// - currentBeadsDir: the current/local .beads directory path
|
||||
//
|
||||
// Returns:
|
||||
// - beadsDir: the resolved .beads directory path
|
||||
// - routed: true if the ID was routed to a different directory
|
||||
// - err: any error encountered
|
||||
func ResolveBeadsDirForID(ctx context.Context, id, currentBeadsDir string) (string, bool, error) {
|
||||
// Step 1: Check for routes.jsonl FIRST based on ID prefix
|
||||
// This allows prefix-based routing without needing to check the local store
|
||||
routes, loadErr := LoadRoutes(currentBeadsDir)
|
||||
if loadErr == nil && len(routes) > 0 {
|
||||
prefix := ExtractPrefix(id)
|
||||
if prefix != "" {
|
||||
for _, route := range routes {
|
||||
if route.Prefix == prefix {
|
||||
// Found a matching route - resolve the path
|
||||
projectRoot := filepath.Dir(currentBeadsDir)
|
||||
targetPath := filepath.Join(projectRoot, route.Path, ".beads")
|
||||
|
||||
// Follow redirect if present
|
||||
targetPath = resolveRedirect(targetPath)
|
||||
|
||||
// Verify the target exists
|
||||
if info, err := os.Stat(targetPath); err == nil && info.IsDir() {
|
||||
// Debug logging
|
||||
if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] ID %s matched prefix %s -> %s\n", id, prefix, targetPath)
|
||||
}
|
||||
return targetPath, true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: No route matched or no routes file - use local store
|
||||
return currentBeadsDir, false, nil
|
||||
}
|
||||
|
||||
// resolveRedirect checks for a redirect file in the beads directory
|
||||
// and resolves the redirect path if present.
|
||||
func resolveRedirect(beadsDir string) string {
|
||||
redirectFile := filepath.Join(beadsDir, "redirect")
|
||||
data, err := os.ReadFile(redirectFile) //nolint:gosec // redirectFile is constructed from known beadsDir
|
||||
if err != nil {
|
||||
if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] No redirect file at %s: %v\n", redirectFile, err)
|
||||
}
|
||||
return beadsDir // No redirect
|
||||
}
|
||||
|
||||
redirectPath := strings.TrimSpace(string(data))
|
||||
if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] Read redirect: %q from %s\n", redirectPath, redirectFile)
|
||||
}
|
||||
if redirectPath == "" {
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// Handle relative paths
|
||||
if !filepath.IsAbs(redirectPath) {
|
||||
redirectPath = filepath.Join(beadsDir, redirectPath)
|
||||
}
|
||||
|
||||
// Clean and resolve the path
|
||||
redirectPath = filepath.Clean(redirectPath)
|
||||
if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] Resolved redirect path: %s\n", redirectPath)
|
||||
}
|
||||
|
||||
// Verify the redirect target exists
|
||||
if info, err := os.Stat(redirectPath); err == nil && info.IsDir() {
|
||||
if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] Followed redirect from %s -> %s\n", beadsDir, redirectPath)
|
||||
}
|
||||
return redirectPath
|
||||
} else if os.Getenv("BD_DEBUG_ROUTING") != "" {
|
||||
fmt.Fprintf(os.Stderr, "[routing] Redirect target check failed: %v\n", err)
|
||||
}
|
||||
|
||||
return beadsDir
|
||||
}
|
||||
|
||||
// RoutedStorage represents a storage connection that may have been routed
|
||||
// to a different beads directory than the local one.
|
||||
type RoutedStorage struct {
|
||||
Storage storage.Storage
|
||||
BeadsDir string
|
||||
Routed bool // true if this is a routed (non-local) storage
|
||||
}
|
||||
|
||||
// Close closes the storage connection
|
||||
func (rs *RoutedStorage) Close() error {
|
||||
if rs.Storage != nil {
|
||||
return rs.Storage.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRoutedStorageForID returns a storage connection for the given issue ID.
|
||||
// If the ID matches a route, it opens a connection to the routed database.
|
||||
// Otherwise, it returns nil (caller should use their existing storage).
|
||||
//
|
||||
// The caller is responsible for closing the returned RoutedStorage.
|
||||
func GetRoutedStorageForID(ctx context.Context, id, currentBeadsDir string) (*RoutedStorage, error) {
|
||||
beadsDir, routed, err := ResolveBeadsDirForID(ctx, id, currentBeadsDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !routed {
|
||||
return nil, nil // No routing needed, caller should use existing storage
|
||||
}
|
||||
|
||||
// Open storage for the routed directory
|
||||
dbPath := filepath.Join(beadsDir, "beads.db")
|
||||
store, err := sqlite.New(ctx, dbPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &RoutedStorage{
|
||||
Storage: store,
|
||||
BeadsDir: beadsDir,
|
||||
Routed: true,
|
||||
}, nil
|
||||
}
|
||||
@@ -88,3 +88,57 @@ func TestDetectUserRole_Fallback(t *testing.T) {
|
||||
t.Errorf("DetectUserRole() = %v, want %v (fallback)", role, Contributor)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
id string
|
||||
want string
|
||||
}{
|
||||
{"gt-abc123", "gt-"},
|
||||
{"bd-xyz", "bd-"},
|
||||
{"hq-1234", "hq-"},
|
||||
{"abc123", ""}, // No hyphen
|
||||
{"", ""}, // Empty string
|
||||
{"-abc", "-"}, // Starts with hyphen
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.id, func(t *testing.T) {
|
||||
got := ExtractPrefix(tt.id)
|
||||
if got != tt.want {
|
||||
t.Errorf("ExtractPrefix(%q) = %q, want %q", tt.id, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractProjectFromPath(t *testing.T) {
|
||||
tests := []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"beads/mayor/rig", "beads"},
|
||||
{"gastown/crew/max", "gastown"},
|
||||
{"simple", "simple"},
|
||||
{"", ""},
|
||||
{"/absolute/path", ""}, // Starts with /, first component is empty
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
got := ExtractProjectFromPath(tt.path)
|
||||
if got != tt.want {
|
||||
t.Errorf("ExtractProjectFromPath(%q) = %q, want %q", tt.path, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveToExternalRef(t *testing.T) {
|
||||
// This test is limited since it requires a routes.jsonl file
|
||||
// Just test that it returns empty string for nonexistent directory
|
||||
got := ResolveToExternalRef("bd-abc", "/nonexistent/path")
|
||||
if got != "" {
|
||||
t.Errorf("ResolveToExternalRef() = %q, want empty string for nonexistent path", got)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -333,6 +333,11 @@ func (c *Client) Ready(args *ReadyArgs) (*Response, error) {
|
||||
return c.Execute(OpReady, args)
|
||||
}
|
||||
|
||||
// Blocked gets blocked issues via the daemon
|
||||
func (c *Client) Blocked(args *BlockedArgs) (*Response, error) {
|
||||
return c.Execute(OpBlocked, args)
|
||||
}
|
||||
|
||||
// Stale gets stale issues via the daemon
|
||||
func (c *Client) Stale(args *StaleArgs) (*Response, error) {
|
||||
return c.Execute(OpStale, args)
|
||||
|
||||
107
internal/rpc/client_gate_shutdown_test.go
Normal file
107
internal/rpc/client_gate_shutdown_test.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
func TestClient_GateLifecycleAndShutdown(t *testing.T) {
|
||||
_, client, cleanup := setupTestServer(t)
|
||||
defer cleanup()
|
||||
|
||||
createResp, err := client.GateCreate(&GateCreateArgs{
|
||||
Title: "Test Gate",
|
||||
AwaitType: "human",
|
||||
AwaitID: "",
|
||||
Timeout: 5 * time.Minute,
|
||||
Waiters: []string{"mayor/"},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("GateCreate: %v", err)
|
||||
}
|
||||
|
||||
var created GateCreateResult
|
||||
if err := json.Unmarshal(createResp.Data, &created); err != nil {
|
||||
t.Fatalf("unmarshal GateCreateResult: %v", err)
|
||||
}
|
||||
if created.ID == "" {
|
||||
t.Fatalf("expected created gate ID")
|
||||
}
|
||||
|
||||
listResp, err := client.GateList(&GateListArgs{All: false})
|
||||
if err != nil {
|
||||
t.Fatalf("GateList: %v", err)
|
||||
}
|
||||
var openGates []*types.Issue
|
||||
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
|
||||
t.Fatalf("unmarshal GateList: %v", err)
|
||||
}
|
||||
if len(openGates) != 1 || openGates[0].ID != created.ID {
|
||||
t.Fatalf("unexpected open gates: %+v", openGates)
|
||||
}
|
||||
|
||||
showResp, err := client.GateShow(&GateShowArgs{ID: created.ID})
|
||||
if err != nil {
|
||||
t.Fatalf("GateShow: %v", err)
|
||||
}
|
||||
var gate types.Issue
|
||||
if err := json.Unmarshal(showResp.Data, &gate); err != nil {
|
||||
t.Fatalf("unmarshal GateShow: %v", err)
|
||||
}
|
||||
if gate.ID != created.ID || gate.IssueType != types.TypeGate {
|
||||
t.Fatalf("unexpected gate: %+v", gate)
|
||||
}
|
||||
|
||||
waitResp, err := client.GateWait(&GateWaitArgs{ID: created.ID, Waiters: []string{"deacon/"}})
|
||||
if err != nil {
|
||||
t.Fatalf("GateWait: %v", err)
|
||||
}
|
||||
var waitResult GateWaitResult
|
||||
if err := json.Unmarshal(waitResp.Data, &waitResult); err != nil {
|
||||
t.Fatalf("unmarshal GateWaitResult: %v", err)
|
||||
}
|
||||
if waitResult.AddedCount != 1 {
|
||||
t.Fatalf("expected 1 waiter added, got %d", waitResult.AddedCount)
|
||||
}
|
||||
|
||||
closeResp, err := client.GateClose(&GateCloseArgs{ID: created.ID, Reason: "done"})
|
||||
if err != nil {
|
||||
t.Fatalf("GateClose: %v", err)
|
||||
}
|
||||
var closedGate types.Issue
|
||||
if err := json.Unmarshal(closeResp.Data, &closedGate); err != nil {
|
||||
t.Fatalf("unmarshal GateClose: %v", err)
|
||||
}
|
||||
if closedGate.Status != types.StatusClosed {
|
||||
t.Fatalf("expected closed status, got %q", closedGate.Status)
|
||||
}
|
||||
|
||||
listResp, err = client.GateList(&GateListArgs{All: false})
|
||||
if err != nil {
|
||||
t.Fatalf("GateList open: %v", err)
|
||||
}
|
||||
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
|
||||
t.Fatalf("unmarshal GateList open: %v", err)
|
||||
}
|
||||
if len(openGates) != 0 {
|
||||
t.Fatalf("expected no open gates, got %+v", openGates)
|
||||
}
|
||||
|
||||
listResp, err = client.GateList(&GateListArgs{All: true})
|
||||
if err != nil {
|
||||
t.Fatalf("GateList all: %v", err)
|
||||
}
|
||||
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
|
||||
t.Fatalf("unmarshal GateList all: %v", err)
|
||||
}
|
||||
if len(openGates) != 1 || openGates[0].ID != created.ID {
|
||||
t.Fatalf("expected 1 total gate, got %+v", openGates)
|
||||
}
|
||||
|
||||
if err := client.Shutdown(); err != nil {
|
||||
t.Fatalf("Shutdown: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,8 @@ package rpc
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// Operation constants for all bd commands
|
||||
@@ -18,6 +20,7 @@ const (
|
||||
OpCount = "count"
|
||||
OpShow = "show"
|
||||
OpReady = "ready"
|
||||
OpBlocked = "blocked"
|
||||
OpStale = "stale"
|
||||
OpStats = "stats"
|
||||
OpDepAdd = "dep_add"
|
||||
@@ -86,11 +89,12 @@ type CreateArgs struct {
|
||||
WaitsFor string `json:"waits_for,omitempty"` // Spawner issue ID to wait for
|
||||
WaitsForGate string `json:"waits_for_gate,omitempty"` // Gate type: all-children or any-children
|
||||
// Messaging fields (bd-kwro)
|
||||
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
|
||||
Wisp bool `json:"wisp,omitempty"` // Wisp = ephemeral vapor from the Steam Engine; bulk-deleted when closed
|
||||
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
|
||||
Ephemeral bool `json:"ephemeral,omitempty"` // If true, not exported to JSONL; bulk-deleted when closed
|
||||
RepliesTo string `json:"replies_to,omitempty"` // Issue ID for conversation threading
|
||||
// ID generation (bd-hobo)
|
||||
IDPrefix string `json:"id_prefix,omitempty"` // Override prefix for ID generation (mol, wisp, etc.)
|
||||
IDPrefix string `json:"id_prefix,omitempty"` // Override prefix for ID generation (mol, eph, etc.)
|
||||
CreatedBy string `json:"created_by,omitempty"` // Who created the issue
|
||||
}
|
||||
|
||||
// UpdateArgs represents arguments for the update operation
|
||||
@@ -111,8 +115,8 @@ type UpdateArgs struct {
|
||||
RemoveLabels []string `json:"remove_labels,omitempty"`
|
||||
SetLabels []string `json:"set_labels,omitempty"`
|
||||
// Messaging fields (bd-kwro)
|
||||
Sender *string `json:"sender,omitempty"` // Who sent this (for messages)
|
||||
Wisp *bool `json:"wisp,omitempty"` // Wisp = ephemeral vapor from the Steam Engine; bulk-deleted when closed
|
||||
Sender *string `json:"sender,omitempty"` // Who sent this (for messages)
|
||||
Ephemeral *bool `json:"ephemeral,omitempty"` // If true, not exported to JSONL; bulk-deleted when closed
|
||||
RepliesTo *string `json:"replies_to,omitempty"` // Issue ID for conversation threading
|
||||
// Graph link fields (bd-fu83)
|
||||
RelatesTo *string `json:"relates_to,omitempty"` // JSON array of related issue IDs
|
||||
@@ -124,8 +128,16 @@ type UpdateArgs struct {
|
||||
|
||||
// CloseArgs represents arguments for the close operation
|
||||
type CloseArgs struct {
|
||||
ID string `json:"id"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
ID string `json:"id"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
SuggestNext bool `json:"suggest_next,omitempty"` // Return newly unblocked issues (GH#679)
|
||||
}
|
||||
|
||||
// CloseResult is returned when SuggestNext is true (GH#679)
|
||||
// When SuggestNext is false, just the closed issue is returned for backward compatibility
|
||||
type CloseResult struct {
|
||||
Closed *types.Issue `json:"closed"` // The issue that was closed
|
||||
Unblocked []*types.Issue `json:"unblocked,omitempty"` // Issues newly unblocked by closing
|
||||
}
|
||||
|
||||
// DeleteArgs represents arguments for the delete operation
|
||||
@@ -181,8 +193,8 @@ type ListArgs struct {
|
||||
// Parent filtering (bd-yqhh)
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
|
||||
// Wisp filtering (bd-bkul)
|
||||
Wisp *bool `json:"wisp,omitempty"`
|
||||
// Ephemeral filtering (bd-bkul)
|
||||
Ephemeral *bool `json:"ephemeral,omitempty"`
|
||||
}
|
||||
|
||||
// CountArgs represents arguments for the count operation
|
||||
@@ -243,6 +255,12 @@ type ReadyArgs struct {
|
||||
SortPolicy string `json:"sort_policy,omitempty"`
|
||||
Labels []string `json:"labels,omitempty"`
|
||||
LabelsAny []string `json:"labels_any,omitempty"`
|
||||
ParentID string `json:"parent_id,omitempty"` // Filter to descendants of this bead/epic
|
||||
}
|
||||
|
||||
// BlockedArgs represents arguments for the blocked operation
|
||||
type BlockedArgs struct {
|
||||
ParentID string `json:"parent_id,omitempty"` // Filter to descendants of this bead/epic
|
||||
}
|
||||
|
||||
// StaleArgs represents arguments for the stale command
|
||||
|
||||
@@ -81,8 +81,8 @@ func updatesFromArgs(a UpdateArgs) map[string]interface{} {
|
||||
if a.Sender != nil {
|
||||
u["sender"] = *a.Sender
|
||||
}
|
||||
if a.Wisp != nil {
|
||||
u["wisp"] = *a.Wisp
|
||||
if a.Ephemeral != nil {
|
||||
u["ephemeral"] = *a.Ephemeral
|
||||
}
|
||||
if a.RepliesTo != nil {
|
||||
u["replies_to"] = *a.RepliesTo
|
||||
@@ -176,11 +176,12 @@ func (s *Server) handleCreate(req *Request) Response {
|
||||
EstimatedMinutes: createArgs.EstimatedMinutes,
|
||||
Status: types.StatusOpen,
|
||||
// Messaging fields (bd-kwro)
|
||||
Sender: createArgs.Sender,
|
||||
Wisp: createArgs.Wisp,
|
||||
Sender: createArgs.Sender,
|
||||
Ephemeral: createArgs.Ephemeral,
|
||||
// NOTE: RepliesTo now handled via replies-to dependency (Decision 004)
|
||||
// ID generation (bd-hobo)
|
||||
IDPrefix: createArgs.IDPrefix,
|
||||
IDPrefix: createArgs.IDPrefix,
|
||||
CreatedBy: createArgs.CreatedBy,
|
||||
}
|
||||
|
||||
// Check if any dependencies are discovered-from type
|
||||
@@ -555,6 +556,26 @@ func (s *Server) handleClose(req *Request) Response {
|
||||
})
|
||||
|
||||
closedIssue, _ := store.GetIssue(ctx, closeArgs.ID)
|
||||
|
||||
// If SuggestNext is requested, find newly unblocked issues (GH#679)
|
||||
if closeArgs.SuggestNext {
|
||||
unblocked, err := store.GetNewlyUnblockedByClose(ctx, closeArgs.ID)
|
||||
if err != nil {
|
||||
// Non-fatal: still return the closed issue
|
||||
unblocked = nil
|
||||
}
|
||||
result := CloseResult{
|
||||
Closed: closedIssue,
|
||||
Unblocked: unblocked,
|
||||
}
|
||||
data, _ := json.Marshal(result)
|
||||
return Response{
|
||||
Success: true,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
// Backward compatible: just return the closed issue
|
||||
data, _ := json.Marshal(closedIssue)
|
||||
return Response{
|
||||
Success: true,
|
||||
@@ -823,8 +844,8 @@ func (s *Server) handleList(req *Request) Response {
|
||||
filter.ParentID = &listArgs.ParentID
|
||||
}
|
||||
|
||||
// Wisp filtering (bd-bkul)
|
||||
filter.Wisp = listArgs.Wisp
|
||||
// Ephemeral filtering (bd-bkul)
|
||||
filter.Ephemeral = listArgs.Ephemeral
|
||||
|
||||
// Guard against excessive ID lists to avoid SQLite parameter limits
|
||||
const maxIDs = 1000
|
||||
@@ -1201,12 +1222,16 @@ func (s *Server) handleShow(req *Request) Response {
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch comments
|
||||
comments, _ := store.GetIssueComments(ctx, issue.ID)
|
||||
|
||||
// Create detailed response with related data
|
||||
type IssueDetails struct {
|
||||
*types.Issue
|
||||
Labels []string `json:"labels,omitempty"`
|
||||
Dependencies []*types.IssueWithDependencyMetadata `json:"dependencies,omitempty"`
|
||||
Dependents []*types.IssueWithDependencyMetadata `json:"dependents,omitempty"`
|
||||
Comments []*types.Comment `json:"comments,omitempty"`
|
||||
}
|
||||
|
||||
details := &IssueDetails{
|
||||
@@ -1214,6 +1239,7 @@ func (s *Server) handleShow(req *Request) Response {
|
||||
Labels: labels,
|
||||
Dependencies: deps,
|
||||
Dependents: dependents,
|
||||
Comments: comments,
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(details)
|
||||
@@ -1242,6 +1268,7 @@ func (s *Server) handleReady(req *Request) Response {
|
||||
|
||||
wf := types.WorkFilter{
|
||||
Status: types.StatusOpen,
|
||||
Type: readyArgs.Type,
|
||||
Priority: readyArgs.Priority,
|
||||
Unassigned: readyArgs.Unassigned,
|
||||
Limit: readyArgs.Limit,
|
||||
@@ -1252,6 +1279,9 @@ func (s *Server) handleReady(req *Request) Response {
|
||||
if readyArgs.Assignee != "" && !readyArgs.Unassigned {
|
||||
wf.Assignee = &readyArgs.Assignee
|
||||
}
|
||||
if readyArgs.ParentID != "" {
|
||||
wf.ParentID = &readyArgs.ParentID
|
||||
}
|
||||
|
||||
ctx := s.reqCtx(req)
|
||||
issues, err := store.GetReadyWork(ctx, wf)
|
||||
@@ -1269,6 +1299,44 @@ func (s *Server) handleReady(req *Request) Response {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) handleBlocked(req *Request) Response {
|
||||
var blockedArgs BlockedArgs
|
||||
if err := json.Unmarshal(req.Args, &blockedArgs); err != nil {
|
||||
return Response{
|
||||
Success: false,
|
||||
Error: fmt.Sprintf("invalid blocked args: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
store := s.storage
|
||||
if store == nil {
|
||||
return Response{
|
||||
Success: false,
|
||||
Error: "storage not available (global daemon deprecated - use local daemon instead with 'bd daemon' in your project)",
|
||||
}
|
||||
}
|
||||
|
||||
var wf types.WorkFilter
|
||||
if blockedArgs.ParentID != "" {
|
||||
wf.ParentID = &blockedArgs.ParentID
|
||||
}
|
||||
|
||||
ctx := s.reqCtx(req)
|
||||
blocked, err := store.GetBlockedIssues(ctx, wf)
|
||||
if err != nil {
|
||||
return Response{
|
||||
Success: false,
|
||||
Error: fmt.Sprintf("failed to get blocked issues: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(blocked)
|
||||
return Response{
|
||||
Success: true,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) handleStale(req *Request) Response {
|
||||
var staleArgs StaleArgs
|
||||
if err := json.Unmarshal(req.Args, &staleArgs); err != nil {
|
||||
@@ -1412,7 +1480,7 @@ func (s *Server) handleGateCreate(req *Request) Response {
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1, // Gates are typically high priority
|
||||
Assignee: "deacon/",
|
||||
Wisp: true, // Gates are wisps (ephemeral)
|
||||
Ephemeral: true, // Gates are wisps (ephemeral)
|
||||
AwaitType: args.AwaitType,
|
||||
AwaitID: args.AwaitID,
|
||||
Timeout: args.Timeout,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -9,6 +10,49 @@ import (
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// TestHandleCreate_SetsCreatedBy verifies that CreatedBy is passed through RPC and stored (GH#748)
|
||||
func TestHandleCreate_SetsCreatedBy(t *testing.T) {
|
||||
store := memory.New("/tmp/test.jsonl")
|
||||
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
|
||||
|
||||
createArgs := CreateArgs{
|
||||
Title: "Test CreatedBy Field",
|
||||
IssueType: "task",
|
||||
Priority: 2,
|
||||
CreatedBy: "test-actor",
|
||||
}
|
||||
createJSON, _ := json.Marshal(createArgs)
|
||||
createReq := &Request{
|
||||
Operation: OpCreate,
|
||||
Args: createJSON,
|
||||
Actor: "test-actor",
|
||||
}
|
||||
|
||||
resp := server.handleCreate(createReq)
|
||||
if !resp.Success {
|
||||
t.Fatalf("create failed: %s", resp.Error)
|
||||
}
|
||||
|
||||
var createdIssue types.Issue
|
||||
if err := json.Unmarshal(resp.Data, &createdIssue); err != nil {
|
||||
t.Fatalf("failed to parse response: %v", err)
|
||||
}
|
||||
|
||||
// Verify CreatedBy was set in the response
|
||||
if createdIssue.CreatedBy != "test-actor" {
|
||||
t.Errorf("expected CreatedBy 'test-actor' in response, got %q", createdIssue.CreatedBy)
|
||||
}
|
||||
|
||||
// Verify CreatedBy was persisted to storage
|
||||
storedIssue, err := store.GetIssue(context.Background(), createdIssue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get issue from storage: %v", err)
|
||||
}
|
||||
if storedIssue.CreatedBy != "test-actor" {
|
||||
t.Errorf("expected CreatedBy 'test-actor' in storage, got %q", storedIssue.CreatedBy)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmitMutation(t *testing.T) {
|
||||
store := memory.New("/tmp/test.jsonl")
|
||||
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
|
||||
|
||||
@@ -188,6 +188,8 @@ func (s *Server) handleRequest(req *Request) Response {
|
||||
resp = s.handleResolveID(req)
|
||||
case OpReady:
|
||||
resp = s.handleReady(req)
|
||||
case OpBlocked:
|
||||
resp = s.handleBlocked(req)
|
||||
case OpStale:
|
||||
resp = s.handleStale(req)
|
||||
case OpStats:
|
||||
|
||||
@@ -1097,10 +1097,16 @@ func (m *MemoryStorage) getOpenBlockers(issueID string) []string {
|
||||
|
||||
// GetBlockedIssues returns issues that are blocked by other issues
|
||||
// Note: Pinned issues are excluded from the output (beads-ei4)
|
||||
func (m *MemoryStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
|
||||
func (m *MemoryStorage) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
// Build set of descendant IDs if parent filter is specified
|
||||
var descendantIDs map[string]bool
|
||||
if filter.ParentID != nil {
|
||||
descendantIDs = m.getAllDescendants(*filter.ParentID)
|
||||
}
|
||||
|
||||
var results []*types.BlockedIssue
|
||||
|
||||
for _, issue := range m.issues {
|
||||
@@ -1114,6 +1120,11 @@ func (m *MemoryStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
|
||||
continue
|
||||
}
|
||||
|
||||
// Parent filtering: only include descendants of specified parent
|
||||
if descendantIDs != nil && !descendantIDs[issue.ID] {
|
||||
continue
|
||||
}
|
||||
|
||||
blockers := m.getOpenBlockers(issue.ID)
|
||||
// Issue is "blocked" if: status is blocked, status is deferred, or has open blockers
|
||||
if issue.Status != types.StatusBlocked && issue.Status != types.StatusDeferred && len(blockers) == 0 {
|
||||
@@ -1149,6 +1160,27 @@ func (m *MemoryStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// getAllDescendants returns all descendant IDs of a parent issue recursively
|
||||
func (m *MemoryStorage) getAllDescendants(parentID string) map[string]bool {
|
||||
descendants := make(map[string]bool)
|
||||
m.collectDescendants(parentID, descendants)
|
||||
return descendants
|
||||
}
|
||||
|
||||
// collectDescendants recursively collects all descendants of a parent
|
||||
func (m *MemoryStorage) collectDescendants(parentID string, descendants map[string]bool) {
|
||||
for issueID, deps := range m.dependencies {
|
||||
for _, dep := range deps {
|
||||
if dep.Type == types.DepParentChild && dep.DependsOnID == parentID {
|
||||
if !descendants[issueID] {
|
||||
descendants[issueID] = true
|
||||
m.collectDescendants(issueID, descendants)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MemoryStorage) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error) {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1184,6 +1216,58 @@ func (m *MemoryStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
|
||||
return stale, nil
|
||||
}
|
||||
|
||||
// GetNewlyUnblockedByClose returns issues that became unblocked when the given issue was closed.
|
||||
// This is used by the --suggest-next flag on bd close (GH#679).
|
||||
func (m *MemoryStorage) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
var unblocked []*types.Issue
|
||||
|
||||
// Find issues that depend on the closed issue
|
||||
for issueID, deps := range m.dependencies {
|
||||
issue, exists := m.issues[issueID]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only consider open/in_progress, non-pinned issues
|
||||
if issue.Status != types.StatusOpen && issue.Status != types.StatusInProgress {
|
||||
continue
|
||||
}
|
||||
if issue.Pinned {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this issue depended on the closed issue
|
||||
dependedOnClosed := false
|
||||
for _, dep := range deps {
|
||||
if dep.DependsOnID == closedIssueID && dep.Type == types.DepBlocks {
|
||||
dependedOnClosed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !dependedOnClosed {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if now unblocked (no remaining open blockers)
|
||||
blockers := m.getOpenBlockers(issueID)
|
||||
if len(blockers) == 0 {
|
||||
issueCopy := *issue
|
||||
unblocked = append(unblocked, &issueCopy)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by priority ascending
|
||||
sort.Slice(unblocked, func(i, j int) bool {
|
||||
return unblocked[i].Priority < unblocked[j].Priority
|
||||
})
|
||||
|
||||
return unblocked, nil
|
||||
}
|
||||
|
||||
func (m *MemoryStorage) AddComment(ctx context.Context, issueID, actor, comment string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
921
internal/storage/memory/memory_more_coverage_test.go
Normal file
921
internal/storage/memory/memory_more_coverage_test.go
Normal file
@@ -0,0 +1,921 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
func TestMemoryStorage_LoadFromIssues_IndexesAndCounters(t *testing.T) {
|
||||
store := New("/tmp/example.jsonl")
|
||||
defer store.Close()
|
||||
|
||||
extRef := "ext-1"
|
||||
issues := []*types.Issue{
|
||||
nil,
|
||||
{
|
||||
ID: "bd-10",
|
||||
Title: "Ten",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
ExternalRef: &extRef,
|
||||
Dependencies: []*types.Dependency{{
|
||||
IssueID: "bd-10",
|
||||
DependsOnID: "bd-2",
|
||||
Type: types.DepBlocks,
|
||||
}},
|
||||
Labels: []string{"l1"},
|
||||
Comments: []*types.Comment{{ID: 1, IssueID: "bd-10", Author: "a", Text: "c"}},
|
||||
},
|
||||
{ID: "bd-2", Title: "Two", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
|
||||
{ID: "bd-a3f8e9", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
|
||||
{ID: "bd-a3f8e9.3", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
|
||||
}
|
||||
|
||||
if err := store.LoadFromIssues(issues); err != nil {
|
||||
t.Fatalf("LoadFromIssues: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
got, err := store.GetIssueByExternalRef(ctx, "ext-1")
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssueByExternalRef: %v", err)
|
||||
}
|
||||
if got == nil || got.ID != "bd-10" {
|
||||
t.Fatalf("GetIssueByExternalRef got=%v", got)
|
||||
}
|
||||
if len(got.Dependencies) != 1 || got.Dependencies[0].DependsOnID != "bd-2" {
|
||||
t.Fatalf("expected deps attached")
|
||||
}
|
||||
if len(got.Labels) != 1 || got.Labels[0] != "l1" {
|
||||
t.Fatalf("expected labels attached")
|
||||
}
|
||||
|
||||
// Exercise CreateIssue ID generation based on the loaded counter (bd-10 => next should be bd-11).
|
||||
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
|
||||
t.Fatalf("SetConfig: %v", err)
|
||||
}
|
||||
newIssue := &types.Issue{Title: "New", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
if err := store.CreateIssue(ctx, newIssue, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue: %v", err)
|
||||
}
|
||||
if newIssue.ID != "bd-11" {
|
||||
t.Fatalf("expected generated id bd-11, got %q", newIssue.ID)
|
||||
}
|
||||
|
||||
// Hierarchical counter for parent extracted from bd-a3f8e9.3.
|
||||
childID, err := store.GetNextChildID(ctx, "bd-a3f8e9")
|
||||
if err != nil {
|
||||
t.Fatalf("GetNextChildID: %v", err)
|
||||
}
|
||||
if childID != "bd-a3f8e9.4" {
|
||||
t.Fatalf("expected bd-a3f8e9.4, got %q", childID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_GetAllIssues_SortsAndCopies(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
// Create out-of-order IDs.
|
||||
a := &types.Issue{ID: "bd-2", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
b := &types.Issue{ID: "bd-1", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
if err := store.CreateIssue(ctx, a, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue a: %v", err)
|
||||
}
|
||||
if err := store.CreateIssue(ctx, b, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue b: %v", err)
|
||||
}
|
||||
|
||||
if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
|
||||
t.Fatalf("AddLabel: %v", err)
|
||||
}
|
||||
|
||||
all := store.GetAllIssues()
|
||||
if len(all) != 2 {
|
||||
t.Fatalf("expected 2 issues, got %d", len(all))
|
||||
}
|
||||
if all[0].ID != "bd-1" || all[1].ID != "bd-2" {
|
||||
t.Fatalf("expected sorted by ID, got %q then %q", all[0].ID, all[1].ID)
|
||||
}
|
||||
|
||||
// Returned issues must be copies (mutating should not affect stored issue struct).
|
||||
all[1].Title = "mutated"
|
||||
got, err := store.GetIssue(ctx, "bd-2")
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssue: %v", err)
|
||||
}
|
||||
if got.Title != "A" {
|
||||
t.Fatalf("expected stored title unchanged, got %q", got.Title)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_CreateIssues_DefaultPrefix_DuplicateExisting_ExternalRef(t *testing.T) {
|
||||
store := New("")
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
// Default prefix should be "bd" when unset.
|
||||
issues := []*types.Issue{{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
|
||||
if err := store.CreateIssues(ctx, issues, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssues: %v", err)
|
||||
}
|
||||
if issues[0].ID != "bd-1" {
|
||||
t.Fatalf("expected bd-1, got %q", issues[0].ID)
|
||||
}
|
||||
|
||||
ext := "ext"
|
||||
batch := []*types.Issue{{ID: "bd-x", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, ExternalRef: &ext}}
|
||||
if err := store.CreateIssues(ctx, batch, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssues: %v", err)
|
||||
}
|
||||
if got, _ := store.GetIssueByExternalRef(ctx, "ext"); got == nil || got.ID != "bd-x" {
|
||||
t.Fatalf("expected external ref indexed")
|
||||
}
|
||||
|
||||
// Duplicate existing issue ID branch.
|
||||
dup := []*types.Issue{{ID: "bd-x", Title: "Dup", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
|
||||
if err := store.CreateIssues(ctx, dup, "actor"); err == nil {
|
||||
t.Fatalf("expected duplicate existing issue error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_GetIssueByExternalRef_IndexPointsToMissingIssue(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
store.mu.Lock()
|
||||
store.externalRefToID["dangling"] = "bd-nope"
|
||||
store.mu.Unlock()
|
||||
|
||||
got, err := store.GetIssueByExternalRef(ctx, "dangling")
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssueByExternalRef: %v", err)
|
||||
}
|
||||
if got != nil {
|
||||
t.Fatalf("expected nil for dangling ref")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_DependencyCounts_Records_Tree_Cycles(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
c := &types.Issue{ID: "bd-3", Title: "C", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
d := &types.Issue{ID: "bd-4", Title: "D", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
for _, iss := range []*types.Issue{a, b, c, d} {
|
||||
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
|
||||
t.Fatalf("AddDependency: %v", err)
|
||||
}
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: c.ID, Type: types.DepBlocks}, "actor"); err != nil {
|
||||
t.Fatalf("AddDependency: %v", err)
|
||||
}
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: d.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
|
||||
t.Fatalf("AddDependency: %v", err)
|
||||
}
|
||||
|
||||
counts, err := store.GetDependencyCounts(ctx, []string{a.ID, b.ID, "bd-missing"})
|
||||
if err != nil {
|
||||
t.Fatalf("GetDependencyCounts: %v", err)
|
||||
}
|
||||
if counts[a.ID].DependencyCount != 2 || counts[a.ID].DependentCount != 0 {
|
||||
t.Fatalf("unexpected counts for A: %+v", counts[a.ID])
|
||||
}
|
||||
if counts[b.ID].DependencyCount != 0 || counts[b.ID].DependentCount != 2 {
|
||||
t.Fatalf("unexpected counts for B: %+v", counts[b.ID])
|
||||
}
|
||||
if counts["bd-missing"].DependencyCount != 0 || counts["bd-missing"].DependentCount != 0 {
|
||||
t.Fatalf("unexpected counts for missing: %+v", counts["bd-missing"])
|
||||
}
|
||||
|
||||
deps, err := store.GetDependencyRecords(ctx, a.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetDependencyRecords: %v", err)
|
||||
}
|
||||
if len(deps) != 2 {
|
||||
t.Fatalf("expected 2 deps, got %d", len(deps))
|
||||
}
|
||||
|
||||
allDeps, err := store.GetAllDependencyRecords(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetAllDependencyRecords: %v", err)
|
||||
}
|
||||
if len(allDeps[a.ID]) != 2 {
|
||||
t.Fatalf("expected all deps for A")
|
||||
}
|
||||
|
||||
nodes, err := store.GetDependencyTree(ctx, a.ID, 3, false, false)
|
||||
if err != nil {
|
||||
t.Fatalf("GetDependencyTree: %v", err)
|
||||
}
|
||||
if len(nodes) != 2 || nodes[0].Depth != 1 {
|
||||
t.Fatalf("unexpected tree: %+v", nodes)
|
||||
}
|
||||
|
||||
cycles, err := store.DetectCycles(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("DetectCycles: %v", err)
|
||||
}
|
||||
if cycles != nil {
|
||||
t.Fatalf("expected nil cycles, got %+v", cycles)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_HashTracking_NoOps(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
if hash, err := store.GetDirtyIssueHash(ctx, "bd-1"); err != nil || hash != "" {
|
||||
t.Fatalf("GetDirtyIssueHash: hash=%q err=%v", hash, err)
|
||||
}
|
||||
if hash, err := store.GetExportHash(ctx, "bd-1"); err != nil || hash != "" {
|
||||
t.Fatalf("GetExportHash: hash=%q err=%v", hash, err)
|
||||
}
|
||||
if err := store.SetExportHash(ctx, "bd-1", "h"); err != nil {
|
||||
t.Fatalf("SetExportHash: %v", err)
|
||||
}
|
||||
if err := store.ClearAllExportHashes(ctx); err != nil {
|
||||
t.Fatalf("ClearAllExportHashes: %v", err)
|
||||
}
|
||||
if hash, err := store.GetJSONLFileHash(ctx); err != nil || hash != "" {
|
||||
t.Fatalf("GetJSONLFileHash: hash=%q err=%v", hash, err)
|
||||
}
|
||||
if err := store.SetJSONLFileHash(ctx, "h"); err != nil {
|
||||
t.Fatalf("SetJSONLFileHash: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMemoryStorage_LabelsAndCommentsHelpers covers the batch helpers for
// labels and comments: GetLabelsForIssues / GetIssuesByLabel and
// GetCommentsForIssues, including how they treat IDs with no data.
func TestMemoryStorage_LabelsAndCommentsHelpers(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	// Two issues, one label each.
	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue a: %v", err)
	}
	if err := store.CreateIssue(ctx, b, "actor"); err != nil {
		t.Fatalf("CreateIssue b: %v", err)
	}

	if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	if err := store.AddLabel(ctx, b.ID, "l2", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}

	// Batch lookup: the unknown ID "bd-missing" gets no map entry, so only
	// the two labelled issues appear in the result.
	labels, err := store.GetLabelsForIssues(ctx, []string{a.ID, b.ID, "bd-missing"})
	if err != nil {
		t.Fatalf("GetLabelsForIssues: %v", err)
	}
	if len(labels) != 2 {
		t.Fatalf("expected 2 entries, got %d", len(labels))
	}
	if labels[a.ID][0] != "l1" {
		t.Fatalf("unexpected labels for A: %+v", labels[a.ID])
	}

	// Reverse lookup: "l1" is attached only to issue A.
	issues, err := store.GetIssuesByLabel(ctx, "l1")
	if err != nil {
		t.Fatalf("GetIssuesByLabel: %v", err)
	}
	if len(issues) != 1 || issues[0].ID != a.ID {
		t.Fatalf("unexpected issues: %+v", issues)
	}

	// Comment batch lookup: only A has a comment; B contributes nothing.
	if _, err := store.AddIssueComment(ctx, a.ID, "author", "text"); err != nil {
		t.Fatalf("AddIssueComment: %v", err)
	}
	comments, err := store.GetCommentsForIssues(ctx, []string{a.ID, b.ID})
	if err != nil {
		t.Fatalf("GetCommentsForIssues: %v", err)
	}
	if len(comments[a.ID]) != 1 {
		t.Fatalf("expected comments for A")
	}
}
|
||||
|
||||
// TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers is a grab-bag
// test over the memory store's accessors and lifecycle helpers: Path /
// UnderlyingDB / UnderlyingConn / RunInTransaction, stale-issue detection,
// event limiting, custom-status config parsing, and the rename/ID helpers.
// It constructs the store directly with New (not setupTestMemory) so the
// configured JSONL path is known.
func TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers(t *testing.T) {
	store := New("/tmp/x.jsonl")
	defer store.Close()
	ctx := context.Background()

	// Identity/plumbing accessors: a memory store has no SQL backing, so
	// the DB accessors report nil/error.
	if store.Path() != "/tmp/x.jsonl" {
		t.Fatalf("Path mismatch")
	}
	if store.UnderlyingDB() != nil {
		t.Fatalf("expected nil UnderlyingDB")
	}
	if _, err := store.UnderlyingConn(ctx); err == nil {
		t.Fatalf("expected UnderlyingConn error")
	}
	if err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { return nil }); err == nil {
		t.Fatalf("expected RunInTransaction error")
	}

	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}

	// Force updated_at into the past for stale detection.
	// NOTE(review): this mutates the caller's pointer `a` rather than
	// store.issues[a.ID] — it only works if CreateIssue retains the same
	// *types.Issue; confirm against the store implementation.
	store.mu.Lock()
	a.UpdatedAt = time.Now().Add(-10 * 24 * time.Hour)
	store.mu.Unlock()

	stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 10})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != a.ID {
		t.Fatalf("unexpected stale: %+v", stale)
	}

	if err := store.AddComment(ctx, a.ID, "actor", "c"); err != nil {
		t.Fatalf("AddComment: %v", err)
	}
	if err := store.MarkIssueDirty(ctx, a.ID); err != nil {
		t.Fatalf("MarkIssueDirty: %v", err)
	}

	// Generate multiple events and ensure limiting returns the last N.
	if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t1"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t2"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	evs, err := store.GetEvents(ctx, a.ID, 2)
	if err != nil {
		t.Fatalf("GetEvents: %v", err)
	}
	if len(evs) != 2 {
		t.Fatalf("expected 2 events, got %d", len(evs))
	}

	// Custom statuses: the raw config value is comma-separated with messy
	// whitespace and an empty entry; parsing must trim and drop empties.
	if err := store.SetConfig(ctx, "status.custom", " triage, blocked , ,done "); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	statuses, err := store.GetCustomStatuses(ctx)
	if err != nil {
		t.Fatalf("GetCustomStatuses: %v", err)
	}
	if len(statuses) != 3 || statuses[0] != "triage" || statuses[1] != "blocked" || statuses[2] != "done" {
		t.Fatalf("unexpected statuses: %+v", statuses)
	}
	if got := parseCustomStatuses(""); got != nil {
		t.Fatalf("expected nil for empty parseCustomStatuses")
	}

	// Empty custom statuses.
	if err := store.DeleteConfig(ctx, "status.custom"); err != nil {
		t.Fatalf("DeleteConfig: %v", err)
	}
	statuses, err = store.GetCustomStatuses(ctx)
	if err != nil {
		t.Fatalf("GetCustomStatuses(empty): %v", err)
	}
	if statuses != nil {
		t.Fatalf("expected nil statuses when unset, got %+v", statuses)
	}

	// Smoke-check the remaining helpers: eligible-epics query succeeds,
	// renaming an unknown issue ID errors, and the prefix renames are
	// tolerated even with nothing to rename.
	if _, err := store.GetEpicsEligibleForClosure(ctx); err != nil {
		t.Fatalf("GetEpicsEligibleForClosure: %v", err)
	}

	if err := store.UpdateIssueID(ctx, "old", "new", nil, "actor"); err == nil {
		t.Fatalf("expected UpdateIssueID error")
	}
	if err := store.RenameDependencyPrefix(ctx, "old", "new"); err != nil {
		t.Fatalf("RenameDependencyPrefix: %v", err)
	}
	if err := store.RenameCounterPrefix(ctx, "old", "new"); err != nil {
		t.Fatalf("RenameCounterPrefix: %v", err)
	}
}
|
||||
|
||||
func TestMemoryStorage_AddLabelAndAddDependency_ErrorPaths(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
issue := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
if err := store.CreateIssue(ctx, issue, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue: %v", err)
|
||||
}
|
||||
|
||||
if err := store.AddLabel(ctx, "bd-missing", "l", "actor"); err == nil {
|
||||
t.Fatalf("expected AddLabel error for missing issue")
|
||||
}
|
||||
if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
|
||||
t.Fatalf("AddLabel: %v", err)
|
||||
}
|
||||
// Duplicate label is a no-op.
|
||||
if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
|
||||
t.Fatalf("AddLabel duplicate: %v", err)
|
||||
}
|
||||
|
||||
// AddDependency error paths.
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: "bd-missing", DependsOnID: issue.ID, Type: types.DepBlocks}, "actor"); err == nil {
|
||||
t.Fatalf("expected AddDependency error for missing IssueID")
|
||||
}
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: issue.ID, DependsOnID: "bd-missing", Type: types.DepBlocks}, "actor"); err == nil {
|
||||
t.Fatalf("expected AddDependency error for missing DependsOnID")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_GetNextChildID_Errors(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
if _, err := store.GetNextChildID(ctx, "bd-missing"); err == nil {
|
||||
t.Fatalf("expected error for missing parent")
|
||||
}
|
||||
|
||||
deep := &types.Issue{ID: "bd-1.1.1.1", Title: "Deep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
if err := store.CreateIssue(ctx, deep, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue: %v", err)
|
||||
}
|
||||
if _, err := store.GetNextChildID(ctx, deep.ID); err == nil {
|
||||
t.Fatalf("expected max depth error")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments verifies
// that GetAllIssues hydrates each returned issue with its dependency and
// comment records rather than returning bare issue rows.
func TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	// A depends on B (blocks) and carries one comment.
	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue a: %v", err)
	}
	if err := store.CreateIssue(ctx, b, "actor"); err != nil {
		t.Fatalf("CreateIssue b: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	if _, err := store.AddIssueComment(ctx, a.ID, "author", "text"); err != nil {
		t.Fatalf("AddIssueComment: %v", err)
	}

	// Locate A in the full dump; order of GetAllIssues is not assumed.
	all := store.GetAllIssues()
	var gotA *types.Issue
	for _, iss := range all {
		if iss.ID == a.ID {
			gotA = iss
			break
		}
	}
	if gotA == nil {
		t.Fatalf("expected to find issue A")
	}
	// Both the dependency edge and the comment must be attached.
	if len(gotA.Dependencies) != 1 || gotA.Dependencies[0].DependsOnID != b.ID {
		t.Fatalf("expected deps attached")
	}
	if len(gotA.Comments) != 1 || gotA.Comments[0].Text != "text" {
		t.Fatalf("expected comments attached")
	}
}
|
||||
|
||||
// TestMemoryStorage_GetStaleIssues_FilteringAndLimit checks that stale
// detection excludes closed issues, honors a status filter, and applies
// Limit after ordering oldest-first.
func TestMemoryStorage_GetStaleIssues_FilteringAndLimit(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	// Three issues: two open (different statuses) and one that gets closed,
	// which must never be reported as stale regardless of age.
	old := &types.Issue{ID: "bd-1", Title: "Old", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	newer := &types.Issue{ID: "bd-2", Title: "Newer", Status: types.StatusInProgress, Priority: 1, IssueType: types.TypeTask}
	closed := &types.Issue{ID: "bd-3", Title: "Closed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{old, newer, closed} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, closed.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}

	// Backdate updated_at directly in the store map so all three are past
	// the 7-day staleness threshold, with a known age ordering.
	store.mu.Lock()
	store.issues[old.ID].UpdatedAt = time.Now().Add(-20 * 24 * time.Hour)
	store.issues[newer.ID].UpdatedAt = time.Now().Add(-10 * 24 * time.Hour)
	store.issues[closed.ID].UpdatedAt = time.Now().Add(-30 * 24 * time.Hour)
	store.mu.Unlock()

	// Status filter: only the in_progress issue qualifies.
	stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Status: "in_progress"})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != newer.ID {
		t.Fatalf("unexpected stale filtered: %+v", stale)
	}

	// Limit: with no status filter the oldest non-closed issue wins.
	stale, err = store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 1})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != old.ID {
		t.Fatalf("expected oldest stale first, got %+v", stale)
	}
}
|
||||
|
||||
// TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting verifies
// that GetStatistics counts an open epic as eligible for closure once all
// of its parent-child children are closed.
func TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	// One epic with two task children; both children end up closed.
	ep := &types.Issue{ID: "bd-1", Title: "Epic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	c1 := &types.Issue{ID: "bd-2", Title: "Child1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	c2 := &types.Issue{ID: "bd-3", Title: "Child2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{ep, c1, c2} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, c1.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue c1: %v", err)
	}
	if err := store.CloseIssue(ctx, c2.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue c2: %v", err)
	}
	// Parent-child deps: child -> epic.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: c1.ID, DependsOnID: ep.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: c2.ID, DependsOnID: ep.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}

	// The epic is still open but all children are closed → exactly one
	// epic is eligible for closure.
	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics: %v", err)
	}
	if stats.EpicsEligibleForClosure != 1 {
		t.Fatalf("expected 1 epic eligible, got %d", stats.EpicsEligibleForClosure)
	}
}
|
||||
|
||||
// TestMemoryStorage_UpdateIssue_SearchIssues_ReadyWork_BlockedIssues is a
// large scenario test over one fixture graph. Fixture: an epic parent, a
// child (of the epic, blocked by "blocker", assigned to alice), an open
// blocker, a pinned task, and a merge-request ("workflow") issue. It then
// exercises UpdateIssue field semantics, SearchIssues filters, GetReadyWork
// filters/sort policies, and GetBlockedIssues. Assertion counts depend on
// the exact setup order above each section — do not reorder.
func TestMemoryStorage_UpdateIssue_SearchIssues_ReadyWork_BlockedIssues(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	now := time.Now()
	assignee := "alice"

	parent := &types.Issue{ID: "bd-1", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	child := &types.Issue{ID: "bd-2", Title: "Child", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask, Assignee: assignee}
	blocker := &types.Issue{ID: "bd-3", Title: "Blocker", Status: types.StatusOpen, Priority: 3, IssueType: types.TypeTask}
	pinned := &types.Issue{ID: "bd-4", Title: "Pinned", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, Pinned: true}
	workflow := &types.Issue{ID: "bd-5", Title: "Workflow", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeMergeRequest}
	for _, iss := range []*types.Issue{parent, child, blocker, pinned, workflow} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}

	// Make created_at deterministic for sorting.
	// (parent oldest; blocker is the most recent of the two ready issues,
	// which the hybrid-sort assertion below relies on.)
	store.mu.Lock()
	store.issues[parent.ID].CreatedAt = now.Add(-100 * time.Hour)
	store.issues[child.ID].CreatedAt = now.Add(-1 * time.Hour)
	store.issues[blocker.ID].CreatedAt = now.Add(-2 * time.Hour)
	store.issues[pinned.ID].CreatedAt = now.Add(-3 * time.Hour)
	store.issues[workflow.ID].CreatedAt = now.Add(-4 * time.Hour)
	store.mu.Unlock()

	// Dependencies: child is a child of parent; child is blocked by blocker.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: parent.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency parent-child: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency blocks: %v", err)
	}

	// AddDependency duplicate error path.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err == nil {
		t.Fatalf("expected duplicate dependency error")
	}

	// UpdateIssue: exercise assignee nil, external_ref update+clear, and closed_at behavior.
	// Seed the old external ref directly in the store's maps so the update
	// path has an existing mapping to replace.
	ext := "old-ext"
	store.mu.Lock()
	store.issues[child.ID].ExternalRef = &ext
	store.externalRefToID[ext] = child.ID
	store.mu.Unlock()

	if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"assignee": nil, "external_ref": "new-ext"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	if got, _ := store.GetIssueByExternalRef(ctx, "old-ext"); got != nil {
		t.Fatalf("expected old-ext removed")
	}
	if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got == nil || got.ID != child.ID {
		t.Fatalf("expected new-ext mapping")
	}

	// Closing via status update sets ClosedAt; reopening clears it and a
	// nil external_ref drops the mapping.
	if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
		t.Fatalf("UpdateIssue close: %v", err)
	}
	closed, _ := store.GetIssue(ctx, child.ID)
	if closed.ClosedAt == nil {
		t.Fatalf("expected ClosedAt set")
	}
	if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusOpen), "external_ref": nil}, "actor"); err != nil {
		t.Fatalf("UpdateIssue reopen: %v", err)
	}
	reopened, _ := store.GetIssue(ctx, child.ID)
	if reopened.ClosedAt != nil {
		t.Fatalf("expected ClosedAt cleared")
	}
	if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got != nil {
		t.Fatalf("expected new-ext cleared")
	}

	// SearchIssues: query, label AND/OR, IDs filter, ParentID filter, limit.
	if err := store.AddLabel(ctx, parent.ID, "l1", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	if err := store.AddLabel(ctx, child.ID, "l1", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	if err := store.AddLabel(ctx, child.ID, "l2", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}

	// Text query + status filter matches only the parent.
	st := types.StatusOpen
	res, err := store.SearchIssues(ctx, "parent", types.IssueFilter{Status: &st})
	if err != nil {
		t.Fatalf("SearchIssues: %v", err)
	}
	if len(res) != 1 || res[0].ID != parent.ID {
		t.Fatalf("unexpected SearchIssues results: %+v", res)
	}

	// Labels is AND semantics: only child carries both l1 and l2.
	res, err = store.SearchIssues(ctx, "", types.IssueFilter{Labels: []string{"l1", "l2"}})
	if err != nil {
		t.Fatalf("SearchIssues labels AND: %v", err)
	}
	if len(res) != 1 || res[0].ID != child.ID {
		t.Fatalf("unexpected labels AND results: %+v", res)
	}

	res, err = store.SearchIssues(ctx, "", types.IssueFilter{IDs: []string{child.ID}})
	if err != nil {
		t.Fatalf("SearchIssues IDs: %v", err)
	}
	if len(res) != 1 || res[0].ID != child.ID {
		t.Fatalf("unexpected IDs results: %+v", res)
	}

	res, err = store.SearchIssues(ctx, "", types.IssueFilter{ParentID: &parent.ID})
	if err != nil {
		t.Fatalf("SearchIssues ParentID: %v", err)
	}
	if len(res) != 1 || res[0].ID != child.ID {
		t.Fatalf("unexpected ParentID results: %+v", res)
	}

	// LabelsAny is OR semantics; Limit caps the result set.
	res, err = store.SearchIssues(ctx, "", types.IssueFilter{LabelsAny: []string{"l2", "missing"}, Limit: 1})
	if err != nil {
		t.Fatalf("SearchIssues labels OR: %v", err)
	}
	if len(res) != 1 {
		t.Fatalf("expected limit 1")
	}

	// Ready work: child is blocked, pinned excluded, workflow excluded by default.
	ready, err := store.GetReadyWork(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("GetReadyWork: %v", err)
	}
	if len(ready) != 2 { // parent + blocker
		t.Fatalf("expected 2 ready issues, got %d: %+v", len(ready), ready)
	}

	// Filter by workflow type explicitly.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Type: string(types.TypeMergeRequest)})
	if err != nil {
		t.Fatalf("GetReadyWork type: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != workflow.ID {
		t.Fatalf("expected only workflow issue, got %+v", ready)
	}

	// Status + priority filters.
	prio := 3
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Status: types.StatusOpen, Priority: &prio})
	if err != nil {
		t.Fatalf("GetReadyWork status+priority: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != blocker.ID {
		t.Fatalf("expected blocker only, got %+v", ready)
	}

	// Label filters.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Labels: []string{"l1"}})
	if err != nil {
		t.Fatalf("GetReadyWork labels AND: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != parent.ID {
		t.Fatalf("expected parent only, got %+v", ready)
	}
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{LabelsAny: []string{"l2"}})
	if err != nil {
		t.Fatalf("GetReadyWork labels OR: %v", err)
	}
	if len(ready) != 0 {
		t.Fatalf("expected 0 because only l2 issue is blocked")
	}

	// Assignee filter vs Unassigned precedence.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Assignee: &assignee})
	if err != nil {
		t.Fatalf("GetReadyWork assignee: %v", err)
	}
	if len(ready) != 0 {
		t.Fatalf("expected 0 due to child being blocked")
	}
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Unassigned: true})
	if err != nil {
		t.Fatalf("GetReadyWork unassigned: %v", err)
	}
	for _, iss := range ready {
		if iss.Assignee != "" {
			t.Fatalf("expected unassigned only")
		}
	}

	// Sort policies + limit.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyOldest, Limit: 1})
	if err != nil {
		t.Fatalf("GetReadyWork oldest: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != parent.ID {
		t.Fatalf("expected oldest=parent, got %+v", ready)
	}
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyPriority})
	if err != nil {
		t.Fatalf("GetReadyWork priority: %v", err)
	}
	if len(ready) < 2 || ready[0].Priority > ready[1].Priority {
		t.Fatalf("expected priority sort")
	}
	// Hybrid: recent issues first.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyHybrid})
	if err != nil {
		t.Fatalf("GetReadyWork hybrid: %v", err)
	}
	if len(ready) != 2 || ready[0].ID != blocker.ID {
		t.Fatalf("expected recent (blocker) first in hybrid, got %+v", ready)
	}

	// Blocked issues: child is blocked by an open blocker.
	blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("GetBlockedIssues: %v", err)
	}
	if len(blocked) != 1 || blocked[0].ID != child.ID || blocked[0].BlockedByCount != 1 {
		t.Fatalf("unexpected blocked issues: %+v", blocked)
	}

	// Cover getOpenBlockers missing-blocker branch.
	missing := &types.Issue{ID: "bd-6", Title: "Missing blocker dep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, missing, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	// Bypass AddDependency validation to cover the missing-blocker branch in getOpenBlockers.
	store.mu.Lock()
	store.dependencies[missing.ID] = append(store.dependencies[missing.ID], &types.Dependency{IssueID: missing.ID, DependsOnID: "bd-does-not-exist", Type: types.DepBlocks})
	store.mu.Unlock()
	blocked, err = store.GetBlockedIssues(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("GetBlockedIssues: %v", err)
	}
	if len(blocked) != 2 {
		t.Fatalf("expected 2 blocked issues, got %d", len(blocked))
	}
}
|
||||
|
||||
// TestMemoryStorage_UpdateIssue_CoversMoreFields updates every remaining
// scalar field through UpdateIssue's map interface and verifies the values
// land, then checks that re-closing an already-closed issue leaves
// ClosedAt intact.
func TestMemoryStorage_UpdateIssue_CoversMoreFields(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	iss := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}

	// One update touching text fields, priority, type, assignee, status.
	if err := store.UpdateIssue(ctx, iss.ID, map[string]interface{}{
		"description":         "d",
		"design":              "design",
		"acceptance_criteria": "ac",
		"notes":               "n",
		"priority":            2,
		"issue_type":          string(types.TypeBug),
		"assignee":            "bob",
		"status":              string(types.StatusInProgress),
	}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}

	got, _ := store.GetIssue(ctx, iss.ID)
	if got.Description != "d" || got.Design != "design" || got.AcceptanceCriteria != "ac" || got.Notes != "n" {
		t.Fatalf("expected text fields updated")
	}
	if got.Priority != 2 || got.IssueType != types.TypeBug || got.Assignee != "bob" || got.Status != types.StatusInProgress {
		t.Fatalf("expected fields updated")
	}

	// Status closed when already closed should not clear ClosedAt.
	if err := store.CloseIssue(ctx, iss.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}
	closedOnce, _ := store.GetIssue(ctx, iss.ID)
	if closedOnce.ClosedAt == nil {
		t.Fatalf("expected ClosedAt")
	}
	if err := store.UpdateIssue(ctx, iss.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
		t.Fatalf("UpdateIssue closed->closed: %v", err)
	}
	closedTwice, _ := store.GetIssue(ctx, iss.ID)
	if closedTwice.ClosedAt == nil {
		t.Fatalf("expected ClosedAt preserved")
	}
}
|
||||
|
||||
// TestMemoryStorage_CountEpicsEligibleForClosure_CoversBranches walks the
// skip branches of the eligible-epic counter: a parent-child dependency on
// a closed epic, on a non-epic, and on a missing issue must all be ignored,
// leaving exactly one eligible open epic.
func TestMemoryStorage_CountEpicsEligibleForClosure_CoversBranches(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	ep1 := &types.Issue{ID: "bd-1", Title: "Epic1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	epClosed := &types.Issue{ID: "bd-2", Title: "EpicClosed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	nonEpic := &types.Issue{ID: "bd-3", Title: "NotEpic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	c := &types.Issue{ID: "bd-4", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{ep1, epClosed, nonEpic, c} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, epClosed.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}
	// Child -> ep1 (eligible once child is closed).
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: c.ID, DependsOnID: ep1.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	// Child -> nonEpic should not count.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: c.ID, DependsOnID: nonEpic.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	// Child -> missing epic should not count.
	// (Inserted directly into the map to bypass AddDependency validation.)
	store.mu.Lock()
	store.dependencies[c.ID] = append(store.dependencies[c.ID], &types.Dependency{IssueID: c.ID, DependsOnID: "bd-missing", Type: types.DepParentChild})
	store.mu.Unlock()

	// Close child to make ep1 eligible.
	if err := store.CloseIssue(ctx, c.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue child: %v", err)
	}

	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics: %v", err)
	}
	if stats.EpicsEligibleForClosure != 1 {
		t.Fatalf("expected 1 eligible epic, got %d", stats.EpicsEligibleForClosure)
	}
}
|
||||
|
||||
func TestExtractParentAndChildNumber_CoversFailures(t *testing.T) {
|
||||
if _, _, ok := extractParentAndChildNumber("no-dot"); ok {
|
||||
t.Fatalf("expected ok=false")
|
||||
}
|
||||
if _, _, ok := extractParentAndChildNumber("parent.bad"); ok {
|
||||
t.Fatalf("expected ok=false")
|
||||
}
|
||||
}
|
||||
@@ -124,7 +124,7 @@ func TestGetBlockedIssues_IncludesExplicitlyBlockedStatus(t *testing.T) {
|
||||
t.Fatalf("AddDependency failed: %v", err)
|
||||
}
|
||||
|
||||
blocked, err := store.GetBlockedIssues(ctx)
|
||||
blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
|
||||
@@ -247,7 +247,7 @@ func (s *SQLiteStorage) GetDependenciesWithMetadata(ctx context.Context, issueID
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters,
|
||||
@@ -270,7 +270,7 @@ func (s *SQLiteStorage) GetDependentsWithMetadata(ctx context.Context, issueID s
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters,
|
||||
@@ -484,7 +484,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
0 as depth,
|
||||
i.id as path,
|
||||
@@ -497,7 +497,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
t.depth + 1,
|
||||
t.path || '→' || i.id,
|
||||
@@ -525,7 +525,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
0 as depth,
|
||||
i.id as path,
|
||||
@@ -538,7 +538,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
t.depth + 1,
|
||||
t.path || '→' || i.id,
|
||||
@@ -839,7 +839,7 @@ func (s *SQLiteStorage) scanIssues(ctx context.Context, rows *sql.Rows) ([]*type
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &closeReason,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
&awaitType, &awaitID, &timeoutNs, &waiters,
|
||||
@@ -885,7 +885,7 @@ func (s *SQLiteStorage) scanIssues(ctx context.Context, rows *sql.Rows) ([]*type
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Wisp = true
|
||||
issue.Ephemeral = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -962,7 +962,7 @@ func (s *SQLiteStorage) scanIssuesWithDependencyType(ctx context.Context, rows *
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
&awaitType, &awaitID, &timeoutNs, &waiters,
|
||||
@@ -1006,7 +1006,7 @@ func (s *SQLiteStorage) scanIssuesWithDependencyType(ctx context.Context, rows *
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Wisp = true
|
||||
issue.Ephemeral = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
|
||||
@@ -295,7 +295,7 @@ func TestRepliesTo(t *testing.T) {
|
||||
IssueType: types.TypeMessage,
|
||||
Sender: "alice",
|
||||
Assignee: "bob",
|
||||
Wisp: true,
|
||||
Ephemeral: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -307,7 +307,7 @@ func TestRepliesTo(t *testing.T) {
|
||||
IssueType: types.TypeMessage,
|
||||
Sender: "bob",
|
||||
Assignee: "alice",
|
||||
Wisp: true,
|
||||
Ephemeral: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -363,7 +363,7 @@ func TestRepliesTo_Chain(t *testing.T) {
|
||||
IssueType: types.TypeMessage,
|
||||
Sender: "user",
|
||||
Assignee: "inbox",
|
||||
Wisp: true,
|
||||
Ephemeral: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -415,7 +415,7 @@ func TestWispField(t *testing.T) {
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeMessage,
|
||||
Wisp: true,
|
||||
Ephemeral: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -426,7 +426,7 @@ func TestWispField(t *testing.T) {
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
Wisp: false,
|
||||
Ephemeral: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -443,7 +443,7 @@ func TestWispField(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssue failed: %v", err)
|
||||
}
|
||||
if !savedWisp.Wisp {
|
||||
if !savedWisp.Ephemeral {
|
||||
t.Error("Wisp issue should have Wisp=true")
|
||||
}
|
||||
|
||||
@@ -451,7 +451,7 @@ func TestWispField(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssue failed: %v", err)
|
||||
}
|
||||
if savedPermanent.Wisp {
|
||||
if savedPermanent.Ephemeral {
|
||||
t.Error("Permanent issue should have Wisp=false")
|
||||
}
|
||||
}
|
||||
@@ -468,7 +468,7 @@ func TestWispFilter(t *testing.T) {
|
||||
Status: types.StatusClosed, // Closed for cleanup test
|
||||
Priority: 2,
|
||||
IssueType: types.TypeMessage,
|
||||
Wisp: true,
|
||||
Ephemeral: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -483,7 +483,7 @@ func TestWispFilter(t *testing.T) {
|
||||
Status: types.StatusClosed,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
Wisp: false,
|
||||
Ephemeral: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -497,7 +497,7 @@ func TestWispFilter(t *testing.T) {
|
||||
closedStatus := types.StatusClosed
|
||||
wispFilter := types.IssueFilter{
|
||||
Status: &closedStatus,
|
||||
Wisp: &wispTrue,
|
||||
Ephemeral: &wispTrue,
|
||||
}
|
||||
|
||||
wispIssues, err := store.SearchIssues(ctx, "", wispFilter)
|
||||
@@ -512,7 +512,7 @@ func TestWispFilter(t *testing.T) {
|
||||
wispFalse := false
|
||||
nonWispFilter := types.IssueFilter{
|
||||
Status: &closedStatus,
|
||||
Wisp: &wispFalse,
|
||||
Ephemeral: &wispFalse,
|
||||
}
|
||||
|
||||
permanentIssues, err := store.SearchIssues(ctx, "", nonWispFilter)
|
||||
|
||||
@@ -28,7 +28,7 @@ func insertIssue(ctx context.Context, conn *sql.Conn, issue *types.Issue) error
|
||||
}
|
||||
|
||||
wisp := 0
|
||||
if issue.Wisp {
|
||||
if issue.Ephemeral {
|
||||
wisp = 1
|
||||
}
|
||||
pinned := 0
|
||||
@@ -44,16 +44,16 @@ func insertIssue(ctx context.Context, conn *sql.Conn, issue *types.Issue) error
|
||||
INSERT OR IGNORE INTO issues (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
created_at, created_by, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`,
|
||||
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
||||
issue.AcceptanceCriteria, issue.Notes, issue.Status,
|
||||
issue.Priority, issue.IssueType, issue.Assignee,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.CreatedBy, issue.UpdatedAt,
|
||||
issue.ClosedAt, issue.ExternalRef, sourceRepo, issue.CloseReason,
|
||||
issue.DeletedAt, issue.DeletedBy, issue.DeleteReason, issue.OriginalType,
|
||||
issue.Sender, wisp, pinned, isTemplate,
|
||||
@@ -76,11 +76,11 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
|
||||
INSERT OR IGNORE INTO issues (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
created_at, created_by, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare statement: %w", err)
|
||||
@@ -94,7 +94,7 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
|
||||
}
|
||||
|
||||
wisp := 0
|
||||
if issue.Wisp {
|
||||
if issue.Ephemeral {
|
||||
wisp = 1
|
||||
}
|
||||
pinned := 0
|
||||
@@ -110,7 +110,7 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
|
||||
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
||||
issue.AcceptanceCriteria, issue.Notes, issue.Status,
|
||||
issue.Priority, issue.IssueType, issue.Assignee,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.CreatedBy, issue.UpdatedAt,
|
||||
issue.ClosedAt, issue.ExternalRef, sourceRepo, issue.CloseReason,
|
||||
issue.DeletedAt, issue.DeletedBy, issue.DeleteReason, issue.OriginalType,
|
||||
issue.Sender, wisp, pinned, isTemplate,
|
||||
|
||||
@@ -157,7 +157,7 @@ func (s *SQLiteStorage) GetIssuesByLabel(ctx context.Context, label string) ([]*
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters
|
||||
|
||||
@@ -44,6 +44,8 @@ var migrationsList = []Migration{
|
||||
{"remove_depends_on_fk", migrations.MigrateRemoveDependsOnFK},
|
||||
{"additional_indexes", migrations.MigrateAdditionalIndexes},
|
||||
{"gate_columns", migrations.MigrateGateColumns},
|
||||
{"tombstone_closed_at", migrations.MigrateTombstoneClosedAt},
|
||||
{"created_by_column", migrations.MigrateCreatedByColumn},
|
||||
}
|
||||
|
||||
// MigrationInfo contains metadata about a migration for inspection
|
||||
|
||||
@@ -20,10 +20,6 @@ func MigrateMessagingFields(db *sql.DB) error {
|
||||
}{
|
||||
{"sender", "TEXT DEFAULT ''"},
|
||||
{"ephemeral", "INTEGER DEFAULT 0"},
|
||||
{"replies_to", "TEXT DEFAULT ''"},
|
||||
{"relates_to", "TEXT DEFAULT ''"},
|
||||
{"duplicate_of", "TEXT DEFAULT ''"},
|
||||
{"superseded_by", "TEXT DEFAULT ''"},
|
||||
}
|
||||
|
||||
for _, col := range columns {
|
||||
@@ -59,11 +55,5 @@ func MigrateMessagingFields(db *sql.DB) error {
|
||||
return fmt.Errorf("failed to create sender index: %w", err)
|
||||
}
|
||||
|
||||
// Add index for replies_to (for efficient thread queries)
|
||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_replies_to ON issues(replies_to) WHERE replies_to != ''`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create replies_to index: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -21,137 +21,176 @@ import (
|
||||
func MigrateEdgeFields(db *sql.DB) error {
|
||||
now := time.Now()
|
||||
|
||||
hasColumn := func(name string) (bool, error) {
|
||||
var exists bool
|
||||
err := db.QueryRow(`
|
||||
SELECT COUNT(*) > 0
|
||||
FROM pragma_table_info('issues')
|
||||
WHERE name = ?
|
||||
`, name).Scan(&exists)
|
||||
return exists, err
|
||||
}
|
||||
|
||||
hasRepliesTo, err := hasColumn("replies_to")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check replies_to column: %w", err)
|
||||
}
|
||||
hasRelatesTo, err := hasColumn("relates_to")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check relates_to column: %w", err)
|
||||
}
|
||||
hasDuplicateOf, err := hasColumn("duplicate_of")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check duplicate_of column: %w", err)
|
||||
}
|
||||
hasSupersededBy, err := hasColumn("superseded_by")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check superseded_by column: %w", err)
|
||||
}
|
||||
|
||||
if !hasRepliesTo && !hasRelatesTo && !hasDuplicateOf && !hasSupersededBy {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Migrate replies_to fields to replies-to edges
|
||||
// For thread_id, use the parent's ID as the thread root for first-level replies
|
||||
// (more sophisticated thread detection would require recursive queries)
|
||||
rows, err := db.Query(`
|
||||
SELECT id, replies_to
|
||||
FROM issues
|
||||
WHERE replies_to != '' AND replies_to IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query replies_to fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, repliesTo string
|
||||
if err := rows.Scan(&issueID, &repliesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan replies_to row: %w", err)
|
||||
}
|
||||
|
||||
// Use repliesTo as thread_id (the root of the thread)
|
||||
// This is a simplification - existing threads will have the parent as thread root
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
|
||||
`, issueID, repliesTo, now, repliesTo)
|
||||
if hasRepliesTo {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, replies_to
|
||||
FROM issues
|
||||
WHERE replies_to != '' AND replies_to IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
|
||||
return fmt.Errorf("failed to query replies_to fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, repliesTo string
|
||||
if err := rows.Scan(&issueID, &repliesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan replies_to row: %w", err)
|
||||
}
|
||||
|
||||
// Use repliesTo as thread_id (the root of the thread)
|
||||
// This is a simplification - existing threads will have the parent as thread root
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
|
||||
`, issueID, repliesTo, now, repliesTo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating replies_to rows: %w", err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating replies_to rows: %w", err)
|
||||
}
|
||||
|
||||
// Migrate relates_to fields to relates-to edges
|
||||
// relates_to is stored as JSON array string
|
||||
rows, err = db.Query(`
|
||||
SELECT id, relates_to
|
||||
FROM issues
|
||||
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query relates_to fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, relatesTo string
|
||||
if err := rows.Scan(&issueID, &relatesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan relates_to row: %w", err)
|
||||
if hasRelatesTo {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, relates_to
|
||||
FROM issues
|
||||
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query relates_to fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
// Parse JSON array
|
||||
var relatedIDs []string
|
||||
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
|
||||
// Skip malformed JSON
|
||||
continue
|
||||
}
|
||||
for rows.Next() {
|
||||
var issueID, relatesTo string
|
||||
if err := rows.Scan(&issueID, &relatesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan relates_to row: %w", err)
|
||||
}
|
||||
|
||||
for _, relatedID := range relatedIDs {
|
||||
if relatedID == "" {
|
||||
// Parse JSON array
|
||||
var relatedIDs []string
|
||||
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
|
||||
// Skip malformed JSON
|
||||
continue
|
||||
}
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
|
||||
`, issueID, relatedID, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
|
||||
|
||||
for _, relatedID := range relatedIDs {
|
||||
if relatedID == "" {
|
||||
continue
|
||||
}
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
|
||||
`, issueID, relatedID, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating relates_to rows: %w", err)
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating relates_to rows: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Migrate duplicate_of fields to duplicates edges
|
||||
rows, err = db.Query(`
|
||||
SELECT id, duplicate_of
|
||||
FROM issues
|
||||
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, duplicateOf string
|
||||
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
|
||||
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
|
||||
`, issueID, duplicateOf, now)
|
||||
if hasDuplicateOf {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, duplicate_of
|
||||
FROM issues
|
||||
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
|
||||
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, duplicateOf string
|
||||
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
|
||||
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
|
||||
`, issueID, duplicateOf, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
|
||||
}
|
||||
|
||||
// Migrate superseded_by fields to supersedes edges
|
||||
rows, err = db.Query(`
|
||||
SELECT id, superseded_by
|
||||
FROM issues
|
||||
WHERE superseded_by != '' AND superseded_by IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query superseded_by fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, supersededBy string
|
||||
if err := rows.Scan(&issueID, &supersededBy); err != nil {
|
||||
return fmt.Errorf("failed to scan superseded_by row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
|
||||
`, issueID, supersededBy, now)
|
||||
if hasSupersededBy {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, superseded_by
|
||||
FROM issues
|
||||
WHERE superseded_by != '' AND superseded_by IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
|
||||
return fmt.Errorf("failed to query superseded_by fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, supersededBy string
|
||||
if err := rows.Scan(&issueID, &supersededBy); err != nil {
|
||||
return fmt.Errorf("failed to scan superseded_by row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
|
||||
`, issueID, supersededBy, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating superseded_by rows: %w", err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating superseded_by rows: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -57,6 +57,57 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Preserve newer columns if they already exist (migration may run on partially-migrated DBs).
|
||||
hasPinned, err := checkCol("pinned")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check pinned column: %w", err)
|
||||
}
|
||||
hasIsTemplate, err := checkCol("is_template")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check is_template column: %w", err)
|
||||
}
|
||||
hasAwaitType, err := checkCol("await_type")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check await_type column: %w", err)
|
||||
}
|
||||
hasAwaitID, err := checkCol("await_id")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check await_id column: %w", err)
|
||||
}
|
||||
hasTimeoutNs, err := checkCol("timeout_ns")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check timeout_ns column: %w", err)
|
||||
}
|
||||
hasWaiters, err := checkCol("waiters")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check waiters column: %w", err)
|
||||
}
|
||||
|
||||
pinnedExpr := "0"
|
||||
if hasPinned {
|
||||
pinnedExpr = "pinned"
|
||||
}
|
||||
isTemplateExpr := "0"
|
||||
if hasIsTemplate {
|
||||
isTemplateExpr = "is_template"
|
||||
}
|
||||
awaitTypeExpr := "''"
|
||||
if hasAwaitType {
|
||||
awaitTypeExpr = "await_type"
|
||||
}
|
||||
awaitIDExpr := "''"
|
||||
if hasAwaitID {
|
||||
awaitIDExpr = "await_id"
|
||||
}
|
||||
timeoutNsExpr := "0"
|
||||
if hasTimeoutNs {
|
||||
timeoutNsExpr = "timeout_ns"
|
||||
}
|
||||
waitersExpr := "''"
|
||||
if hasWaiters {
|
||||
waitersExpr = "waiters"
|
||||
}
|
||||
|
||||
// SQLite 3.35.0+ supports DROP COLUMN, but we use table recreation for compatibility
|
||||
// This is idempotent - we recreate the table without the deprecated columns
|
||||
|
||||
@@ -117,6 +168,12 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
original_type TEXT DEFAULT '',
|
||||
sender TEXT DEFAULT '',
|
||||
ephemeral INTEGER DEFAULT 0,
|
||||
pinned INTEGER DEFAULT 0,
|
||||
is_template INTEGER DEFAULT 0,
|
||||
await_type TEXT,
|
||||
await_id TEXT,
|
||||
timeout_ns INTEGER,
|
||||
waiters TEXT,
|
||||
close_reason TEXT DEFAULT '',
|
||||
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
)
|
||||
@@ -132,7 +189,8 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
notes, status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at,
|
||||
deleted_by, delete_reason, original_type, sender, ephemeral, close_reason
|
||||
deleted_by, delete_reason, original_type, sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters, close_reason
|
||||
)
|
||||
SELECT
|
||||
id, content_hash, title, description, design, acceptance_criteria,
|
||||
@@ -140,9 +198,11 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
created_at, updated_at, closed_at, external_ref, COALESCE(source_repo, ''), compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at,
|
||||
deleted_by, delete_reason, original_type, sender, ephemeral,
|
||||
%s, %s,
|
||||
%s, %s, %s, %s,
|
||||
COALESCE(close_reason, '')
|
||||
FROM issues
|
||||
`)
|
||||
`, pinnedExpr, isTemplateExpr, awaitTypeExpr, awaitIDExpr, timeoutNsExpr, waitersExpr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy issues data: %w", err)
|
||||
}
|
||||
|
||||
@@ -20,6 +20,11 @@ func MigratePinnedColumn(db *sql.DB) error {
|
||||
}
|
||||
|
||||
if columnExists {
|
||||
// Column exists (e.g. created by new schema); ensure index exists.
|
||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create pinned index: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -21,6 +21,11 @@ func MigrateIsTemplateColumn(db *sql.DB) error {
|
||||
}
|
||||
|
||||
if columnExists {
|
||||
// Column exists (e.g. created by new schema); ensure index exists.
|
||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create is_template index: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
251
internal/storage/sqlite/migrations/028_tombstone_closed_at.go
Normal file
251
internal/storage/sqlite/migrations/028_tombstone_closed_at.go
Normal file
@@ -0,0 +1,251 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MigrateTombstoneClosedAt updates the closed_at constraint to allow tombstones
|
||||
// to retain their closed_at timestamp from before deletion.
|
||||
//
|
||||
// Previously: CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
// - This required clearing closed_at when creating tombstones from closed issues
|
||||
//
|
||||
// Now: CHECK (closed + tombstone OR non-closed/tombstone with no closed_at)
|
||||
// - closed issues must have closed_at
|
||||
// - tombstones may have closed_at (from before deletion) or not
|
||||
// - other statuses must NOT have closed_at
|
||||
//
|
||||
// This allows importing tombstones that were closed before being deleted,
|
||||
// preserving the historical closed_at timestamp for audit purposes.
|
||||
func MigrateTombstoneClosedAt(db *sql.DB) error {
|
||||
// SQLite doesn't support ALTER TABLE to modify CHECK constraints
|
||||
// We must recreate the table with the new constraint
|
||||
|
||||
// Idempotency check: see if the new CHECK constraint already exists
|
||||
// The new constraint contains "status = 'tombstone'" which the old one didn't
|
||||
var tableSql string
|
||||
err := db.QueryRow(`SELECT sql FROM sqlite_master WHERE type='table' AND name='issues'`).Scan(&tableSql)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get issues table schema: %w", err)
|
||||
}
|
||||
// If the schema already has the tombstone clause, migration is already applied
|
||||
if strings.Contains(tableSql, "status = 'tombstone'") || strings.Contains(tableSql, `status = "tombstone"`) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Step 0: Drop views that depend on the issues table
|
||||
_, err = db.Exec(`DROP VIEW IF EXISTS ready_issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to drop ready_issues view: %w", err)
|
||||
}
|
||||
_, err = db.Exec(`DROP VIEW IF EXISTS blocked_issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to drop blocked_issues view: %w", err)
|
||||
}
|
||||
|
||||
// Step 1: Create new table with updated constraint
|
||||
_, err = db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS issues_new (
|
||||
id TEXT PRIMARY KEY,
|
||||
content_hash TEXT,
|
||||
title TEXT NOT NULL CHECK(length(title) <= 500),
|
||||
description TEXT NOT NULL DEFAULT '',
|
||||
design TEXT NOT NULL DEFAULT '',
|
||||
acceptance_criteria TEXT NOT NULL DEFAULT '',
|
||||
notes TEXT NOT NULL DEFAULT '',
|
||||
status TEXT NOT NULL DEFAULT 'open',
|
||||
priority INTEGER NOT NULL DEFAULT 2 CHECK(priority >= 0 AND priority <= 4),
|
||||
issue_type TEXT NOT NULL DEFAULT 'task',
|
||||
assignee TEXT,
|
||||
estimated_minutes INTEGER,
|
||||
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
created_by TEXT DEFAULT '',
|
||||
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
closed_at DATETIME,
|
||||
external_ref TEXT,
|
||||
source_repo TEXT DEFAULT '',
|
||||
compaction_level INTEGER DEFAULT 0,
|
||||
compacted_at DATETIME,
|
||||
compacted_at_commit TEXT,
|
||||
original_size INTEGER,
|
||||
deleted_at DATETIME,
|
||||
deleted_by TEXT DEFAULT '',
|
||||
delete_reason TEXT DEFAULT '',
|
||||
original_type TEXT DEFAULT '',
|
||||
sender TEXT DEFAULT '',
|
||||
ephemeral INTEGER DEFAULT 0,
|
||||
close_reason TEXT DEFAULT '',
|
||||
pinned INTEGER DEFAULT 0,
|
||||
is_template INTEGER DEFAULT 0,
|
||||
await_type TEXT,
|
||||
await_id TEXT,
|
||||
timeout_ns INTEGER,
|
||||
waiters TEXT,
|
||||
CHECK (
|
||||
(status = 'closed' AND closed_at IS NOT NULL) OR
|
||||
(status = 'tombstone') OR
|
||||
(status NOT IN ('closed', 'tombstone') AND closed_at IS NULL)
|
||||
)
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new issues table: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Copy data from old table to new table
|
||||
// We need to check if created_by column exists in the old table
|
||||
// If not, we insert a default empty string for it
|
||||
var hasCreatedBy bool
|
||||
rows, err := db.Query(`PRAGMA table_info(issues)`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get table info: %w", err)
|
||||
}
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name, ctype string
|
||||
var notnull, pk int
|
||||
var dflt interface{}
|
||||
if err := rows.Scan(&cid, &name, &ctype, ¬null, &dflt, &pk); err != nil {
|
||||
rows.Close()
|
||||
return fmt.Errorf("failed to scan table info: %w", err)
|
||||
}
|
||||
if name == "created_by" {
|
||||
hasCreatedBy = true
|
||||
break
|
||||
}
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
var insertSQL string
|
||||
if hasCreatedBy {
|
||||
// Old table has created_by, copy all columns directly
|
||||
insertSQL = `
|
||||
INSERT INTO issues_new (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
)
|
||||
SELECT
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
FROM issues
|
||||
`
|
||||
} else {
|
||||
// Old table doesn't have created_by, use empty string default
|
||||
insertSQL = `
|
||||
INSERT INTO issues_new (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
)
|
||||
SELECT
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
'', updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
FROM issues
|
||||
`
|
||||
}
|
||||
|
||||
_, err = db.Exec(insertSQL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy issues data: %w", err)
|
||||
}
|
||||
|
||||
// Step 3: Drop old table
|
||||
_, err = db.Exec(`DROP TABLE issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to drop old issues table: %w", err)
|
||||
}
|
||||
|
||||
// Step 4: Rename new table to original name
|
||||
_, err = db.Exec(`ALTER TABLE issues_new RENAME TO issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to rename new issues table: %w", err)
|
||||
}
|
||||
|
||||
// Step 5: Recreate indexes (they were dropped with the table)
|
||||
indexes := []string{
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_priority ON issues(priority)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_assignee ON issues(assignee)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_created_at ON issues(created_at)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_external_ref ON issues(external_ref) WHERE external_ref IS NOT NULL`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_updated_at ON issues(updated_at)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_status_priority ON issues(status, priority)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_gate ON issues(issue_type) WHERE issue_type = 'gate'`,
|
||||
}
|
||||
|
||||
for _, idx := range indexes {
|
||||
if _, err := db.Exec(idx); err != nil {
|
||||
return fmt.Errorf("failed to create index: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 6: Recreate views that we dropped
|
||||
_, err = db.Exec(`
|
||||
CREATE VIEW IF NOT EXISTS ready_issues AS
|
||||
WITH RECURSIVE
|
||||
blocked_directly AS (
|
||||
SELECT DISTINCT d.issue_id
|
||||
FROM dependencies d
|
||||
JOIN issues blocker ON d.depends_on_id = blocker.id
|
||||
WHERE d.type = 'blocks'
|
||||
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
),
|
||||
blocked_transitively AS (
|
||||
SELECT issue_id, 0 as depth
|
||||
FROM blocked_directly
|
||||
UNION ALL
|
||||
SELECT d.issue_id, bt.depth + 1
|
||||
FROM blocked_transitively bt
|
||||
JOIN dependencies d ON d.depends_on_id = bt.issue_id
|
||||
WHERE d.type = 'parent-child'
|
||||
AND bt.depth < 50
|
||||
)
|
||||
SELECT i.*
|
||||
FROM issues i
|
||||
WHERE i.status = 'open'
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to recreate ready_issues view: %w", err)
|
||||
}
|
||||
|
||||
_, err = db.Exec(`
|
||||
CREATE VIEW IF NOT EXISTS blocked_issues AS
|
||||
SELECT
|
||||
i.*,
|
||||
COUNT(d.depends_on_id) as blocked_by_count
|
||||
FROM issues i
|
||||
JOIN dependencies d ON i.id = d.issue_id
|
||||
JOIN issues blocker ON d.depends_on_id = blocker.id
|
||||
WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
AND d.type = 'blocks'
|
||||
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
GROUP BY i.id
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to recreate blocked_issues view: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
34
internal/storage/sqlite/migrations/029_created_by_column.go
Normal file
34
internal/storage/sqlite/migrations/029_created_by_column.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// MigrateCreatedByColumn adds the created_by column to the issues table.
|
||||
// This tracks who created the issue, using the same actor chain as comment authors
|
||||
// (--actor flag, BD_ACTOR env, or $USER). GH#748.
|
||||
func MigrateCreatedByColumn(db *sql.DB) error {
|
||||
// Check if column already exists
|
||||
var columnExists bool
|
||||
err := db.QueryRow(`
|
||||
SELECT COUNT(*) > 0
|
||||
FROM pragma_table_info('issues')
|
||||
WHERE name = 'created_by'
|
||||
`).Scan(&columnExists)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check created_by column: %w", err)
|
||||
}
|
||||
|
||||
if columnExists {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the created_by column
|
||||
_, err = db.Exec(`ALTER TABLE issues ADD COLUMN created_by TEXT DEFAULT ''`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add created_by column: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,59 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
func TestRunMigrations_DoesNotResetPinnedOrTemplate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
dir := t.TempDir()
|
||||
dbPath := filepath.Join(dir, "beads.db")
|
||||
|
||||
s, err := New(ctx, dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("New: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = s.Close() })
|
||||
|
||||
if err := s.SetConfig(ctx, "issue_prefix", "test"); err != nil {
|
||||
t.Fatalf("SetConfig(issue_prefix): %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{
|
||||
Title: "Pinned template",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
Pinned: true,
|
||||
IsTemplate: true,
|
||||
}
|
||||
if err := s.CreateIssue(ctx, issue, "test-user"); err != nil {
|
||||
t.Fatalf("CreateIssue: %v", err)
|
||||
}
|
||||
|
||||
_ = s.Close()
|
||||
|
||||
s2, err := New(ctx, dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("New(reopen): %v", err)
|
||||
}
|
||||
defer func() { _ = s2.Close() }()
|
||||
|
||||
got, err := s2.GetIssue(ctx, issue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssue: %v", err)
|
||||
}
|
||||
if got == nil {
|
||||
t.Fatalf("expected issue to exist")
|
||||
}
|
||||
if !got.Pinned {
|
||||
t.Fatalf("expected issue to remain pinned")
|
||||
}
|
||||
if !got.IsTemplate {
|
||||
t.Fatalf("expected issue to remain template")
|
||||
}
|
||||
}
|
||||
@@ -470,6 +470,7 @@ func TestMigrateContentHashColumn(t *testing.T) {
|
||||
assignee TEXT,
|
||||
estimated_minutes INTEGER,
|
||||
created_at DATETIME NOT NULL,
|
||||
created_by TEXT DEFAULT '',
|
||||
updated_at DATETIME NOT NULL,
|
||||
closed_at DATETIME,
|
||||
external_ref TEXT,
|
||||
@@ -497,7 +498,7 @@ func TestMigrateContentHashColumn(t *testing.T) {
|
||||
waiters TEXT DEFAULT '',
|
||||
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
);
|
||||
INSERT INTO issues SELECT id, title, description, design, acceptance_criteria, notes, status, priority, issue_type, assignee, estimated_minutes, created_at, updated_at, closed_at, external_ref, compaction_level, compacted_at, original_size, compacted_at_commit, source_repo, '', NULL, '', '', '', '', 0, 0, 0, '', '', '', '', '', '', 0, '' FROM issues_backup;
|
||||
INSERT INTO issues SELECT id, title, description, design, acceptance_criteria, notes, status, priority, issue_type, assignee, estimated_minutes, created_at, '', updated_at, closed_at, external_ref, compaction_level, compacted_at, original_size, compacted_at_commit, source_repo, '', NULL, '', '', '', '', 0, 0, 0, '', '', '', '', '', '', 0, '' FROM issues_backup;
|
||||
DROP TABLE issues_backup;
|
||||
`)
|
||||
if err != nil {
|
||||
|
||||
@@ -282,7 +282,7 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
|
||||
err := tx.QueryRowContext(ctx, `SELECT id FROM issues WHERE id = ?`, issue.ID).Scan(&existingID)
|
||||
|
||||
wisp := 0
|
||||
if issue.Wisp {
|
||||
if issue.Ephemeral {
|
||||
wisp = 1
|
||||
}
|
||||
pinned := 0
|
||||
@@ -330,9 +330,23 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
|
||||
}
|
||||
|
||||
if existingHash != issue.ContentHash {
|
||||
// Pinned field fix (bd-phtv): Use COALESCE(NULLIF(?, 0), pinned) to preserve
|
||||
// existing pinned=1 when incoming pinned=0 (which means field was absent in
|
||||
// JSONL due to omitempty). This prevents auto-import from resetting pinned issues.
|
||||
// Clone-local field protection pattern (bd-phtv, bd-gr4q):
|
||||
//
|
||||
// Some fields are clone-local state that shouldn't be overwritten by JSONL import:
|
||||
// - pinned: Local hook attachment (not synced between clones)
|
||||
// - await_type, await_id, timeout_ns, waiters: Gate state (wisps, never exported)
|
||||
//
|
||||
// Problem: Go's omitempty causes zero values to be absent from JSONL.
|
||||
// When importing, absent fields unmarshal as zero, which would overwrite local state.
|
||||
//
|
||||
// Solution: COALESCE(NULLIF(incoming, zero_value), existing_column)
|
||||
// - For strings: COALESCE(NULLIF(?, ''), column) -- preserve if incoming is ""
|
||||
// - For integers: COALESCE(NULLIF(?, 0), column) -- preserve if incoming is 0
|
||||
//
|
||||
// When to use this pattern:
|
||||
// 1. Field is clone-local (not part of shared issue ledger)
|
||||
// 2. Field uses omitempty (so zero value means "absent", not "clear")
|
||||
// 3. Accidental clearing would cause data loss or incorrect behavior
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
UPDATE issues SET
|
||||
content_hash = ?, title = ?, description = ?, design = ?,
|
||||
@@ -341,7 +355,10 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
|
||||
updated_at = ?, closed_at = ?, external_ref = ?, source_repo = ?,
|
||||
deleted_at = ?, deleted_by = ?, delete_reason = ?, original_type = ?,
|
||||
sender = ?, ephemeral = ?, pinned = COALESCE(NULLIF(?, 0), pinned), is_template = ?,
|
||||
await_type = ?, await_id = ?, timeout_ns = ?, waiters = ?
|
||||
await_type = COALESCE(NULLIF(?, ''), await_type),
|
||||
await_id = COALESCE(NULLIF(?, ''), await_id),
|
||||
timeout_ns = COALESCE(NULLIF(?, 0), timeout_ns),
|
||||
waiters = COALESCE(NULLIF(?, ''), waiters)
|
||||
WHERE id = ?
|
||||
`,
|
||||
issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
||||
|
||||
@@ -54,7 +54,7 @@ func (s *SQLiteStorage) ExportToMultiRepo(ctx context.Context) (map[string]int,
|
||||
// Wisps exist only in SQLite and are shared via .beads/redirect, not JSONL.
|
||||
filtered := make([]*types.Issue, 0, len(allIssues))
|
||||
for _, issue := range allIssues {
|
||||
if !issue.Wisp {
|
||||
if !issue.Ephemeral {
|
||||
filtered = append(filtered, issue)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -892,3 +892,108 @@ func TestExportToMultiRepo(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestUpsertPreservesGateFields tests that gate await fields are preserved during upsert (bd-gr4q).
|
||||
// Gates are wisps and aren't exported to JSONL. When an issue with the same ID is imported,
|
||||
// the await fields should NOT be cleared.
|
||||
func TestUpsertPreservesGateFields(t *testing.T) {
|
||||
store, cleanup := setupTestDB(t)
|
||||
defer cleanup()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a gate with await fields directly in the database
|
||||
gate := &types.Issue{
|
||||
ID: "bd-gate1",
|
||||
Title: "Test Gate",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeGate,
|
||||
Ephemeral: true,
|
||||
AwaitType: "gh:run",
|
||||
AwaitID: "123456789",
|
||||
Timeout: 30 * 60 * 1000000000, // 30 minutes in nanoseconds
|
||||
Waiters: []string{"beads/dave"},
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
gate.ContentHash = gate.ComputeContentHash()
|
||||
|
||||
if err := store.CreateIssue(ctx, gate, "test"); err != nil {
|
||||
t.Fatalf("failed to create gate: %v", err)
|
||||
}
|
||||
|
||||
// Verify gate was created with await fields
|
||||
retrieved, err := store.GetIssue(ctx, gate.ID)
|
||||
if err != nil || retrieved == nil {
|
||||
t.Fatalf("failed to get gate: %v", err)
|
||||
}
|
||||
if retrieved.AwaitType != "gh:run" {
|
||||
t.Errorf("expected AwaitType=gh:run, got %q", retrieved.AwaitType)
|
||||
}
|
||||
if retrieved.AwaitID != "123456789" {
|
||||
t.Errorf("expected AwaitID=123456789, got %q", retrieved.AwaitID)
|
||||
}
|
||||
|
||||
// Create a JSONL file with an issue that has the same ID but no await fields
|
||||
// (simulating what happens when a non-gate issue is imported)
|
||||
tmpDir := t.TempDir()
|
||||
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
|
||||
f, err := os.Create(jsonlPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create JSONL file: %v", err)
|
||||
}
|
||||
|
||||
// Same ID, different content (to trigger update), no await fields
|
||||
incomingIssue := types.Issue{
|
||||
ID: "bd-gate1",
|
||||
Title: "Test Gate Updated", // Different title to trigger update
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeGate,
|
||||
AwaitType: "", // Empty - simulating JSONL without await fields
|
||||
AwaitID: "", // Empty
|
||||
Timeout: 0,
|
||||
Waiters: nil,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now().Add(time.Second), // Newer timestamp
|
||||
}
|
||||
incomingIssue.ContentHash = incomingIssue.ComputeContentHash()
|
||||
|
||||
enc := json.NewEncoder(f)
|
||||
if err := enc.Encode(incomingIssue); err != nil {
|
||||
t.Fatalf("failed to encode issue: %v", err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
// Import the JSONL file (this should NOT clear the await fields)
|
||||
_, err = store.importJSONLFile(ctx, jsonlPath, "test")
|
||||
if err != nil {
|
||||
t.Fatalf("importJSONLFile failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify await fields are preserved
|
||||
updated, err := store.GetIssue(ctx, gate.ID)
|
||||
if err != nil || updated == nil {
|
||||
t.Fatalf("failed to get updated gate: %v", err)
|
||||
}
|
||||
|
||||
// Title should be updated
|
||||
if updated.Title != "Test Gate Updated" {
|
||||
t.Errorf("expected title to be updated, got %q", updated.Title)
|
||||
}
|
||||
|
||||
// Await fields should be PRESERVED (not cleared)
|
||||
if updated.AwaitType != "gh:run" {
|
||||
t.Errorf("AwaitType was cleared! expected 'gh:run', got %q", updated.AwaitType)
|
||||
}
|
||||
if updated.AwaitID != "123456789" {
|
||||
t.Errorf("AwaitID was cleared! expected '123456789', got %q", updated.AwaitID)
|
||||
}
|
||||
if updated.Timeout != 30*60*1000000000 {
|
||||
t.Errorf("Timeout was cleared! expected %d, got %d", 30*60*1000000000, updated.Timeout)
|
||||
}
|
||||
if len(updated.Waiters) != 1 || updated.Waiters[0] != "beads/dave" {
|
||||
t.Errorf("Waiters was cleared! expected [beads/dave], got %v", updated.Waiters)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,7 +278,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
|
||||
err := s.db.QueryRowContext(ctx, `
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -289,7 +289,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
@@ -349,7 +349,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Wisp = true
|
||||
issue.Ephemeral = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -491,7 +491,7 @@ func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef s
|
||||
err := s.db.QueryRowContext(ctx, `
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -502,7 +502,7 @@ func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef s
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRefCol,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRefCol,
|
||||
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
@@ -562,7 +562,7 @@ func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef s
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Wisp = true
|
||||
issue.Ephemeral = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -1652,8 +1652,8 @@ func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter t
|
||||
}
|
||||
|
||||
// Wisp filtering (bd-kwro.9)
|
||||
if filter.Wisp != nil {
|
||||
if *filter.Wisp {
|
||||
if filter.Ephemeral != nil {
|
||||
if *filter.Ephemeral {
|
||||
whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
|
||||
} else {
|
||||
whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
|
||||
@@ -1699,7 +1699,7 @@ func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter t
|
||||
querySQL := fmt.Sprintf(`
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
created_at, created_by, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters
|
||||
|
||||
@@ -17,7 +17,8 @@ import (
|
||||
// Excludes pinned issues which are persistent anchors, not actionable work (bd-92u)
|
||||
func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
|
||||
whereClauses := []string{
|
||||
"i.pinned = 0", // Exclude pinned issues (bd-92u)
|
||||
"i.pinned = 0", // Exclude pinned issues (bd-92u)
|
||||
"(i.ephemeral = 0 OR i.ephemeral IS NULL)", // Exclude wisps (hq-t15s)
|
||||
}
|
||||
args := []interface{}{}
|
||||
|
||||
@@ -86,6 +87,25 @@ func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
|
||||
}
|
||||
}
|
||||
|
||||
// Parent filtering: filter to all descendants of a root issue (epic/molecule)
|
||||
// Uses recursive CTE to find all descendants via parent-child dependencies
|
||||
if filter.ParentID != nil {
|
||||
whereClauses = append(whereClauses, `
|
||||
i.id IN (
|
||||
WITH RECURSIVE descendants AS (
|
||||
SELECT issue_id FROM dependencies
|
||||
WHERE type = 'parent-child' AND depends_on_id = ?
|
||||
UNION ALL
|
||||
SELECT d.issue_id FROM dependencies d
|
||||
JOIN descendants dt ON d.depends_on_id = dt.issue_id
|
||||
WHERE d.type = 'parent-child'
|
||||
)
|
||||
SELECT issue_id FROM descendants
|
||||
)
|
||||
`)
|
||||
args = append(args, *filter.ParentID)
|
||||
}
|
||||
|
||||
// Build WHERE clause properly
|
||||
whereSQL := strings.Join(whereClauses, " AND ")
|
||||
|
||||
@@ -118,7 +138,7 @@ func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
|
||||
query := fmt.Sprintf(`
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters
|
||||
@@ -380,7 +400,7 @@ func (s *SQLiteStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if ephemeral.Valid && ephemeral.Int64 != 0 {
|
||||
issue.Wisp = true
|
||||
issue.Ephemeral = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -413,7 +433,7 @@ func (s *SQLiteStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
|
||||
// GetBlockedIssues returns issues that are blocked by dependencies or have status=blocked
|
||||
// Note: Pinned issues are excluded from the output (beads-ei4)
|
||||
// Note: Includes external: references in blocked_by list (bd-om4a)
|
||||
func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
|
||||
func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
|
||||
// Use UNION to combine:
|
||||
// 1. Issues with open/in_progress/blocked status that have dependency blockers
|
||||
// 2. Issues with status=blocked (even if they have no dependency blockers)
|
||||
@@ -423,11 +443,41 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
|
||||
// For blocked_by_count and blocker_ids:
|
||||
// - Count local blockers (open issues) + external refs (external:*)
|
||||
// - External refs are always considered "open" until resolved (bd-om4a)
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
|
||||
// Build additional WHERE clauses for filtering
|
||||
var filterClauses []string
|
||||
var args []any
|
||||
|
||||
// Parent filtering: filter to all descendants of a root issue (epic/molecule)
|
||||
if filter.ParentID != nil {
|
||||
filterClauses = append(filterClauses, `
|
||||
i.id IN (
|
||||
WITH RECURSIVE descendants AS (
|
||||
SELECT issue_id FROM dependencies
|
||||
WHERE type = 'parent-child' AND depends_on_id = ?
|
||||
UNION ALL
|
||||
SELECT d.issue_id FROM dependencies d
|
||||
JOIN descendants dt ON d.depends_on_id = dt.issue_id
|
||||
WHERE d.type = 'parent-child'
|
||||
)
|
||||
SELECT issue_id FROM descendants
|
||||
)
|
||||
`)
|
||||
args = append(args, *filter.ParentID)
|
||||
}
|
||||
|
||||
// Build filter clause SQL
|
||||
filterSQL := ""
|
||||
if len(filterClauses) > 0 {
|
||||
filterSQL = " AND " + strings.Join(filterClauses, " AND ")
|
||||
}
|
||||
|
||||
// nolint:gosec // G201: filterSQL contains only parameterized WHERE clauses with ? placeholders, not user input
|
||||
query := fmt.Sprintf(`
|
||||
SELECT
|
||||
i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
COALESCE(COUNT(d.depends_on_id), 0) as blocked_by_count,
|
||||
COALESCE(GROUP_CONCAT(d.depends_on_id, ','), '') as blocker_ids
|
||||
FROM issues i
|
||||
@@ -441,7 +491,7 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
|
||||
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
)
|
||||
-- External refs: always included (resolution happens at query time)
|
||||
OR d.depends_on_id LIKE 'external:%'
|
||||
OR d.depends_on_id LIKE 'external:%%'
|
||||
)
|
||||
WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
AND i.pinned = 0
|
||||
@@ -461,12 +511,14 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
|
||||
SELECT 1 FROM dependencies d3
|
||||
WHERE d3.issue_id = i.id
|
||||
AND d3.type = 'blocks'
|
||||
AND d3.depends_on_id LIKE 'external:%'
|
||||
AND d3.depends_on_id LIKE 'external:%%'
|
||||
)
|
||||
)
|
||||
%s
|
||||
GROUP BY i.id
|
||||
ORDER BY i.priority ASC
|
||||
`)
|
||||
`, filterSQL)
|
||||
rows, err := s.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get blocked issues: %w", err)
|
||||
}
|
||||
@@ -486,7 +538,7 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
|
||||
&issue.ID, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &issue.BlockedByCount,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &issue.BlockedByCount,
|
||||
&blockerIDsStr,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -596,6 +648,49 @@ func filterBlockedByExternalDeps(ctx context.Context, blocked []*types.BlockedIs
|
||||
return result
|
||||
}
|
||||
|
||||
// GetNewlyUnblockedByClose returns issues that became unblocked when the given issue was closed.
|
||||
// This is used by the --suggest-next flag on bd close to show what work is now available.
|
||||
// An issue is "newly unblocked" if:
|
||||
// - It had a 'blocks' dependency on the closed issue
|
||||
// - It is now unblocked (not in blocked_issues_cache)
|
||||
// - It has status open or in_progress (ready to work on)
|
||||
//
|
||||
// The cache is already rebuilt by CloseIssue before this is called, so we just need to
|
||||
// find dependents that are no longer blocked.
|
||||
func (s *SQLiteStorage) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
|
||||
// Find issues that:
|
||||
// 1. Had a 'blocks' dependency on the closed issue
|
||||
// 2. Are now NOT in blocked_issues_cache (unblocked)
|
||||
// 3. Have status open or in_progress
|
||||
// 4. Are not pinned
|
||||
query := `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters
|
||||
FROM issues i
|
||||
JOIN dependencies d ON i.id = d.issue_id
|
||||
WHERE d.depends_on_id = ?
|
||||
AND d.type = 'blocks'
|
||||
AND i.status IN ('open', 'in_progress')
|
||||
AND i.pinned = 0
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM blocked_issues_cache WHERE issue_id = i.id
|
||||
)
|
||||
ORDER BY i.priority ASC
|
||||
`
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query, closedIssueID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get newly unblocked issues: %w", err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
|
||||
return s.scanIssues(ctx, rows)
|
||||
}
|
||||
|
||||
// buildOrderByClause generates the ORDER BY clause based on sort policy
|
||||
func buildOrderByClause(policy types.SortPolicy) string {
|
||||
switch policy {
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestGetBlockedIssues(t *testing.T) {
|
||||
store.AddDependency(ctx, &types.Dependency{IssueID: issue3.ID, DependsOnID: issue2.ID, Type: types.DepBlocks}, "test-user")
|
||||
|
||||
// Get blocked issues
|
||||
blocked, err := store.GetBlockedIssues(ctx)
|
||||
blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
@@ -1215,7 +1215,7 @@ func TestGetBlockedIssuesFiltersExternalDeps(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test 1: External dep not satisfied - issue should appear as blocked
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx)
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
@@ -1260,7 +1260,7 @@ func TestGetBlockedIssuesFiltersExternalDeps(t *testing.T) {
|
||||
}
|
||||
|
||||
// Now GetBlockedIssues should NOT show the issue (external dep satisfied)
|
||||
blocked, err = mainStore.GetBlockedIssues(ctx)
|
||||
blocked, err = mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed after shipping: %v", err)
|
||||
}
|
||||
@@ -1379,7 +1379,7 @@ func TestGetBlockedIssuesPartialExternalDeps(t *testing.T) {
|
||||
externalStore.Close()
|
||||
|
||||
// Issue should still be blocked (cap2 not satisfied)
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx)
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
@@ -1512,3 +1512,212 @@ func TestCheckExternalDepInvalidFormats(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetNewlyUnblockedByClose tests the --suggest-next functionality (GH#679)
|
||||
func TestGetNewlyUnblockedByClose(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create a blocker issue
|
||||
blocker := env.CreateIssueWith("Blocker", types.StatusOpen, 1, types.TypeTask)
|
||||
|
||||
// Create two issues blocked by the blocker
|
||||
blocked1 := env.CreateIssueWith("Blocked 1", types.StatusOpen, 2, types.TypeTask)
|
||||
blocked2 := env.CreateIssueWith("Blocked 2", types.StatusOpen, 3, types.TypeTask)
|
||||
|
||||
// Create one issue blocked by multiple issues (blocker + another)
|
||||
otherBlocker := env.CreateIssueWith("Other Blocker", types.StatusOpen, 1, types.TypeTask)
|
||||
multiBlocked := env.CreateIssueWith("Multi Blocked", types.StatusOpen, 2, types.TypeTask)
|
||||
|
||||
// Add dependencies (issue depends on blocker)
|
||||
env.AddDep(blocked1, blocker)
|
||||
env.AddDep(blocked2, blocker)
|
||||
env.AddDep(multiBlocked, blocker)
|
||||
env.AddDep(multiBlocked, otherBlocker)
|
||||
|
||||
// Close the blocker
|
||||
env.Close(blocker, "Done")
|
||||
|
||||
// Get newly unblocked issues
|
||||
ctx := context.Background()
|
||||
unblocked, err := env.Store.GetNewlyUnblockedByClose(ctx, blocker.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetNewlyUnblockedByClose failed: %v", err)
|
||||
}
|
||||
|
||||
// Should return blocked1 and blocked2 (but not multiBlocked, which is still blocked by otherBlocker)
|
||||
if len(unblocked) != 2 {
|
||||
t.Errorf("Expected 2 unblocked issues, got %d", len(unblocked))
|
||||
}
|
||||
|
||||
// Check that the right issues are unblocked
|
||||
unblockedIDs := make(map[string]bool)
|
||||
for _, issue := range unblocked {
|
||||
unblockedIDs[issue.ID] = true
|
||||
}
|
||||
|
||||
if !unblockedIDs[blocked1.ID] {
|
||||
t.Errorf("Expected %s to be unblocked", blocked1.ID)
|
||||
}
|
||||
if !unblockedIDs[blocked2.ID] {
|
||||
t.Errorf("Expected %s to be unblocked", blocked2.ID)
|
||||
}
|
||||
if unblockedIDs[multiBlocked.ID] {
|
||||
t.Errorf("Expected %s to still be blocked (has another blocker)", multiBlocked.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDFilterDescendants tests that ParentID filter returns all descendants of an epic
|
||||
func TestParentIDFilterDescendants(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create hierarchy:
|
||||
// epic1 (root)
|
||||
// ├── task1 (child of epic1)
|
||||
// ├── task2 (child of epic1)
|
||||
// └── epic2 (child of epic1)
|
||||
// └── task3 (grandchild of epic1)
|
||||
// task4 (unrelated, should not appear in results)
|
||||
epic1 := env.CreateEpic("Epic 1")
|
||||
task1 := env.CreateIssue("Task 1")
|
||||
task2 := env.CreateIssue("Task 2")
|
||||
epic2 := env.CreateEpic("Epic 2")
|
||||
task3 := env.CreateIssue("Task 3")
|
||||
task4 := env.CreateIssue("Task 4 - unrelated")
|
||||
|
||||
env.AddParentChild(task1, epic1)
|
||||
env.AddParentChild(task2, epic1)
|
||||
env.AddParentChild(epic2, epic1)
|
||||
env.AddParentChild(task3, epic2)
|
||||
|
||||
// Query with ParentID = epic1
|
||||
parentID := epic1.ID
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID})
|
||||
|
||||
// Should include task1, task2, epic2, task3 (all descendants of epic1)
|
||||
// Should NOT include epic1 itself or task4
|
||||
if len(ready) != 4 {
|
||||
t.Fatalf("Expected 4 ready issues in parent scope, got %d", len(ready))
|
||||
}
|
||||
|
||||
// Verify the returned issues are the expected ones
|
||||
readyIDs := make(map[string]bool)
|
||||
for _, issue := range ready {
|
||||
readyIDs[issue.ID] = true
|
||||
}
|
||||
|
||||
if !readyIDs[task1.ID] {
|
||||
t.Errorf("Expected task1 to be in results")
|
||||
}
|
||||
if !readyIDs[task2.ID] {
|
||||
t.Errorf("Expected task2 to be in results")
|
||||
}
|
||||
if !readyIDs[epic2.ID] {
|
||||
t.Errorf("Expected epic2 to be in results")
|
||||
}
|
||||
if !readyIDs[task3.ID] {
|
||||
t.Errorf("Expected task3 to be in results")
|
||||
}
|
||||
if readyIDs[epic1.ID] {
|
||||
t.Errorf("Expected epic1 (root) to NOT be in results")
|
||||
}
|
||||
if readyIDs[task4.ID] {
|
||||
t.Errorf("Expected task4 (unrelated) to NOT be in results")
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDWithOtherFilters tests that ParentID can be combined with other filters
|
||||
func TestParentIDWithOtherFilters(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create hierarchy:
|
||||
// epic1 (root)
|
||||
// ├── task1 (priority 0)
|
||||
// ├── task2 (priority 1)
|
||||
// └── task3 (priority 2)
|
||||
epic1 := env.CreateEpic("Epic 1")
|
||||
task1 := env.CreateIssueWith("Task 1 - P0", types.StatusOpen, 0, types.TypeTask)
|
||||
task2 := env.CreateIssueWith("Task 2 - P1", types.StatusOpen, 1, types.TypeTask)
|
||||
task3 := env.CreateIssueWith("Task 3 - P2", types.StatusOpen, 2, types.TypeTask)
|
||||
|
||||
env.AddParentChild(task1, epic1)
|
||||
env.AddParentChild(task2, epic1)
|
||||
env.AddParentChild(task3, epic1)
|
||||
|
||||
// Query with ParentID = epic1 AND priority = 1
|
||||
parentID := epic1.ID
|
||||
priority := 1
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID, Priority: &priority})
|
||||
|
||||
// Should only include task2 (parent + priority 1)
|
||||
if len(ready) != 1 {
|
||||
t.Fatalf("Expected 1 issue with parent + priority filter, got %d", len(ready))
|
||||
}
|
||||
if ready[0].ID != task2.ID {
|
||||
t.Errorf("Expected task2, got %s", ready[0].ID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDWithBlockedDescendants tests that blocked descendants are excluded
|
||||
func TestParentIDWithBlockedDescendants(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create hierarchy:
|
||||
// epic1 (root)
|
||||
// ├── task1 (ready)
|
||||
// ├── task2 (blocked by blocker)
|
||||
// └── task3 (ready)
|
||||
// blocker (unrelated)
|
||||
epic1 := env.CreateEpic("Epic 1")
|
||||
task1 := env.CreateIssue("Task 1 - ready")
|
||||
task2 := env.CreateIssue("Task 2 - blocked")
|
||||
task3 := env.CreateIssue("Task 3 - ready")
|
||||
blocker := env.CreateIssue("Blocker")
|
||||
|
||||
env.AddParentChild(task1, epic1)
|
||||
env.AddParentChild(task2, epic1)
|
||||
env.AddParentChild(task3, epic1)
|
||||
env.AddDep(task2, blocker) // task2 is blocked
|
||||
|
||||
// Query with ParentID = epic1
|
||||
parentID := epic1.ID
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID})
|
||||
|
||||
// Should include task1, task3 (ready descendants)
|
||||
// Should NOT include task2 (blocked)
|
||||
if len(ready) != 2 {
|
||||
t.Fatalf("Expected 2 ready descendants, got %d", len(ready))
|
||||
}
|
||||
|
||||
readyIDs := make(map[string]bool)
|
||||
for _, issue := range ready {
|
||||
readyIDs[issue.ID] = true
|
||||
}
|
||||
|
||||
if !readyIDs[task1.ID] {
|
||||
t.Errorf("Expected task1 to be ready")
|
||||
}
|
||||
if !readyIDs[task3.ID] {
|
||||
t.Errorf("Expected task3 to be ready")
|
||||
}
|
||||
if readyIDs[task2.ID] {
|
||||
t.Errorf("Expected task2 to be blocked")
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDEmptyParent tests that empty parent returns nothing
|
||||
func TestParentIDEmptyParent(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create an epic with no children
|
||||
epic1 := env.CreateEpic("Epic 1 - no children")
|
||||
env.CreateIssue("Unrelated task")
|
||||
|
||||
// Query with ParentID = epic1 (which has no children)
|
||||
parentID := epic1.ID
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID})
|
||||
|
||||
// Should return empty since epic1 has no descendants
|
||||
if len(ready) != 0 {
|
||||
t.Fatalf("Expected 0 ready issues for empty parent, got %d", len(ready))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ CREATE TABLE IF NOT EXISTS issues (
|
||||
assignee TEXT,
|
||||
estimated_minutes INTEGER,
|
||||
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
created_by TEXT DEFAULT '',
|
||||
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
closed_at DATETIME,
|
||||
external_ref TEXT,
|
||||
@@ -36,7 +37,12 @@ CREATE TABLE IF NOT EXISTS issues (
|
||||
is_template INTEGER DEFAULT 0,
|
||||
-- NOTE: replies_to, relates_to, duplicate_of, superseded_by removed per Decision 004
|
||||
-- These relationships are now stored in the dependencies table
|
||||
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
-- closed_at constraint: closed issues must have it, tombstones may retain it from before deletion
|
||||
CHECK (
|
||||
(status = 'closed' AND closed_at IS NOT NULL) OR
|
||||
(status = 'tombstone') OR
|
||||
(status NOT IN ('closed', 'tombstone') AND closed_at IS NULL)
|
||||
)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status);
|
||||
@@ -224,6 +230,7 @@ WITH RECURSIVE
|
||||
SELECT i.*
|
||||
FROM issues i
|
||||
WHERE i.status = 'open'
|
||||
AND (i.ephemeral = 0 OR i.ephemeral IS NULL)
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
|
||||
);
|
||||
|
||||
@@ -310,7 +310,7 @@ func (t *sqliteTxStorage) GetIssue(ctx context.Context, id string) (*types.Issue
|
||||
row := t.conn.QueryRowContext(ctx, `
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -1089,8 +1089,8 @@ func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter
|
||||
}
|
||||
|
||||
// Wisp filtering (bd-kwro.9)
|
||||
if filter.Wisp != nil {
|
||||
if *filter.Wisp {
|
||||
if filter.Ephemeral != nil {
|
||||
if *filter.Ephemeral {
|
||||
whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
|
||||
} else {
|
||||
whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
|
||||
@@ -1127,7 +1127,7 @@ func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter
|
||||
querySQL := fmt.Sprintf(`
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -1188,7 +1188,7 @@ func scanIssueRow(row scanner) (*types.Issue, error) {
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
@@ -1244,7 +1244,7 @@ func scanIssueRow(row scanner) (*types.Issue, error) {
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Wisp = true
|
||||
issue.Ephemeral = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
|
||||
@@ -107,9 +107,10 @@ type Storage interface {
|
||||
|
||||
// Ready Work & Blocking
|
||||
GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error)
|
||||
GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error)
|
||||
GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error)
|
||||
GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error)
|
||||
GetStaleIssues(ctx context.Context, filter types.StaleFilter) ([]*types.Issue, error)
|
||||
GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) // GH#679
|
||||
|
||||
// Events
|
||||
AddComment(ctx context.Context, issueID, actor, comment string) error
|
||||
|
||||
@@ -89,7 +89,7 @@ func (m *mockStorage) GetIssuesByLabel(ctx context.Context, label string) ([]*ty
|
||||
func (m *mockStorage) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
|
||||
func (m *mockStorage) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error) {
|
||||
@@ -98,6 +98,9 @@ func (m *mockStorage) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.
|
||||
func (m *mockStorage) GetStaleIssues(ctx context.Context, filter types.StaleFilter) ([]*types.Issue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) AddComment(ctx context.Context, issueID, actor, comment string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ func TestCommitToSyncBranch(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "initial sync branch commit")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Write new content to commit
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`+"\n"+`{"id":"test-2"}`)
|
||||
@@ -64,7 +64,7 @@ func TestCommitToSyncBranch(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "initial")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Write the same content that's in the sync branch
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
@@ -101,7 +101,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "local sync")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Pull should handle the case where remote doesn't have the branch
|
||||
result, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
|
||||
@@ -131,7 +131,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
runGit(t, repoDir, "commit", "-m", "sync commit")
|
||||
// Set up a fake remote ref at the same commit
|
||||
runGit(t, repoDir, "update-ref", "refs/remotes/origin/"+syncBranch, "HEAD")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Pull when already at remote HEAD
|
||||
result, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
|
||||
@@ -158,7 +158,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "sync commit")
|
||||
runGit(t, repoDir, "update-ref", "refs/remotes/origin/"+syncBranch, "HEAD")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Remove local JSONL to verify it gets copied back
|
||||
os.Remove(jsonlPath)
|
||||
@@ -198,7 +198,7 @@ func TestPullFromSyncBranch(t *testing.T) {
|
||||
|
||||
// Reset back to base (so remote is ahead)
|
||||
runGit(t, repoDir, "reset", "--hard", baseCommit)
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// Pull should fast-forward
|
||||
result, err := PullFromSyncBranch(ctx, repoDir, syncBranch, jsonlPath, false)
|
||||
@@ -233,7 +233,7 @@ func TestResetToRemote(t *testing.T) {
|
||||
writeFile(t, jsonlPath, `{"id":"local-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "local commit")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// ResetToRemote should fail since remote branch doesn't exist
|
||||
err := ResetToRemote(ctx, repoDir, syncBranch, jsonlPath)
|
||||
@@ -264,7 +264,7 @@ func TestPushSyncBranch(t *testing.T) {
|
||||
writeFile(t, filepath.Join(repoDir, ".beads", "issues.jsonl"), `{"id":"test-1"}`)
|
||||
runGit(t, repoDir, "add", ".")
|
||||
runGit(t, repoDir, "commit", "-m", "initial")
|
||||
runGit(t, repoDir, "checkout", "master")
|
||||
runGit(t, repoDir, "checkout", "main")
|
||||
|
||||
// PushSyncBranch should handle the worktree creation
|
||||
err := PushSyncBranch(ctx, repoDir, syncBranch)
|
||||
@@ -391,8 +391,8 @@ func setupTestRepoWithRemote(t *testing.T) string {
|
||||
t.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
|
||||
// Initialize git repo
|
||||
runGit(t, tmpDir, "init")
|
||||
// Initialize git repo with 'main' as default branch (modern git convention)
|
||||
runGit(t, tmpDir, "init", "--initial-branch=main")
|
||||
runGit(t, tmpDir, "config", "user.email", "test@test.com")
|
||||
runGit(t, tmpDir, "config", "user.name", "Test User")
|
||||
|
||||
@@ -413,4 +413,3 @@ func setupTestRepoWithRemote(t *testing.T) string {
|
||||
|
||||
return tmpDir
|
||||
}
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ type Issue struct {
|
||||
Assignee string `json:"assignee,omitempty"`
|
||||
EstimatedMinutes *int `json:"estimated_minutes,omitempty"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
CreatedBy string `json:"created_by,omitempty"` // Who created this issue (GH#748)
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
ClosedAt *time.Time `json:"closed_at,omitempty"`
|
||||
CloseReason string `json:"close_reason,omitempty"` // Reason provided when closing the issue
|
||||
@@ -43,8 +44,8 @@ type Issue struct {
|
||||
OriginalType string `json:"original_type,omitempty"` // Issue type before deletion (for tombstones)
|
||||
|
||||
// Messaging fields (bd-kwro): inter-agent communication support
|
||||
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
|
||||
Wisp bool `json:"wisp,omitempty"` // Wisp = ephemeral vapor from the Steam Engine; bulk-deleted when closed
|
||||
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
|
||||
Ephemeral bool `json:"ephemeral,omitempty"` // If true, not exported to JSONL; bulk-deleted when closed
|
||||
// NOTE: RepliesTo, RelatesTo, DuplicateOf, SupersededBy moved to dependencies table
|
||||
// per Decision 004 (Edge Schema Consolidation). Use dependency API instead.
|
||||
|
||||
@@ -97,7 +98,9 @@ func (i *Issue) ComputeContentHash() string {
|
||||
h.Write([]byte{0})
|
||||
h.Write([]byte(i.Assignee))
|
||||
h.Write([]byte{0})
|
||||
|
||||
h.Write([]byte(i.CreatedBy))
|
||||
h.Write([]byte{0})
|
||||
|
||||
if i.ExternalRef != nil {
|
||||
h.Write([]byte(*i.ExternalRef))
|
||||
}
|
||||
@@ -246,10 +249,11 @@ func (i *Issue) ValidateWithCustomStatuses(customStatuses []string) error {
|
||||
return fmt.Errorf("estimated_minutes cannot be negative")
|
||||
}
|
||||
// Enforce closed_at invariant: closed_at should be set if and only if status is closed
|
||||
// Exception: tombstones may retain closed_at from before deletion
|
||||
if i.Status == StatusClosed && i.ClosedAt == nil {
|
||||
return fmt.Errorf("closed issues must have closed_at timestamp")
|
||||
}
|
||||
if i.Status != StatusClosed && i.ClosedAt != nil {
|
||||
if i.Status != StatusClosed && i.Status != StatusTombstone && i.ClosedAt != nil {
|
||||
return fmt.Errorf("non-closed issues cannot have closed_at timestamp")
|
||||
}
|
||||
// Enforce tombstone invariants (bd-md2): deleted_at must be set for tombstones, and only for tombstones
|
||||
@@ -594,8 +598,8 @@ type IssueFilter struct {
|
||||
// Tombstone filtering (bd-1bu)
|
||||
IncludeTombstones bool // If false (default), exclude tombstones from results
|
||||
|
||||
// Wisp filtering (bd-kwro.9)
|
||||
Wisp *bool // Filter by wisp flag (nil = any, true = only wisps, false = only non-wisps)
|
||||
// Ephemeral filtering (bd-kwro.9)
|
||||
Ephemeral *bool // Filter by ephemeral flag (nil = any, true = only ephemeral, false = only persistent)
|
||||
|
||||
// Pinned filtering (bd-7h5)
|
||||
Pinned *bool // Filter by pinned flag (nil = any, true = only pinned, false = only non-pinned)
|
||||
@@ -646,6 +650,9 @@ type WorkFilter struct {
|
||||
LabelsAny []string // OR semantics: issue must have AT LEAST ONE of these labels
|
||||
Limit int
|
||||
SortPolicy SortPolicy
|
||||
|
||||
// Parent filtering: filter to descendants of a bead/epic (recursive)
|
||||
ParentID *string // Show all descendants of this issue
|
||||
}
|
||||
|
||||
// StaleFilter is used to filter stale issue queries
|
||||
|
||||
Reference in New Issue
Block a user