Merge pull request #752 from jordanhubbard/main

E2E / chaos testing - first PoC with just 48% code coverage
This commit is contained in:
Steve Yegge
2025-12-26 17:38:01 -08:00
committed by GitHub
63 changed files with 4854 additions and 516 deletions

View File

@@ -48,10 +48,10 @@ func TestMain(m *testing.M) {
fmt.Fprintf(os.Stderr, "Failed to build bd binary: %v\n%s\n", err, out)
os.Exit(1)
}
// Optimize git for tests
os.Setenv("GIT_CONFIG_NOSYSTEM", "1")
os.Exit(m.Run())
}
@@ -85,35 +85,35 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
}
t.Parallel()
tmpDir := testutil.TempDirInMemory(t)
bdPath := getBDPath()
if _, err := os.Stat(bdPath); err != nil {
t.Fatalf("bd binary not found at %s", bdPath)
}
// Setup remote and 3 clones
remoteDir := setupBareRepo(t, tmpDir)
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
cloneC := setupClone(t, tmpDir, remoteDir, "C", bdPath)
// Each clone creates unique issue (different content = different hash ID)
createIssueInClone(t, cloneA, "Issue from clone A")
createIssueInClone(t, cloneB, "Issue from clone B")
createIssueInClone(t, cloneC, "Issue from clone C")
// Sync all clones once (hash IDs prevent collisions, don't need multiple rounds)
for _, clone := range []string{cloneA, cloneB, cloneC} {
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
}
// Verify all clones have all 3 issues
expectedTitles := map[string]bool{
"Issue from clone A": true,
"Issue from clone B": true,
"Issue from clone C": true,
}
allConverged := true
for name, dir := range map[string]string{"A": cloneA, "B": cloneB, "C": cloneC} {
titles := getTitlesFromClone(t, dir)
@@ -122,7 +122,7 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
allConverged = false
}
}
if allConverged {
t.Log("✓ All 3 clones converged with hash-based IDs")
} else {
@@ -138,26 +138,26 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
}
t.Parallel()
tmpDir := testutil.TempDirInMemory(t)
bdPath := getBDPath()
if _, err := os.Stat(bdPath); err != nil {
t.Fatalf("bd binary not found at %s", bdPath)
}
// Setup remote and 2 clones
remoteDir := setupBareRepo(t, tmpDir)
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
// Both clones create identical issue (same content = same hash ID)
createIssueInClone(t, cloneA, "Identical issue")
createIssueInClone(t, cloneB, "Identical issue")
// Sync both clones once (hash IDs handle dedup automatically)
for _, clone := range []string{cloneA, cloneB} {
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
}
// Verify both clones have exactly 1 issue (deduplication worked)
for name, dir := range map[string]string{"A": cloneA, "B": cloneB} {
titles := getTitlesFromClone(t, dir)
@@ -168,7 +168,7 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
t.Errorf("Clone %s missing expected issue: %v", name, sortedKeys(titles))
}
}
t.Log("✓ Identical content deduplicated correctly with hash-based IDs")
}
@@ -177,36 +177,36 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
// setupBareRepo creates a bare git repository under tmpDir and seeds it with
// an initial empty commit on the "master" branch. A bare repo cannot commit
// directly, so the seed commit goes through a throwaway clone and is pushed
// back. Returns the bare repository's path.
func setupBareRepo(t *testing.T, tmpDir string) string {
	t.Helper()
	remoteDir := filepath.Join(tmpDir, "remote.git")
	// Pin the initial branch to "master" explicitly so the test does not
	// depend on the host's init.defaultBranch configuration. (The previous
	// duplicate init call without -b has been removed.)
	runCmd(t, tmpDir, "git", "init", "--bare", "-b", "master", remoteDir)
	tempClone := filepath.Join(tmpDir, "temp-init")
	runCmd(t, tmpDir, "git", "clone", remoteDir, tempClone)
	runCmd(t, tempClone, "git", "commit", "--allow-empty", "-m", "Initial commit")
	runCmd(t, tempClone, "git", "push", "origin", "master")
	return remoteDir
}
func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
t.Helper()
cloneDir := filepath.Join(tmpDir, "clone-"+strings.ToLower(name))
// Use shallow, shared clones for speed
runCmd(t, tmpDir, "git", "clone", "--shared", "--depth=1", "--no-tags", remoteDir, cloneDir)
// Disable hooks to avoid overhead
emptyHooks := filepath.Join(cloneDir, ".empty-hooks")
os.MkdirAll(emptyHooks, 0755)
runCmd(t, cloneDir, "git", "config", "core.hooksPath", emptyHooks)
// Speed configs
runCmd(t, cloneDir, "git", "config", "gc.auto", "0")
runCmd(t, cloneDir, "git", "config", "core.fsync", "false")
runCmd(t, cloneDir, "git", "config", "commit.gpgSign", "false")
bdCmd := getBDCommand()
copyFile(t, bdPath, filepath.Join(cloneDir, filepath.Base(bdCmd)))
if name == "A" {
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
runCmd(t, cloneDir, "git", "add", ".beads")
@@ -216,7 +216,7 @@ func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
runCmd(t, cloneDir, "git", "pull", "origin", "master")
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
}
return cloneDir
}
@@ -231,13 +231,13 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
"BEADS_NO_DAEMON": "1",
"BD_NO_AUTO_IMPORT": "1",
}, getBDCommand(), "list", "--json")
jsonStart := strings.Index(listJSON, "[")
if jsonStart == -1 {
return make(map[string]bool)
}
listJSON = listJSON[jsonStart:]
var issues []struct {
Title string `json:"title"`
}
@@ -245,7 +245,7 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
t.Logf("Failed to parse JSON: %v", err)
return make(map[string]bool)
}
titles := make(map[string]bool)
for _, issue := range issues {
titles[issue.Title] = true
@@ -280,7 +280,7 @@ func installGitHooks(t *testing.T, repoDir string) {
hooksDir := filepath.Join(repoDir, ".git", "hooks")
// Ensure POSIX-style path for sh scripts (even on Windows)
bdCmd := strings.ReplaceAll(getBDCommand(), "\\", "/")
preCommit := fmt.Sprintf(`#!/bin/sh
%s --no-daemon export -o .beads/issues.jsonl >/dev/null 2>&1 || true
git add .beads/issues.jsonl >/dev/null 2>&1 || true

View File

@@ -336,8 +336,8 @@ func TestRun_Async(t *testing.T) {
outputFile := filepath.Join(tmpDir, "async_output.txt")
// Create a hook that writes to a file
hookScript := `#!/bin/sh
echo "async" > ` + outputFile
hookScript := "#!/bin/sh\n" +
"echo \"async\" > \"" + outputFile + "\"\n"
if err := os.WriteFile(hookPath, []byte(hookScript), 0755); err != nil {
t.Fatalf("Failed to create hook file: %v", err)
}
@@ -348,15 +348,17 @@ echo "async" > ` + outputFile
// Run should return immediately
runner.Run(EventClose, issue)
// Wait for the async hook to complete with retries
// Wait for the async hook to complete with retries.
// Under high test load the goroutine scheduling + exec can be delayed.
var output []byte
var err error
for i := 0; i < 10; i++ {
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(3 * time.Second)
for time.Now().Before(deadline) {
output, err = os.ReadFile(outputFile)
if err == nil {
break
}
time.Sleep(50 * time.Millisecond)
}
if err != nil {

View File

@@ -0,0 +1,107 @@
package rpc
import (
"encoding/json"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// TestClient_GateLifecycleAndShutdown drives a gate through its full RPC
// lifecycle — create, list, show, add a waiter, close, re-list (open and
// all) — and finally shuts the server down cleanly. The call order matters:
// each step observes state produced by the previous one.
func TestClient_GateLifecycleAndShutdown(t *testing.T) {
_, client, cleanup := setupTestServer(t)
defer cleanup()
// Create a gate awaiting a human, seeded with one waiter.
createResp, err := client.GateCreate(&GateCreateArgs{
Title: "Test Gate",
AwaitType: "human",
AwaitID: "",
Timeout: 5 * time.Minute,
Waiters: []string{"mayor/"},
})
if err != nil {
t.Fatalf("GateCreate: %v", err)
}
var created GateCreateResult
if err := json.Unmarshal(createResp.Data, &created); err != nil {
t.Fatalf("unmarshal GateCreateResult: %v", err)
}
if created.ID == "" {
t.Fatalf("expected created gate ID")
}
// The newly created gate must be the only open gate.
listResp, err := client.GateList(&GateListArgs{All: false})
if err != nil {
t.Fatalf("GateList: %v", err)
}
var openGates []*types.Issue
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
t.Fatalf("unmarshal GateList: %v", err)
}
if len(openGates) != 1 || openGates[0].ID != created.ID {
t.Fatalf("unexpected open gates: %+v", openGates)
}
// Show returns the gate itself, typed as a gate issue.
showResp, err := client.GateShow(&GateShowArgs{ID: created.ID})
if err != nil {
t.Fatalf("GateShow: %v", err)
}
var gate types.Issue
if err := json.Unmarshal(showResp.Data, &gate); err != nil {
t.Fatalf("unmarshal GateShow: %v", err)
}
if gate.ID != created.ID || gate.IssueType != types.TypeGate {
t.Fatalf("unexpected gate: %+v", gate)
}
// Registering one additional waiter reports AddedCount == 1.
waitResp, err := client.GateWait(&GateWaitArgs{ID: created.ID, Waiters: []string{"deacon/"}})
if err != nil {
t.Fatalf("GateWait: %v", err)
}
var waitResult GateWaitResult
if err := json.Unmarshal(waitResp.Data, &waitResult); err != nil {
t.Fatalf("unmarshal GateWaitResult: %v", err)
}
if waitResult.AddedCount != 1 {
t.Fatalf("expected 1 waiter added, got %d", waitResult.AddedCount)
}
// Closing the gate flips its status to closed.
closeResp, err := client.GateClose(&GateCloseArgs{ID: created.ID, Reason: "done"})
if err != nil {
t.Fatalf("GateClose: %v", err)
}
var closedGate types.Issue
if err := json.Unmarshal(closeResp.Data, &closedGate); err != nil {
t.Fatalf("unmarshal GateClose: %v", err)
}
if closedGate.Status != types.StatusClosed {
t.Fatalf("expected closed status, got %q", closedGate.Status)
}
// After close, the open-only list is empty...
listResp, err = client.GateList(&GateListArgs{All: false})
if err != nil {
t.Fatalf("GateList open: %v", err)
}
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
t.Fatalf("unmarshal GateList open: %v", err)
}
if len(openGates) != 0 {
t.Fatalf("expected no open gates, got %+v", openGates)
}
// ...but All:true still reports the closed gate.
listResp, err = client.GateList(&GateListArgs{All: true})
if err != nil {
t.Fatalf("GateList all: %v", err)
}
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
t.Fatalf("unmarshal GateList all: %v", err)
}
if len(openGates) != 1 || openGates[0].ID != created.ID {
t.Fatalf("expected 1 total gate, got %+v", openGates)
}
// Clean server shutdown must succeed.
if err := client.Shutdown(); err != nil {
t.Fatalf("Shutdown: %v", err)
}
}

View File

@@ -0,0 +1,921 @@
package memory
import (
"context"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
// TestMemoryStorage_LoadFromIssues_IndexesAndCounters verifies that
// LoadFromIssues tolerates nil entries, indexes external refs, attaches
// dependencies/labels/comments to the loaded issue, and seeds both the
// numeric ID counter (bd-10 loaded => next generated ID is bd-11) and the
// hierarchical child counter (bd-a3f8e9.3 loaded => next child is .4).
func TestMemoryStorage_LoadFromIssues_IndexesAndCounters(t *testing.T) {
store := New("/tmp/example.jsonl")
defer store.Close()
extRef := "ext-1"
issues := []*types.Issue{
// nil entry: the loader must skip it without returning an error.
nil,
{
ID: "bd-10",
Title: "Ten",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
ExternalRef: &extRef,
Dependencies: []*types.Dependency{{
IssueID: "bd-10",
DependsOnID: "bd-2",
Type: types.DepBlocks,
}},
Labels: []string{"l1"},
Comments: []*types.Comment{{ID: 1, IssueID: "bd-10", Author: "a", Text: "c"}},
},
{ID: "bd-2", Title: "Two", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
{ID: "bd-a3f8e9", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
{ID: "bd-a3f8e9.3", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
}
if err := store.LoadFromIssues(issues); err != nil {
t.Fatalf("LoadFromIssues: %v", err)
}
ctx := context.Background()
// External-ref index must resolve to the loaded issue...
got, err := store.GetIssueByExternalRef(ctx, "ext-1")
if err != nil {
t.Fatalf("GetIssueByExternalRef: %v", err)
}
if got == nil || got.ID != "bd-10" {
t.Fatalf("GetIssueByExternalRef got=%v", got)
}
// ...with its dependencies and labels attached.
if len(got.Dependencies) != 1 || got.Dependencies[0].DependsOnID != "bd-2" {
t.Fatalf("expected deps attached")
}
if len(got.Labels) != 1 || got.Labels[0] != "l1" {
t.Fatalf("expected labels attached")
}
// Exercise CreateIssue ID generation based on the loaded counter (bd-10 => next should be bd-11).
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
t.Fatalf("SetConfig: %v", err)
}
newIssue := &types.Issue{Title: "New", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, newIssue, "actor"); err != nil {
t.Fatalf("CreateIssue: %v", err)
}
if newIssue.ID != "bd-11" {
t.Fatalf("expected generated id bd-11, got %q", newIssue.ID)
}
// Hierarchical counter for parent extracted from bd-a3f8e9.3.
childID, err := store.GetNextChildID(ctx, "bd-a3f8e9")
if err != nil {
t.Fatalf("GetNextChildID: %v", err)
}
if childID != "bd-a3f8e9.4" {
t.Fatalf("expected bd-a3f8e9.4, got %q", childID)
}
}
// TestMemoryStorage_GetAllIssues_SortsAndCopies verifies that GetAllIssues
// returns issues sorted by ID and that the returned structs are copies:
// mutating a returned issue must not affect the stored one.
func TestMemoryStorage_GetAllIssues_SortsAndCopies(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
// Create out-of-order IDs.
a := &types.Issue{ID: "bd-2", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
b := &types.Issue{ID: "bd-1", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, a, "actor"); err != nil {
t.Fatalf("CreateIssue a: %v", err)
}
if err := store.CreateIssue(ctx, b, "actor"); err != nil {
t.Fatalf("CreateIssue b: %v", err)
}
if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
all := store.GetAllIssues()
if len(all) != 2 {
t.Fatalf("expected 2 issues, got %d", len(all))
}
// bd-1 sorts before bd-2 despite being created second.
if all[0].ID != "bd-1" || all[1].ID != "bd-2" {
t.Fatalf("expected sorted by ID, got %q then %q", all[0].ID, all[1].ID)
}
// Returned issues must be copies (mutating should not affect stored issue struct).
all[1].Title = "mutated"
got, err := store.GetIssue(ctx, "bd-2")
if err != nil {
t.Fatalf("GetIssue: %v", err)
}
if got.Title != "A" {
t.Fatalf("expected stored title unchanged, got %q", got.Title)
}
}
// TestMemoryStorage_CreateIssues_DefaultPrefix_DuplicateExisting_ExternalRef
// covers three CreateIssues batch behaviors: the default "bd" prefix when no
// issue_prefix config is set, external-ref indexing for issues created with
// explicit IDs, and rejection of a batch containing an already-existing ID.
func TestMemoryStorage_CreateIssues_DefaultPrefix_DuplicateExisting_ExternalRef(t *testing.T) {
store := New("")
defer store.Close()
ctx := context.Background()
// Default prefix should be "bd" when unset.
issues := []*types.Issue{{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
if err := store.CreateIssues(ctx, issues, "actor"); err != nil {
t.Fatalf("CreateIssues: %v", err)
}
if issues[0].ID != "bd-1" {
t.Fatalf("expected bd-1, got %q", issues[0].ID)
}
// Issue created with an explicit ID and external ref must be indexed by ref.
ext := "ext"
batch := []*types.Issue{{ID: "bd-x", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, ExternalRef: &ext}}
if err := store.CreateIssues(ctx, batch, "actor"); err != nil {
t.Fatalf("CreateIssues: %v", err)
}
if got, _ := store.GetIssueByExternalRef(ctx, "ext"); got == nil || got.ID != "bd-x" {
t.Fatalf("expected external ref indexed")
}
// Duplicate existing issue ID branch.
dup := []*types.Issue{{ID: "bd-x", Title: "Dup", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
if err := store.CreateIssues(ctx, dup, "actor"); err == nil {
t.Fatalf("expected duplicate existing issue error")
}
}
// TestMemoryStorage_GetIssueByExternalRef_IndexPointsToMissingIssue checks
// that a dangling external-ref index entry (pointing at an ID with no
// backing issue) resolves to nil without an error.
func TestMemoryStorage_GetIssueByExternalRef_IndexPointsToMissingIssue(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()

	// Plant an index entry whose target issue does not exist.
	store.mu.Lock()
	store.externalRefToID["dangling"] = "bd-nope"
	store.mu.Unlock()

	issue, lookupErr := store.GetIssueByExternalRef(context.Background(), "dangling")
	if lookupErr != nil {
		t.Fatalf("GetIssueByExternalRef: %v", lookupErr)
	}
	if issue != nil {
		t.Fatalf("expected nil for dangling ref")
	}
}
// TestMemoryStorage_DependencyCounts_Records_Tree_Cycles builds a small
// acyclic dependency graph (A->B, A->C, D->B) and checks per-issue
// dependency/dependent counts (including a missing ID yielding zeros),
// per-issue and global dependency records, the depth-limited dependency
// tree from A, and that DetectCycles returns nil for this graph.
func TestMemoryStorage_DependencyCounts_Records_Tree_Cycles(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
c := &types.Issue{ID: "bd-3", Title: "C", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
d := &types.Issue{ID: "bd-4", Title: "D", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
for _, iss := range []*types.Issue{a, b, c, d} {
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
}
}
// Edges: A depends on B and C; D depends on B.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: c.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: d.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
// A has 2 dependencies; B has 2 dependents; an unknown ID yields zero counts.
counts, err := store.GetDependencyCounts(ctx, []string{a.ID, b.ID, "bd-missing"})
if err != nil {
t.Fatalf("GetDependencyCounts: %v", err)
}
if counts[a.ID].DependencyCount != 2 || counts[a.ID].DependentCount != 0 {
t.Fatalf("unexpected counts for A: %+v", counts[a.ID])
}
if counts[b.ID].DependencyCount != 0 || counts[b.ID].DependentCount != 2 {
t.Fatalf("unexpected counts for B: %+v", counts[b.ID])
}
if counts["bd-missing"].DependencyCount != 0 || counts["bd-missing"].DependentCount != 0 {
t.Fatalf("unexpected counts for missing: %+v", counts["bd-missing"])
}
deps, err := store.GetDependencyRecords(ctx, a.ID)
if err != nil {
t.Fatalf("GetDependencyRecords: %v", err)
}
if len(deps) != 2 {
t.Fatalf("expected 2 deps, got %d", len(deps))
}
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
t.Fatalf("GetAllDependencyRecords: %v", err)
}
if len(allDeps[a.ID]) != 2 {
t.Fatalf("expected all deps for A")
}
// Tree from A (depth limit 3): both direct dependencies appear at depth 1.
nodes, err := store.GetDependencyTree(ctx, a.ID, 3, false, false)
if err != nil {
t.Fatalf("GetDependencyTree: %v", err)
}
if len(nodes) != 2 || nodes[0].Depth != 1 {
t.Fatalf("unexpected tree: %+v", nodes)
}
// The graph is acyclic, so cycle detection must report nothing.
cycles, err := store.DetectCycles(ctx)
if err != nil {
t.Fatalf("DetectCycles: %v", err)
}
if cycles != nil {
t.Fatalf("expected nil cycles, got %+v", cycles)
}
}
// TestMemoryStorage_HashTracking_NoOps verifies that the memory backend's
// hash-tracking methods behave as no-ops: the getters report empty hashes
// with no error, and the setters/clear succeed silently.
func TestMemoryStorage_HashTracking_NoOps(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	gotHash, gotErr := store.GetDirtyIssueHash(ctx, "bd-1")
	if gotErr != nil || gotHash != "" {
		t.Fatalf("GetDirtyIssueHash: hash=%q err=%v", gotHash, gotErr)
	}
	gotHash, gotErr = store.GetExportHash(ctx, "bd-1")
	if gotErr != nil || gotHash != "" {
		t.Fatalf("GetExportHash: hash=%q err=%v", gotHash, gotErr)
	}
	if setErr := store.SetExportHash(ctx, "bd-1", "h"); setErr != nil {
		t.Fatalf("SetExportHash: %v", setErr)
	}
	if clearErr := store.ClearAllExportHashes(ctx); clearErr != nil {
		t.Fatalf("ClearAllExportHashes: %v", clearErr)
	}
	gotHash, gotErr = store.GetJSONLFileHash(ctx)
	if gotErr != nil || gotHash != "" {
		t.Fatalf("GetJSONLFileHash: hash=%q err=%v", gotHash, gotErr)
	}
	if setErr := store.SetJSONLFileHash(ctx, "h"); setErr != nil {
		t.Fatalf("SetJSONLFileHash: %v", setErr)
	}
}
// TestMemoryStorage_LabelsAndCommentsHelpers covers the batch label and
// comment lookup helpers: GetLabelsForIssues (missing IDs are omitted from
// the result), GetIssuesByLabel, and GetCommentsForIssues after a comment
// has been added via AddIssueComment.
func TestMemoryStorage_LabelsAndCommentsHelpers(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, a, "actor"); err != nil {
t.Fatalf("CreateIssue a: %v", err)
}
if err := store.CreateIssue(ctx, b, "actor"); err != nil {
t.Fatalf("CreateIssue b: %v", err)
}
if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
if err := store.AddLabel(ctx, b.ID, "l2", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
// Batch lookup: the unknown ID produces no entry, so only 2 come back.
labels, err := store.GetLabelsForIssues(ctx, []string{a.ID, b.ID, "bd-missing"})
if err != nil {
t.Fatalf("GetLabelsForIssues: %v", err)
}
if len(labels) != 2 {
t.Fatalf("expected 2 entries, got %d", len(labels))
}
if labels[a.ID][0] != "l1" {
t.Fatalf("unexpected labels for A: %+v", labels[a.ID])
}
// Reverse lookup by label returns only the issue carrying it.
issues, err := store.GetIssuesByLabel(ctx, "l1")
if err != nil {
t.Fatalf("GetIssuesByLabel: %v", err)
}
if len(issues) != 1 || issues[0].ID != a.ID {
t.Fatalf("unexpected issues: %+v", issues)
}
if _, err := store.AddIssueComment(ctx, a.ID, "author", "text"); err != nil {
t.Fatalf("AddIssueComment: %v", err)
}
comments, err := store.GetCommentsForIssues(ctx, []string{a.ID, b.ID})
if err != nil {
t.Fatalf("GetCommentsForIssues: %v", err)
}
if len(comments[a.ID]) != 1 {
t.Fatalf("expected comments for A")
}
}
// TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers exercises a
// grab-bag of storage helpers on the memory backend: path/DB accessors (no
// SQL handle or transactions are available), stale-issue detection, comment
// and dirty-marking helpers, event limiting, custom-status parsing, and the
// rename/ID helpers that are either errors or no-ops for this backend.
func TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers(t *testing.T) {
	store := New("/tmp/x.jsonl")
	defer store.Close()
	ctx := context.Background()
	if store.Path() != "/tmp/x.jsonl" {
		t.Fatalf("Path mismatch")
	}
	// The memory backend exposes no SQL database or transaction support.
	if store.UnderlyingDB() != nil {
		t.Fatalf("expected nil UnderlyingDB")
	}
	if _, err := store.UnderlyingConn(ctx); err == nil {
		t.Fatalf("expected UnderlyingConn error")
	}
	if err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { return nil }); err == nil {
		t.Fatalf("expected RunInTransaction error")
	}
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	// Force updated_at into the past for stale detection. Mutate the record
	// held by the store (matching the other stale tests) rather than the
	// local pointer, so the test still works if CreateIssue stores a copy.
	store.mu.Lock()
	store.issues[a.ID].UpdatedAt = time.Now().Add(-10 * 24 * time.Hour)
	store.mu.Unlock()
	stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 10})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != a.ID {
		t.Fatalf("unexpected stale: %+v", stale)
	}
	if err := store.AddComment(ctx, a.ID, "actor", "c"); err != nil {
		t.Fatalf("AddComment: %v", err)
	}
	if err := store.MarkIssueDirty(ctx, a.ID); err != nil {
		t.Fatalf("MarkIssueDirty: %v", err)
	}
	// Generate multiple events and ensure limiting returns the last N.
	if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t1"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t2"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	evs, err := store.GetEvents(ctx, a.ID, 2)
	if err != nil {
		t.Fatalf("GetEvents: %v", err)
	}
	if len(evs) != 2 {
		t.Fatalf("expected 2 events, got %d", len(evs))
	}
	// Custom statuses: whitespace is trimmed and empty segments dropped.
	if err := store.SetConfig(ctx, "status.custom", " triage, blocked , ,done "); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	statuses, err := store.GetCustomStatuses(ctx)
	if err != nil {
		t.Fatalf("GetCustomStatuses: %v", err)
	}
	if len(statuses) != 3 || statuses[0] != "triage" || statuses[1] != "blocked" || statuses[2] != "done" {
		t.Fatalf("unexpected statuses: %+v", statuses)
	}
	if got := parseCustomStatuses(""); got != nil {
		t.Fatalf("expected nil for empty parseCustomStatuses")
	}
	// Empty custom statuses.
	if err := store.DeleteConfig(ctx, "status.custom"); err != nil {
		t.Fatalf("DeleteConfig: %v", err)
	}
	statuses, err = store.GetCustomStatuses(ctx)
	if err != nil {
		t.Fatalf("GetCustomStatuses(empty): %v", err)
	}
	if statuses != nil {
		t.Fatalf("expected nil statuses when unset, got %+v", statuses)
	}
	if _, err := store.GetEpicsEligibleForClosure(ctx); err != nil {
		t.Fatalf("GetEpicsEligibleForClosure: %v", err)
	}
	// ID/prefix renames: UpdateIssueID errors; the prefix renames are no-ops.
	if err := store.UpdateIssueID(ctx, "old", "new", nil, "actor"); err == nil {
		t.Fatalf("expected UpdateIssueID error")
	}
	if err := store.RenameDependencyPrefix(ctx, "old", "new"); err != nil {
		t.Fatalf("RenameDependencyPrefix: %v", err)
	}
	if err := store.RenameCounterPrefix(ctx, "old", "new"); err != nil {
		t.Fatalf("RenameCounterPrefix: %v", err)
	}
}
// TestMemoryStorage_AddLabelAndAddDependency_ErrorPaths checks that AddLabel
// rejects unknown issues but treats a duplicate label as a no-op, and that
// AddDependency rejects a dependency when either endpoint is missing.
func TestMemoryStorage_AddLabelAndAddDependency_ErrorPaths(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
issue := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, issue, "actor"); err != nil {
t.Fatalf("CreateIssue: %v", err)
}
// AddLabel on an unknown issue must fail.
if err := store.AddLabel(ctx, "bd-missing", "l", "actor"); err == nil {
t.Fatalf("expected AddLabel error for missing issue")
}
if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
// Duplicate label is a no-op.
if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
t.Fatalf("AddLabel duplicate: %v", err)
}
// AddDependency error paths.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: "bd-missing", DependsOnID: issue.ID, Type: types.DepBlocks}, "actor"); err == nil {
t.Fatalf("expected AddDependency error for missing IssueID")
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: issue.ID, DependsOnID: "bd-missing", Type: types.DepBlocks}, "actor"); err == nil {
t.Fatalf("expected AddDependency error for missing DependsOnID")
}
}
// TestMemoryStorage_GetNextChildID_Errors checks that GetNextChildID rejects
// an unknown parent and a parent already at the maximum hierarchy depth.
func TestMemoryStorage_GetNextChildID_Errors(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	// Unknown parent ID must be rejected.
	_, missingErr := store.GetNextChildID(ctx, "bd-missing")
	if missingErr == nil {
		t.Fatalf("expected error for missing parent")
	}

	// A parent nested four levels deep: asking for a child must fail.
	deepest := &types.Issue{ID: "bd-1.1.1.1", Title: "Deep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if createErr := store.CreateIssue(ctx, deepest, "actor"); createErr != nil {
		t.Fatalf("CreateIssue: %v", createErr)
	}
	_, depthErr := store.GetNextChildID(ctx, deepest.ID)
	if depthErr == nil {
		t.Fatalf("expected max depth error")
	}
}
// TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments verifies
// that GetAllIssues returns each issue with its dependency records and
// comments populated, not just the bare issue fields.
func TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, a, "actor"); err != nil {
t.Fatalf("CreateIssue a: %v", err)
}
if err := store.CreateIssue(ctx, b, "actor"); err != nil {
t.Fatalf("CreateIssue b: %v", err)
}
// Give A one dependency (on B) and one comment.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
if _, err := store.AddIssueComment(ctx, a.ID, "author", "text"); err != nil {
t.Fatalf("AddIssueComment: %v", err)
}
all := store.GetAllIssues()
// Locate A in the returned slice.
var gotA *types.Issue
for _, iss := range all {
if iss.ID == a.ID {
gotA = iss
break
}
}
if gotA == nil {
t.Fatalf("expected to find issue A")
}
if len(gotA.Dependencies) != 1 || gotA.Dependencies[0].DependsOnID != b.ID {
t.Fatalf("expected deps attached")
}
if len(gotA.Comments) != 1 || gotA.Comments[0].Text != "text" {
t.Fatalf("expected comments attached")
}
}
// TestMemoryStorage_GetStaleIssues_FilteringAndLimit verifies stale-issue
// queries: closed issues are excluded even when oldest, the Status filter
// narrows the result set, and Limit returns the oldest matches first.
func TestMemoryStorage_GetStaleIssues_FilteringAndLimit(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
old := &types.Issue{ID: "bd-1", Title: "Old", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
newer := &types.Issue{ID: "bd-2", Title: "Newer", Status: types.StatusInProgress, Priority: 1, IssueType: types.TypeTask}
closed := &types.Issue{ID: "bd-3", Title: "Closed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
for _, iss := range []*types.Issue{old, newer, closed} {
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
}
}
if err := store.CloseIssue(ctx, closed.ID, "done", "actor"); err != nil {
t.Fatalf("CloseIssue: %v", err)
}
// Backdate updated_at directly on the stored records: closed is oldest
// (30d), then old (20d), then newer (10d).
store.mu.Lock()
store.issues[old.ID].UpdatedAt = time.Now().Add(-20 * 24 * time.Hour)
store.issues[newer.ID].UpdatedAt = time.Now().Add(-10 * 24 * time.Hour)
store.issues[closed.ID].UpdatedAt = time.Now().Add(-30 * 24 * time.Hour)
store.mu.Unlock()
// Status filter: only the in_progress issue qualifies.
stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Status: "in_progress"})
if err != nil {
t.Fatalf("GetStaleIssues: %v", err)
}
if len(stale) != 1 || stale[0].ID != newer.ID {
t.Fatalf("unexpected stale filtered: %+v", stale)
}
// Limit 1: the oldest non-closed issue wins (closed bd-3 is excluded).
stale, err = store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 1})
if err != nil {
t.Fatalf("GetStaleIssues: %v", err)
}
if len(stale) != 1 || stale[0].ID != old.ID {
t.Fatalf("expected oldest stale first, got %+v", stale)
}
}
// TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting verifies
// that an open epic whose parent-child children are all closed is counted
// as eligible for closure in GetStatistics.
func TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
ep := &types.Issue{ID: "bd-1", Title: "Epic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
c1 := &types.Issue{ID: "bd-2", Title: "Child1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
c2 := &types.Issue{ID: "bd-3", Title: "Child2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
for _, iss := range []*types.Issue{ep, c1, c2} {
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
}
}
// Close both children before linking them to the epic.
if err := store.CloseIssue(ctx, c1.ID, "done", "actor"); err != nil {
t.Fatalf("CloseIssue c1: %v", err)
}
if err := store.CloseIssue(ctx, c2.ID, "done", "actor"); err != nil {
t.Fatalf("CloseIssue c2: %v", err)
}
// Parent-child deps: child -> epic.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: c1.ID, DependsOnID: ep.ID, Type: types.DepParentChild}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: c2.ID, DependsOnID: ep.ID, Type: types.DepParentChild}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
stats, err := store.GetStatistics(ctx)
if err != nil {
t.Fatalf("GetStatistics: %v", err)
}
if stats.EpicsEligibleForClosure != 1 {
t.Fatalf("expected 1 epic eligible, got %d", stats.EpicsEligibleForClosure)
}
}
func TestMemoryStorage_UpdateIssue_SearchIssues_ReadyWork_BlockedIssues(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
now := time.Now()
assignee := "alice"
parent := &types.Issue{ID: "bd-1", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
child := &types.Issue{ID: "bd-2", Title: "Child", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask, Assignee: assignee}
blocker := &types.Issue{ID: "bd-3", Title: "Blocker", Status: types.StatusOpen, Priority: 3, IssueType: types.TypeTask}
pinned := &types.Issue{ID: "bd-4", Title: "Pinned", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, Pinned: true}
workflow := &types.Issue{ID: "bd-5", Title: "Workflow", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeMergeRequest}
for _, iss := range []*types.Issue{parent, child, blocker, pinned, workflow} {
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
}
}
// Make created_at deterministic for sorting.
store.mu.Lock()
store.issues[parent.ID].CreatedAt = now.Add(-100 * time.Hour)
store.issues[child.ID].CreatedAt = now.Add(-1 * time.Hour)
store.issues[blocker.ID].CreatedAt = now.Add(-2 * time.Hour)
store.issues[pinned.ID].CreatedAt = now.Add(-3 * time.Hour)
store.issues[workflow.ID].CreatedAt = now.Add(-4 * time.Hour)
store.mu.Unlock()
// Dependencies: child is a child of parent; child is blocked by blocker.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: parent.ID, Type: types.DepParentChild}, "actor"); err != nil {
t.Fatalf("AddDependency parent-child: %v", err)
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency blocks: %v", err)
}
// AddDependency duplicate error path.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err == nil {
t.Fatalf("expected duplicate dependency error")
}
// UpdateIssue: exercise assignee nil, external_ref update+clear, and closed_at behavior.
ext := "old-ext"
store.mu.Lock()
store.issues[child.ID].ExternalRef = &ext
store.externalRefToID[ext] = child.ID
store.mu.Unlock()
if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"assignee": nil, "external_ref": "new-ext"}, "actor"); err != nil {
t.Fatalf("UpdateIssue: %v", err)
}
if got, _ := store.GetIssueByExternalRef(ctx, "old-ext"); got != nil {
t.Fatalf("expected old-ext removed")
}
if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got == nil || got.ID != child.ID {
t.Fatalf("expected new-ext mapping")
}
if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
t.Fatalf("UpdateIssue close: %v", err)
}
closed, _ := store.GetIssue(ctx, child.ID)
if closed.ClosedAt == nil {
t.Fatalf("expected ClosedAt set")
}
if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusOpen), "external_ref": nil}, "actor"); err != nil {
t.Fatalf("UpdateIssue reopen: %v", err)
}
reopened, _ := store.GetIssue(ctx, child.ID)
if reopened.ClosedAt != nil {
t.Fatalf("expected ClosedAt cleared")
}
if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got != nil {
t.Fatalf("expected new-ext cleared")
}
// SearchIssues: query, label AND/OR, IDs filter, ParentID filter, limit.
if err := store.AddLabel(ctx, parent.ID, "l1", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
if err := store.AddLabel(ctx, child.ID, "l1", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
if err := store.AddLabel(ctx, child.ID, "l2", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
st := types.StatusOpen
res, err := store.SearchIssues(ctx, "parent", types.IssueFilter{Status: &st})
if err != nil {
t.Fatalf("SearchIssues: %v", err)
}
if len(res) != 1 || res[0].ID != parent.ID {
t.Fatalf("unexpected SearchIssues results: %+v", res)
}
res, err = store.SearchIssues(ctx, "", types.IssueFilter{Labels: []string{"l1", "l2"}})
if err != nil {
t.Fatalf("SearchIssues labels AND: %v", err)
}
if len(res) != 1 || res[0].ID != child.ID {
t.Fatalf("unexpected labels AND results: %+v", res)
}
res, err = store.SearchIssues(ctx, "", types.IssueFilter{IDs: []string{child.ID}})
if err != nil {
t.Fatalf("SearchIssues IDs: %v", err)
}
if len(res) != 1 || res[0].ID != child.ID {
t.Fatalf("unexpected IDs results: %+v", res)
}
res, err = store.SearchIssues(ctx, "", types.IssueFilter{ParentID: &parent.ID})
if err != nil {
t.Fatalf("SearchIssues ParentID: %v", err)
}
if len(res) != 1 || res[0].ID != child.ID {
t.Fatalf("unexpected ParentID results: %+v", res)
}
res, err = store.SearchIssues(ctx, "", types.IssueFilter{LabelsAny: []string{"l2", "missing"}, Limit: 1})
if err != nil {
t.Fatalf("SearchIssues labels OR: %v", err)
}
if len(res) != 1 {
t.Fatalf("expected limit 1")
}
// Ready work: child is blocked, pinned excluded, workflow excluded by default.
ready, err := store.GetReadyWork(ctx, types.WorkFilter{})
if err != nil {
t.Fatalf("GetReadyWork: %v", err)
}
if len(ready) != 2 { // parent + blocker
t.Fatalf("expected 2 ready issues, got %d: %+v", len(ready), ready)
}
// Filter by workflow type explicitly.
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Type: string(types.TypeMergeRequest)})
if err != nil {
t.Fatalf("GetReadyWork type: %v", err)
}
if len(ready) != 1 || ready[0].ID != workflow.ID {
t.Fatalf("expected only workflow issue, got %+v", ready)
}
// Status + priority filters.
prio := 3
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Status: types.StatusOpen, Priority: &prio})
if err != nil {
t.Fatalf("GetReadyWork status+priority: %v", err)
}
if len(ready) != 1 || ready[0].ID != blocker.ID {
t.Fatalf("expected blocker only, got %+v", ready)
}
// Label filters.
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Labels: []string{"l1"}})
if err != nil {
t.Fatalf("GetReadyWork labels AND: %v", err)
}
if len(ready) != 1 || ready[0].ID != parent.ID {
t.Fatalf("expected parent only, got %+v", ready)
}
ready, err = store.GetReadyWork(ctx, types.WorkFilter{LabelsAny: []string{"l2"}})
if err != nil {
t.Fatalf("GetReadyWork labels OR: %v", err)
}
if len(ready) != 0 {
t.Fatalf("expected 0 because only l2 issue is blocked")
}
// Assignee filter vs Unassigned precedence.
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Assignee: &assignee})
if err != nil {
t.Fatalf("GetReadyWork assignee: %v", err)
}
if len(ready) != 0 {
t.Fatalf("expected 0 due to child being blocked")
}
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Unassigned: true})
if err != nil {
t.Fatalf("GetReadyWork unassigned: %v", err)
}
for _, iss := range ready {
if iss.Assignee != "" {
t.Fatalf("expected unassigned only")
}
}
// Sort policies + limit.
ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyOldest, Limit: 1})
if err != nil {
t.Fatalf("GetReadyWork oldest: %v", err)
}
if len(ready) != 1 || ready[0].ID != parent.ID {
t.Fatalf("expected oldest=parent, got %+v", ready)
}
ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyPriority})
if err != nil {
t.Fatalf("GetReadyWork priority: %v", err)
}
if len(ready) < 2 || ready[0].Priority > ready[1].Priority {
t.Fatalf("expected priority sort")
}
// Hybrid: recent issues first.
ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyHybrid})
if err != nil {
t.Fatalf("GetReadyWork hybrid: %v", err)
}
if len(ready) != 2 || ready[0].ID != blocker.ID {
t.Fatalf("expected recent (blocker) first in hybrid, got %+v", ready)
}
// Blocked issues: child is blocked by an open blocker.
blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
if err != nil {
t.Fatalf("GetBlockedIssues: %v", err)
}
if len(blocked) != 1 || blocked[0].ID != child.ID || blocked[0].BlockedByCount != 1 {
t.Fatalf("unexpected blocked issues: %+v", blocked)
}
// Cover getOpenBlockers missing-blocker branch.
missing := &types.Issue{ID: "bd-6", Title: "Missing blocker dep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, missing, "actor"); err != nil {
t.Fatalf("CreateIssue: %v", err)
}
// Bypass AddDependency validation to cover the missing-blocker branch in getOpenBlockers.
store.mu.Lock()
store.dependencies[missing.ID] = append(store.dependencies[missing.ID], &types.Dependency{IssueID: missing.ID, DependsOnID: "bd-does-not-exist", Type: types.DepBlocks})
store.mu.Unlock()
blocked, err = store.GetBlockedIssues(ctx, types.WorkFilter{})
if err != nil {
t.Fatalf("GetBlockedIssues: %v", err)
}
if len(blocked) != 2 {
t.Fatalf("expected 2 blocked issues, got %d", len(blocked))
}
}
// TestMemoryStorage_UpdateIssue_CoversMoreFields verifies that a single
// UpdateIssue call can set every mutable text/enum field, and that closing an
// already-closed issue preserves its original ClosedAt timestamp.
func TestMemoryStorage_UpdateIssue_CoversMoreFields(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	issue := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, issue, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}

	// One update touching text fields, priority, type, assignee, and status together.
	updates := map[string]interface{}{
		"description":         "d",
		"design":              "design",
		"acceptance_criteria": "ac",
		"notes":               "n",
		"priority":            2,
		"issue_type":          string(types.TypeBug),
		"assignee":            "bob",
		"status":              string(types.StatusInProgress),
	}
	if err := store.UpdateIssue(ctx, issue.ID, updates, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}

	updated, _ := store.GetIssue(ctx, issue.ID)
	textOK := updated.Description == "d" && updated.Design == "design" &&
		updated.AcceptanceCriteria == "ac" && updated.Notes == "n"
	if !textOK {
		t.Fatalf("expected text fields updated")
	}
	restOK := updated.Priority == 2 && updated.IssueType == types.TypeBug &&
		updated.Assignee == "bob" && updated.Status == types.StatusInProgress
	if !restOK {
		t.Fatalf("expected fields updated")
	}

	// A closed -> closed status transition must not clear ClosedAt.
	if err := store.CloseIssue(ctx, issue.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}
	afterClose, _ := store.GetIssue(ctx, issue.ID)
	if afterClose.ClosedAt == nil {
		t.Fatalf("expected ClosedAt")
	}
	if err := store.UpdateIssue(ctx, issue.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
		t.Fatalf("UpdateIssue closed->closed: %v", err)
	}
	afterRepeat, _ := store.GetIssue(ctx, issue.ID)
	if afterRepeat.ClosedAt == nil {
		t.Fatalf("expected ClosedAt preserved")
	}
}
// TestMemoryStorage_CountEpicsEligibleForClosure_CoversBranches exercises the
// epic-eligibility counter: only open epics whose children are all closed
// count; closed epics, non-epic parents, and dangling parent references do not.
func TestMemoryStorage_CountEpicsEligibleForClosure_CoversBranches(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	openEpic := &types.Issue{ID: "bd-1", Title: "Epic1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	doneEpic := &types.Issue{ID: "bd-2", Title: "EpicClosed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	plainTask := &types.Issue{ID: "bd-3", Title: "NotEpic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	child := &types.Issue{ID: "bd-4", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{openEpic, doneEpic, plainTask, child} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, doneEpic.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}

	// child -> openEpic: becomes eligible once the child is closed.
	// child -> plainTask: a non-epic parent must not be counted.
	for _, parentID := range []string{openEpic.ID, plainTask.ID} {
		dep := &types.Dependency{IssueID: child.ID, DependsOnID: parentID, Type: types.DepParentChild}
		if err := store.AddDependency(ctx, dep, "actor"); err != nil {
			t.Fatalf("AddDependency: %v", err)
		}
	}

	// child -> missing parent: injected directly (AddDependency would reject it)
	// to cover the dangling-reference branch.
	store.mu.Lock()
	store.dependencies[child.ID] = append(store.dependencies[child.ID], &types.Dependency{IssueID: child.ID, DependsOnID: "bd-missing", Type: types.DepParentChild})
	store.mu.Unlock()

	// Closing the sole child makes openEpic the only eligible epic.
	if err := store.CloseIssue(ctx, child.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue child: %v", err)
	}

	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics: %v", err)
	}
	if stats.EpicsEligibleForClosure != 1 {
		t.Fatalf("expected 1 eligible epic, got %d", stats.EpicsEligibleForClosure)
	}
}
// TestExtractParentAndChildNumber_CoversFailures confirms the parser reports
// ok=false for inputs with no dot separator or a non-numeric child segment.
func TestExtractParentAndChildNumber_CoversFailures(t *testing.T) {
	for _, input := range []string{"no-dot", "parent.bad"} {
		if _, _, ok := extractParentAndChildNumber(input); ok {
			t.Fatalf("expected ok=false")
		}
	}
}

View File

@@ -20,10 +20,6 @@ func MigrateMessagingFields(db *sql.DB) error {
}{
{"sender", "TEXT DEFAULT ''"},
{"ephemeral", "INTEGER DEFAULT 0"},
{"replies_to", "TEXT DEFAULT ''"},
{"relates_to", "TEXT DEFAULT ''"},
{"duplicate_of", "TEXT DEFAULT ''"},
{"superseded_by", "TEXT DEFAULT ''"},
}
for _, col := range columns {
@@ -59,11 +55,5 @@ func MigrateMessagingFields(db *sql.DB) error {
return fmt.Errorf("failed to create sender index: %w", err)
}
// Add index for replies_to (for efficient thread queries)
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_replies_to ON issues(replies_to) WHERE replies_to != ''`)
if err != nil {
return fmt.Errorf("failed to create replies_to index: %w", err)
}
return nil
}

View File

@@ -21,137 +21,176 @@ import (
func MigrateEdgeFields(db *sql.DB) error {
now := time.Now()
hasColumn := func(name string) (bool, error) {
var exists bool
err := db.QueryRow(`
SELECT COUNT(*) > 0
FROM pragma_table_info('issues')
WHERE name = ?
`, name).Scan(&exists)
return exists, err
}
hasRepliesTo, err := hasColumn("replies_to")
if err != nil {
return fmt.Errorf("failed to check replies_to column: %w", err)
}
hasRelatesTo, err := hasColumn("relates_to")
if err != nil {
return fmt.Errorf("failed to check relates_to column: %w", err)
}
hasDuplicateOf, err := hasColumn("duplicate_of")
if err != nil {
return fmt.Errorf("failed to check duplicate_of column: %w", err)
}
hasSupersededBy, err := hasColumn("superseded_by")
if err != nil {
return fmt.Errorf("failed to check superseded_by column: %w", err)
}
if !hasRepliesTo && !hasRelatesTo && !hasDuplicateOf && !hasSupersededBy {
return nil
}
// Migrate replies_to fields to replies-to edges
// For thread_id, use the parent's ID as the thread root for first-level replies
// (more sophisticated thread detection would require recursive queries)
rows, err := db.Query(`
SELECT id, replies_to
FROM issues
WHERE replies_to != '' AND replies_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query replies_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, repliesTo string
if err := rows.Scan(&issueID, &repliesTo); err != nil {
return fmt.Errorf("failed to scan replies_to row: %w", err)
}
// Use repliesTo as thread_id (the root of the thread)
// This is a simplification - existing threads will have the parent as thread root
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
`, issueID, repliesTo, now, repliesTo)
if hasRepliesTo {
rows, err := db.Query(`
SELECT id, replies_to
FROM issues
WHERE replies_to != '' AND replies_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query replies_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, repliesTo string
if err := rows.Scan(&issueID, &repliesTo); err != nil {
return fmt.Errorf("failed to scan replies_to row: %w", err)
}
// Use repliesTo as thread_id (the root of the thread)
// This is a simplification - existing threads will have the parent as thread root
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
`, issueID, repliesTo, now, repliesTo)
if err != nil {
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating replies_to rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating replies_to rows: %w", err)
}
// Migrate relates_to fields to relates-to edges
// relates_to is stored as JSON array string
rows, err = db.Query(`
SELECT id, relates_to
FROM issues
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query relates_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, relatesTo string
if err := rows.Scan(&issueID, &relatesTo); err != nil {
return fmt.Errorf("failed to scan relates_to row: %w", err)
if hasRelatesTo {
rows, err := db.Query(`
SELECT id, relates_to
FROM issues
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query relates_to fields: %w", err)
}
defer rows.Close()
// Parse JSON array
var relatedIDs []string
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
// Skip malformed JSON
continue
}
for rows.Next() {
var issueID, relatesTo string
if err := rows.Scan(&issueID, &relatesTo); err != nil {
return fmt.Errorf("failed to scan relates_to row: %w", err)
}
for _, relatedID := range relatedIDs {
if relatedID == "" {
// Parse JSON array
var relatedIDs []string
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
// Skip malformed JSON
continue
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
`, issueID, relatedID, now)
if err != nil {
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
for _, relatedID := range relatedIDs {
if relatedID == "" {
continue
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
`, issueID, relatedID, now)
if err != nil {
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
}
}
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating relates_to rows: %w", err)
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating relates_to rows: %w", err)
}
}
// Migrate duplicate_of fields to duplicates edges
rows, err = db.Query(`
SELECT id, duplicate_of
FROM issues
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, duplicateOf string
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
`, issueID, duplicateOf, now)
if hasDuplicateOf {
rows, err := db.Query(`
SELECT id, duplicate_of
FROM issues
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, duplicateOf string
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
`, issueID, duplicateOf, now)
if err != nil {
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
}
// Migrate superseded_by fields to supersedes edges
rows, err = db.Query(`
SELECT id, superseded_by
FROM issues
WHERE superseded_by != '' AND superseded_by IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query superseded_by fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, supersededBy string
if err := rows.Scan(&issueID, &supersededBy); err != nil {
return fmt.Errorf("failed to scan superseded_by row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
`, issueID, supersededBy, now)
if hasSupersededBy {
rows, err := db.Query(`
SELECT id, superseded_by
FROM issues
WHERE superseded_by != '' AND superseded_by IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query superseded_by fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, supersededBy string
if err := rows.Scan(&issueID, &supersededBy); err != nil {
return fmt.Errorf("failed to scan superseded_by row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
`, issueID, supersededBy, now)
if err != nil {
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating superseded_by rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating superseded_by rows: %w", err)
}
return nil

View File

@@ -57,6 +57,57 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
return nil
}
// Preserve newer columns if they already exist (migration may run on partially-migrated DBs).
hasPinned, err := checkCol("pinned")
if err != nil {
return fmt.Errorf("failed to check pinned column: %w", err)
}
hasIsTemplate, err := checkCol("is_template")
if err != nil {
return fmt.Errorf("failed to check is_template column: %w", err)
}
hasAwaitType, err := checkCol("await_type")
if err != nil {
return fmt.Errorf("failed to check await_type column: %w", err)
}
hasAwaitID, err := checkCol("await_id")
if err != nil {
return fmt.Errorf("failed to check await_id column: %w", err)
}
hasTimeoutNs, err := checkCol("timeout_ns")
if err != nil {
return fmt.Errorf("failed to check timeout_ns column: %w", err)
}
hasWaiters, err := checkCol("waiters")
if err != nil {
return fmt.Errorf("failed to check waiters column: %w", err)
}
pinnedExpr := "0"
if hasPinned {
pinnedExpr = "pinned"
}
isTemplateExpr := "0"
if hasIsTemplate {
isTemplateExpr = "is_template"
}
awaitTypeExpr := "''"
if hasAwaitType {
awaitTypeExpr = "await_type"
}
awaitIDExpr := "''"
if hasAwaitID {
awaitIDExpr = "await_id"
}
timeoutNsExpr := "0"
if hasTimeoutNs {
timeoutNsExpr = "timeout_ns"
}
waitersExpr := "''"
if hasWaiters {
waitersExpr = "waiters"
}
// SQLite 3.35.0+ supports DROP COLUMN, but we use table recreation for compatibility
// This is idempotent - we recreate the table without the deprecated columns
@@ -117,6 +168,12 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
original_type TEXT DEFAULT '',
sender TEXT DEFAULT '',
ephemeral INTEGER DEFAULT 0,
pinned INTEGER DEFAULT 0,
is_template INTEGER DEFAULT 0,
await_type TEXT,
await_id TEXT,
timeout_ns INTEGER,
waiters TEXT,
close_reason TEXT DEFAULT '',
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
)
@@ -132,7 +189,8 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
notes, status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref, source_repo, compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at,
deleted_by, delete_reason, original_type, sender, ephemeral, close_reason
deleted_by, delete_reason, original_type, sender, ephemeral, pinned, is_template,
await_type, await_id, timeout_ns, waiters, close_reason
)
SELECT
id, content_hash, title, description, design, acceptance_criteria,
@@ -140,9 +198,11 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
created_at, updated_at, closed_at, external_ref, COALESCE(source_repo, ''), compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at,
deleted_by, delete_reason, original_type, sender, ephemeral,
%s, %s,
%s, %s, %s, %s,
COALESCE(close_reason, '')
FROM issues
`)
`, pinnedExpr, isTemplateExpr, awaitTypeExpr, awaitIDExpr, timeoutNsExpr, waitersExpr)
if err != nil {
return fmt.Errorf("failed to copy issues data: %w", err)
}

View File

@@ -20,6 +20,11 @@ func MigratePinnedColumn(db *sql.DB) error {
}
if columnExists {
// Column exists (e.g. created by new schema); ensure index exists.
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`)
if err != nil {
return fmt.Errorf("failed to create pinned index: %w", err)
}
return nil
}

View File

@@ -21,6 +21,11 @@ func MigrateIsTemplateColumn(db *sql.DB) error {
}
if columnExists {
// Column exists (e.g. created by new schema); ensure index exists.
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`)
if err != nil {
return fmt.Errorf("failed to create is_template index: %w", err)
}
return nil
}

View File

@@ -0,0 +1,59 @@
package sqlite
import (
"context"
"path/filepath"
"testing"
"github.com/steveyegge/beads/internal/types"
)
func TestRunMigrations_DoesNotResetPinnedOrTemplate(t *testing.T) {
ctx := context.Background()
dir := t.TempDir()
dbPath := filepath.Join(dir, "beads.db")
s, err := New(ctx, dbPath)
if err != nil {
t.Fatalf("New: %v", err)
}
t.Cleanup(func() { _ = s.Close() })
if err := s.SetConfig(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("SetConfig(issue_prefix): %v", err)
}
issue := &types.Issue{
Title: "Pinned template",
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeTask,
Pinned: true,
IsTemplate: true,
}
if err := s.CreateIssue(ctx, issue, "test-user"); err != nil {
t.Fatalf("CreateIssue: %v", err)
}
_ = s.Close()
s2, err := New(ctx, dbPath)
if err != nil {
t.Fatalf("New(reopen): %v", err)
}
defer func() { _ = s2.Close() }()
got, err := s2.GetIssue(ctx, issue.ID)
if err != nil {
t.Fatalf("GetIssue: %v", err)
}
if got == nil {
t.Fatalf("expected issue to exist")
}
if !got.Pinned {
t.Fatalf("expected issue to remain pinned")
}
if !got.IsTemplate {
t.Fatalf("expected issue to remain template")
}
}

View File

@@ -392,7 +392,7 @@ func setupTestRepoWithRemote(t *testing.T) string {
}
// Initialize git repo
runGit(t, tmpDir, "init")
runGit(t, tmpDir, "init", "-b", "master")
runGit(t, tmpDir, "config", "user.email", "test@test.com")
runGit(t, tmpDir, "config", "user.name", "Test User")
@@ -413,4 +413,3 @@ func setupTestRepoWithRemote(t *testing.T) string {
return tmpDir
}