Merge origin/main, fix test git init for modern git

This commit is contained in:
Steve Yegge
2025-12-27 00:17:36 -08:00
114 changed files with 5869 additions and 787 deletions
+28 -28
View File
@@ -48,10 +48,10 @@ func TestMain(m *testing.M) {
fmt.Fprintf(os.Stderr, "Failed to build bd binary: %v\n%s\n", err, out)
os.Exit(1)
}
// Optimize git for tests
os.Setenv("GIT_CONFIG_NOSYSTEM", "1")
os.Exit(m.Run())
}
@@ -85,35 +85,35 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
}
t.Parallel()
tmpDir := testutil.TempDirInMemory(t)
bdPath := getBDPath()
if _, err := os.Stat(bdPath); err != nil {
t.Fatalf("bd binary not found at %s", bdPath)
}
// Setup remote and 3 clones
remoteDir := setupBareRepo(t, tmpDir)
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
cloneC := setupClone(t, tmpDir, remoteDir, "C", bdPath)
// Each clone creates unique issue (different content = different hash ID)
createIssueInClone(t, cloneA, "Issue from clone A")
createIssueInClone(t, cloneB, "Issue from clone B")
createIssueInClone(t, cloneC, "Issue from clone C")
// Sync all clones once (hash IDs prevent collisions, don't need multiple rounds)
for _, clone := range []string{cloneA, cloneB, cloneC} {
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
}
// Verify all clones have all 3 issues
expectedTitles := map[string]bool{
"Issue from clone A": true,
"Issue from clone B": true,
"Issue from clone C": true,
}
allConverged := true
for name, dir := range map[string]string{"A": cloneA, "B": cloneB, "C": cloneC} {
titles := getTitlesFromClone(t, dir)
@@ -122,7 +122,7 @@ func TestHashIDs_MultiCloneConverge(t *testing.T) {
allConverged = false
}
}
if allConverged {
t.Log("✓ All 3 clones converged with hash-based IDs")
} else {
@@ -138,26 +138,26 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
}
t.Parallel()
tmpDir := testutil.TempDirInMemory(t)
bdPath := getBDPath()
if _, err := os.Stat(bdPath); err != nil {
t.Fatalf("bd binary not found at %s", bdPath)
}
// Setup remote and 2 clones
remoteDir := setupBareRepo(t, tmpDir)
cloneA := setupClone(t, tmpDir, remoteDir, "A", bdPath)
cloneB := setupClone(t, tmpDir, remoteDir, "B", bdPath)
// Both clones create identical issue (same content = same hash ID)
createIssueInClone(t, cloneA, "Identical issue")
createIssueInClone(t, cloneB, "Identical issue")
// Sync both clones once (hash IDs handle dedup automatically)
for _, clone := range []string{cloneA, cloneB} {
runCmdOutputWithEnvAllowError(t, clone, map[string]string{"BEADS_NO_DAEMON": "1"}, true, bdPath, "sync")
}
// Verify both clones have exactly 1 issue (deduplication worked)
for name, dir := range map[string]string{"A": cloneA, "B": cloneB} {
titles := getTitlesFromClone(t, dir)
@@ -168,7 +168,7 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
t.Errorf("Clone %s missing expected issue: %v", name, sortedKeys(titles))
}
}
t.Log("✓ Identical content deduplicated correctly with hash-based IDs")
}
@@ -177,36 +177,36 @@ func TestHashIDs_IdenticalContentDedup(t *testing.T) {
// setupBareRepo creates a bare git repository under tmpDir, seeds it with an
// initial empty commit on the "master" branch (via a throwaway clone, since a
// bare repo has no worktree), and returns the path to the bare repository.
func setupBareRepo(t *testing.T, tmpDir string) string {
t.Helper()
remoteDir := filepath.Join(tmpDir, "remote.git")
// Pin the initial branch to "master": modern git defaults to "main",
// which would break the hard-coded "master" push/pull below.
// (The earlier duplicate init without -b has been removed — re-running
// git init on an existing repo is a confusing no-op at best.)
runCmd(t, tmpDir, "git", "init", "--bare", "-b", "master", remoteDir)
tempClone := filepath.Join(tmpDir, "temp-init")
runCmd(t, tmpDir, "git", "clone", remoteDir, tempClone)
runCmd(t, tempClone, "git", "commit", "--allow-empty", "-m", "Initial commit")
runCmd(t, tempClone, "git", "push", "origin", "master")
return remoteDir
}
func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
t.Helper()
cloneDir := filepath.Join(tmpDir, "clone-"+strings.ToLower(name))
// Use shallow, shared clones for speed
runCmd(t, tmpDir, "git", "clone", "--shared", "--depth=1", "--no-tags", remoteDir, cloneDir)
// Disable hooks to avoid overhead
emptyHooks := filepath.Join(cloneDir, ".empty-hooks")
os.MkdirAll(emptyHooks, 0755)
runCmd(t, cloneDir, "git", "config", "core.hooksPath", emptyHooks)
// Speed configs
runCmd(t, cloneDir, "git", "config", "gc.auto", "0")
runCmd(t, cloneDir, "git", "config", "core.fsync", "false")
runCmd(t, cloneDir, "git", "config", "commit.gpgSign", "false")
bdCmd := getBDCommand()
copyFile(t, bdPath, filepath.Join(cloneDir, filepath.Base(bdCmd)))
if name == "A" {
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
runCmd(t, cloneDir, "git", "add", ".beads")
@@ -216,7 +216,7 @@ func setupClone(t *testing.T, tmpDir, remoteDir, name, bdPath string) string {
runCmd(t, cloneDir, "git", "pull", "origin", "master")
runCmd(t, cloneDir, bdCmd, "init", "--quiet", "--prefix", "test")
}
return cloneDir
}
@@ -231,13 +231,13 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
"BEADS_NO_DAEMON": "1",
"BD_NO_AUTO_IMPORT": "1",
}, getBDCommand(), "list", "--json")
jsonStart := strings.Index(listJSON, "[")
if jsonStart == -1 {
return make(map[string]bool)
}
listJSON = listJSON[jsonStart:]
var issues []struct {
Title string `json:"title"`
}
@@ -245,7 +245,7 @@ func getTitlesFromClone(t *testing.T, cloneDir string) map[string]bool {
t.Logf("Failed to parse JSON: %v", err)
return make(map[string]bool)
}
titles := make(map[string]bool)
for _, issue := range issues {
titles[issue.Title] = true
@@ -280,7 +280,7 @@ func installGitHooks(t *testing.T, repoDir string) {
hooksDir := filepath.Join(repoDir, ".git", "hooks")
// Ensure POSIX-style path for sh scripts (even on Windows)
bdCmd := strings.ReplaceAll(getBDCommand(), "\\", "/")
preCommit := fmt.Sprintf(`#!/bin/sh
%s --no-daemon export -o .beads/issues.jsonl >/dev/null 2>&1 || true
git add .beads/issues.jsonl >/dev/null 2>&1 || true
+7 -5
View File
@@ -336,8 +336,8 @@ func TestRun_Async(t *testing.T) {
outputFile := filepath.Join(tmpDir, "async_output.txt")
// Create a hook that writes to a file
hookScript := `#!/bin/sh
echo "async" > ` + outputFile
hookScript := "#!/bin/sh\n" +
"echo \"async\" > \"" + outputFile + "\"\n"
if err := os.WriteFile(hookPath, []byte(hookScript), 0755); err != nil {
t.Fatalf("Failed to create hook file: %v", err)
}
@@ -348,15 +348,17 @@ echo "async" > ` + outputFile
// Run should return immediately
runner.Run(EventClose, issue)
// Wait for the async hook to complete with retries
// Wait for the async hook to complete with retries.
// Under high test load the goroutine scheduling + exec can be delayed.
var output []byte
var err error
for i := 0; i < 10; i++ {
time.Sleep(100 * time.Millisecond)
deadline := time.Now().Add(3 * time.Second)
for time.Now().Before(deadline) {
output, err = os.ReadFile(outputFile)
if err == nil {
break
}
time.Sleep(50 * time.Millisecond)
}
if err != nil {
+43
View File
@@ -67,6 +67,49 @@ func ExtractPrefix(id string) string {
return id[:idx+1] // Include the hyphen
}
// ExtractProjectFromPath extracts the project name from a route path, i.e.
// the first path component.
// For "beads/mayor/rig", returns "beads".
// For "gastown/crew/max", returns "gastown".
// An empty path, or a path whose first component is empty (e.g. "/abs"),
// yields "".
func ExtractProjectFromPath(path string) string {
// The project is everything before the first "/"; Cut returns the whole
// string when no separator is present, which is exactly what we want for
// single-component paths.
project, _, _ := strings.Cut(path, "/")
return project
}
// ResolveToExternalRef attempts to convert a foreign issue ID to an external reference
// using routes.jsonl for prefix-based routing.
//
// If the ID's prefix matches a route, returns "external:<project>:<id>".
// Otherwise, returns empty string (no route found).
//
// Example: If routes.jsonl has {"prefix": "bd-", "path": "beads/mayor/rig"}
// then ResolveToExternalRef("bd-abc", beadsDir) returns "external:beads:bd-abc"
func ResolveToExternalRef(id, beadsDir string) string {
// Best-effort: a missing/unreadable routes file or an empty route table
// simply means "no external mapping".
routes, err := LoadRoutes(beadsDir)
if err != nil || len(routes) == 0 {
return ""
}
// IDs without a recognizable "<prefix>-" shape cannot be routed.
prefix := ExtractPrefix(id)
if prefix == "" {
return ""
}
for _, route := range routes {
if route.Prefix == prefix {
project := ExtractProjectFromPath(route.Path)
if project != "" {
return fmt.Sprintf("external:%s:%s", project, id)
}
// NOTE: a matching route whose path yields no project does not stop
// the search — a later route with the same prefix may still resolve.
}
}
return ""
}
// ResolveBeadsDirForID determines which beads directory contains the given issue ID.
// It first checks the local beads directory, then consults routes.jsonl for prefix-based routing.
//
+54
View File
@@ -88,3 +88,57 @@ func TestDetectUserRole_Fallback(t *testing.T) {
t.Errorf("DetectUserRole() = %v, want %v (fallback)", role, Contributor)
}
}
// TestExtractPrefix covers normal "<prefix>-" IDs plus the degenerate inputs:
// no hyphen, empty string, and a hyphen in the first position.
func TestExtractPrefix(t *testing.T) {
cases := []struct {
id   string
want string
}{
{id: "gt-abc123", want: "gt-"},
{id: "bd-xyz", want: "bd-"},
{id: "hq-1234", want: "hq-"},
{id: "abc123", want: ""}, // no hyphen at all
{id: "", want: ""},       // empty input
{id: "-abc", want: "-"},  // leading hyphen
}
for _, tc := range cases {
tc := tc
t.Run(tc.id, func(t *testing.T) {
if got := ExtractPrefix(tc.id); got != tc.want {
t.Errorf("ExtractPrefix(%q) = %q, want %q", tc.id, got, tc.want)
}
})
}
}
// TestExtractProjectFromPath checks multi-component, single-component, empty,
// and absolute-style route paths.
func TestExtractProjectFromPath(t *testing.T) {
cases := []struct {
path string
want string
}{
{path: "beads/mayor/rig", want: "beads"},
{path: "gastown/crew/max", want: "gastown"},
{path: "simple", want: "simple"},
{path: "", want: ""},
{path: "/absolute/path", want: ""}, // leading "/" means the first component is empty
}
for _, tc := range cases {
tc := tc
t.Run(tc.path, func(t *testing.T) {
if got := ExtractProjectFromPath(tc.path); got != tc.want {
t.Errorf("ExtractProjectFromPath(%q) = %q, want %q", tc.path, got, tc.want)
}
})
}
}
// TestResolveToExternalRef pins the no-routes fallback: a beads dir that does
// not exist has no routes.jsonl, so resolution must yield "".
func TestResolveToExternalRef(t *testing.T) {
// A full routing test would need a routes.jsonl fixture; here we only
// exercise the nonexistent-directory path.
if got := ResolveToExternalRef("bd-abc", "/nonexistent/path"); got != "" {
t.Errorf("ResolveToExternalRef() = %q, want empty string for nonexistent path", got)
}
}
+107
View File
@@ -0,0 +1,107 @@
package rpc
import (
"encoding/json"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// TestClient_GateLifecycleAndShutdown walks a gate through its full RPC
// lifecycle — create, list, show, add waiter, close — then verifies list
// filtering (open vs. all) and a clean client shutdown.
func TestClient_GateLifecycleAndShutdown(t *testing.T) {
_, client, cleanup := setupTestServer(t)
defer cleanup()
// Create a gate awaiting a human, with one initial waiter.
createResp, err := client.GateCreate(&GateCreateArgs{
Title: "Test Gate",
AwaitType: "human",
AwaitID: "",
Timeout: 5 * time.Minute,
Waiters: []string{"mayor/"},
})
if err != nil {
t.Fatalf("GateCreate: %v", err)
}
var created GateCreateResult
if err := json.Unmarshal(createResp.Data, &created); err != nil {
t.Fatalf("unmarshal GateCreateResult: %v", err)
}
if created.ID == "" {
t.Fatalf("expected created gate ID")
}
// List open gates: the new gate must be the only one.
listResp, err := client.GateList(&GateListArgs{All: false})
if err != nil {
t.Fatalf("GateList: %v", err)
}
var openGates []*types.Issue
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
t.Fatalf("unmarshal GateList: %v", err)
}
if len(openGates) != 1 || openGates[0].ID != created.ID {
t.Fatalf("unexpected open gates: %+v", openGates)
}
// Show the gate by ID and confirm it round-trips as a gate-typed issue.
showResp, err := client.GateShow(&GateShowArgs{ID: created.ID})
if err != nil {
t.Fatalf("GateShow: %v", err)
}
var gate types.Issue
if err := json.Unmarshal(showResp.Data, &gate); err != nil {
t.Fatalf("unmarshal GateShow: %v", err)
}
if gate.ID != created.ID || gate.IssueType != types.TypeGate {
t.Fatalf("unexpected gate: %+v", gate)
}
// Register a second waiter; exactly one should be reported as added.
waitResp, err := client.GateWait(&GateWaitArgs{ID: created.ID, Waiters: []string{"deacon/"}})
if err != nil {
t.Fatalf("GateWait: %v", err)
}
var waitResult GateWaitResult
if err := json.Unmarshal(waitResp.Data, &waitResult); err != nil {
t.Fatalf("unmarshal GateWaitResult: %v", err)
}
if waitResult.AddedCount != 1 {
t.Fatalf("expected 1 waiter added, got %d", waitResult.AddedCount)
}
// Close the gate and verify the returned issue reflects closed status.
closeResp, err := client.GateClose(&GateCloseArgs{ID: created.ID, Reason: "done"})
if err != nil {
t.Fatalf("GateClose: %v", err)
}
var closedGate types.Issue
if err := json.Unmarshal(closeResp.Data, &closedGate); err != nil {
t.Fatalf("unmarshal GateClose: %v", err)
}
if closedGate.Status != types.StatusClosed {
t.Fatalf("expected closed status, got %q", closedGate.Status)
}
// A closed gate must disappear from the open-only listing...
listResp, err = client.GateList(&GateListArgs{All: false})
if err != nil {
t.Fatalf("GateList open: %v", err)
}
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
t.Fatalf("unmarshal GateList open: %v", err)
}
if len(openGates) != 0 {
t.Fatalf("expected no open gates, got %+v", openGates)
}
// ...but remain visible when listing all gates.
listResp, err = client.GateList(&GateListArgs{All: true})
if err != nil {
t.Fatalf("GateList all: %v", err)
}
if err := json.Unmarshal(listResp.Data, &openGates); err != nil {
t.Fatalf("unmarshal GateList all: %v", err)
}
if len(openGates) != 1 || openGates[0].ID != created.ID {
t.Fatalf("expected 1 total gate, got %+v", openGates)
}
// Finally, the client must shut down cleanly.
if err := client.Shutdown(); err != nil {
t.Fatalf("Shutdown: %v", err)
}
}
+8 -7
View File
@@ -89,11 +89,12 @@ type CreateArgs struct {
WaitsFor string `json:"waits_for,omitempty"` // Spawner issue ID to wait for
WaitsForGate string `json:"waits_for_gate,omitempty"` // Gate type: all-children or any-children
// Messaging fields (bd-kwro)
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
Wisp bool `json:"wisp,omitempty"` // Wisp = ephemeral vapor from the Steam Engine; bulk-deleted when closed
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
Ephemeral bool `json:"ephemeral,omitempty"` // If true, not exported to JSONL; bulk-deleted when closed
RepliesTo string `json:"replies_to,omitempty"` // Issue ID for conversation threading
// ID generation (bd-hobo)
IDPrefix string `json:"id_prefix,omitempty"` // Override prefix for ID generation (mol, wisp, etc.)
IDPrefix string `json:"id_prefix,omitempty"` // Override prefix for ID generation (mol, eph, etc.)
CreatedBy string `json:"created_by,omitempty"` // Who created the issue
}
// UpdateArgs represents arguments for the update operation
@@ -114,8 +115,8 @@ type UpdateArgs struct {
RemoveLabels []string `json:"remove_labels,omitempty"`
SetLabels []string `json:"set_labels,omitempty"`
// Messaging fields (bd-kwro)
Sender *string `json:"sender,omitempty"` // Who sent this (for messages)
Wisp *bool `json:"wisp,omitempty"` // Wisp = ephemeral vapor from the Steam Engine; bulk-deleted when closed
Sender *string `json:"sender,omitempty"` // Who sent this (for messages)
Ephemeral *bool `json:"ephemeral,omitempty"` // If true, not exported to JSONL; bulk-deleted when closed
RepliesTo *string `json:"replies_to,omitempty"` // Issue ID for conversation threading
// Graph link fields (bd-fu83)
RelatesTo *string `json:"relates_to,omitempty"` // JSON array of related issue IDs
@@ -192,8 +193,8 @@ type ListArgs struct {
// Parent filtering (bd-yqhh)
ParentID string `json:"parent_id,omitempty"`
// Wisp filtering (bd-bkul)
Wisp *bool `json:"wisp,omitempty"`
// Ephemeral filtering (bd-bkul)
Ephemeral *bool `json:"ephemeral,omitempty"`
}
// CountArgs represents arguments for the count operation
+14 -8
View File
@@ -81,8 +81,8 @@ func updatesFromArgs(a UpdateArgs) map[string]interface{} {
if a.Sender != nil {
u["sender"] = *a.Sender
}
if a.Wisp != nil {
u["wisp"] = *a.Wisp
if a.Ephemeral != nil {
u["ephemeral"] = *a.Ephemeral
}
if a.RepliesTo != nil {
u["replies_to"] = *a.RepliesTo
@@ -176,11 +176,12 @@ func (s *Server) handleCreate(req *Request) Response {
EstimatedMinutes: createArgs.EstimatedMinutes,
Status: types.StatusOpen,
// Messaging fields (bd-kwro)
Sender: createArgs.Sender,
Wisp: createArgs.Wisp,
Sender: createArgs.Sender,
Ephemeral: createArgs.Ephemeral,
// NOTE: RepliesTo now handled via replies-to dependency (Decision 004)
// ID generation (bd-hobo)
IDPrefix: createArgs.IDPrefix,
IDPrefix: createArgs.IDPrefix,
CreatedBy: createArgs.CreatedBy,
}
// Check if any dependencies are discovered-from type
@@ -843,8 +844,8 @@ func (s *Server) handleList(req *Request) Response {
filter.ParentID = &listArgs.ParentID
}
// Wisp filtering (bd-bkul)
filter.Wisp = listArgs.Wisp
// Ephemeral filtering (bd-bkul)
filter.Ephemeral = listArgs.Ephemeral
// Guard against excessive ID lists to avoid SQLite parameter limits
const maxIDs = 1000
@@ -1221,12 +1222,16 @@ func (s *Server) handleShow(req *Request) Response {
}
}
// Fetch comments
comments, _ := store.GetIssueComments(ctx, issue.ID)
// Create detailed response with related data
type IssueDetails struct {
*types.Issue
Labels []string `json:"labels,omitempty"`
Dependencies []*types.IssueWithDependencyMetadata `json:"dependencies,omitempty"`
Dependents []*types.IssueWithDependencyMetadata `json:"dependents,omitempty"`
Comments []*types.Comment `json:"comments,omitempty"`
}
details := &IssueDetails{
@@ -1234,6 +1239,7 @@ func (s *Server) handleShow(req *Request) Response {
Labels: labels,
Dependencies: deps,
Dependents: dependents,
Comments: comments,
}
data, _ := json.Marshal(details)
@@ -1474,7 +1480,7 @@ func (s *Server) handleGateCreate(req *Request) Response {
Status: types.StatusOpen,
Priority: 1, // Gates are typically high priority
Assignee: "deacon/",
Wisp: true, // Gates are wisps (ephemeral)
Ephemeral: true, // Gates are wisps (ephemeral)
AwaitType: args.AwaitType,
AwaitID: args.AwaitID,
Timeout: args.Timeout,
+44
View File
@@ -1,6 +1,7 @@
package rpc
import (
"context"
"encoding/json"
"testing"
"time"
@@ -9,6 +10,49 @@ import (
"github.com/steveyegge/beads/internal/types"
)
// TestHandleCreate_SetsCreatedBy verifies that CreatedBy is passed through RPC and stored (GH#748)
func TestHandleCreate_SetsCreatedBy(t *testing.T) {
store := memory.New("/tmp/test.jsonl")
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
// Build a create request whose args carry an explicit CreatedBy.
createArgs := CreateArgs{
Title: "Test CreatedBy Field",
IssueType: "task",
Priority: 2,
CreatedBy: "test-actor",
}
createJSON, _ := json.Marshal(createArgs)
createReq := &Request{
Operation: OpCreate,
Args: createJSON,
Actor: "test-actor",
}
resp := server.handleCreate(createReq)
if !resp.Success {
t.Fatalf("create failed: %s", resp.Error)
}
var createdIssue types.Issue
if err := json.Unmarshal(resp.Data, &createdIssue); err != nil {
t.Fatalf("failed to parse response: %v", err)
}
// Verify CreatedBy was set in the response
if createdIssue.CreatedBy != "test-actor" {
t.Errorf("expected CreatedBy 'test-actor' in response, got %q", createdIssue.CreatedBy)
}
// Verify CreatedBy was persisted to storage
storedIssue, err := store.GetIssue(context.Background(), createdIssue.ID)
if err != nil {
t.Fatalf("failed to get issue from storage: %v", err)
}
if storedIssue.CreatedBy != "test-actor" {
t.Errorf("expected CreatedBy 'test-actor' in storage, got %q", storedIssue.CreatedBy)
}
}
func TestEmitMutation(t *testing.T) {
store := memory.New("/tmp/test.jsonl")
server := NewServer("/tmp/test.sock", store, "/tmp", "/tmp/test.db")
@@ -0,0 +1,921 @@
package memory
import (
"context"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
// TestMemoryStorage_LoadFromIssues_IndexesAndCounters verifies that bulk
// loading builds the external-ref index, attaches nested deps/labels/comments,
// tolerates nil entries, and seeds both the numeric ID counter and the
// hierarchical child-ID counter from the loaded IDs.
func TestMemoryStorage_LoadFromIssues_IndexesAndCounters(t *testing.T) {
store := New("/tmp/example.jsonl")
defer store.Close()
extRef := "ext-1"
issues := []*types.Issue{
nil, // nil entries must be skipped, not crash the load
{
ID: "bd-10",
Title: "Ten",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
ExternalRef: &extRef,
Dependencies: []*types.Dependency{{
IssueID: "bd-10",
DependsOnID: "bd-2",
Type: types.DepBlocks,
}},
Labels: []string{"l1"},
Comments: []*types.Comment{{ID: 1, IssueID: "bd-10", Author: "a", Text: "c"}},
},
{ID: "bd-2", Title: "Two", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
{ID: "bd-a3f8e9", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
{ID: "bd-a3f8e9.3", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
}
if err := store.LoadFromIssues(issues); err != nil {
t.Fatalf("LoadFromIssues: %v", err)
}
ctx := context.Background()
// Lookup by external ref must resolve to the loaded issue, with its
// nested dependency and label data attached.
got, err := store.GetIssueByExternalRef(ctx, "ext-1")
if err != nil {
t.Fatalf("GetIssueByExternalRef: %v", err)
}
if got == nil || got.ID != "bd-10" {
t.Fatalf("GetIssueByExternalRef got=%v", got)
}
if len(got.Dependencies) != 1 || got.Dependencies[0].DependsOnID != "bd-2" {
t.Fatalf("expected deps attached")
}
if len(got.Labels) != 1 || got.Labels[0] != "l1" {
t.Fatalf("expected labels attached")
}
// Exercise CreateIssue ID generation based on the loaded counter (bd-10 => next should be bd-11).
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
t.Fatalf("SetConfig: %v", err)
}
newIssue := &types.Issue{Title: "New", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, newIssue, "actor"); err != nil {
t.Fatalf("CreateIssue: %v", err)
}
if newIssue.ID != "bd-11" {
t.Fatalf("expected generated id bd-11, got %q", newIssue.ID)
}
// Hierarchical counter for parent extracted from bd-a3f8e9.3.
childID, err := store.GetNextChildID(ctx, "bd-a3f8e9")
if err != nil {
t.Fatalf("GetNextChildID: %v", err)
}
if childID != "bd-a3f8e9.4" {
t.Fatalf("expected bd-a3f8e9.4, got %q", childID)
}
}
// TestMemoryStorage_GetAllIssues_SortsAndCopies verifies that GetAllIssues
// returns issues sorted by ID and that the returned values are defensive
// copies — mutating a result must not change the stored issue.
func TestMemoryStorage_GetAllIssues_SortsAndCopies(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
// Create out-of-order IDs.
a := &types.Issue{ID: "bd-2", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
b := &types.Issue{ID: "bd-1", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, a, "actor"); err != nil {
t.Fatalf("CreateIssue a: %v", err)
}
if err := store.CreateIssue(ctx, b, "actor"); err != nil {
t.Fatalf("CreateIssue b: %v", err)
}
if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
all := store.GetAllIssues()
if len(all) != 2 {
t.Fatalf("expected 2 issues, got %d", len(all))
}
// Sorted by ID regardless of creation order.
if all[0].ID != "bd-1" || all[1].ID != "bd-2" {
t.Fatalf("expected sorted by ID, got %q then %q", all[0].ID, all[1].ID)
}
// Returned issues must be copies (mutating should not affect stored issue struct).
all[1].Title = "mutated"
got, err := store.GetIssue(ctx, "bd-2")
if err != nil {
t.Fatalf("GetIssue: %v", err)
}
if got.Title != "A" {
t.Fatalf("expected stored title unchanged, got %q", got.Title)
}
}
// TestMemoryStorage_CreateIssues_DefaultPrefix_DuplicateExisting_ExternalRef
// covers three batch-create behaviors: the "bd" default ID prefix when none is
// configured, external-ref indexing for explicitly-ID'd issues, and rejection
// of a batch that reuses an existing issue ID.
func TestMemoryStorage_CreateIssues_DefaultPrefix_DuplicateExisting_ExternalRef(t *testing.T) {
store := New("")
defer store.Close()
ctx := context.Background()
// Default prefix should be "bd" when unset.
issues := []*types.Issue{{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
if err := store.CreateIssues(ctx, issues, "actor"); err != nil {
t.Fatalf("CreateIssues: %v", err)
}
if issues[0].ID != "bd-1" {
t.Fatalf("expected bd-1, got %q", issues[0].ID)
}
// An issue created with an explicit ID and external ref must be findable
// via the external-ref index.
ext := "ext"
batch := []*types.Issue{{ID: "bd-x", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, ExternalRef: &ext}}
if err := store.CreateIssues(ctx, batch, "actor"); err != nil {
t.Fatalf("CreateIssues: %v", err)
}
if got, _ := store.GetIssueByExternalRef(ctx, "ext"); got == nil || got.ID != "bd-x" {
t.Fatalf("expected external ref indexed")
}
// Duplicate existing issue ID branch.
dup := []*types.Issue{{ID: "bd-x", Title: "Dup", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
if err := store.CreateIssues(ctx, dup, "actor"); err == nil {
t.Fatalf("expected duplicate existing issue error")
}
}
// TestMemoryStorage_GetIssueByExternalRef_IndexPointsToMissingIssue verifies
// that a dangling external-ref index entry (pointing at a deleted/absent
// issue) resolves to nil without error rather than panicking.
func TestMemoryStorage_GetIssueByExternalRef_IndexPointsToMissingIssue(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
// Inject the dangling entry directly into the internal index under lock
// (white-box: this state cannot be produced via the public API alone).
store.mu.Lock()
store.externalRefToID["dangling"] = "bd-nope"
store.mu.Unlock()
got, err := store.GetIssueByExternalRef(ctx, "dangling")
if err != nil {
t.Fatalf("GetIssueByExternalRef: %v", err)
}
if got != nil {
t.Fatalf("expected nil for dangling ref")
}
}
// TestMemoryStorage_DependencyCounts_Records_Tree_Cycles builds a small
// dependency graph (A->B, A->C, D->B) and checks aggregate counts, per-issue
// and all-issue dependency records, the depth-limited dependency tree, and
// that cycle detection reports nothing for this acyclic graph.
func TestMemoryStorage_DependencyCounts_Records_Tree_Cycles(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
c := &types.Issue{ID: "bd-3", Title: "C", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
d := &types.Issue{ID: "bd-4", Title: "D", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
for _, iss := range []*types.Issue{a, b, c, d} {
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
}
}
// Graph edges: A blocks-on B and C; D blocks-on B.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: c.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: d.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
t.Fatalf("AddDependency: %v", err)
}
// Counts: A has 2 outgoing deps; B has 2 dependents; unknown IDs get zeros.
counts, err := store.GetDependencyCounts(ctx, []string{a.ID, b.ID, "bd-missing"})
if err != nil {
t.Fatalf("GetDependencyCounts: %v", err)
}
if counts[a.ID].DependencyCount != 2 || counts[a.ID].DependentCount != 0 {
t.Fatalf("unexpected counts for A: %+v", counts[a.ID])
}
if counts[b.ID].DependencyCount != 0 || counts[b.ID].DependentCount != 2 {
t.Fatalf("unexpected counts for B: %+v", counts[b.ID])
}
if counts["bd-missing"].DependencyCount != 0 || counts["bd-missing"].DependentCount != 0 {
t.Fatalf("unexpected counts for missing: %+v", counts["bd-missing"])
}
deps, err := store.GetDependencyRecords(ctx, a.ID)
if err != nil {
t.Fatalf("GetDependencyRecords: %v", err)
}
if len(deps) != 2 {
t.Fatalf("expected 2 deps, got %d", len(deps))
}
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
t.Fatalf("GetAllDependencyRecords: %v", err)
}
if len(allDeps[a.ID]) != 2 {
t.Fatalf("expected all deps for A")
}
// Tree from A (max depth 3): both direct deps at depth 1.
nodes, err := store.GetDependencyTree(ctx, a.ID, 3, false, false)
if err != nil {
t.Fatalf("GetDependencyTree: %v", err)
}
if len(nodes) != 2 || nodes[0].Depth != 1 {
t.Fatalf("unexpected tree: %+v", nodes)
}
// The graph is a DAG, so no cycles should be reported.
cycles, err := store.DetectCycles(ctx)
if err != nil {
t.Fatalf("DetectCycles: %v", err)
}
if cycles != nil {
t.Fatalf("expected nil cycles, got %+v", cycles)
}
}
// TestMemoryStorage_HashTracking_NoOps verifies that the memory backend's
// hash-tracking API (dirty/export/JSONL-file hashes) is implemented as benign
// no-ops: getters return empty strings and setters/clearers succeed silently.
func TestMemoryStorage_HashTracking_NoOps(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
if hash, err := store.GetDirtyIssueHash(ctx, "bd-1"); err != nil || hash != "" {
t.Fatalf("GetDirtyIssueHash: hash=%q err=%v", hash, err)
}
if hash, err := store.GetExportHash(ctx, "bd-1"); err != nil || hash != "" {
t.Fatalf("GetExportHash: hash=%q err=%v", hash, err)
}
if err := store.SetExportHash(ctx, "bd-1", "h"); err != nil {
t.Fatalf("SetExportHash: %v", err)
}
if err := store.ClearAllExportHashes(ctx); err != nil {
t.Fatalf("ClearAllExportHashes: %v", err)
}
if hash, err := store.GetJSONLFileHash(ctx); err != nil || hash != "" {
t.Fatalf("GetJSONLFileHash: hash=%q err=%v", hash, err)
}
if err := store.SetJSONLFileHash(ctx, "h"); err != nil {
t.Fatalf("SetJSONLFileHash: %v", err)
}
}
// TestMemoryStorage_LabelsAndCommentsHelpers exercises the batch label/comment
// lookup helpers: GetLabelsForIssues (skipping unknown IDs), GetIssuesByLabel,
// and GetCommentsForIssues after adding a comment.
func TestMemoryStorage_LabelsAndCommentsHelpers(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, a, "actor"); err != nil {
t.Fatalf("CreateIssue a: %v", err)
}
if err := store.CreateIssue(ctx, b, "actor"); err != nil {
t.Fatalf("CreateIssue b: %v", err)
}
if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
if err := store.AddLabel(ctx, b.ID, "l2", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
// Batch lookup: only the two labeled issues get entries; the missing ID
// contributes nothing.
labels, err := store.GetLabelsForIssues(ctx, []string{a.ID, b.ID, "bd-missing"})
if err != nil {
t.Fatalf("GetLabelsForIssues: %v", err)
}
if len(labels) != 2 {
t.Fatalf("expected 2 entries, got %d", len(labels))
}
if labels[a.ID][0] != "l1" {
t.Fatalf("unexpected labels for A: %+v", labels[a.ID])
}
// Reverse lookup: label -> issues.
issues, err := store.GetIssuesByLabel(ctx, "l1")
if err != nil {
t.Fatalf("GetIssuesByLabel: %v", err)
}
if len(issues) != 1 || issues[0].ID != a.ID {
t.Fatalf("unexpected issues: %+v", issues)
}
if _, err := store.AddIssueComment(ctx, a.ID, "author", "text"); err != nil {
t.Fatalf("AddIssueComment: %v", err)
}
comments, err := store.GetCommentsForIssues(ctx, []string{a.ID, b.ID})
if err != nil {
t.Fatalf("GetCommentsForIssues: %v", err)
}
if len(comments[a.ID]) != 1 {
t.Fatalf("expected comments for A")
}
}
// TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers is a grab-bag
// covering the memory backend's misc surface: Path/UnderlyingDB accessors,
// the unsupported-connection/transaction errors, stale-issue detection,
// comment/dirty helpers, event limiting, custom-status config parsing (with
// whitespace and empty entries), and the rename/no-op lifecycle helpers.
func TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers(t *testing.T) {
store := New("/tmp/x.jsonl")
defer store.Close()
ctx := context.Background()
if store.Path() != "/tmp/x.jsonl" {
t.Fatalf("Path mismatch")
}
// The memory backend has no SQL layer: no DB handle, no raw connection,
// no transactions.
if store.UnderlyingDB() != nil {
t.Fatalf("expected nil UnderlyingDB")
}
if _, err := store.UnderlyingConn(ctx); err == nil {
t.Fatalf("expected UnderlyingConn error")
}
if err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { return nil }); err == nil {
t.Fatalf("expected RunInTransaction error")
}
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
t.Fatalf("SetConfig: %v", err)
}
a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, a, "actor"); err != nil {
t.Fatalf("CreateIssue: %v", err)
}
// Force updated_at into the past for stale detection.
store.mu.Lock()
a.UpdatedAt = time.Now().Add(-10 * 24 * time.Hour)
store.mu.Unlock()
stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 10})
if err != nil {
t.Fatalf("GetStaleIssues: %v", err)
}
if len(stale) != 1 || stale[0].ID != a.ID {
t.Fatalf("unexpected stale: %+v", stale)
}
if err := store.AddComment(ctx, a.ID, "actor", "c"); err != nil {
t.Fatalf("AddComment: %v", err)
}
if err := store.MarkIssueDirty(ctx, a.ID); err != nil {
t.Fatalf("MarkIssueDirty: %v", err)
}
// Generate multiple events and ensure limiting returns the last N.
if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t1"}, "actor"); err != nil {
t.Fatalf("UpdateIssue: %v", err)
}
if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t2"}, "actor"); err != nil {
t.Fatalf("UpdateIssue: %v", err)
}
evs, err := store.GetEvents(ctx, a.ID, 2)
if err != nil {
t.Fatalf("GetEvents: %v", err)
}
if len(evs) != 2 {
t.Fatalf("expected 2 events, got %d", len(evs))
}
// Custom statuses: entries are trimmed and empties dropped.
if err := store.SetConfig(ctx, "status.custom", " triage, blocked , ,done "); err != nil {
t.Fatalf("SetConfig: %v", err)
}
statuses, err := store.GetCustomStatuses(ctx)
if err != nil {
t.Fatalf("GetCustomStatuses: %v", err)
}
if len(statuses) != 3 || statuses[0] != "triage" || statuses[1] != "blocked" || statuses[2] != "done" {
t.Fatalf("unexpected statuses: %+v", statuses)
}
if got := parseCustomStatuses(""); got != nil {
t.Fatalf("expected nil for empty parseCustomStatuses")
}
// Empty custom statuses.
if err := store.DeleteConfig(ctx, "status.custom"); err != nil {
t.Fatalf("DeleteConfig: %v", err)
}
statuses, err = store.GetCustomStatuses(ctx)
if err != nil {
t.Fatalf("GetCustomStatuses(empty): %v", err)
}
if statuses != nil {
t.Fatalf("expected nil statuses when unset, got %+v", statuses)
}
if _, err := store.GetEpicsEligibleForClosure(ctx); err != nil {
t.Fatalf("GetEpicsEligibleForClosure: %v", err)
}
// UpdateIssueID is unsupported here; the prefix renames are no-ops.
if err := store.UpdateIssueID(ctx, "old", "new", nil, "actor"); err == nil {
t.Fatalf("expected UpdateIssueID error")
}
if err := store.RenameDependencyPrefix(ctx, "old", "new"); err != nil {
t.Fatalf("RenameDependencyPrefix: %v", err)
}
if err := store.RenameCounterPrefix(ctx, "old", "new"); err != nil {
t.Fatalf("RenameCounterPrefix: %v", err)
}
}
// TestMemoryStorage_AddLabelAndAddDependency_ErrorPaths covers the failure
// branches: labeling a missing issue errors, re-adding an existing label is a
// no-op, and AddDependency rejects either endpoint being unknown.
func TestMemoryStorage_AddLabelAndAddDependency_ErrorPaths(t *testing.T) {
store := setupTestMemory(t)
defer store.Close()
ctx := context.Background()
issue := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
if err := store.CreateIssue(ctx, issue, "actor"); err != nil {
t.Fatalf("CreateIssue: %v", err)
}
if err := store.AddLabel(ctx, "bd-missing", "l", "actor"); err == nil {
t.Fatalf("expected AddLabel error for missing issue")
}
if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
t.Fatalf("AddLabel: %v", err)
}
// Duplicate label is a no-op.
if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
t.Fatalf("AddLabel duplicate: %v", err)
}
// AddDependency error paths.
if err := store.AddDependency(ctx, &types.Dependency{IssueID: "bd-missing", DependsOnID: issue.ID, Type: types.DepBlocks}, "actor"); err == nil {
t.Fatalf("expected AddDependency error for missing IssueID")
}
if err := store.AddDependency(ctx, &types.Dependency{IssueID: issue.ID, DependsOnID: "bd-missing", Type: types.DepBlocks}, "actor"); err == nil {
t.Fatalf("expected AddDependency error for missing DependsOnID")
}
}
// TestMemoryStorage_GetNextChildID_Errors covers the failure paths of
// GetNextChildID: an unknown parent ID, and a parent that already sits at the
// maximum allowed nesting depth.
func TestMemoryStorage_GetNextChildID_Errors(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	// Requesting a child of a non-existent parent must fail.
	if _, err := store.GetNextChildID(ctx, "bd-missing"); err == nil {
		t.Fatalf("expected error for missing parent")
	}

	// "bd-1.1.1.1" is nested as deeply as the store permits, so allocating a
	// further child must also fail.
	deepIssue := &types.Issue{ID: "bd-1.1.1.1", Title: "Deep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, deepIssue, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	if _, err := store.GetNextChildID(ctx, deepIssue.ID); err == nil {
		t.Fatalf("expected max depth error")
	}
}
// TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments verifies that
// GetAllIssues returns issues with their dependency edges and comments
// populated rather than bare records.
func TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	issueA := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	issueB := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, issueA, "actor"); err != nil {
		t.Fatalf("CreateIssue a: %v", err)
	}
	if err := store.CreateIssue(ctx, issueB, "actor"); err != nil {
		t.Fatalf("CreateIssue b: %v", err)
	}

	// A blocks-on B, and A carries a single comment.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: issueA.ID, DependsOnID: issueB.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	if _, err := store.AddIssueComment(ctx, issueA.ID, "author", "text"); err != nil {
		t.Fatalf("AddIssueComment: %v", err)
	}

	// Locate issue A within the full listing.
	var gotA *types.Issue
	for _, candidate := range store.GetAllIssues() {
		if candidate.ID == issueA.ID {
			gotA = candidate
			break
		}
	}
	if gotA == nil {
		t.Fatalf("expected to find issue A")
	}
	if len(gotA.Dependencies) != 1 || gotA.Dependencies[0].DependsOnID != issueB.ID {
		t.Fatalf("expected deps attached")
	}
	if len(gotA.Comments) != 1 || gotA.Comments[0].Text != "text" {
		t.Fatalf("expected comments attached")
	}
}
// TestMemoryStorage_GetStaleIssues_FilteringAndLimit checks that stale-issue
// queries honor the status filter, leave out the closed issue (even though it
// is the most stale), order results oldest first, and respect Limit.
func TestMemoryStorage_GetStaleIssues_FilteringAndLimit(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	oldIssue := &types.Issue{ID: "bd-1", Title: "Old", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	newerIssue := &types.Issue{ID: "bd-2", Title: "Newer", Status: types.StatusInProgress, Priority: 1, IssueType: types.TypeTask}
	closedIssue := &types.Issue{ID: "bd-3", Title: "Closed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{oldIssue, newerIssue, closedIssue} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, closedIssue.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}

	// Backdate UpdatedAt directly (under the store lock, bypassing the public
	// API) so all three issues read as stale relative to now.
	ages := map[string]time.Duration{
		oldIssue.ID:    20 * 24 * time.Hour,
		newerIssue.ID:  10 * 24 * time.Hour,
		closedIssue.ID: 30 * 24 * time.Hour,
	}
	store.mu.Lock()
	for id, age := range ages {
		store.issues[id].UpdatedAt = time.Now().Add(-age)
	}
	store.mu.Unlock()

	// Status filter: only the in_progress issue qualifies.
	stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Status: "in_progress"})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != newerIssue.ID {
		t.Fatalf("unexpected stale filtered: %+v", stale)
	}

	// With Limit 1 the oldest (non-closed) issue is returned first.
	stale, err = store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 1})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != oldIssue.ID {
		t.Fatalf("expected oldest stale first, got %+v", stale)
	}
}
// TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting verifies that
// GetStatistics counts an open epic as eligible for closure once every one of
// its parent-child children has been closed.
func TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	epic := &types.Issue{ID: "bd-1", Title: "Epic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	childOne := &types.Issue{ID: "bd-2", Title: "Child1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	childTwo := &types.Issue{ID: "bd-3", Title: "Child2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{epic, childOne, childTwo} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}

	// Close both children before wiring them to the epic.
	if err := store.CloseIssue(ctx, childOne.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue c1: %v", err)
	}
	if err := store.CloseIssue(ctx, childTwo.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue c2: %v", err)
	}

	// Parent-child edges point from each child to the epic.
	for _, child := range []*types.Issue{childOne, childTwo} {
		if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: epic.ID, Type: types.DepParentChild}, "actor"); err != nil {
			t.Fatalf("AddDependency: %v", err)
		}
	}

	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics: %v", err)
	}
	if stats.EpicsEligibleForClosure != 1 {
		t.Fatalf("expected 1 epic eligible, got %d", stats.EpicsEligibleForClosure)
	}
}
// TestMemoryStorage_UpdateIssue_SearchIssues_ReadyWork_BlockedIssues is a
// broad scenario test covering UpdateIssue field handling (nil assignee,
// external_ref remap and clear, ClosedAt set/clear on status changes),
// SearchIssues filters (text query, label AND/OR, IDs, ParentID, Limit),
// GetReadyWork filters and sort policies, and GetBlockedIssues. The
// assertions depend on the exact fixture below: child is blocked by blocker,
// pinned and workflow issues are excluded from default ready-work results.
func TestMemoryStorage_UpdateIssue_SearchIssues_ReadyWork_BlockedIssues(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()
	now := time.Now()
	assignee := "alice"
	// Fixture: an epic parent, an assigned child, a higher-priority blocker
	// task, a pinned task, and a merge-request "workflow" issue.
	parent := &types.Issue{ID: "bd-1", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	child := &types.Issue{ID: "bd-2", Title: "Child", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask, Assignee: assignee}
	blocker := &types.Issue{ID: "bd-3", Title: "Blocker", Status: types.StatusOpen, Priority: 3, IssueType: types.TypeTask}
	pinned := &types.Issue{ID: "bd-4", Title: "Pinned", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, Pinned: true}
	workflow := &types.Issue{ID: "bd-5", Title: "Workflow", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeMergeRequest}
	for _, iss := range []*types.Issue{parent, child, blocker, pinned, workflow} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	// Make created_at deterministic for sorting. Written directly to the
	// internal map under the store lock, bypassing the public API.
	store.mu.Lock()
	store.issues[parent.ID].CreatedAt = now.Add(-100 * time.Hour)
	store.issues[child.ID].CreatedAt = now.Add(-1 * time.Hour)
	store.issues[blocker.ID].CreatedAt = now.Add(-2 * time.Hour)
	store.issues[pinned.ID].CreatedAt = now.Add(-3 * time.Hour)
	store.issues[workflow.ID].CreatedAt = now.Add(-4 * time.Hour)
	store.mu.Unlock()
	// Dependencies: child is a child of parent; child is blocked by blocker.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: parent.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency parent-child: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency blocks: %v", err)
	}
	// AddDependency duplicate error path.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err == nil {
		t.Fatalf("expected duplicate dependency error")
	}
	// UpdateIssue: exercise assignee nil, external_ref update+clear, and closed_at behavior.
	// Seed an existing external ref directly so the update below has an old
	// mapping to replace.
	ext := "old-ext"
	store.mu.Lock()
	store.issues[child.ID].ExternalRef = &ext
	store.externalRefToID[ext] = child.ID
	store.mu.Unlock()
	if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"assignee": nil, "external_ref": "new-ext"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	// The old external-ref mapping must be dropped and the new one installed.
	if got, _ := store.GetIssueByExternalRef(ctx, "old-ext"); got != nil {
		t.Fatalf("expected old-ext removed")
	}
	if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got == nil || got.ID != child.ID {
		t.Fatalf("expected new-ext mapping")
	}
	// Closing via a status update sets ClosedAt.
	if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
		t.Fatalf("UpdateIssue close: %v", err)
	}
	closed, _ := store.GetIssue(ctx, child.ID)
	if closed.ClosedAt == nil {
		t.Fatalf("expected ClosedAt set")
	}
	// Reopening clears ClosedAt; external_ref nil clears the mapping.
	if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusOpen), "external_ref": nil}, "actor"); err != nil {
		t.Fatalf("UpdateIssue reopen: %v", err)
	}
	reopened, _ := store.GetIssue(ctx, child.ID)
	if reopened.ClosedAt != nil {
		t.Fatalf("expected ClosedAt cleared")
	}
	if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got != nil {
		t.Fatalf("expected new-ext cleared")
	}
	// SearchIssues: query, label AND/OR, IDs filter, ParentID filter, limit.
	// Labels: parent has l1; child has both l1 and l2.
	if err := store.AddLabel(ctx, parent.ID, "l1", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	if err := store.AddLabel(ctx, child.ID, "l1", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	if err := store.AddLabel(ctx, child.ID, "l2", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	st := types.StatusOpen
	// Text query + status filter matches only the parent.
	res, err := store.SearchIssues(ctx, "parent", types.IssueFilter{Status: &st})
	if err != nil {
		t.Fatalf("SearchIssues: %v", err)
	}
	if len(res) != 1 || res[0].ID != parent.ID {
		t.Fatalf("unexpected SearchIssues results: %+v", res)
	}
	// Labels is an AND filter: only child carries both l1 and l2.
	res, err = store.SearchIssues(ctx, "", types.IssueFilter{Labels: []string{"l1", "l2"}})
	if err != nil {
		t.Fatalf("SearchIssues labels AND: %v", err)
	}
	if len(res) != 1 || res[0].ID != child.ID {
		t.Fatalf("unexpected labels AND results: %+v", res)
	}
	// Explicit IDs filter.
	res, err = store.SearchIssues(ctx, "", types.IssueFilter{IDs: []string{child.ID}})
	if err != nil {
		t.Fatalf("SearchIssues IDs: %v", err)
	}
	if len(res) != 1 || res[0].ID != child.ID {
		t.Fatalf("unexpected IDs results: %+v", res)
	}
	// ParentID filter returns the children of parent.
	res, err = store.SearchIssues(ctx, "", types.IssueFilter{ParentID: &parent.ID})
	if err != nil {
		t.Fatalf("SearchIssues ParentID: %v", err)
	}
	if len(res) != 1 || res[0].ID != child.ID {
		t.Fatalf("unexpected ParentID results: %+v", res)
	}
	// LabelsAny is an OR filter; Limit caps the result count.
	res, err = store.SearchIssues(ctx, "", types.IssueFilter{LabelsAny: []string{"l2", "missing"}, Limit: 1})
	if err != nil {
		t.Fatalf("SearchIssues labels OR: %v", err)
	}
	if len(res) != 1 {
		t.Fatalf("expected limit 1")
	}
	// Ready work: child is blocked, pinned excluded, workflow excluded by default.
	ready, err := store.GetReadyWork(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("GetReadyWork: %v", err)
	}
	if len(ready) != 2 { // parent + blocker
		t.Fatalf("expected 2 ready issues, got %d: %+v", len(ready), ready)
	}
	// Filter by workflow type explicitly.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Type: string(types.TypeMergeRequest)})
	if err != nil {
		t.Fatalf("GetReadyWork type: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != workflow.ID {
		t.Fatalf("expected only workflow issue, got %+v", ready)
	}
	// Status + priority filters.
	prio := 3
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Status: types.StatusOpen, Priority: &prio})
	if err != nil {
		t.Fatalf("GetReadyWork status+priority: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != blocker.ID {
		t.Fatalf("expected blocker only, got %+v", ready)
	}
	// Label filters.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Labels: []string{"l1"}})
	if err != nil {
		t.Fatalf("GetReadyWork labels AND: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != parent.ID {
		t.Fatalf("expected parent only, got %+v", ready)
	}
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{LabelsAny: []string{"l2"}})
	if err != nil {
		t.Fatalf("GetReadyWork labels OR: %v", err)
	}
	if len(ready) != 0 {
		t.Fatalf("expected 0 because only l2 issue is blocked")
	}
	// Assignee filter vs Unassigned precedence.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Assignee: &assignee})
	if err != nil {
		t.Fatalf("GetReadyWork assignee: %v", err)
	}
	if len(ready) != 0 {
		t.Fatalf("expected 0 due to child being blocked")
	}
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{Unassigned: true})
	if err != nil {
		t.Fatalf("GetReadyWork unassigned: %v", err)
	}
	for _, iss := range ready {
		if iss.Assignee != "" {
			t.Fatalf("expected unassigned only")
		}
	}
	// Sort policies + limit.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyOldest, Limit: 1})
	if err != nil {
		t.Fatalf("GetReadyWork oldest: %v", err)
	}
	if len(ready) != 1 || ready[0].ID != parent.ID {
		t.Fatalf("expected oldest=parent, got %+v", ready)
	}
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyPriority})
	if err != nil {
		t.Fatalf("GetReadyWork priority: %v", err)
	}
	if len(ready) < 2 || ready[0].Priority > ready[1].Priority {
		t.Fatalf("expected priority sort")
	}
	// Hybrid: recent issues first.
	ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyHybrid})
	if err != nil {
		t.Fatalf("GetReadyWork hybrid: %v", err)
	}
	if len(ready) != 2 || ready[0].ID != blocker.ID {
		t.Fatalf("expected recent (blocker) first in hybrid, got %+v", ready)
	}
	// Blocked issues: child is blocked by an open blocker.
	blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("GetBlockedIssues: %v", err)
	}
	if len(blocked) != 1 || blocked[0].ID != child.ID || blocked[0].BlockedByCount != 1 {
		t.Fatalf("unexpected blocked issues: %+v", blocked)
	}
	// Cover getOpenBlockers missing-blocker branch.
	missing := &types.Issue{ID: "bd-6", Title: "Missing blocker dep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, missing, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	// Bypass AddDependency validation to cover the missing-blocker branch in getOpenBlockers.
	store.mu.Lock()
	store.dependencies[missing.ID] = append(store.dependencies[missing.ID], &types.Dependency{IssueID: missing.ID, DependsOnID: "bd-does-not-exist", Type: types.DepBlocks})
	store.mu.Unlock()
	blocked, err = store.GetBlockedIssues(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("GetBlockedIssues: %v", err)
	}
	if len(blocked) != 2 {
		t.Fatalf("expected 2 blocked issues, got %d", len(blocked))
	}
}
// TestMemoryStorage_UpdateIssue_CoversMoreFields exercises UpdateIssue across
// the remaining field kinds (text fields, priority, issue type, assignee,
// status) and verifies that re-closing an already-closed issue preserves its
// ClosedAt timestamp.
//
// Fix: the original discarded the error from every GetIssue call
// (`got, _ := ...`), so a lookup failure would surface as a nil-pointer panic
// on the following dereference instead of a clean test failure. Each lookup
// now checks its error before the result is used.
func TestMemoryStorage_UpdateIssue_CoversMoreFields(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	iss := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}

	// Update every mutable field in a single call.
	if err := store.UpdateIssue(ctx, iss.ID, map[string]interface{}{
		"description":         "d",
		"design":              "design",
		"acceptance_criteria": "ac",
		"notes":               "n",
		"priority":            2,
		"issue_type":          string(types.TypeBug),
		"assignee":            "bob",
		"status":              string(types.StatusInProgress),
	}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	got, err := store.GetIssue(ctx, iss.ID)
	if err != nil {
		t.Fatalf("GetIssue: %v", err)
	}
	if got.Description != "d" || got.Design != "design" || got.AcceptanceCriteria != "ac" || got.Notes != "n" {
		t.Fatalf("expected text fields updated")
	}
	if got.Priority != 2 || got.IssueType != types.TypeBug || got.Assignee != "bob" || got.Status != types.StatusInProgress {
		t.Fatalf("expected fields updated")
	}

	// Status closed when already closed should not clear ClosedAt.
	if err := store.CloseIssue(ctx, iss.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}
	closedOnce, err := store.GetIssue(ctx, iss.ID)
	if err != nil {
		t.Fatalf("GetIssue after close: %v", err)
	}
	if closedOnce.ClosedAt == nil {
		t.Fatalf("expected ClosedAt")
	}
	if err := store.UpdateIssue(ctx, iss.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
		t.Fatalf("UpdateIssue closed->closed: %v", err)
	}
	closedTwice, err := store.GetIssue(ctx, iss.ID)
	if err != nil {
		t.Fatalf("GetIssue after second close: %v", err)
	}
	if closedTwice.ClosedAt == nil {
		t.Fatalf("expected ClosedAt preserved")
	}
}
// TestMemoryStorage_CountEpicsEligibleForClosure_CoversBranches drives the
// epic-eligibility counter through its branches: an already-closed epic does
// not count, a parent-child edge to a non-epic does not count, a dangling
// parent reference is ignored, and an open epic whose only child is closed
// counts exactly once.
func TestMemoryStorage_CountEpicsEligibleForClosure_CoversBranches(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	openEpic := &types.Issue{ID: "bd-1", Title: "Epic1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	closedEpic := &types.Issue{ID: "bd-2", Title: "EpicClosed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	plainTask := &types.Issue{ID: "bd-3", Title: "NotEpic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	child := &types.Issue{ID: "bd-4", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{openEpic, closedEpic, plainTask, child} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, closedEpic.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}

	// child -> openEpic: becomes eligible once child is closed.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: openEpic.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	// child -> plainTask: a non-epic parent must not be counted.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: plainTask.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	// child -> missing parent: injected directly under the store lock because
	// AddDependency would reject the dangling reference.
	store.mu.Lock()
	store.dependencies[child.ID] = append(store.dependencies[child.ID], &types.Dependency{IssueID: child.ID, DependsOnID: "bd-missing", Type: types.DepParentChild})
	store.mu.Unlock()

	// Closing the only child makes openEpic eligible.
	if err := store.CloseIssue(ctx, child.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue child: %v", err)
	}
	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics: %v", err)
	}
	if stats.EpicsEligibleForClosure != 1 {
		t.Fatalf("expected 1 eligible epic, got %d", stats.EpicsEligibleForClosure)
	}
}
// TestExtractParentAndChildNumber_CoversFailures confirms that malformed IDs
// (no dot separator, or a non-numeric child suffix) are reported via ok=false.
func TestExtractParentAndChildNumber_CoversFailures(t *testing.T) {
	for _, id := range []string{"no-dot", "parent.bad"} {
		if _, _, ok := extractParentAndChildNumber(id); ok {
			t.Fatalf("expected ok=false")
		}
	}
}
+2 -2
View File
@@ -885,7 +885,7 @@ func (s *SQLiteStorage) scanIssues(ctx context.Context, rows *sql.Rows) ([]*type
issue.Sender = sender.String
}
if wisp.Valid && wisp.Int64 != 0 {
issue.Wisp = true
issue.Ephemeral = true
}
// Pinned field (bd-7h5)
if pinned.Valid && pinned.Int64 != 0 {
@@ -1006,7 +1006,7 @@ func (s *SQLiteStorage) scanIssuesWithDependencyType(ctx context.Context, rows *
issue.Sender = sender.String
}
if wisp.Valid && wisp.Int64 != 0 {
issue.Wisp = true
issue.Ephemeral = true
}
// Pinned field (bd-7h5)
if pinned.Valid && pinned.Int64 != 0 {
+11 -11
View File
@@ -295,7 +295,7 @@ func TestRepliesTo(t *testing.T) {
IssueType: types.TypeMessage,
Sender: "alice",
Assignee: "bob",
Wisp: true,
Ephemeral: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
@@ -307,7 +307,7 @@ func TestRepliesTo(t *testing.T) {
IssueType: types.TypeMessage,
Sender: "bob",
Assignee: "alice",
Wisp: true,
Ephemeral: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
@@ -363,7 +363,7 @@ func TestRepliesTo_Chain(t *testing.T) {
IssueType: types.TypeMessage,
Sender: "user",
Assignee: "inbox",
Wisp: true,
Ephemeral: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
@@ -415,7 +415,7 @@ func TestWispField(t *testing.T) {
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeMessage,
Wisp: true,
Ephemeral: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
@@ -426,7 +426,7 @@ func TestWispField(t *testing.T) {
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeTask,
Wisp: false,
Ephemeral: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
@@ -443,7 +443,7 @@ func TestWispField(t *testing.T) {
if err != nil {
t.Fatalf("GetIssue failed: %v", err)
}
if !savedWisp.Wisp {
if !savedWisp.Ephemeral {
t.Error("Wisp issue should have Wisp=true")
}
@@ -451,7 +451,7 @@ func TestWispField(t *testing.T) {
if err != nil {
t.Fatalf("GetIssue failed: %v", err)
}
if savedPermanent.Wisp {
if savedPermanent.Ephemeral {
t.Error("Permanent issue should have Wisp=false")
}
}
@@ -468,7 +468,7 @@ func TestWispFilter(t *testing.T) {
Status: types.StatusClosed, // Closed for cleanup test
Priority: 2,
IssueType: types.TypeMessage,
Wisp: true,
Ephemeral: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
@@ -483,7 +483,7 @@ func TestWispFilter(t *testing.T) {
Status: types.StatusClosed,
Priority: 2,
IssueType: types.TypeTask,
Wisp: false,
Ephemeral: false,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
@@ -497,7 +497,7 @@ func TestWispFilter(t *testing.T) {
closedStatus := types.StatusClosed
wispFilter := types.IssueFilter{
Status: &closedStatus,
Wisp: &wispTrue,
Ephemeral: &wispTrue,
}
wispIssues, err := store.SearchIssues(ctx, "", wispFilter)
@@ -512,7 +512,7 @@ func TestWispFilter(t *testing.T) {
wispFalse := false
nonWispFilter := types.IssueFilter{
Status: &closedStatus,
Wisp: &wispFalse,
Ephemeral: &wispFalse,
}
permanentIssues, err := store.SearchIssues(ctx, "", nonWispFilter)
+2 -2
View File
@@ -28,7 +28,7 @@ func insertIssue(ctx context.Context, conn *sql.Conn, issue *types.Issue) error
}
wisp := 0
if issue.Wisp {
if issue.Ephemeral {
wisp = 1
}
pinned := 0
@@ -94,7 +94,7 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
}
wisp := 0
if issue.Wisp {
if issue.Ephemeral {
wisp = 1
}
pinned := 0
@@ -20,10 +20,6 @@ func MigrateMessagingFields(db *sql.DB) error {
}{
{"sender", "TEXT DEFAULT ''"},
{"ephemeral", "INTEGER DEFAULT 0"},
{"replies_to", "TEXT DEFAULT ''"},
{"relates_to", "TEXT DEFAULT ''"},
{"duplicate_of", "TEXT DEFAULT ''"},
{"superseded_by", "TEXT DEFAULT ''"},
}
for _, col := range columns {
@@ -59,11 +55,5 @@ func MigrateMessagingFields(db *sql.DB) error {
return fmt.Errorf("failed to create sender index: %w", err)
}
// Add index for replies_to (for efficient thread queries)
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_replies_to ON issues(replies_to) WHERE replies_to != ''`)
if err != nil {
return fmt.Errorf("failed to create replies_to index: %w", err)
}
return nil
}
@@ -21,137 +21,176 @@ import (
func MigrateEdgeFields(db *sql.DB) error {
now := time.Now()
hasColumn := func(name string) (bool, error) {
var exists bool
err := db.QueryRow(`
SELECT COUNT(*) > 0
FROM pragma_table_info('issues')
WHERE name = ?
`, name).Scan(&exists)
return exists, err
}
hasRepliesTo, err := hasColumn("replies_to")
if err != nil {
return fmt.Errorf("failed to check replies_to column: %w", err)
}
hasRelatesTo, err := hasColumn("relates_to")
if err != nil {
return fmt.Errorf("failed to check relates_to column: %w", err)
}
hasDuplicateOf, err := hasColumn("duplicate_of")
if err != nil {
return fmt.Errorf("failed to check duplicate_of column: %w", err)
}
hasSupersededBy, err := hasColumn("superseded_by")
if err != nil {
return fmt.Errorf("failed to check superseded_by column: %w", err)
}
if !hasRepliesTo && !hasRelatesTo && !hasDuplicateOf && !hasSupersededBy {
return nil
}
// Migrate replies_to fields to replies-to edges
// For thread_id, use the parent's ID as the thread root for first-level replies
// (more sophisticated thread detection would require recursive queries)
rows, err := db.Query(`
SELECT id, replies_to
FROM issues
WHERE replies_to != '' AND replies_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query replies_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, repliesTo string
if err := rows.Scan(&issueID, &repliesTo); err != nil {
return fmt.Errorf("failed to scan replies_to row: %w", err)
}
// Use repliesTo as thread_id (the root of the thread)
// This is a simplification - existing threads will have the parent as thread root
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
`, issueID, repliesTo, now, repliesTo)
if hasRepliesTo {
rows, err := db.Query(`
SELECT id, replies_to
FROM issues
WHERE replies_to != '' AND replies_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query replies_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, repliesTo string
if err := rows.Scan(&issueID, &repliesTo); err != nil {
return fmt.Errorf("failed to scan replies_to row: %w", err)
}
// Use repliesTo as thread_id (the root of the thread)
// This is a simplification - existing threads will have the parent as thread root
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
`, issueID, repliesTo, now, repliesTo)
if err != nil {
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating replies_to rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating replies_to rows: %w", err)
}
// Migrate relates_to fields to relates-to edges
// relates_to is stored as JSON array string
rows, err = db.Query(`
SELECT id, relates_to
FROM issues
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query relates_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, relatesTo string
if err := rows.Scan(&issueID, &relatesTo); err != nil {
return fmt.Errorf("failed to scan relates_to row: %w", err)
if hasRelatesTo {
rows, err := db.Query(`
SELECT id, relates_to
FROM issues
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query relates_to fields: %w", err)
}
defer rows.Close()
// Parse JSON array
var relatedIDs []string
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
// Skip malformed JSON
continue
}
for rows.Next() {
var issueID, relatesTo string
if err := rows.Scan(&issueID, &relatesTo); err != nil {
return fmt.Errorf("failed to scan relates_to row: %w", err)
}
for _, relatedID := range relatedIDs {
if relatedID == "" {
// Parse JSON array
var relatedIDs []string
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
// Skip malformed JSON
continue
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
`, issueID, relatedID, now)
if err != nil {
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
for _, relatedID := range relatedIDs {
if relatedID == "" {
continue
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
`, issueID, relatedID, now)
if err != nil {
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
}
}
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating relates_to rows: %w", err)
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating relates_to rows: %w", err)
}
}
// Migrate duplicate_of fields to duplicates edges
rows, err = db.Query(`
SELECT id, duplicate_of
FROM issues
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, duplicateOf string
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
`, issueID, duplicateOf, now)
if hasDuplicateOf {
rows, err := db.Query(`
SELECT id, duplicate_of
FROM issues
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, duplicateOf string
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
`, issueID, duplicateOf, now)
if err != nil {
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
}
// Migrate superseded_by fields to supersedes edges
rows, err = db.Query(`
SELECT id, superseded_by
FROM issues
WHERE superseded_by != '' AND superseded_by IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query superseded_by fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, supersededBy string
if err := rows.Scan(&issueID, &supersededBy); err != nil {
return fmt.Errorf("failed to scan superseded_by row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
`, issueID, supersededBy, now)
if hasSupersededBy {
rows, err := db.Query(`
SELECT id, superseded_by
FROM issues
WHERE superseded_by != '' AND superseded_by IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query superseded_by fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, supersededBy string
if err := rows.Scan(&issueID, &supersededBy); err != nil {
return fmt.Errorf("failed to scan superseded_by row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
`, issueID, supersededBy, now)
if err != nil {
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating superseded_by rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating superseded_by rows: %w", err)
}
return nil
@@ -57,6 +57,57 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
return nil
}
// Preserve newer columns if they already exist (migration may run on partially-migrated DBs).
hasPinned, err := checkCol("pinned")
if err != nil {
return fmt.Errorf("failed to check pinned column: %w", err)
}
hasIsTemplate, err := checkCol("is_template")
if err != nil {
return fmt.Errorf("failed to check is_template column: %w", err)
}
hasAwaitType, err := checkCol("await_type")
if err != nil {
return fmt.Errorf("failed to check await_type column: %w", err)
}
hasAwaitID, err := checkCol("await_id")
if err != nil {
return fmt.Errorf("failed to check await_id column: %w", err)
}
hasTimeoutNs, err := checkCol("timeout_ns")
if err != nil {
return fmt.Errorf("failed to check timeout_ns column: %w", err)
}
hasWaiters, err := checkCol("waiters")
if err != nil {
return fmt.Errorf("failed to check waiters column: %w", err)
}
pinnedExpr := "0"
if hasPinned {
pinnedExpr = "pinned"
}
isTemplateExpr := "0"
if hasIsTemplate {
isTemplateExpr = "is_template"
}
awaitTypeExpr := "''"
if hasAwaitType {
awaitTypeExpr = "await_type"
}
awaitIDExpr := "''"
if hasAwaitID {
awaitIDExpr = "await_id"
}
timeoutNsExpr := "0"
if hasTimeoutNs {
timeoutNsExpr = "timeout_ns"
}
waitersExpr := "''"
if hasWaiters {
waitersExpr = "waiters"
}
// SQLite 3.35.0+ supports DROP COLUMN, but we use table recreation for compatibility
// This is idempotent - we recreate the table without the deprecated columns
@@ -117,6 +168,12 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
original_type TEXT DEFAULT '',
sender TEXT DEFAULT '',
ephemeral INTEGER DEFAULT 0,
pinned INTEGER DEFAULT 0,
is_template INTEGER DEFAULT 0,
await_type TEXT,
await_id TEXT,
timeout_ns INTEGER,
waiters TEXT,
close_reason TEXT DEFAULT '',
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
)
@@ -132,7 +189,8 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
notes, status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref, source_repo, compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at,
deleted_by, delete_reason, original_type, sender, ephemeral, close_reason
deleted_by, delete_reason, original_type, sender, ephemeral, pinned, is_template,
await_type, await_id, timeout_ns, waiters, close_reason
)
SELECT
id, content_hash, title, description, design, acceptance_criteria,
@@ -140,9 +198,11 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
created_at, updated_at, closed_at, external_ref, COALESCE(source_repo, ''), compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at,
deleted_by, delete_reason, original_type, sender, ephemeral,
%s, %s,
%s, %s, %s, %s,
COALESCE(close_reason, '')
FROM issues
`)
`, pinnedExpr, isTemplateExpr, awaitTypeExpr, awaitIDExpr, timeoutNsExpr, waitersExpr)
if err != nil {
return fmt.Errorf("failed to copy issues data: %w", err)
}
@@ -20,6 +20,11 @@ func MigratePinnedColumn(db *sql.DB) error {
}
if columnExists {
// Column exists (e.g. created by new schema); ensure index exists.
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`)
if err != nil {
return fmt.Errorf("failed to create pinned index: %w", err)
}
return nil
}
@@ -21,6 +21,11 @@ func MigrateIsTemplateColumn(db *sql.DB) error {
}
if columnExists {
// Column exists (e.g. created by new schema); ensure index exists.
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`)
if err != nil {
return fmt.Errorf("failed to create is_template index: %w", err)
}
return nil
}
@@ -3,6 +3,7 @@ package migrations
import (
"database/sql"
"fmt"
"strings"
)
// MigrateTombstoneClosedAt updates the closed_at constraint to allow tombstones
@@ -22,8 +23,20 @@ func MigrateTombstoneClosedAt(db *sql.DB) error {
// SQLite doesn't support ALTER TABLE to modify CHECK constraints
// We must recreate the table with the new constraint
// Idempotency check: see if the new CHECK constraint already exists
// The new constraint contains "status = 'tombstone'" which the old one didn't
var tableSql string
err := db.QueryRow(`SELECT sql FROM sqlite_master WHERE type='table' AND name='issues'`).Scan(&tableSql)
if err != nil {
return fmt.Errorf("failed to get issues table schema: %w", err)
}
// If the schema already has the tombstone clause, migration is already applied
if strings.Contains(tableSql, "status = 'tombstone'") || strings.Contains(tableSql, `status = "tombstone"`) {
return nil
}
// Step 0: Drop views that depend on the issues table
_, err := db.Exec(`DROP VIEW IF EXISTS ready_issues`)
_, err = db.Exec(`DROP VIEW IF EXISTS ready_issues`)
if err != nil {
return fmt.Errorf("failed to drop ready_issues view: %w", err)
}
@@ -48,6 +61,7 @@ func MigrateTombstoneClosedAt(db *sql.DB) error {
assignee TEXT,
estimated_minutes INTEGER,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT DEFAULT '',
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
closed_at DATETIME,
external_ref TEXT,
@@ -81,10 +95,73 @@ func MigrateTombstoneClosedAt(db *sql.DB) error {
}
// Step 2: Copy data from old table to new table
_, err = db.Exec(`
INSERT INTO issues_new
SELECT * FROM issues
`)
// We need to check if created_by column exists in the old table
// If not, we insert a default empty string for it
var hasCreatedBy bool
rows, err := db.Query(`PRAGMA table_info(issues)`)
if err != nil {
return fmt.Errorf("failed to get table info: %w", err)
}
for rows.Next() {
var cid int
var name, ctype string
var notnull, pk int
var dflt interface{}
if err := rows.Scan(&cid, &name, &ctype, &notnull, &dflt, &pk); err != nil {
rows.Close()
return fmt.Errorf("failed to scan table info: %w", err)
}
if name == "created_by" {
hasCreatedBy = true
break
}
}
rows.Close()
var insertSQL string
if hasCreatedBy {
// Old table has created_by, copy all columns directly
insertSQL = `
INSERT INTO issues_new (
id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes, created_at,
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
is_template, await_type, await_id, timeout_ns, waiters
)
SELECT
id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes, created_at,
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
is_template, await_type, await_id, timeout_ns, waiters
FROM issues
`
} else {
// Old table doesn't have created_by, use empty string default
insertSQL = `
INSERT INTO issues_new (
id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes, created_at,
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
is_template, await_type, await_id, timeout_ns, waiters
)
SELECT
id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes, created_at,
'', updated_at, closed_at, external_ref, source_repo, compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
is_template, await_type, await_id, timeout_ns, waiters
FROM issues
`
}
_, err = db.Exec(insertSQL)
if err != nil {
return fmt.Errorf("failed to copy issues data: %w", err)
}
@@ -0,0 +1,59 @@
package sqlite
import (
"context"
"path/filepath"
"testing"
"github.com/steveyegge/beads/internal/types"
)
// TestRunMigrations_DoesNotResetPinnedOrTemplate verifies that reopening a
// database (which re-runs migrations against the existing schema) preserves
// the pinned and is_template flags on an existing issue rather than resetting
// them to their zero-value defaults.
func TestRunMigrations_DoesNotResetPinnedOrTemplate(t *testing.T) {
	ctx := context.Background()
	dbFile := filepath.Join(t.TempDir(), "beads.db")

	// First open: create the database and seed one pinned template issue.
	store, err := New(ctx, dbFile)
	if err != nil {
		t.Fatalf("New: %v", err)
	}
	t.Cleanup(func() { _ = store.Close() })

	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("SetConfig(issue_prefix): %v", err)
	}

	seed := &types.Issue{
		Title:      "Pinned template",
		Status:     types.StatusOpen,
		Priority:   2,
		IssueType:  types.TypeTask,
		Pinned:     true,
		IsTemplate: true,
	}
	if err := store.CreateIssue(ctx, seed, "test-user"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	// Close so the reopen below exercises the migration path from disk.
	_ = store.Close()

	// Second open: migrations run again over the already-migrated schema.
	reopened, err := New(ctx, dbFile)
	if err != nil {
		t.Fatalf("New(reopen): %v", err)
	}
	defer func() { _ = reopened.Close() }()

	got, err := reopened.GetIssue(ctx, seed.ID)
	if err != nil {
		t.Fatalf("GetIssue: %v", err)
	}
	switch {
	case got == nil:
		t.Fatalf("expected issue to exist")
	case !got.Pinned:
		t.Fatalf("expected issue to remain pinned")
	case !got.IsTemplate:
		t.Fatalf("expected issue to remain template")
	}
}
+1 -1
View File
@@ -282,7 +282,7 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
err := tx.QueryRowContext(ctx, `SELECT id FROM issues WHERE id = ?`, issue.ID).Scan(&existingID)
wisp := 0
if issue.Wisp {
if issue.Ephemeral {
wisp = 1
}
pinned := 0
+1 -1
View File
@@ -54,7 +54,7 @@ func (s *SQLiteStorage) ExportToMultiRepo(ctx context.Context) (map[string]int,
// Wisps exist only in SQLite and are shared via .beads/redirect, not JSONL.
filtered := make([]*types.Issue, 0, len(allIssues))
for _, issue := range allIssues {
if !issue.Wisp {
if !issue.Ephemeral {
filtered = append(filtered, issue)
}
}
+1 -1
View File
@@ -909,7 +909,7 @@ func TestUpsertPreservesGateFields(t *testing.T) {
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeGate,
Wisp: true,
Ephemeral: true,
AwaitType: "gh:run",
AwaitID: "123456789",
Timeout: 30 * 60 * 1000000000, // 30 minutes in nanoseconds
+4 -4
View File
@@ -349,7 +349,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
issue.Sender = sender.String
}
if wisp.Valid && wisp.Int64 != 0 {
issue.Wisp = true
issue.Ephemeral = true
}
// Pinned field (bd-7h5)
if pinned.Valid && pinned.Int64 != 0 {
@@ -562,7 +562,7 @@ func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef s
issue.Sender = sender.String
}
if wisp.Valid && wisp.Int64 != 0 {
issue.Wisp = true
issue.Ephemeral = true
}
// Pinned field (bd-7h5)
if pinned.Valid && pinned.Int64 != 0 {
@@ -1652,8 +1652,8 @@ func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter t
}
// Wisp filtering (bd-kwro.9)
if filter.Wisp != nil {
if *filter.Wisp {
if filter.Ephemeral != nil {
if *filter.Ephemeral {
whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
} else {
whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
+3 -2
View File
@@ -17,7 +17,8 @@ import (
// Excludes pinned issues which are persistent anchors, not actionable work (bd-92u)
func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
whereClauses := []string{
"i.pinned = 0", // Exclude pinned issues (bd-92u)
"i.pinned = 0", // Exclude pinned issues (bd-92u)
"(i.ephemeral = 0 OR i.ephemeral IS NULL)", // Exclude wisps (hq-t15s)
}
args := []interface{}{}
@@ -399,7 +400,7 @@ func (s *SQLiteStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
issue.Sender = sender.String
}
if ephemeral.Valid && ephemeral.Int64 != 0 {
issue.Wisp = true
issue.Ephemeral = true
}
// Pinned field (bd-7h5)
if pinned.Valid && pinned.Int64 != 0 {
+1
View File
@@ -230,6 +230,7 @@ WITH RECURSIVE
SELECT i.*
FROM issues i
WHERE i.status = 'open'
AND (i.ephemeral = 0 OR i.ephemeral IS NULL)
AND NOT EXISTS (
SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
);
+3 -3
View File
@@ -1089,8 +1089,8 @@ func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter
}
// Wisp filtering (bd-kwro.9)
if filter.Wisp != nil {
if *filter.Wisp {
if filter.Ephemeral != nil {
if *filter.Ephemeral {
whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
} else {
whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
@@ -1244,7 +1244,7 @@ func scanIssueRow(row scanner) (*types.Issue, error) {
issue.Sender = sender.String
}
if wisp.Valid && wisp.Int64 != 0 {
issue.Wisp = true
issue.Ephemeral = true
}
// Pinned field (bd-7h5)
if pinned.Valid && pinned.Int64 != 0 {
@@ -413,4 +413,3 @@ func setupTestRepoWithRemote(t *testing.T) string {
return tmpDir
}
+4 -4
View File
@@ -44,8 +44,8 @@ type Issue struct {
OriginalType string `json:"original_type,omitempty"` // Issue type before deletion (for tombstones)
// Messaging fields (bd-kwro): inter-agent communication support
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
Wisp bool `json:"wisp,omitempty"` // Wisp = ephemeral vapor from the Steam Engine; bulk-deleted when closed
Sender string `json:"sender,omitempty"` // Who sent this (for messages)
Ephemeral bool `json:"ephemeral,omitempty"` // If true, not exported to JSONL; bulk-deleted when closed
// NOTE: RepliesTo, RelatesTo, DuplicateOf, SupersededBy moved to dependencies table
// per Decision 004 (Edge Schema Consolidation). Use dependency API instead.
@@ -598,8 +598,8 @@ type IssueFilter struct {
// Tombstone filtering (bd-1bu)
IncludeTombstones bool // If false (default), exclude tombstones from results
// Wisp filtering (bd-kwro.9)
Wisp *bool // Filter by wisp flag (nil = any, true = only wisps, false = only non-wisps)
// Ephemeral filtering (bd-kwro.9)
Ephemeral *bool // Filter by ephemeral flag (nil = any, true = only ephemeral, false = only persistent)
// Pinned filtering (bd-7h5)
Pinned *bool // Filter by pinned flag (nil = any, true = only pinned, false = only non-pinned)