bd sync: 2025-12-27 15:56:42
This commit is contained in:
@@ -1097,16 +1097,10 @@ func (m *MemoryStorage) getOpenBlockers(issueID string) []string {
|
||||
|
||||
// GetBlockedIssues returns issues that are blocked by other issues
|
||||
// Note: Pinned issues are excluded from the output (beads-ei4)
|
||||
func (m *MemoryStorage) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
|
||||
func (m *MemoryStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
// Build set of descendant IDs if parent filter is specified
|
||||
var descendantIDs map[string]bool
|
||||
if filter.ParentID != nil {
|
||||
descendantIDs = m.getAllDescendants(*filter.ParentID)
|
||||
}
|
||||
|
||||
var results []*types.BlockedIssue
|
||||
|
||||
for _, issue := range m.issues {
|
||||
@@ -1120,11 +1114,6 @@ func (m *MemoryStorage) GetBlockedIssues(ctx context.Context, filter types.WorkF
|
||||
continue
|
||||
}
|
||||
|
||||
// Parent filtering: only include descendants of specified parent
|
||||
if descendantIDs != nil && !descendantIDs[issue.ID] {
|
||||
continue
|
||||
}
|
||||
|
||||
blockers := m.getOpenBlockers(issue.ID)
|
||||
// Issue is "blocked" if: status is blocked, status is deferred, or has open blockers
|
||||
if issue.Status != types.StatusBlocked && issue.Status != types.StatusDeferred && len(blockers) == 0 {
|
||||
@@ -1160,27 +1149,6 @@ func (m *MemoryStorage) GetBlockedIssues(ctx context.Context, filter types.WorkF
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// getAllDescendants returns all descendant IDs of a parent issue recursively
|
||||
func (m *MemoryStorage) getAllDescendants(parentID string) map[string]bool {
|
||||
descendants := make(map[string]bool)
|
||||
m.collectDescendants(parentID, descendants)
|
||||
return descendants
|
||||
}
|
||||
|
||||
// collectDescendants recursively collects all descendants of a parent
|
||||
func (m *MemoryStorage) collectDescendants(parentID string, descendants map[string]bool) {
|
||||
for issueID, deps := range m.dependencies {
|
||||
for _, dep := range deps {
|
||||
if dep.Type == types.DepParentChild && dep.DependsOnID == parentID {
|
||||
if !descendants[issueID] {
|
||||
descendants[issueID] = true
|
||||
m.collectDescendants(issueID, descendants)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetEpicsEligibleForClosure is a stub in the memory backend: it always
// reports no eligible epics and no error.
// NOTE(review): GetStatistics appears to compute epic-closure eligibility
// separately — confirm no caller relies on this method returning real data.
func (m *MemoryStorage) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error) {
	return nil, nil
}
|
||||
@@ -1216,58 +1184,6 @@ func (m *MemoryStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
|
||||
return stale, nil
|
||||
}
|
||||
|
||||
// GetNewlyUnblockedByClose returns issues that became unblocked when the given issue was closed.
|
||||
// This is used by the --suggest-next flag on bd close (GH#679).
|
||||
func (m *MemoryStorage) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
var unblocked []*types.Issue
|
||||
|
||||
// Find issues that depend on the closed issue
|
||||
for issueID, deps := range m.dependencies {
|
||||
issue, exists := m.issues[issueID]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only consider open/in_progress, non-pinned issues
|
||||
if issue.Status != types.StatusOpen && issue.Status != types.StatusInProgress {
|
||||
continue
|
||||
}
|
||||
if issue.Pinned {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this issue depended on the closed issue
|
||||
dependedOnClosed := false
|
||||
for _, dep := range deps {
|
||||
if dep.DependsOnID == closedIssueID && dep.Type == types.DepBlocks {
|
||||
dependedOnClosed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !dependedOnClosed {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if now unblocked (no remaining open blockers)
|
||||
blockers := m.getOpenBlockers(issueID)
|
||||
if len(blockers) == 0 {
|
||||
issueCopy := *issue
|
||||
unblocked = append(unblocked, &issueCopy)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by priority ascending
|
||||
sort.Slice(unblocked, func(i, j int) bool {
|
||||
return unblocked[i].Priority < unblocked[j].Priority
|
||||
})
|
||||
|
||||
return unblocked, nil
|
||||
}
|
||||
|
||||
// AddComment is a no-op in the memory backend: it discards the comment and
// reports success.
// NOTE(review): AddIssueComment is the variant that actually stores comments
// here — confirm this stub satisfying the storage interface is intentional.
func (m *MemoryStorage) AddComment(ctx context.Context, issueID, actor, comment string) error {
	return nil
}
|
||||
|
||||
@@ -1,921 +0,0 @@
|
||||
package memory
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// TestMemoryStorage_LoadFromIssues_IndexesAndCounters verifies that bulk
// loading (1) tolerates nil entries, (2) rebuilds the external-ref index and
// reattaches dependencies/labels/comments, and (3) seeds both the flat ID
// counter (bd-10 => next bd-11) and the hierarchical child counter
// (bd-a3f8e9.3 => next child bd-a3f8e9.4).
func TestMemoryStorage_LoadFromIssues_IndexesAndCounters(t *testing.T) {
	store := New("/tmp/example.jsonl")
	defer store.Close()

	extRef := "ext-1"
	issues := []*types.Issue{
		nil, // nil entries must be skipped, not panic
		{
			ID:          "bd-10",
			Title:       "Ten",
			Status:      types.StatusOpen,
			Priority:    1,
			IssueType:   types.TypeTask,
			ExternalRef: &extRef,
			Dependencies: []*types.Dependency{{
				IssueID:     "bd-10",
				DependsOnID: "bd-2",
				Type:        types.DepBlocks,
			}},
			Labels:   []string{"l1"},
			Comments: []*types.Comment{{ID: 1, IssueID: "bd-10", Author: "a", Text: "c"}},
		},
		{ID: "bd-2", Title: "Two", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
		{ID: "bd-a3f8e9", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
		{ID: "bd-a3f8e9.3", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
	}

	if err := store.LoadFromIssues(issues); err != nil {
		t.Fatalf("LoadFromIssues: %v", err)
	}

	ctx := context.Background()

	// External-ref index must resolve to the loaded issue, with its
	// dependencies and labels attached to the returned copy.
	got, err := store.GetIssueByExternalRef(ctx, "ext-1")
	if err != nil {
		t.Fatalf("GetIssueByExternalRef: %v", err)
	}
	if got == nil || got.ID != "bd-10" {
		t.Fatalf("GetIssueByExternalRef got=%v", got)
	}
	if len(got.Dependencies) != 1 || got.Dependencies[0].DependsOnID != "bd-2" {
		t.Fatalf("expected deps attached")
	}
	if len(got.Labels) != 1 || got.Labels[0] != "l1" {
		t.Fatalf("expected labels attached")
	}

	// Exercise CreateIssue ID generation based on the loaded counter (bd-10 => next should be bd-11).
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	newIssue := &types.Issue{Title: "New", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, newIssue, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	if newIssue.ID != "bd-11" {
		t.Fatalf("expected generated id bd-11, got %q", newIssue.ID)
	}

	// Hierarchical counter for parent extracted from bd-a3f8e9.3.
	childID, err := store.GetNextChildID(ctx, "bd-a3f8e9")
	if err != nil {
		t.Fatalf("GetNextChildID: %v", err)
	}
	if childID != "bd-a3f8e9.4" {
		t.Fatalf("expected bd-a3f8e9.4, got %q", childID)
	}
}
|
||||
|
||||
// TestMemoryStorage_GetAllIssues_SortsAndCopies verifies that GetAllIssues
// returns issues sorted by ID and that the returned structs are copies:
// mutating a returned issue must not leak back into the store.
func TestMemoryStorage_GetAllIssues_SortsAndCopies(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	// Create out-of-order IDs.
	a := &types.Issue{ID: "bd-2", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	b := &types.Issue{ID: "bd-1", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue a: %v", err)
	}
	if err := store.CreateIssue(ctx, b, "actor"); err != nil {
		t.Fatalf("CreateIssue b: %v", err)
	}

	if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}

	all := store.GetAllIssues()
	if len(all) != 2 {
		t.Fatalf("expected 2 issues, got %d", len(all))
	}
	if all[0].ID != "bd-1" || all[1].ID != "bd-2" {
		t.Fatalf("expected sorted by ID, got %q then %q", all[0].ID, all[1].ID)
	}

	// Returned issues must be copies (mutating should not affect stored issue struct).
	all[1].Title = "mutated"
	got, err := store.GetIssue(ctx, "bd-2")
	if err != nil {
		t.Fatalf("GetIssue: %v", err)
	}
	if got.Title != "A" {
		t.Fatalf("expected stored title unchanged, got %q", got.Title)
	}
}
|
||||
|
||||
// TestMemoryStorage_CreateIssues_DefaultPrefix_DuplicateExisting_ExternalRef
// covers CreateIssues batch behavior: the default "bd" prefix when no
// issue_prefix config is set, external-ref indexing of explicit-ID issues,
// and rejection of a batch that reuses an existing issue ID.
func TestMemoryStorage_CreateIssues_DefaultPrefix_DuplicateExisting_ExternalRef(t *testing.T) {
	store := New("")
	defer store.Close()
	ctx := context.Background()

	// Default prefix should be "bd" when unset.
	issues := []*types.Issue{{Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
	if err := store.CreateIssues(ctx, issues, "actor"); err != nil {
		t.Fatalf("CreateIssues: %v", err)
	}
	if issues[0].ID != "bd-1" {
		t.Fatalf("expected bd-1, got %q", issues[0].ID)
	}

	ext := "ext"
	batch := []*types.Issue{{ID: "bd-x", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, ExternalRef: &ext}}
	if err := store.CreateIssues(ctx, batch, "actor"); err != nil {
		t.Fatalf("CreateIssues: %v", err)
	}
	if got, _ := store.GetIssueByExternalRef(ctx, "ext"); got == nil || got.ID != "bd-x" {
		t.Fatalf("expected external ref indexed")
	}

	// Duplicate existing issue ID branch.
	dup := []*types.Issue{{ID: "bd-x", Title: "Dup", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}}
	if err := store.CreateIssues(ctx, dup, "actor"); err == nil {
		t.Fatalf("expected duplicate existing issue error")
	}
}
|
||||
|
||||
// TestMemoryStorage_GetIssueByExternalRef_IndexPointsToMissingIssue checks
// the dangling-index branch: an externalRefToID entry whose target issue no
// longer exists must yield (nil, nil), not an error. The index is poked
// directly under the store's own mutex to create the inconsistency.
func TestMemoryStorage_GetIssueByExternalRef_IndexPointsToMissingIssue(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	store.mu.Lock()
	store.externalRefToID["dangling"] = "bd-nope"
	store.mu.Unlock()

	got, err := store.GetIssueByExternalRef(ctx, "dangling")
	if err != nil {
		t.Fatalf("GetIssueByExternalRef: %v", err)
	}
	if got != nil {
		t.Fatalf("expected nil for dangling ref")
	}
}
|
||||
|
||||
// TestMemoryStorage_DependencyCounts_Records_Tree_Cycles builds a small
// blocks-graph (A->B, A->C, D->B) and exercises the read-side dependency
// APIs: per-issue counts (including a zero entry for a missing ID),
// per-issue and all-issue dependency records, a depth-limited dependency
// tree, and DetectCycles on this acyclic graph (expected nil).
func TestMemoryStorage_DependencyCounts_Records_Tree_Cycles(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	c := &types.Issue{ID: "bd-3", Title: "C", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	d := &types.Issue{ID: "bd-4", Title: "D", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{a, b, c, d} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}

	if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: c.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: d.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}

	// A depends on 2; B is depended on by 2 (A and D); missing IDs get zeros.
	counts, err := store.GetDependencyCounts(ctx, []string{a.ID, b.ID, "bd-missing"})
	if err != nil {
		t.Fatalf("GetDependencyCounts: %v", err)
	}
	if counts[a.ID].DependencyCount != 2 || counts[a.ID].DependentCount != 0 {
		t.Fatalf("unexpected counts for A: %+v", counts[a.ID])
	}
	if counts[b.ID].DependencyCount != 0 || counts[b.ID].DependentCount != 2 {
		t.Fatalf("unexpected counts for B: %+v", counts[b.ID])
	}
	if counts["bd-missing"].DependencyCount != 0 || counts["bd-missing"].DependentCount != 0 {
		t.Fatalf("unexpected counts for missing: %+v", counts["bd-missing"])
	}

	deps, err := store.GetDependencyRecords(ctx, a.ID)
	if err != nil {
		t.Fatalf("GetDependencyRecords: %v", err)
	}
	if len(deps) != 2 {
		t.Fatalf("expected 2 deps, got %d", len(deps))
	}

	allDeps, err := store.GetAllDependencyRecords(ctx)
	if err != nil {
		t.Fatalf("GetAllDependencyRecords: %v", err)
	}
	if len(allDeps[a.ID]) != 2 {
		t.Fatalf("expected all deps for A")
	}

	// Tree from A at max depth 3: B and C, both at depth 1.
	nodes, err := store.GetDependencyTree(ctx, a.ID, 3, false, false)
	if err != nil {
		t.Fatalf("GetDependencyTree: %v", err)
	}
	if len(nodes) != 2 || nodes[0].Depth != 1 {
		t.Fatalf("unexpected tree: %+v", nodes)
	}

	cycles, err := store.DetectCycles(ctx)
	if err != nil {
		t.Fatalf("DetectCycles: %v", err)
	}
	if cycles != nil {
		t.Fatalf("expected nil cycles, got %+v", cycles)
	}
}
|
||||
|
||||
// TestMemoryStorage_HashTracking_NoOps pins down that the memory backend's
// hash-tracking API is a set of no-ops: getters return empty strings and
// setters/clears succeed without error. Guards against a future change
// accidentally making these methods fail in memory mode.
func TestMemoryStorage_HashTracking_NoOps(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	if hash, err := store.GetDirtyIssueHash(ctx, "bd-1"); err != nil || hash != "" {
		t.Fatalf("GetDirtyIssueHash: hash=%q err=%v", hash, err)
	}
	if hash, err := store.GetExportHash(ctx, "bd-1"); err != nil || hash != "" {
		t.Fatalf("GetExportHash: hash=%q err=%v", hash, err)
	}
	if err := store.SetExportHash(ctx, "bd-1", "h"); err != nil {
		t.Fatalf("SetExportHash: %v", err)
	}
	if err := store.ClearAllExportHashes(ctx); err != nil {
		t.Fatalf("ClearAllExportHashes: %v", err)
	}
	if hash, err := store.GetJSONLFileHash(ctx); err != nil || hash != "" {
		t.Fatalf("GetJSONLFileHash: hash=%q err=%v", hash, err)
	}
	if err := store.SetJSONLFileHash(ctx, "h"); err != nil {
		t.Fatalf("SetJSONLFileHash: %v", err)
	}
}
|
||||
|
||||
// TestMemoryStorage_LabelsAndCommentsHelpers exercises the batch label and
// comment lookups: GetLabelsForIssues omits IDs with no labels,
// GetIssuesByLabel filters correctly, and GetCommentsForIssues returns
// comments added via AddIssueComment.
func TestMemoryStorage_LabelsAndCommentsHelpers(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue a: %v", err)
	}
	if err := store.CreateIssue(ctx, b, "actor"); err != nil {
		t.Fatalf("CreateIssue b: %v", err)
	}

	if err := store.AddLabel(ctx, a.ID, "l1", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	if err := store.AddLabel(ctx, b.ID, "l2", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}

	// bd-missing has no labels, so only 2 map entries are expected.
	labels, err := store.GetLabelsForIssues(ctx, []string{a.ID, b.ID, "bd-missing"})
	if err != nil {
		t.Fatalf("GetLabelsForIssues: %v", err)
	}
	if len(labels) != 2 {
		t.Fatalf("expected 2 entries, got %d", len(labels))
	}
	if labels[a.ID][0] != "l1" {
		t.Fatalf("unexpected labels for A: %+v", labels[a.ID])
	}

	issues, err := store.GetIssuesByLabel(ctx, "l1")
	if err != nil {
		t.Fatalf("GetIssuesByLabel: %v", err)
	}
	if len(issues) != 1 || issues[0].ID != a.ID {
		t.Fatalf("unexpected issues: %+v", issues)
	}

	if _, err := store.AddIssueComment(ctx, a.ID, "author", "text"); err != nil {
		t.Fatalf("AddIssueComment: %v", err)
	}
	comments, err := store.GetCommentsForIssues(ctx, []string{a.ID, b.ID})
	if err != nil {
		t.Fatalf("GetCommentsForIssues: %v", err)
	}
	if len(comments[a.ID]) != 1 {
		t.Fatalf("expected comments for A")
	}
}
|
||||
|
||||
// TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers is a grab-bag
// covering: Path/UnderlyingDB/UnderlyingConn/RunInTransaction surface
// (memory backend has no SQL conn, so the latter two must error), stale
// detection after back-dating updated_at, event limiting in GetEvents,
// custom-status parsing (trimming blanks/whitespace), and the rename/ID
// helper stubs.
func TestMemoryStorage_StaleEventsCustomStatusAndLifecycleHelpers(t *testing.T) {
	store := New("/tmp/x.jsonl")
	defer store.Close()
	ctx := context.Background()

	if store.Path() != "/tmp/x.jsonl" {
		t.Fatalf("Path mismatch")
	}
	if store.UnderlyingDB() != nil {
		t.Fatalf("expected nil UnderlyingDB")
	}
	if _, err := store.UnderlyingConn(ctx); err == nil {
		t.Fatalf("expected UnderlyingConn error")
	}
	if err := store.RunInTransaction(ctx, func(tx storage.Transaction) error { return nil }); err == nil {
		t.Fatalf("expected RunInTransaction error")
	}

	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}

	// Force updated_at into the past for stale detection.
	// NOTE(review): this mutates the caller's struct `a` directly, which only
	// works if CreateIssue stores the same pointer — confirm against
	// CreateIssue; the later GetStaleIssues test mutates store.issues[...]
	// instead.
	store.mu.Lock()
	a.UpdatedAt = time.Now().Add(-10 * 24 * time.Hour)
	store.mu.Unlock()

	stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 10})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != a.ID {
		t.Fatalf("unexpected stale: %+v", stale)
	}

	if err := store.AddComment(ctx, a.ID, "actor", "c"); err != nil {
		t.Fatalf("AddComment: %v", err)
	}
	if err := store.MarkIssueDirty(ctx, a.ID); err != nil {
		t.Fatalf("MarkIssueDirty: %v", err)
	}

	// Generate multiple events and ensure limiting returns the last N.
	if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t1"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	if err := store.UpdateIssue(ctx, a.ID, map[string]interface{}{"title": "t2"}, "actor"); err != nil {
		t.Fatalf("UpdateIssue: %v", err)
	}
	evs, err := store.GetEvents(ctx, a.ID, 2)
	if err != nil {
		t.Fatalf("GetEvents: %v", err)
	}
	if len(evs) != 2 {
		t.Fatalf("expected 2 events, got %d", len(evs))
	}

	// Custom statuses: whitespace and empty entries must be dropped.
	if err := store.SetConfig(ctx, "status.custom", " triage, blocked , ,done "); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	statuses, err := store.GetCustomStatuses(ctx)
	if err != nil {
		t.Fatalf("GetCustomStatuses: %v", err)
	}
	if len(statuses) != 3 || statuses[0] != "triage" || statuses[1] != "blocked" || statuses[2] != "done" {
		t.Fatalf("unexpected statuses: %+v", statuses)
	}
	if got := parseCustomStatuses(""); got != nil {
		t.Fatalf("expected nil for empty parseCustomStatuses")
	}

	// Empty custom statuses.
	if err := store.DeleteConfig(ctx, "status.custom"); err != nil {
		t.Fatalf("DeleteConfig: %v", err)
	}
	statuses, err = store.GetCustomStatuses(ctx)
	if err != nil {
		t.Fatalf("GetCustomStatuses(empty): %v", err)
	}
	if statuses != nil {
		t.Fatalf("expected nil statuses when unset, got %+v", statuses)
	}

	if _, err := store.GetEpicsEligibleForClosure(ctx); err != nil {
		t.Fatalf("GetEpicsEligibleForClosure: %v", err)
	}

	if err := store.UpdateIssueID(ctx, "old", "new", nil, "actor"); err == nil {
		t.Fatalf("expected UpdateIssueID error")
	}
	if err := store.RenameDependencyPrefix(ctx, "old", "new"); err != nil {
		t.Fatalf("RenameDependencyPrefix: %v", err)
	}
	if err := store.RenameCounterPrefix(ctx, "old", "new"); err != nil {
		t.Fatalf("RenameCounterPrefix: %v", err)
	}
}
|
||||
|
||||
// TestMemoryStorage_AddLabelAndAddDependency_ErrorPaths covers the error
// branches: AddLabel on a missing issue fails, re-adding the same label is a
// silent no-op, and AddDependency fails when either endpoint of the edge
// does not exist.
func TestMemoryStorage_AddLabelAndAddDependency_ErrorPaths(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	issue := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, issue, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}

	if err := store.AddLabel(ctx, "bd-missing", "l", "actor"); err == nil {
		t.Fatalf("expected AddLabel error for missing issue")
	}
	if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
		t.Fatalf("AddLabel: %v", err)
	}
	// Duplicate label is a no-op.
	if err := store.AddLabel(ctx, issue.ID, "l", "actor"); err != nil {
		t.Fatalf("AddLabel duplicate: %v", err)
	}

	// AddDependency error paths.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: "bd-missing", DependsOnID: issue.ID, Type: types.DepBlocks}, "actor"); err == nil {
		t.Fatalf("expected AddDependency error for missing IssueID")
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: issue.ID, DependsOnID: "bd-missing", Type: types.DepBlocks}, "actor"); err == nil {
		t.Fatalf("expected AddDependency error for missing DependsOnID")
	}
}
|
||||
|
||||
// TestMemoryStorage_GetNextChildID_Errors checks the two failure modes of
// hierarchical ID generation: a parent that does not exist, and a parent
// already at the maximum nesting depth (bd-1.1.1.1).
func TestMemoryStorage_GetNextChildID_Errors(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	if _, err := store.GetNextChildID(ctx, "bd-missing"); err == nil {
		t.Fatalf("expected error for missing parent")
	}

	deep := &types.Issue{ID: "bd-1.1.1.1", Title: "Deep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, deep, "actor"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	if _, err := store.GetNextChildID(ctx, deep.ID); err == nil {
		t.Fatalf("expected max depth error")
	}
}
|
||||
|
||||
// TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments verifies
// that GetAllIssues hydrates each returned issue with its dependency records
// and comments, not just the bare issue fields.
func TestMemoryStorage_GetAllIssues_AttachesDependenciesAndComments(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	a := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	b := &types.Issue{ID: "bd-2", Title: "B", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	if err := store.CreateIssue(ctx, a, "actor"); err != nil {
		t.Fatalf("CreateIssue a: %v", err)
	}
	if err := store.CreateIssue(ctx, b, "actor"); err != nil {
		t.Fatalf("CreateIssue b: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: a.ID, DependsOnID: b.ID, Type: types.DepBlocks}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	if _, err := store.AddIssueComment(ctx, a.ID, "author", "text"); err != nil {
		t.Fatalf("AddIssueComment: %v", err)
	}

	all := store.GetAllIssues()
	var gotA *types.Issue
	for _, iss := range all {
		if iss.ID == a.ID {
			gotA = iss
			break
		}
	}
	if gotA == nil {
		t.Fatalf("expected to find issue A")
	}
	if len(gotA.Dependencies) != 1 || gotA.Dependencies[0].DependsOnID != b.ID {
		t.Fatalf("expected deps attached")
	}
	if len(gotA.Comments) != 1 || gotA.Comments[0].Text != "text" {
		t.Fatalf("expected comments attached")
	}
}
|
||||
|
||||
// TestMemoryStorage_GetStaleIssues_FilteringAndLimit verifies that stale
// detection excludes closed issues, honors the Status filter, orders results
// oldest-first, and applies Limit. Timestamps are back-dated directly in the
// store (under its mutex) to make staleness deterministic.
func TestMemoryStorage_GetStaleIssues_FilteringAndLimit(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	old := &types.Issue{ID: "bd-1", Title: "Old", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	newer := &types.Issue{ID: "bd-2", Title: "Newer", Status: types.StatusInProgress, Priority: 1, IssueType: types.TypeTask}
	closed := &types.Issue{ID: "bd-3", Title: "Closed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{old, newer, closed} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, closed.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue: %v", err)
	}

	store.mu.Lock()
	store.issues[old.ID].UpdatedAt = time.Now().Add(-20 * 24 * time.Hour)
	store.issues[newer.ID].UpdatedAt = time.Now().Add(-10 * 24 * time.Hour)
	store.issues[closed.ID].UpdatedAt = time.Now().Add(-30 * 24 * time.Hour)
	store.mu.Unlock()

	// Status filter: only the in_progress issue qualifies.
	stale, err := store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Status: "in_progress"})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != newer.ID {
		t.Fatalf("unexpected stale filtered: %+v", stale)
	}

	// Limit 1: the closed issue is excluded, so the oldest open one wins.
	stale, err = store.GetStaleIssues(ctx, types.StaleFilter{Days: 7, Limit: 1})
	if err != nil {
		t.Fatalf("GetStaleIssues: %v", err)
	}
	if len(stale) != 1 || stale[0].ID != old.ID {
		t.Fatalf("expected oldest stale first, got %+v", stale)
	}
}
|
||||
|
||||
// TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting verifies
// that GetStatistics counts an open epic as eligible for closure when every
// child linked to it via parent-child dependencies is closed. Note the edge
// direction: the child carries the DepParentChild edge pointing at the epic.
func TestMemoryStorage_Statistics_EpicsEligibleForClosure_Counting(t *testing.T) {
	store := setupTestMemory(t)
	defer store.Close()
	ctx := context.Background()

	ep := &types.Issue{ID: "bd-1", Title: "Epic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
	c1 := &types.Issue{ID: "bd-2", Title: "Child1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	c2 := &types.Issue{ID: "bd-3", Title: "Child2", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
	for _, iss := range []*types.Issue{ep, c1, c2} {
		if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
			t.Fatalf("CreateIssue %s: %v", iss.ID, err)
		}
	}
	if err := store.CloseIssue(ctx, c1.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue c1: %v", err)
	}
	if err := store.CloseIssue(ctx, c2.ID, "done", "actor"); err != nil {
		t.Fatalf("CloseIssue c2: %v", err)
	}
	// Parent-child deps: child -> epic.
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: c1.ID, DependsOnID: ep.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}
	if err := store.AddDependency(ctx, &types.Dependency{IssueID: c2.ID, DependsOnID: ep.ID, Type: types.DepParentChild}, "actor"); err != nil {
		t.Fatalf("AddDependency: %v", err)
	}

	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("GetStatistics: %v", err)
	}
	if stats.EpicsEligibleForClosure != 1 {
		t.Fatalf("expected 1 epic eligible, got %d", stats.EpicsEligibleForClosure)
	}
}
|
||||
|
||||
func TestMemoryStorage_UpdateIssue_SearchIssues_ReadyWork_BlockedIssues(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
now := time.Now()
|
||||
assignee := "alice"
|
||||
|
||||
parent := &types.Issue{ID: "bd-1", Title: "Parent", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
|
||||
child := &types.Issue{ID: "bd-2", Title: "Child", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask, Assignee: assignee}
|
||||
blocker := &types.Issue{ID: "bd-3", Title: "Blocker", Status: types.StatusOpen, Priority: 3, IssueType: types.TypeTask}
|
||||
pinned := &types.Issue{ID: "bd-4", Title: "Pinned", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask, Pinned: true}
|
||||
workflow := &types.Issue{ID: "bd-5", Title: "Workflow", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeMergeRequest}
|
||||
for _, iss := range []*types.Issue{parent, child, blocker, pinned, workflow} {
|
||||
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make created_at deterministic for sorting.
|
||||
store.mu.Lock()
|
||||
store.issues[parent.ID].CreatedAt = now.Add(-100 * time.Hour)
|
||||
store.issues[child.ID].CreatedAt = now.Add(-1 * time.Hour)
|
||||
store.issues[blocker.ID].CreatedAt = now.Add(-2 * time.Hour)
|
||||
store.issues[pinned.ID].CreatedAt = now.Add(-3 * time.Hour)
|
||||
store.issues[workflow.ID].CreatedAt = now.Add(-4 * time.Hour)
|
||||
store.mu.Unlock()
|
||||
|
||||
// Dependencies: child is a child of parent; child is blocked by blocker.
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: parent.ID, Type: types.DepParentChild}, "actor"); err != nil {
|
||||
t.Fatalf("AddDependency parent-child: %v", err)
|
||||
}
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err != nil {
|
||||
t.Fatalf("AddDependency blocks: %v", err)
|
||||
}
|
||||
|
||||
// AddDependency duplicate error path.
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: child.ID, DependsOnID: blocker.ID, Type: types.DepBlocks}, "actor"); err == nil {
|
||||
t.Fatalf("expected duplicate dependency error")
|
||||
}
|
||||
|
||||
// UpdateIssue: exercise assignee nil, external_ref update+clear, and closed_at behavior.
|
||||
ext := "old-ext"
|
||||
store.mu.Lock()
|
||||
store.issues[child.ID].ExternalRef = &ext
|
||||
store.externalRefToID[ext] = child.ID
|
||||
store.mu.Unlock()
|
||||
|
||||
if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"assignee": nil, "external_ref": "new-ext"}, "actor"); err != nil {
|
||||
t.Fatalf("UpdateIssue: %v", err)
|
||||
}
|
||||
if got, _ := store.GetIssueByExternalRef(ctx, "old-ext"); got != nil {
|
||||
t.Fatalf("expected old-ext removed")
|
||||
}
|
||||
if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got == nil || got.ID != child.ID {
|
||||
t.Fatalf("expected new-ext mapping")
|
||||
}
|
||||
|
||||
if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
|
||||
t.Fatalf("UpdateIssue close: %v", err)
|
||||
}
|
||||
closed, _ := store.GetIssue(ctx, child.ID)
|
||||
if closed.ClosedAt == nil {
|
||||
t.Fatalf("expected ClosedAt set")
|
||||
}
|
||||
if err := store.UpdateIssue(ctx, child.ID, map[string]interface{}{"status": string(types.StatusOpen), "external_ref": nil}, "actor"); err != nil {
|
||||
t.Fatalf("UpdateIssue reopen: %v", err)
|
||||
}
|
||||
reopened, _ := store.GetIssue(ctx, child.ID)
|
||||
if reopened.ClosedAt != nil {
|
||||
t.Fatalf("expected ClosedAt cleared")
|
||||
}
|
||||
if got, _ := store.GetIssueByExternalRef(ctx, "new-ext"); got != nil {
|
||||
t.Fatalf("expected new-ext cleared")
|
||||
}
|
||||
|
||||
// SearchIssues: query, label AND/OR, IDs filter, ParentID filter, limit.
|
||||
if err := store.AddLabel(ctx, parent.ID, "l1", "actor"); err != nil {
|
||||
t.Fatalf("AddLabel: %v", err)
|
||||
}
|
||||
if err := store.AddLabel(ctx, child.ID, "l1", "actor"); err != nil {
|
||||
t.Fatalf("AddLabel: %v", err)
|
||||
}
|
||||
if err := store.AddLabel(ctx, child.ID, "l2", "actor"); err != nil {
|
||||
t.Fatalf("AddLabel: %v", err)
|
||||
}
|
||||
|
||||
st := types.StatusOpen
|
||||
res, err := store.SearchIssues(ctx, "parent", types.IssueFilter{Status: &st})
|
||||
if err != nil {
|
||||
t.Fatalf("SearchIssues: %v", err)
|
||||
}
|
||||
if len(res) != 1 || res[0].ID != parent.ID {
|
||||
t.Fatalf("unexpected SearchIssues results: %+v", res)
|
||||
}
|
||||
|
||||
res, err = store.SearchIssues(ctx, "", types.IssueFilter{Labels: []string{"l1", "l2"}})
|
||||
if err != nil {
|
||||
t.Fatalf("SearchIssues labels AND: %v", err)
|
||||
}
|
||||
if len(res) != 1 || res[0].ID != child.ID {
|
||||
t.Fatalf("unexpected labels AND results: %+v", res)
|
||||
}
|
||||
|
||||
res, err = store.SearchIssues(ctx, "", types.IssueFilter{IDs: []string{child.ID}})
|
||||
if err != nil {
|
||||
t.Fatalf("SearchIssues IDs: %v", err)
|
||||
}
|
||||
if len(res) != 1 || res[0].ID != child.ID {
|
||||
t.Fatalf("unexpected IDs results: %+v", res)
|
||||
}
|
||||
|
||||
res, err = store.SearchIssues(ctx, "", types.IssueFilter{ParentID: &parent.ID})
|
||||
if err != nil {
|
||||
t.Fatalf("SearchIssues ParentID: %v", err)
|
||||
}
|
||||
if len(res) != 1 || res[0].ID != child.ID {
|
||||
t.Fatalf("unexpected ParentID results: %+v", res)
|
||||
}
|
||||
|
||||
res, err = store.SearchIssues(ctx, "", types.IssueFilter{LabelsAny: []string{"l2", "missing"}, Limit: 1})
|
||||
if err != nil {
|
||||
t.Fatalf("SearchIssues labels OR: %v", err)
|
||||
}
|
||||
if len(res) != 1 {
|
||||
t.Fatalf("expected limit 1")
|
||||
}
|
||||
|
||||
// Ready work: child is blocked, pinned excluded, workflow excluded by default.
|
||||
ready, err := store.GetReadyWork(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork: %v", err)
|
||||
}
|
||||
if len(ready) != 2 { // parent + blocker
|
||||
t.Fatalf("expected 2 ready issues, got %d: %+v", len(ready), ready)
|
||||
}
|
||||
|
||||
// Filter by workflow type explicitly.
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Type: string(types.TypeMergeRequest)})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork type: %v", err)
|
||||
}
|
||||
if len(ready) != 1 || ready[0].ID != workflow.ID {
|
||||
t.Fatalf("expected only workflow issue, got %+v", ready)
|
||||
}
|
||||
|
||||
// Status + priority filters.
|
||||
prio := 3
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Status: types.StatusOpen, Priority: &prio})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork status+priority: %v", err)
|
||||
}
|
||||
if len(ready) != 1 || ready[0].ID != blocker.ID {
|
||||
t.Fatalf("expected blocker only, got %+v", ready)
|
||||
}
|
||||
|
||||
// Label filters.
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Labels: []string{"l1"}})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork labels AND: %v", err)
|
||||
}
|
||||
if len(ready) != 1 || ready[0].ID != parent.ID {
|
||||
t.Fatalf("expected parent only, got %+v", ready)
|
||||
}
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{LabelsAny: []string{"l2"}})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork labels OR: %v", err)
|
||||
}
|
||||
if len(ready) != 0 {
|
||||
t.Fatalf("expected 0 because only l2 issue is blocked")
|
||||
}
|
||||
|
||||
// Assignee filter vs Unassigned precedence.
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Assignee: &assignee})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork assignee: %v", err)
|
||||
}
|
||||
if len(ready) != 0 {
|
||||
t.Fatalf("expected 0 due to child being blocked")
|
||||
}
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{Unassigned: true})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork unassigned: %v", err)
|
||||
}
|
||||
for _, iss := range ready {
|
||||
if iss.Assignee != "" {
|
||||
t.Fatalf("expected unassigned only")
|
||||
}
|
||||
}
|
||||
|
||||
// Sort policies + limit.
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyOldest, Limit: 1})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork oldest: %v", err)
|
||||
}
|
||||
if len(ready) != 1 || ready[0].ID != parent.ID {
|
||||
t.Fatalf("expected oldest=parent, got %+v", ready)
|
||||
}
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyPriority})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork priority: %v", err)
|
||||
}
|
||||
if len(ready) < 2 || ready[0].Priority > ready[1].Priority {
|
||||
t.Fatalf("expected priority sort")
|
||||
}
|
||||
// Hybrid: recent issues first.
|
||||
ready, err = store.GetReadyWork(ctx, types.WorkFilter{SortPolicy: types.SortPolicyHybrid})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReadyWork hybrid: %v", err)
|
||||
}
|
||||
if len(ready) != 2 || ready[0].ID != blocker.ID {
|
||||
t.Fatalf("expected recent (blocker) first in hybrid, got %+v", ready)
|
||||
}
|
||||
|
||||
// Blocked issues: child is blocked by an open blocker.
|
||||
blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues: %v", err)
|
||||
}
|
||||
if len(blocked) != 1 || blocked[0].ID != child.ID || blocked[0].BlockedByCount != 1 {
|
||||
t.Fatalf("unexpected blocked issues: %+v", blocked)
|
||||
}
|
||||
|
||||
// Cover getOpenBlockers missing-blocker branch.
|
||||
missing := &types.Issue{ID: "bd-6", Title: "Missing blocker dep", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
if err := store.CreateIssue(ctx, missing, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue: %v", err)
|
||||
}
|
||||
// Bypass AddDependency validation to cover the missing-blocker branch in getOpenBlockers.
|
||||
store.mu.Lock()
|
||||
store.dependencies[missing.ID] = append(store.dependencies[missing.ID], &types.Dependency{IssueID: missing.ID, DependsOnID: "bd-does-not-exist", Type: types.DepBlocks})
|
||||
store.mu.Unlock()
|
||||
blocked, err = store.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues: %v", err)
|
||||
}
|
||||
if len(blocked) != 2 {
|
||||
t.Fatalf("expected 2 blocked issues, got %d", len(blocked))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_UpdateIssue_CoversMoreFields(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
iss := &types.Issue{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue: %v", err)
|
||||
}
|
||||
|
||||
if err := store.UpdateIssue(ctx, iss.ID, map[string]interface{}{
|
||||
"description": "d",
|
||||
"design": "design",
|
||||
"acceptance_criteria": "ac",
|
||||
"notes": "n",
|
||||
"priority": 2,
|
||||
"issue_type": string(types.TypeBug),
|
||||
"assignee": "bob",
|
||||
"status": string(types.StatusInProgress),
|
||||
}, "actor"); err != nil {
|
||||
t.Fatalf("UpdateIssue: %v", err)
|
||||
}
|
||||
|
||||
got, _ := store.GetIssue(ctx, iss.ID)
|
||||
if got.Description != "d" || got.Design != "design" || got.AcceptanceCriteria != "ac" || got.Notes != "n" {
|
||||
t.Fatalf("expected text fields updated")
|
||||
}
|
||||
if got.Priority != 2 || got.IssueType != types.TypeBug || got.Assignee != "bob" || got.Status != types.StatusInProgress {
|
||||
t.Fatalf("expected fields updated")
|
||||
}
|
||||
|
||||
// Status closed when already closed should not clear ClosedAt.
|
||||
if err := store.CloseIssue(ctx, iss.ID, "done", "actor"); err != nil {
|
||||
t.Fatalf("CloseIssue: %v", err)
|
||||
}
|
||||
closedOnce, _ := store.GetIssue(ctx, iss.ID)
|
||||
if closedOnce.ClosedAt == nil {
|
||||
t.Fatalf("expected ClosedAt")
|
||||
}
|
||||
if err := store.UpdateIssue(ctx, iss.ID, map[string]interface{}{"status": string(types.StatusClosed)}, "actor"); err != nil {
|
||||
t.Fatalf("UpdateIssue closed->closed: %v", err)
|
||||
}
|
||||
closedTwice, _ := store.GetIssue(ctx, iss.ID)
|
||||
if closedTwice.ClosedAt == nil {
|
||||
t.Fatalf("expected ClosedAt preserved")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemoryStorage_CountEpicsEligibleForClosure_CoversBranches(t *testing.T) {
|
||||
store := setupTestMemory(t)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
ep1 := &types.Issue{ID: "bd-1", Title: "Epic1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
|
||||
epClosed := &types.Issue{ID: "bd-2", Title: "EpicClosed", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeEpic}
|
||||
nonEpic := &types.Issue{ID: "bd-3", Title: "NotEpic", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
c := &types.Issue{ID: "bd-4", Title: "Child", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask}
|
||||
for _, iss := range []*types.Issue{ep1, epClosed, nonEpic, c} {
|
||||
if err := store.CreateIssue(ctx, iss, "actor"); err != nil {
|
||||
t.Fatalf("CreateIssue %s: %v", iss.ID, err)
|
||||
}
|
||||
}
|
||||
if err := store.CloseIssue(ctx, epClosed.ID, "done", "actor"); err != nil {
|
||||
t.Fatalf("CloseIssue: %v", err)
|
||||
}
|
||||
// Child -> ep1 (eligible once child is closed).
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: c.ID, DependsOnID: ep1.ID, Type: types.DepParentChild}, "actor"); err != nil {
|
||||
t.Fatalf("AddDependency: %v", err)
|
||||
}
|
||||
// Child -> nonEpic should not count.
|
||||
if err := store.AddDependency(ctx, &types.Dependency{IssueID: c.ID, DependsOnID: nonEpic.ID, Type: types.DepParentChild}, "actor"); err != nil {
|
||||
t.Fatalf("AddDependency: %v", err)
|
||||
}
|
||||
// Child -> missing epic should not count.
|
||||
store.mu.Lock()
|
||||
store.dependencies[c.ID] = append(store.dependencies[c.ID], &types.Dependency{IssueID: c.ID, DependsOnID: "bd-missing", Type: types.DepParentChild})
|
||||
store.mu.Unlock()
|
||||
|
||||
// Close child to make ep1 eligible.
|
||||
if err := store.CloseIssue(ctx, c.ID, "done", "actor"); err != nil {
|
||||
t.Fatalf("CloseIssue child: %v", err)
|
||||
}
|
||||
|
||||
stats, err := store.GetStatistics(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetStatistics: %v", err)
|
||||
}
|
||||
if stats.EpicsEligibleForClosure != 1 {
|
||||
t.Fatalf("expected 1 eligible epic, got %d", stats.EpicsEligibleForClosure)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractParentAndChildNumber_CoversFailures(t *testing.T) {
|
||||
if _, _, ok := extractParentAndChildNumber("no-dot"); ok {
|
||||
t.Fatalf("expected ok=false")
|
||||
}
|
||||
if _, _, ok := extractParentAndChildNumber("parent.bad"); ok {
|
||||
t.Fatalf("expected ok=false")
|
||||
}
|
||||
}
|
||||
@@ -124,7 +124,7 @@ func TestGetBlockedIssues_IncludesExplicitlyBlockedStatus(t *testing.T) {
|
||||
t.Fatalf("AddDependency failed: %v", err)
|
||||
}
|
||||
|
||||
blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
blocked, err := store.GetBlockedIssues(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
|
||||
@@ -247,7 +247,7 @@ func (s *SQLiteStorage) GetDependenciesWithMetadata(ctx context.Context, issueID
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters,
|
||||
@@ -270,7 +270,7 @@ func (s *SQLiteStorage) GetDependentsWithMetadata(ctx context.Context, issueID s
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters,
|
||||
@@ -484,7 +484,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
0 as depth,
|
||||
i.id as path,
|
||||
@@ -497,7 +497,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
t.depth + 1,
|
||||
t.path || '→' || i.id,
|
||||
@@ -525,7 +525,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
0 as depth,
|
||||
i.id as path,
|
||||
@@ -538,7 +538,7 @@ func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, m
|
||||
SELECT
|
||||
i.id, i.title, i.status, i.priority, i.description, i.design,
|
||||
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
|
||||
i.estimated_minutes, i.created_at, i.created_by, i.updated_at, i.closed_at,
|
||||
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
|
||||
i.external_ref,
|
||||
t.depth + 1,
|
||||
t.path || '→' || i.id,
|
||||
@@ -839,7 +839,7 @@ func (s *SQLiteStorage) scanIssues(ctx context.Context, rows *sql.Rows) ([]*type
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &closeReason,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
&awaitType, &awaitID, &timeoutNs, &waiters,
|
||||
@@ -885,7 +885,7 @@ func (s *SQLiteStorage) scanIssues(ctx context.Context, rows *sql.Rows) ([]*type
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Ephemeral = true
|
||||
issue.Wisp = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -962,7 +962,7 @@ func (s *SQLiteStorage) scanIssuesWithDependencyType(ctx context.Context, rows *
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
&awaitType, &awaitID, &timeoutNs, &waiters,
|
||||
@@ -1006,7 +1006,7 @@ func (s *SQLiteStorage) scanIssuesWithDependencyType(ctx context.Context, rows *
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Ephemeral = true
|
||||
issue.Wisp = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
|
||||
@@ -295,7 +295,7 @@ func TestRepliesTo(t *testing.T) {
|
||||
IssueType: types.TypeMessage,
|
||||
Sender: "alice",
|
||||
Assignee: "bob",
|
||||
Ephemeral: true,
|
||||
Wisp: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -307,7 +307,7 @@ func TestRepliesTo(t *testing.T) {
|
||||
IssueType: types.TypeMessage,
|
||||
Sender: "bob",
|
||||
Assignee: "alice",
|
||||
Ephemeral: true,
|
||||
Wisp: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -363,7 +363,7 @@ func TestRepliesTo_Chain(t *testing.T) {
|
||||
IssueType: types.TypeMessage,
|
||||
Sender: "user",
|
||||
Assignee: "inbox",
|
||||
Ephemeral: true,
|
||||
Wisp: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -415,7 +415,7 @@ func TestWispField(t *testing.T) {
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeMessage,
|
||||
Ephemeral: true,
|
||||
Wisp: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -426,7 +426,7 @@ func TestWispField(t *testing.T) {
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
Ephemeral: false,
|
||||
Wisp: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -443,7 +443,7 @@ func TestWispField(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssue failed: %v", err)
|
||||
}
|
||||
if !savedWisp.Ephemeral {
|
||||
if !savedWisp.Wisp {
|
||||
t.Error("Wisp issue should have Wisp=true")
|
||||
}
|
||||
|
||||
@@ -451,7 +451,7 @@ func TestWispField(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssue failed: %v", err)
|
||||
}
|
||||
if savedPermanent.Ephemeral {
|
||||
if savedPermanent.Wisp {
|
||||
t.Error("Permanent issue should have Wisp=false")
|
||||
}
|
||||
}
|
||||
@@ -468,7 +468,7 @@ func TestWispFilter(t *testing.T) {
|
||||
Status: types.StatusClosed, // Closed for cleanup test
|
||||
Priority: 2,
|
||||
IssueType: types.TypeMessage,
|
||||
Ephemeral: true,
|
||||
Wisp: true,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -483,7 +483,7 @@ func TestWispFilter(t *testing.T) {
|
||||
Status: types.StatusClosed,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
Ephemeral: false,
|
||||
Wisp: false,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
@@ -497,7 +497,7 @@ func TestWispFilter(t *testing.T) {
|
||||
closedStatus := types.StatusClosed
|
||||
wispFilter := types.IssueFilter{
|
||||
Status: &closedStatus,
|
||||
Ephemeral: &wispTrue,
|
||||
Wisp: &wispTrue,
|
||||
}
|
||||
|
||||
wispIssues, err := store.SearchIssues(ctx, "", wispFilter)
|
||||
@@ -512,7 +512,7 @@ func TestWispFilter(t *testing.T) {
|
||||
wispFalse := false
|
||||
nonWispFilter := types.IssueFilter{
|
||||
Status: &closedStatus,
|
||||
Ephemeral: &wispFalse,
|
||||
Wisp: &wispFalse,
|
||||
}
|
||||
|
||||
permanentIssues, err := store.SearchIssues(ctx, "", nonWispFilter)
|
||||
|
||||
@@ -28,7 +28,7 @@ func insertIssue(ctx context.Context, conn *sql.Conn, issue *types.Issue) error
|
||||
}
|
||||
|
||||
wisp := 0
|
||||
if issue.Ephemeral {
|
||||
if issue.Wisp {
|
||||
wisp = 1
|
||||
}
|
||||
pinned := 0
|
||||
@@ -44,16 +44,16 @@ func insertIssue(ctx context.Context, conn *sql.Conn, issue *types.Issue) error
|
||||
INSERT OR IGNORE INTO issues (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, created_by, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`,
|
||||
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
||||
issue.AcceptanceCriteria, issue.Notes, issue.Status,
|
||||
issue.Priority, issue.IssueType, issue.Assignee,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.CreatedBy, issue.UpdatedAt,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
|
||||
issue.ClosedAt, issue.ExternalRef, sourceRepo, issue.CloseReason,
|
||||
issue.DeletedAt, issue.DeletedBy, issue.DeleteReason, issue.OriginalType,
|
||||
issue.Sender, wisp, pinned, isTemplate,
|
||||
@@ -76,11 +76,11 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
|
||||
INSERT OR IGNORE INTO issues (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, created_by, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare statement: %w", err)
|
||||
@@ -94,7 +94,7 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
|
||||
}
|
||||
|
||||
wisp := 0
|
||||
if issue.Ephemeral {
|
||||
if issue.Wisp {
|
||||
wisp = 1
|
||||
}
|
||||
pinned := 0
|
||||
@@ -110,7 +110,7 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
|
||||
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
||||
issue.AcceptanceCriteria, issue.Notes, issue.Status,
|
||||
issue.Priority, issue.IssueType, issue.Assignee,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.CreatedBy, issue.UpdatedAt,
|
||||
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
|
||||
issue.ClosedAt, issue.ExternalRef, sourceRepo, issue.CloseReason,
|
||||
issue.DeletedAt, issue.DeletedBy, issue.DeleteReason, issue.OriginalType,
|
||||
issue.Sender, wisp, pinned, isTemplate,
|
||||
|
||||
@@ -157,7 +157,7 @@ func (s *SQLiteStorage) GetIssuesByLabel(ctx context.Context, label string) ([]*
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters
|
||||
|
||||
@@ -44,8 +44,6 @@ var migrationsList = []Migration{
|
||||
{"remove_depends_on_fk", migrations.MigrateRemoveDependsOnFK},
|
||||
{"additional_indexes", migrations.MigrateAdditionalIndexes},
|
||||
{"gate_columns", migrations.MigrateGateColumns},
|
||||
{"tombstone_closed_at", migrations.MigrateTombstoneClosedAt},
|
||||
{"created_by_column", migrations.MigrateCreatedByColumn},
|
||||
}
|
||||
|
||||
// MigrationInfo contains metadata about a migration for inspection
|
||||
|
||||
@@ -20,6 +20,10 @@ func MigrateMessagingFields(db *sql.DB) error {
|
||||
}{
|
||||
{"sender", "TEXT DEFAULT ''"},
|
||||
{"ephemeral", "INTEGER DEFAULT 0"},
|
||||
{"replies_to", "TEXT DEFAULT ''"},
|
||||
{"relates_to", "TEXT DEFAULT ''"},
|
||||
{"duplicate_of", "TEXT DEFAULT ''"},
|
||||
{"superseded_by", "TEXT DEFAULT ''"},
|
||||
}
|
||||
|
||||
for _, col := range columns {
|
||||
@@ -55,5 +59,11 @@ func MigrateMessagingFields(db *sql.DB) error {
|
||||
return fmt.Errorf("failed to create sender index: %w", err)
|
||||
}
|
||||
|
||||
// Add index for replies_to (for efficient thread queries)
|
||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_replies_to ON issues(replies_to) WHERE replies_to != ''`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create replies_to index: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -21,177 +21,138 @@ import (
|
||||
func MigrateEdgeFields(db *sql.DB) error {
|
||||
now := time.Now()
|
||||
|
||||
hasColumn := func(name string) (bool, error) {
|
||||
var exists bool
|
||||
err := db.QueryRow(`
|
||||
SELECT COUNT(*) > 0
|
||||
FROM pragma_table_info('issues')
|
||||
WHERE name = ?
|
||||
`, name).Scan(&exists)
|
||||
return exists, err
|
||||
}
|
||||
|
||||
hasRepliesTo, err := hasColumn("replies_to")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check replies_to column: %w", err)
|
||||
}
|
||||
hasRelatesTo, err := hasColumn("relates_to")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check relates_to column: %w", err)
|
||||
}
|
||||
hasDuplicateOf, err := hasColumn("duplicate_of")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check duplicate_of column: %w", err)
|
||||
}
|
||||
hasSupersededBy, err := hasColumn("superseded_by")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check superseded_by column: %w", err)
|
||||
}
|
||||
|
||||
if !hasRepliesTo && !hasRelatesTo && !hasDuplicateOf && !hasSupersededBy {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Migrate replies_to fields to replies-to edges
|
||||
// For thread_id, use the parent's ID as the thread root for first-level replies
|
||||
// (more sophisticated thread detection would require recursive queries)
|
||||
if hasRepliesTo {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, replies_to
|
||||
FROM issues
|
||||
WHERE replies_to != '' AND replies_to IS NOT NULL
|
||||
`)
|
||||
rows, err := db.Query(`
|
||||
SELECT id, replies_to
|
||||
FROM issues
|
||||
WHERE replies_to != '' AND replies_to IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query replies_to fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, repliesTo string
|
||||
if err := rows.Scan(&issueID, &repliesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan replies_to row: %w", err)
|
||||
}
|
||||
|
||||
// Use repliesTo as thread_id (the root of the thread)
|
||||
// This is a simplification - existing threads will have the parent as thread root
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
|
||||
`, issueID, repliesTo, now, repliesTo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query replies_to fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, repliesTo string
|
||||
if err := rows.Scan(&issueID, &repliesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan replies_to row: %w", err)
|
||||
}
|
||||
|
||||
// Use repliesTo as thread_id (the root of the thread)
|
||||
// This is a simplification - existing threads will have the parent as thread root
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
|
||||
`, issueID, repliesTo, now, repliesTo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating replies_to rows: %w", err)
|
||||
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating replies_to rows: %w", err)
|
||||
}
|
||||
|
||||
// Migrate relates_to fields to relates-to edges
|
||||
// relates_to is stored as JSON array string
|
||||
if hasRelatesTo {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, relates_to
|
||||
FROM issues
|
||||
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query relates_to fields: %w", err)
|
||||
rows, err = db.Query(`
|
||||
SELECT id, relates_to
|
||||
FROM issues
|
||||
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query relates_to fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, relatesTo string
|
||||
if err := rows.Scan(&issueID, &relatesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan relates_to row: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, relatesTo string
|
||||
if err := rows.Scan(&issueID, &relatesTo); err != nil {
|
||||
return fmt.Errorf("failed to scan relates_to row: %w", err)
|
||||
}
|
||||
// Parse JSON array
|
||||
var relatedIDs []string
|
||||
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
|
||||
// Skip malformed JSON
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse JSON array
|
||||
var relatedIDs []string
|
||||
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
|
||||
// Skip malformed JSON
|
||||
for _, relatedID := range relatedIDs {
|
||||
if relatedID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, relatedID := range relatedIDs {
|
||||
if relatedID == "" {
|
||||
continue
|
||||
}
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
|
||||
`, issueID, relatedID, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
|
||||
}
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
|
||||
`, issueID, relatedID, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating relates_to rows: %w", err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating relates_to rows: %w", err)
|
||||
}
|
||||
|
||||
// Migrate duplicate_of fields to duplicates edges
|
||||
if hasDuplicateOf {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, duplicate_of
|
||||
FROM issues
|
||||
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
|
||||
`)
|
||||
rows, err = db.Query(`
|
||||
SELECT id, duplicate_of
|
||||
FROM issues
|
||||
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, duplicateOf string
|
||||
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
|
||||
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
|
||||
`, issueID, duplicateOf, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, duplicateOf string
|
||||
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
|
||||
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
|
||||
`, issueID, duplicateOf, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
|
||||
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
|
||||
}
|
||||
|
||||
// Migrate superseded_by fields to supersedes edges
|
||||
if hasSupersededBy {
|
||||
rows, err := db.Query(`
|
||||
SELECT id, superseded_by
|
||||
FROM issues
|
||||
WHERE superseded_by != '' AND superseded_by IS NOT NULL
|
||||
`)
|
||||
rows, err = db.Query(`
|
||||
SELECT id, superseded_by
|
||||
FROM issues
|
||||
WHERE superseded_by != '' AND superseded_by IS NOT NULL
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query superseded_by fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, supersededBy string
|
||||
if err := rows.Scan(&issueID, &supersededBy); err != nil {
|
||||
return fmt.Errorf("failed to scan superseded_by row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
|
||||
`, issueID, supersededBy, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query superseded_by fields: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var issueID, supersededBy string
|
||||
if err := rows.Scan(&issueID, &supersededBy); err != nil {
|
||||
return fmt.Errorf("failed to scan superseded_by row: %w", err)
|
||||
}
|
||||
|
||||
_, err := db.Exec(`
|
||||
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
|
||||
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
|
||||
`, issueID, supersededBy, now)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating superseded_by rows: %w", err)
|
||||
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
|
||||
}
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return fmt.Errorf("error iterating superseded_by rows: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -57,57 +57,6 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Preserve newer columns if they already exist (migration may run on partially-migrated DBs).
|
||||
hasPinned, err := checkCol("pinned")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check pinned column: %w", err)
|
||||
}
|
||||
hasIsTemplate, err := checkCol("is_template")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check is_template column: %w", err)
|
||||
}
|
||||
hasAwaitType, err := checkCol("await_type")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check await_type column: %w", err)
|
||||
}
|
||||
hasAwaitID, err := checkCol("await_id")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check await_id column: %w", err)
|
||||
}
|
||||
hasTimeoutNs, err := checkCol("timeout_ns")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check timeout_ns column: %w", err)
|
||||
}
|
||||
hasWaiters, err := checkCol("waiters")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check waiters column: %w", err)
|
||||
}
|
||||
|
||||
pinnedExpr := "0"
|
||||
if hasPinned {
|
||||
pinnedExpr = "pinned"
|
||||
}
|
||||
isTemplateExpr := "0"
|
||||
if hasIsTemplate {
|
||||
isTemplateExpr = "is_template"
|
||||
}
|
||||
awaitTypeExpr := "''"
|
||||
if hasAwaitType {
|
||||
awaitTypeExpr = "await_type"
|
||||
}
|
||||
awaitIDExpr := "''"
|
||||
if hasAwaitID {
|
||||
awaitIDExpr = "await_id"
|
||||
}
|
||||
timeoutNsExpr := "0"
|
||||
if hasTimeoutNs {
|
||||
timeoutNsExpr = "timeout_ns"
|
||||
}
|
||||
waitersExpr := "''"
|
||||
if hasWaiters {
|
||||
waitersExpr = "waiters"
|
||||
}
|
||||
|
||||
// SQLite 3.35.0+ supports DROP COLUMN, but we use table recreation for compatibility
|
||||
// This is idempotent - we recreate the table without the deprecated columns
|
||||
|
||||
@@ -168,12 +117,6 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
original_type TEXT DEFAULT '',
|
||||
sender TEXT DEFAULT '',
|
||||
ephemeral INTEGER DEFAULT 0,
|
||||
pinned INTEGER DEFAULT 0,
|
||||
is_template INTEGER DEFAULT 0,
|
||||
await_type TEXT,
|
||||
await_id TEXT,
|
||||
timeout_ns INTEGER,
|
||||
waiters TEXT,
|
||||
close_reason TEXT DEFAULT '',
|
||||
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
)
|
||||
@@ -189,8 +132,7 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
notes, status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at,
|
||||
deleted_by, delete_reason, original_type, sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters, close_reason
|
||||
deleted_by, delete_reason, original_type, sender, ephemeral, close_reason
|
||||
)
|
||||
SELECT
|
||||
id, content_hash, title, description, design, acceptance_criteria,
|
||||
@@ -198,11 +140,9 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
|
||||
created_at, updated_at, closed_at, external_ref, COALESCE(source_repo, ''), compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at,
|
||||
deleted_by, delete_reason, original_type, sender, ephemeral,
|
||||
%s, %s,
|
||||
%s, %s, %s, %s,
|
||||
COALESCE(close_reason, '')
|
||||
FROM issues
|
||||
`, pinnedExpr, isTemplateExpr, awaitTypeExpr, awaitIDExpr, timeoutNsExpr, waitersExpr)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy issues data: %w", err)
|
||||
}
|
||||
|
||||
@@ -20,11 +20,6 @@ func MigratePinnedColumn(db *sql.DB) error {
|
||||
}
|
||||
|
||||
if columnExists {
|
||||
// Column exists (e.g. created by new schema); ensure index exists.
|
||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create pinned index: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -21,11 +21,6 @@ func MigrateIsTemplateColumn(db *sql.DB) error {
|
||||
}
|
||||
|
||||
if columnExists {
|
||||
// Column exists (e.g. created by new schema); ensure index exists.
|
||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create is_template index: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,251 +0,0 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MigrateTombstoneClosedAt updates the closed_at constraint to allow tombstones
|
||||
// to retain their closed_at timestamp from before deletion.
|
||||
//
|
||||
// Previously: CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
// - This required clearing closed_at when creating tombstones from closed issues
|
||||
//
|
||||
// Now: CHECK (closed + tombstone OR non-closed/tombstone with no closed_at)
|
||||
// - closed issues must have closed_at
|
||||
// - tombstones may have closed_at (from before deletion) or not
|
||||
// - other statuses must NOT have closed_at
|
||||
//
|
||||
// This allows importing tombstones that were closed before being deleted,
|
||||
// preserving the historical closed_at timestamp for audit purposes.
|
||||
func MigrateTombstoneClosedAt(db *sql.DB) error {
|
||||
// SQLite doesn't support ALTER TABLE to modify CHECK constraints
|
||||
// We must recreate the table with the new constraint
|
||||
|
||||
// Idempotency check: see if the new CHECK constraint already exists
|
||||
// The new constraint contains "status = 'tombstone'" which the old one didn't
|
||||
var tableSql string
|
||||
err := db.QueryRow(`SELECT sql FROM sqlite_master WHERE type='table' AND name='issues'`).Scan(&tableSql)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get issues table schema: %w", err)
|
||||
}
|
||||
// If the schema already has the tombstone clause, migration is already applied
|
||||
if strings.Contains(tableSql, "status = 'tombstone'") || strings.Contains(tableSql, `status = "tombstone"`) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Step 0: Drop views that depend on the issues table
|
||||
_, err = db.Exec(`DROP VIEW IF EXISTS ready_issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to drop ready_issues view: %w", err)
|
||||
}
|
||||
_, err = db.Exec(`DROP VIEW IF EXISTS blocked_issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to drop blocked_issues view: %w", err)
|
||||
}
|
||||
|
||||
// Step 1: Create new table with updated constraint
|
||||
_, err = db.Exec(`
|
||||
CREATE TABLE IF NOT EXISTS issues_new (
|
||||
id TEXT PRIMARY KEY,
|
||||
content_hash TEXT,
|
||||
title TEXT NOT NULL CHECK(length(title) <= 500),
|
||||
description TEXT NOT NULL DEFAULT '',
|
||||
design TEXT NOT NULL DEFAULT '',
|
||||
acceptance_criteria TEXT NOT NULL DEFAULT '',
|
||||
notes TEXT NOT NULL DEFAULT '',
|
||||
status TEXT NOT NULL DEFAULT 'open',
|
||||
priority INTEGER NOT NULL DEFAULT 2 CHECK(priority >= 0 AND priority <= 4),
|
||||
issue_type TEXT NOT NULL DEFAULT 'task',
|
||||
assignee TEXT,
|
||||
estimated_minutes INTEGER,
|
||||
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
created_by TEXT DEFAULT '',
|
||||
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
closed_at DATETIME,
|
||||
external_ref TEXT,
|
||||
source_repo TEXT DEFAULT '',
|
||||
compaction_level INTEGER DEFAULT 0,
|
||||
compacted_at DATETIME,
|
||||
compacted_at_commit TEXT,
|
||||
original_size INTEGER,
|
||||
deleted_at DATETIME,
|
||||
deleted_by TEXT DEFAULT '',
|
||||
delete_reason TEXT DEFAULT '',
|
||||
original_type TEXT DEFAULT '',
|
||||
sender TEXT DEFAULT '',
|
||||
ephemeral INTEGER DEFAULT 0,
|
||||
close_reason TEXT DEFAULT '',
|
||||
pinned INTEGER DEFAULT 0,
|
||||
is_template INTEGER DEFAULT 0,
|
||||
await_type TEXT,
|
||||
await_id TEXT,
|
||||
timeout_ns INTEGER,
|
||||
waiters TEXT,
|
||||
CHECK (
|
||||
(status = 'closed' AND closed_at IS NOT NULL) OR
|
||||
(status = 'tombstone') OR
|
||||
(status NOT IN ('closed', 'tombstone') AND closed_at IS NULL)
|
||||
)
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new issues table: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Copy data from old table to new table
|
||||
// We need to check if created_by column exists in the old table
|
||||
// If not, we insert a default empty string for it
|
||||
var hasCreatedBy bool
|
||||
rows, err := db.Query(`PRAGMA table_info(issues)`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get table info: %w", err)
|
||||
}
|
||||
for rows.Next() {
|
||||
var cid int
|
||||
var name, ctype string
|
||||
var notnull, pk int
|
||||
var dflt interface{}
|
||||
if err := rows.Scan(&cid, &name, &ctype, ¬null, &dflt, &pk); err != nil {
|
||||
rows.Close()
|
||||
return fmt.Errorf("failed to scan table info: %w", err)
|
||||
}
|
||||
if name == "created_by" {
|
||||
hasCreatedBy = true
|
||||
break
|
||||
}
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
var insertSQL string
|
||||
if hasCreatedBy {
|
||||
// Old table has created_by, copy all columns directly
|
||||
insertSQL = `
|
||||
INSERT INTO issues_new (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
)
|
||||
SELECT
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
FROM issues
|
||||
`
|
||||
} else {
|
||||
// Old table doesn't have created_by, use empty string default
|
||||
insertSQL = `
|
||||
INSERT INTO issues_new (
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
)
|
||||
SELECT
|
||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes, created_at,
|
||||
'', updated_at, closed_at, external_ref, source_repo, compaction_level,
|
||||
compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
|
||||
delete_reason, original_type, sender, ephemeral, close_reason, pinned,
|
||||
is_template, await_type, await_id, timeout_ns, waiters
|
||||
FROM issues
|
||||
`
|
||||
}
|
||||
|
||||
_, err = db.Exec(insertSQL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy issues data: %w", err)
|
||||
}
|
||||
|
||||
// Step 3: Drop old table
|
||||
_, err = db.Exec(`DROP TABLE issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to drop old issues table: %w", err)
|
||||
}
|
||||
|
||||
// Step 4: Rename new table to original name
|
||||
_, err = db.Exec(`ALTER TABLE issues_new RENAME TO issues`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to rename new issues table: %w", err)
|
||||
}
|
||||
|
||||
// Step 5: Recreate indexes (they were dropped with the table)
|
||||
indexes := []string{
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_priority ON issues(priority)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_assignee ON issues(assignee)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_created_at ON issues(created_at)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_external_ref ON issues(external_ref) WHERE external_ref IS NOT NULL`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_updated_at ON issues(updated_at)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_status_priority ON issues(status, priority)`,
|
||||
`CREATE INDEX IF NOT EXISTS idx_issues_gate ON issues(issue_type) WHERE issue_type = 'gate'`,
|
||||
}
|
||||
|
||||
for _, idx := range indexes {
|
||||
if _, err := db.Exec(idx); err != nil {
|
||||
return fmt.Errorf("failed to create index: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 6: Recreate views that we dropped
|
||||
_, err = db.Exec(`
|
||||
CREATE VIEW IF NOT EXISTS ready_issues AS
|
||||
WITH RECURSIVE
|
||||
blocked_directly AS (
|
||||
SELECT DISTINCT d.issue_id
|
||||
FROM dependencies d
|
||||
JOIN issues blocker ON d.depends_on_id = blocker.id
|
||||
WHERE d.type = 'blocks'
|
||||
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
),
|
||||
blocked_transitively AS (
|
||||
SELECT issue_id, 0 as depth
|
||||
FROM blocked_directly
|
||||
UNION ALL
|
||||
SELECT d.issue_id, bt.depth + 1
|
||||
FROM blocked_transitively bt
|
||||
JOIN dependencies d ON d.depends_on_id = bt.issue_id
|
||||
WHERE d.type = 'parent-child'
|
||||
AND bt.depth < 50
|
||||
)
|
||||
SELECT i.*
|
||||
FROM issues i
|
||||
WHERE i.status = 'open'
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
|
||||
)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to recreate ready_issues view: %w", err)
|
||||
}
|
||||
|
||||
_, err = db.Exec(`
|
||||
CREATE VIEW IF NOT EXISTS blocked_issues AS
|
||||
SELECT
|
||||
i.*,
|
||||
COUNT(d.depends_on_id) as blocked_by_count
|
||||
FROM issues i
|
||||
JOIN dependencies d ON i.id = d.issue_id
|
||||
JOIN issues blocker ON d.depends_on_id = blocker.id
|
||||
WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
AND d.type = 'blocks'
|
||||
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
GROUP BY i.id
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to recreate blocked_issues view: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// MigrateCreatedByColumn adds the created_by column to the issues table.
|
||||
// This tracks who created the issue, using the same actor chain as comment authors
|
||||
// (--actor flag, BD_ACTOR env, or $USER). GH#748.
|
||||
func MigrateCreatedByColumn(db *sql.DB) error {
|
||||
// Check if column already exists
|
||||
var columnExists bool
|
||||
err := db.QueryRow(`
|
||||
SELECT COUNT(*) > 0
|
||||
FROM pragma_table_info('issues')
|
||||
WHERE name = 'created_by'
|
||||
`).Scan(&columnExists)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check created_by column: %w", err)
|
||||
}
|
||||
|
||||
if columnExists {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add the created_by column
|
||||
_, err = db.Exec(`ALTER TABLE issues ADD COLUMN created_by TEXT DEFAULT ''`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add created_by column: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
func TestRunMigrations_DoesNotResetPinnedOrTemplate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
dir := t.TempDir()
|
||||
dbPath := filepath.Join(dir, "beads.db")
|
||||
|
||||
s, err := New(ctx, dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("New: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { _ = s.Close() })
|
||||
|
||||
if err := s.SetConfig(ctx, "issue_prefix", "test"); err != nil {
|
||||
t.Fatalf("SetConfig(issue_prefix): %v", err)
|
||||
}
|
||||
|
||||
issue := &types.Issue{
|
||||
Title: "Pinned template",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
Pinned: true,
|
||||
IsTemplate: true,
|
||||
}
|
||||
if err := s.CreateIssue(ctx, issue, "test-user"); err != nil {
|
||||
t.Fatalf("CreateIssue: %v", err)
|
||||
}
|
||||
|
||||
_ = s.Close()
|
||||
|
||||
s2, err := New(ctx, dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("New(reopen): %v", err)
|
||||
}
|
||||
defer func() { _ = s2.Close() }()
|
||||
|
||||
got, err := s2.GetIssue(ctx, issue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetIssue: %v", err)
|
||||
}
|
||||
if got == nil {
|
||||
t.Fatalf("expected issue to exist")
|
||||
}
|
||||
if !got.Pinned {
|
||||
t.Fatalf("expected issue to remain pinned")
|
||||
}
|
||||
if !got.IsTemplate {
|
||||
t.Fatalf("expected issue to remain template")
|
||||
}
|
||||
}
|
||||
@@ -470,7 +470,6 @@ func TestMigrateContentHashColumn(t *testing.T) {
|
||||
assignee TEXT,
|
||||
estimated_minutes INTEGER,
|
||||
created_at DATETIME NOT NULL,
|
||||
created_by TEXT DEFAULT '',
|
||||
updated_at DATETIME NOT NULL,
|
||||
closed_at DATETIME,
|
||||
external_ref TEXT,
|
||||
@@ -498,7 +497,7 @@ func TestMigrateContentHashColumn(t *testing.T) {
|
||||
waiters TEXT DEFAULT '',
|
||||
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
);
|
||||
INSERT INTO issues SELECT id, title, description, design, acceptance_criteria, notes, status, priority, issue_type, assignee, estimated_minutes, created_at, '', updated_at, closed_at, external_ref, compaction_level, compacted_at, original_size, compacted_at_commit, source_repo, '', NULL, '', '', '', '', 0, 0, 0, '', '', '', '', '', '', 0, '' FROM issues_backup;
|
||||
INSERT INTO issues SELECT id, title, description, design, acceptance_criteria, notes, status, priority, issue_type, assignee, estimated_minutes, created_at, updated_at, closed_at, external_ref, compaction_level, compacted_at, original_size, compacted_at_commit, source_repo, '', NULL, '', '', '', '', 0, 0, 0, '', '', '', '', '', '', 0, '' FROM issues_backup;
|
||||
DROP TABLE issues_backup;
|
||||
`)
|
||||
if err != nil {
|
||||
|
||||
@@ -282,7 +282,7 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
|
||||
err := tx.QueryRowContext(ctx, `SELECT id FROM issues WHERE id = ?`, issue.ID).Scan(&existingID)
|
||||
|
||||
wisp := 0
|
||||
if issue.Ephemeral {
|
||||
if issue.Wisp {
|
||||
wisp = 1
|
||||
}
|
||||
pinned := 0
|
||||
@@ -330,23 +330,9 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
|
||||
}
|
||||
|
||||
if existingHash != issue.ContentHash {
|
||||
// Clone-local field protection pattern (bd-phtv, bd-gr4q):
|
||||
//
|
||||
// Some fields are clone-local state that shouldn't be overwritten by JSONL import:
|
||||
// - pinned: Local hook attachment (not synced between clones)
|
||||
// - await_type, await_id, timeout_ns, waiters: Gate state (wisps, never exported)
|
||||
//
|
||||
// Problem: Go's omitempty causes zero values to be absent from JSONL.
|
||||
// When importing, absent fields unmarshal as zero, which would overwrite local state.
|
||||
//
|
||||
// Solution: COALESCE(NULLIF(incoming, zero_value), existing_column)
|
||||
// - For strings: COALESCE(NULLIF(?, ''), column) -- preserve if incoming is ""
|
||||
// - For integers: COALESCE(NULLIF(?, 0), column) -- preserve if incoming is 0
|
||||
//
|
||||
// When to use this pattern:
|
||||
// 1. Field is clone-local (not part of shared issue ledger)
|
||||
// 2. Field uses omitempty (so zero value means "absent", not "clear")
|
||||
// 3. Accidental clearing would cause data loss or incorrect behavior
|
||||
// Pinned field fix (bd-phtv): Use COALESCE(NULLIF(?, 0), pinned) to preserve
|
||||
// existing pinned=1 when incoming pinned=0 (which means field was absent in
|
||||
// JSONL due to omitempty). This prevents auto-import from resetting pinned issues.
|
||||
_, err = tx.ExecContext(ctx, `
|
||||
UPDATE issues SET
|
||||
content_hash = ?, title = ?, description = ?, design = ?,
|
||||
@@ -355,10 +341,7 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
|
||||
updated_at = ?, closed_at = ?, external_ref = ?, source_repo = ?,
|
||||
deleted_at = ?, deleted_by = ?, delete_reason = ?, original_type = ?,
|
||||
sender = ?, ephemeral = ?, pinned = COALESCE(NULLIF(?, 0), pinned), is_template = ?,
|
||||
await_type = COALESCE(NULLIF(?, ''), await_type),
|
||||
await_id = COALESCE(NULLIF(?, ''), await_id),
|
||||
timeout_ns = COALESCE(NULLIF(?, 0), timeout_ns),
|
||||
waiters = COALESCE(NULLIF(?, ''), waiters)
|
||||
await_type = ?, await_id = ?, timeout_ns = ?, waiters = ?
|
||||
WHERE id = ?
|
||||
`,
|
||||
issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
||||
|
||||
@@ -54,7 +54,7 @@ func (s *SQLiteStorage) ExportToMultiRepo(ctx context.Context) (map[string]int,
|
||||
// Wisps exist only in SQLite and are shared via .beads/redirect, not JSONL.
|
||||
filtered := make([]*types.Issue, 0, len(allIssues))
|
||||
for _, issue := range allIssues {
|
||||
if !issue.Ephemeral {
|
||||
if !issue.Wisp {
|
||||
filtered = append(filtered, issue)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -892,108 +892,3 @@ func TestExportToMultiRepo(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestUpsertPreservesGateFields tests that gate await fields are preserved during upsert (bd-gr4q).
|
||||
// Gates are wisps and aren't exported to JSONL. When an issue with the same ID is imported,
|
||||
// the await fields should NOT be cleared.
|
||||
func TestUpsertPreservesGateFields(t *testing.T) {
|
||||
store, cleanup := setupTestDB(t)
|
||||
defer cleanup()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a gate with await fields directly in the database
|
||||
gate := &types.Issue{
|
||||
ID: "bd-gate1",
|
||||
Title: "Test Gate",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeGate,
|
||||
Ephemeral: true,
|
||||
AwaitType: "gh:run",
|
||||
AwaitID: "123456789",
|
||||
Timeout: 30 * 60 * 1000000000, // 30 minutes in nanoseconds
|
||||
Waiters: []string{"beads/dave"},
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
gate.ContentHash = gate.ComputeContentHash()
|
||||
|
||||
if err := store.CreateIssue(ctx, gate, "test"); err != nil {
|
||||
t.Fatalf("failed to create gate: %v", err)
|
||||
}
|
||||
|
||||
// Verify gate was created with await fields
|
||||
retrieved, err := store.GetIssue(ctx, gate.ID)
|
||||
if err != nil || retrieved == nil {
|
||||
t.Fatalf("failed to get gate: %v", err)
|
||||
}
|
||||
if retrieved.AwaitType != "gh:run" {
|
||||
t.Errorf("expected AwaitType=gh:run, got %q", retrieved.AwaitType)
|
||||
}
|
||||
if retrieved.AwaitID != "123456789" {
|
||||
t.Errorf("expected AwaitID=123456789, got %q", retrieved.AwaitID)
|
||||
}
|
||||
|
||||
// Create a JSONL file with an issue that has the same ID but no await fields
|
||||
// (simulating what happens when a non-gate issue is imported)
|
||||
tmpDir := t.TempDir()
|
||||
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
|
||||
f, err := os.Create(jsonlPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create JSONL file: %v", err)
|
||||
}
|
||||
|
||||
// Same ID, different content (to trigger update), no await fields
|
||||
incomingIssue := types.Issue{
|
||||
ID: "bd-gate1",
|
||||
Title: "Test Gate Updated", // Different title to trigger update
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeGate,
|
||||
AwaitType: "", // Empty - simulating JSONL without await fields
|
||||
AwaitID: "", // Empty
|
||||
Timeout: 0,
|
||||
Waiters: nil,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now().Add(time.Second), // Newer timestamp
|
||||
}
|
||||
incomingIssue.ContentHash = incomingIssue.ComputeContentHash()
|
||||
|
||||
enc := json.NewEncoder(f)
|
||||
if err := enc.Encode(incomingIssue); err != nil {
|
||||
t.Fatalf("failed to encode issue: %v", err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
// Import the JSONL file (this should NOT clear the await fields)
|
||||
_, err = store.importJSONLFile(ctx, jsonlPath, "test")
|
||||
if err != nil {
|
||||
t.Fatalf("importJSONLFile failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify await fields are preserved
|
||||
updated, err := store.GetIssue(ctx, gate.ID)
|
||||
if err != nil || updated == nil {
|
||||
t.Fatalf("failed to get updated gate: %v", err)
|
||||
}
|
||||
|
||||
// Title should be updated
|
||||
if updated.Title != "Test Gate Updated" {
|
||||
t.Errorf("expected title to be updated, got %q", updated.Title)
|
||||
}
|
||||
|
||||
// Await fields should be PRESERVED (not cleared)
|
||||
if updated.AwaitType != "gh:run" {
|
||||
t.Errorf("AwaitType was cleared! expected 'gh:run', got %q", updated.AwaitType)
|
||||
}
|
||||
if updated.AwaitID != "123456789" {
|
||||
t.Errorf("AwaitID was cleared! expected '123456789', got %q", updated.AwaitID)
|
||||
}
|
||||
if updated.Timeout != 30*60*1000000000 {
|
||||
t.Errorf("Timeout was cleared! expected %d, got %d", 30*60*1000000000, updated.Timeout)
|
||||
}
|
||||
if len(updated.Waiters) != 1 || updated.Waiters[0] != "beads/dave" {
|
||||
t.Errorf("Waiters was cleared! expected [beads/dave], got %v", updated.Waiters)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,7 +278,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
|
||||
err := s.db.QueryRowContext(ctx, `
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -289,7 +289,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
@@ -349,7 +349,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Ephemeral = true
|
||||
issue.Wisp = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -491,7 +491,7 @@ func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef s
|
||||
err := s.db.QueryRowContext(ctx, `
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -502,7 +502,7 @@ func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef s
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRefCol,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRefCol,
|
||||
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
@@ -562,7 +562,7 @@ func (s *SQLiteStorage) GetIssueByExternalRef(ctx context.Context, externalRef s
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Ephemeral = true
|
||||
issue.Wisp = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -1652,8 +1652,8 @@ func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter t
|
||||
}
|
||||
|
||||
// Wisp filtering (bd-kwro.9)
|
||||
if filter.Ephemeral != nil {
|
||||
if *filter.Ephemeral {
|
||||
if filter.Wisp != nil {
|
||||
if *filter.Wisp {
|
||||
whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
|
||||
} else {
|
||||
whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
|
||||
@@ -1699,7 +1699,7 @@ func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter t
|
||||
querySQL := fmt.Sprintf(`
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, created_by, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
created_at, updated_at, closed_at, external_ref, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
await_type, await_id, timeout_ns, waiters
|
||||
|
||||
@@ -17,8 +17,7 @@ import (
|
||||
// Excludes pinned issues which are persistent anchors, not actionable work (bd-92u)
|
||||
func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
|
||||
whereClauses := []string{
|
||||
"i.pinned = 0", // Exclude pinned issues (bd-92u)
|
||||
"(i.ephemeral = 0 OR i.ephemeral IS NULL)", // Exclude wisps (hq-t15s)
|
||||
"i.pinned = 0", // Exclude pinned issues (bd-92u)
|
||||
}
|
||||
args := []interface{}{}
|
||||
|
||||
@@ -87,25 +86,6 @@ func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
|
||||
}
|
||||
}
|
||||
|
||||
// Parent filtering: filter to all descendants of a root issue (epic/molecule)
|
||||
// Uses recursive CTE to find all descendants via parent-child dependencies
|
||||
if filter.ParentID != nil {
|
||||
whereClauses = append(whereClauses, `
|
||||
i.id IN (
|
||||
WITH RECURSIVE descendants AS (
|
||||
SELECT issue_id FROM dependencies
|
||||
WHERE type = 'parent-child' AND depends_on_id = ?
|
||||
UNION ALL
|
||||
SELECT d.issue_id FROM dependencies d
|
||||
JOIN descendants dt ON d.depends_on_id = dt.issue_id
|
||||
WHERE d.type = 'parent-child'
|
||||
)
|
||||
SELECT issue_id FROM descendants
|
||||
)
|
||||
`)
|
||||
args = append(args, *filter.ParentID)
|
||||
}
|
||||
|
||||
// Build WHERE clause properly
|
||||
whereSQL := strings.Join(whereClauses, " AND ")
|
||||
|
||||
@@ -138,7 +118,7 @@ func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
|
||||
query := fmt.Sprintf(`
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters
|
||||
@@ -400,7 +380,7 @@ func (s *SQLiteStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if ephemeral.Valid && ephemeral.Int64 != 0 {
|
||||
issue.Ephemeral = true
|
||||
issue.Wisp = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
@@ -433,7 +413,7 @@ func (s *SQLiteStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
|
||||
// GetBlockedIssues returns issues that are blocked by dependencies or have status=blocked
|
||||
// Note: Pinned issues are excluded from the output (beads-ei4)
|
||||
// Note: Includes external: references in blocked_by list (bd-om4a)
|
||||
func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
|
||||
func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
|
||||
// Use UNION to combine:
|
||||
// 1. Issues with open/in_progress/blocked status that have dependency blockers
|
||||
// 2. Issues with status=blocked (even if they have no dependency blockers)
|
||||
@@ -443,41 +423,11 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context, filter types.WorkF
|
||||
// For blocked_by_count and blocker_ids:
|
||||
// - Count local blockers (open issues) + external refs (external:*)
|
||||
// - External refs are always considered "open" until resolved (bd-om4a)
|
||||
|
||||
// Build additional WHERE clauses for filtering
|
||||
var filterClauses []string
|
||||
var args []any
|
||||
|
||||
// Parent filtering: filter to all descendants of a root issue (epic/molecule)
|
||||
if filter.ParentID != nil {
|
||||
filterClauses = append(filterClauses, `
|
||||
i.id IN (
|
||||
WITH RECURSIVE descendants AS (
|
||||
SELECT issue_id FROM dependencies
|
||||
WHERE type = 'parent-child' AND depends_on_id = ?
|
||||
UNION ALL
|
||||
SELECT d.issue_id FROM dependencies d
|
||||
JOIN descendants dt ON d.depends_on_id = dt.issue_id
|
||||
WHERE d.type = 'parent-child'
|
||||
)
|
||||
SELECT issue_id FROM descendants
|
||||
)
|
||||
`)
|
||||
args = append(args, *filter.ParentID)
|
||||
}
|
||||
|
||||
// Build filter clause SQL
|
||||
filterSQL := ""
|
||||
if len(filterClauses) > 0 {
|
||||
filterSQL = " AND " + strings.Join(filterClauses, " AND ")
|
||||
}
|
||||
|
||||
// nolint:gosec // G201: filterSQL contains only parameterized WHERE clauses with ? placeholders, not user input
|
||||
query := fmt.Sprintf(`
|
||||
rows, err := s.db.QueryContext(ctx, `
|
||||
SELECT
|
||||
i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
|
||||
COALESCE(COUNT(d.depends_on_id), 0) as blocked_by_count,
|
||||
COALESCE(GROUP_CONCAT(d.depends_on_id, ','), '') as blocker_ids
|
||||
FROM issues i
|
||||
@@ -491,7 +441,7 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context, filter types.WorkF
|
||||
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
)
|
||||
-- External refs: always included (resolution happens at query time)
|
||||
OR d.depends_on_id LIKE 'external:%%'
|
||||
OR d.depends_on_id LIKE 'external:%'
|
||||
)
|
||||
WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred')
|
||||
AND i.pinned = 0
|
||||
@@ -511,14 +461,12 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context, filter types.WorkF
|
||||
SELECT 1 FROM dependencies d3
|
||||
WHERE d3.issue_id = i.id
|
||||
AND d3.type = 'blocks'
|
||||
AND d3.depends_on_id LIKE 'external:%%'
|
||||
AND d3.depends_on_id LIKE 'external:%'
|
||||
)
|
||||
)
|
||||
%s
|
||||
GROUP BY i.id
|
||||
ORDER BY i.priority ASC
|
||||
`, filterSQL)
|
||||
rows, err := s.db.QueryContext(ctx, query, args...)
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get blocked issues: %w", err)
|
||||
}
|
||||
@@ -538,7 +486,7 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context, filter types.WorkF
|
||||
&issue.ID, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &issue.BlockedByCount,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &issue.BlockedByCount,
|
||||
&blockerIDsStr,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -648,49 +596,6 @@ func filterBlockedByExternalDeps(ctx context.Context, blocked []*types.BlockedIs
|
||||
return result
|
||||
}
|
||||
|
||||
// GetNewlyUnblockedByClose returns issues that became unblocked when the given issue was closed.
|
||||
// This is used by the --suggest-next flag on bd close to show what work is now available.
|
||||
// An issue is "newly unblocked" if:
|
||||
// - It had a 'blocks' dependency on the closed issue
|
||||
// - It is now unblocked (not in blocked_issues_cache)
|
||||
// - It has status open or in_progress (ready to work on)
|
||||
//
|
||||
// The cache is already rebuilt by CloseIssue before this is called, so we just need to
|
||||
// find dependents that are no longer blocked.
|
||||
func (s *SQLiteStorage) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
|
||||
// Find issues that:
|
||||
// 1. Had a 'blocks' dependency on the closed issue
|
||||
// 2. Are now NOT in blocked_issues_cache (unblocked)
|
||||
// 3. Have status open or in_progress
|
||||
// 4. Are not pinned
|
||||
query := `
|
||||
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
|
||||
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
|
||||
i.created_at, i.created_by, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
|
||||
i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
|
||||
i.sender, i.ephemeral, i.pinned, i.is_template,
|
||||
i.await_type, i.await_id, i.timeout_ns, i.waiters
|
||||
FROM issues i
|
||||
JOIN dependencies d ON i.id = d.issue_id
|
||||
WHERE d.depends_on_id = ?
|
||||
AND d.type = 'blocks'
|
||||
AND i.status IN ('open', 'in_progress')
|
||||
AND i.pinned = 0
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM blocked_issues_cache WHERE issue_id = i.id
|
||||
)
|
||||
ORDER BY i.priority ASC
|
||||
`
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query, closedIssueID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get newly unblocked issues: %w", err)
|
||||
}
|
||||
defer func() { _ = rows.Close() }()
|
||||
|
||||
return s.scanIssues(ctx, rows)
|
||||
}
|
||||
|
||||
// buildOrderByClause generates the ORDER BY clause based on sort policy
|
||||
func buildOrderByClause(policy types.SortPolicy) string {
|
||||
switch policy {
|
||||
|
||||
@@ -182,7 +182,7 @@ func TestGetBlockedIssues(t *testing.T) {
|
||||
store.AddDependency(ctx, &types.Dependency{IssueID: issue3.ID, DependsOnID: issue2.ID, Type: types.DepBlocks}, "test-user")
|
||||
|
||||
// Get blocked issues
|
||||
blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
blocked, err := store.GetBlockedIssues(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
@@ -1215,7 +1215,7 @@ func TestGetBlockedIssuesFiltersExternalDeps(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test 1: External dep not satisfied - issue should appear as blocked
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
@@ -1260,7 +1260,7 @@ func TestGetBlockedIssuesFiltersExternalDeps(t *testing.T) {
|
||||
}
|
||||
|
||||
// Now GetBlockedIssues should NOT show the issue (external dep satisfied)
|
||||
blocked, err = mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
blocked, err = mainStore.GetBlockedIssues(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed after shipping: %v", err)
|
||||
}
|
||||
@@ -1379,7 +1379,7 @@ func TestGetBlockedIssuesPartialExternalDeps(t *testing.T) {
|
||||
externalStore.Close()
|
||||
|
||||
// Issue should still be blocked (cap2 not satisfied)
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
|
||||
blocked, err := mainStore.GetBlockedIssues(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("GetBlockedIssues failed: %v", err)
|
||||
}
|
||||
@@ -1512,212 +1512,3 @@ func TestCheckExternalDepInvalidFormats(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestGetNewlyUnblockedByClose tests the --suggest-next functionality (GH#679)
|
||||
func TestGetNewlyUnblockedByClose(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create a blocker issue
|
||||
blocker := env.CreateIssueWith("Blocker", types.StatusOpen, 1, types.TypeTask)
|
||||
|
||||
// Create two issues blocked by the blocker
|
||||
blocked1 := env.CreateIssueWith("Blocked 1", types.StatusOpen, 2, types.TypeTask)
|
||||
blocked2 := env.CreateIssueWith("Blocked 2", types.StatusOpen, 3, types.TypeTask)
|
||||
|
||||
// Create one issue blocked by multiple issues (blocker + another)
|
||||
otherBlocker := env.CreateIssueWith("Other Blocker", types.StatusOpen, 1, types.TypeTask)
|
||||
multiBlocked := env.CreateIssueWith("Multi Blocked", types.StatusOpen, 2, types.TypeTask)
|
||||
|
||||
// Add dependencies (issue depends on blocker)
|
||||
env.AddDep(blocked1, blocker)
|
||||
env.AddDep(blocked2, blocker)
|
||||
env.AddDep(multiBlocked, blocker)
|
||||
env.AddDep(multiBlocked, otherBlocker)
|
||||
|
||||
// Close the blocker
|
||||
env.Close(blocker, "Done")
|
||||
|
||||
// Get newly unblocked issues
|
||||
ctx := context.Background()
|
||||
unblocked, err := env.Store.GetNewlyUnblockedByClose(ctx, blocker.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("GetNewlyUnblockedByClose failed: %v", err)
|
||||
}
|
||||
|
||||
// Should return blocked1 and blocked2 (but not multiBlocked, which is still blocked by otherBlocker)
|
||||
if len(unblocked) != 2 {
|
||||
t.Errorf("Expected 2 unblocked issues, got %d", len(unblocked))
|
||||
}
|
||||
|
||||
// Check that the right issues are unblocked
|
||||
unblockedIDs := make(map[string]bool)
|
||||
for _, issue := range unblocked {
|
||||
unblockedIDs[issue.ID] = true
|
||||
}
|
||||
|
||||
if !unblockedIDs[blocked1.ID] {
|
||||
t.Errorf("Expected %s to be unblocked", blocked1.ID)
|
||||
}
|
||||
if !unblockedIDs[blocked2.ID] {
|
||||
t.Errorf("Expected %s to be unblocked", blocked2.ID)
|
||||
}
|
||||
if unblockedIDs[multiBlocked.ID] {
|
||||
t.Errorf("Expected %s to still be blocked (has another blocker)", multiBlocked.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDFilterDescendants tests that ParentID filter returns all descendants of an epic
|
||||
func TestParentIDFilterDescendants(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create hierarchy:
|
||||
// epic1 (root)
|
||||
// ├── task1 (child of epic1)
|
||||
// ├── task2 (child of epic1)
|
||||
// └── epic2 (child of epic1)
|
||||
// └── task3 (grandchild of epic1)
|
||||
// task4 (unrelated, should not appear in results)
|
||||
epic1 := env.CreateEpic("Epic 1")
|
||||
task1 := env.CreateIssue("Task 1")
|
||||
task2 := env.CreateIssue("Task 2")
|
||||
epic2 := env.CreateEpic("Epic 2")
|
||||
task3 := env.CreateIssue("Task 3")
|
||||
task4 := env.CreateIssue("Task 4 - unrelated")
|
||||
|
||||
env.AddParentChild(task1, epic1)
|
||||
env.AddParentChild(task2, epic1)
|
||||
env.AddParentChild(epic2, epic1)
|
||||
env.AddParentChild(task3, epic2)
|
||||
|
||||
// Query with ParentID = epic1
|
||||
parentID := epic1.ID
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID})
|
||||
|
||||
// Should include task1, task2, epic2, task3 (all descendants of epic1)
|
||||
// Should NOT include epic1 itself or task4
|
||||
if len(ready) != 4 {
|
||||
t.Fatalf("Expected 4 ready issues in parent scope, got %d", len(ready))
|
||||
}
|
||||
|
||||
// Verify the returned issues are the expected ones
|
||||
readyIDs := make(map[string]bool)
|
||||
for _, issue := range ready {
|
||||
readyIDs[issue.ID] = true
|
||||
}
|
||||
|
||||
if !readyIDs[task1.ID] {
|
||||
t.Errorf("Expected task1 to be in results")
|
||||
}
|
||||
if !readyIDs[task2.ID] {
|
||||
t.Errorf("Expected task2 to be in results")
|
||||
}
|
||||
if !readyIDs[epic2.ID] {
|
||||
t.Errorf("Expected epic2 to be in results")
|
||||
}
|
||||
if !readyIDs[task3.ID] {
|
||||
t.Errorf("Expected task3 to be in results")
|
||||
}
|
||||
if readyIDs[epic1.ID] {
|
||||
t.Errorf("Expected epic1 (root) to NOT be in results")
|
||||
}
|
||||
if readyIDs[task4.ID] {
|
||||
t.Errorf("Expected task4 (unrelated) to NOT be in results")
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDWithOtherFilters tests that ParentID can be combined with other filters
|
||||
func TestParentIDWithOtherFilters(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create hierarchy:
|
||||
// epic1 (root)
|
||||
// ├── task1 (priority 0)
|
||||
// ├── task2 (priority 1)
|
||||
// └── task3 (priority 2)
|
||||
epic1 := env.CreateEpic("Epic 1")
|
||||
task1 := env.CreateIssueWith("Task 1 - P0", types.StatusOpen, 0, types.TypeTask)
|
||||
task2 := env.CreateIssueWith("Task 2 - P1", types.StatusOpen, 1, types.TypeTask)
|
||||
task3 := env.CreateIssueWith("Task 3 - P2", types.StatusOpen, 2, types.TypeTask)
|
||||
|
||||
env.AddParentChild(task1, epic1)
|
||||
env.AddParentChild(task2, epic1)
|
||||
env.AddParentChild(task3, epic1)
|
||||
|
||||
// Query with ParentID = epic1 AND priority = 1
|
||||
parentID := epic1.ID
|
||||
priority := 1
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID, Priority: &priority})
|
||||
|
||||
// Should only include task2 (parent + priority 1)
|
||||
if len(ready) != 1 {
|
||||
t.Fatalf("Expected 1 issue with parent + priority filter, got %d", len(ready))
|
||||
}
|
||||
if ready[0].ID != task2.ID {
|
||||
t.Errorf("Expected task2, got %s", ready[0].ID)
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDWithBlockedDescendants tests that blocked descendants are excluded
|
||||
func TestParentIDWithBlockedDescendants(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create hierarchy:
|
||||
// epic1 (root)
|
||||
// ├── task1 (ready)
|
||||
// ├── task2 (blocked by blocker)
|
||||
// └── task3 (ready)
|
||||
// blocker (unrelated)
|
||||
epic1 := env.CreateEpic("Epic 1")
|
||||
task1 := env.CreateIssue("Task 1 - ready")
|
||||
task2 := env.CreateIssue("Task 2 - blocked")
|
||||
task3 := env.CreateIssue("Task 3 - ready")
|
||||
blocker := env.CreateIssue("Blocker")
|
||||
|
||||
env.AddParentChild(task1, epic1)
|
||||
env.AddParentChild(task2, epic1)
|
||||
env.AddParentChild(task3, epic1)
|
||||
env.AddDep(task2, blocker) // task2 is blocked
|
||||
|
||||
// Query with ParentID = epic1
|
||||
parentID := epic1.ID
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID})
|
||||
|
||||
// Should include task1, task3 (ready descendants)
|
||||
// Should NOT include task2 (blocked)
|
||||
if len(ready) != 2 {
|
||||
t.Fatalf("Expected 2 ready descendants, got %d", len(ready))
|
||||
}
|
||||
|
||||
readyIDs := make(map[string]bool)
|
||||
for _, issue := range ready {
|
||||
readyIDs[issue.ID] = true
|
||||
}
|
||||
|
||||
if !readyIDs[task1.ID] {
|
||||
t.Errorf("Expected task1 to be ready")
|
||||
}
|
||||
if !readyIDs[task3.ID] {
|
||||
t.Errorf("Expected task3 to be ready")
|
||||
}
|
||||
if readyIDs[task2.ID] {
|
||||
t.Errorf("Expected task2 to be blocked")
|
||||
}
|
||||
}
|
||||
|
||||
// TestParentIDEmptyParent tests that empty parent returns nothing
|
||||
func TestParentIDEmptyParent(t *testing.T) {
|
||||
env := newTestEnv(t)
|
||||
|
||||
// Create an epic with no children
|
||||
epic1 := env.CreateEpic("Epic 1 - no children")
|
||||
env.CreateIssue("Unrelated task")
|
||||
|
||||
// Query with ParentID = epic1 (which has no children)
|
||||
parentID := epic1.ID
|
||||
ready := env.GetReadyWork(types.WorkFilter{ParentID: &parentID})
|
||||
|
||||
// Should return empty since epic1 has no descendants
|
||||
if len(ready) != 0 {
|
||||
t.Fatalf("Expected 0 ready issues for empty parent, got %d", len(ready))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ CREATE TABLE IF NOT EXISTS issues (
|
||||
assignee TEXT,
|
||||
estimated_minutes INTEGER,
|
||||
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
created_by TEXT DEFAULT '',
|
||||
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
closed_at DATETIME,
|
||||
external_ref TEXT,
|
||||
@@ -37,12 +36,7 @@ CREATE TABLE IF NOT EXISTS issues (
|
||||
is_template INTEGER DEFAULT 0,
|
||||
-- NOTE: replies_to, relates_to, duplicate_of, superseded_by removed per Decision 004
|
||||
-- These relationships are now stored in the dependencies table
|
||||
-- closed_at constraint: closed issues must have it, tombstones may retain it from before deletion
|
||||
CHECK (
|
||||
(status = 'closed' AND closed_at IS NOT NULL) OR
|
||||
(status = 'tombstone') OR
|
||||
(status NOT IN ('closed', 'tombstone') AND closed_at IS NULL)
|
||||
)
|
||||
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status);
|
||||
@@ -230,7 +224,6 @@ WITH RECURSIVE
|
||||
SELECT i.*
|
||||
FROM issues i
|
||||
WHERE i.status = 'open'
|
||||
AND (i.ephemeral = 0 OR i.ephemeral IS NULL)
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
|
||||
);
|
||||
|
||||
@@ -310,7 +310,7 @@ func (t *sqliteTxStorage) GetIssue(ctx context.Context, id string) (*types.Issue
|
||||
row := t.conn.QueryRowContext(ctx, `
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -1089,8 +1089,8 @@ func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter
|
||||
}
|
||||
|
||||
// Wisp filtering (bd-kwro.9)
|
||||
if filter.Ephemeral != nil {
|
||||
if *filter.Ephemeral {
|
||||
if filter.Wisp != nil {
|
||||
if *filter.Wisp {
|
||||
whereClauses = append(whereClauses, "ephemeral = 1") // SQL column is still 'ephemeral'
|
||||
} else {
|
||||
whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
|
||||
@@ -1127,7 +1127,7 @@ func (t *sqliteTxStorage) SearchIssues(ctx context.Context, query string, filter
|
||||
querySQL := fmt.Sprintf(`
|
||||
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||
status, priority, issue_type, assignee, estimated_minutes,
|
||||
created_at, created_by, updated_at, closed_at, external_ref,
|
||||
created_at, updated_at, closed_at, external_ref,
|
||||
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
|
||||
deleted_at, deleted_by, delete_reason, original_type,
|
||||
sender, ephemeral, pinned, is_template,
|
||||
@@ -1188,7 +1188,7 @@ func scanIssueRow(row scanner) (*types.Issue, error) {
|
||||
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
|
||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
|
||||
&issue.CreatedAt, &issue.CreatedBy, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef,
|
||||
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
|
||||
&deletedAt, &deletedBy, &deleteReason, &originalType,
|
||||
&sender, &wisp, &pinned, &isTemplate,
|
||||
@@ -1244,7 +1244,7 @@ func scanIssueRow(row scanner) (*types.Issue, error) {
|
||||
issue.Sender = sender.String
|
||||
}
|
||||
if wisp.Valid && wisp.Int64 != 0 {
|
||||
issue.Ephemeral = true
|
||||
issue.Wisp = true
|
||||
}
|
||||
// Pinned field (bd-7h5)
|
||||
if pinned.Valid && pinned.Int64 != 0 {
|
||||
|
||||
@@ -107,10 +107,9 @@ type Storage interface {
|
||||
|
||||
// Ready Work & Blocking
|
||||
GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error)
|
||||
GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error)
|
||||
GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error)
|
||||
GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error)
|
||||
GetStaleIssues(ctx context.Context, filter types.StaleFilter) ([]*types.Issue, error)
|
||||
GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) // GH#679
|
||||
|
||||
// Events
|
||||
AddComment(ctx context.Context, issueID, actor, comment string) error
|
||||
|
||||
@@ -89,7 +89,7 @@ func (m *mockStorage) GetIssuesByLabel(ctx context.Context, label string) ([]*ty
|
||||
func (m *mockStorage) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
|
||||
func (m *mockStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error) {
|
||||
@@ -98,9 +98,6 @@ func (m *mockStorage) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.
|
||||
func (m *mockStorage) GetStaleIssues(ctx context.Context, filter types.StaleFilter) ([]*types.Issue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (m *mockStorage) AddComment(ctx context.Context, issueID, actor, comment string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user