Merge branch 'steveyegge:main' into main

This commit is contained in:
Jordan Hubbard
2025-12-26 17:22:14 -04:00
committed by GitHub
73 changed files with 5037 additions and 2358 deletions

View File

@@ -44,6 +44,7 @@ var migrationsList = []Migration{
{"remove_depends_on_fk", migrations.MigrateRemoveDependsOnFK},
{"additional_indexes", migrations.MigrateAdditionalIndexes},
{"gate_columns", migrations.MigrateGateColumns},
{"tombstone_closed_at", migrations.MigrateTombstoneClosedAt},
}
// MigrationInfo contains metadata about a migration for inspection

View File

@@ -0,0 +1,174 @@
package migrations
import (
"database/sql"
"fmt"
)
// MigrateTombstoneClosedAt updates the closed_at constraint to allow tombstones
// to retain their closed_at timestamp from before deletion.
//
// Previously: CHECK ((status = 'closed') = (closed_at IS NOT NULL))
//   - This required clearing closed_at when creating tombstones from closed issues
//
// Now:
//   - closed issues must have closed_at
//   - tombstones may have closed_at (from before deletion) or not
//   - other statuses must NOT have closed_at
//
// This allows importing tombstones that were closed before being deleted,
// preserving the historical closed_at timestamp for audit purposes.
func MigrateTombstoneClosedAt(db *sql.DB) error {
	// SQLite doesn't support ALTER TABLE to modify CHECK constraints, so we
	// follow the documented table-recreation procedure: drop dependent views,
	// create a replacement table, copy rows, swap the tables, then restore
	// indexes and views.

	// Step 0: Drop views that depend on the issues table.
	_, err := db.Exec(`DROP VIEW IF EXISTS ready_issues`)
	if err != nil {
		return fmt.Errorf("failed to drop ready_issues view: %w", err)
	}
	_, err = db.Exec(`DROP VIEW IF EXISTS blocked_issues`)
	if err != nil {
		return fmt.Errorf("failed to drop blocked_issues view: %w", err)
	}
	// Remove any issues_new left behind by a previously interrupted run.
	// Without this, the CREATE TABLE IF NOT EXISTS below would silently keep
	// a stale schema and the copy could target the wrong table shape.
	_, err = db.Exec(`DROP TABLE IF EXISTS issues_new`)
	if err != nil {
		return fmt.Errorf("failed to drop stale issues_new table: %w", err)
	}
	// Step 1: Create new table with updated constraint.
	_, err = db.Exec(`
		CREATE TABLE IF NOT EXISTS issues_new (
			id TEXT PRIMARY KEY,
			content_hash TEXT,
			title TEXT NOT NULL CHECK(length(title) <= 500),
			description TEXT NOT NULL DEFAULT '',
			design TEXT NOT NULL DEFAULT '',
			acceptance_criteria TEXT NOT NULL DEFAULT '',
			notes TEXT NOT NULL DEFAULT '',
			status TEXT NOT NULL DEFAULT 'open',
			priority INTEGER NOT NULL DEFAULT 2 CHECK(priority >= 0 AND priority <= 4),
			issue_type TEXT NOT NULL DEFAULT 'task',
			assignee TEXT,
			estimated_minutes INTEGER,
			created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
			updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
			closed_at DATETIME,
			external_ref TEXT,
			source_repo TEXT DEFAULT '',
			compaction_level INTEGER DEFAULT 0,
			compacted_at DATETIME,
			compacted_at_commit TEXT,
			original_size INTEGER,
			deleted_at DATETIME,
			deleted_by TEXT DEFAULT '',
			delete_reason TEXT DEFAULT '',
			original_type TEXT DEFAULT '',
			sender TEXT DEFAULT '',
			ephemeral INTEGER DEFAULT 0,
			close_reason TEXT DEFAULT '',
			pinned INTEGER DEFAULT 0,
			is_template INTEGER DEFAULT 0,
			await_type TEXT,
			await_id TEXT,
			timeout_ns INTEGER,
			waiters TEXT,
			CHECK (
				(status = 'closed' AND closed_at IS NOT NULL) OR
				(status = 'tombstone') OR
				(status NOT IN ('closed', 'tombstone') AND closed_at IS NULL)
			)
		)
	`)
	if err != nil {
		return fmt.Errorf("failed to create new issues table: %w", err)
	}
	// Step 2: Copy data from old table to new table.
	// Columns are listed explicitly on both sides so the copy is robust to
	// column-order drift between the live table and the DDL above; a bare
	// "SELECT *" maps columns by position and would silently scramble data
	// if the two orders ever diverged.
	_, err = db.Exec(`
		INSERT INTO issues_new (
			id, content_hash, title, description, design, acceptance_criteria,
			notes, status, priority, issue_type, assignee, estimated_minutes,
			created_at, updated_at, closed_at, external_ref, source_repo,
			compaction_level, compacted_at, compacted_at_commit, original_size,
			deleted_at, deleted_by, delete_reason, original_type, sender,
			ephemeral, close_reason, pinned, is_template,
			await_type, await_id, timeout_ns, waiters
		)
		SELECT
			id, content_hash, title, description, design, acceptance_criteria,
			notes, status, priority, issue_type, assignee, estimated_minutes,
			created_at, updated_at, closed_at, external_ref, source_repo,
			compaction_level, compacted_at, compacted_at_commit, original_size,
			deleted_at, deleted_by, delete_reason, original_type, sender,
			ephemeral, close_reason, pinned, is_template,
			await_type, await_id, timeout_ns, waiters
		FROM issues
	`)
	if err != nil {
		return fmt.Errorf("failed to copy issues data: %w", err)
	}
	// Step 3: Drop old table.
	_, err = db.Exec(`DROP TABLE issues`)
	if err != nil {
		return fmt.Errorf("failed to drop old issues table: %w", err)
	}
	// Step 4: Rename new table to original name.
	_, err = db.Exec(`ALTER TABLE issues_new RENAME TO issues`)
	if err != nil {
		return fmt.Errorf("failed to rename new issues table: %w", err)
	}
	// Step 5: Recreate indexes (they were dropped with the table).
	indexes := []string{
		`CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_priority ON issues(priority)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_assignee ON issues(assignee)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_created_at ON issues(created_at)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_external_ref ON issues(external_ref) WHERE external_ref IS NOT NULL`,
		`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`,
		`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`,
		`CREATE INDEX IF NOT EXISTS idx_issues_updated_at ON issues(updated_at)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_status_priority ON issues(status, priority)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_gate ON issues(issue_type) WHERE issue_type = 'gate'`,
	}
	for _, idx := range indexes {
		if _, err := db.Exec(idx); err != nil {
			return fmt.Errorf("failed to create index: %w", err)
		}
	}
	// Step 6: Recreate views that we dropped.
	// ready_issues: open issues with no transitive blocker; the recursion is
	// depth-capped at 50 to guard against dependency cycles.
	_, err = db.Exec(`
		CREATE VIEW IF NOT EXISTS ready_issues AS
		WITH RECURSIVE
		blocked_directly AS (
			SELECT DISTINCT d.issue_id
			FROM dependencies d
			JOIN issues blocker ON d.depends_on_id = blocker.id
			WHERE d.type = 'blocks'
			AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
		),
		blocked_transitively AS (
			SELECT issue_id, 0 as depth
			FROM blocked_directly
			UNION ALL
			SELECT d.issue_id, bt.depth + 1
			FROM blocked_transitively bt
			JOIN dependencies d ON d.depends_on_id = bt.issue_id
			WHERE d.type = 'parent-child'
			AND bt.depth < 50
		)
		SELECT i.*
		FROM issues i
		WHERE i.status = 'open'
		AND NOT EXISTS (
			SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
		)
	`)
	if err != nil {
		return fmt.Errorf("failed to recreate ready_issues view: %w", err)
	}
	_, err = db.Exec(`
		CREATE VIEW IF NOT EXISTS blocked_issues AS
		SELECT
			i.*,
			COUNT(d.depends_on_id) as blocked_by_count
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		JOIN issues blocker ON d.depends_on_id = blocker.id
		WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred')
		AND d.type = 'blocks'
		AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
		GROUP BY i.id
	`)
	if err != nil {
		return fmt.Errorf("failed to recreate blocked_issues view: %w", err)
	}
	return nil
}

View File

@@ -330,9 +330,23 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
}
if existingHash != issue.ContentHash {
// Pinned field fix (bd-phtv): Use COALESCE(NULLIF(?, 0), pinned) to preserve
// existing pinned=1 when incoming pinned=0 (which means field was absent in
// JSONL due to omitempty). This prevents auto-import from resetting pinned issues.
// Clone-local field protection pattern (bd-phtv, bd-gr4q):
//
// Some fields are clone-local state that shouldn't be overwritten by JSONL import:
// - pinned: Local hook attachment (not synced between clones)
// - await_type, await_id, timeout_ns, waiters: Gate state (wisps, never exported)
//
// Problem: Go's omitempty causes zero values to be absent from JSONL.
// When importing, absent fields unmarshal as zero, which would overwrite local state.
//
// Solution: COALESCE(NULLIF(incoming, zero_value), existing_column)
// - For strings: COALESCE(NULLIF(?, ''), column) -- preserve if incoming is ""
// - For integers: COALESCE(NULLIF(?, 0), column) -- preserve if incoming is 0
//
// When to use this pattern:
// 1. Field is clone-local (not part of shared issue ledger)
// 2. Field uses omitempty (so zero value means "absent", not "clear")
// 3. Accidental clearing would cause data loss or incorrect behavior
_, err = tx.ExecContext(ctx, `
UPDATE issues SET
content_hash = ?, title = ?, description = ?, design = ?,
@@ -341,7 +355,10 @@ func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *
updated_at = ?, closed_at = ?, external_ref = ?, source_repo = ?,
deleted_at = ?, deleted_by = ?, delete_reason = ?, original_type = ?,
sender = ?, ephemeral = ?, pinned = COALESCE(NULLIF(?, 0), pinned), is_template = ?,
await_type = ?, await_id = ?, timeout_ns = ?, waiters = ?
await_type = COALESCE(NULLIF(?, ''), await_type),
await_id = COALESCE(NULLIF(?, ''), await_id),
timeout_ns = COALESCE(NULLIF(?, 0), timeout_ns),
waiters = COALESCE(NULLIF(?, ''), waiters)
WHERE id = ?
`,
issue.ContentHash, issue.Title, issue.Description, issue.Design,

View File

@@ -892,3 +892,108 @@ func TestExportToMultiRepo(t *testing.T) {
}
})
}
// TestUpsertPreservesGateFields tests that gate await fields are preserved during upsert (bd-gr4q).
// Gates are wisps and aren't exported to JSONL. When an issue with the same ID is imported,
// the await fields should NOT be cleared.
func TestUpsertPreservesGateFields(t *testing.T) {
	store, cleanup := setupTestDB(t)
	defer cleanup()
	ctx := context.Background()

	// Seed the database with a gate whose await fields are populated.
	original := &types.Issue{
		ID:        "bd-gate1",
		Title:     "Test Gate",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeGate,
		Wisp:      true,
		AwaitType: "gh:run",
		AwaitID:   "123456789",
		Timeout:   30 * 60 * 1000000000, // 30 minutes in nanoseconds
		Waiters:   []string{"beads/dave"},
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	original.ContentHash = original.ComputeContentHash()
	if err := store.CreateIssue(ctx, original, "test"); err != nil {
		t.Fatalf("failed to create gate: %v", err)
	}

	// Sanity check: the stored gate carries the await fields.
	created, err := store.GetIssue(ctx, original.ID)
	if err != nil || created == nil {
		t.Fatalf("failed to get gate: %v", err)
	}
	if created.AwaitType != "gh:run" {
		t.Errorf("expected AwaitType=gh:run, got %q", created.AwaitType)
	}
	if created.AwaitID != "123456789" {
		t.Errorf("expected AwaitID=123456789, got %q", created.AwaitID)
	}

	// Write a JSONL file holding the same ID with blank await fields,
	// exactly what an import of a non-gate issue would look like.
	jsonlPath := filepath.Join(t.TempDir(), "issues.jsonl")
	out, err := os.Create(jsonlPath)
	if err != nil {
		t.Fatalf("failed to create JSONL file: %v", err)
	}
	incoming := types.Issue{
		ID:        "bd-gate1",
		Title:     "Test Gate Updated", // Different title to trigger update
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeGate,
		AwaitType: "", // Empty - simulating JSONL without await fields
		AwaitID:   "", // Empty
		Timeout:   0,
		Waiters:   nil,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now().Add(time.Second), // Newer timestamp
	}
	incoming.ContentHash = incoming.ComputeContentHash()
	if err := json.NewEncoder(out).Encode(incoming); err != nil {
		t.Fatalf("failed to encode issue: %v", err)
	}
	out.Close()

	// Importing must update the title but leave the await state untouched.
	if _, err := store.importJSONLFile(ctx, jsonlPath, "test"); err != nil {
		t.Fatalf("importJSONLFile failed: %v", err)
	}

	after, err := store.GetIssue(ctx, original.ID)
	if err != nil || after == nil {
		t.Fatalf("failed to get updated gate: %v", err)
	}
	if after.Title != "Test Gate Updated" {
		t.Errorf("expected title to be updated, got %q", after.Title)
	}
	if after.AwaitType != "gh:run" {
		t.Errorf("AwaitType was cleared! expected 'gh:run', got %q", after.AwaitType)
	}
	if after.AwaitID != "123456789" {
		t.Errorf("AwaitID was cleared! expected '123456789', got %q", after.AwaitID)
	}
	if after.Timeout != 30*60*1000000000 {
		t.Errorf("Timeout was cleared! expected %d, got %d", 30*60*1000000000, after.Timeout)
	}
	if len(after.Waiters) != 1 || after.Waiters[0] != "beads/dave" {
		t.Errorf("Waiters was cleared! expected [beads/dave], got %v", after.Waiters)
	}
}

View File

@@ -86,6 +86,25 @@ func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
}
}
// Parent filtering: filter to all descendants of a root issue (epic/molecule)
// Uses recursive CTE to find all descendants via parent-child dependencies
if filter.ParentID != nil {
whereClauses = append(whereClauses, `
i.id IN (
WITH RECURSIVE descendants AS (
SELECT issue_id FROM dependencies
WHERE type = 'parent-child' AND depends_on_id = ?
UNION ALL
SELECT d.issue_id FROM dependencies d
JOIN descendants dt ON d.depends_on_id = dt.issue_id
WHERE d.type = 'parent-child'
)
SELECT issue_id FROM descendants
)
`)
args = append(args, *filter.ParentID)
}
// Build WHERE clause properly
whereSQL := strings.Join(whereClauses, " AND ")
@@ -413,7 +432,7 @@ func (s *SQLiteStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
// GetBlockedIssues returns issues that are blocked by dependencies or have status=blocked
// Note: Pinned issues are excluded from the output (beads-ei4)
// Note: Includes external: references in blocked_by list (bd-om4a)
func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
// Use UNION to combine:
// 1. Issues with open/in_progress/blocked status that have dependency blockers
// 2. Issues with status=blocked (even if they have no dependency blockers)
@@ -423,7 +442,37 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
// For blocked_by_count and blocker_ids:
// - Count local blockers (open issues) + external refs (external:*)
// - External refs are always considered "open" until resolved (bd-om4a)
rows, err := s.db.QueryContext(ctx, `
// Build additional WHERE clauses for filtering
var filterClauses []string
var args []any
// Parent filtering: filter to all descendants of a root issue (epic/molecule)
if filter.ParentID != nil {
filterClauses = append(filterClauses, `
i.id IN (
WITH RECURSIVE descendants AS (
SELECT issue_id FROM dependencies
WHERE type = 'parent-child' AND depends_on_id = ?
UNION ALL
SELECT d.issue_id FROM dependencies d
JOIN descendants dt ON d.depends_on_id = dt.issue_id
WHERE d.type = 'parent-child'
)
SELECT issue_id FROM descendants
)
`)
args = append(args, *filter.ParentID)
}
// Build filter clause SQL
filterSQL := ""
if len(filterClauses) > 0 {
filterSQL = " AND " + strings.Join(filterClauses, " AND ")
}
// nolint:gosec // G201: filterSQL contains only parameterized WHERE clauses with ? placeholders, not user input
query := fmt.Sprintf(`
SELECT
i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
@@ -441,7 +490,7 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
)
-- External refs: always included (resolution happens at query time)
OR d.depends_on_id LIKE 'external:%'
OR d.depends_on_id LIKE 'external:%%'
)
WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred')
AND i.pinned = 0
@@ -461,12 +510,14 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
SELECT 1 FROM dependencies d3
WHERE d3.issue_id = i.id
AND d3.type = 'blocks'
AND d3.depends_on_id LIKE 'external:%'
AND d3.depends_on_id LIKE 'external:%%'
)
)
%s
GROUP BY i.id
ORDER BY i.priority ASC
`)
`, filterSQL)
rows, err := s.db.QueryContext(ctx, query, args...)
if err != nil {
return nil, fmt.Errorf("failed to get blocked issues: %w", err)
}
@@ -596,6 +647,49 @@ func filterBlockedByExternalDeps(ctx context.Context, blocked []*types.BlockedIs
return result
}
// GetNewlyUnblockedByClose returns issues that became unblocked when the given issue was closed.
// This is used by the --suggest-next flag on bd close to show what work is now available.
// An issue is "newly unblocked" if:
//   - It had a 'blocks' dependency on the closed issue
//   - It is now unblocked (not in blocked_issues_cache)
//   - It has status open or in_progress (ready to work on)
//
// The cache is already rebuilt by CloseIssue before this is called, so we just need to
// find dependents that are no longer blocked.
//
// NOTE(review): correctness depends on blocked_issues_cache being fresh at
// call time — confirm that every caller invokes this only after the cache
// rebuild, as CloseIssue does.
func (s *SQLiteStorage) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
	// Find issues that:
	// 1. Had a 'blocks' dependency on the closed issue
	// 2. Are now NOT in blocked_issues_cache (unblocked)
	// 3. Have status open or in_progress
	// 4. Are not pinned
	//
	// The selected column list presumably matches the scan order expected by
	// scanIssues — keep the two in sync when adding columns.
	query := `
		SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
			i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
			i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo, i.close_reason,
			i.deleted_at, i.deleted_by, i.delete_reason, i.original_type,
			i.sender, i.ephemeral, i.pinned, i.is_template,
			i.await_type, i.await_id, i.timeout_ns, i.waiters
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		WHERE d.depends_on_id = ?
		AND d.type = 'blocks'
		AND i.status IN ('open', 'in_progress')
		AND i.pinned = 0
		AND NOT EXISTS (
			SELECT 1 FROM blocked_issues_cache WHERE issue_id = i.id
		)
		ORDER BY i.priority ASC
	`
	// closedIssueID binds the single ? placeholder (d.depends_on_id).
	rows, err := s.db.QueryContext(ctx, query, closedIssueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get newly unblocked issues: %w", err)
	}
	// Close error is deliberately discarded: rows are fully consumed below.
	defer func() { _ = rows.Close() }()
	return s.scanIssues(ctx, rows)
}
// buildOrderByClause generates the ORDER BY clause based on sort policy
func buildOrderByClause(policy types.SortPolicy) string {
switch policy {

View File

@@ -182,7 +182,7 @@ func TestGetBlockedIssues(t *testing.T) {
store.AddDependency(ctx, &types.Dependency{IssueID: issue3.ID, DependsOnID: issue2.ID, Type: types.DepBlocks}, "test-user")
// Get blocked issues
blocked, err := store.GetBlockedIssues(ctx)
blocked, err := store.GetBlockedIssues(ctx, types.WorkFilter{})
if err != nil {
t.Fatalf("GetBlockedIssues failed: %v", err)
}
@@ -1215,7 +1215,7 @@ func TestGetBlockedIssuesFiltersExternalDeps(t *testing.T) {
}
// Test 1: External dep not satisfied - issue should appear as blocked
blocked, err := mainStore.GetBlockedIssues(ctx)
blocked, err := mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
if err != nil {
t.Fatalf("GetBlockedIssues failed: %v", err)
}
@@ -1260,7 +1260,7 @@ func TestGetBlockedIssuesFiltersExternalDeps(t *testing.T) {
}
// Now GetBlockedIssues should NOT show the issue (external dep satisfied)
blocked, err = mainStore.GetBlockedIssues(ctx)
blocked, err = mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
if err != nil {
t.Fatalf("GetBlockedIssues failed after shipping: %v", err)
}
@@ -1379,7 +1379,7 @@ func TestGetBlockedIssuesPartialExternalDeps(t *testing.T) {
externalStore.Close()
// Issue should still be blocked (cap2 not satisfied)
blocked, err := mainStore.GetBlockedIssues(ctx)
blocked, err := mainStore.GetBlockedIssues(ctx, types.WorkFilter{})
if err != nil {
t.Fatalf("GetBlockedIssues failed: %v", err)
}
@@ -1512,3 +1512,212 @@ func TestCheckExternalDepInvalidFormats(t *testing.T) {
})
}
}
// TestGetNewlyUnblockedByClose tests the --suggest-next functionality (GH#679)
func TestGetNewlyUnblockedByClose(t *testing.T) {
	env := newTestEnv(t)

	// Dependency graph: dep1 and dep2 wait only on the main blocker, while
	// multi waits on both the main blocker and a second, still-open one.
	mainBlocker := env.CreateIssueWith("Blocker", types.StatusOpen, 1, types.TypeTask)
	dep1 := env.CreateIssueWith("Blocked 1", types.StatusOpen, 2, types.TypeTask)
	dep2 := env.CreateIssueWith("Blocked 2", types.StatusOpen, 3, types.TypeTask)
	secondBlocker := env.CreateIssueWith("Other Blocker", types.StatusOpen, 1, types.TypeTask)
	multi := env.CreateIssueWith("Multi Blocked", types.StatusOpen, 2, types.TypeTask)
	env.AddDep(dep1, mainBlocker)
	env.AddDep(dep2, mainBlocker)
	env.AddDep(multi, mainBlocker)
	env.AddDep(multi, secondBlocker)

	// Closing the main blocker should free dep1 and dep2 only.
	env.Close(mainBlocker, "Done")

	unblocked, err := env.Store.GetNewlyUnblockedByClose(context.Background(), mainBlocker.ID)
	if err != nil {
		t.Fatalf("GetNewlyUnblockedByClose failed: %v", err)
	}
	if len(unblocked) != 2 {
		t.Errorf("Expected 2 unblocked issues, got %d", len(unblocked))
	}

	got := make(map[string]bool, len(unblocked))
	for _, iss := range unblocked {
		got[iss.ID] = true
	}
	if !got[dep1.ID] {
		t.Errorf("Expected %s to be unblocked", dep1.ID)
	}
	if !got[dep2.ID] {
		t.Errorf("Expected %s to be unblocked", dep2.ID)
	}
	if got[multi.ID] {
		t.Errorf("Expected %s to still be blocked (has another blocker)", multi.ID)
	}
}
// TestParentIDFilterDescendants tests that ParentID filter returns all descendants of an epic
func TestParentIDFilterDescendants(t *testing.T) {
	env := newTestEnv(t)

	// Hierarchy under test:
	//   epic1
	//   ├── task1
	//   ├── task2
	//   └── epic2
	//       └── task3 (grandchild of epic1)
	// task4 sits outside the tree and must never appear in results.
	epic1 := env.CreateEpic("Epic 1")
	task1 := env.CreateIssue("Task 1")
	task2 := env.CreateIssue("Task 2")
	epic2 := env.CreateEpic("Epic 2")
	task3 := env.CreateIssue("Task 3")
	task4 := env.CreateIssue("Task 4 - unrelated")
	env.AddParentChild(task1, epic1)
	env.AddParentChild(task2, epic1)
	env.AddParentChild(epic2, epic1)
	env.AddParentChild(task3, epic2)

	// Scope ready-work to epic1's subtree.
	root := epic1.ID
	ready := env.GetReadyWork(types.WorkFilter{ParentID: &root})

	// All four descendants qualify; neither the root nor task4 does.
	if len(ready) != 4 {
		t.Fatalf("Expected 4 ready issues in parent scope, got %d", len(ready))
	}
	seen := make(map[string]bool, len(ready))
	for _, iss := range ready {
		seen[iss.ID] = true
	}
	for id, msg := range map[string]string{
		task1.ID: "Expected task1 to be in results",
		task2.ID: "Expected task2 to be in results",
		epic2.ID: "Expected epic2 to be in results",
		task3.ID: "Expected task3 to be in results",
	} {
		if !seen[id] {
			t.Error(msg)
		}
	}
	if seen[epic1.ID] {
		t.Errorf("Expected epic1 (root) to NOT be in results")
	}
	if seen[task4.ID] {
		t.Errorf("Expected task4 (unrelated) to NOT be in results")
	}
}
// TestParentIDWithOtherFilters tests that ParentID can be combined with other filters
func TestParentIDWithOtherFilters(t *testing.T) {
	env := newTestEnv(t)

	// One epic with three children at priorities 0, 1, and 2.
	epic1 := env.CreateEpic("Epic 1")
	task1 := env.CreateIssueWith("Task 1 - P0", types.StatusOpen, 0, types.TypeTask)
	task2 := env.CreateIssueWith("Task 2 - P1", types.StatusOpen, 1, types.TypeTask)
	task3 := env.CreateIssueWith("Task 3 - P2", types.StatusOpen, 2, types.TypeTask)
	env.AddParentChild(task1, epic1)
	env.AddParentChild(task2, epic1)
	env.AddParentChild(task3, epic1)

	// Combining subtree scoping with a priority filter should select
	// exactly the P1 child.
	root := epic1.ID
	wantPriority := 1
	ready := env.GetReadyWork(types.WorkFilter{ParentID: &root, Priority: &wantPriority})
	if len(ready) != 1 {
		t.Fatalf("Expected 1 issue with parent + priority filter, got %d", len(ready))
	}
	if ready[0].ID != task2.ID {
		t.Errorf("Expected task2, got %s", ready[0].ID)
	}
}
// TestParentIDWithBlockedDescendants tests that blocked descendants are excluded
func TestParentIDWithBlockedDescendants(t *testing.T) {
	env := newTestEnv(t)

	// epic1 has three children; task2 is blocked by an unrelated issue and
	// therefore must not show up as ready.
	epic1 := env.CreateEpic("Epic 1")
	task1 := env.CreateIssue("Task 1 - ready")
	task2 := env.CreateIssue("Task 2 - blocked")
	task3 := env.CreateIssue("Task 3 - ready")
	blocker := env.CreateIssue("Blocker")
	env.AddParentChild(task1, epic1)
	env.AddParentChild(task2, epic1)
	env.AddParentChild(task3, epic1)
	env.AddDep(task2, blocker) // task2 is blocked

	root := epic1.ID
	ready := env.GetReadyWork(types.WorkFilter{ParentID: &root})

	// Only the two unblocked children should surface.
	if len(ready) != 2 {
		t.Fatalf("Expected 2 ready descendants, got %d", len(ready))
	}
	seen := make(map[string]bool, len(ready))
	for _, iss := range ready {
		seen[iss.ID] = true
	}
	if !seen[task1.ID] {
		t.Errorf("Expected task1 to be ready")
	}
	if !seen[task3.ID] {
		t.Errorf("Expected task3 to be ready")
	}
	if seen[task2.ID] {
		t.Errorf("Expected task2 to be blocked")
	}
}
// TestParentIDEmptyParent tests that empty parent returns nothing
func TestParentIDEmptyParent(t *testing.T) {
	env := newTestEnv(t)

	// A childless epic, plus an unrelated issue that must not leak into
	// the epic's (empty) subtree.
	childless := env.CreateEpic("Epic 1 - no children")
	env.CreateIssue("Unrelated task")

	root := childless.ID
	if ready := env.GetReadyWork(types.WorkFilter{ParentID: &root}); len(ready) != 0 {
		t.Fatalf("Expected 0 ready issues for empty parent, got %d", len(ready))
	}
}

View File

@@ -36,7 +36,12 @@ CREATE TABLE IF NOT EXISTS issues (
is_template INTEGER DEFAULT 0,
-- NOTE: replies_to, relates_to, duplicate_of, superseded_by removed per Decision 004
-- These relationships are now stored in the dependencies table
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
-- closed_at constraint: closed issues must have it, tombstones may retain it from before deletion
CHECK (
(status = 'closed' AND closed_at IS NOT NULL) OR
(status = 'tombstone') OR
(status NOT IN ('closed', 'tombstone') AND closed_at IS NULL)
)
);
CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status);