Merge PR #160: Add --reverse flag for discovery tree visualization

Co-authored-by: David Laing <david@davidlaing.com>
This commit is contained in:
Steve Yegge
2025-10-27 13:08:43 -07:00
6 changed files with 215 additions and 48 deletions
+2
View File
@@ -85,6 +85,8 @@
{"id":"bd-175","title":"Add test coverage for internal/storage/memory backend","description":"","design":"Create internal/storage/memory/memory_test.go with test coverage similar to internal/storage/sqlite tests.\n\nTest areas:\n1. Basic CRUD: CreateIssue, GetIssue, UpdateIssue, DeleteIssue\n2. Bulk operations: CreateIssues, ListIssues with filters\n3. Dependencies: AddDependency, GetDependencies, RemoveDependency\n4. Labels: AddLabel, RemoveLabel, ListLabels\n5. Comments: AddComment, GetComments\n6. ID generation: Prefix handling, counter management\n7. LoadFromIssues: Proper initialization from JSONL data\n8. Thread safety: Concurrent operations with go test -race","status":"closed","priority":1,"issue_type":"task","created_at":"2025-10-27T10:45:33.145874-07:00","updated_at":"2025-10-27T11:26:02.515421-07:00","closed_at":"2025-10-27T11:26:02.515421-07:00"}
{"id":"bd-176","title":"Document distinction between corruption prevention and collision resolution","description":"Clarify that the hash/fingerprint/collision architecture solves logical consistency (wrong prefixes, ID collisions) but NOT physical SQLite corruption. --no-db mode is still needed for multi-process/container scenarios.","status":"closed","priority":2,"issue_type":"task","created_at":"2025-10-27T10:45:46.872233-07:00","updated_at":"2025-10-27T11:27:15.6189-07:00","closed_at":"2025-10-27T11:27:15.6189-07:00"}
{"id":"bd-177","title":"Add prefix validation in SQLite mode to fail fast on mismatches","description":"The new hash/collision architecture prevents logical consistency issues, but doesn't prevent wrong-prefix bugs. Add validation to reject writes with mismatched prefixes.","status":"closed","priority":2,"issue_type":"feature","created_at":"2025-10-27T10:45:46.87772-07:00","updated_at":"2025-10-27T11:28:52.800581-07:00","closed_at":"2025-10-27T11:28:52.800581-07:00"}
{"id":"bd-178","title":"Address gosec security warnings (102 issues)","description":"Security linter warnings: file permissions (0755 should be 0750), G304 file inclusion via variable, G204 subprocess launches. Many are false positives but should be reviewed.","design":"Review each gosec warning. Add exclusions for legitimate cases to .golangci.yml. Fix real security issues (overly permissive file modes).","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-27T12:51:52.033528-07:00","updated_at":"2025-10-27T12:51:52.033528-07:00"}
{"id":"bd-179","title":"Add optional post-merge git hook example for bd sync","description":"Create example git hook that auto-runs bd sync after git pull/merge.\n\nAdd to examples/git-hooks/:\n- post-merge hook that checks if .beads/issues.jsonl changed\n- If changed: run `bd sync` automatically\n- Make it optional/documented (not auto-installed)\n\nBenefits:\n- Zero-friction sync after git pull\n- Complements auto-detection as belt-and-suspenders\n\nNote: post-merge hook already exists for pre-commit/post-merge. Extend it to support sync.","status":"open","priority":2,"issue_type":"task","created_at":"2025-10-27T12:51:52.034442-07:00","updated_at":"2025-10-27T12:51:52.034442-07:00"}
{"id":"bd-18","title":"Consider adding UnderlyingConn(ctx) for safer scoped DB access","description":"Currently UnderlyingDB() returns *sql.DB which is correct for most uses, but for extension migrations/DDL, a scoped connection might be safer.\n\n**Proposal:** Add optional UnderlyingConn(ctx) (*sql.Conn, error) method that:\n- Returns a scoped connection via s.db.Conn(ctx)\n- Encourages lifetime-bounded usage\n- Reduces temptation to tune global pool settings\n- Better for one-time DDL operations like CREATE TABLE\n\n**Implementation:**\n```go\n// UnderlyingConn returns a single connection from the pool for scoped use\n// Useful for migrations and DDL. Close the connection when done.\nfunc (s *SQLiteStorage) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {\n return s.db.Conn(ctx)\n}\n```\n\n**Benefits:**\n- Safer for migrations (explicit scope)\n- Complements UnderlyingDB() for different use cases\n- Low implementation cost\n\n**Trade-off:** Adds another method to maintain, but Oracle considers this balanced compromise between safety and flexibility.\n\n**Decision:** This is optional - evaluate based on VC's actual usage patterns.","status":"closed","priority":3,"issue_type":"feature","created_at":"2025-10-22T17:07:56.832638-07:00","updated_at":"2025-10-25T23:15:33.479496-07:00","closed_at":"2025-10-22T22:02:18.479512-07:00","dependencies":[{"issue_id":"bd-18","depends_on_id":"bd-10","type":"related","created_at":"2025-10-24T13:17:40.325463-07:00","created_by":"renumber"}]}
{"id":"bd-19","title":"MCP close tool method signature error - takes 1 positional argument but 2 were given","description":"The close approval routing fix in beads-mcp v0.11.0 works correctly and successfully routes update(status=\"closed\") calls to close() tool. However, the close() tool has a Python method signature bug that prevents execution.\n\nImpact: All MCP-based close operations are broken. Workaround: Use bd CLI directly.\n\nError: BdDaemonClient.close() takes 1 positional argument but 2 were given\n\nRoot cause: BdDaemonClient.close() only accepts self, but MCP tool passes issue_id and reason.\n\nAdditional issue: CLI close has FOREIGN KEY constraint error when recording reason parameter.\n\nSee GitHub issue #107 for full details.","status":"closed","priority":0,"issue_type":"bug","created_at":"2025-10-22T17:25:34.67056-07:00","updated_at":"2025-10-25T23:15:33.480292-07:00","closed_at":"2025-10-22T17:36:55.463445-07:00"}
{"id":"bd-2","title":"Improve error handling in dependency removal during remapping","description":"In updateDependencyReferences(), RemoveDependency errors are caught and ignored with continue (line 392). Comment says 'if dependency doesn't exist' but this catches ALL errors including real failures. Should check error type with errors.Is(err, ErrDependencyNotFound) and only ignore not-found errors, returning other errors properly.","status":"closed","priority":3,"issue_type":"bug","created_at":"2025-10-21T23:53:44.31362-07:00","updated_at":"2025-10-25T23:15:33.462194-07:00","closed_at":"2025-10-18T09:41:18.209717-07:00"}
+13 -3
View File
@@ -179,6 +179,7 @@ var depTreeCmd = &cobra.Command{
showAllPaths, _ := cmd.Flags().GetBool("show-all-paths") showAllPaths, _ := cmd.Flags().GetBool("show-all-paths")
maxDepth, _ := cmd.Flags().GetInt("max-depth") maxDepth, _ := cmd.Flags().GetInt("max-depth")
reverse, _ := cmd.Flags().GetBool("reverse")
if maxDepth < 1 { if maxDepth < 1 {
fmt.Fprintf(os.Stderr, "Error: --max-depth must be >= 1\n") fmt.Fprintf(os.Stderr, "Error: --max-depth must be >= 1\n")
@@ -186,7 +187,7 @@ var depTreeCmd = &cobra.Command{
} }
ctx := context.Background() ctx := context.Background()
tree, err := store.GetDependencyTree(ctx, args[0], maxDepth, showAllPaths) tree, err := store.GetDependencyTree(ctx, args[0], maxDepth, showAllPaths, reverse)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1) os.Exit(1)
@@ -202,12 +203,20 @@ var depTreeCmd = &cobra.Command{
} }
if len(tree) == 0 { if len(tree) == 0 {
fmt.Printf("\n%s has no dependencies\n", args[0]) if reverse {
fmt.Printf("\n%s has no dependents\n", args[0])
} else {
fmt.Printf("\n%s has no dependencies\n", args[0])
}
return return
} }
cyan := color.New(color.FgCyan).SprintFunc() cyan := color.New(color.FgCyan).SprintFunc()
fmt.Printf("\n%s Dependency tree for %s:\n\n", cyan("🌲"), args[0]) if reverse {
fmt.Printf("\n%s Dependent tree for %s:\n\n", cyan("🌲"), args[0])
} else {
fmt.Printf("\n%s Dependency tree for %s:\n\n", cyan("🌲"), args[0])
}
hasTruncation := false hasTruncation := false
for _, node := range tree { for _, node := range tree {
@@ -286,6 +295,7 @@ func init() {
depAddCmd.Flags().StringP("type", "t", "blocks", "Dependency type (blocks|related|parent-child|discovered-from)") depAddCmd.Flags().StringP("type", "t", "blocks", "Dependency type (blocks|related|parent-child|discovered-from)")
depTreeCmd.Flags().Bool("show-all-paths", false, "Show all paths to nodes (no deduplication for diamond dependencies)") depTreeCmd.Flags().Bool("show-all-paths", false, "Show all paths to nodes (no deduplication for diamond dependencies)")
depTreeCmd.Flags().IntP("max-depth", "d", 50, "Maximum tree depth to display (safety limit)") depTreeCmd.Flags().IntP("max-depth", "d", 50, "Maximum tree depth to display (safety limit)")
depTreeCmd.Flags().Bool("reverse", false, "Show dependent tree (what was discovered from this) instead of dependency tree (what blocks this)")
depCmd.AddCommand(depAddCmd) depCmd.AddCommand(depAddCmd)
depCmd.AddCommand(depRemoveCmd) depCmd.AddCommand(depRemoveCmd)
depCmd.AddCommand(depTreeCmd) depCmd.AddCommand(depTreeCmd)
+30 -1
View File
@@ -21,6 +21,11 @@ Manage dependencies between beads issues.
- **tree**: Show dependency tree for an issue
- $1: "tree" - $1: "tree"
- $2: Issue ID - $2: Issue ID
- Flags:
- `--reverse`: Show dependent tree (what was discovered from this) instead of dependency tree (what blocks this)
- `--json`: Output as JSON
- `--max-depth N`: Limit tree depth (default: 50)
- `--show-all-paths`: Show all paths (no deduplication for diamond dependencies)
- **cycles**: Detect dependency cycles
@@ -34,5 +39,29 @@ Manage dependencies between beads issues.
## Examples
- `bd dep add bd-10 bd-20 --type blocks`: bd-10 blocks bd-20
- `bd dep tree bd-20`: Show what blocks bd-20 (dependency tree going UP)
- `bd dep tree bd-1 --reverse`: Show what was discovered from bd-1 (dependent tree going DOWN)
- `bd dep tree bd-1 --reverse --max-depth 3`: Show discovery tree with depth limit
- `bd dep cycles`: Check for circular dependencies
## Reverse Mode: Discovery Trees
The `--reverse` flag inverts the tree direction to show **dependents** instead of **dependencies**:
**Normal mode** (`bd dep tree ISSUE`):
- Shows what blocks you (dependency tree)
- Answers: "What must I complete before I can work on this?"
- Tree flows **UP** toward prerequisites
**Reverse mode** (`bd dep tree ISSUE --reverse`):
- Shows what was discovered from you (dependent tree)
- Answers: "What work was discovered while working on this?"
- Tree flows **DOWN** from goal to discovered tasks
- Perfect for visualizing work breakdown and discovery chains
**Use Cases:**
- Document project evolution and how work expanded from initial goal
- Share "how we got here" context with stakeholders
- Visualize work breakdown structure from epics
- Track discovery chains (what led to what)
- Show yak shaving journeys in retrospectives
+88 -37
View File
@@ -462,52 +462,103 @@ func (s *SQLiteStorage) GetAllDependencyRecords(ctx context.Context) (map[string
// When showAllPaths is false (default), nodes appearing via multiple paths (diamond dependencies) // When showAllPaths is false (default), nodes appearing via multiple paths (diamond dependencies)
// appear only once at their shallowest depth in the tree. // appear only once at their shallowest depth in the tree.
// When showAllPaths is true, all paths are shown with duplicate nodes at different depths. // When showAllPaths is true, all paths are shown with duplicate nodes at different depths.
func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool) ([]*types.TreeNode, error) { // When reverse is true, shows dependent tree (what was discovered from this) instead of dependency tree (what blocks this).
func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool, reverse bool) ([]*types.TreeNode, error) {
if maxDepth <= 0 { if maxDepth <= 0 {
maxDepth = 50 maxDepth = 50
} }
// First, build the complete tree with all paths using recursive CTE // Build SQL query based on direction
// We need to track the full path to handle proper tree structure // Normal mode: traverse dependencies (what blocks me) - goes UP
rows, err := s.db.QueryContext(ctx, ` // Reverse mode: traverse dependents (what was discovered from me) - goes DOWN
WITH RECURSIVE tree AS ( var query string
SELECT if reverse {
i.id, i.title, i.status, i.priority, i.description, i.design, // Reverse: show dependents (what depends on this issue)
i.acceptance_criteria, i.notes, i.issue_type, i.assignee, query = `
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at, WITH RECURSIVE tree AS (
i.external_ref, SELECT
0 as depth, i.id, i.title, i.status, i.priority, i.description, i.design,
i.id as path, i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
i.id as parent_id i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
FROM issues i i.external_ref,
WHERE i.id = ? 0 as depth,
i.id as path,
i.id as parent_id
FROM issues i
WHERE i.id = ?
UNION ALL UNION ALL
SELECT SELECT
i.id, i.title, i.status, i.priority, i.description, i.design, i.id, i.title, i.status, i.priority, i.description, i.design,
i.acceptance_criteria, i.notes, i.issue_type, i.assignee, i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at, i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
i.external_ref, i.external_ref,
t.depth + 1, t.depth + 1,
t.path || '→' || i.id, t.path || '→' || i.id,
t.id t.id
FROM issues i FROM issues i
JOIN dependencies d ON i.id = d.depends_on_id JOIN dependencies d ON i.id = d.issue_id
JOIN tree t ON d.issue_id = t.id JOIN tree t ON d.depends_on_id = t.id
WHERE t.depth < ? WHERE t.depth < ?
AND t.path != i.id AND t.path != i.id
AND t.path NOT LIKE i.id || '%' AND t.path NOT LIKE i.id || '%'
AND t.path NOT LIKE '%' || i.id || '%' AND t.path NOT LIKE '%' || i.id || '%'
AND t.path NOT LIKE '%' || i.id AND t.path NOT LIKE '%' || i.id
) )
SELECT id, title, status, priority, description, design, SELECT id, title, status, priority, description, design,
acceptance_criteria, notes, issue_type, assignee, acceptance_criteria, notes, issue_type, assignee,
estimated_minutes, created_at, updated_at, closed_at, estimated_minutes, created_at, updated_at, closed_at,
external_ref, depth, parent_id external_ref, depth, parent_id
FROM tree FROM tree
ORDER BY depth, priority, id ORDER BY depth, priority, id
`, issueID, maxDepth) `
} else {
// Normal: show dependencies (what this issue depends on)
query = `
WITH RECURSIVE tree AS (
SELECT
i.id, i.title, i.status, i.priority, i.description, i.design,
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
i.external_ref,
0 as depth,
i.id as path,
i.id as parent_id
FROM issues i
WHERE i.id = ?
UNION ALL
SELECT
i.id, i.title, i.status, i.priority, i.description, i.design,
i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
i.external_ref,
t.depth + 1,
t.path || '→' || i.id,
t.id
FROM issues i
JOIN dependencies d ON i.id = d.depends_on_id
JOIN tree t ON d.issue_id = t.id
WHERE t.depth < ?
AND t.path != i.id
AND t.path NOT LIKE i.id || '%'
AND t.path NOT LIKE '%' || i.id || '%'
AND t.path NOT LIKE '%' || i.id
)
SELECT id, title, status, priority, description, design,
acceptance_criteria, notes, issue_type, assignee,
estimated_minutes, created_at, updated_at, closed_at,
external_ref, depth, parent_id
FROM tree
ORDER BY depth, priority, id
`
}
// First, build the complete tree with all paths using recursive CTE
// We need to track the full path to handle proper tree structure
rows, err := s.db.QueryContext(ctx, query, issueID, maxDepth)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get dependency tree: %w", err) return nil, fmt.Errorf("failed to get dependency tree: %w", err)
} }
+80 -5
View File
@@ -249,7 +249,7 @@ func TestGetDependencyTree(t *testing.T) {
store.AddDependency(ctx, &types.Dependency{IssueID: issue3.ID, DependsOnID: issue2.ID, Type: types.DepBlocks}, "test-user") store.AddDependency(ctx, &types.Dependency{IssueID: issue3.ID, DependsOnID: issue2.ID, Type: types.DepBlocks}, "test-user")
// Get tree starting from issue3 // Get tree starting from issue3
tree, err := store.GetDependencyTree(ctx, issue3.ID, 10, false) tree, err := store.GetDependencyTree(ctx, issue3.ID, 10, false, false)
if err != nil { if err != nil {
t.Fatalf("GetDependencyTree failed: %v", err) t.Fatalf("GetDependencyTree failed: %v", err)
} }
@@ -311,7 +311,7 @@ func TestGetDependencyTree_TruncationDepth(t *testing.T) {
} }
// Get tree with maxDepth=2 (should only get 3 nodes: depths 0, 1, 2) // Get tree with maxDepth=2 (should only get 3 nodes: depths 0, 1, 2)
tree, err := store.GetDependencyTree(ctx, issues[4].ID, 2, false) tree, err := store.GetDependencyTree(ctx, issues[4].ID, 2, false, false)
if err != nil { if err != nil {
t.Fatalf("GetDependencyTree failed: %v", err) t.Fatalf("GetDependencyTree failed: %v", err)
} }
@@ -354,7 +354,7 @@ func TestGetDependencyTree_DefaultDepth(t *testing.T) {
}, "test-user") }, "test-user")
// Get tree with default depth (50) // Get tree with default depth (50)
tree, err := store.GetDependencyTree(ctx, issue2.ID, 50, false) tree, err := store.GetDependencyTree(ctx, issue2.ID, 50, false, false)
if err != nil { if err != nil {
t.Fatalf("GetDependencyTree failed: %v", err) t.Fatalf("GetDependencyTree failed: %v", err)
} }
@@ -399,7 +399,7 @@ func TestGetDependencyTree_MaxDepthOne(t *testing.T) {
}, "test-user") }, "test-user")
// Get tree with maxDepth=1 (should get root + one level) // Get tree with maxDepth=1 (should get root + one level)
tree, err := store.GetDependencyTree(ctx, issue3.ID, 1, false) tree, err := store.GetDependencyTree(ctx, issue3.ID, 1, false, false)
if err != nil { if err != nil {
t.Fatalf("GetDependencyTree failed: %v", err) t.Fatalf("GetDependencyTree failed: %v", err)
} }
@@ -726,6 +726,81 @@ func TestCrossTypeCyclePreventionThreeIssues(t *testing.T) {
} }
} }
func TestGetDependencyTree_Reverse(t *testing.T) {
store, cleanup := setupTestDB(t)
defer cleanup()
ctx := context.Background()
// Create a dependency chain: issue1 <- issue2 <- issue3
// (issue3 depends on issue2, issue2 depends on issue1)
issue1 := &types.Issue{
Title: "Base issue",
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeTask,
}
issue2 := &types.Issue{
Title: "Depends on issue1",
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeTask,
}
issue3 := &types.Issue{
Title: "Depends on issue2",
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeTask,
}
store.CreateIssue(ctx, issue1, "test")
store.CreateIssue(ctx, issue2, "test")
store.CreateIssue(ctx, issue3, "test")
// Create dependencies: issue3 → issue2 → issue1
dep1 := &types.Dependency{IssueID: issue2.ID, DependsOnID: issue1.ID, Type: types.DepBlocks}
dep2 := &types.Dependency{IssueID: issue3.ID, DependsOnID: issue2.ID, Type: types.DepBlocks}
store.AddDependency(ctx, dep1, "test")
store.AddDependency(ctx, dep2, "test")
// Test normal mode: from issue3, should traverse UP to issue1
normalTree, err := store.GetDependencyTree(ctx, issue3.ID, 10, false, false)
if err != nil {
t.Fatalf("GetDependencyTree normal mode failed: %v", err)
}
if len(normalTree) != 3 {
t.Fatalf("Expected 3 nodes in normal tree, got %d", len(normalTree))
}
// Test reverse mode: from issue1, should traverse DOWN to issue3
reverseTree, err := store.GetDependencyTree(ctx, issue1.ID, 10, false, true)
if err != nil {
t.Fatalf("GetDependencyTree reverse mode failed: %v", err)
}
if len(reverseTree) != 3 {
t.Fatalf("Expected 3 nodes in reverse tree, got %d", len(reverseTree))
}
// Verify reverse tree structure: issue1 at depth 0
depthMap := make(map[string]int)
for _, node := range reverseTree {
depthMap[node.ID] = node.Depth
}
if depthMap[issue1.ID] != 0 {
t.Errorf("Expected depth 0 for %s in reverse tree, got %d", issue1.ID, depthMap[issue1.ID])
}
// issue2 should be at depth 1 (depends on issue1)
if depthMap[issue2.ID] != 1 {
t.Errorf("Expected depth 1 for %s in reverse tree, got %d", issue2.ID, depthMap[issue2.ID])
}
// issue3 should be at depth 2 (depends on issue2)
if depthMap[issue3.ID] != 2 {
t.Errorf("Expected depth 2 for %s in reverse tree, got %d", issue3.ID, depthMap[issue3.ID])
}
}
func TestGetDependencyTree_SubstringBug(t *testing.T) { func TestGetDependencyTree_SubstringBug(t *testing.T) {
store, cleanup := setupTestDB(t) store, cleanup := setupTestDB(t)
defer cleanup() defer cleanup()
@@ -788,7 +863,7 @@ func TestGetDependencyTree_SubstringBug(t *testing.T) {
} }
// Get tree starting from bd-10 // Get tree starting from bd-10
tree, err := store.GetDependencyTree(ctx, issues[9].ID, 10, false) tree, err := store.GetDependencyTree(ctx, issues[9].ID, 10, false, false)
if err != nil { if err != nil {
t.Fatalf("GetDependencyTree failed: %v", err) t.Fatalf("GetDependencyTree failed: %v", err)
} }
+2 -2
View File
@@ -25,7 +25,7 @@ type Storage interface {
GetDependents(ctx context.Context, issueID string) ([]*types.Issue, error) GetDependents(ctx context.Context, issueID string) ([]*types.Issue, error)
GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error)
GetAllDependencyRecords(ctx context.Context) (map[string][]*types.Dependency, error) GetAllDependencyRecords(ctx context.Context) (map[string][]*types.Dependency, error)
GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool) ([]*types.TreeNode, error) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool, reverse bool) ([]*types.TreeNode, error)
DetectCycles(ctx context.Context) ([][]*types.Issue, error) DetectCycles(ctx context.Context) ([][]*types.Issue, error)
// Labels // Labels
@@ -53,7 +53,7 @@ type Storage interface {
// Dirty tracking (for incremental JSONL export) // Dirty tracking (for incremental JSONL export)
GetDirtyIssues(ctx context.Context) ([]string, error) GetDirtyIssues(ctx context.Context) ([]string, error)
GetDirtyIssueHash(ctx context.Context, issueID string) (string, error) // For timestamp-only dedup (bd-164) GetDirtyIssueHash(ctx context.Context, issueID string) (string, error) // For timestamp-only dedup (bd-164)
ClearDirtyIssues(ctx context.Context) error // WARNING: Race condition (bd-52), use ClearDirtyIssuesByID ClearDirtyIssues(ctx context.Context) error // WARNING: Race condition (bd-52), use ClearDirtyIssuesByID
ClearDirtyIssuesByID(ctx context.Context, issueIDs []string) error ClearDirtyIssuesByID(ctx context.Context, issueIDs []string) error
// Export hash tracking (for timestamp-only dedup, bd-164) // Export hash tracking (for timestamp-only dedup, bd-164)