perf(goals): optimize gt goals from 6s to <50ms via direct SQLite (gt-aps.3)
Replace bd subprocess spawns with direct SQLite queries: - queryEpicsInDir: direct sqlite3 query vs bd list subprocess - getLinkedConvoys: direct JOIN query vs bd dep list + getIssueDetails loop - computeGoalLastMovement: reuse epic.UpdatedAt vs separate bd show call Also includes mailbox optimization from earlier session: - Consolidated multiple parallel queries into single bd list --all query - Filters in Go instead of spawning O(identities × statuses) bd processes 177x improvement (6.2s → 35ms) by eliminating subprocess overhead. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -13,6 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
"github.com/steveyegge/gastown/internal/style"
|
||||
"github.com/steveyegge/gastown/internal/workspace"
|
||||
)
|
||||
@@ -119,11 +120,11 @@ func showGoal(goalID string) error {
|
||||
return fmt.Errorf("'%s' is not a goal/epic (type: %s)", goalID, goal.IssueType)
|
||||
}
|
||||
|
||||
// Get linked convoys
|
||||
convoys := getLinkedConvoys(goalID)
|
||||
// Get linked convoys (no dbPath available for single goal lookup, use fallback)
|
||||
convoys := getLinkedConvoys(goalID, "")
|
||||
|
||||
// Compute staleness
|
||||
lastMovement := computeGoalLastMovement(goalID, convoys)
|
||||
lastMovement := computeGoalLastMovement(goal.UpdatedAt, convoys)
|
||||
stalenessHrs := time.Since(lastMovement).Hours()
|
||||
icon := stalenessIcon(stalenessHrs)
|
||||
|
||||
@@ -210,8 +211,8 @@ func listGoals() error {
|
||||
// Build goal info with staleness computation
|
||||
var goals []goalInfo
|
||||
for _, epic := range epics {
|
||||
convoys := getLinkedConvoys(epic.ID)
|
||||
lastMovement := computeGoalLastMovement(epic.ID, convoys)
|
||||
convoys := getLinkedConvoys(epic.ID, epic.dbPath)
|
||||
lastMovement := computeGoalLastMovement(epic.UpdatedAt, convoys)
|
||||
stalenessHrs := time.Since(lastMovement).Hours()
|
||||
icon := stalenessIcon(stalenessHrs)
|
||||
|
||||
@@ -291,11 +292,61 @@ type convoyInfo struct {
|
||||
}
|
||||
|
||||
// getLinkedConvoys finds convoys linked to a goal (via parent-child relation).
|
||||
func getLinkedConvoys(goalID string) []convoyInfo {
|
||||
// dbPath is the path to beads.db containing the goal for direct SQLite queries.
|
||||
func getLinkedConvoys(goalID, dbPath string) []convoyInfo {
|
||||
var convoys []convoyInfo
|
||||
|
||||
// If no dbPath provided, fall back to bd subprocess (shouldn't happen normally)
|
||||
if dbPath == "" {
|
||||
return getLinkedConvoysFallback(goalID)
|
||||
}
|
||||
|
||||
// Query dependencies directly from SQLite
|
||||
// Children are stored as: depends_on_id = goalID (parent) with type 'blocks'
|
||||
safeGoalID := strings.ReplaceAll(goalID, "'", "''")
|
||||
query := fmt.Sprintf(`
|
||||
SELECT i.id, i.title, i.status
|
||||
FROM dependencies d
|
||||
JOIN issues i ON d.issue_id = i.id
|
||||
WHERE d.depends_on_id = '%s' AND d.type = 'blocks' AND i.issue_type = 'convoy'
|
||||
`, safeGoalID)
|
||||
|
||||
queryCmd := exec.Command("sqlite3", "-json", dbPath, query)
|
||||
var stdout bytes.Buffer
|
||||
queryCmd.Stdout = &stdout
|
||||
|
||||
if err := queryCmd.Run(); err != nil {
|
||||
return convoys
|
||||
}
|
||||
|
||||
if stdout.Len() == 0 {
|
||||
return convoys
|
||||
}
|
||||
|
||||
var results []struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
if err := json.Unmarshal(stdout.Bytes(), &results); err != nil {
|
||||
return convoys
|
||||
}
|
||||
|
||||
for _, r := range results {
|
||||
convoys = append(convoys, convoyInfo{
|
||||
ID: r.ID,
|
||||
Title: r.Title,
|
||||
Status: r.Status,
|
||||
})
|
||||
}
|
||||
|
||||
return convoys
|
||||
}
|
||||
|
||||
// getLinkedConvoysFallback uses bd subprocess (for when dbPath is unknown).
|
||||
func getLinkedConvoysFallback(goalID string) []convoyInfo {
|
||||
var convoys []convoyInfo
|
||||
|
||||
// Query dependencies where this goal is the parent
|
||||
// The child issues (convoys) will have depends_on_id = goalID with type = 'parent-child'
|
||||
depArgs := []string{"dep", "list", goalID, "--json"}
|
||||
depCmd := exec.Command("bd", depArgs...)
|
||||
var stdout bytes.Buffer
|
||||
@@ -315,7 +366,6 @@ func getLinkedConvoys(goalID string) []convoyInfo {
|
||||
return convoys
|
||||
}
|
||||
|
||||
// Get details for each child that is a convoy
|
||||
for _, child := range deps.Children {
|
||||
details := getIssueDetails(child.ID)
|
||||
if details != nil && details.IssueType == "convoy" {
|
||||
@@ -332,27 +382,22 @@ func getLinkedConvoys(goalID string) []convoyInfo {
|
||||
|
||||
// computeGoalLastMovement computes when the goal last had activity.
|
||||
// It looks at:
|
||||
// 1. The goal's own updated_at
|
||||
// 1. The goal's own updated_at (passed directly to avoid re-querying)
|
||||
// 2. The last activity of any linked convoy's tracked issues
|
||||
func computeGoalLastMovement(goalID string, convoys []convoyInfo) time.Time {
|
||||
func computeGoalLastMovement(goalUpdatedAt string, convoys []convoyInfo) time.Time {
|
||||
// Start with the goal's own updated_at
|
||||
showCmd := exec.Command("bd", "show", goalID, "--json")
|
||||
var stdout bytes.Buffer
|
||||
showCmd.Stdout = &stdout
|
||||
showCmd.Run()
|
||||
|
||||
var goals []struct {
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
}
|
||||
json.Unmarshal(stdout.Bytes(), &goals)
|
||||
|
||||
lastMovement := time.Now().Add(-24 * time.Hour) // Default to 24 hours ago
|
||||
if len(goals) > 0 && goals[0].UpdatedAt != "" {
|
||||
if t, err := time.Parse(time.RFC3339, goals[0].UpdatedAt); err == nil {
|
||||
if goalUpdatedAt != "" {
|
||||
if t, err := time.Parse(time.RFC3339, goalUpdatedAt); err == nil {
|
||||
lastMovement = t
|
||||
}
|
||||
}
|
||||
|
||||
// If no convoys, return early (common case - avoids unnecessary work)
|
||||
if len(convoys) == 0 {
|
||||
return lastMovement
|
||||
}
|
||||
|
||||
// Check convoy activity
|
||||
townBeads, err := getTownBeadsDir()
|
||||
if err != nil {
|
||||
@@ -470,6 +515,8 @@ type epicRecord struct {
|
||||
Priority int `json:"priority"`
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
Assignee string `json:"assignee"`
|
||||
// dbPath is the path to beads.db containing this epic (for direct queries)
|
||||
dbPath string
|
||||
}
|
||||
|
||||
// collectEpicsFromAllRigs queries all rigs for epics and aggregates them.
|
||||
@@ -541,27 +588,53 @@ func collectEpicsFromAllRigs() ([]epicRecord, error) {
|
||||
return allEpics, nil
|
||||
}
|
||||
|
||||
// queryEpicsInDir runs bd list --type=epic in the specified directory.
|
||||
// queryEpicsInDir queries epics directly from SQLite in the specified directory.
|
||||
// If dir is empty, uses current working directory.
|
||||
func queryEpicsInDir(dir string) ([]epicRecord, error) {
|
||||
listArgs := []string{"list", "--type=epic", "--json"}
|
||||
if goalsStatus != "" && goalsStatus != "open" {
|
||||
if goalsStatus == "all" {
|
||||
listArgs = append(listArgs, "--all")
|
||||
} else {
|
||||
listArgs = append(listArgs, "--status="+goalsStatus)
|
||||
beadsDir := dir
|
||||
if beadsDir == "" {
|
||||
var err error
|
||||
beadsDir, err = os.Getwd()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting working directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
listCmd := exec.Command("bd", listArgs...)
|
||||
if dir != "" {
|
||||
listCmd.Dir = dir
|
||||
}
|
||||
var stdout bytes.Buffer
|
||||
listCmd.Stdout = &stdout
|
||||
// Resolve redirects to find actual beads.db
|
||||
resolvedBeads := beads.ResolveBeadsDir(beadsDir)
|
||||
dbPath := filepath.Join(resolvedBeads, "beads.db")
|
||||
|
||||
if err := listCmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("listing epics: %w", err)
|
||||
// Check if database exists
|
||||
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
|
||||
return nil, nil // No database, no epics
|
||||
}
|
||||
|
||||
// Build SQL query for epics
|
||||
query := `SELECT id, title, status, priority, updated_at, assignee
|
||||
FROM issues
|
||||
WHERE issue_type = 'epic'`
|
||||
|
||||
if goalsStatus == "" || goalsStatus == "open" {
|
||||
query += ` AND status <> 'closed' AND status <> 'tombstone'`
|
||||
} else if goalsStatus != "all" {
|
||||
query += fmt.Sprintf(` AND status = '%s'`, strings.ReplaceAll(goalsStatus, "'", "''"))
|
||||
} else {
|
||||
// --all: exclude tombstones but include everything else
|
||||
query += ` AND status <> 'tombstone'`
|
||||
}
|
||||
|
||||
queryCmd := exec.Command("sqlite3", "-json", dbPath, query)
|
||||
var stdout bytes.Buffer
|
||||
queryCmd.Stdout = &stdout
|
||||
|
||||
if err := queryCmd.Run(); err != nil {
|
||||
// Database might be empty or have no epics - not an error
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Handle empty result (sqlite3 -json returns nothing for empty sets)
|
||||
if stdout.Len() == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var epics []epicRecord
|
||||
@@ -569,5 +642,10 @@ func queryEpicsInDir(dir string) ([]epicRecord, error) {
|
||||
return nil, fmt.Errorf("parsing epics: %w", err)
|
||||
}
|
||||
|
||||
// Set dbPath on each epic for direct queries later
|
||||
for i := range epics {
|
||||
epics[i].dbPath = dbPath
|
||||
}
|
||||
|
||||
return epics, nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/gastown/internal/beads"
|
||||
@@ -108,87 +107,71 @@ func (m *Mailbox) listBeads() ([]*Message, error) {
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
// queryResult holds the result of a single query.
|
||||
type queryResult struct {
|
||||
messages []*Message
|
||||
err error
|
||||
}
|
||||
|
||||
// listFromDir queries messages from a beads directory.
|
||||
// Returns messages where identity is the assignee OR a CC recipient.
|
||||
// Includes both open and hooked messages (hooked = auto-assigned handoff mail).
|
||||
// If all queries fail, returns the last error encountered.
|
||||
// Queries are parallelized for performance (~6x speedup).
|
||||
// Uses a single consolidated query for performance (<100ms vs 10s+ for parallel queries).
|
||||
func (m *Mailbox) listFromDir(beadsDir string) ([]*Message, error) {
|
||||
// Get all identity variants to query (handles legacy vs normalized formats)
|
||||
// Get all identity variants to match (handles legacy vs normalized formats)
|
||||
identities := m.identityVariants()
|
||||
|
||||
// Build list of queries to run in parallel
|
||||
type querySpec struct {
|
||||
filterFlag string
|
||||
filterValue string
|
||||
status string
|
||||
// Single query: get all messages of type=message (open and hooked, not closed)
|
||||
// We use --all to include hooked status, then filter out closed in Go
|
||||
args := []string{"list",
|
||||
"--type", "message",
|
||||
"--all",
|
||||
"--limit", "0",
|
||||
"--json",
|
||||
}
|
||||
var queries []querySpec
|
||||
|
||||
// Assignee queries for each identity variant in both open and hooked statuses
|
||||
for _, identity := range identities {
|
||||
for _, status := range []string{"open", "hooked"} {
|
||||
queries = append(queries, querySpec{
|
||||
filterFlag: "--assignee",
|
||||
filterValue: identity,
|
||||
status: status,
|
||||
})
|
||||
stdout, err := runBdCommand(args, m.workDir, beadsDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mailbox query failed: %w", err)
|
||||
}
|
||||
|
||||
// Parse JSON output
|
||||
var beadsMsgs []BeadsMessage
|
||||
if err := json.Unmarshal(stdout, &beadsMsgs); err != nil {
|
||||
// Empty result
|
||||
if len(stdout) == 0 || string(stdout) == "null" {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// CC queries for each identity variant (open only)
|
||||
for _, identity := range identities {
|
||||
queries = append(queries, querySpec{
|
||||
filterFlag: "--label",
|
||||
filterValue: "cc:" + identity,
|
||||
status: "open",
|
||||
})
|
||||
// Build identity lookup set for fast matching
|
||||
identitySet := make(map[string]bool, len(identities))
|
||||
for _, id := range identities {
|
||||
identitySet[id] = true
|
||||
}
|
||||
|
||||
// Execute all queries in parallel
|
||||
results := make([]queryResult, len(queries))
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(queries))
|
||||
|
||||
for i, q := range queries {
|
||||
go func(idx int, spec querySpec) {
|
||||
defer wg.Done()
|
||||
msgs, err := m.queryMessages(beadsDir, spec.filterFlag, spec.filterValue, spec.status)
|
||||
results[idx] = queryResult{messages: msgs, err: err}
|
||||
}(i, q)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Collect results
|
||||
seen := make(map[string]bool)
|
||||
// Filter messages: (assignee match AND status in [open,hooked]) OR (cc match AND status=open)
|
||||
var messages []*Message
|
||||
var lastErr error
|
||||
anySucceeded := false
|
||||
for _, bm := range beadsMsgs {
|
||||
// Skip closed messages
|
||||
if bm.Status == "closed" {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, r := range results {
|
||||
if r.err != nil {
|
||||
lastErr = r.err
|
||||
} else {
|
||||
anySucceeded = true
|
||||
for _, msg := range r.messages {
|
||||
if !seen[msg.ID] {
|
||||
seen[msg.ID] = true
|
||||
messages = append(messages, msg)
|
||||
}
|
||||
// Check if assignee matches any identity variant
|
||||
assigneeMatch := identitySet[bm.Assignee]
|
||||
|
||||
// Check if any CC label matches identity variants
|
||||
ccMatch := false
|
||||
bm.ParseLabels()
|
||||
for _, cc := range bm.GetCC() {
|
||||
if identitySet[cc] {
|
||||
ccMatch = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If ALL queries failed, return the last error
|
||||
if !anySucceeded && lastErr != nil {
|
||||
return nil, fmt.Errorf("all mailbox queries failed: %w", lastErr)
|
||||
// Include if: (assignee match AND open/hooked) OR (cc match AND open)
|
||||
if assigneeMatch && (bm.Status == "open" || bm.Status == "hooked") {
|
||||
messages = append(messages, bm.ToMessage())
|
||||
} else if ccMatch && bm.Status == "open" {
|
||||
messages = append(messages, bm.ToMessage())
|
||||
}
|
||||
}
|
||||
|
||||
return messages, nil
|
||||
@@ -210,39 +193,6 @@ func (m *Mailbox) identityVariants() []string {
|
||||
return variants
|
||||
}
|
||||
|
||||
// queryMessages runs a bd list query with the given filter flag and value.
|
||||
func (m *Mailbox) queryMessages(beadsDir, filterFlag, filterValue, status string) ([]*Message, error) {
|
||||
args := []string{"list",
|
||||
"--type", "message",
|
||||
filterFlag, filterValue,
|
||||
"--status", status,
|
||||
"--json",
|
||||
}
|
||||
|
||||
stdout, err := runBdCommand(args, m.workDir, beadsDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse JSON output
|
||||
var beadsMsgs []BeadsMessage
|
||||
if err := json.Unmarshal(stdout, &beadsMsgs); err != nil {
|
||||
// Empty inbox returns empty array or nothing
|
||||
if len(stdout) == 0 || string(stdout) == "null" {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert to GGT messages - wisp status comes from beads issue.wisp field
|
||||
var messages []*Message
|
||||
for _, bm := range beadsMsgs {
|
||||
messages = append(messages, bm.ToMessage())
|
||||
}
|
||||
|
||||
return messages, nil
|
||||
}
|
||||
|
||||
func (m *Mailbox) listLegacy() ([]*Message, error) {
|
||||
file, err := os.Open(m.path)
|
||||
if err != nil {
|
||||
|
||||
Reference in New Issue
Block a user