Merge Nux polecat work (bd-9btu, bd-9hc9, bd-lrj8, bd-8zbo)

This commit is contained in:
Steve Yegge
2025-12-28 16:39:46 -08:00
13 changed files with 551 additions and 592 deletions
+250 -227
View File
@@ -18,6 +18,7 @@ import (
"github.com/steveyegge/beads/internal/beads" "github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/config" "github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/debug" "github.com/steveyegge/beads/internal/debug"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types" "github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui" "github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils" "github.com/steveyegge/beads/internal/utils"
@@ -462,6 +463,194 @@ func writeJSONLAtomic(jsonlPath string, issues []*types.Issue) ([]string, error)
return exportedIDs, nil return exportedIDs, nil
} }
// recordFlushFailure records a flush failure, incrementing the failure counter
// and displaying warnings after consecutive failures.
//
// The counter and last error are guarded by flushMutex; warnings are printed
// outside the lock so slow stderr writes never block other flush bookkeeping.
func recordFlushFailure(err error) {
	flushMutex.Lock()
	flushFailureCount++
	lastFlushError = err
	failCount := flushFailureCount
	flushMutex.Unlock()

	// Always show the immediate warning
	fmt.Fprintf(os.Stderr, "Warning: auto-flush failed: %v\n", err)

	// Show prominent warning after 3+ consecutive failures
	if failCount >= 3 {
		fmt.Fprintf(os.Stderr, "\n%s\n", ui.RenderFail(fmt.Sprintf("⚠️ CRITICAL: Auto-flush has failed %d times consecutively!", failCount)))
		fmt.Fprintf(os.Stderr, "%s\n", ui.RenderFail("⚠️ Your JSONL file may be out of sync with the database."))
		fmt.Fprintf(os.Stderr, "%s\n\n", ui.RenderFail("⚠️ Run 'bd export -o .beads/issues.jsonl' manually to fix."))
	}
}
// recordFlushSuccess records a successful flush, resetting the failure counter.
func recordFlushSuccess() {
	flushMutex.Lock()
	defer flushMutex.Unlock()
	flushFailureCount = 0
	lastFlushError = nil
}
// readExistingJSONL reads an existing JSONL file into a map for incremental merging.
// Returns empty map if file doesn't exist or can't be read.
func readExistingJSONL(jsonlPath string) (map[string]*types.Issue, error) {
	byID := make(map[string]*types.Issue)

	f, err := os.Open(jsonlPath)
	if err != nil {
		if os.IsNotExist(err) {
			// Missing file is fine: caller starts from an empty map.
			return byID, nil
		}
		return nil, fmt.Errorf("failed to open existing JSONL: %w", err)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	// Raise the scanner limit: the 64KB default can silently truncate long lines.
	sc.Buffer(make([]byte, 0, 1024), 2*1024*1024) // 2MB max line size

	for lineNo := 1; sc.Scan(); lineNo++ {
		text := sc.Text()
		if text == "" {
			continue
		}
		var issue types.Issue
		if unmarshalErr := json.Unmarshal([]byte(text), &issue); unmarshalErr != nil {
			// Warn about malformed JSONL lines and keep going.
			fmt.Fprintf(os.Stderr, "Warning: skipping malformed JSONL line %d: %v\n", lineNo, unmarshalErr)
			continue
		}
		issue.SetDefaults() // Apply defaults for omitted fields (beads-399)
		byID[issue.ID] = &issue
	}
	if err := sc.Err(); err != nil {
		return nil, fmt.Errorf("failed to read existing JSONL: %w", err)
	}
	return byID, nil
}
// fetchAndMergeIssues fetches dirty issues from the database and merges them into issueMap.
// Issues that no longer exist are removed from the map.
func fetchAndMergeIssues(ctx context.Context, s storage.Storage, dirtyIDs []string, issueMap map[string]*types.Issue) error {
	for _, id := range dirtyIDs {
		issue, err := s.GetIssue(ctx, id)
		if err != nil {
			return fmt.Errorf("failed to get issue %s: %w", id, err)
		}
		if issue == nil {
			// A nil result means the issue was deleted; drop any stale entry.
			delete(issueMap, id)
			continue
		}
		// Attach this issue's dependency records before merging it in.
		deps, err := s.GetDependencyRecords(ctx, id)
		if err != nil {
			return fmt.Errorf("failed to get dependencies for %s: %w", id, err)
		}
		issue.Dependencies = deps
		issueMap[id] = issue
	}
	return nil
}
// filterWisps removes ephemeral (wisp) issues from the map and returns a slice.
// Wisps should never be exported to JSONL.
func filterWisps(issueMap map[string]*types.Issue) []*types.Issue {
	kept := make([]*types.Issue, 0, len(issueMap))
	skipped := 0
	for _, iss := range issueMap {
		if !iss.Ephemeral {
			kept = append(kept, iss)
			continue
		}
		skipped++
	}
	if skipped > 0 {
		debug.Logf("auto-flush: filtered %d wisps from export", skipped)
	}
	return kept
}
// filterByMultiRepoPrefix filters issues by prefix in multi-repo mode.
// Non-primary repos should only export issues matching their own prefix.
func filterByMultiRepoPrefix(ctx context.Context, s storage.Storage, issues []*types.Issue) []*types.Issue {
	multiRepo := config.GetMultiRepoConfig()
	if multiRepo == nil {
		// Single-repo mode: nothing to filter.
		return issues
	}

	// Without a configured prefix there is nothing to match against.
	prefix, err := s.GetConfig(ctx, "issue_prefix")
	if err != nil || prefix == "" {
		return issues
	}

	// Work out whether this checkout is the primary repo.
	cwd, _ := os.Getwd()
	primaryPath := multiRepo.Primary
	if primaryPath == "" || primaryPath == "." {
		primaryPath = cwd
	}
	// Normalize both paths before comparing.
	absCwd, _ := filepath.Abs(cwd)
	absPrimary, _ := filepath.Abs(primaryPath)
	if absCwd == absPrimary {
		return issues // Primary repo exports all issues
	}

	// Non-primary repo: keep only issues whose IDs carry our prefix.
	want := prefix
	if !strings.HasSuffix(want, "-") {
		want = prefix + "-"
	}
	kept := make([]*types.Issue, 0, len(issues))
	for _, iss := range issues {
		if strings.HasPrefix(iss.ID, want) {
			kept = append(kept, iss)
		}
	}
	debug.Logf("multi-repo filter: %d issues -> %d (prefix %s)", len(issues), len(kept), prefix)
	return kept
}
// updateFlushExportMetadata stores hashes and timestamps after a successful flush export.
//
// All failures here are non-fatal: each is reported as a stderr warning and the
// flush is still considered successful, since the JSONL itself was written.
func updateFlushExportMetadata(ctx context.Context, s storage.Storage, jsonlPath string) {
	jsonlData, err := os.ReadFile(jsonlPath)
	if err != nil {
		return // Non-fatal, just skip metadata update
	}
	// Hash the exported JSONL content (enables hash-based auto-import).
	sum := sha256.Sum256(jsonlData)
	exportedHash := hex.EncodeToString(sum[:])
	if err := s.SetMetadata(ctx, "jsonl_content_hash", exportedHash); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_content_hash after export: %v\n", err)
	}
	// Store JSONL file hash for integrity validation
	if err := s.SetJSONLFileHash(ctx, exportedHash); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_file_hash after export: %v\n", err)
	}
	// Update last_import_time so staleness check doesn't see JSONL as "newer" (fixes #399)
	// Use RFC3339Nano to preserve nanosecond precision.
	exportTime := time.Now().Format(time.RFC3339Nano)
	if err := s.SetMetadata(ctx, "last_import_time", exportTime); err != nil {
		fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_time after export: %v\n", err)
	}
}
// flushState captures the state needed for a flush operation // flushState captures the state needed for a flush operation
type flushState struct { type flushState struct {
forceDirty bool // Force flush even if isDirty is false forceDirty bool // Force flush even if isDirty is false
@@ -507,30 +696,13 @@ func flushToJSONLWithState(state flushState) {
storeMutex.Unlock() storeMutex.Unlock()
ctx := rootCtx ctx := rootCtx
// Validate JSONL integrity BEFORE checking isDirty // Validate JSONL integrity BEFORE checking isDirty
// This detects if JSONL and export_hashes are out of sync (e.g., after git operations) // This detects if JSONL and export_hashes are out of sync (e.g., after git operations)
// If export_hashes was cleared, we need to do a full export even if nothing is dirty
integrityNeedsFullExport, err := validateJSONLIntegrity(ctx, jsonlPath) integrityNeedsFullExport, err := validateJSONLIntegrity(ctx, jsonlPath)
if err != nil { if err != nil {
// Special case: missing JSONL is not fatal, just forces full export
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
// Record failure without clearing isDirty (we didn't do any work yet) recordFlushFailure(err)
flushMutex.Lock()
flushFailureCount++
lastFlushError = err
failCount := flushFailureCount
flushMutex.Unlock()
// Always show the immediate warning
fmt.Fprintf(os.Stderr, "Warning: auto-flush failed: %v\n", err)
// Show prominent warning after 3+ consecutive failures
if failCount >= 3 {
fmt.Fprintf(os.Stderr, "\n%s\n", ui.RenderFail("⚠️ CRITICAL: Auto-flush has failed "+fmt.Sprint(failCount)+" times consecutively!"))
fmt.Fprintf(os.Stderr, "%s\n", ui.RenderFail("⚠️ Your JSONL file may be out of sync with the database."))
fmt.Fprintf(os.Stderr, "%s\n\n", ui.RenderFail("⚠️ Run 'bd export -o .beads/issues.jsonl' manually to fix."))
}
return return
} }
// Missing JSONL: treat as "force full export" case // Missing JSONL: treat as "force full export" case
@@ -538,235 +710,86 @@ func flushToJSONLWithState(state flushState) {
} }
// Check if we should proceed with export // Check if we should proceed with export
// Use only the state parameter - don't read global flags
// Caller is responsible for passing correct forceDirty/forceFullExport values
if !state.forceDirty && !integrityNeedsFullExport { if !state.forceDirty && !integrityNeedsFullExport {
// Nothing to do: not forced and no integrity issue
return return
} }
// Determine export mode // Determine export mode
fullExport := state.forceFullExport || integrityNeedsFullExport fullExport := state.forceFullExport || integrityNeedsFullExport
// Helper to record failure
recordFailure := func(err error) {
flushMutex.Lock()
flushFailureCount++
lastFlushError = err
failCount := flushFailureCount
flushMutex.Unlock()
// Always show the immediate warning
fmt.Fprintf(os.Stderr, "Warning: auto-flush failed: %v\n", err)
// Show prominent warning after 3+ consecutive failures
if failCount >= 3 {
fmt.Fprintf(os.Stderr, "\n%s\n", ui.RenderFail("⚠️ CRITICAL: Auto-flush has failed "+fmt.Sprint(failCount)+" times consecutively!"))
fmt.Fprintf(os.Stderr, "%s\n", ui.RenderFail("⚠️ Your JSONL file may be out of sync with the database."))
fmt.Fprintf(os.Stderr, "%s\n\n", ui.RenderFail("⚠️ Run 'bd export -o .beads/issues.jsonl' manually to fix."))
}
}
// Helper to record success
recordSuccess := func() {
flushMutex.Lock()
flushFailureCount = 0
lastFlushError = nil
flushMutex.Unlock()
}
// Determine which issues to export // Determine which issues to export
var dirtyIDs []string dirtyIDs, err := getIssuesToExport(ctx, fullExport)
if fullExport {
// Full export: get ALL issues (needed after ID-changing operations like renumber)
allIssues, err2 := store.SearchIssues(ctx, "", types.IssueFilter{})
if err2 != nil {
recordFailure(fmt.Errorf("failed to get all issues: %w", err2))
return
}
dirtyIDs = make([]string, len(allIssues))
for i, issue := range allIssues {
dirtyIDs[i] = issue.ID
}
} else {
// Incremental export: get only dirty issue IDs
var err2 error
dirtyIDs, err2 = store.GetDirtyIssues(ctx)
if err2 != nil {
recordFailure(fmt.Errorf("failed to get dirty issues: %w", err2))
return
}
// No dirty issues? Nothing to do!
if len(dirtyIDs) == 0 {
recordSuccess()
return
}
}
// Read existing JSONL into a map (skip for full export - we'll rebuild from scratch)
issueMap := make(map[string]*types.Issue)
if !fullExport {
if existingFile, err := os.Open(jsonlPath); err == nil {
scanner := bufio.NewScanner(existingFile)
// Increase buffer to handle large JSON lines
// Default scanner limit is 64KB which can cause silent truncation
scanner.Buffer(make([]byte, 0, 1024), 2*1024*1024) // 2MB max line size
lineNum := 0
for scanner.Scan() {
lineNum++
line := scanner.Text()
if line == "" {
continue
}
var issue types.Issue
if err := json.Unmarshal([]byte(line), &issue); err == nil {
issue.SetDefaults() // Apply defaults for omitted fields (beads-399)
issueMap[issue.ID] = &issue
} else {
// Warn about malformed JSONL lines
fmt.Fprintf(os.Stderr, "Warning: skipping malformed JSONL line %d: %v\n", lineNum, err)
}
}
// Check for scanner errors
if err := scanner.Err(); err != nil {
_ = existingFile.Close()
recordFailure(fmt.Errorf("failed to read existing JSONL: %w", err))
return
}
_ = existingFile.Close()
}
}
// Fetch only dirty issues from DB
for _, issueID := range dirtyIDs {
issue, err := store.GetIssue(ctx, issueID)
if err != nil {
recordFailure(fmt.Errorf("failed to get issue %s: %w", issueID, err))
return
}
if issue == nil {
// Issue was deleted, remove from map
delete(issueMap, issueID)
continue
}
// Get dependencies for this issue
deps, err := store.GetDependencyRecords(ctx, issueID)
if err != nil {
recordFailure(fmt.Errorf("failed to get dependencies for %s: %w", issueID, err))
return
}
issue.Dependencies = deps
// Update map
issueMap[issueID] = issue
}
// Convert map to slice (will be sorted by writeJSONLAtomic)
// Filter out wisps - they should never be exported to JSONL
// Wisps exist only in SQLite and are shared via .beads/redirect, not JSONL.
// This prevents "zombie" issues that resurrect after mol squash deletes them.
issues := make([]*types.Issue, 0, len(issueMap))
wispsSkipped := 0
for _, issue := range issueMap {
if issue.Ephemeral {
wispsSkipped++
continue
}
issues = append(issues, issue)
}
if wispsSkipped > 0 {
debug.Logf("auto-flush: filtered %d wisps from export", wispsSkipped)
}
// Filter issues by prefix in multi-repo mode for non-primary repos (fixes GH #437)
// In multi-repo mode, non-primary repos should only export issues that match
// their own prefix. Issues from other repos (hydrated for unified view) should
// NOT be written to the local JSONL.
multiRepo := config.GetMultiRepoConfig()
if multiRepo != nil {
// Get our configured prefix
prefix, prefixErr := store.GetConfig(ctx, "issue_prefix")
if prefixErr == nil && prefix != "" {
// Determine if we're the primary repo
cwd, _ := os.Getwd()
primaryPath := multiRepo.Primary
if primaryPath == "" || primaryPath == "." {
primaryPath = cwd
}
// Normalize paths for comparison
absCwd, _ := filepath.Abs(cwd)
absPrimary, _ := filepath.Abs(primaryPath)
isPrimary := absCwd == absPrimary
if !isPrimary {
// Filter to only issues matching our prefix
filtered := make([]*types.Issue, 0, len(issues))
prefixWithDash := prefix
if !strings.HasSuffix(prefixWithDash, "-") {
prefixWithDash = prefix + "-"
}
for _, issue := range issues {
if strings.HasPrefix(issue.ID, prefixWithDash) {
filtered = append(filtered, issue)
}
}
debug.Logf("multi-repo filter: %d issues -> %d (prefix %s)", len(issues), len(filtered), prefix)
issues = filtered
}
}
}
// Write atomically using common helper
exportedIDs, err := writeJSONLAtomic(jsonlPath, issues)
if err != nil { if err != nil {
recordFailure(err) recordFlushFailure(err)
return
}
if len(dirtyIDs) == 0 && !fullExport {
recordFlushSuccess()
return return
} }
// Clear only the dirty issues that were actually exported (fixes race condition) // Read existing JSONL into a map (skip for full export - we'll rebuild from scratch)
// Don't clear issues that were skipped due to timestamp-only changes var issueMap map[string]*types.Issue
if fullExport {
issueMap = make(map[string]*types.Issue)
} else {
issueMap, err = readExistingJSONL(jsonlPath)
if err != nil {
recordFlushFailure(err)
return
}
}
// Fetch dirty issues from DB and merge into map
if err := fetchAndMergeIssues(ctx, store, dirtyIDs, issueMap); err != nil {
recordFlushFailure(err)
return
}
// Convert map to slice, filtering out wisps
issues := filterWisps(issueMap)
// Filter by prefix in multi-repo mode
issues = filterByMultiRepoPrefix(ctx, store, issues)
// Write atomically
exportedIDs, err := writeJSONLAtomic(jsonlPath, issues)
if err != nil {
recordFlushFailure(err)
return
}
// Clear dirty issues that were exported
if len(exportedIDs) > 0 { if len(exportedIDs) > 0 {
if err := store.ClearDirtyIssuesByID(ctx, exportedIDs); err != nil { if err := store.ClearDirtyIssuesByID(ctx, exportedIDs); err != nil {
// Don't fail the whole flush for this, but warn
fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty issues: %v\n", err) fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty issues: %v\n", err)
} }
} }
// Store hash of exported JSONL (enables hash-based auto-import) // Update metadata (hashes, timestamps)
// Renamed from last_import_hash to jsonl_content_hash updateFlushExportMetadata(ctx, store, jsonlPath)
jsonlData, err := os.ReadFile(jsonlPath)
if err == nil {
hasher := sha256.New()
hasher.Write(jsonlData)
exportedHash := hex.EncodeToString(hasher.Sum(nil))
if err := store.SetMetadata(ctx, "jsonl_content_hash", exportedHash); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_content_hash after export: %v\n", err)
}
// Store JSONL file hash for integrity validation recordFlushSuccess()
if err := store.SetJSONLFileHash(ctx, exportedHash); err != nil { }
fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_file_hash after export: %v\n", err)
}
// Update last_import_time so staleness check doesn't see JSONL as "newer" (fixes #399) // getIssuesToExport determines which issue IDs need to be exported.
// CheckStaleness() compares last_import_time against JSONL mtime. After export, // For full export, returns all issue IDs. For incremental, returns only dirty IDs.
// the JSONL mtime is updated, so we must also update last_import_time to prevent func getIssuesToExport(ctx context.Context, fullExport bool) ([]string, error) {
// false "stale" detection on subsequent reads. if fullExport {
// allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
// Use RFC3339Nano to preserve nanosecond precision. The file mtime has nanosecond if err != nil {
// precision, so using RFC3339 (second precision) would cause the stored time to be return nil, fmt.Errorf("failed to get all issues: %w", err)
// slightly earlier than the file mtime, triggering false staleness.
exportTime := time.Now().Format(time.RFC3339Nano)
if err := store.SetMetadata(ctx, "last_import_time", exportTime); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_time after export: %v\n", err)
} }
ids := make([]string, len(allIssues))
for i, issue := range allIssues {
ids[i] = issue.ID
}
return ids, nil
} }
// Success! FlushManager manages its local state in run() goroutine. dirtyIDs, err := store.GetDirtyIssues(ctx)
recordSuccess() if err != nil {
return nil, fmt.Errorf("failed to get dirty issues: %w", err)
}
return dirtyIDs, nil
} }
+285 -349
View File
@@ -99,7 +99,20 @@ type cookResult struct {
BondPoints []string `json:"bond_points,omitempty"` BondPoints []string `json:"bond_points,omitempty"`
} }
func runCook(cmd *cobra.Command, args []string) { // cookFlags holds parsed command-line flags for the cook command
// cookFlags holds parsed command-line flags for the cook command.
type cookFlags struct {
	dryRun      bool              // --dry-run: preview the cook without writing anything
	persist     bool              // --persist: create a proto bead in the database instead of JSON output
	force       bool              // --force: replace an existing proto with the same ID
	searchPaths []string          // formula search paths passed to the formula parser
	prefix      string            // optional prefix prepended to the proto ID
	inputVars   map[string]string // variables from --var key=value flags
	runtimeMode bool              // true when --mode=runtime or any --var flag was given
	formulaPath string            // positional argument: path to the formula file
}
// parseCookFlags parses and validates cook command flags
func parseCookFlags(cmd *cobra.Command, args []string) (*cookFlags, error) {
dryRun, _ := cmd.Flags().GetBool("dry-run") dryRun, _ := cmd.Flags().GetBool("dry-run")
persist, _ := cmd.Flags().GetBool("persist") persist, _ := cmd.Flags().GetBool("persist")
force, _ := cmd.Flags().GetBool("force") force, _ := cmd.Flags().GetBool("force")
@@ -113,61 +126,51 @@ func runCook(cmd *cobra.Command, args []string) {
for _, v := range varFlags { for _, v := range varFlags {
parts := strings.SplitN(v, "=", 2) parts := strings.SplitN(v, "=", 2)
if len(parts) != 2 { if len(parts) != 2 {
fmt.Fprintf(os.Stderr, "Error: invalid variable format '%s', expected 'key=value'\n", v) return nil, fmt.Errorf("invalid variable format '%s', expected 'key=value'", v)
os.Exit(1)
} }
inputVars[parts[0]] = parts[1] inputVars[parts[0]] = parts[1]
} }
// Determine cooking mode // Validate mode
if mode != "" && mode != "compile" && mode != "runtime" {
return nil, fmt.Errorf("invalid mode '%s', must be 'compile' or 'runtime'", mode)
}
// Runtime mode is triggered by: explicit --mode=runtime OR providing --var flags // Runtime mode is triggered by: explicit --mode=runtime OR providing --var flags
runtimeMode := mode == "runtime" || len(inputVars) > 0 runtimeMode := mode == "runtime" || len(inputVars) > 0
if mode != "" && mode != "compile" && mode != "runtime" {
fmt.Fprintf(os.Stderr, "Error: invalid mode '%s', must be 'compile' or 'runtime'\n", mode)
os.Exit(1)
}
// Only need store access if persisting return &cookFlags{
if persist { dryRun: dryRun,
CheckReadonly("cook --persist") persist: persist,
force: force,
searchPaths: searchPaths,
prefix: prefix,
inputVars: inputVars,
runtimeMode: runtimeMode,
formulaPath: args[0],
}, nil
}
if store == nil { // loadAndResolveFormula parses a formula file and applies all transformations
if daemonClient != nil { func loadAndResolveFormula(formulaPath string, searchPaths []string) (*formula.Formula, error) {
fmt.Fprintf(os.Stderr, "Error: cook --persist requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon cook %s --persist ...\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
os.Exit(1)
}
}
ctx := rootCtx
// Create parser with search paths
parser := formula.NewParser(searchPaths...) parser := formula.NewParser(searchPaths...)
// Parse the formula file // Parse the formula file
formulaPath := args[0]
f, err := parser.ParseFile(formulaPath) f, err := parser.ParseFile(formulaPath)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing formula: %v\n", err) return nil, fmt.Errorf("parsing formula: %w", err)
os.Exit(1)
} }
// Resolve inheritance // Resolve inheritance
resolved, err := parser.Resolve(f) resolved, err := parser.Resolve(f)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error resolving formula: %v\n", err) return nil, fmt.Errorf("resolving formula: %w", err)
os.Exit(1)
} }
// Apply control flow operators - loops, branches, gates // Apply control flow operators - loops, branches, gates
// This must happen before advice and expansions so they can act on expanded loop steps
controlFlowSteps, err := formula.ApplyControlFlow(resolved.Steps, resolved.Compose) controlFlowSteps, err := formula.ApplyControlFlow(resolved.Steps, resolved.Compose)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error applying control flow: %v\n", err) return nil, fmt.Errorf("applying control flow: %w", err)
os.Exit(1)
} }
resolved.Steps = controlFlowSteps resolved.Steps = controlFlowSteps
@@ -177,11 +180,9 @@ func runCook(cmd *cobra.Command, args []string) {
} }
// Apply inline step expansions // Apply inline step expansions
// This processes Step.Expand fields before compose.expand/map rules
inlineExpandedSteps, err := formula.ApplyInlineExpansions(resolved.Steps, parser) inlineExpandedSteps, err := formula.ApplyInlineExpansions(resolved.Steps, parser)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error applying inline expansions: %v\n", err) return nil, fmt.Errorf("applying inline expansions: %w", err)
os.Exit(1)
} }
resolved.Steps = inlineExpandedSteps resolved.Steps = inlineExpandedSteps
@@ -189,8 +190,7 @@ func runCook(cmd *cobra.Command, args []string) {
if resolved.Compose != nil && (len(resolved.Compose.Expand) > 0 || len(resolved.Compose.Map) > 0) { if resolved.Compose != nil && (len(resolved.Compose.Expand) > 0 || len(resolved.Compose.Map) > 0) {
expandedSteps, err := formula.ApplyExpansions(resolved.Steps, resolved.Compose, parser) expandedSteps, err := formula.ApplyExpansions(resolved.Steps, resolved.Compose, parser)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error applying expansions: %v\n", err) return nil, fmt.Errorf("applying expansions: %w", err)
os.Exit(1)
} }
resolved.Steps = expandedSteps resolved.Steps = expandedSteps
} }
@@ -200,12 +200,10 @@ func runCook(cmd *cobra.Command, args []string) {
for _, aspectName := range resolved.Compose.Aspects { for _, aspectName := range resolved.Compose.Aspects {
aspectFormula, err := parser.LoadByName(aspectName) aspectFormula, err := parser.LoadByName(aspectName)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error loading aspect %q: %v\n", aspectName, err) return nil, fmt.Errorf("loading aspect %q: %w", aspectName, err)
os.Exit(1)
} }
if aspectFormula.Type != formula.TypeAspect { if aspectFormula.Type != formula.TypeAspect {
fmt.Fprintf(os.Stderr, "Error: %q is not an aspect formula (type=%s)\n", aspectName, aspectFormula.Type) return nil, fmt.Errorf("%q is not an aspect formula (type=%s)", aspectName, aspectFormula.Type)
os.Exit(1)
} }
if len(aspectFormula.Advice) > 0 { if len(aspectFormula.Advice) > 0 {
resolved.Steps = formula.ApplyAdvice(resolved.Steps, aspectFormula.Advice) resolved.Steps = formula.ApplyAdvice(resolved.Steps, aspectFormula.Advice)
@@ -213,141 +211,119 @@ func runCook(cmd *cobra.Command, args []string) {
} }
} }
// Apply prefix to proto ID if specified return resolved, nil
protoID := resolved.Formula }
if prefix != "" {
protoID = prefix + resolved.Formula
}
// Extract variables used in the formula // outputCookDryRun displays a dry-run preview of what would be cooked
vars := formula.ExtractVariables(resolved) func outputCookDryRun(resolved *formula.Formula, protoID string, runtimeMode bool, inputVars map[string]string, vars, bondPoints []string) {
modeLabel := "compile-time"
// Collect bond points if runtimeMode {
var bondPoints []string modeLabel = "runtime"
if resolved.Compose != nil { // Apply defaults for runtime mode display
for _, bp := range resolved.Compose.BondPoints { for name, def := range resolved.Vars {
bondPoints = append(bondPoints, bp.ID) if _, provided := inputVars[name]; !provided && def.Default != "" {
inputVars[name] = def.Default
}
} }
} }
if dryRun { fmt.Printf("\nDry run: would cook formula %s as proto %s (%s mode)\n\n", resolved.Formula, protoID, modeLabel)
// Determine mode label for display
modeLabel := "compile-time"
if runtimeMode {
modeLabel = "runtime"
// Apply defaults for runtime mode display
for name, def := range resolved.Vars {
if _, provided := inputVars[name]; !provided && def.Default != "" {
inputVars[name] = def.Default
}
}
}
fmt.Printf("\nDry run: would cook formula %s as proto %s (%s mode)\n\n", resolved.Formula, protoID, modeLabel) // In runtime mode, show substituted steps
if runtimeMode {
substituteFormulaVars(resolved, inputVars)
fmt.Printf("Steps (%d) [variables substituted]:\n", len(resolved.Steps))
} else {
fmt.Printf("Steps (%d) [{{variables}} shown as placeholders]:\n", len(resolved.Steps))
}
printFormulaSteps(resolved.Steps, " ")
// In runtime mode, show substituted steps if len(vars) > 0 {
if runtimeMode { fmt.Printf("\nVariables used: %s\n", strings.Join(vars, ", "))
// Create a copy with substituted values for display
substituteFormulaVars(resolved, inputVars)
fmt.Printf("Steps (%d) [variables substituted]:\n", len(resolved.Steps))
} else {
fmt.Printf("Steps (%d) [{{variables}} shown as placeholders]:\n", len(resolved.Steps))
}
printFormulaSteps(resolved.Steps, " ")
if len(vars) > 0 {
fmt.Printf("\nVariables used: %s\n", strings.Join(vars, ", "))
}
// Show variable values in runtime mode
if runtimeMode && len(inputVars) > 0 {
fmt.Printf("\nVariable values:\n")
for name, value := range inputVars {
fmt.Printf(" {{%s}} = %s\n", name, value)
}
}
if len(bondPoints) > 0 {
fmt.Printf("Bond points: %s\n", strings.Join(bondPoints, ", "))
}
// Show variable definitions (more useful in compile-time mode)
if !runtimeMode && len(resolved.Vars) > 0 {
fmt.Printf("\nVariable definitions:\n")
for name, def := range resolved.Vars {
attrs := []string{}
if def.Required {
attrs = append(attrs, "required")
}
if def.Default != "" {
attrs = append(attrs, fmt.Sprintf("default=%s", def.Default))
}
if len(def.Enum) > 0 {
attrs = append(attrs, fmt.Sprintf("enum=[%s]", strings.Join(def.Enum, ",")))
}
attrStr := ""
if len(attrs) > 0 {
attrStr = fmt.Sprintf(" (%s)", strings.Join(attrs, ", "))
}
fmt.Printf(" {{%s}}: %s%s\n", name, def.Description, attrStr)
}
}
return
} }
// Ephemeral mode (default): output resolved formula as JSON to stdout // Show variable values in runtime mode
if !persist { if runtimeMode && len(inputVars) > 0 {
// Runtime mode: substitute variables before output fmt.Printf("\nVariable values:\n")
if runtimeMode { for name, value := range inputVars {
// Apply defaults from formula variable definitions fmt.Printf(" {{%s}} = %s\n", name, value)
for name, def := range resolved.Vars {
if _, provided := inputVars[name]; !provided && def.Default != "" {
inputVars[name] = def.Default
}
}
// Check for missing required variables
var missingVars []string
for _, v := range vars {
if _, ok := inputVars[v]; !ok {
missingVars = append(missingVars, v)
}
}
if len(missingVars) > 0 {
fmt.Fprintf(os.Stderr, "Error: runtime mode requires all variables to have values\n")
fmt.Fprintf(os.Stderr, "Missing: %s\n", strings.Join(missingVars, ", "))
fmt.Fprintf(os.Stderr, "Provide with: --var %s=<value>\n", missingVars[0])
os.Exit(1)
}
// Substitute variables in the formula
substituteFormulaVars(resolved, inputVars)
} }
outputJSON(resolved)
return
} }
// Persist mode: create proto bead in database (legacy behavior) if len(bondPoints) > 0 {
fmt.Printf("Bond points: %s\n", strings.Join(bondPoints, ", "))
}
// Show variable definitions (more useful in compile-time mode)
if !runtimeMode && len(resolved.Vars) > 0 {
fmt.Printf("\nVariable definitions:\n")
for name, def := range resolved.Vars {
attrs := []string{}
if def.Required {
attrs = append(attrs, "required")
}
if def.Default != "" {
attrs = append(attrs, fmt.Sprintf("default=%s", def.Default))
}
if len(def.Enum) > 0 {
attrs = append(attrs, fmt.Sprintf("enum=[%s]", strings.Join(def.Enum, ",")))
}
attrStr := ""
if len(attrs) > 0 {
attrStr = fmt.Sprintf(" (%s)", strings.Join(attrs, ", "))
}
fmt.Printf(" {{%s}}: %s%s\n", name, def.Description, attrStr)
}
}
}
// outputCookEphemeral outputs the resolved formula as JSON (ephemeral mode)
func outputCookEphemeral(resolved *formula.Formula, runtimeMode bool, inputVars map[string]string, vars []string) error {
	if runtimeMode {
		// Fill in defaults for any variables the caller didn't supply.
		for name, def := range resolved.Vars {
			_, provided := inputVars[name]
			if !provided && def.Default != "" {
				inputVars[name] = def.Default
			}
		}
		// Every referenced variable must have a value in runtime mode.
		var missing []string
		for _, v := range vars {
			if _, ok := inputVars[v]; !ok {
				missing = append(missing, v)
			}
		}
		if len(missing) > 0 {
			return fmt.Errorf("runtime mode requires all variables to have values\nMissing: %s\nProvide with: --var %s=<value>",
				strings.Join(missing, ", "), missing[0])
		}
		// All values present: substitute them into the formula.
		substituteFormulaVars(resolved, inputVars)
	}
	outputJSON(resolved)
	return nil
}
// persistCookFormula creates a proto bead in the database (persist mode)
func persistCookFormula(ctx context.Context, resolved *formula.Formula, protoID string, force bool, vars, bondPoints []string) error {
// Check if proto already exists // Check if proto already exists
existingProto, err := store.GetIssue(ctx, protoID) existingProto, err := store.GetIssue(ctx, protoID)
if err == nil && existingProto != nil { if err == nil && existingProto != nil {
if !force { if !force {
fmt.Fprintf(os.Stderr, "Error: proto %s already exists\n", protoID) return fmt.Errorf("proto %s already exists (use --force to replace)", protoID)
fmt.Fprintf(os.Stderr, "Hint: use --force to replace it\n")
os.Exit(1)
} }
// Delete existing proto and its children // Delete existing proto and its children
if err := deleteProtoSubgraph(ctx, store, protoID); err != nil { if err := deleteProtoSubgraph(ctx, store, protoID); err != nil {
fmt.Fprintf(os.Stderr, "Error deleting existing proto: %v\n", err) return fmt.Errorf("deleting existing proto: %w", err)
os.Exit(1)
} }
} }
// Create the proto bead from the formula // Create the proto bead from the formula
result, err := cookFormula(ctx, store, resolved, protoID) result, err := cookFormula(ctx, store, resolved, protoID)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error cooking formula: %v\n", err) return fmt.Errorf("cooking formula: %w", err)
os.Exit(1)
} }
// Schedule auto-flush // Schedule auto-flush
@@ -361,7 +337,7 @@ func runCook(cmd *cobra.Command, args []string) {
Variables: vars, Variables: vars,
BondPoints: bondPoints, BondPoints: bondPoints,
}) })
return return nil
} }
fmt.Printf("%s Cooked proto: %s\n", ui.RenderPass("✓"), result.ProtoID) fmt.Printf("%s Cooked proto: %s\n", ui.RenderPass("✓"), result.ProtoID)
@@ -373,6 +349,73 @@ func runCook(cmd *cobra.Command, args []string) {
fmt.Printf(" Bond points: %s\n", strings.Join(bondPoints, ", ")) fmt.Printf(" Bond points: %s\n", strings.Join(bondPoints, ", "))
} }
fmt.Printf("\nTo use: bd mol pour %s --var <name>=<value>\n", result.ProtoID) fmt.Printf("\nTo use: bd mol pour %s --var <name>=<value>\n", result.ProtoID)
return nil
}
func runCook(cmd *cobra.Command, args []string) {
// Parse and validate flags
flags, err := parseCookFlags(cmd, args)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Validate store access for persist mode
if flags.persist {
CheckReadonly("cook --persist")
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: cook --persist requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon cook %s --persist ...\n", flags.formulaPath)
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
os.Exit(1)
}
}
// Load and resolve the formula
resolved, err := loadAndResolveFormula(flags.formulaPath, flags.searchPaths)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
// Apply prefix to proto ID if specified
protoID := resolved.Formula
if flags.prefix != "" {
protoID = flags.prefix + resolved.Formula
}
// Extract variables and bond points
vars := formula.ExtractVariables(resolved)
var bondPoints []string
if resolved.Compose != nil {
for _, bp := range resolved.Compose.BondPoints {
bondPoints = append(bondPoints, bp.ID)
}
}
// Handle dry-run mode
if flags.dryRun {
outputCookDryRun(resolved, protoID, flags.runtimeMode, flags.inputVars, vars, bondPoints)
return
}
// Handle ephemeral mode (default)
if !flags.persist {
if err := outputCookEphemeral(resolved, flags.runtimeMode, flags.inputVars, vars); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
return
}
// Handle persist mode
if err := persistCookFormula(rootCtx, resolved, protoID, flags.force, vars, bondPoints); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
} }
// cookFormulaResult holds the result of cooking // cookFormulaResult holds the result of cooking
@@ -410,15 +453,13 @@ func cookFormulaToSubgraph(f *formula.Formula, protoID string) (*TemplateSubgrap
issueMap[protoID] = rootIssue issueMap[protoID] = rootIssue
// Collect issues for each step (use protoID as parent for step IDs) // Collect issues for each step (use protoID as parent for step IDs)
collectStepsToSubgraph(f.Steps, protoID, issueMap, &issues, &deps) // The unified collectSteps builds both issueMap and idMapping
idMapping := make(map[string]string)
collectSteps(f.Steps, protoID, idMapping, issueMap, &issues, &deps, nil) // nil = keep labels on issues
// Collect dependencies from depends_on // Collect dependencies from depends_on using the idMapping built above
stepIDMapping := make(map[string]string)
for _, step := range f.Steps { for _, step := range f.Steps {
collectStepIDMappings(step, protoID, stepIDMapping) collectDependencies(step, idMapping, &deps)
}
for _, step := range f.Steps {
collectDependenciesToSubgraph(step, stepIDMapping, &deps)
} }
return &TemplateSubgraph{ return &TemplateSubgraph{
@@ -429,145 +470,99 @@ func cookFormulaToSubgraph(f *formula.Formula, protoID string) (*TemplateSubgrap
}, nil }, nil
} }
// collectStepsToSubgraph collects issues and dependencies for steps and their children. // processStepToIssue converts a formula.Step to a types.Issue.
// This is the in-memory version that doesn't create labels (since those require DB). // The issue includes all fields including Labels populated from step.Labels and waits_for.
func collectStepsToSubgraph(steps []*formula.Step, parentID string, issueMap map[string]*types.Issue, // This is the shared core logic used by both DB-persisted and in-memory cooking.
issues *[]*types.Issue, deps *[]*types.Dependency) { func processStepToIssue(step *formula.Step, parentID string) *types.Issue {
// Generate issue ID (formula-name.step-id)
issueID := fmt.Sprintf("%s.%s", parentID, step.ID)
// Determine issue type (children override to epic)
issueType := stepTypeToIssueType(step.Type)
if len(step.Children) > 0 {
issueType = types.TypeEpic
}
// Determine priority
priority := 2
if step.Priority != nil {
priority = *step.Priority
}
issue := &types.Issue{
ID: issueID,
Title: step.Title, // Keep {{variables}} for substitution at pour time
Description: step.Description,
Status: types.StatusOpen,
Priority: priority,
IssueType: issueType,
Assignee: step.Assignee,
IsTemplate: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
SourceFormula: step.SourceFormula, // Source tracing
SourceLocation: step.SourceLocation, // Source tracing
}
// Populate labels from step
issue.Labels = append(issue.Labels, step.Labels...)
// Add gate label for waits_for field
if step.WaitsFor != "" {
gateLabel := fmt.Sprintf("gate:%s", step.WaitsFor)
issue.Labels = append(issue.Labels, gateLabel)
}
return issue
}
// collectSteps collects issues and dependencies for steps and their children.
// This is the unified implementation used by both DB-persisted and in-memory cooking.
//
// Parameters:
// - idMapping: step.ID → issue.ID (always populated, used for dependency resolution)
// - issueMap: issue.ID → issue (optional, nil for DB path, populated for in-memory path)
// - labelHandler: callback for each label (if nil, labels stay on issue; if set, labels are
// extracted and issue.Labels is cleared - use for DB path)
func collectSteps(steps []*formula.Step, parentID string,
idMapping map[string]string,
issueMap map[string]*types.Issue,
issues *[]*types.Issue,
deps *[]*types.Dependency,
labelHandler func(issueID, label string)) {
for _, step := range steps { for _, step := range steps {
// Generate issue ID (formula-name.step-id) issue := processStepToIssue(step, parentID)
issueID := fmt.Sprintf("%s.%s", parentID, step.ID)
// Determine issue type (children override to epic)
issueType := stepTypeToIssueType(step.Type)
if len(step.Children) > 0 {
issueType = types.TypeEpic
}
// Determine priority
priority := 2
if step.Priority != nil {
priority = *step.Priority
}
issue := &types.Issue{
ID: issueID,
Title: step.Title, // Keep {{variables}} for substitution at pour time
Description: step.Description,
Status: types.StatusOpen,
Priority: priority,
IssueType: issueType,
Assignee: step.Assignee,
IsTemplate: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
SourceFormula: step.SourceFormula, // Source tracing
SourceLocation: step.SourceLocation, // Source tracing
}
// Store labels in the issue's Labels field for in-memory use
issue.Labels = append(issue.Labels, step.Labels...)
// Add gate label for waits_for field
if step.WaitsFor != "" {
gateLabel := fmt.Sprintf("gate:%s", step.WaitsFor)
issue.Labels = append(issue.Labels, gateLabel)
}
*issues = append(*issues, issue) *issues = append(*issues, issue)
issueMap[issueID] = issue
// Build mappings
idMapping[step.ID] = issue.ID
if issueMap != nil {
issueMap[issue.ID] = issue
}
// Handle labels: extract via callback (DB path) or keep on issue (in-memory path)
if labelHandler != nil {
for _, label := range issue.Labels {
labelHandler(issue.ID, label)
}
issue.Labels = nil // DB stores labels separately
}
// Add parent-child dependency // Add parent-child dependency
*deps = append(*deps, &types.Dependency{ *deps = append(*deps, &types.Dependency{
IssueID: issueID, IssueID: issue.ID,
DependsOnID: parentID, DependsOnID: parentID,
Type: types.DepParentChild, Type: types.DepParentChild,
}) })
// Recursively collect children // Recursively collect children
if len(step.Children) > 0 { if len(step.Children) > 0 {
collectStepsToSubgraph(step.Children, issueID, issueMap, issues, deps) collectSteps(step.Children, issue.ID, idMapping, issueMap, issues, deps, labelHandler)
} }
} }
} }
// collectStepIDMappings records, for this step and every nested child, the
// mapping from the step's short ID to its fully-qualified issue ID
// (parent-id.step-id).
func collectStepIDMappings(step *formula.Step, parentID string, mapping map[string]string) {
	fullID := parentID + "." + step.ID
	mapping[step.ID] = fullID
	for _, nested := range step.Children {
		collectStepIDMappings(nested, fullID, mapping)
	}
}
// collectDependenciesToSubgraph collects blocking dependencies from the
// depends_on, needs, and waits_for fields of a step and, recursively, of its
// children. Step IDs absent from idMapping are silently skipped; validation
// reports those later.
func collectDependenciesToSubgraph(step *formula.Step, idMapping map[string]string, deps *[]*types.Dependency) {
	issueID := idMapping[step.ID]

	// depends_on and needs both yield plain blocking edges; needs is simply a
	// shorthand for sibling dependencies.
	addBlocker := func(targetStepID string) {
		target, ok := idMapping[targetStepID]
		if !ok {
			return // Will be caught during validation
		}
		*deps = append(*deps, &types.Dependency{
			IssueID:     issueID,
			DependsOnID: target,
			Type:        types.DepBlocks,
		})
	}
	for _, depID := range step.DependsOn {
		addBlocker(depID)
	}
	for _, needID := range step.Needs {
		addBlocker(needID)
	}

	// waits_for produces a fanout-gate dependency on the spawner step.
	if step.WaitsFor != "" {
		if spec := formula.ParseWaitsFor(step.WaitsFor); spec != nil {
			// Determine the spawner: explicit in the spec, otherwise inferred
			// from the first entry in needs.
			spawner := spec.SpawnerID
			if spawner == "" && len(step.Needs) > 0 {
				spawner = step.Needs[0]
			}
			if spawner != "" {
				if spawnerIssueID, ok := idMapping[spawner]; ok {
					// Attach the gate condition as dependency metadata.
					metaJSON, _ := json.Marshal(types.WaitsForMeta{Gate: spec.Gate})
					*deps = append(*deps, &types.Dependency{
						IssueID:     issueID,
						DependsOnID: spawnerIssueID,
						Type:        types.DepWaitsFor,
						Metadata:    string(metaJSON),
					})
				}
			}
		}
	}

	// Recurse into nested children.
	for _, child := range step.Children {
		collectDependenciesToSubgraph(child, idMapping, deps)
	}
}
// resolveAndCookFormula loads a formula by name, resolves it, applies all transformations, // resolveAndCookFormula loads a formula by name, resolves it, applies all transformations,
// and returns an in-memory TemplateSubgraph ready for instantiation. // and returns an in-memory TemplateSubgraph ready for instantiation.
@@ -694,7 +689,10 @@ func cookFormula(ctx context.Context, s storage.Storage, f *formula.Formula, pro
labels = append(labels, struct{ issueID, label string }{protoID, MoleculeLabel}) labels = append(labels, struct{ issueID, label string }{protoID, MoleculeLabel})
// Collect issues for each step (use protoID as parent for step IDs) // Collect issues for each step (use protoID as parent for step IDs)
collectStepsRecursive(f.Steps, protoID, idMapping, &issues, &deps, &labels) // Use labelHandler to extract labels for separate DB storage
collectSteps(f.Steps, protoID, idMapping, nil, &issues, &deps, func(issueID, label string) {
labels = append(labels, struct{ issueID, label string }{issueID, label})
})
// Collect dependencies from depends_on // Collect dependencies from depends_on
for _, step := range f.Steps { for _, step := range f.Steps {
@@ -753,70 +751,8 @@ func cookFormula(ctx context.Context, s storage.Storage, f *formula.Formula, pro
}, nil }, nil
} }
// collectStepsRecursive collects issues, dependencies, and labels for steps and their children. // collectDependencies collects blocking dependencies from depends_on, needs, and waits_for fields.
func collectStepsRecursive(steps []*formula.Step, parentID string, idMapping map[string]string, // This is the shared implementation used by both DB-persisted and in-memory subgraph cooking.
issues *[]*types.Issue, deps *[]*types.Dependency, labels *[]struct{ issueID, label string }) {
for _, step := range steps {
// Generate issue ID (formula-name.step-id)
issueID := fmt.Sprintf("%s.%s", parentID, step.ID)
// Determine issue type (children override to epic)
issueType := stepTypeToIssueType(step.Type)
if len(step.Children) > 0 {
issueType = types.TypeEpic
}
// Determine priority
priority := 2
if step.Priority != nil {
priority = *step.Priority
}
issue := &types.Issue{
ID: issueID,
Title: step.Title, // Keep {{variables}} for substitution at pour time
Description: step.Description,
Status: types.StatusOpen,
Priority: priority,
IssueType: issueType,
Assignee: step.Assignee,
IsTemplate: true,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
SourceFormula: step.SourceFormula, // Source tracing
SourceLocation: step.SourceLocation, // Source tracing
}
*issues = append(*issues, issue)
// Collect labels
for _, label := range step.Labels {
*labels = append(*labels, struct{ issueID, label string }{issueID, label})
}
// Add gate label for waits_for field
if step.WaitsFor != "" {
gateLabel := fmt.Sprintf("gate:%s", step.WaitsFor)
*labels = append(*labels, struct{ issueID, label string }{issueID, gateLabel})
}
idMapping[step.ID] = issueID
// Add parent-child dependency
*deps = append(*deps, &types.Dependency{
IssueID: issueID,
DependsOnID: parentID,
Type: types.DepParentChild,
})
// Recursively collect children
if len(step.Children) > 0 {
collectStepsRecursive(step.Children, issueID, idMapping, issues, deps, labels)
}
}
}
// collectDependencies collects blocking dependencies from depends_on and needs fields.
func collectDependencies(step *formula.Step, idMapping map[string]string, deps *[]*types.Dependency) { func collectDependencies(step *formula.Step, idMapping map[string]string, deps *[]*types.Dependency) {
issueID := idMapping[step.ID] issueID := idMapping[step.ID]
+2 -2
View File
@@ -161,7 +161,7 @@ var createCmd = &cobra.Command{
repoPath = routing.DetermineTargetRepo(routingConfig, userRole, ".") repoPath = routing.DetermineTargetRepo(routingConfig, userRole, ".")
} }
// TODO: Switch to target repo for multi-repo support // TODO(bd-6x6g): Switch to target repo for multi-repo support
// For now, we just log the target repo in debug mode // For now, we just log the target repo in debug mode
if repoPath != "." { if repoPath != "." {
debug.Logf("DEBUG: Target repo: %s\n", repoPath) debug.Logf("DEBUG: Target repo: %s\n", repoPath)
@@ -205,7 +205,7 @@ var createCmd = &cobra.Command{
// Get database prefix from config // Get database prefix from config
var dbPrefix string var dbPrefix string
if daemonClient != nil { if daemonClient != nil {
// TODO: Add RPC method to get config in daemon mode // TODO(bd-ag35): Add RPC method to get config in daemon mode
// For now, skip validation in daemon mode (needs RPC enhancement) // For now, skip validation in daemon mode (needs RPC enhancement)
} else { } else {
// Direct mode - check config // Direct mode - check config
+1 -1
View File
@@ -128,7 +128,7 @@ func setupDaemonLogger(logPath string, jsonFormat bool, level slog.Level) (*lumb
} }
// setupDaemonLoggerLegacy is the old signature for backward compatibility during migration. // setupDaemonLoggerLegacy is the old signature for backward compatibility during migration.
// TODO: Remove this once all callers are updated to use the new signature. // TODO(bd-2dwo): Remove this once all callers are updated to use the new signature.
func setupDaemonLoggerLegacy(logPath string) (*lumberjack.Logger, daemonLogger) { func setupDaemonLoggerLegacy(logPath string) (*lumberjack.Logger, daemonLogger) {
return setupDaemonLogger(logPath, false, slog.LevelInfo) return setupDaemonLogger(logPath, false, slog.LevelInfo)
} }
+1 -1
View File
@@ -630,7 +630,7 @@ func detectJiraConflicts(ctx context.Context) ([]JiraConflict, error) {
// Check if updated since last sync // Check if updated since last sync
if issue.UpdatedAt.After(lastSync) { if issue.UpdatedAt.After(lastSync) {
// This is a potential conflict - for now, mark as conflict // This is a potential conflict - for now, mark as conflict
// TODO: In a full implementation, we'd fetch the Jira issue and compare timestamps // TODO(bd-0qx5): In a full implementation, we'd fetch the Jira issue and compare timestamps
conflicts = append(conflicts, JiraConflict{ conflicts = append(conflicts, JiraConflict{
IssueID: issue.ID, IssueID: issue.ID,
LocalUpdated: issue.UpdatedAt, LocalUpdated: issue.UpdatedAt,
+1 -1
View File
@@ -19,7 +19,7 @@ import (
_ "github.com/ncruces/go-sqlite3/embed" _ "github.com/ncruces/go-sqlite3/embed"
) )
// TODO: Consider integrating into 'bd doctor' migration detection // TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection
var migrateCmd = &cobra.Command{ var migrateCmd = &cobra.Command{
Use: "migrate", Use: "migrate",
GroupID: "maint", GroupID: "maint",
+1 -1
View File
@@ -21,7 +21,7 @@ import (
"github.com/steveyegge/beads/internal/ui" "github.com/steveyegge/beads/internal/ui"
) )
// TODO: Consider integrating into 'bd doctor' migration detection // TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection
var migrateHashIDsCmd = &cobra.Command{ var migrateHashIDsCmd = &cobra.Command{
Use: "hash-ids", Use: "hash-ids",
Short: "Migrate sequential IDs to hash-based IDs (legacy)", Short: "Migrate sequential IDs to hash-based IDs (legacy)",
+1 -1
View File
@@ -12,7 +12,7 @@ import (
"github.com/steveyegge/beads/internal/storage/sqlite" "github.com/steveyegge/beads/internal/storage/sqlite"
) )
// TODO: Consider integrating into 'bd doctor' migration detection // TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection
var migrateIssuesCmd = &cobra.Command{ var migrateIssuesCmd = &cobra.Command{
Use: "issues", Use: "issues",
Short: "Move issues between repositories", Short: "Move issues between repositories",
+1 -1
View File
@@ -13,7 +13,7 @@ import (
"github.com/steveyegge/beads/internal/syncbranch" "github.com/steveyegge/beads/internal/syncbranch"
) )
// TODO: Consider integrating into 'bd doctor' migration detection // TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection
var migrateSyncCmd = &cobra.Command{ var migrateSyncCmd = &cobra.Command{
Use: "sync <branch-name>", Use: "sync <branch-name>",
Short: "Migrate to sync.branch workflow for multi-clone setups", Short: "Migrate to sync.branch workflow for multi-clone setups",
+1 -1
View File
@@ -69,7 +69,7 @@ func loadLegacyDeletionsCmd(path string) (map[string]legacyDeletionRecordCmd, []
return records, warnings, nil return records, warnings, nil
} }
// TODO: Consider integrating into 'bd doctor' migration detection // TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection
var migrateTombstonesCmd = &cobra.Command{ var migrateTombstonesCmd = &cobra.Command{
Use: "tombstones", Use: "tombstones",
Short: "Convert deletions.jsonl entries to inline tombstones", Short: "Convert deletions.jsonl entries to inline tombstones",
+1 -1
View File
@@ -64,7 +64,7 @@ func runMolStale(cmd *cobra.Command, args []string) {
if daemonClient != nil { if daemonClient != nil {
// For now, stale check requires direct store access // For now, stale check requires direct store access
// TODO: Add RPC endpoint for stale check // TODO(bd-ag35): Add RPC endpoint for stale check
fmt.Fprintf(os.Stderr, "Error: mol stale requires direct database access\n") fmt.Fprintf(os.Stderr, "Error: mol stale requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon mol stale\n") fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon mol stale\n")
os.Exit(1) os.Exit(1)
+1 -1
View File
@@ -441,7 +441,7 @@ func TestHasJSONLConflict_MultipleConflicts(t *testing.T) {
func TestZFCSkipsExportAfterImport(t *testing.T) { func TestZFCSkipsExportAfterImport(t *testing.T) {
// Skip this test - it calls importFromJSONL which spawns bd import as subprocess, // Skip this test - it calls importFromJSONL which spawns bd import as subprocess,
// but os.Executable() returns the test binary during tests, not the bd binary. // but os.Executable() returns the test binary during tests, not the bd binary.
// TODO: Refactor to use direct import logic instead of subprocess. // TODO(bd-h048): Refactor to use direct import logic instead of subprocess.
t.Skip("Test requires subprocess spawning which doesn't work in test environment") t.Skip("Test requires subprocess spawning which doesn't work in test environment")
if testing.Short() { if testing.Short() {
t.Skip("Skipping test that spawns subprocess in short mode") t.Skip("Skipping test that spawns subprocess in short mode")
+5 -5
View File
@@ -167,23 +167,23 @@ type Step struct {
// Expand references an expansion formula to inline here. // Expand references an expansion formula to inline here.
// When set, this step is replaced by the expansion's steps. // When set, this step is replaced by the expansion's steps.
// TODO(future): Not yet implemented in bd cook. Filed as future work. // TODO(bd-7zka): Not yet implemented in bd cook. Filed as future work.
Expand string `json:"expand,omitempty"` Expand string `json:"expand,omitempty"`
// ExpandVars are variable overrides for the expansion. // ExpandVars are variable overrides for the expansion.
// TODO(future): Not yet implemented in bd cook. Filed as future work. // TODO(bd-7zka): Not yet implemented in bd cook. Filed as future work.
ExpandVars map[string]string `json:"expand_vars,omitempty"` ExpandVars map[string]string `json:"expand_vars,omitempty"`
// Condition makes this step optional based on a variable. // Condition makes this step optional based on a variable.
// Format: "{{var}}" (truthy) or "{{var}} == value". // Format: "{{var}}" (truthy) or "{{var}} == value".
// TODO(future): Not yet implemented in bd cook. Filed as future work. // TODO(bd-7zka): Not yet implemented in bd cook. Filed as future work.
Condition string `json:"condition,omitempty"` Condition string `json:"condition,omitempty"`
// Children are nested steps (for creating epic hierarchies). // Children are nested steps (for creating epic hierarchies).
Children []*Step `json:"children,omitempty"` Children []*Step `json:"children,omitempty"`
// Gate defines an async wait condition for this step. // Gate defines an async wait condition for this step.
// TODO(future): Not yet implemented in bd cook. Will integrate with bd-udsi gates. // TODO(bd-7zka): Not yet implemented in bd cook. Will integrate with bd-udsi gates.
Gate *Gate `json:"gate,omitempty"` Gate *Gate `json:"gate,omitempty"`
// Loop defines iteration for this step. // Loop defines iteration for this step.
@@ -207,7 +207,7 @@ type Step struct {
} }
// Gate defines an async wait condition (integrates with bd-udsi). // Gate defines an async wait condition (integrates with bd-udsi).
// TODO(future): Not yet implemented in bd cook. Schema defined for future use. // TODO(bd-7zka): Not yet implemented in bd cook. Schema defined for future use.
type Gate struct { type Gate struct {
// Type is the condition type: gh:run, gh:pr, timer, human, mail. // Type is the condition type: gh:run, gh:pr, timer, human, mail.
Type string `json:"type"` Type string `json:"type"`