diff --git a/cmd/bd/autoflush.go b/cmd/bd/autoflush.go
index f2b4dfda..30186ea6 100644
--- a/cmd/bd/autoflush.go
+++ b/cmd/bd/autoflush.go
@@ -18,6 +18,7 @@ import (
 	"github.com/steveyegge/beads/internal/beads"
 	"github.com/steveyegge/beads/internal/config"
 	"github.com/steveyegge/beads/internal/debug"
+	"github.com/steveyegge/beads/internal/storage"
 	"github.com/steveyegge/beads/internal/types"
 	"github.com/steveyegge/beads/internal/ui"
 	"github.com/steveyegge/beads/internal/utils"
@@ -462,6 +463,194 @@ func writeJSONLAtomic(jsonlPath string, issues []*types.Issue) ([]string, error)
 	return exportedIDs, nil
 }
 
+// recordFlushFailure records a flush failure: it increments the failure counter,
+// always prints an immediate warning, and escalates to a prominent warning after
+// three or more consecutive failures.
+func recordFlushFailure(err error) {
+	flushMutex.Lock()
+	flushFailureCount++
+	lastFlushError = err
+	failCount := flushFailureCount
+	flushMutex.Unlock()
+
+	// Always show the immediate warning
+	fmt.Fprintf(os.Stderr, "Warning: auto-flush failed: %v\n", err)
+
+	// Show prominent warning after 3+ consecutive failures
+	if failCount >= 3 {
+		fmt.Fprintf(os.Stderr, "\n%s\n", ui.RenderFail("⚠️ CRITICAL: Auto-flush has failed "+fmt.Sprint(failCount)+" times consecutively!"))
+		fmt.Fprintf(os.Stderr, "%s\n", ui.RenderFail("⚠️ Your JSONL file may be out of sync with the database."))
+		fmt.Fprintf(os.Stderr, "%s\n\n", ui.RenderFail("⚠️ Run 'bd export -o .beads/issues.jsonl' manually to fix."))
+	}
+}
+
+// recordFlushSuccess records a successful flush, resetting the failure counter.
+func recordFlushSuccess() {
+	flushMutex.Lock()
+	flushFailureCount = 0
+	lastFlushError = nil
+	flushMutex.Unlock()
+}
+
+// readExistingJSONL reads an existing JSONL file into a map for incremental merging.
+// Returns an empty map if the file doesn't exist; any other read failure returns an error.
+func readExistingJSONL(jsonlPath string) (map[string]*types.Issue, error) {
+	issueMap := make(map[string]*types.Issue)
+
+	existingFile, err := os.Open(jsonlPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return issueMap, nil // File doesn't exist, return empty map
+		}
+		return nil, fmt.Errorf("failed to open existing JSONL: %w", err)
+	}
+	defer existingFile.Close()
+
+	scanner := bufio.NewScanner(existingFile)
+	// Increase buffer to handle large JSON lines.
+	// The default scanner limit is 64KB, which can cause silent truncation.
+	scanner.Buffer(make([]byte, 0, 1024), 2*1024*1024) // 2MB max line size
+
+	lineNum := 0
+	for scanner.Scan() {
+		lineNum++
+		line := scanner.Text()
+		if line == "" {
+			continue
+		}
+		var issue types.Issue
+		if err := json.Unmarshal([]byte(line), &issue); err == nil {
+			issue.SetDefaults() // Apply defaults for omitted fields (beads-399)
+			issueMap[issue.ID] = &issue
+		} else {
+			// Warn about malformed JSONL lines
+			fmt.Fprintf(os.Stderr, "Warning: skipping malformed JSONL line %d: %v\n", lineNum, err)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read existing JSONL: %w", err)
+	}
+
+	return issueMap, nil
+}
+
+// fetchAndMergeIssues fetches dirty issues from the database and merges them into issueMap.
+// Issues that no longer exist are removed from the map.
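+// Each fetched issue has its dependency records attached before it replaces the map entry.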
+func fetchAndMergeIssues(ctx context.Context, s storage.Storage, dirtyIDs []string, issueMap map[string]*types.Issue) error { + for _, issueID := range dirtyIDs { + issue, err := s.GetIssue(ctx, issueID) + if err != nil { + return fmt.Errorf("failed to get issue %s: %w", issueID, err) + } + if issue == nil { + // Issue was deleted, remove from map + delete(issueMap, issueID) + continue + } + + // Get dependencies for this issue + deps, err := s.GetDependencyRecords(ctx, issueID) + if err != nil { + return fmt.Errorf("failed to get dependencies for %s: %w", issueID, err) + } + issue.Dependencies = deps + + // Update map + issueMap[issueID] = issue + } + return nil +} + +// filterWisps removes ephemeral (wisp) issues from the map and returns a slice. +// Wisps should never be exported to JSONL. +func filterWisps(issueMap map[string]*types.Issue) []*types.Issue { + issues := make([]*types.Issue, 0, len(issueMap)) + wispsSkipped := 0 + for _, issue := range issueMap { + if issue.Ephemeral { + wispsSkipped++ + continue + } + issues = append(issues, issue) + } + if wispsSkipped > 0 { + debug.Logf("auto-flush: filtered %d wisps from export", wispsSkipped) + } + return issues +} + +// filterByMultiRepoPrefix filters issues by prefix in multi-repo mode. +// Non-primary repos should only export issues matching their own prefix. +func filterByMultiRepoPrefix(ctx context.Context, s storage.Storage, issues []*types.Issue) []*types.Issue { + multiRepo := config.GetMultiRepoConfig() + if multiRepo == nil { + return issues + } + + // Get our configured prefix + prefix, prefixErr := s.GetConfig(ctx, "issue_prefix") + if prefixErr != nil || prefix == "" { + return issues + } + + // Determine if we're the primary repo + cwd, _ := os.Getwd() + primaryPath := multiRepo.Primary + if primaryPath == "" || primaryPath == "." { + primaryPath = cwd + } + + // Normalize paths for comparison + absCwd, _ := filepath.Abs(cwd) + absPrimary, _ := filepath.Abs(primaryPath) + + if absCwd == absPrimary { + return issues // Primary repo exports all issues + } + + // Filter to only issues matching our prefix + filtered := make([]*types.Issue, 0, len(issues)) + prefixWithDash := prefix + if !strings.HasSuffix(prefixWithDash, "-") { + prefixWithDash = prefix + "-" + } + for _, issue := range issues { + if strings.HasPrefix(issue.ID, prefixWithDash) { + filtered = append(filtered, issue) + } + } + debug.Logf("multi-repo filter: %d issues -> %d (prefix %s)", len(issues), len(filtered), prefix) + return filtered +} + +// updateFlushExportMetadata stores hashes and timestamps after a successful flush export. +func updateFlushExportMetadata(ctx context.Context, s storage.Storage, jsonlPath string) { + jsonlData, err := os.ReadFile(jsonlPath) + if err != nil { + return // Non-fatal, just skip metadata update + } + + hasher := sha256.New() + hasher.Write(jsonlData) + exportedHash := hex.EncodeToString(hasher.Sum(nil)) + + if err := s.SetMetadata(ctx, "jsonl_content_hash", exportedHash); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_content_hash after export: %v\n", err) + } + + // Store JSONL file hash for integrity validation + if err := s.SetJSONLFileHash(ctx, exportedHash); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_file_hash after export: %v\n", err) + } + + // Update last_import_time so staleness check doesn't see JSONL as "newer" (fixes #399) + // Use RFC3339Nano to preserve nanosecond precision. 
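+	// CheckStaleness() compares last_import_time against the JSONL mtime. The mtime
+	// has nanosecond precision, so a second-precision RFC3339 value would parse back
+	// slightly earlier than the mtime and trigger false staleness on the next read.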
+ exportTime := time.Now().Format(time.RFC3339Nano) + if err := s.SetMetadata(ctx, "last_import_time", exportTime); err != nil { + fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_time after export: %v\n", err) + } +} + // flushState captures the state needed for a flush operation type flushState struct { forceDirty bool // Force flush even if isDirty is false @@ -507,30 +696,13 @@ func flushToJSONLWithState(state flushState) { storeMutex.Unlock() ctx := rootCtx - + // Validate JSONL integrity BEFORE checking isDirty // This detects if JSONL and export_hashes are out of sync (e.g., after git operations) - // If export_hashes was cleared, we need to do a full export even if nothing is dirty integrityNeedsFullExport, err := validateJSONLIntegrity(ctx, jsonlPath) if err != nil { - // Special case: missing JSONL is not fatal, just forces full export if !os.IsNotExist(err) { - // Record failure without clearing isDirty (we didn't do any work yet) - flushMutex.Lock() - flushFailureCount++ - lastFlushError = err - failCount := flushFailureCount - flushMutex.Unlock() - - // Always show the immediate warning - fmt.Fprintf(os.Stderr, "Warning: auto-flush failed: %v\n", err) - - // Show prominent warning after 3+ consecutive failures - if failCount >= 3 { - fmt.Fprintf(os.Stderr, "\n%s\n", ui.RenderFail("⚠️ CRITICAL: Auto-flush has failed "+fmt.Sprint(failCount)+" times consecutively!")) - fmt.Fprintf(os.Stderr, "%s\n", ui.RenderFail("⚠️ Your JSONL file may be out of sync with the database.")) - fmt.Fprintf(os.Stderr, "%s\n\n", ui.RenderFail("⚠️ Run 'bd export -o .beads/issues.jsonl' manually to fix.")) - } + recordFlushFailure(err) return } // Missing JSONL: treat as "force full export" case @@ -538,235 +710,86 @@ func flushToJSONLWithState(state flushState) { } // Check if we should proceed with export - // Use only the state parameter - don't read global flags - // Caller is responsible for passing correct forceDirty/forceFullExport values if !state.forceDirty && !integrityNeedsFullExport { - // Nothing to do: not forced and no integrity issue return } // Determine export mode fullExport := state.forceFullExport || integrityNeedsFullExport - // Helper to record failure - recordFailure := func(err error) { - flushMutex.Lock() - flushFailureCount++ - lastFlushError = err - failCount := flushFailureCount - flushMutex.Unlock() - - // Always show the immediate warning - fmt.Fprintf(os.Stderr, "Warning: auto-flush failed: %v\n", err) - - // Show prominent warning after 3+ consecutive failures - if failCount >= 3 { - fmt.Fprintf(os.Stderr, "\n%s\n", ui.RenderFail("⚠️ CRITICAL: Auto-flush has failed "+fmt.Sprint(failCount)+" times consecutively!")) - fmt.Fprintf(os.Stderr, "%s\n", ui.RenderFail("⚠️ Your JSONL file may be out of sync with the database.")) - fmt.Fprintf(os.Stderr, "%s\n\n", ui.RenderFail("⚠️ Run 'bd export -o .beads/issues.jsonl' manually to fix.")) - } - } - - // Helper to record success - recordSuccess := func() { - flushMutex.Lock() - flushFailureCount = 0 - lastFlushError = nil - flushMutex.Unlock() - } - // Determine which issues to export - var dirtyIDs []string - - if fullExport { - // Full export: get ALL issues (needed after ID-changing operations like renumber) - allIssues, err2 := store.SearchIssues(ctx, "", types.IssueFilter{}) - if err2 != nil { - recordFailure(fmt.Errorf("failed to get all issues: %w", err2)) - return - } - dirtyIDs = make([]string, len(allIssues)) - for i, issue := range allIssues { - dirtyIDs[i] = issue.ID - } - } else { - // Incremental export: 
get only dirty issue IDs - var err2 error - dirtyIDs, err2 = store.GetDirtyIssues(ctx) - if err2 != nil { - recordFailure(fmt.Errorf("failed to get dirty issues: %w", err2)) - return - } - - // No dirty issues? Nothing to do! - if len(dirtyIDs) == 0 { - recordSuccess() - return - } - } - - // Read existing JSONL into a map (skip for full export - we'll rebuild from scratch) - issueMap := make(map[string]*types.Issue) - if !fullExport { - if existingFile, err := os.Open(jsonlPath); err == nil { - scanner := bufio.NewScanner(existingFile) - // Increase buffer to handle large JSON lines - // Default scanner limit is 64KB which can cause silent truncation - scanner.Buffer(make([]byte, 0, 1024), 2*1024*1024) // 2MB max line size - lineNum := 0 - for scanner.Scan() { - lineNum++ - line := scanner.Text() - if line == "" { - continue - } - var issue types.Issue - if err := json.Unmarshal([]byte(line), &issue); err == nil { - issue.SetDefaults() // Apply defaults for omitted fields (beads-399) - issueMap[issue.ID] = &issue - } else { - // Warn about malformed JSONL lines - fmt.Fprintf(os.Stderr, "Warning: skipping malformed JSONL line %d: %v\n", lineNum, err) - } - } - // Check for scanner errors - if err := scanner.Err(); err != nil { - _ = existingFile.Close() - recordFailure(fmt.Errorf("failed to read existing JSONL: %w", err)) - return - } - _ = existingFile.Close() - } - } - - // Fetch only dirty issues from DB - for _, issueID := range dirtyIDs { - issue, err := store.GetIssue(ctx, issueID) - if err != nil { - recordFailure(fmt.Errorf("failed to get issue %s: %w", issueID, err)) - return - } - if issue == nil { - // Issue was deleted, remove from map - delete(issueMap, issueID) - continue - } - - // Get dependencies for this issue - deps, err := store.GetDependencyRecords(ctx, issueID) - if err != nil { - recordFailure(fmt.Errorf("failed to get dependencies for %s: %w", issueID, err)) - return - } - issue.Dependencies = deps - - // Update map - issueMap[issueID] = issue - } - - // Convert map to slice (will be sorted by writeJSONLAtomic) - // Filter out wisps - they should never be exported to JSONL - // Wisps exist only in SQLite and are shared via .beads/redirect, not JSONL. - // This prevents "zombie" issues that resurrect after mol squash deletes them. - issues := make([]*types.Issue, 0, len(issueMap)) - wispsSkipped := 0 - for _, issue := range issueMap { - if issue.Ephemeral { - wispsSkipped++ - continue - } - issues = append(issues, issue) - } - if wispsSkipped > 0 { - debug.Logf("auto-flush: filtered %d wisps from export", wispsSkipped) - } - - // Filter issues by prefix in multi-repo mode for non-primary repos (fixes GH #437) - // In multi-repo mode, non-primary repos should only export issues that match - // their own prefix. Issues from other repos (hydrated for unified view) should - // NOT be written to the local JSONL. - multiRepo := config.GetMultiRepoConfig() - if multiRepo != nil { - // Get our configured prefix - prefix, prefixErr := store.GetConfig(ctx, "issue_prefix") - if prefixErr == nil && prefix != "" { - // Determine if we're the primary repo - cwd, _ := os.Getwd() - primaryPath := multiRepo.Primary - if primaryPath == "" || primaryPath == "." 
{ - primaryPath = cwd - } - - // Normalize paths for comparison - absCwd, _ := filepath.Abs(cwd) - absPrimary, _ := filepath.Abs(primaryPath) - - isPrimary := absCwd == absPrimary - - if !isPrimary { - // Filter to only issues matching our prefix - filtered := make([]*types.Issue, 0, len(issues)) - prefixWithDash := prefix - if !strings.HasSuffix(prefixWithDash, "-") { - prefixWithDash = prefix + "-" - } - for _, issue := range issues { - if strings.HasPrefix(issue.ID, prefixWithDash) { - filtered = append(filtered, issue) - } - } - debug.Logf("multi-repo filter: %d issues -> %d (prefix %s)", len(issues), len(filtered), prefix) - issues = filtered - } - } - } - - // Write atomically using common helper - exportedIDs, err := writeJSONLAtomic(jsonlPath, issues) + dirtyIDs, err := getIssuesToExport(ctx, fullExport) if err != nil { - recordFailure(err) + recordFlushFailure(err) + return + } + if len(dirtyIDs) == 0 && !fullExport { + recordFlushSuccess() return } - // Clear only the dirty issues that were actually exported (fixes race condition) - // Don't clear issues that were skipped due to timestamp-only changes + // Read existing JSONL into a map (skip for full export - we'll rebuild from scratch) + var issueMap map[string]*types.Issue + if fullExport { + issueMap = make(map[string]*types.Issue) + } else { + issueMap, err = readExistingJSONL(jsonlPath) + if err != nil { + recordFlushFailure(err) + return + } + } + + // Fetch dirty issues from DB and merge into map + if err := fetchAndMergeIssues(ctx, store, dirtyIDs, issueMap); err != nil { + recordFlushFailure(err) + return + } + + // Convert map to slice, filtering out wisps + issues := filterWisps(issueMap) + + // Filter by prefix in multi-repo mode + issues = filterByMultiRepoPrefix(ctx, store, issues) + + // Write atomically + exportedIDs, err := writeJSONLAtomic(jsonlPath, issues) + if err != nil { + recordFlushFailure(err) + return + } + + // Clear dirty issues that were exported if len(exportedIDs) > 0 { if err := store.ClearDirtyIssuesByID(ctx, exportedIDs); err != nil { - // Don't fail the whole flush for this, but warn fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty issues: %v\n", err) } } - // Store hash of exported JSONL (enables hash-based auto-import) - // Renamed from last_import_hash to jsonl_content_hash - jsonlData, err := os.ReadFile(jsonlPath) - if err == nil { - hasher := sha256.New() - hasher.Write(jsonlData) - exportedHash := hex.EncodeToString(hasher.Sum(nil)) - if err := store.SetMetadata(ctx, "jsonl_content_hash", exportedHash); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_content_hash after export: %v\n", err) - } + // Update metadata (hashes, timestamps) + updateFlushExportMetadata(ctx, store, jsonlPath) - // Store JSONL file hash for integrity validation - if err := store.SetJSONLFileHash(ctx, exportedHash); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_file_hash after export: %v\n", err) - } + recordFlushSuccess() +} - // Update last_import_time so staleness check doesn't see JSONL as "newer" (fixes #399) - // CheckStaleness() compares last_import_time against JSONL mtime. After export, - // the JSONL mtime is updated, so we must also update last_import_time to prevent - // false "stale" detection on subsequent reads. - // - // Use RFC3339Nano to preserve nanosecond precision. 
The file mtime has nanosecond - // precision, so using RFC3339 (second precision) would cause the stored time to be - // slightly earlier than the file mtime, triggering false staleness. - exportTime := time.Now().Format(time.RFC3339Nano) - if err := store.SetMetadata(ctx, "last_import_time", exportTime); err != nil { - fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_time after export: %v\n", err) +// getIssuesToExport determines which issue IDs need to be exported. +// For full export, returns all issue IDs. For incremental, returns only dirty IDs. +func getIssuesToExport(ctx context.Context, fullExport bool) ([]string, error) { + if fullExport { + allIssues, err := store.SearchIssues(ctx, "", types.IssueFilter{}) + if err != nil { + return nil, fmt.Errorf("failed to get all issues: %w", err) } + ids := make([]string, len(allIssues)) + for i, issue := range allIssues { + ids[i] = issue.ID + } + return ids, nil } - // Success! FlushManager manages its local state in run() goroutine. - recordSuccess() + dirtyIDs, err := store.GetDirtyIssues(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get dirty issues: %w", err) + } + return dirtyIDs, nil } diff --git a/cmd/bd/cook.go b/cmd/bd/cook.go index ec015b09..8c0325bb 100644 --- a/cmd/bd/cook.go +++ b/cmd/bd/cook.go @@ -99,7 +99,20 @@ type cookResult struct { BondPoints []string `json:"bond_points,omitempty"` } -func runCook(cmd *cobra.Command, args []string) { +// cookFlags holds parsed command-line flags for the cook command +type cookFlags struct { + dryRun bool + persist bool + force bool + searchPaths []string + prefix string + inputVars map[string]string + runtimeMode bool + formulaPath string +} + +// parseCookFlags parses and validates cook command flags +func parseCookFlags(cmd *cobra.Command, args []string) (*cookFlags, error) { dryRun, _ := cmd.Flags().GetBool("dry-run") persist, _ := cmd.Flags().GetBool("persist") force, _ := cmd.Flags().GetBool("force") @@ -113,61 +126,51 @@ func runCook(cmd *cobra.Command, args []string) { for _, v := range varFlags { parts := strings.SplitN(v, "=", 2) if len(parts) != 2 { - fmt.Fprintf(os.Stderr, "Error: invalid variable format '%s', expected 'key=value'\n", v) - os.Exit(1) + return nil, fmt.Errorf("invalid variable format '%s', expected 'key=value'", v) } inputVars[parts[0]] = parts[1] } - // Determine cooking mode + // Validate mode + if mode != "" && mode != "compile" && mode != "runtime" { + return nil, fmt.Errorf("invalid mode '%s', must be 'compile' or 'runtime'", mode) + } + // Runtime mode is triggered by: explicit --mode=runtime OR providing --var flags runtimeMode := mode == "runtime" || len(inputVars) > 0 - if mode != "" && mode != "compile" && mode != "runtime" { - fmt.Fprintf(os.Stderr, "Error: invalid mode '%s', must be 'compile' or 'runtime'\n", mode) - os.Exit(1) - } - // Only need store access if persisting - if persist { - CheckReadonly("cook --persist") + return &cookFlags{ + dryRun: dryRun, + persist: persist, + force: force, + searchPaths: searchPaths, + prefix: prefix, + inputVars: inputVars, + runtimeMode: runtimeMode, + formulaPath: args[0], + }, nil +} - if store == nil { - if daemonClient != nil { - fmt.Fprintf(os.Stderr, "Error: cook --persist requires direct database access\n") - fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon cook %s --persist ...\n", args[0]) - } else { - fmt.Fprintf(os.Stderr, "Error: no database connection\n") - } - os.Exit(1) - } - } - - ctx := rootCtx - - // Create parser with search paths +// 
loadAndResolveFormula parses a formula file and applies all transformations +func loadAndResolveFormula(formulaPath string, searchPaths []string) (*formula.Formula, error) { parser := formula.NewParser(searchPaths...) // Parse the formula file - formulaPath := args[0] f, err := parser.ParseFile(formulaPath) if err != nil { - fmt.Fprintf(os.Stderr, "Error parsing formula: %v\n", err) - os.Exit(1) + return nil, fmt.Errorf("parsing formula: %w", err) } // Resolve inheritance resolved, err := parser.Resolve(f) if err != nil { - fmt.Fprintf(os.Stderr, "Error resolving formula: %v\n", err) - os.Exit(1) + return nil, fmt.Errorf("resolving formula: %w", err) } // Apply control flow operators - loops, branches, gates - // This must happen before advice and expansions so they can act on expanded loop steps controlFlowSteps, err := formula.ApplyControlFlow(resolved.Steps, resolved.Compose) if err != nil { - fmt.Fprintf(os.Stderr, "Error applying control flow: %v\n", err) - os.Exit(1) + return nil, fmt.Errorf("applying control flow: %w", err) } resolved.Steps = controlFlowSteps @@ -177,11 +180,9 @@ func runCook(cmd *cobra.Command, args []string) { } // Apply inline step expansions - // This processes Step.Expand fields before compose.expand/map rules inlineExpandedSteps, err := formula.ApplyInlineExpansions(resolved.Steps, parser) if err != nil { - fmt.Fprintf(os.Stderr, "Error applying inline expansions: %v\n", err) - os.Exit(1) + return nil, fmt.Errorf("applying inline expansions: %w", err) } resolved.Steps = inlineExpandedSteps @@ -189,8 +190,7 @@ func runCook(cmd *cobra.Command, args []string) { if resolved.Compose != nil && (len(resolved.Compose.Expand) > 0 || len(resolved.Compose.Map) > 0) { expandedSteps, err := formula.ApplyExpansions(resolved.Steps, resolved.Compose, parser) if err != nil { - fmt.Fprintf(os.Stderr, "Error applying expansions: %v\n", err) - os.Exit(1) + return nil, fmt.Errorf("applying expansions: %w", err) } resolved.Steps = expandedSteps } @@ -200,12 +200,10 @@ func runCook(cmd *cobra.Command, args []string) { for _, aspectName := range resolved.Compose.Aspects { aspectFormula, err := parser.LoadByName(aspectName) if err != nil { - fmt.Fprintf(os.Stderr, "Error loading aspect %q: %v\n", aspectName, err) - os.Exit(1) + return nil, fmt.Errorf("loading aspect %q: %w", aspectName, err) } if aspectFormula.Type != formula.TypeAspect { - fmt.Fprintf(os.Stderr, "Error: %q is not an aspect formula (type=%s)\n", aspectName, aspectFormula.Type) - os.Exit(1) + return nil, fmt.Errorf("%q is not an aspect formula (type=%s)", aspectName, aspectFormula.Type) } if len(aspectFormula.Advice) > 0 { resolved.Steps = formula.ApplyAdvice(resolved.Steps, aspectFormula.Advice) @@ -213,141 +211,119 @@ func runCook(cmd *cobra.Command, args []string) { } } - // Apply prefix to proto ID if specified - protoID := resolved.Formula - if prefix != "" { - protoID = prefix + resolved.Formula - } + return resolved, nil +} - // Extract variables used in the formula - vars := formula.ExtractVariables(resolved) - - // Collect bond points - var bondPoints []string - if resolved.Compose != nil { - for _, bp := range resolved.Compose.BondPoints { - bondPoints = append(bondPoints, bp.ID) +// outputCookDryRun displays a dry-run preview of what would be cooked +func outputCookDryRun(resolved *formula.Formula, protoID string, runtimeMode bool, inputVars map[string]string, vars, bondPoints []string) { + modeLabel := "compile-time" + if runtimeMode { + modeLabel = "runtime" + // Apply defaults for runtime mode display + 
for name, def := range resolved.Vars { + if _, provided := inputVars[name]; !provided && def.Default != "" { + inputVars[name] = def.Default + } } } - if dryRun { - // Determine mode label for display - modeLabel := "compile-time" - if runtimeMode { - modeLabel = "runtime" - // Apply defaults for runtime mode display - for name, def := range resolved.Vars { - if _, provided := inputVars[name]; !provided && def.Default != "" { - inputVars[name] = def.Default - } - } - } + fmt.Printf("\nDry run: would cook formula %s as proto %s (%s mode)\n\n", resolved.Formula, protoID, modeLabel) - fmt.Printf("\nDry run: would cook formula %s as proto %s (%s mode)\n\n", resolved.Formula, protoID, modeLabel) + // In runtime mode, show substituted steps + if runtimeMode { + substituteFormulaVars(resolved, inputVars) + fmt.Printf("Steps (%d) [variables substituted]:\n", len(resolved.Steps)) + } else { + fmt.Printf("Steps (%d) [{{variables}} shown as placeholders]:\n", len(resolved.Steps)) + } + printFormulaSteps(resolved.Steps, " ") - // In runtime mode, show substituted steps - if runtimeMode { - // Create a copy with substituted values for display - substituteFormulaVars(resolved, inputVars) - fmt.Printf("Steps (%d) [variables substituted]:\n", len(resolved.Steps)) - } else { - fmt.Printf("Steps (%d) [{{variables}} shown as placeholders]:\n", len(resolved.Steps)) - } - printFormulaSteps(resolved.Steps, " ") - - if len(vars) > 0 { - fmt.Printf("\nVariables used: %s\n", strings.Join(vars, ", ")) - } - - // Show variable values in runtime mode - if runtimeMode && len(inputVars) > 0 { - fmt.Printf("\nVariable values:\n") - for name, value := range inputVars { - fmt.Printf(" {{%s}} = %s\n", name, value) - } - } - - if len(bondPoints) > 0 { - fmt.Printf("Bond points: %s\n", strings.Join(bondPoints, ", ")) - } - - // Show variable definitions (more useful in compile-time mode) - if !runtimeMode && len(resolved.Vars) > 0 { - fmt.Printf("\nVariable definitions:\n") - for name, def := range resolved.Vars { - attrs := []string{} - if def.Required { - attrs = append(attrs, "required") - } - if def.Default != "" { - attrs = append(attrs, fmt.Sprintf("default=%s", def.Default)) - } - if len(def.Enum) > 0 { - attrs = append(attrs, fmt.Sprintf("enum=[%s]", strings.Join(def.Enum, ","))) - } - attrStr := "" - if len(attrs) > 0 { - attrStr = fmt.Sprintf(" (%s)", strings.Join(attrs, ", ")) - } - fmt.Printf(" {{%s}}: %s%s\n", name, def.Description, attrStr) - } - } - return + if len(vars) > 0 { + fmt.Printf("\nVariables used: %s\n", strings.Join(vars, ", ")) } - // Ephemeral mode (default): output resolved formula as JSON to stdout - if !persist { - // Runtime mode: substitute variables before output - if runtimeMode { - // Apply defaults from formula variable definitions - for name, def := range resolved.Vars { - if _, provided := inputVars[name]; !provided && def.Default != "" { - inputVars[name] = def.Default - } - } - - // Check for missing required variables - var missingVars []string - for _, v := range vars { - if _, ok := inputVars[v]; !ok { - missingVars = append(missingVars, v) - } - } - if len(missingVars) > 0 { - fmt.Fprintf(os.Stderr, "Error: runtime mode requires all variables to have values\n") - fmt.Fprintf(os.Stderr, "Missing: %s\n", strings.Join(missingVars, ", ")) - fmt.Fprintf(os.Stderr, "Provide with: --var %s=\n", missingVars[0]) - os.Exit(1) - } - - // Substitute variables in the formula - substituteFormulaVars(resolved, inputVars) + // Show variable values in runtime mode + if runtimeMode && 
len(inputVars) > 0 { + fmt.Printf("\nVariable values:\n") + for name, value := range inputVars { + fmt.Printf(" {{%s}} = %s\n", name, value) } - outputJSON(resolved) - return } - // Persist mode: create proto bead in database (legacy behavior) + if len(bondPoints) > 0 { + fmt.Printf("Bond points: %s\n", strings.Join(bondPoints, ", ")) + } + + // Show variable definitions (more useful in compile-time mode) + if !runtimeMode && len(resolved.Vars) > 0 { + fmt.Printf("\nVariable definitions:\n") + for name, def := range resolved.Vars { + attrs := []string{} + if def.Required { + attrs = append(attrs, "required") + } + if def.Default != "" { + attrs = append(attrs, fmt.Sprintf("default=%s", def.Default)) + } + if len(def.Enum) > 0 { + attrs = append(attrs, fmt.Sprintf("enum=[%s]", strings.Join(def.Enum, ","))) + } + attrStr := "" + if len(attrs) > 0 { + attrStr = fmt.Sprintf(" (%s)", strings.Join(attrs, ", ")) + } + fmt.Printf(" {{%s}}: %s%s\n", name, def.Description, attrStr) + } + } +} + +// outputCookEphemeral outputs the resolved formula as JSON (ephemeral mode) +func outputCookEphemeral(resolved *formula.Formula, runtimeMode bool, inputVars map[string]string, vars []string) error { + if runtimeMode { + // Apply defaults from formula variable definitions + for name, def := range resolved.Vars { + if _, provided := inputVars[name]; !provided && def.Default != "" { + inputVars[name] = def.Default + } + } + + // Check for missing required variables + var missingVars []string + for _, v := range vars { + if _, ok := inputVars[v]; !ok { + missingVars = append(missingVars, v) + } + } + if len(missingVars) > 0 { + return fmt.Errorf("runtime mode requires all variables to have values\nMissing: %s\nProvide with: --var %s=", + strings.Join(missingVars, ", "), missingVars[0]) + } + + // Substitute variables in the formula + substituteFormulaVars(resolved, inputVars) + } + outputJSON(resolved) + return nil +} + +// persistCookFormula creates a proto bead in the database (persist mode) +func persistCookFormula(ctx context.Context, resolved *formula.Formula, protoID string, force bool, vars, bondPoints []string) error { // Check if proto already exists existingProto, err := store.GetIssue(ctx, protoID) if err == nil && existingProto != nil { if !force { - fmt.Fprintf(os.Stderr, "Error: proto %s already exists\n", protoID) - fmt.Fprintf(os.Stderr, "Hint: use --force to replace it\n") - os.Exit(1) + return fmt.Errorf("proto %s already exists (use --force to replace)", protoID) } // Delete existing proto and its children if err := deleteProtoSubgraph(ctx, store, protoID); err != nil { - fmt.Fprintf(os.Stderr, "Error deleting existing proto: %v\n", err) - os.Exit(1) + return fmt.Errorf("deleting existing proto: %w", err) } } // Create the proto bead from the formula result, err := cookFormula(ctx, store, resolved, protoID) if err != nil { - fmt.Fprintf(os.Stderr, "Error cooking formula: %v\n", err) - os.Exit(1) + return fmt.Errorf("cooking formula: %w", err) } // Schedule auto-flush @@ -361,7 +337,7 @@ func runCook(cmd *cobra.Command, args []string) { Variables: vars, BondPoints: bondPoints, }) - return + return nil } fmt.Printf("%s Cooked proto: %s\n", ui.RenderPass("✓"), result.ProtoID) @@ -373,6 +349,73 @@ func runCook(cmd *cobra.Command, args []string) { fmt.Printf(" Bond points: %s\n", strings.Join(bondPoints, ", ")) } fmt.Printf("\nTo use: bd mol pour %s --var =\n", result.ProtoID) + return nil +} + +func runCook(cmd *cobra.Command, args []string) { + // Parse and validate flags + flags, err := 
parseCookFlags(cmd, args)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Validate store access for persist mode
+	if flags.persist {
+		CheckReadonly("cook --persist")
+		if store == nil {
+			if daemonClient != nil {
+				fmt.Fprintf(os.Stderr, "Error: cook --persist requires direct database access\n")
+				fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon cook %s --persist ...\n", flags.formulaPath)
+			} else {
+				fmt.Fprintf(os.Stderr, "Error: no database connection\n")
+			}
+			os.Exit(1)
+		}
+	}
+
+	// Load and resolve the formula
+	resolved, err := loadAndResolveFormula(flags.formulaPath, flags.searchPaths)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+		os.Exit(1)
+	}
+
+	// Apply prefix to proto ID if specified
+	protoID := resolved.Formula
+	if flags.prefix != "" {
+		protoID = flags.prefix + resolved.Formula
+	}
+
+	// Extract variables and bond points
+	vars := formula.ExtractVariables(resolved)
+	var bondPoints []string
+	if resolved.Compose != nil {
+		for _, bp := range resolved.Compose.BondPoints {
+			bondPoints = append(bondPoints, bp.ID)
+		}
+	}
+
+	// Handle dry-run mode
+	if flags.dryRun {
+		outputCookDryRun(resolved, protoID, flags.runtimeMode, flags.inputVars, vars, bondPoints)
+		return
+	}
+
+	// Handle ephemeral mode (default)
+	if !flags.persist {
+		if err := outputCookEphemeral(resolved, flags.runtimeMode, flags.inputVars, vars); err != nil {
+			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+			os.Exit(1)
+		}
+		return
+	}
+
+	// Handle persist mode
+	if err := persistCookFormula(rootCtx, resolved, protoID, flags.force, vars, bondPoints); err != nil {
+		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
+		os.Exit(1)
+	}
+}
 
 // cookFormulaResult holds the result of cooking
@@ -410,15 +453,13 @@ func cookFormulaToSubgraph(f *formula.Formula, protoID string) (*TemplateSubgrap
 	issueMap[protoID] = rootIssue
 
 	// Collect issues for each step (use protoID as parent for step IDs)
-	collectStepsToSubgraph(f.Steps, protoID, issueMap, &issues, &deps)
+	// The unified collectSteps builds both issueMap and idMapping
+	idMapping := make(map[string]string)
+	collectSteps(f.Steps, protoID, idMapping, issueMap, &issues, &deps, nil) // nil = keep labels on issues
 
-	// Collect dependencies from depends_on
-	stepIDMapping := make(map[string]string)
+	// Collect dependencies from depends_on using the idMapping built above
 	for _, step := range f.Steps {
-		collectStepIDMappings(step, protoID, stepIDMapping)
-	}
-	for _, step := range f.Steps {
-		collectDependenciesToSubgraph(step, stepIDMapping, &deps)
+		collectDependencies(step, idMapping, &deps)
 	}
 
 	return &TemplateSubgraph{
@@ -429,145 +470,99 @@ func cookFormulaToSubgraph(f *formula.Formula, protoID string) (*TemplateSubgrap
 	}, nil
 }
 
-// collectStepsToSubgraph collects issues and dependencies for steps and their children.
-// This is the in-memory version that doesn't create labels (since those require DB).
-func collectStepsToSubgraph(steps []*formula.Step, parentID string, issueMap map[string]*types.Issue,
-	issues *[]*types.Issue, deps *[]*types.Dependency) {
+// processStepToIssue converts a formula.Step to a types.Issue.
+// Labels are populated from step.Labels, plus a gate label derived from waits_for.
+// This is the shared core logic used by both DB-persisted and in-memory cooking.
+func processStepToIssue(step *formula.Step, parentID string) *types.Issue { + // Generate issue ID (formula-name.step-id) + issueID := fmt.Sprintf("%s.%s", parentID, step.ID) + + // Determine issue type (children override to epic) + issueType := stepTypeToIssueType(step.Type) + if len(step.Children) > 0 { + issueType = types.TypeEpic + } + + // Determine priority + priority := 2 + if step.Priority != nil { + priority = *step.Priority + } + + issue := &types.Issue{ + ID: issueID, + Title: step.Title, // Keep {{variables}} for substitution at pour time + Description: step.Description, + Status: types.StatusOpen, + Priority: priority, + IssueType: issueType, + Assignee: step.Assignee, + IsTemplate: true, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + SourceFormula: step.SourceFormula, // Source tracing + SourceLocation: step.SourceLocation, // Source tracing + } + + // Populate labels from step + issue.Labels = append(issue.Labels, step.Labels...) + + // Add gate label for waits_for field + if step.WaitsFor != "" { + gateLabel := fmt.Sprintf("gate:%s", step.WaitsFor) + issue.Labels = append(issue.Labels, gateLabel) + } + + return issue +} + +// collectSteps collects issues and dependencies for steps and their children. +// This is the unified implementation used by both DB-persisted and in-memory cooking. +// +// Parameters: +// - idMapping: step.ID → issue.ID (always populated, used for dependency resolution) +// - issueMap: issue.ID → issue (optional, nil for DB path, populated for in-memory path) +// - labelHandler: callback for each label (if nil, labels stay on issue; if set, labels are +// extracted and issue.Labels is cleared - use for DB path) +func collectSteps(steps []*formula.Step, parentID string, + idMapping map[string]string, + issueMap map[string]*types.Issue, + issues *[]*types.Issue, + deps *[]*types.Dependency, + labelHandler func(issueID, label string)) { for _, step := range steps { - // Generate issue ID (formula-name.step-id) - issueID := fmt.Sprintf("%s.%s", parentID, step.ID) - - // Determine issue type (children override to epic) - issueType := stepTypeToIssueType(step.Type) - if len(step.Children) > 0 { - issueType = types.TypeEpic - } - - // Determine priority - priority := 2 - if step.Priority != nil { - priority = *step.Priority - } - - issue := &types.Issue{ - ID: issueID, - Title: step.Title, // Keep {{variables}} for substitution at pour time - Description: step.Description, - Status: types.StatusOpen, - Priority: priority, - IssueType: issueType, - Assignee: step.Assignee, - IsTemplate: true, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - SourceFormula: step.SourceFormula, // Source tracing - SourceLocation: step.SourceLocation, // Source tracing - } - - // Store labels in the issue's Labels field for in-memory use - issue.Labels = append(issue.Labels, step.Labels...) 
- - // Add gate label for waits_for field - if step.WaitsFor != "" { - gateLabel := fmt.Sprintf("gate:%s", step.WaitsFor) - issue.Labels = append(issue.Labels, gateLabel) - } - + issue := processStepToIssue(step, parentID) *issues = append(*issues, issue) - issueMap[issueID] = issue + + // Build mappings + idMapping[step.ID] = issue.ID + if issueMap != nil { + issueMap[issue.ID] = issue + } + + // Handle labels: extract via callback (DB path) or keep on issue (in-memory path) + if labelHandler != nil { + for _, label := range issue.Labels { + labelHandler(issue.ID, label) + } + issue.Labels = nil // DB stores labels separately + } // Add parent-child dependency *deps = append(*deps, &types.Dependency{ - IssueID: issueID, + IssueID: issue.ID, DependsOnID: parentID, Type: types.DepParentChild, }) // Recursively collect children if len(step.Children) > 0 { - collectStepsToSubgraph(step.Children, issueID, issueMap, issues, deps) + collectSteps(step.Children, issue.ID, idMapping, issueMap, issues, deps, labelHandler) } } } -// collectStepIDMappings builds a map from step ID to full issue ID -func collectStepIDMappings(step *formula.Step, parentID string, mapping map[string]string) { - issueID := fmt.Sprintf("%s.%s", parentID, step.ID) - mapping[step.ID] = issueID - - for _, child := range step.Children { - collectStepIDMappings(child, issueID, mapping) - } -} - -// collectDependenciesToSubgraph collects blocking dependencies from depends_on and needs fields. -func collectDependenciesToSubgraph(step *formula.Step, idMapping map[string]string, deps *[]*types.Dependency) { - issueID := idMapping[step.ID] - - // Process depends_on field - for _, depID := range step.DependsOn { - depIssueID, ok := idMapping[depID] - if !ok { - continue // Will be caught during validation - } - - *deps = append(*deps, &types.Dependency{ - IssueID: issueID, - DependsOnID: depIssueID, - Type: types.DepBlocks, - }) - } - - // Process needs field - simpler alias for sibling dependencies - for _, needID := range step.Needs { - needIssueID, ok := idMapping[needID] - if !ok { - continue // Will be caught during validation - } - - *deps = append(*deps, &types.Dependency{ - IssueID: issueID, - DependsOnID: needIssueID, - Type: types.DepBlocks, - }) - } - - // Process waits_for field - fanout gate dependency - if step.WaitsFor != "" { - waitsForSpec := formula.ParseWaitsFor(step.WaitsFor) - if waitsForSpec != nil { - // Determine spawner ID - spawnerStepID := waitsForSpec.SpawnerID - if spawnerStepID == "" && len(step.Needs) > 0 { - // Infer spawner from first need - spawnerStepID = step.Needs[0] - } - - if spawnerStepID != "" { - if spawnerIssueID, ok := idMapping[spawnerStepID]; ok { - // Create WaitsFor dependency with metadata - meta := types.WaitsForMeta{ - Gate: waitsForSpec.Gate, - } - metaJSON, _ := json.Marshal(meta) - - *deps = append(*deps, &types.Dependency{ - IssueID: issueID, - DependsOnID: spawnerIssueID, - Type: types.DepWaitsFor, - Metadata: string(metaJSON), - }) - } - } - } - } - - // Recursively handle children - for _, child := range step.Children { - collectDependenciesToSubgraph(child, idMapping, deps) - } -} // resolveAndCookFormula loads a formula by name, resolves it, applies all transformations, // and returns an in-memory TemplateSubgraph ready for instantiation. 
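Aside: a usage sketch of the unified collectSteps, with both call forms taken verbatim from this patch (the labels slice is the caller-local accumulator shown in cookFormula). In-memory callers pass a nil labelHandler so labels stay on each issue; the DB path passes a callback that extracts labels for separate storage:

	// In-memory path: labels remain on each issue.
	collectSteps(f.Steps, protoID, idMapping, issueMap, &issues, &deps, nil)

	// DB path: labels are extracted via the callback and cleared from the
	// issue, since the database stores them in a separate table.
	collectSteps(f.Steps, protoID, idMapping, nil, &issues, &deps, func(issueID, label string) {
		labels = append(labels, struct{ issueID, label string }{issueID, label})
	})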
@@ -694,7 +689,10 @@ func cookFormula(ctx context.Context, s storage.Storage, f *formula.Formula, pro labels = append(labels, struct{ issueID, label string }{protoID, MoleculeLabel}) // Collect issues for each step (use protoID as parent for step IDs) - collectStepsRecursive(f.Steps, protoID, idMapping, &issues, &deps, &labels) + // Use labelHandler to extract labels for separate DB storage + collectSteps(f.Steps, protoID, idMapping, nil, &issues, &deps, func(issueID, label string) { + labels = append(labels, struct{ issueID, label string }{issueID, label}) + }) // Collect dependencies from depends_on for _, step := range f.Steps { @@ -753,70 +751,8 @@ func cookFormula(ctx context.Context, s storage.Storage, f *formula.Formula, pro }, nil } -// collectStepsRecursive collects issues, dependencies, and labels for steps and their children. -func collectStepsRecursive(steps []*formula.Step, parentID string, idMapping map[string]string, - issues *[]*types.Issue, deps *[]*types.Dependency, labels *[]struct{ issueID, label string }) { - - for _, step := range steps { - // Generate issue ID (formula-name.step-id) - issueID := fmt.Sprintf("%s.%s", parentID, step.ID) - - // Determine issue type (children override to epic) - issueType := stepTypeToIssueType(step.Type) - if len(step.Children) > 0 { - issueType = types.TypeEpic - } - - // Determine priority - priority := 2 - if step.Priority != nil { - priority = *step.Priority - } - - issue := &types.Issue{ - ID: issueID, - Title: step.Title, // Keep {{variables}} for substitution at pour time - Description: step.Description, - Status: types.StatusOpen, - Priority: priority, - IssueType: issueType, - Assignee: step.Assignee, - IsTemplate: true, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - SourceFormula: step.SourceFormula, // Source tracing - SourceLocation: step.SourceLocation, // Source tracing - } - *issues = append(*issues, issue) - - // Collect labels - for _, label := range step.Labels { - *labels = append(*labels, struct{ issueID, label string }{issueID, label}) - } - - // Add gate label for waits_for field - if step.WaitsFor != "" { - gateLabel := fmt.Sprintf("gate:%s", step.WaitsFor) - *labels = append(*labels, struct{ issueID, label string }{issueID, gateLabel}) - } - - idMapping[step.ID] = issueID - - // Add parent-child dependency - *deps = append(*deps, &types.Dependency{ - IssueID: issueID, - DependsOnID: parentID, - Type: types.DepParentChild, - }) - - // Recursively collect children - if len(step.Children) > 0 { - collectStepsRecursive(step.Children, issueID, idMapping, issues, deps, labels) - } - } -} - -// collectDependencies collects blocking dependencies from depends_on and needs fields. +// collectDependencies collects blocking dependencies from depends_on, needs, and waits_for fields. +// This is the shared implementation used by both DB-persisted and in-memory subgraph cooking. func collectDependencies(step *formula.Step, idMapping map[string]string, deps *[]*types.Dependency) { issueID := idMapping[step.ID] diff --git a/cmd/bd/create.go b/cmd/bd/create.go index 0a6de264..2e0b3894 100644 --- a/cmd/bd/create.go +++ b/cmd/bd/create.go @@ -161,7 +161,7 @@ var createCmd = &cobra.Command{ repoPath = routing.DetermineTargetRepo(routingConfig, userRole, ".") } - // TODO: Switch to target repo for multi-repo support + // TODO(bd-6x6g): Switch to target repo for multi-repo support // For now, we just log the target repo in debug mode if repoPath != "." 
{ debug.Logf("DEBUG: Target repo: %s\n", repoPath) @@ -205,7 +205,7 @@ var createCmd = &cobra.Command{ // Get database prefix from config var dbPrefix string if daemonClient != nil { - // TODO: Add RPC method to get config in daemon mode + // TODO(bd-ag35): Add RPC method to get config in daemon mode // For now, skip validation in daemon mode (needs RPC enhancement) } else { // Direct mode - check config diff --git a/cmd/bd/daemon_logger.go b/cmd/bd/daemon_logger.go index bf085871..17da2207 100644 --- a/cmd/bd/daemon_logger.go +++ b/cmd/bd/daemon_logger.go @@ -128,7 +128,7 @@ func setupDaemonLogger(logPath string, jsonFormat bool, level slog.Level) (*lumb } // setupDaemonLoggerLegacy is the old signature for backward compatibility during migration. -// TODO: Remove this once all callers are updated to use the new signature. +// TODO(bd-2dwo): Remove this once all callers are updated to use the new signature. func setupDaemonLoggerLegacy(logPath string) (*lumberjack.Logger, daemonLogger) { return setupDaemonLogger(logPath, false, slog.LevelInfo) } diff --git a/cmd/bd/jira.go b/cmd/bd/jira.go index 88141549..2571d4a0 100644 --- a/cmd/bd/jira.go +++ b/cmd/bd/jira.go @@ -630,7 +630,7 @@ func detectJiraConflicts(ctx context.Context) ([]JiraConflict, error) { // Check if updated since last sync if issue.UpdatedAt.After(lastSync) { // This is a potential conflict - for now, mark as conflict - // TODO: In a full implementation, we'd fetch the Jira issue and compare timestamps + // TODO(bd-0qx5): In a full implementation, we'd fetch the Jira issue and compare timestamps conflicts = append(conflicts, JiraConflict{ IssueID: issue.ID, LocalUpdated: issue.UpdatedAt, diff --git a/cmd/bd/migrate.go b/cmd/bd/migrate.go index 14d15bbd..5e8ef980 100644 --- a/cmd/bd/migrate.go +++ b/cmd/bd/migrate.go @@ -19,7 +19,7 @@ import ( _ "github.com/ncruces/go-sqlite3/embed" ) -// TODO: Consider integrating into 'bd doctor' migration detection +// TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection var migrateCmd = &cobra.Command{ Use: "migrate", GroupID: "maint", diff --git a/cmd/bd/migrate_hash_ids.go b/cmd/bd/migrate_hash_ids.go index e9139769..f1e743fd 100644 --- a/cmd/bd/migrate_hash_ids.go +++ b/cmd/bd/migrate_hash_ids.go @@ -21,7 +21,7 @@ import ( "github.com/steveyegge/beads/internal/ui" ) -// TODO: Consider integrating into 'bd doctor' migration detection +// TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection var migrateHashIDsCmd = &cobra.Command{ Use: "hash-ids", Short: "Migrate sequential IDs to hash-based IDs (legacy)", diff --git a/cmd/bd/migrate_issues.go b/cmd/bd/migrate_issues.go index eaad3f88..b6c05d17 100644 --- a/cmd/bd/migrate_issues.go +++ b/cmd/bd/migrate_issues.go @@ -12,7 +12,7 @@ import ( "github.com/steveyegge/beads/internal/storage/sqlite" ) -// TODO: Consider integrating into 'bd doctor' migration detection +// TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection var migrateIssuesCmd = &cobra.Command{ Use: "issues", Short: "Move issues between repositories", diff --git a/cmd/bd/migrate_sync.go b/cmd/bd/migrate_sync.go index ba37dfc9..350315cd 100644 --- a/cmd/bd/migrate_sync.go +++ b/cmd/bd/migrate_sync.go @@ -13,7 +13,7 @@ import ( "github.com/steveyegge/beads/internal/syncbranch" ) -// TODO: Consider integrating into 'bd doctor' migration detection +// TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection var migrateSyncCmd = &cobra.Command{ Use: "sync ", Short: "Migrate to sync.branch workflow for 
multi-clone setups", diff --git a/cmd/bd/migrate_tombstones.go b/cmd/bd/migrate_tombstones.go index cf094bd7..2d1357c4 100644 --- a/cmd/bd/migrate_tombstones.go +++ b/cmd/bd/migrate_tombstones.go @@ -69,7 +69,7 @@ func loadLegacyDeletionsCmd(path string) (map[string]legacyDeletionRecordCmd, [] return records, warnings, nil } -// TODO: Consider integrating into 'bd doctor' migration detection +// TODO(bd-7l27): Consider integrating into 'bd doctor' migration detection var migrateTombstonesCmd = &cobra.Command{ Use: "tombstones", Short: "Convert deletions.jsonl entries to inline tombstones", diff --git a/cmd/bd/mol_stale.go b/cmd/bd/mol_stale.go index 0460f0ee..d510a6fc 100644 --- a/cmd/bd/mol_stale.go +++ b/cmd/bd/mol_stale.go @@ -64,7 +64,7 @@ func runMolStale(cmd *cobra.Command, args []string) { if daemonClient != nil { // For now, stale check requires direct store access - // TODO: Add RPC endpoint for stale check + // TODO(bd-ag35): Add RPC endpoint for stale check fmt.Fprintf(os.Stderr, "Error: mol stale requires direct database access\n") fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon mol stale\n") os.Exit(1) diff --git a/cmd/bd/sync_test.go b/cmd/bd/sync_test.go index df6ec510..b3f2302e 100644 --- a/cmd/bd/sync_test.go +++ b/cmd/bd/sync_test.go @@ -441,7 +441,7 @@ func TestHasJSONLConflict_MultipleConflicts(t *testing.T) { func TestZFCSkipsExportAfterImport(t *testing.T) { // Skip this test - it calls importFromJSONL which spawns bd import as subprocess, // but os.Executable() returns the test binary during tests, not the bd binary. - // TODO: Refactor to use direct import logic instead of subprocess. + // TODO(bd-h048): Refactor to use direct import logic instead of subprocess. t.Skip("Test requires subprocess spawning which doesn't work in test environment") if testing.Short() { t.Skip("Skipping test that spawns subprocess in short mode") diff --git a/internal/formula/types.go b/internal/formula/types.go index 4b253e69..8c79fcb8 100644 --- a/internal/formula/types.go +++ b/internal/formula/types.go @@ -167,23 +167,23 @@ type Step struct { // Expand references an expansion formula to inline here. // When set, this step is replaced by the expansion's steps. - // TODO(future): Not yet implemented in bd cook. Filed as future work. + // TODO(bd-7zka): Not yet implemented in bd cook. Filed as future work. Expand string `json:"expand,omitempty"` // ExpandVars are variable overrides for the expansion. - // TODO(future): Not yet implemented in bd cook. Filed as future work. + // TODO(bd-7zka): Not yet implemented in bd cook. Filed as future work. ExpandVars map[string]string `json:"expand_vars,omitempty"` // Condition makes this step optional based on a variable. // Format: "{{var}}" (truthy) or "{{var}} == value". - // TODO(future): Not yet implemented in bd cook. Filed as future work. + // TODO(bd-7zka): Not yet implemented in bd cook. Filed as future work. Condition string `json:"condition,omitempty"` // Children are nested steps (for creating epic hierarchies). Children []*Step `json:"children,omitempty"` // Gate defines an async wait condition for this step. - // TODO(future): Not yet implemented in bd cook. Will integrate with bd-udsi gates. + // TODO(bd-7zka): Not yet implemented in bd cook. Will integrate with bd-udsi gates. Gate *Gate `json:"gate,omitempty"` // Loop defines iteration for this step. @@ -207,7 +207,7 @@ type Step struct { } // Gate defines an async wait condition (integrates with bd-udsi). -// TODO(future): Not yet implemented in bd cook. 
Schema defined for future use. +// TODO(bd-7zka): Not yet implemented in bd cook. Schema defined for future use. type Gate struct { // Type is the condition type: gh:run, gh:pr, timer, human, mail. Type string `json:"type"`
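Aside: a minimal standalone sketch, not part of the patch, illustrating the precision issue the autoflush.go comments describe. Storing last_import_time at second precision makes the stored value parse back earlier than the JSONL file's nanosecond-precision mtime, which the staleness check would read as the JSONL being newer. The temp file below stands in for .beads/issues.jsonl.

package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	f, err := os.CreateTemp("", "issues-*.jsonl")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString("{}\n"); err != nil {
		panic(err)
	}
	if err := f.Close(); err != nil {
		panic(err)
	}

	fi, err := os.Stat(f.Name())
	if err != nil {
		panic(err)
	}
	mtime := fi.ModTime()

	// Second precision: formatting drops sub-second digits, so the parsed-back
	// value is earlier than the mtime whenever the mtime has a sub-second part.
	coarse, _ := time.Parse(time.RFC3339, mtime.Format(time.RFC3339))

	// Nanosecond precision: the value round-trips exactly.
	fine, _ := time.Parse(time.RFC3339Nano, mtime.Format(time.RFC3339Nano))

	fmt.Println("RFC3339 value earlier than mtime:", coarse.Before(mtime))   // usually true
	fmt.Println("RFC3339Nano value earlier than mtime:", fine.Before(mtime)) // false
}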