bd sync: 2025-12-27 15:56:42

This commit is contained in:
Steve Yegge
2025-12-27 15:56:42 -08:00
parent 87f535a65e
commit c8b912cbe6
179 changed files with 3051 additions and 10283 deletions

View File

@@ -671,7 +671,7 @@ func flushToJSONLWithState(state flushState) {
issues := make([]*types.Issue, 0, len(issueMap))
wispsSkipped := 0
for _, issue := range issueMap {
if issue.Ephemeral {
if issue.Wisp {
wispsSkipped++
continue
}

View File

@@ -15,7 +15,7 @@ type CleanupEmptyResponse struct {
DeletedCount int `json:"deleted_count"`
Message string `json:"message"`
Filter string `json:"filter,omitempty"`
Ephemeral bool `json:"ephemeral,omitempty"`
Wisp bool `json:"wisp,omitempty"`
}
// Hard delete mode: bypass tombstone TTL safety, use --older-than days directly
@@ -56,7 +56,7 @@ Delete issues closed more than 30 days ago:
bd cleanup --older-than 30 --force
Delete only closed wisps (transient molecules):
bd cleanup --ephemeral --force
bd cleanup --wisp --force
Preview what would be deleted/pruned:
bd cleanup --dry-run
@@ -80,7 +80,7 @@ SEE ALSO:
cascade, _ := cmd.Flags().GetBool("cascade")
olderThanDays, _ := cmd.Flags().GetInt("older-than")
hardDelete, _ := cmd.Flags().GetBool("hard")
wispOnly, _ := cmd.Flags().GetBool("ephemeral")
wispOnly, _ := cmd.Flags().GetBool("wisp")
// Calculate custom TTL for --hard mode
// When --hard is set, use --older-than days as the tombstone TTL cutoff
@@ -129,7 +129,7 @@ SEE ALSO:
// Add wisp filter if specified (bd-kwro.9)
if wispOnly {
wispTrue := true
filter.Ephemeral = &wispTrue
filter.Wisp = &wispTrue
}
// Get all closed issues matching filter
@@ -165,7 +165,7 @@ SEE ALSO:
result.Filter = fmt.Sprintf("older than %d days", olderThanDays)
}
if wispOnly {
result.Ephemeral = true
result.Wisp = true
}
outputJSON(result)
} else {
@@ -270,6 +270,6 @@ func init() {
cleanupCmd.Flags().Bool("cascade", false, "Recursively delete all dependent issues")
cleanupCmd.Flags().Int("older-than", 0, "Only delete issues closed more than N days ago (0 = all closed issues)")
cleanupCmd.Flags().Bool("hard", false, "Bypass tombstone TTL safety; use --older-than days as cutoff")
cleanupCmd.Flags().Bool("ephemeral", false, "Only delete closed wisps (transient molecules)")
cleanupCmd.Flags().Bool("wisp", false, "Only delete closed wisps (transient molecules)")
rootCmd.AddCommand(cleanupCmd)
}

View File

@@ -1,426 +0,0 @@
//go:build e2e
package main
import (
"bytes"
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
// cliCoverageMutex serializes runBDForCoverage calls: each call mutates
// process-wide state (cwd, os.Stdout/os.Stderr, os.Args, env vars, and the
// cobra root command's globals), so coverage tests must not run concurrently.
var cliCoverageMutex sync.Mutex
// runBDForCoverage runs the bd CLI in-process with the given args, using dir
// as the working directory, and returns the captured stdout and stderr. It
// fails the test immediately if the command returns an error. Because it
// mutates process-global state (cwd, os.Stdout/os.Stderr, os.Args, env vars,
// and the cobra root command's globals), every call is serialized through
// cliCoverageMutex and all state is restored before returning.
func runBDForCoverage(t *testing.T, dir string, args ...string) (stdout string, stderr string) {
t.Helper()
cliCoverageMutex.Lock()
defer cliCoverageMutex.Unlock()
// Add --no-daemon to all commands except init.
if len(args) > 0 && args[0] != "init" {
args = append([]string{"--no-daemon"}, args...)
}
// Snapshot process-wide state so it can be restored after Execute.
oldStdout := os.Stdout
oldStderr := os.Stderr
oldDir, _ := os.Getwd()
oldArgs := os.Args
if err := os.Chdir(dir); err != nil {
t.Fatalf("chdir %s: %v", dir, err)
}
// Capture command output by swapping stdout/stderr for pipes.
rOut, wOut, _ := os.Pipe()
rErr, wErr, _ := os.Pipe()
os.Stdout = wOut
os.Stderr = wErr
// Ensure direct mode.
oldNoDaemon, noDaemonWasSet := os.LookupEnv("BEADS_NO_DAEMON")
os.Setenv("BEADS_NO_DAEMON", "1")
defer func() {
if noDaemonWasSet {
_ = os.Setenv("BEADS_NO_DAEMON", oldNoDaemon)
} else {
os.Unsetenv("BEADS_NO_DAEMON")
}
}()
// Mark tests explicitly.
oldTestMode, testModeWasSet := os.LookupEnv("BEADS_TEST_MODE")
os.Setenv("BEADS_TEST_MODE", "1")
defer func() {
if testModeWasSet {
_ = os.Setenv("BEADS_TEST_MODE", oldTestMode)
} else {
os.Unsetenv("BEADS_TEST_MODE")
}
}()
// Ensure all commands (including init) operate on the temp workspace DB.
db := filepath.Join(dir, ".beads", "beads.db")
beadsDir := filepath.Join(dir, ".beads")
oldBeadsDir, beadsDirWasSet := os.LookupEnv("BEADS_DIR")
os.Setenv("BEADS_DIR", beadsDir)
defer func() {
if beadsDirWasSet {
_ = os.Setenv("BEADS_DIR", oldBeadsDir)
} else {
os.Unsetenv("BEADS_DIR")
}
}()
oldDB, dbWasSet := os.LookupEnv("BEADS_DB")
os.Setenv("BEADS_DB", db)
defer func() {
if dbWasSet {
_ = os.Setenv("BEADS_DB", oldDB)
} else {
os.Unsetenv("BEADS_DB")
}
}()
// Mirror the DB path into BD_DB as well (appears to be an alternate
// spelling of BEADS_DB — confirm against the CLI's env handling).
oldBDDB, bdDBWasSet := os.LookupEnv("BD_DB")
os.Setenv("BD_DB", db)
defer func() {
if bdDBWasSet {
_ = os.Setenv("BD_DB", oldBDDB)
} else {
os.Unsetenv("BD_DB")
}
}()
// Ensure actor is set so label operations record audit fields.
oldActor, actorWasSet := os.LookupEnv("BD_ACTOR")
os.Setenv("BD_ACTOR", "test-user")
defer func() {
if actorWasSet {
_ = os.Setenv("BD_ACTOR", oldActor)
} else {
os.Unsetenv("BD_ACTOR")
}
}()
oldBeadsActor, beadsActorWasSet := os.LookupEnv("BEADS_ACTOR")
os.Setenv("BEADS_ACTOR", "test-user")
defer func() {
if beadsActorWasSet {
_ = os.Setenv("BEADS_ACTOR", oldBeadsActor)
} else {
os.Unsetenv("BEADS_ACTOR")
}
}()
// Execute the command through the shared cobra root command.
rootCmd.SetArgs(args)
os.Args = append([]string{"bd"}, args...)
err := rootCmd.Execute()
// Close and clean up all global state to prevent contamination between tests.
if store != nil {
store.Close()
store = nil
}
if daemonClient != nil {
daemonClient.Close()
daemonClient = nil
}
// Reset all global flags and state (keep aligned with integration cli_fast_test).
dbPath = ""
actor = ""
jsonOutput = false
noDaemon = false
noAutoFlush = false
noAutoImport = false
sandboxMode = false
noDb = false
autoFlushEnabled = true
storeActive = false
flushFailureCount = 0
lastFlushError = nil
if flushManager != nil {
_ = flushManager.Shutdown()
flushManager = nil
}
rootCtx = nil
rootCancel = nil
// Give SQLite time to release file locks.
time.Sleep(10 * time.Millisecond)
// Restore stdout/stderr/cwd/args, then drain the capture pipes. The write
// ends must be closed before io.Copy so the reads can hit EOF.
_ = wOut.Close()
_ = wErr.Close()
os.Stdout = oldStdout
os.Stderr = oldStderr
_ = os.Chdir(oldDir)
os.Args = oldArgs
rootCmd.SetArgs(nil)
var outBuf, errBuf bytes.Buffer
_, _ = io.Copy(&outBuf, rOut)
_, _ = io.Copy(&errBuf, rErr)
_ = rOut.Close()
_ = rErr.Close()
stdout = outBuf.String()
stderr = errBuf.String()
// Any Execute error is fatal; include captured output for diagnosis.
if err != nil {
t.Fatalf("bd %v failed: %v\nStdout: %s\nStderr: %s", args, err, stdout, stderr)
}
return stdout, stderr
}
// extractJSONPayload returns the suffix of s beginning at the first '[' or
// '{' character, stripping any leading non-JSON noise (log lines, banners)
// that precedes a JSON payload. When neither character is present, s is
// returned unchanged.
func extractJSONPayload(s string) string {
	start := strings.IndexAny(s, "[{")
	if start < 0 {
		return s
	}
	return s[start:]
}
// parseCreatedIssueID extracts the "id" field from the JSON object emitted
// by `bd create --json`, failing the test when the payload cannot be parsed
// or carries no id.
func parseCreatedIssueID(t *testing.T, out string) string {
	t.Helper()
	payload := extractJSONPayload(out)
	var fields map[string]interface{}
	if err := json.Unmarshal([]byte(payload), &fields); err != nil {
		t.Fatalf("parse create JSON: %v\n%s", err, out)
	}
	id, ok := fields["id"].(string)
	if !ok || id == "" {
		t.Fatalf("missing id in create output: %s", out)
	}
	return id
}
// TestCoverage_ShowUpdateClose exercises the create/update/show/close CLI
// flows end to end: label add/set/remove via update, show output in both
// JSON and text form (single and multi-ID), and close with a reason.
func TestCoverage_ShowUpdateClose(t *testing.T) {
if testing.Short() {
t.Skip("skipping CLI coverage test in short mode")
}
dir := t.TempDir()
runBDForCoverage(t, dir, "init", "--prefix", "test", "--quiet")
out, _ := runBDForCoverage(t, dir, "create", "Show coverage issue", "-p", "1", "--json")
id := parseCreatedIssueID(t, out)
// Exercise update label flows (add -> set -> add/remove).
runBDForCoverage(t, dir, "update", id, "--add-label", "old", "--json")
runBDForCoverage(t, dir, "update", id, "--set-labels", "a,b", "--add-label", "c", "--remove-label", "a", "--json")
runBDForCoverage(t, dir, "update", id, "--remove-label", "old", "--json")
// Show JSON output and verify labels were applied.
showOut, _ := runBDForCoverage(t, dir, "show", "--allow-stale", id, "--json")
showPayload := extractJSONPayload(showOut)
var details []map[string]interface{}
if err := json.Unmarshal([]byte(showPayload), &details); err != nil {
// Some commands may emit a single object; fall back to object parse.
var single map[string]interface{}
if err2 := json.Unmarshal([]byte(showPayload), &single); err2 != nil {
t.Fatalf("parse show JSON: %v / %v\n%s", err, err2, showOut)
}
details = []map[string]interface{}{single}
}
if len(details) != 1 {
t.Fatalf("expected 1 issue, got %d", len(details))
}
labelsAny, ok := details[0]["labels"]
if !ok {
t.Fatalf("expected labels in show output: %s", showOut)
}
// Substring checks on the re-marshaled labels keep the assertions
// independent of label ordering.
labelsBytes, _ := json.Marshal(labelsAny)
labelsStr := string(labelsBytes)
if !strings.Contains(labelsStr, "b") || !strings.Contains(labelsStr, "c") {
t.Fatalf("expected labels b and c, got %s", labelsStr)
}
if strings.Contains(labelsStr, "a") || strings.Contains(labelsStr, "old") {
t.Fatalf("expected labels a and old to be absent, got %s", labelsStr)
}
// Show text output.
showText, _ := runBDForCoverage(t, dir, "show", "--allow-stale", id)
if !strings.Contains(showText, "Show coverage issue") {
t.Fatalf("expected show output to contain title, got: %s", showText)
}
// Multi-ID show should print both issues.
out2, _ := runBDForCoverage(t, dir, "create", "Second issue", "-p", "2", "--json")
id2 := parseCreatedIssueID(t, out2)
multi, _ := runBDForCoverage(t, dir, "show", "--allow-stale", id, id2)
if !strings.Contains(multi, "Show coverage issue") || !strings.Contains(multi, "Second issue") {
t.Fatalf("expected multi-show output to include both titles, got: %s", multi)
}
if !strings.Contains(multi, "─") {
t.Fatalf("expected multi-show output to include a separator line, got: %s", multi)
}
// Close and verify JSON output.
closeOut, _ := runBDForCoverage(t, dir, "close", id, "--reason", "Done", "--json")
closePayload := extractJSONPayload(closeOut)
var closed []map[string]interface{}
if err := json.Unmarshal([]byte(closePayload), &closed); err != nil {
t.Fatalf("parse close JSON: %v\n%s", err, closeOut)
}
if len(closed) != 1 {
t.Fatalf("expected 1 closed issue, got %d", len(closed))
}
if status, _ := closed[0]["status"].(string); status != string(types.StatusClosed) {
t.Fatalf("expected status closed, got %q", status)
}
}
// TestCoverage_TemplateAndPinnedProtections verifies two safety rails:
// closing a pinned issue requires --force, and a template issue (inserted
// directly through the sqlite store) rejects both update and close, while
// `show` leaves its is_template flag intact.
func TestCoverage_TemplateAndPinnedProtections(t *testing.T) {
if testing.Short() {
t.Skip("skipping CLI coverage test in short mode")
}
dir := t.TempDir()
runBDForCoverage(t, dir, "init", "--prefix", "test", "--quiet")
// Create a pinned issue and verify close requires --force.
out, _ := runBDForCoverage(t, dir, "create", "Pinned issue", "-p", "1", "--json")
pinnedID := parseCreatedIssueID(t, out)
runBDForCoverage(t, dir, "update", pinnedID, "--status", string(types.StatusPinned), "--json")
// The rejection is reported on stderr; the command itself must still
// exit cleanly (runBDForCoverage fatals on a non-nil Execute error).
_, closeErr := runBDForCoverage(t, dir, "close", pinnedID, "--reason", "Done")
if !strings.Contains(closeErr, "cannot close pinned issue") {
t.Fatalf("expected pinned close to be rejected, stderr: %s", closeErr)
}
forceOut, _ := runBDForCoverage(t, dir, "close", pinnedID, "--force", "--reason", "Done", "--json")
forcePayload := extractJSONPayload(forceOut)
var closed []map[string]interface{}
if err := json.Unmarshal([]byte(forcePayload), &closed); err != nil {
t.Fatalf("parse close JSON: %v\n%s", err, forceOut)
}
if len(closed) != 1 {
t.Fatalf("expected 1 closed issue, got %d", len(closed))
}
// Insert a template issue directly and verify update/close protect it.
// (The CLI has no create path for templates here, so the store is used.)
dbFile := filepath.Join(dir, ".beads", "beads.db")
s, err := sqlite.New(context.Background(), dbFile)
if err != nil {
t.Fatalf("sqlite.New: %v", err)
}
ctx := context.Background()
template := &types.Issue{
Title: "Template issue",
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeTask,
IsTemplate: true,
}
if err := s.CreateIssue(ctx, template, "test-user"); err != nil {
s.Close()
t.Fatalf("CreateIssue: %v", err)
}
created, err := s.GetIssue(ctx, template.ID)
if err != nil {
s.Close()
t.Fatalf("GetIssue(template): %v", err)
}
if created == nil || !created.IsTemplate {
s.Close()
t.Fatalf("expected inserted issue to be IsTemplate=true, got %+v", created)
}
// Close the store before invoking the CLI so it can open the same DB.
_ = s.Close()
showOut, _ := runBDForCoverage(t, dir, "show", "--allow-stale", template.ID, "--json")
showPayload := extractJSONPayload(showOut)
var showDetails []map[string]interface{}
if err := json.Unmarshal([]byte(showPayload), &showDetails); err != nil {
t.Fatalf("parse show JSON: %v\n%s", err, showOut)
}
if len(showDetails) != 1 {
t.Fatalf("expected 1 issue from show, got %d", len(showDetails))
}
// Re-open the DB after running the CLI to confirm is_template persisted.
s2, err := sqlite.New(context.Background(), dbFile)
if err != nil {
t.Fatalf("sqlite.New (reopen): %v", err)
}
postShow, err := s2.GetIssue(context.Background(), template.ID)
_ = s2.Close()
if err != nil {
t.Fatalf("GetIssue(template, post-show): %v", err)
}
if postShow == nil || !postShow.IsTemplate {
t.Fatalf("expected template to remain IsTemplate=true post-show, got %+v", postShow)
}
if v, ok := showDetails[0]["is_template"]; ok {
if b, ok := v.(bool); !ok || !b {
t.Fatalf("expected show JSON is_template=true, got %v", v)
}
} else {
t.Fatalf("expected show JSON to include is_template=true, got: %s", showOut)
}
// Update and close must both refuse to touch a template.
_, updErr := runBDForCoverage(t, dir, "update", template.ID, "--title", "New title")
if !strings.Contains(updErr, "cannot update template") {
t.Fatalf("expected template update to be rejected, stderr: %s", updErr)
}
_, closeTemplateErr := runBDForCoverage(t, dir, "close", template.ID, "--reason", "Done")
if !strings.Contains(closeTemplateErr, "cannot close template") {
t.Fatalf("expected template close to be rejected, stderr: %s", closeTemplateErr)
}
}
// TestCoverage_ShowThread builds a three-message reply chain directly in the
// sqlite store (reply2 -> reply1 -> root via replies-to dependencies that all
// share the root's ID as ThreadID) and verifies that `bd show --thread` on
// the deepest reply renders the entire thread with all three message IDs.
func TestCoverage_ShowThread(t *testing.T) {
if testing.Short() {
t.Skip("skipping CLI coverage test in short mode")
}
dir := t.TempDir()
runBDForCoverage(t, dir, "init", "--prefix", "test", "--quiet")
dbFile := filepath.Join(dir, ".beads", "beads.db")
s, err := sqlite.New(context.Background(), dbFile)
if err != nil {
t.Fatalf("sqlite.New: %v", err)
}
ctx := context.Background()
// Messages alternate sender/assignee to mimic a two-party conversation.
root := &types.Issue{Title: "Root message", IssueType: types.TypeMessage, Status: types.StatusOpen, Sender: "alice", Assignee: "bob"}
reply1 := &types.Issue{Title: "Re: Root", IssueType: types.TypeMessage, Status: types.StatusOpen, Sender: "bob", Assignee: "alice"}
reply2 := &types.Issue{Title: "Re: Re: Root", IssueType: types.TypeMessage, Status: types.StatusOpen, Sender: "alice", Assignee: "bob"}
if err := s.CreateIssue(ctx, root, "test-user"); err != nil {
s.Close()
t.Fatalf("CreateIssue root: %v", err)
}
if err := s.CreateIssue(ctx, reply1, "test-user"); err != nil {
s.Close()
t.Fatalf("CreateIssue reply1: %v", err)
}
if err := s.CreateIssue(ctx, reply2, "test-user"); err != nil {
s.Close()
t.Fatalf("CreateIssue reply2: %v", err)
}
if err := s.AddDependency(ctx, &types.Dependency{IssueID: reply1.ID, DependsOnID: root.ID, Type: types.DepRepliesTo, ThreadID: root.ID}, "test-user"); err != nil {
s.Close()
t.Fatalf("AddDependency reply1->root: %v", err)
}
if err := s.AddDependency(ctx, &types.Dependency{IssueID: reply2.ID, DependsOnID: reply1.ID, Type: types.DepRepliesTo, ThreadID: root.ID}, "test-user"); err != nil {
s.Close()
t.Fatalf("AddDependency reply2->reply1: %v", err)
}
// Close the store before invoking the CLI so it can open the same DB.
_ = s.Close()
out, _ := runBDForCoverage(t, dir, "show", "--allow-stale", reply2.ID, "--thread")
if !strings.Contains(out, "Thread") || !strings.Contains(out, "Total: 3 messages") {
t.Fatalf("expected thread output, got: %s", out)
}
if !strings.Contains(out, root.ID) || !strings.Contains(out, reply1.ID) || !strings.Contains(out, reply2.ID) {
t.Fatalf("expected thread output to include message IDs, got: %s", out)
}
}

View File

@@ -157,15 +157,6 @@ func runCook(cmd *cobra.Command, args []string) {
resolved.Steps = formula.ApplyAdvice(resolved.Steps, resolved.Advice)
}
// Apply inline step expansions (gt-8tmz.35)
// This processes Step.Expand fields before compose.expand/map rules
inlineExpandedSteps, err := formula.ApplyInlineExpansions(resolved.Steps, parser)
if err != nil {
fmt.Fprintf(os.Stderr, "Error applying inline expansions: %v\n", err)
os.Exit(1)
}
resolved.Steps = inlineExpandedSteps
// Apply expansion operators (gt-8tmz.3)
if resolved.Compose != nil && (len(resolved.Compose.Expand) > 0 || len(resolved.Compose.Map) > 0) {
expandedSteps, err := formula.ApplyExpansions(resolved.Steps, resolved.Compose, parser)
@@ -353,7 +344,7 @@ func runCook(cmd *cobra.Command, args []string) {
if len(bondPoints) > 0 {
fmt.Printf(" Bond points: %s\n", strings.Join(bondPoints, ", "))
}
fmt.Printf("\nTo use: bd mol pour %s --var <name>=<value>\n", result.ProtoID)
fmt.Printf("\nTo use: bd pour %s --var <name>=<value>\n", result.ProtoID)
}
// cookFormulaResult holds the result of cooking
@@ -365,8 +356,6 @@ type cookFormulaResult struct {
// cookFormulaToSubgraph creates an in-memory TemplateSubgraph from a resolved formula.
// This is the ephemeral proto implementation - no database storage.
// The returned subgraph can be passed directly to cloneSubgraph for instantiation.
//
//nolint:unparam // error return kept for API consistency with future error handling
func cookFormulaToSubgraph(f *formula.Formula, protoID string) (*TemplateSubgraph, error) {
// Map step ID -> created issue
issueMap := make(map[string]*types.Issue)
@@ -597,13 +586,6 @@ func resolveAndCookFormula(formulaName string, searchPaths []string) (*TemplateS
resolved.Steps = formula.ApplyAdvice(resolved.Steps, resolved.Advice)
}
// Apply inline step expansions (gt-8tmz.35)
inlineExpandedSteps, err := formula.ApplyInlineExpansions(resolved.Steps, parser)
if err != nil {
return nil, fmt.Errorf("applying inline expansions to %q: %w", formulaName, err)
}
resolved.Steps = inlineExpandedSteps
// Apply expansion operators (gt-8tmz.3)
if resolved.Compose != nil && (len(resolved.Compose.Expand) > 0 || len(resolved.Compose.Map) > 0) {
expandedSteps, err := formula.ApplyExpansions(resolved.Steps, resolved.Compose, parser)

View File

@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
@@ -13,7 +12,6 @@ import (
"github.com/steveyegge/beads/internal/hooks"
"github.com/steveyegge/beads/internal/routing"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/validation"
@@ -109,23 +107,7 @@ var createCmd = &cobra.Command{
waitsForGate, _ := cmd.Flags().GetString("waits-for-gate")
forceCreate, _ := cmd.Flags().GetBool("force")
repoOverride, _ := cmd.Flags().GetString("repo")
rigOverride, _ := cmd.Flags().GetString("rig")
prefixOverride, _ := cmd.Flags().GetString("prefix")
wisp, _ := cmd.Flags().GetBool("ephemeral")
// Handle --rig or --prefix flag: create issue in a different rig
// Both flags use the same forgiving lookup (accepts rig names or prefixes)
targetRig := rigOverride
if prefixOverride != "" {
if targetRig != "" {
FatalError("cannot specify both --rig and --prefix flags")
}
targetRig = prefixOverride
}
if targetRig != "" {
createInRig(cmd, targetRig, title, description, issueType, priority, design, acceptance, assignee, labels, externalRef, wisp)
return
}
wisp, _ := cmd.Flags().GetBool("wisp")
// Get estimate if provided
var estimatedMinutes *int
@@ -240,8 +222,7 @@ var createCmd = &cobra.Command{
Dependencies: deps,
WaitsFor: waitsFor,
WaitsForGate: waitsForGate,
Ephemeral: wisp,
CreatedBy: getActorWithGit(),
Wisp: wisp,
}
resp, err := daemonClient.Create(createArgs)
@@ -286,8 +267,7 @@ var createCmd = &cobra.Command{
Assignee: assignee,
ExternalRef: externalRefPtr,
EstimatedMinutes: estimatedMinutes,
Ephemeral: wisp,
CreatedBy: getActorWithGit(), // GH#748: track who created the issue
Wisp: wisp,
}
ctx := rootCtx
@@ -465,111 +445,8 @@ func init() {
createCmd.Flags().String("waits-for-gate", "all-children", "Gate type: all-children (wait for all) or any-children (wait for first)")
createCmd.Flags().Bool("force", false, "Force creation even if prefix doesn't match database prefix")
createCmd.Flags().String("repo", "", "Target repository for issue (overrides auto-routing)")
createCmd.Flags().String("rig", "", "Create issue in a different rig (e.g., --rig beads)")
createCmd.Flags().String("prefix", "", "Create issue in rig by prefix (e.g., --prefix bd- or --prefix bd or --prefix beads)")
createCmd.Flags().IntP("estimate", "e", 0, "Time estimate in minutes (e.g., 60 for 1 hour)")
createCmd.Flags().Bool("ephemeral", false, "Create as ephemeral (ephemeral, not exported to JSONL)")
createCmd.Flags().Bool("wisp", false, "Create as wisp (ephemeral, not exported to JSONL)")
// Note: --json flag is defined as a persistent flag in main.go, not here
rootCmd.AddCommand(createCmd)
}
// createInRig creates an issue in a different rig using --rig flag.
// This bypasses the normal daemon/direct flow and directly creates in the target rig.
func createInRig(cmd *cobra.Command, rigName, title, description, issueType string, priority int, design, acceptance, assignee string, labels []string, externalRef string, wisp bool) {
ctx := rootCtx
// Find the town-level beads directory (where routes.jsonl lives)
townBeadsDir, err := findTownBeadsDir()
if err != nil {
FatalError("cannot use --rig: %v", err)
}
// Resolve the target rig's beads directory
targetBeadsDir, _, err := routing.ResolveBeadsDirForRig(rigName, townBeadsDir)
if err != nil {
FatalError("%v", err)
}
// Open storage for the target rig
targetDBPath := filepath.Join(targetBeadsDir, "beads.db")
targetStore, err := sqlite.New(ctx, targetDBPath)
if err != nil {
FatalError("failed to open rig %q database: %v", rigName, err)
}
defer targetStore.Close()
var externalRefPtr *string
if externalRef != "" {
externalRefPtr = &externalRef
}
// Create issue without ID - CreateIssue will generate one with the correct prefix
issue := &types.Issue{
Title: title,
Description: description,
Design: design,
AcceptanceCriteria: acceptance,
Status: types.StatusOpen,
Priority: priority,
IssueType: types.IssueType(issueType),
Assignee: assignee,
ExternalRef: externalRefPtr,
Ephemeral: wisp,
CreatedBy: getActorWithGit(),
}
if err := targetStore.CreateIssue(ctx, issue, actor); err != nil {
FatalError("failed to create issue in rig %q: %v", rigName, err)
}
// Add labels if specified
for _, label := range labels {
if err := targetStore.AddLabel(ctx, issue.ID, label, actor); err != nil {
WarnError("failed to add label %s: %v", label, err)
}
}
// Get silent flag
silent, _ := cmd.Flags().GetBool("silent")
if jsonOutput {
outputJSON(issue)
} else if silent {
fmt.Println(issue.ID)
} else {
fmt.Printf("%s Created issue in rig %q: %s\n", ui.RenderPass("✓"), rigName, issue.ID)
fmt.Printf(" Title: %s\n", issue.Title)
fmt.Printf(" Priority: P%d\n", issue.Priority)
fmt.Printf(" Status: %s\n", issue.Status)
}
}
// findTownBeadsDir finds the town-level .beads directory (where routes.jsonl lives).
// It walks up from the current directory looking for a .beads directory with routes.jsonl.
func findTownBeadsDir() (string, error) {
// Start from current directory and walk up
dir, err := os.Getwd()
if err != nil {
return "", err
}
for {
beadsDir := filepath.Join(dir, ".beads")
routesFile := filepath.Join(beadsDir, routing.RoutesFileName)
// Check if this .beads directory has routes.jsonl
if _, err := os.Stat(routesFile); err == nil {
return beadsDir, nil
}
// Move up one directory
parent := filepath.Dir(dir)
if parent == dir {
// Reached filesystem root
break
}
dir = parent
}
return "", fmt.Errorf("no routes.jsonl found in any parent .beads directory")
}

View File

@@ -111,7 +111,6 @@ func CreateIssueFromFormValues(ctx context.Context, s storage.Storage, fv *creat
IssueType: types.IssueType(fv.IssueType),
Assignee: fv.Assignee,
ExternalRef: externalRefPtr,
CreatedBy: getActorWithGit(), // GH#748: track who created the issue
}
// Check if any dependencies are discovered-from type

View File

@@ -355,11 +355,6 @@ func runDaemonLoop(interval time.Duration, autoCommit, autoPush, autoPull, local
// Check for multiple .db files (ambiguity error)
beadsDir := filepath.Dir(daemonDBPath)
// Reset backoff on daemon start (fresh start, but preserve NeedsManualSync hint)
if !localMode {
ResetBackoffOnDaemonStart(beadsDir)
}
matches, err := filepath.Glob(filepath.Join(beadsDir, "*.db"))
if err == nil && len(matches) > 1 {
// Filter out backup files (*.backup-*.db, *.backup.db)

View File

@@ -30,36 +30,36 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
// Create "remote" repository
remoteDir := filepath.Join(tempDir, "remote")
if err := os.MkdirAll(remoteDir, 0750); err != nil {
t.Fatalf("Failed to create remote dir: %v", err)
}
// Initialize remote git repo
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
runGitCmd(t, remoteDir, "init", "--bare")
// Create "clone1" repository (Agent A)
clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir)
// Initialize beads in clone1
clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err)
}
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close()
ctx := context.Background()
if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Create an open issue in clone1
issue := &types.Issue{
Title: "Test daemon auto-import",
@@ -73,39 +73,39 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
issueID := issue.ID
// Export to JSONL
jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
// Commit and push from clone1
runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Add test issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Create "clone2" repository (Agent B)
clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir)
// Initialize empty database in clone2
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close()
if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Import initial JSONL in clone2
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import: %v", err)
}
// Verify issue exists in clone2
initialIssue, err := clone2Store.GetIssue(ctx, issueID)
if err != nil {
@@ -114,27 +114,27 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
if initialIssue.Status != types.StatusOpen {
t.Errorf("Expected status open, got %s", initialIssue.Status)
}
// NOW THE CRITICAL TEST: Agent A closes the issue and pushes
t.Run("DaemonAutoImportsAfterGitPull", func(t *testing.T) {
// Agent A closes the issue
if err := clone1Store.CloseIssue(ctx, issueID, "Completed", "agent-a"); err != nil {
t.Fatalf("Failed to close issue: %v", err)
}
// Agent A exports to JSONL
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export after close: %v", err)
}
// Agent A commits and pushes
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Close issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B does git pull (updates JSONL on disk)
runGitCmd(t, clone2Dir, "pull")
// Wait for filesystem to settle after git operations
// Windows has lower filesystem timestamp precision (typically 100ms)
// and file I/O may be slower, so we need a longer delay
@@ -143,23 +143,23 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
} else {
time.Sleep(50 * time.Millisecond)
}
// Start daemon server in clone2
socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
os.Remove(socketPath) // Ensure clean state
server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath)
// Start server in background
serverCtx, serverCancel := context.WithCancel(context.Background())
defer serverCancel()
go func() {
if err := server.Start(serverCtx); err != nil {
t.Logf("Server error: %v", err)
}
}()
// Wait for server to be ready
for i := 0; i < 50; i++ {
time.Sleep(10 * time.Millisecond)
@@ -167,7 +167,7 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
break
}
}
// Simulate a daemon request (like "bd show <issue>")
// The daemon should auto-import the updated JSONL before responding
client, err := rpc.TryConnect(socketPath)
@@ -178,15 +178,15 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatal("Client is nil")
}
defer client.Close()
client.SetDatabasePath(clone2DBPath) // Route to correct database
// Make a request that triggers auto-import check
resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil {
t.Fatalf("Failed to get issue from daemon: %v", err)
}
// Parse response
var issue types.Issue
issueJSON, err := json.Marshal(resp.Data)
@@ -196,25 +196,25 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
if err := json.Unmarshal(issueJSON, &issue); err != nil {
t.Fatalf("Failed to unmarshal issue: %v", err)
}
status := issue.Status
// CRITICAL ASSERTION: Daemon should return CLOSED status from JSONL
// not stale OPEN status from SQLite
if status != types.StatusClosed {
t.Errorf("DAEMON AUTO-IMPORT FAILED: Expected status 'closed' but got '%s'", status)
t.Errorf("This means daemon is serving stale SQLite data instead of auto-importing JSONL")
// Double-check JSONL has correct status
jsonlData, _ := os.ReadFile(clone2JSONLPath)
t.Logf("JSONL content: %s", string(jsonlData))
// Double-check what's in SQLite
directIssue, _ := clone2Store.GetIssue(ctx, issueID)
t.Logf("SQLite status: %s", directIssue.Status)
}
})
// Additional test: Verify multiple rapid changes
t.Run("DaemonHandlesRapidUpdates", func(t *testing.T) {
// Agent A updates priority
@@ -223,18 +223,18 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
}, "agent-a"); err != nil {
t.Fatalf("Failed to update priority: %v", err)
}
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B pulls
runGitCmd(t, clone2Dir, "pull")
// Query via daemon - should see priority 0
// (Execute forces auto-import synchronously)
socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
@@ -243,18 +243,18 @@ func TestDaemonAutoImportAfterGitPull(t *testing.T) {
t.Fatalf("Failed to connect to daemon: %v", err)
}
defer client.Close()
client.SetDatabasePath(clone2DBPath) // Route to correct database
resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil {
t.Fatalf("Failed to get issue from daemon: %v", err)
}
var issue types.Issue
issueJSON, _ := json.Marshal(resp.Data)
json.Unmarshal(issueJSON, &issue)
if issue.Priority != 0 {
t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority)
}
@@ -273,23 +273,23 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
// Setup remote and two clones
remoteDir := filepath.Join(tempDir, "remote")
os.MkdirAll(remoteDir, 0750)
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
runGitCmd(t, remoteDir, "init", "--bare")
clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir)
clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir)
// Initialize beads in both clones
ctx := context.Background()
// Clone1 setup
clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
os.MkdirAll(clone1BeadsDir, 0750)
@@ -297,7 +297,7 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close()
clone1Store.SetMetadata(ctx, "issue_prefix", "test")
// Clone2 setup
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
os.MkdirAll(clone2BeadsDir, 0750)
@@ -305,7 +305,7 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close()
clone2Store.SetMetadata(ctx, "issue_prefix", "test")
// Agent A creates issue and pushes
issue2 := &types.Issue{
Title: "Shared issue",
@@ -317,18 +317,18 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
}
clone1Store.CreateIssue(ctx, issue2, "agent-a")
issueID := issue2.ID
clone1JSONLPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
exportIssuesToJSONL(ctx, clone1Store, clone1JSONLPath)
runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Initial issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Agent B pulls and imports
runGitCmd(t, clone2Dir, "pull")
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath)
// THE CORRUPTION SCENARIO:
// 1. Agent A closes the issue and pushes
clone1Store.CloseIssue(ctx, issueID, "Done", "agent-a")
@@ -336,31 +336,31 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Close issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// 2. Agent B does git pull (JSONL updated on disk)
runGitCmd(t, clone2Dir, "pull")
// Wait for filesystem to settle after git operations
time.Sleep(50 * time.Millisecond)
// 3. Agent B daemon exports STALE data (if auto-import doesn't work)
// This would overwrite Agent A's closure with old "open" status
// Start daemon in clone2
socketPath := filepath.Join(clone2BeadsDir, "bd.sock")
os.Remove(socketPath)
server := rpc.NewServer(socketPath, clone2Store, clone2Dir, clone2DBPath)
serverCtx, serverCancel := context.WithCancel(context.Background())
defer serverCancel()
go func() {
if err := server.Start(serverCtx); err != nil {
t.Logf("Server error: %v", err)
}
}()
// Wait for server
for i := 0; i < 50; i++ {
time.Sleep(10 * time.Millisecond)
@@ -368,43 +368,43 @@ func TestDaemonAutoImportDataCorruption(t *testing.T) {
break
}
}
// Trigger daemon operation (should auto-import first)
client, err := rpc.TryConnect(socketPath)
if err != nil {
t.Fatalf("Failed to connect: %v", err)
}
defer client.Close()
client.SetDatabasePath(clone2DBPath)
resp, err := client.Execute("show", map[string]string{"id": issueID})
if err != nil {
t.Fatalf("Failed to get issue: %v", err)
}
var issue types.Issue
issueJSON, _ := json.Marshal(resp.Data)
json.Unmarshal(issueJSON, &issue)
status := issue.Status
// If daemon didn't auto-import, this would be "open" (stale)
// With the fix, it should be "closed" (fresh from JSONL)
if status != types.StatusClosed {
t.Errorf("DATA CORRUPTION DETECTED: Daemon has stale status '%s' instead of 'closed'", status)
t.Error("If daemon exports this stale data, it will overwrite Agent A's changes on next push")
}
// Now simulate daemon export (which happens on timer)
// With auto-import working, this export should have fresh data
exportIssuesToJSONL(ctx, clone2Store, clone2JSONLPath)
// Read back JSONL to verify it has correct status
data, _ := os.ReadFile(clone2JSONLPath)
var exportedIssue types.Issue
json.NewDecoder(bytes.NewReader(data)).Decode(&exportedIssue)
if exportedIssue.Status != types.StatusClosed {
t.Errorf("CORRUPTION: Exported JSONL has wrong status '%s', would overwrite remote", exportedIssue.Status)
}

View File

@@ -31,19 +31,6 @@ var (
daemonStartFailures int
)
var (
executableFn = os.Executable
execCommandFn = exec.Command
openFileFn = os.OpenFile
findProcessFn = os.FindProcess
removeFileFn = os.Remove
configureDaemonProcessFn = configureDaemonProcess
waitForSocketReadinessFn = waitForSocketReadiness
startDaemonProcessFn = startDaemonProcess
isDaemonRunningFn = isDaemonRunning
sendStopSignalFn = sendStopSignal
)
// shouldAutoStartDaemon checks if daemon auto-start is enabled
func shouldAutoStartDaemon() bool {
// Check BEADS_NO_DAEMON first (escape hatch for single-user workflows)
@@ -66,6 +53,7 @@ func shouldAutoStartDaemon() bool {
return config.GetBool("auto-start-daemon") // Defaults to true
}
// restartDaemonForVersionMismatch stops the old daemon and starts a new one
// Returns true if restart was successful
func restartDaemonForVersionMismatch() bool {
@@ -79,17 +67,17 @@ func restartDaemonForVersionMismatch() bool {
// Check if daemon is running and stop it
forcedKill := false
if isRunning, pid := isDaemonRunningFn(pidFile); isRunning {
if isRunning, pid := isDaemonRunning(pidFile); isRunning {
debug.Logf("stopping old daemon (PID %d)", pid)
process, err := findProcessFn(pid)
process, err := os.FindProcess(pid)
if err != nil {
debug.Logf("failed to find process: %v", err)
return false
}
// Send stop signal
if err := sendStopSignalFn(process); err != nil {
if err := sendStopSignal(process); err != nil {
debug.Logf("failed to signal daemon: %v", err)
return false
}
@@ -97,14 +85,14 @@ func restartDaemonForVersionMismatch() bool {
// Wait for daemon to stop, then force kill
for i := 0; i < daemonShutdownAttempts; i++ {
time.Sleep(daemonShutdownPollInterval)
if isRunning, _ := isDaemonRunningFn(pidFile); !isRunning {
if isRunning, _ := isDaemonRunning(pidFile); !isRunning {
debug.Logf("old daemon stopped successfully")
break
}
}
// Force kill if still running
if isRunning, _ := isDaemonRunningFn(pidFile); isRunning {
if isRunning, _ := isDaemonRunning(pidFile); isRunning {
debug.Logf("force killing old daemon")
_ = process.Kill()
forcedKill = true
@@ -113,19 +101,19 @@ func restartDaemonForVersionMismatch() bool {
// Clean up stale socket and PID file after force kill or if not running
if forcedKill || !isDaemonRunningQuiet(pidFile) {
_ = removeFileFn(socketPath)
_ = removeFileFn(pidFile)
_ = os.Remove(socketPath)
_ = os.Remove(pidFile)
}
// Start new daemon with current binary version
exe, err := executableFn()
exe, err := os.Executable()
if err != nil {
debug.Logf("failed to get executable path: %v", err)
return false
}
args := []string{"daemon", "--start"}
cmd := execCommandFn(exe, args...)
cmd := exec.Command(exe, args...)
cmd.Env = append(os.Environ(), "BD_DAEMON_FOREGROUND=1")
// Set working directory to database directory so daemon finds correct DB
@@ -133,9 +121,9 @@ func restartDaemonForVersionMismatch() bool {
cmd.Dir = filepath.Dir(dbPath)
}
configureDaemonProcessFn(cmd)
configureDaemonProcess(cmd)
devNull, err := openFileFn(os.DevNull, os.O_RDWR, 0)
devNull, err := os.OpenFile(os.DevNull, os.O_RDWR, 0)
if err == nil {
cmd.Stdin = devNull
cmd.Stdout = devNull
@@ -152,7 +140,7 @@ func restartDaemonForVersionMismatch() bool {
go func() { _ = cmd.Wait() }()
// Wait for daemon to be ready using shared helper
if waitForSocketReadinessFn(socketPath, 5*time.Second) {
if waitForSocketReadiness(socketPath, 5*time.Second) {
debug.Logf("new daemon started successfully")
return true
}
@@ -165,7 +153,7 @@ func restartDaemonForVersionMismatch() bool {
// isDaemonRunningQuiet checks if daemon is running without output
func isDaemonRunningQuiet(pidFile string) bool {
isRunning, _ := isDaemonRunningFn(pidFile)
isRunning, _ := isDaemonRunning(pidFile)
return isRunning
}
@@ -197,7 +185,7 @@ func tryAutoStartDaemon(socketPath string) bool {
}
socketPath = determineSocketPath(socketPath)
return startDaemonProcessFn(socketPath)
return startDaemonProcess(socketPath)
}
func debugLog(msg string, args ...interface{}) {
@@ -281,21 +269,21 @@ func determineSocketPath(socketPath string) string {
}
func startDaemonProcess(socketPath string) bool {
binPath, err := executableFn()
binPath, err := os.Executable()
if err != nil {
binPath = os.Args[0]
}
args := []string{"daemon", "--start"}
cmd := execCommandFn(binPath, args...)
cmd := exec.Command(binPath, args...)
setupDaemonIO(cmd)
if dbPath != "" {
cmd.Dir = filepath.Dir(dbPath)
}
configureDaemonProcessFn(cmd)
configureDaemonProcess(cmd)
if err := cmd.Start(); err != nil {
recordDaemonStartFailure()
debugLog("failed to start daemon: %v", err)
@@ -304,7 +292,7 @@ func startDaemonProcess(socketPath string) bool {
go func() { _ = cmd.Wait() }()
if waitForSocketReadinessFn(socketPath, 5*time.Second) {
if waitForSocketReadiness(socketPath, 5*time.Second) {
recordDaemonStartSuccess()
return true
}
@@ -318,7 +306,7 @@ func startDaemonProcess(socketPath string) bool {
}
func setupDaemonIO(cmd *exec.Cmd) {
devNull, err := openFileFn(os.DevNull, os.O_RDWR, 0)
devNull, err := os.OpenFile(os.DevNull, os.O_RDWR, 0)
if err == nil {
cmd.Stdout = devNull
cmd.Stderr = devNull

View File

@@ -1,331 +0,0 @@
package main
import (
"bytes"
"context"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/steveyegge/beads/internal/config"
)
// tempSockDir returns a short-lived directory suitable for Unix socket
// paths. It prefers /tmp when available (socket paths have a tight length
// limit) and falls back to os.TempDir() on Windows or when /tmp cannot be
// stat'd. The directory is removed via t.Cleanup.
func tempSockDir(t *testing.T) string {
	t.Helper()
	base := os.TempDir()
	if runtime.GOOS != windowsOS {
		if _, statErr := os.Stat("/tmp"); statErr == nil {
			base = "/tmp"
		}
	}
	dir, err := os.MkdirTemp(base, "bd-sock-*")
	if err != nil {
		t.Fatalf("MkdirTemp: %v", err)
	}
	t.Cleanup(func() { _ = os.RemoveAll(dir) })
	return dir
}
// startTestRPCServer spins up a real RPC server on a fresh Unix socket
// backed by a throwaway SQLite store, returning the socket path and a
// cleanup func. Cleanup cancels the server context and stops the server;
// the temp directory itself is removed by tempSockDir's t.Cleanup.
func startTestRPCServer(t *testing.T) (socketPath string, cleanup func()) {
	t.Helper()
	tmpDir := tempSockDir(t)
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0o750); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	socketPath = filepath.Join(beadsDir, "bd.sock")
	db := filepath.Join(beadsDir, "test.db")
	store := newTestStore(t, db)
	ctx, cancel := context.WithCancel(context.Background())
	log := newTestLogger()
	server, _, err := startRPCServer(ctx, socketPath, store, tmpDir, db, log)
	if err != nil {
		// Cancel before failing so the context isn't leaked on error.
		cancel()
		t.Fatalf("startRPCServer: %v", err)
	}
	cleanup = func() {
		cancel()
		if server != nil {
			_ = server.Stop()
		}
	}
	return socketPath, cleanup
}
func captureStderr(t *testing.T, fn func()) string {
t.Helper()
old := os.Stderr
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("os.Pipe: %v", err)
}
os.Stderr = w
var buf bytes.Buffer
done := make(chan struct{})
go func() {
_, _ = io.Copy(&buf, r)
close(done)
}()
fn()
_ = w.Close()
os.Stderr = old
<-done
_ = r.Close()
return buf.String()
}
// TestDaemonAutostart_AcquireStartLock_CreatesAndCleansStale verifies that
// acquireStartLock creates a lock file holding our PID, and that a stale
// lock (dead/unreadable PID) is removed and re-acquired.
func TestDaemonAutostart_AcquireStartLock_CreatesAndCleansStale(t *testing.T) {
	tmpDir := t.TempDir()
	lockPath := filepath.Join(tmpDir, "bd.sock.startlock")
	// No lock exists yet, so reading a PID from it must fail.
	// (The original check had an empty body and asserted nothing.)
	if pid, err := readPIDFromFile(lockPath); err == nil {
		t.Fatalf("expected readPIDFromFile to fail before lock exists, got pid %d", pid)
	}
	if !acquireStartLock(lockPath, filepath.Join(tmpDir, "bd.sock")) {
		t.Fatalf("expected acquireStartLock to succeed")
	}
	got, err := readPIDFromFile(lockPath)
	if err != nil {
		t.Fatalf("readPIDFromFile: %v", err)
	}
	if got != os.Getpid() {
		t.Fatalf("expected lock PID %d, got %d", os.Getpid(), got)
	}
	// Stale lock: dead/unreadable PID should be removed and recreated.
	if err := os.WriteFile(lockPath, []byte("0\n"), 0o600); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}
	if !acquireStartLock(lockPath, filepath.Join(tmpDir, "bd.sock")) {
		t.Fatalf("expected acquireStartLock to succeed on stale lock")
	}
	got, err = readPIDFromFile(lockPath)
	if err != nil {
		t.Fatalf("readPIDFromFile: %v", err)
	}
	if got != os.Getpid() {
		t.Fatalf("expected recreated lock PID %d, got %d", os.Getpid(), got)
	}
}
// TestDaemonAutostart_SocketHealthAndReadiness exercises the socket probe
// helpers against a live RPC server (dial, health check, readiness wait)
// and against a never-listened path (both probes must fail).
func TestDaemonAutostart_SocketHealthAndReadiness(t *testing.T) {
	socketPath, cleanup := startTestRPCServer(t)
	defer cleanup()
	if !canDialSocket(socketPath, 500*time.Millisecond) {
		t.Fatalf("expected canDialSocket to succeed")
	}
	if !isDaemonHealthy(socketPath) {
		t.Fatalf("expected isDaemonHealthy to succeed")
	}
	if !waitForSocketReadiness(socketPath, 500*time.Millisecond) {
		t.Fatalf("expected waitForSocketReadiness to succeed")
	}
	// A path nothing listens on: dial fails fast, readiness times out.
	missing := filepath.Join(tempSockDir(t), "missing.sock")
	if canDialSocket(missing, 50*time.Millisecond) {
		t.Fatalf("expected canDialSocket to fail")
	}
	if waitForSocketReadiness(missing, 200*time.Millisecond) {
		t.Fatalf("expected waitForSocketReadiness to time out")
	}
}
// TestDaemonAutostart_HandleExistingSocket confirms that handleExistingSocket
// reports true when a healthy daemon already owns the socket.
func TestDaemonAutostart_HandleExistingSocket(t *testing.T) {
	socketPath, cleanup := startTestRPCServer(t)
	defer cleanup()
	if !handleExistingSocket(socketPath) {
		t.Fatalf("expected handleExistingSocket true for running daemon")
	}
}
// TestDaemonAutostart_HandleExistingSocket_StaleCleansUp plants a bogus
// socket file and a PID file for a dead process, then verifies that
// handleExistingSocket reports false and removes both stale artifacts.
func TestDaemonAutostart_HandleExistingSocket_StaleCleansUp(t *testing.T) {
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0o750); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	socketPath := filepath.Join(beadsDir, "bd.sock")
	pidFile := filepath.Join(beadsDir, "daemon.pid")
	// A regular file, not a listening socket: dialing it must fail.
	if err := os.WriteFile(socketPath, []byte("not-a-socket"), 0o600); err != nil {
		t.Fatalf("WriteFile socket: %v", err)
	}
	// PID 0 never corresponds to a live daemon.
	if err := os.WriteFile(pidFile, []byte("0\n"), 0o600); err != nil {
		t.Fatalf("WriteFile pid: %v", err)
	}
	if handleExistingSocket(socketPath) {
		t.Fatalf("expected false for stale socket")
	}
	if _, err := os.Stat(socketPath); !os.IsNotExist(err) {
		t.Fatalf("expected socket removed")
	}
	if _, err := os.Stat(pidFile); !os.IsNotExist(err) {
		t.Fatalf("expected pidfile removed")
	}
}
// TestDaemonAutostart_TryAutoStartDaemon_EarlyExits covers the two fast
// paths of tryAutoStartDaemon: a recent failure keeps it in backoff (skip),
// and an already-healthy daemon short-circuits to success. The package-level
// backoff counters are saved and restored around the test.
func TestDaemonAutostart_TryAutoStartDaemon_EarlyExits(t *testing.T) {
	oldFailures := daemonStartFailures
	oldLast := lastDaemonStartAttempt
	defer func() {
		daemonStartFailures = oldFailures
		lastDaemonStartAttempt = oldLast
	}()
	// One recent failure => still inside the backoff window.
	daemonStartFailures = 1
	lastDaemonStartAttempt = time.Now()
	if tryAutoStartDaemon(filepath.Join(t.TempDir(), "bd.sock")) {
		t.Fatalf("expected tryAutoStartDaemon to skip due to backoff")
	}
	// Clear backoff, then point at a live server: should succeed without
	// spawning anything.
	daemonStartFailures = 0
	lastDaemonStartAttempt = time.Time{}
	socketPath, cleanup := startTestRPCServer(t)
	defer cleanup()
	if !tryAutoStartDaemon(socketPath) {
		t.Fatalf("expected tryAutoStartDaemon true when daemon already healthy")
	}
}
// TestDaemonAutostart_MiscHelpers checks determineSocketPath's identity
// behavior for explicit paths and getDebounceDuration's handling of the
// flush-debounce config value (zero falls back to the 5s default).
func TestDaemonAutostart_MiscHelpers(t *testing.T) {
	if determineSocketPath("/x") != "/x" {
		t.Fatalf("determineSocketPath should be identity")
	}
	if err := config.Initialize(); err != nil {
		t.Fatalf("config.Initialize: %v", err)
	}
	// Restore the original config value after the test.
	old := config.GetDuration("flush-debounce")
	defer config.Set("flush-debounce", old)
	// Zero is treated as "unset" and yields the default.
	config.Set("flush-debounce", 0)
	if got := getDebounceDuration(); got != 5*time.Second {
		t.Fatalf("expected default debounce 5s, got %v", got)
	}
	config.Set("flush-debounce", 2*time.Second)
	if got := getDebounceDuration(); got != 2*time.Second {
		t.Fatalf("expected debounce 2s, got %v", got)
	}
}
// TestDaemonAutostart_EmitVerboseWarning table-tests which fallback reasons
// cause emitVerboseWarning to write to stderr: genuine failures warn, while
// deliberate opt-outs (worktree safety, --no-daemon flag) stay silent.
// The global daemonStatus is saved and restored.
func TestDaemonAutostart_EmitVerboseWarning(t *testing.T) {
	old := daemonStatus
	defer func() { daemonStatus = old }()
	daemonStatus.SocketPath = "/tmp/bd.sock"
	for _, tt := range []struct {
		reason      string
		shouldWrite bool
	}{
		{FallbackConnectFailed, true},
		{FallbackHealthFailed, true},
		{FallbackAutoStartDisabled, true},
		{FallbackAutoStartFailed, true},
		{FallbackDaemonUnsupported, true},
		{FallbackWorktreeSafety, false},
		{FallbackFlagNoDaemon, false},
	} {
		t.Run(tt.reason, func(t *testing.T) {
			daemonStatus.FallbackReason = tt.reason
			out := captureStderr(t, emitVerboseWarning)
			if tt.shouldWrite && out == "" {
				t.Fatalf("expected output")
			}
			if !tt.shouldWrite && out != "" {
				t.Fatalf("expected no output, got %q", out)
			}
		})
	}
}
// TestDaemonAutostart_StartDaemonProcess_Stubbed swaps the process-spawn
// and readiness seams for no-op stubs so startDaemonProcess's happy path
// can run without launching a real daemon. Stubs are restored on exit.
func TestDaemonAutostart_StartDaemonProcess_Stubbed(t *testing.T) {
	oldExec := execCommandFn
	oldWait := waitForSocketReadinessFn
	oldCfg := configureDaemonProcessFn
	defer func() {
		execCommandFn = oldExec
		waitForSocketReadinessFn = oldWait
		configureDaemonProcessFn = oldCfg
	}()
	// Spawn the test binary with a no-op test filter instead of a daemon.
	execCommandFn = func(string, ...string) *exec.Cmd {
		return exec.Command(os.Args[0], "-test.run=^$")
	}
	waitForSocketReadinessFn = func(string, time.Duration) bool { return true }
	configureDaemonProcessFn = func(*exec.Cmd) {}
	if !startDaemonProcess(filepath.Join(t.TempDir(), "bd.sock")) {
		t.Fatalf("expected startDaemonProcess true when readiness stubbed")
	}
}
// TestDaemonAutostart_RestartDaemonForVersionMismatch_Stubbed verifies that
// restartDaemonForVersionMismatch, with all process seams stubbed out,
// reports success and cleans up the stale PID file and socket it found.
// Mutated globals (dbPath and the stub seams) are saved and restored.
func TestDaemonAutostart_RestartDaemonForVersionMismatch_Stubbed(t *testing.T) {
	oldExec := execCommandFn
	oldWait := waitForSocketReadinessFn
	oldRun := isDaemonRunningFn
	oldCfg := configureDaemonProcessFn
	defer func() {
		execCommandFn = oldExec
		waitForSocketReadinessFn = oldWait
		isDaemonRunningFn = oldRun
		configureDaemonProcessFn = oldCfg
	}()
	tmpDir := t.TempDir()
	beadsDir := filepath.Join(tmpDir, ".beads")
	if err := os.MkdirAll(beadsDir, 0o750); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	// Point the global dbPath into the temp dir so the PID/socket paths
	// derived from it land there too.
	oldDB := dbPath
	defer func() { dbPath = oldDB }()
	dbPath = filepath.Join(beadsDir, "test.db")
	pidFile, err := getPIDFilePath()
	if err != nil {
		t.Fatalf("getPIDFilePath: %v", err)
	}
	sock := getSocketPath()
	// Plant stale artifacts that the restart path is expected to remove.
	if err := os.WriteFile(pidFile, []byte("999999\n"), 0o600); err != nil {
		t.Fatalf("WriteFile pid: %v", err)
	}
	if err := os.WriteFile(sock, []byte("stale"), 0o600); err != nil {
		t.Fatalf("WriteFile sock: %v", err)
	}
	execCommandFn = func(string, ...string) *exec.Cmd {
		return exec.Command(os.Args[0], "-test.run=^$")
	}
	waitForSocketReadinessFn = func(string, time.Duration) bool { return true }
	isDaemonRunningFn = func(string) (bool, int) { return false, 0 }
	configureDaemonProcessFn = func(*exec.Cmd) {}
	if !restartDaemonForVersionMismatch() {
		t.Fatalf("expected restartDaemonForVersionMismatch true when stubbed")
	}
	if _, err := os.Stat(pidFile); !os.IsNotExist(err) {
		t.Fatalf("expected pidfile removed")
	}
	if _, err := os.Stat(sock); !os.IsNotExist(err) {
		t.Fatalf("expected socket removed")
	}
}

View File

@@ -157,26 +157,23 @@ func TestDebouncer_MultipleSequentialTriggerCycles(t *testing.T) {
})
t.Cleanup(debouncer.Cancel)
awaitCount := func(want int32) {
deadline := time.Now().Add(500 * time.Millisecond)
for time.Now().Before(deadline) {
if got := atomic.LoadInt32(&count); got >= want {
return
}
time.Sleep(5 * time.Millisecond)
}
got := atomic.LoadInt32(&count)
t.Fatalf("timeout waiting for count=%d (got %d)", want, got)
debouncer.Trigger()
time.Sleep(40 * time.Millisecond)
if got := atomic.LoadInt32(&count); got != 1 {
t.Errorf("first cycle: got %d, want 1", got)
}
debouncer.Trigger()
awaitCount(1)
time.Sleep(40 * time.Millisecond)
if got := atomic.LoadInt32(&count); got != 2 {
t.Errorf("second cycle: got %d, want 2", got)
}
debouncer.Trigger()
awaitCount(2)
debouncer.Trigger()
awaitCount(3)
time.Sleep(40 * time.Millisecond)
if got := atomic.LoadInt32(&count); got != 3 {
t.Errorf("third cycle: got %d, want 3", got)
}
}
func TestDebouncer_CancelImmediatelyAfterTrigger(t *testing.T) {

View File

@@ -529,19 +529,6 @@ func performAutoImport(ctx context.Context, store storage.Storage, skipGit bool,
if skipGit {
mode = "local auto-import"
}
// Check backoff before attempting sync (skip for local mode)
if !skipGit {
jsonlPath := findJSONLPath()
if jsonlPath != "" {
beadsDir := filepath.Dir(jsonlPath)
if ShouldSkipSync(beadsDir) {
log.log("Skipping %s: in backoff period", mode)
return
}
}
}
log.log("Starting %s...", mode)
jsonlPath := findJSONLPath()
@@ -592,16 +579,14 @@ func performAutoImport(ctx context.Context, store storage.Storage, skipGit bool,
// Try sync branch first
pulled, err := syncBranchPull(importCtx, store, log)
if err != nil {
backoff := RecordSyncFailure(beadsDir, err.Error())
log.log("Sync branch pull failed: %v (backoff: %v)", err, backoff)
log.log("Sync branch pull failed: %v", err)
return
}
// If sync branch not configured, use regular pull
if !pulled {
if err := gitPull(importCtx); err != nil {
backoff := RecordSyncFailure(beadsDir, err.Error())
log.log("Pull failed: %v (backoff: %v)", err, backoff)
log.log("Pull failed: %v", err)
return
}
log.log("Pulled from remote")
@@ -637,8 +622,6 @@ func performAutoImport(ctx context.Context, store storage.Storage, skipGit bool,
if skipGit {
log.log("Local auto-import complete")
} else {
// Record success to clear backoff state
RecordSyncSuccess(beadsDir)
log.log("Auto-import complete")
}
}

View File

@@ -48,12 +48,12 @@ func TestSyncBranchCommitAndPush_NotConfigured(t *testing.T) {
// Create test issue
issue := &types.Issue{
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -122,12 +122,12 @@ func TestSyncBranchCommitAndPush_Success(t *testing.T) {
// Create test issue
issue := &types.Issue{
Title: "Test sync branch issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test sync branch issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -228,12 +228,12 @@ func TestSyncBranchCommitAndPush_EnvOverridesDB(t *testing.T) {
// Create test issue and export JSONL
issue := &types.Issue{
Title: "Env override issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Env override issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -303,12 +303,12 @@ func TestSyncBranchCommitAndPush_NoChanges(t *testing.T) {
}
issue := &types.Issue{
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -380,12 +380,12 @@ func TestSyncBranchCommitAndPush_WorktreeHealthCheck(t *testing.T) {
}
issue := &types.Issue{
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -497,7 +497,7 @@ func TestSyncBranchPull_Success(t *testing.T) {
if err := os.MkdirAll(remoteDir, 0755); err != nil {
t.Fatalf("Failed to create remote dir: %v", err)
}
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
runGitCmd(t, remoteDir, "init", "--bare")
// Create clone1 (will push changes)
clone1Dir := filepath.Join(tmpDir, "clone1")
@@ -528,12 +528,12 @@ func TestSyncBranchPull_Success(t *testing.T) {
// Create issue in clone1
issue := &types.Issue{
Title: "Test sync pull issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "Test sync pull issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
if err := store1.CreateIssue(ctx, issue, "test"); err != nil {
t.Fatalf("Failed to create issue: %v", err)
@@ -639,7 +639,7 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
tmpDir := t.TempDir()
remoteDir := filepath.Join(tmpDir, "remote")
os.MkdirAll(remoteDir, 0755)
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
runGitCmd(t, remoteDir, "init", "--bare")
// Clone1: Agent A
clone1Dir := filepath.Join(tmpDir, "clone1")
@@ -660,12 +660,12 @@ func TestSyncBranchIntegration_EndToEnd(t *testing.T) {
// Agent A creates issue
issue := &types.Issue{
Title: "E2E test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
Title: "E2E test issue",
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
}
store1.CreateIssue(ctx, issue, "agent-a")
issueID := issue.ID
@@ -914,7 +914,7 @@ func TestSyncBranchMultipleConcurrentClones(t *testing.T) {
tmpDir := t.TempDir()
remoteDir := filepath.Join(tmpDir, "remote")
os.MkdirAll(remoteDir, 0755)
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
runGitCmd(t, remoteDir, "init", "--bare")
syncBranch := "beads-sync"
@@ -1454,7 +1454,7 @@ func TestGitPushFromWorktree_FetchRebaseRetry(t *testing.T) {
// Create a "remote" bare repository
remoteDir := t.TempDir()
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
runGitCmd(t, remoteDir, "init", "--bare")
// Create first clone (simulates another developer's clone)
clone1Dir := t.TempDir()
@@ -1524,7 +1524,7 @@ func TestGitPushFromWorktree_FetchRebaseRetry(t *testing.T) {
// Now try to push from worktree - this should trigger the fetch-rebase-retry logic
// because the remote has commits that the local worktree doesn't have
err := gitPushFromWorktree(ctx, worktreePath, "beads-sync", "")
err := gitPushFromWorktree(ctx, worktreePath, "beads-sync")
if err != nil {
t.Fatalf("gitPushFromWorktree failed: %v (expected fetch-rebase-retry to succeed)", err)
}

View File

@@ -1,165 +0,0 @@
package main
import (
"encoding/json"
"os"
"path/filepath"
"sync"
"time"
)
// SyncState tracks daemon sync health for backoff and user hints.
// Stored in .beads/sync-state.json (gitignored, local-only).
type SyncState struct {
	// LastFailure is when the most recent sync attempt failed.
	LastFailure time.Time `json:"last_failure,omitempty"`
	// FailureCount is the number of consecutive failures; it indexes
	// backoffSchedule (clamped to the last entry).
	FailureCount int `json:"failure_count"`
	// BackoffUntil: sync attempts are skipped until this instant.
	BackoffUntil time.Time `json:"backoff_until,omitempty"`
	// NeedsManualSync is set after 3+ failures (likely a conflict the
	// daemon cannot resolve); preserved across daemon restarts.
	NeedsManualSync bool `json:"needs_manual_sync"`
	// FailureReason is the human-readable cause of the last failure.
	FailureReason string `json:"failure_reason,omitempty"`
}
const (
	// syncStateFile is the state file's name inside the .beads directory.
	syncStateFile = "sync-state.json"
	// Backoff schedule: 30s, 1m, 2m, 5m, 10m, 30m (cap)
	// NOTE(review): maxBackoffDuration mirrors the last backoffSchedule
	// entry but is not referenced in this file — confirm it is used elsewhere.
	maxBackoffDuration = 30 * time.Minute
	// Clear stale state after 24 hours
	staleStateThreshold = 24 * time.Hour
)
var (
	// backoffSchedule defines the exponential backoff durations;
	// RecordSyncFailure indexes it with FailureCount-1, clamped to the
	// final entry.
	backoffSchedule = []time.Duration{
		30 * time.Second,
		1 * time.Minute,
		2 * time.Minute,
		5 * time.Minute,
		10 * time.Minute,
		30 * time.Minute,
	}
	// syncStateMu protects concurrent access to the sync state file.
	syncStateMu sync.Mutex
)
// LoadSyncState reads .beads/sync-state.json and returns the decoded state.
// A missing, unreadable, or corrupt file yields the zero state. State whose
// last failure is older than staleStateThreshold is treated as expired:
// the file is deleted and the zero state is returned.
func LoadSyncState(beadsDir string) SyncState {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()

	statePath := filepath.Join(beadsDir, syncStateFile)
	raw, readErr := os.ReadFile(statePath) // #nosec G304 - path constructed from beadsDir
	if readErr != nil {
		return SyncState{}
	}

	var state SyncState
	if unmarshalErr := json.Unmarshal(raw, &state); unmarshalErr != nil {
		return SyncState{}
	}

	// Expire stale state: a failure recorded long ago no longer matters.
	if lf := state.LastFailure; !lf.IsZero() && time.Since(lf) > staleStateThreshold {
		_ = os.Remove(statePath)
		return SyncState{}
	}
	return state
}
// SaveSyncState persists state to .beads/sync-state.json. A fully-reset
// state (no failures recorded and no manual-sync flag) removes the file
// instead of writing an empty record.
func SaveSyncState(beadsDir string, state SyncState) error {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()

	statePath := filepath.Join(beadsDir, syncStateFile)
	if state.FailureCount == 0 && !state.NeedsManualSync {
		// Nothing worth keeping on disk.
		_ = os.Remove(statePath)
		return nil
	}

	encoded, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(statePath, encoded, 0600)
}
// ClearSyncState deletes the sync state file. A file that does not exist
// is not an error; any other removal failure is returned.
func ClearSyncState(beadsDir string) error {
	syncStateMu.Lock()
	defer syncStateMu.Unlock()

	if err := os.Remove(filepath.Join(beadsDir, syncStateFile)); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
// RecordSyncFailure notes a failed sync attempt in the state file and
// returns how long callers should wait before retrying. The delay follows
// backoffSchedule (indexed by the failure count, clamped to the final
// entry); three or more consecutive failures also set NeedsManualSync so
// the CLI can hint at a likely conflict.
// NOTE(review): LoadSyncState and SaveSyncState each take the lock
// separately, so two concurrent failures could drop an increment —
// presumably acceptable for a best-effort local hint file; confirm.
func RecordSyncFailure(beadsDir string, reason string) time.Duration {
	state := LoadSyncState(beadsDir)
	state.LastFailure = time.Now()
	state.FailureCount++
	state.FailureReason = reason

	// Clamp the schedule index to the last (longest) backoff entry.
	idx := state.FailureCount - 1
	if last := len(backoffSchedule) - 1; idx > last {
		idx = last
	}
	delay := backoffSchedule[idx]
	state.BackoffUntil = time.Now().Add(delay)

	// Repeated failures usually mean a conflict the daemon cannot
	// resolve on its own.
	if state.FailureCount >= 3 {
		state.NeedsManualSync = true
	}

	_ = SaveSyncState(beadsDir, state)
	return delay
}
// RecordSyncSuccess clears the sync state after a successful sync.
// Deleting the file resets both the backoff schedule and the
// NeedsManualSync hint; the removal error is deliberately ignored since
// the state is a best-effort local cache.
func RecordSyncSuccess(beadsDir string) {
	_ = ClearSyncState(beadsDir)
}
// ShouldSkipSync reports whether sync attempts are still inside the
// backoff window recorded by RecordSyncFailure. An unset BackoffUntil
// means no backoff is in effect.
func ShouldSkipSync(beadsDir string) bool {
	until := LoadSyncState(beadsDir).BackoffUntil
	return !until.IsZero() && time.Now().Before(until)
}
// ResetBackoffOnDaemonStart clears failure counters and backoff timing
// when the daemon starts, while keeping the NeedsManualSync flag and its
// reason so user-facing hints about unresolved conflicts survive the
// restart.
func ResetBackoffOnDaemonStart(beadsDir string) {
	state := LoadSyncState(beadsDir)
	if state.FailureCount == 0 && !state.NeedsManualSync {
		// Nothing recorded — nothing to reset.
		return
	}
	// Keep only the manual-sync hint; everything else starts fresh.
	_ = SaveSyncState(beadsDir, SyncState{
		NeedsManualSync: state.NeedsManualSync,
		FailureReason:   state.FailureReason,
	})
}

View File

@@ -8,7 +8,6 @@ import (
"context"
"encoding/json"
"io"
"log/slog"
"os"
"path/filepath"
"strings"
@@ -898,7 +897,11 @@ func setupDaemonTestEnvForDelete(t *testing.T) (context.Context, context.CancelF
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
log := daemonLogger{logger: slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{Level: slog.LevelInfo}))}
log := daemonLogger{
logFunc: func(format string, args ...interface{}) {
t.Logf("[daemon] "+format, args...)
},
}
server, _, err := startRPCServer(ctx, socketPath, testStore, tmpDir, testDBPath, log)
if err != nil {

View File

@@ -5,11 +5,9 @@ import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/routing"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
@@ -17,14 +15,6 @@ import (
"github.com/steveyegge/beads/internal/utils"
)
// getBeadsDir returns the .beads directory path, derived from the global
// dbPath. Returns the empty string when no database path has been
// resolved yet.
func getBeadsDir() string {
	if dbPath == "" {
		return ""
	}
	return filepath.Dir(dbPath)
}
// isChildOf returns true if childID is a hierarchical child of parentID.
// For example, "bd-abc.1" is a child of "bd-abc", and "bd-abc.1.2" is a child of "bd-abc.1".
func isChildOf(childID, parentID string) bool {
@@ -98,15 +88,9 @@ Examples:
resolveArgs = &rpc.ResolveIDArgs{ID: args[1]}
resp, err = daemonClient.ResolveID(resolveArgs)
if err != nil {
// Resolution failed - try auto-converting to external ref (bd-lfiu)
beadsDir := getBeadsDir()
if extRef := routing.ResolveToExternalRef(args[1], beadsDir); extRef != "" {
toID = extRef
isExternalRef = true
} else {
FatalErrorRespectJSON("resolving dependency ID %s: %v", args[1], err)
}
} else if err := json.Unmarshal(resp.Data, &toID); err != nil {
FatalErrorRespectJSON("resolving dependency ID %s: %v", args[1], err)
}
if err := json.Unmarshal(resp.Data, &toID); err != nil {
FatalErrorRespectJSON("unmarshaling resolved ID: %v", err)
}
}
@@ -127,14 +111,7 @@ Examples:
} else {
toID, err = utils.ResolvePartialID(ctx, store, args[1])
if err != nil {
// Resolution failed - try auto-converting to external ref (bd-lfiu)
beadsDir := getBeadsDir()
if extRef := routing.ResolveToExternalRef(args[1], beadsDir); extRef != "" {
toID = extRef
isExternalRef = true
} else {
FatalErrorRespectJSON("resolving dependency ID %s: %v", args[1], err)
}
FatalErrorRespectJSON("resolving dependency ID %s: %v", args[1], err)
}
}
}

View File

@@ -43,8 +43,8 @@ type doctorResult struct {
Checks []doctorCheck `json:"checks"`
OverallOK bool `json:"overall_ok"`
CLIVersion string `json:"cli_version"`
Timestamp string `json:"timestamp,omitempty"` // bd-9cc: ISO8601 timestamp for historical tracking
Platform map[string]string `json:"platform,omitempty"` // bd-9cc: platform info for debugging
Timestamp string `json:"timestamp,omitempty"` // bd-9cc: ISO8601 timestamp for historical tracking
Platform map[string]string `json:"platform,omitempty"` // bd-9cc: platform info for debugging
}
var (
@@ -353,42 +353,6 @@ func applyFixesInteractive(path string, issues []doctorCheck) {
// applyFixList applies a list of fixes and reports results
func applyFixList(path string, fixes []doctorCheck) {
// Apply fixes in a dependency-aware order.
// Rough dependency chain:
// permissions/daemon cleanup → config sanity → DB integrity/migrations → DB↔JSONL sync.
order := []string{
"Permissions",
"Daemon Health",
"Database Config",
"JSONL Config",
"Database Integrity",
"Database",
"Schema Compatibility",
"JSONL Integrity",
"DB-JSONL Sync",
}
priority := make(map[string]int, len(order))
for i, name := range order {
priority[name] = i
}
slices.SortStableFunc(fixes, func(a, b doctorCheck) int {
pa, oka := priority[a.Name]
if !oka {
pa = 1000
}
pb, okb := priority[b.Name]
if !okb {
pb = 1000
}
if pa < pb {
return -1
}
if pa > pb {
return 1
}
return 0
})
fixedCount := 0
errorCount := 0
@@ -409,9 +373,6 @@ func applyFixList(path string, fixes []doctorCheck) {
err = fix.Permissions(path)
case "Database":
err = fix.DatabaseVersion(path)
case "Database Integrity":
// Corruption detected - try recovery from JSONL
err = fix.DatabaseCorruptionRecovery(path)
case "Schema Compatibility":
err = fix.SchemaCompatibility(path)
case "Repo Fingerprint":
@@ -426,8 +387,6 @@ func applyFixList(path string, fixes []doctorCheck) {
err = fix.DatabaseConfig(path)
case "JSONL Config":
err = fix.LegacyJSONLConfig(path)
case "JSONL Integrity":
err = fix.JSONLIntegrity(path)
case "Deletions Manifest":
err = fix.MigrateTombstones(path)
case "Untracked Files":
@@ -473,10 +432,6 @@ func applyFixList(path string, fixes []doctorCheck) {
// No auto-fix: compaction requires agent review
fmt.Printf(" ⚠ Run 'bd compact --analyze' to review candidates\n")
continue
case "Large Database":
// No auto-fix: pruning deletes data, must be user-controlled
fmt.Printf(" ⚠ Run 'bd cleanup --older-than 90' to prune old closed issues\n")
continue
default:
fmt.Printf(" ⚠ No automatic fix available for %s\n", check.Name)
fmt.Printf(" Manual fix: %s\n", check.Fix)
@@ -732,13 +687,6 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, configValuesCheck)
// Don't fail overall check for config value warnings, just warn
// Check 7b: JSONL integrity (malformed lines, missing IDs)
jsonlIntegrityCheck := convertWithCategory(doctor.CheckJSONLIntegrity(path), doctor.CategoryData)
result.Checks = append(result.Checks, jsonlIntegrityCheck)
if jsonlIntegrityCheck.Status == statusWarning || jsonlIntegrityCheck.Status == statusError {
result.OverallOK = false
}
// Check 8: Daemon health
daemonCheck := convertWithCategory(doctor.CheckDaemonStatus(path, Version), doctor.CategoryRuntime)
result.Checks = append(result.Checks, daemonCheck)
@@ -802,16 +750,6 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, mergeDriverCheck)
// Don't fail overall check for merge driver, just warn
// Check 15a: Git working tree cleanliness (AGENTS.md hygiene)
gitWorkingTreeCheck := convertWithCategory(doctor.CheckGitWorkingTree(path), doctor.CategoryGit)
result.Checks = append(result.Checks, gitWorkingTreeCheck)
// Don't fail overall check for dirty working tree, just warn
// Check 15b: Git upstream sync (ahead/behind/diverged)
gitUpstreamCheck := convertWithCategory(doctor.CheckGitUpstream(path), doctor.CategoryGit)
result.Checks = append(result.Checks, gitUpstreamCheck)
// Don't fail overall check for upstream drift, just warn
// Check 16: Metadata.json version tracking (bd-u4sb)
metadataCheck := convertWithCategory(doctor.CheckMetadataVersionTracking(path, Version), doctor.CategoryMetadata)
result.Checks = append(result.Checks, metadataCheck)
@@ -899,12 +837,6 @@ func runDiagnostics(path string) doctorResult {
result.Checks = append(result.Checks, compactionCheck)
// Info only, not a warning - compaction requires human review
// Check 29: Database size (pruning suggestion)
// Note: This check has no auto-fix - pruning is destructive and user-controlled
sizeCheck := convertDoctorCheck(doctor.CheckDatabaseSize(path))
result.Checks = append(result.Checks, sizeCheck)
// Don't fail overall check for size warning, just inform
return result
}

View File

@@ -316,10 +316,6 @@ func checkMetadataConfigValues(repoPath string) []string {
// Validate jsonl_export filename
if cfg.JSONLExport != "" {
switch cfg.JSONLExport {
case "deletions.jsonl", "interactions.jsonl", "molecules.jsonl":
issues = append(issues, fmt.Sprintf("metadata.json jsonl_export: %q is a system file and should not be configured as a JSONL export (expected issues.jsonl)", cfg.JSONLExport))
}
if strings.Contains(cfg.JSONLExport, string(os.PathSeparator)) || strings.Contains(cfg.JSONLExport, "/") {
issues = append(issues, fmt.Sprintf("metadata.json jsonl_export: %q should be a filename, not a path", cfg.JSONLExport))
}
@@ -357,7 +353,7 @@ func checkDatabaseConfigValues(repoPath string) []string {
}
// Open database in read-only mode
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err != nil {
return issues // Can't open database, skip
}

View File

@@ -213,21 +213,6 @@ func TestCheckMetadataConfigValues(t *testing.T) {
t.Error("expected issues for wrong jsonl extension")
}
})
t.Run("jsonl_export cannot be system file", func(t *testing.T) {
metadataContent := `{
"database": "beads.db",
"jsonl_export": "interactions.jsonl"
}`
if err := os.WriteFile(filepath.Join(beadsDir, "metadata.json"), []byte(metadataContent), 0644); err != nil {
t.Fatalf("failed to write metadata.json: %v", err)
}
issues := checkMetadataConfigValues(tmpDir)
if len(issues) == 0 {
t.Error("expected issues for system jsonl_export")
}
})
}
func contains(s, substr string) bool {

View File

@@ -155,9 +155,9 @@ func CheckSchemaCompatibility(path string) DoctorCheck {
}
}
// Open database (bd-ckvw: schema probe)
// Open database (bd-ckvw: This will run migrations and schema probe)
// Note: We can't use the global 'store' because doctor can check arbitrary paths
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", "file:"+dbPath+"?_pragma=foreign_keys(ON)&_pragma=busy_timeout(30000)")
if err != nil {
return DoctorCheck{
Name: "Schema Compatibility",
@@ -244,30 +244,13 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
}
// Open database in read-only mode for integrity check
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
if err != nil {
// Check if JSONL recovery is possible
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
if jsonlErr != nil {
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
}
if jsonlErr == nil && jsonlCount > 0 {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: fmt.Sprintf("Failed to open database (JSONL has %d issues for recovery)", jsonlCount),
Detail: err.Error(),
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
}
}
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Failed to open database for integrity check",
Detail: err.Error(),
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
}
}
defer db.Close()
@@ -276,28 +259,11 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
// This checks the entire database for corruption
rows, err := db.Query("PRAGMA integrity_check")
if err != nil {
// Check if JSONL recovery is possible
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
if jsonlErr != nil {
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
}
if jsonlErr == nil && jsonlCount > 0 {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: fmt.Sprintf("Failed to run integrity check (JSONL has %d issues for recovery)", jsonlCount),
Detail: err.Error(),
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
}
}
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Failed to run integrity check",
Detail: err.Error(),
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
}
}
defer rows.Close()
@@ -320,59 +286,28 @@ func CheckDatabaseIntegrity(path string) DoctorCheck {
}
}
// Any other result indicates corruption - check if JSONL recovery is possible
jsonlCount, _, jsonlErr := CountJSONLIssues(filepath.Join(beadsDir, "issues.jsonl"))
if jsonlErr != nil {
// Try alternate name
jsonlCount, _, jsonlErr = CountJSONLIssues(filepath.Join(beadsDir, "beads.jsonl"))
}
if jsonlErr == nil && jsonlCount > 0 {
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: fmt.Sprintf("Database corruption detected (JSONL has %d issues for recovery)", jsonlCount),
Detail: strings.Join(results, "; "),
Fix: "Run 'bd doctor --fix' to recover from JSONL backup",
}
}
// Any other result indicates corruption
return DoctorCheck{
Name: "Database Integrity",
Status: StatusError,
Message: "Database corruption detected",
Detail: strings.Join(results, "; "),
Fix: "Run 'bd doctor --fix' to back up the corrupt DB and rebuild from JSONL (if available), or restore from backup",
Fix: "Database may need recovery. Export with 'bd export' if possible, then restore from backup or reinitialize",
}
}
// CheckDatabaseJSONLSync checks if database and JSONL are in sync
func CheckDatabaseJSONLSync(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
// Resolve database path (respects metadata.json override).
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
}
// Find JSONL file (respects metadata.json override when set).
jsonlPath := ""
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
p := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
}
if jsonlPath == "" {
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
testPath := filepath.Join(beadsDir, name)
if _, err := os.Stat(testPath); err == nil {
jsonlPath = testPath
break
}
// Find JSONL file
var jsonlPath string
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
testPath := filepath.Join(beadsDir, name)
if _, err := os.Stat(testPath); err == nil {
jsonlPath = testPath
break
}
}
@@ -398,7 +333,7 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
jsonlCount, jsonlPrefixes, jsonlErr := CountJSONLIssues(jsonlPath)
// Single database open for all queries (instead of 3 separate opens)
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
// Database can't be opened. If JSONL has issues, suggest recovery.
if jsonlErr == nil && jsonlCount > 0 {
@@ -455,16 +390,11 @@ func CheckDatabaseJSONLSync(path string) DoctorCheck {
// Use JSONL error if we got it earlier
if jsonlErr != nil {
fixMsg := "Run 'bd doctor --fix' to attempt recovery"
if strings.Contains(jsonlErr.Error(), "malformed") {
fixMsg = "Run 'bd doctor --fix' to back up and regenerate the JSONL from the database"
}
return DoctorCheck{
Name: "DB-JSONL Sync",
Status: StatusWarning,
Message: "Unable to read JSONL file",
Detail: jsonlErr.Error(),
Fix: fixMsg,
}
}
@@ -571,7 +501,7 @@ func FixDBJSONLSync(path string) error {
// getDatabaseVersionFromPath reads the database version from the given path
func getDatabaseVersionFromPath(dbPath string) string {
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err != nil {
return "unknown"
}
@@ -690,92 +620,3 @@ func isNoDbModeConfigured(beadsDir string) bool {
return cfg.NoDb
}
// CheckDatabaseSize warns when the database has accumulated many closed issues.
// This is purely informational - pruning is NEVER auto-fixed because it
// permanently deletes data. Users must explicitly run 'bd cleanup' to prune.
//
// Config: doctor.suggest_pruning_issue_count (default: 5000, 0 = disabled)
//
// DESIGN NOTE: This check intentionally has NO auto-fix. Unlike other doctor
// checks that fix configuration or sync issues, pruning is destructive and
// irreversible. The user must make an explicit decision to delete their
// closed issue history. We only provide guidance, never action.
func CheckDatabaseSize(path string) DoctorCheck {
beadsDir := filepath.Join(path, ".beads")
// Get database path
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// If no database, skip this check
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "N/A (no database)",
}
}
// Read threshold from config (default 5000, 0 = disabled)
threshold := 5000
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro&_pragma=busy_timeout(30000)")
if err != nil {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "N/A (unable to open database)",
}
}
defer db.Close()
// Check for custom threshold in config table
var thresholdStr string
err = db.QueryRow("SELECT value FROM config WHERE key = ?", "doctor.suggest_pruning_issue_count").Scan(&thresholdStr)
if err == nil {
if _, err := fmt.Sscanf(thresholdStr, "%d", &threshold); err != nil {
threshold = 5000 // Reset to default on parse error
}
}
// If disabled, return OK
if threshold == 0 {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "Check disabled (threshold = 0)",
}
}
// Count closed issues
var closedCount int
err = db.QueryRow("SELECT COUNT(*) FROM issues WHERE status = 'closed'").Scan(&closedCount)
if err != nil {
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: "N/A (unable to count issues)",
}
}
// Check against threshold
if closedCount > threshold {
return DoctorCheck{
Name: "Large Database",
Status: StatusWarning,
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
Detail: "Large number of closed issues may impact performance",
Fix: "Consider running 'bd cleanup --older-than 90' to prune old closed issues",
}
}
return DoctorCheck{
Name: "Large Database",
Status: StatusOK,
Message: fmt.Sprintf("%d closed issues (threshold: %d)", closedCount, threshold),
}
}

View File

@@ -12,13 +12,6 @@ import (
// This prevents fork bombs when tests call functions that execute bd subcommands.
var ErrTestBinary = fmt.Errorf("running as test binary - cannot execute bd subcommands")
func newBdCmd(bdBinary string, args ...string) *exec.Cmd {
fullArgs := append([]string{"--no-daemon"}, args...)
cmd := exec.Command(bdBinary, fullArgs...) // #nosec G204 -- bdBinary from validated executable path
cmd.Env = append(os.Environ(), "BEADS_NO_DAEMON=1")
return cmd
}
// getBdBinary returns the path to the bd binary to use for fix operations.
// It prefers the current executable to avoid command injection attacks.
// Returns ErrTestBinary if running as a test binary to prevent fork bombs.

View File

@@ -3,6 +3,7 @@ package fix
import (
"fmt"
"os"
"os/exec"
"path/filepath"
)
@@ -35,7 +36,7 @@ func Daemon(path string) error {
}
// Run bd daemons killall to clean up stale daemons
cmd := newBdCmd(bdBinary, "daemons", "killall")
cmd := exec.Command(bdBinary, "daemons", "killall") // #nosec G204 -- bdBinary from validated executable path
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -32,13 +32,6 @@ func DatabaseConfig(path string) error {
fixed := false
// Never treat system JSONL files as a JSONL export configuration.
if isSystemJSONLFilename(cfg.JSONLExport) {
fmt.Printf(" Updating jsonl_export: %s → issues.jsonl\n", cfg.JSONLExport)
cfg.JSONLExport = "issues.jsonl"
fixed = true
}
// Check if configured JSONL exists
if cfg.JSONLExport != "" {
jsonlPath := cfg.JSONLPath(beadsDir)
@@ -106,15 +99,7 @@ func findActualJSONLFile(beadsDir string) string {
strings.Contains(lowerName, ".orig") ||
strings.Contains(lowerName, ".bak") ||
strings.Contains(lowerName, "~") ||
strings.HasPrefix(lowerName, "backup_") ||
// System files are not JSONL exports.
name == "deletions.jsonl" ||
name == "interactions.jsonl" ||
name == "molecules.jsonl" ||
// Git merge conflict artifacts (e.g., issues.base.jsonl, issues.left.jsonl)
strings.Contains(lowerName, ".base.jsonl") ||
strings.Contains(lowerName, ".left.jsonl") ||
strings.Contains(lowerName, ".right.jsonl") {
strings.HasPrefix(lowerName, "backup_") {
continue
}
@@ -136,15 +121,6 @@ func findActualJSONLFile(beadsDir string) string {
return candidates[0]
}
func isSystemJSONLFilename(name string) bool {
switch name {
case "deletions.jsonl", "interactions.jsonl", "molecules.jsonl":
return true
default:
return false
}
}
// LegacyJSONLConfig migrates from legacy beads.jsonl to canonical issues.jsonl.
// This renames the file, updates metadata.json, and updates .gitattributes if present.
// bd-6xd: issues.jsonl is the canonical filename

View File

@@ -220,53 +220,3 @@ func TestLegacyJSONLConfig_UpdatesGitattributes(t *testing.T) {
t.Errorf("Expected .gitattributes to reference issues.jsonl, got: %q", string(content))
}
}
// TestFindActualJSONLFile_SkipsSystemFiles ensures system JSONL files are never treated as JSONL exports.
func TestFindActualJSONLFile_SkipsSystemFiles(t *testing.T) {
tmpDir := t.TempDir()
// Only system files → no candidates.
if err := os.WriteFile(filepath.Join(tmpDir, "interactions.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
t.Fatal(err)
}
if got := findActualJSONLFile(tmpDir); got != "" {
t.Fatalf("expected empty result, got %q", got)
}
// System + legacy export → legacy wins.
if err := os.WriteFile(filepath.Join(tmpDir, "beads.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
t.Fatal(err)
}
if got := findActualJSONLFile(tmpDir); got != "beads.jsonl" {
t.Fatalf("expected beads.jsonl, got %q", got)
}
}
func TestDatabaseConfigFix_RejectsSystemJSONLExport(t *testing.T) {
tmpDir := t.TempDir()
beadsDir := filepath.Join(tmpDir, ".beads")
if err := os.Mkdir(beadsDir, 0755); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err)
}
if err := os.WriteFile(filepath.Join(beadsDir, "interactions.jsonl"), []byte(`{"id":"x"}`), 0644); err != nil {
t.Fatalf("Failed to create interactions.jsonl: %v", err)
}
cfg := &configfile.Config{Database: "beads.db", JSONLExport: "interactions.jsonl"}
if err := cfg.Save(beadsDir); err != nil {
t.Fatalf("Failed to save config: %v", err)
}
if err := DatabaseConfig(tmpDir); err != nil {
t.Fatalf("DatabaseConfig failed: %v", err)
}
updated, err := configfile.Load(beadsDir)
if err != nil {
t.Fatalf("Failed to load updated config: %v", err)
}
if updated.JSONLExport != "issues.jsonl" {
t.Fatalf("expected issues.jsonl, got %q", updated.JSONLExport)
}
}

View File

@@ -1,116 +0,0 @@
package fix
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
)
// DatabaseIntegrity attempts to recover from database corruption by:
// 1. Backing up the corrupt database (and WAL/SHM if present)
// 2. Re-initializing the database from the working tree JSONL export
//
// This is intentionally conservative: it will not delete JSONL, and it preserves the
// original DB as a backup for forensic recovery.
func DatabaseIntegrity(path string) error {
if err := validateBeadsWorkspace(path); err != nil {
return err
}
absPath, err := filepath.Abs(path)
if err != nil {
return fmt.Errorf("failed to resolve path: %w", err)
}
beadsDir := filepath.Join(absPath, ".beads")
// Best-effort: stop any running daemon to reduce the chance of DB file locks.
_ = Daemon(absPath)
// Resolve database path (respects metadata.json database override).
var dbPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
} else {
dbPath = filepath.Join(beadsDir, beads.CanonicalDatabaseName)
}
// Find JSONL source of truth.
jsonlPath := ""
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
candidate := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(candidate); err == nil {
jsonlPath = candidate
}
}
}
if jsonlPath == "" {
for _, name := range []string{"issues.jsonl", "beads.jsonl"} {
candidate := filepath.Join(beadsDir, name)
if _, err := os.Stat(candidate); err == nil {
jsonlPath = candidate
break
}
}
}
if jsonlPath == "" {
return fmt.Errorf("cannot auto-recover: no JSONL export found in %s", beadsDir)
}
// Back up corrupt DB and its sidecar files.
ts := time.Now().UTC().Format("20060102T150405Z")
backupDB := dbPath + "." + ts + ".corrupt.backup.db"
if err := moveFile(dbPath, backupDB); err != nil {
// Retry once after attempting to kill daemons again (helps on platforms with strict file locks).
_ = Daemon(absPath)
if err2 := moveFile(dbPath, backupDB); err2 != nil {
// Prefer the original error (more likely root cause).
return fmt.Errorf("failed to back up database: %w", err)
}
}
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
sidecar := dbPath + suffix
if _, err := os.Stat(sidecar); err == nil {
_ = moveFile(sidecar, backupDB+suffix) // best effort
}
}
// Rebuild by importing from the working tree JSONL into a fresh database.
bdBinary, err := getBdBinary()
if err != nil {
return err
}
// Use import (not init) so we always hydrate from the working tree JSONL, not git-tracked blobs.
args := []string{"--db", dbPath, "import", "-i", jsonlPath, "--force", "--no-git-history"}
cmd := newBdCmd(bdBinary, args...)
cmd.Dir = absPath
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
// Best-effort rollback: attempt to restore the original DB, while preserving the backup.
failedTS := time.Now().UTC().Format("20060102T150405Z")
if _, statErr := os.Stat(dbPath); statErr == nil {
failedDB := dbPath + "." + failedTS + ".failed.init.db"
_ = moveFile(dbPath, failedDB)
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
_ = moveFile(dbPath+suffix, failedDB+suffix)
}
}
_ = copyFile(backupDB, dbPath)
for _, suffix := range []string{"-wal", "-shm", "-journal"} {
if _, statErr := os.Stat(backupDB + suffix); statErr == nil {
_ = copyFile(backupDB+suffix, dbPath+suffix)
}
}
return fmt.Errorf("failed to rebuild database from JSONL: %w (backup: %s)", err, backupDB)
}
return nil
}

View File

@@ -1,57 +0,0 @@
package fix
import (
"errors"
"fmt"
"io"
"os"
"syscall"
)
var (
renameFile = os.Rename
removeFile = os.Remove
openFileRO = os.Open
openFileRW = os.OpenFile
)
func moveFile(src, dst string) error {
if err := renameFile(src, dst); err == nil {
return nil
} else if isEXDEV(err) {
if err := copyFile(src, dst); err != nil {
return err
}
if err := removeFile(src); err != nil {
return fmt.Errorf("failed to remove source after copy: %w", err)
}
return nil
} else {
return err
}
}
func copyFile(src, dst string) error {
in, err := openFileRO(src) // #nosec G304 -- src is within the workspace
if err != nil {
return err
}
defer in.Close()
out, err := openFileRW(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer func() { _ = out.Close() }()
if _, err := io.Copy(out, in); err != nil {
return err
}
return out.Close()
}
func isEXDEV(err error) bool {
var linkErr *os.LinkError
if errors.As(err, &linkErr) {
return errors.Is(linkErr.Err, syscall.EXDEV)
}
return errors.Is(err, syscall.EXDEV)
}

View File

@@ -1,71 +0,0 @@
package fix
import (
"errors"
"os"
"path/filepath"
"syscall"
"testing"
)
func TestMoveFile_EXDEV_FallsBackToCopy(t *testing.T) {
root := t.TempDir()
src := filepath.Join(root, "src.txt")
dst := filepath.Join(root, "dst.txt")
if err := os.WriteFile(src, []byte("hello"), 0644); err != nil {
t.Fatal(err)
}
oldRename := renameFile
defer func() { renameFile = oldRename }()
renameFile = func(oldpath, newpath string) error {
return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
}
if err := moveFile(src, dst); err != nil {
t.Fatalf("moveFile failed: %v", err)
}
if _, err := os.Stat(src); !os.IsNotExist(err) {
t.Fatalf("expected src to be removed, stat err=%v", err)
}
data, err := os.ReadFile(dst)
if err != nil {
t.Fatalf("read dst: %v", err)
}
if string(data) != "hello" {
t.Fatalf("dst contents=%q", string(data))
}
}
func TestMoveFile_EXDEV_CopyFails_LeavesSource(t *testing.T) {
root := t.TempDir()
src := filepath.Join(root, "src.txt")
dst := filepath.Join(root, "dst.txt")
if err := os.WriteFile(src, []byte("hello"), 0644); err != nil {
t.Fatal(err)
}
oldRename := renameFile
oldOpenRW := openFileRW
defer func() {
renameFile = oldRename
openFileRW = oldOpenRW
}()
renameFile = func(oldpath, newpath string) error {
return &os.LinkError{Op: "rename", Old: oldpath, New: newpath, Err: syscall.EXDEV}
}
openFileRW = func(name string, flag int, perm os.FileMode) (*os.File, error) {
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOSPC}
}
err := moveFile(src, dst)
if err == nil {
t.Fatalf("expected error")
}
if !errors.Is(err, syscall.ENOSPC) {
t.Fatalf("expected ENOSPC, got %v", err)
}
if _, err := os.Stat(src); err != nil {
t.Fatalf("expected src to remain, stat err=%v", err)
}
}

View File

@@ -28,7 +28,7 @@ func GitHooks(path string) error {
}
// Run bd hooks install
cmd := newBdCmd(bdBinary, "hooks", "install")
cmd := exec.Command(bdBinary, "hooks", "install") // #nosec G204 -- bdBinary from validated executable path
cmd.Dir = path // Set working directory without changing process dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -1,87 +0,0 @@
package fix
import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/utils"
)
// JSONLIntegrity backs up a malformed JSONL export and regenerates it from the database.
// This is safe only when a database exists and is readable.
func JSONLIntegrity(path string) error {
if err := validateBeadsWorkspace(path); err != nil {
return err
}
absPath, err := filepath.Abs(path)
if err != nil {
return fmt.Errorf("failed to resolve path: %w", err)
}
beadsDir := filepath.Join(absPath, ".beads")
// Resolve db path.
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
}
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
return fmt.Errorf("cannot auto-repair JSONL: no database found")
}
// Resolve JSONL export path.
jsonlPath := ""
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
p := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
}
if jsonlPath == "" {
p := utils.FindJSONLInDir(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
if jsonlPath == "" {
return fmt.Errorf("cannot auto-repair JSONL: no JSONL file found")
}
// Back up the JSONL.
ts := time.Now().UTC().Format("20060102T150405Z")
backup := jsonlPath + "." + ts + ".corrupt.backup.jsonl"
if err := moveFile(jsonlPath, backup); err != nil {
return fmt.Errorf("failed to back up JSONL: %w", err)
}
binary, err := getBdBinary()
if err != nil {
_ = moveFile(backup, jsonlPath)
return err
}
// Re-export from DB.
cmd := newBdCmd(binary, "--db", dbPath, "export", "-o", jsonlPath, "--force")
cmd.Dir = absPath
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
// Best-effort rollback: restore the original JSONL, but keep the backup.
failedTS := time.Now().UTC().Format("20060102T150405Z")
if _, statErr := os.Stat(jsonlPath); statErr == nil {
failed := jsonlPath + "." + failedTS + ".failed.regen.jsonl"
_ = moveFile(jsonlPath, failed)
}
_ = copyFile(backup, jsonlPath)
return fmt.Errorf("failed to regenerate JSONL from database: %w (backup: %s)", err, backup)
}
return nil
}

View File

@@ -3,10 +3,8 @@ package fix
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
)
// DatabaseVersion fixes database version mismatches by running bd migrate,
@@ -25,15 +23,12 @@ func DatabaseVersion(path string) error {
// Check if database exists - if not, run init instead of migrate (bd-4h9)
beadsDir := filepath.Join(path, ".beads")
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
dbPath = cfg.DatabasePath(beadsDir)
}
dbPath := filepath.Join(beadsDir, "beads.db")
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
// No database - this is a fresh clone, run bd init
fmt.Println("→ No database found, running 'bd init' to hydrate from JSONL...")
cmd := newBdCmd(bdBinary, "--db", dbPath, "init")
cmd := exec.Command(bdBinary, "init") // #nosec G204 -- bdBinary from validated executable path
cmd.Dir = path
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -46,8 +41,8 @@ func DatabaseVersion(path string) error {
}
// Database exists - run bd migrate
cmd := newBdCmd(bdBinary, "--db", dbPath, "migrate")
cmd.Dir = path // Set working directory without changing process dir
cmd := exec.Command(bdBinary, "migrate") // #nosec G204 -- bdBinary from validated executable path
cmd.Dir = path // Set working directory without changing process dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -1,81 +0,0 @@
package fix
import (
"fmt"
"os"
"os/exec"
"path/filepath"
)
// DatabaseCorruptionRecovery recovers a corrupted database from JSONL backup.
// It backs up the corrupted database, deletes it, and re-imports from JSONL.
func DatabaseCorruptionRecovery(path string) error {
// Validate workspace
if err := validateBeadsWorkspace(path); err != nil {
return err
}
beadsDir := filepath.Join(path, ".beads")
dbPath := filepath.Join(beadsDir, "beads.db")
// Check if database exists
if _, err := os.Stat(dbPath); os.IsNotExist(err) {
return fmt.Errorf("no database to recover")
}
// Find JSONL file
jsonlPath := findJSONLPath(beadsDir)
if jsonlPath == "" {
return fmt.Errorf("no JSONL backup found - cannot recover (try restoring from git history)")
}
// Count issues in JSONL
issueCount, err := countJSONLIssues(jsonlPath)
if err != nil {
return fmt.Errorf("failed to read JSONL: %w", err)
}
if issueCount == 0 {
return fmt.Errorf("JSONL is empty - cannot recover (try restoring from git history)")
}
// Backup corrupted database
backupPath := dbPath + ".corrupt"
fmt.Printf(" Backing up corrupted database to %s\n", filepath.Base(backupPath))
if err := os.Rename(dbPath, backupPath); err != nil {
return fmt.Errorf("failed to backup corrupted database: %w", err)
}
// Get bd binary path
bdBinary, err := getBdBinary()
if err != nil {
// Restore corrupted database on failure
_ = os.Rename(backupPath, dbPath)
return err
}
// Run bd import with --rename-on-import to handle prefix mismatches
fmt.Printf(" Recovering %d issues from %s\n", issueCount, filepath.Base(jsonlPath))
cmd := exec.Command(bdBinary, "import", "-i", jsonlPath, "--rename-on-import") // #nosec G204
cmd.Dir = path
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
// Keep backup on failure
fmt.Printf(" Warning: recovery failed, corrupted database preserved at %s\n", filepath.Base(backupPath))
return fmt.Errorf("failed to import from JSONL: %w", err)
}
// Run migrate to set version metadata
migrateCmd := exec.Command(bdBinary, "migrate") // #nosec G204
migrateCmd.Dir = path
migrateCmd.Stdout = os.Stdout
migrateCmd.Stderr = os.Stderr
if err := migrateCmd.Run(); err != nil {
// Non-fatal - import succeeded, version just won't be set
fmt.Printf(" Warning: migration failed (non-fatal): %v\n", err)
}
fmt.Printf(" Recovered %d issues from JSONL backup\n", issueCount)
return nil
}

View File

@@ -3,6 +3,7 @@ package fix
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
)
@@ -30,9 +31,9 @@ func readLineUnbuffered() (string, error) {
// RepoFingerprint fixes repo fingerprint mismatches by prompting the user
// for which action to take. This is interactive because the consequences
// differ significantly between options:
// 1. Update repo ID (if URL changed or bd upgraded)
// 2. Reinitialize database (if wrong database was copied)
// 3. Skip (do nothing)
// 1. Update repo ID (if URL changed or bd upgraded)
// 2. Reinitialize database (if wrong database was copied)
// 3. Skip (do nothing)
func RepoFingerprint(path string) error {
// Validate workspace
if err := validateBeadsWorkspace(path); err != nil {
@@ -66,7 +67,7 @@ func RepoFingerprint(path string) error {
case "1":
// Run bd migrate --update-repo-id
fmt.Println(" → Running 'bd migrate --update-repo-id'...")
cmd := newBdCmd(bdBinary, "migrate", "--update-repo-id")
cmd := exec.Command(bdBinary, "migrate", "--update-repo-id") // #nosec G204 -- bdBinary from validated executable path
cmd.Dir = path
cmd.Stdin = os.Stdin // Allow user to respond to migrate's confirmation prompt
cmd.Stdout = os.Stdout
@@ -104,7 +105,7 @@ func RepoFingerprint(path string) error {
_ = os.Remove(dbPath + "-shm")
fmt.Println(" → Running 'bd init'...")
cmd := newBdCmd(bdBinary, "init", "--quiet")
cmd := exec.Command(bdBinary, "init", "--quiet") // #nosec G204 -- bdBinary from validated executable path
cmd.Dir = path
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

View File

@@ -1,52 +0,0 @@
package fix
import (
"fmt"
"os"
"strings"
"time"
)
// sqliteConnString builds a DSN for the SQLite driver from a filesystem path
// or an existing "file:" URI, ensuring foreign_keys, busy_timeout, and the
// sqlite time format are configured. A blank path yields "". The busy timeout
// defaults to 30s and may be overridden via the BD_LOCK_TIMEOUT duration
// environment variable (invalid values are ignored).
func sqliteConnString(path string, readOnly bool) string {
	trimmed := strings.TrimSpace(path)
	if trimmed == "" {
		return ""
	}

	timeout := 30 * time.Second
	if raw := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); raw != "" {
		if parsed, err := time.ParseDuration(raw); err == nil {
			timeout = parsed
		}
	}
	timeoutMs := int64(timeout / time.Millisecond)

	// Caller supplied a file: URI already — append only the missing parameters.
	if strings.HasPrefix(trimmed, "file:") {
		conn := trimmed
		sep := "?"
		if strings.Contains(conn, "?") {
			sep = "&"
		}
		addParam := func(param string) {
			conn += sep + param
			sep = "&"
		}
		if readOnly && !strings.Contains(conn, "mode=") {
			addParam("mode=ro")
		}
		if !strings.Contains(conn, "_pragma=busy_timeout") {
			addParam(fmt.Sprintf("_pragma=busy_timeout(%d)", timeoutMs))
		}
		if !strings.Contains(conn, "_pragma=foreign_keys") {
			addParam("_pragma=foreign_keys(ON)")
		}
		if !strings.Contains(conn, "_time_format=") {
			addParam("_time_format=sqlite")
		}
		return conn
	}

	// Plain filesystem path: emit the full canonical DSN.
	mode := ""
	if readOnly {
		mode = "mode=ro&"
	}
	return fmt.Sprintf("file:%s?%s_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", trimmed, mode, timeoutMs)
}

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
_ "github.com/ncruces/go-sqlite3/driver"
@@ -37,23 +38,13 @@ func DBJSONLSync(path string) error {
// Find JSONL file
var jsonlPath string
if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
p := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(p); err == nil {
jsonlPath = p
}
}
}
if jsonlPath == "" {
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
issuesJSONL := filepath.Join(beadsDir, "issues.jsonl")
beadsJSONL := filepath.Join(beadsDir, "beads.jsonl")
if _, err := os.Stat(issuesJSONL); err == nil {
jsonlPath = issuesJSONL
} else if _, err := os.Stat(beadsJSONL); err == nil {
jsonlPath = beadsJSONL
}
if _, err := os.Stat(issuesJSONL); err == nil {
jsonlPath = issuesJSONL
} else if _, err := os.Stat(beadsJSONL); err == nil {
jsonlPath = beadsJSONL
}
// Check if both database and JSONL exist
@@ -111,36 +102,21 @@ func DBJSONLSync(path string) error {
return err
}
// Run the appropriate sync command
var cmd *exec.Cmd
if syncDirection == "export" {
// Export DB to JSONL file (must specify -o to write to file, not stdout)
jsonlOutputPath := jsonlPath
exportCmd := newBdCmd(bdBinary, "--db", dbPath, "export", "-o", jsonlOutputPath, "--force")
exportCmd.Dir = path // Set working directory without changing process dir
exportCmd.Stdout = os.Stdout
exportCmd.Stderr = os.Stderr
if err := exportCmd.Run(); err != nil {
return fmt.Errorf("failed to export database to JSONL: %w", err)
}
// Staleness check uses last_import_time. After exporting, JSONL mtime is newer,
// so mark the DB as fresh by running a no-op import (skip existing issues).
markFreshCmd := newBdCmd(bdBinary, "--db", dbPath, "import", "-i", jsonlOutputPath, "--force", "--skip-existing", "--no-git-history")
markFreshCmd.Dir = path
markFreshCmd.Stdout = os.Stdout
markFreshCmd.Stderr = os.Stderr
if err := markFreshCmd.Run(); err != nil {
return fmt.Errorf("failed to mark database as fresh after export: %w", err)
}
return nil
jsonlOutputPath := filepath.Join(beadsDir, "issues.jsonl")
cmd = exec.Command(bdBinary, "export", "-o", jsonlOutputPath, "--force") // #nosec G204 -- bdBinary from validated executable path
} else {
cmd = exec.Command(bdBinary, "sync", "--import-only") // #nosec G204 -- bdBinary from validated executable path
}
importCmd := newBdCmd(bdBinary, "--db", dbPath, "sync", "--import-only")
importCmd.Dir = path // Set working directory without changing process dir
importCmd.Stdout = os.Stdout
importCmd.Stderr = os.Stderr
cmd.Dir = path // Set working directory without changing process dir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := importCmd.Run(); err != nil {
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to sync database with JSONL: %w", err)
}
@@ -149,7 +125,7 @@ func DBJSONLSync(path string) error {
// countDatabaseIssues counts the number of issues in the database.
func countDatabaseIssues(dbPath string) (int, error) {
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return 0, fmt.Errorf("failed to open database: %w", err)
}

View File

@@ -32,7 +32,8 @@ func SyncBranchConfig(path string) error {
}
// Set sync.branch using bd config set
setCmd := newBdCmd(bdBinary, "config", "set", "sync.branch", currentBranch)
// #nosec G204 - bdBinary is controlled by getBdBinary() which returns os.Executable()
setCmd := exec.Command(bdBinary, "config", "set", "sync.branch", currentBranch)
setCmd.Dir = path
if output, err := setCmd.CombinedOutput(); err != nil {
return fmt.Errorf("failed to set sync.branch: %w\nOutput: %s", err, string(output))

View File

@@ -180,14 +180,11 @@ func ChildParentDependencies(path string) error {
}
defer db.Close()
// Find child→parent BLOCKING dependencies where issue_id starts with depends_on_id + "."
// Only matches blocking types (blocks, conditional-blocks, waits-for) that cause deadlock.
// Excludes 'parent-child' type which is a legitimate structural hierarchy relationship.
// Find child→parent dependencies where issue_id starts with depends_on_id + "."
query := `
SELECT d.issue_id, d.depends_on_id, d.type
SELECT d.issue_id, d.depends_on_id
FROM dependencies d
WHERE d.issue_id LIKE d.depends_on_id || '.%'
AND d.type IN ('blocks', 'conditional-blocks', 'waits-for')
`
rows, err := db.Query(query)
if err != nil {
@@ -198,13 +195,12 @@ func ChildParentDependencies(path string) error {
type badDep struct {
issueID string
dependsOnID string
depType string
}
var badDeps []badDep
for rows.Next() {
var d badDep
if err := rows.Scan(&d.issueID, &d.dependsOnID, &d.depType); err == nil {
if err := rows.Scan(&d.issueID, &d.dependsOnID); err == nil {
badDeps = append(badDeps, d)
}
}
@@ -214,10 +210,10 @@ func ChildParentDependencies(path string) error {
return nil
}
// Delete child→parent blocking dependencies (preserving parent-child type)
// Delete child→parent dependencies
for _, d := range badDeps {
_, err := db.Exec("DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ? AND type = ?",
d.issueID, d.dependsOnID, d.depType)
_, err := db.Exec("DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?",
d.issueID, d.dependsOnID)
if err != nil {
fmt.Printf(" Warning: failed to remove %s→%s: %v\n", d.issueID, d.dependsOnID, err)
} else {
@@ -233,5 +229,5 @@ func ChildParentDependencies(path string) error {
// openDB opens a SQLite database for read-write access
func openDB(dbPath string) (*sql.DB, error) {
return sql.Open("sqlite3", sqliteConnString(dbPath, false))
return sql.Open("sqlite3", dbPath)
}

View File

@@ -138,66 +138,3 @@ func TestChildParentDependencies_FixesBadDeps(t *testing.T) {
t.Errorf("Expected 2 dirty issues (unique issue_ids from removed deps), got %d", dirtyCount)
}
}
// TestChildParentDependencies_PreservesParentChildType verifies that legitimate
// parent-child type dependencies are NOT removed (only blocking types are removed).
// Regression test for GitHub issue #750.
func TestChildParentDependencies_PreservesParentChildType(t *testing.T) {
	// Set up test database with both 'blocks' and 'parent-child' type deps
	dir := t.TempDir()
	beadsDir := filepath.Join(dir, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	dbPath := filepath.Join(beadsDir, "beads.db")
	db, err := openDB(dbPath)
	if err != nil {
		t.Fatal(err)
	}
	// Create schema with both 'blocks' (anti-pattern) and 'parent-child' (legitimate) deps.
	// bd-abc.1 and bd-abc.2 are dotted-ID children of bd-abc, so all three deps
	// match the child→parent pattern; only the 'blocks' one should be removed.
	_, err = db.Exec(`
		CREATE TABLE issues (id TEXT PRIMARY KEY);
		CREATE TABLE dependencies (issue_id TEXT, depends_on_id TEXT, type TEXT);
		CREATE TABLE dirty_issues (issue_id TEXT PRIMARY KEY);
		INSERT INTO issues (id) VALUES ('bd-abc'), ('bd-abc.1'), ('bd-abc.2');
		INSERT INTO dependencies (issue_id, depends_on_id, type) VALUES
			('bd-abc.1', 'bd-abc', 'parent-child'),
			('bd-abc.2', 'bd-abc', 'parent-child'),
			('bd-abc.1', 'bd-abc', 'blocks');
	`)
	if err != nil {
		t.Fatal(err)
	}
	// Close before running the fix so it can open the database itself.
	db.Close()
	// Run fix
	err = ChildParentDependencies(dir)
	if err != nil {
		t.Fatalf("ChildParentDependencies failed: %v", err)
	}
	// Verify only 'blocks' type was removed, 'parent-child' preserved
	db, _ = openDB(dbPath)
	defer db.Close()
	var blocksCount int
	db.QueryRow("SELECT COUNT(*) FROM dependencies WHERE type = 'blocks'").Scan(&blocksCount)
	if blocksCount != 0 {
		t.Errorf("Expected 0 'blocks' dependencies after fix, got %d", blocksCount)
	}
	var parentChildCount int
	db.QueryRow("SELECT COUNT(*) FROM dependencies WHERE type = 'parent-child'").Scan(&parentChildCount)
	if parentChildCount != 2 {
		t.Errorf("Expected 2 'parent-child' dependencies preserved, got %d", parentChildCount)
	}
	// Verify only 1 dirty issue (the one with 'blocks' dep removed)
	var dirtyCount int
	db.QueryRow("SELECT COUNT(*) FROM dirty_issues").Scan(&dirtyCount)
	if dirtyCount != 1 {
		t.Errorf("Expected 1 dirty issue, got %d", dirtyCount)
	}
}

View File

@@ -78,173 +78,6 @@ func CheckGitHooks() DoctorCheck {
}
}
// CheckGitWorkingTree reports whether the git working tree at path is clean.
// This helps prevent leaving work stranded (AGENTS.md: keep git state clean).
// Non-repos are OK/N-A; an unreadable status or a dirty tree is a warning.
func CheckGitWorkingTree(path string) DoctorCheck {
	const checkName = "Git Working Tree"

	// First confirm we are inside a git repository at all.
	probe := exec.Command("git", "rev-parse", "--git-dir")
	probe.Dir = path
	if probe.Run() != nil {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusOK,
			Message: "N/A (not a git repository)",
		}
	}

	statusCmd := exec.Command("git", "status", "--porcelain")
	statusCmd.Dir = path
	raw, err := statusCmd.Output()
	if err != nil {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusWarning,
			Message: "Unable to check git status",
			Detail:  err.Error(),
			Fix:     "Run 'git status' and commit/stash changes before syncing",
		}
	}

	porcelain := strings.TrimSpace(string(raw))
	if porcelain == "" {
		return DoctorCheck{
			Name:    checkName,
			Status:  StatusOK,
			Message: "Clean",
		}
	}

	// Show a small sample of paths for quick debugging.
	sample := strings.Split(porcelain, "\n")
	const maxSample = 8
	if len(sample) > maxSample {
		sample = append(sample[:maxSample], "…")
	}
	return DoctorCheck{
		Name:    checkName,
		Status:  StatusWarning,
		Message: "Uncommitted changes present",
		Detail:  strings.Join(sample, "\n"),
		Fix:     "Commit or stash changes, then follow AGENTS.md: git pull --rebase && git push",
	}
}
// CheckGitUpstream checks whether the current branch is up to date with its upstream.
// This catches common "forgot to pull/push" failure modes (AGENTS.md: pull --rebase, push).
//
// Outcomes: OK when in sync with upstream (or not a git repository at all);
// warnings for detached HEAD, missing upstream, ahead/behind/diverged branches,
// or when the upstream comparison itself fails.
func CheckGitUpstream(path string) DoctorCheck {
	// Not a git repository -> check is not applicable.
	cmd := exec.Command("git", "rev-parse", "--git-dir")
	cmd.Dir = path
	if err := cmd.Run(); err != nil {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusOK,
			Message: "N/A (not a git repository)",
		}
	}
	// Detect detached HEAD.
	cmd = exec.Command("git", "symbolic-ref", "--short", "HEAD")
	cmd.Dir = path
	branchOut, err := cmd.Output()
	if err != nil {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: "Detached HEAD (no branch)",
			Fix:     "Check out a branch before syncing",
		}
	}
	branch := strings.TrimSpace(string(branchOut))
	// Resolve the upstream tracking ref (@{u}); an error here means no
	// upstream is configured for the current branch.
	cmd = exec.Command("git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}")
	cmd.Dir = path
	upOut, err := cmd.Output()
	if err != nil {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("No upstream configured for %s", branch),
			Fix:     fmt.Sprintf("Set upstream then push: git push -u origin %s", branch),
		}
	}
	upstream := strings.TrimSpace(string(upOut))
	// Count commits unique to each side: ahead = local-only, behind = upstream-only.
	ahead, aheadErr := gitRevListCount(path, "@{u}..HEAD")
	behind, behindErr := gitRevListCount(path, "HEAD..@{u}")
	if aheadErr != nil || behindErr != nil {
		// Comparison failed (e.g. stale or missing refs); surface whichever
		// side(s) errored in the Detail.
		detailParts := []string{}
		if aheadErr != nil {
			detailParts = append(detailParts, "ahead: "+aheadErr.Error())
		}
		if behindErr != nil {
			detailParts = append(detailParts, "behind: "+behindErr.Error())
		}
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("Unable to compare with upstream (%s)", upstream),
			Detail:  strings.Join(detailParts, "; "),
			Fix:     "Run 'git fetch' then check: git status -sb",
		}
	}
	// Fully in sync with upstream.
	if ahead == 0 && behind == 0 {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusOK,
			Message: fmt.Sprintf("Up to date (%s)", upstream),
			Detail:  fmt.Sprintf("Branch: %s", branch),
		}
	}
	// Local commits not yet pushed.
	if ahead > 0 && behind == 0 {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("Ahead of upstream by %d commit(s)", ahead),
			Detail:  fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
			Fix:     "Run 'git push' (AGENTS.md: git pull --rebase && git push)",
		}
	}
	// Upstream commits not yet pulled.
	if behind > 0 && ahead == 0 {
		return DoctorCheck{
			Name:    "Git Upstream",
			Status:  StatusWarning,
			Message: fmt.Sprintf("Behind upstream by %d commit(s)", behind),
			Detail:  fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
			Fix:     "Run 'git pull --rebase' (then re-run bd sync / bd doctor)",
		}
	}
	// Both sides have unique commits: history has diverged.
	return DoctorCheck{
		Name:    "Git Upstream",
		Status:  StatusWarning,
		Message: fmt.Sprintf("Diverged from upstream (ahead %d, behind %d)", ahead, behind),
		Detail:  fmt.Sprintf("Branch: %s, upstream: %s", branch, upstream),
		Fix:     "Run 'git pull --rebase' then 'git push'",
	}
}
// gitRevListCount returns the number of commits in rangeExpr (for example
// "@{u}..HEAD") for the repository at path, via `git rev-list --count`.
// Empty output is treated as zero.
func gitRevListCount(path string, rangeExpr string) (int, error) {
	revList := exec.Command("git", "rev-list", "--count", rangeExpr) // #nosec G204 -- fixed args
	revList.Dir = path
	raw, err := revList.Output()
	if err != nil {
		return 0, err
	}
	trimmed := strings.TrimSpace(string(raw))
	if trimmed == "" {
		return 0, nil
	}
	var count int
	if _, err := fmt.Sscanf(trimmed, "%d", &count); err != nil {
		return 0, err
	}
	return count, nil
}
// CheckSyncBranchHookCompatibility checks if pre-push hook is compatible with sync-branch mode.
// When sync-branch is configured, the pre-push hook must have the sync-branch bypass logic
// (added in version 0.29.0). Without it, users experience circular "bd sync" failures (issue #532).
@@ -312,8 +145,6 @@ func CheckSyncBranchHookCompatibility(path string) DoctorCheck {
Status: StatusWarning,
Message: "Pre-push hook is not a bd hook",
Detail: "Cannot verify sync-branch compatibility with custom hooks",
Fix: "Either run 'bd hooks install --force' to use bd hooks,\n" +
" or ensure your custom hook skips validation when pushing to sync-branch",
}
}
@@ -831,5 +662,5 @@ func CheckOrphanedIssues(path string) DoctorCheck {
// openDBReadOnly opens a SQLite database in read-only mode
func openDBReadOnly(dbPath string) (*sql.DB, error) {
return sql.Open("sqlite3", sqliteConnString(dbPath, true))
return sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
}

View File

@@ -1,176 +0,0 @@
package doctor
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
// mkTmpDirInTmp creates a temporary directory under /tmp (falling back to the
// platform default temp root, e.g. on Windows) and registers removal on test
// cleanup. It fails the test if neither location works.
func mkTmpDirInTmp(t *testing.T, prefix string) string {
	t.Helper()
	dir, err := os.MkdirTemp("/tmp", prefix)
	if err != nil {
		// Fallback for platforms without /tmp (e.g. Windows).
		if dir, err = os.MkdirTemp("", prefix); err != nil {
			t.Fatalf("failed to create temp dir: %v", err)
		}
	}
	t.Cleanup(func() { _ = os.RemoveAll(dir) })
	return dir
}
// runGit runs a git command with the given args in dir, failing the test on
// any error and returning the combined stdout+stderr output.
func runGit(t *testing.T, dir string, args ...string) string {
	t.Helper()
	gitCmd := exec.Command("git", args...)
	gitCmd.Dir = dir
	combined, err := gitCmd.CombinedOutput()
	if err != nil {
		t.Fatalf("git %v failed: %v\n%s", args, err, string(combined))
	}
	return string(combined)
}
// initRepo initializes a git repository at dir on the given branch, creating
// a .beads directory and configuring a test identity so commits succeed.
func initRepo(t *testing.T, dir string, branch string) {
	t.Helper()
	_ = os.MkdirAll(filepath.Join(dir, ".beads"), 0755)
	for _, gitArgs := range [][]string{
		{"init", "-b", branch},
		{"config", "user.email", "test@test.com"},
		{"config", "user.name", "Test User"},
	} {
		runGit(t, dir, gitArgs...)
	}
}
// commitFile writes content to name inside dir (creating any parent
// directories), stages it, and commits with msg.
func commitFile(t *testing.T, dir, name, content, msg string) {
	t.Helper()
	target := filepath.Join(dir, name)
	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
		t.Fatalf("mkdir: %v", err)
	}
	if err := os.WriteFile(target, []byte(content), 0644); err != nil {
		t.Fatalf("write file: %v", err)
	}
	runGit(t, dir, "add", name)
	runGit(t, dir, "commit", "-m", msg)
}
// TestCheckGitWorkingTree covers the three working-tree states: not a git
// repository (reported OK/N-A), a clean committed tree (OK), and a tree with
// an untracked file (warning).
func TestCheckGitWorkingTree(t *testing.T) {
	t.Run("not a git repo", func(t *testing.T) {
		// A plain directory with no .git should be OK with an N/A message.
		dir := mkTmpDirInTmp(t, "bd-git-nt-*")
		check := CheckGitWorkingTree(dir)
		if check.Status != StatusOK {
			t.Fatalf("status=%q want %q", check.Status, StatusOK)
		}
		if !strings.Contains(check.Message, "N/A") {
			t.Fatalf("message=%q want N/A", check.Message)
		}
	})
	t.Run("clean", func(t *testing.T) {
		// Everything committed -> clean tree -> OK.
		dir := mkTmpDirInTmp(t, "bd-git-clean-*")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		check := CheckGitWorkingTree(dir)
		if check.Status != StatusOK {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusOK, check.Message)
		}
	})
	t.Run("dirty", func(t *testing.T) {
		// An untracked file makes `git status --porcelain` non-empty -> warning.
		dir := mkTmpDirInTmp(t, "bd-git-dirty-*")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		if err := os.WriteFile(filepath.Join(dir, "dirty.txt"), []byte("x"), 0644); err != nil {
			t.Fatalf("write dirty file: %v", err)
		}
		check := CheckGitWorkingTree(dir)
		if check.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
		}
	})
}
// TestCheckGitUpstream covers the main upstream states: no upstream configured,
// fully in sync, ahead (local unpushed commits), and behind (remote commits not
// yet pulled). The "behind" case advances the bare remote through a second clone.
func TestCheckGitUpstream(t *testing.T) {
	t.Run("no upstream", func(t *testing.T) {
		// Repo with commits but no remote tracking branch -> warning.
		dir := mkTmpDirInTmp(t, "bd-git-up-*")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		check := CheckGitUpstream(dir)
		if check.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
		}
		if !strings.Contains(check.Message, "No upstream") {
			t.Fatalf("message=%q want to mention upstream", check.Message)
		}
	})
	t.Run("up to date", func(t *testing.T) {
		// Push establishes upstream; with no divergence the check is OK.
		dir := mkTmpDirInTmp(t, "bd-git-up2-*")
		remote := mkTmpDirInTmp(t, "bd-git-remote-*")
		runGit(t, remote, "init", "--bare", "--initial-branch=main")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		runGit(t, dir, "remote", "add", "origin", remote)
		runGit(t, dir, "push", "-u", "origin", "main")
		check := CheckGitUpstream(dir)
		if check.Status != StatusOK {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusOK, check.Message)
		}
	})
	t.Run("ahead of upstream", func(t *testing.T) {
		dir := mkTmpDirInTmp(t, "bd-git-ahead-*")
		remote := mkTmpDirInTmp(t, "bd-git-remote2-*")
		runGit(t, remote, "init", "--bare", "--initial-branch=main")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		runGit(t, dir, "remote", "add", "origin", remote)
		runGit(t, dir, "push", "-u", "origin", "main")
		// One extra local commit puts the branch ahead of upstream by 1.
		commitFile(t, dir, "file2.txt", "x", "local commit")
		check := CheckGitUpstream(dir)
		if check.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
		}
		if !strings.Contains(check.Message, "Ahead") {
			t.Fatalf("message=%q want to mention ahead", check.Message)
		}
	})
	t.Run("behind upstream", func(t *testing.T) {
		dir := mkTmpDirInTmp(t, "bd-git-behind-*")
		remote := mkTmpDirInTmp(t, "bd-git-remote3-*")
		runGit(t, remote, "init", "--bare", "--initial-branch=main")
		initRepo(t, dir, "main")
		commitFile(t, dir, "README.md", "# test\n", "initial")
		runGit(t, dir, "remote", "add", "origin", remote)
		runGit(t, dir, "push", "-u", "origin", "main")
		// Advance remote via another clone.
		clone := mkTmpDirInTmp(t, "bd-git-clone-*")
		runGit(t, clone, "clone", remote, ".")
		runGit(t, clone, "config", "user.email", "test@test.com")
		runGit(t, clone, "config", "user.name", "Test User")
		commitFile(t, clone, "remote.txt", "y", "remote commit")
		runGit(t, clone, "push", "origin", "main")
		// Update tracking refs.
		runGit(t, dir, "fetch", "origin")
		check := CheckGitUpstream(dir)
		if check.Status != StatusWarning {
			t.Fatalf("status=%q want %q (msg=%q)", check.Status, StatusWarning, check.Message)
		}
		if !strings.Contains(check.Message, "Behind") {
			t.Fatalf("message=%q want to mention behind", check.Message)
		}
	})
}

View File

@@ -23,8 +23,8 @@ func setupGitRepo(t *testing.T) string {
t.Fatalf("failed to create .beads directory: %v", err)
}
// Initialize git repo with 'main' as default branch (modern git convention)
cmd := exec.Command("git", "init", "--initial-branch=main")
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = dir
if err := cmd.Run(); err != nil {
t.Fatalf("failed to init git repo: %v", err)
@@ -278,8 +278,8 @@ func setupGitRepoInDir(t *testing.T, dir string) {
t.Fatalf("failed to create .beads directory: %v", err)
}
// Initialize git repo with 'main' as default branch (modern git convention)
cmd := exec.Command("git", "init", "--initial-branch=main")
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = dir
if err := cmd.Run(); err != nil {
t.Fatalf("failed to init git repo: %v", err)

View File

@@ -19,7 +19,6 @@ daemon.lock
daemon.log
daemon.pid
bd.sock
sync-state.json
# Local version tracking (prevents upgrade notification spam after git ops)
.local_version

View File

@@ -106,7 +106,7 @@ func CheckPermissions(path string) DoctorCheck {
dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
if _, err := os.Stat(dbPath); err == nil {
// Try to open database
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return DoctorCheck{
Name: "Permissions",
@@ -118,7 +118,7 @@ func CheckPermissions(path string) DoctorCheck {
_ = db.Close() // Intentionally ignore close error
// Try a write test
db, err = sql.Open("sqlite", sqliteConnString(dbPath, true))
db, err = sql.Open("sqlite", dbPath)
if err == nil {
_, err = db.Exec("SELECT 1")
_ = db.Close() // Intentionally ignore close error

View File

@@ -51,7 +51,7 @@ func CheckIDFormat(path string) DoctorCheck {
}
// Open database
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err != nil {
return DoctorCheck{
Name: "Issue IDs",
@@ -121,7 +121,7 @@ func CheckDependencyCycles(path string) DoctorCheck {
}
// Open database to check for cycles
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return DoctorCheck{
Name: "Dependency Cycles",
@@ -216,7 +216,7 @@ func CheckTombstones(path string) DoctorCheck {
}
}
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return DoctorCheck{
Name: "Tombstones",
@@ -420,7 +420,7 @@ func CheckRepoFingerprint(path string) DoctorCheck {
}
// Open database
db, err := sql.Open("sqlite3", sqliteConnString(dbPath, true))
db, err := sql.Open("sqlite3", "file:"+dbPath+"?mode=ro")
if err != nil {
return DoctorCheck{
Name: "Repo Fingerprint",

View File

@@ -1,123 +0,0 @@
package doctor
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/configfile"
"github.com/steveyegge/beads/internal/utils"
)
// CheckJSONLIntegrity scans the workspace's issues JSONL export for malformed
// lines (unparseable JSON or records missing an "id" field). It returns OK
// when no JSONL file exists at all, a warning when the file cannot be read or
// scanned, and an error with fix guidance when malformed lines are found —
// the guidance depends on whether a database is available for auto-repair.
func CheckJSONLIntegrity(path string) DoctorCheck {
	beadsDir := filepath.Join(path, ".beads")
	// Resolve JSONL path: prefer the configured export name, skipping bd's
	// internal system files which are never the issues export.
	jsonlPath := ""
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil {
		if cfg.JSONLExport != "" && !isSystemJSONLFilename(cfg.JSONLExport) {
			p := cfg.JSONLPath(beadsDir)
			if _, err := os.Stat(p); err == nil {
				jsonlPath = p
			}
		}
	}
	if jsonlPath == "" {
		// Fall back to a best-effort discovery within .beads/.
		p := utils.FindJSONLInDir(beadsDir)
		if _, err := os.Stat(p); err == nil {
			jsonlPath = p
		}
	}
	if jsonlPath == "" {
		// No JSONL file anywhere: nothing to validate.
		return DoctorCheck{Name: "JSONL Integrity", Status: StatusOK, Message: "N/A (no JSONL file)"}
	}
	// Best-effort scan for malformed lines.
	f, err := os.Open(jsonlPath) // #nosec G304 -- jsonlPath is within the workspace
	if err != nil {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusWarning,
			Message: "Unable to read JSONL file",
			Detail:  err.Error(),
		}
	}
	defer f.Close()
	var malformed int
	var examples []string
	scanner := bufio.NewScanner(f)
	lineNo := 0
	for scanner.Scan() {
		lineNo++
		line := strings.TrimSpace(scanner.Text())
		// Blank lines are tolerated.
		if line == "" {
			continue
		}
		// Each non-blank line must be a JSON object with a non-empty "id".
		var v struct {
			ID string `json:"id"`
		}
		if err := json.Unmarshal([]byte(line), &v); err != nil || v.ID == "" {
			malformed++
			// Collect at most 5 examples for the Detail field.
			if len(examples) < 5 {
				if err != nil {
					examples = append(examples, fmt.Sprintf("line %d: %v", lineNo, err))
				} else {
					examples = append(examples, fmt.Sprintf("line %d: missing id", lineNo))
				}
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusWarning,
			Message: "Unable to scan JSONL file",
			Detail:  err.Error(),
		}
	}
	if malformed == 0 {
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusOK,
			Message: fmt.Sprintf("%s looks valid", filepath.Base(jsonlPath)),
		}
	}
	// If we have a database, we can auto-repair by re-exporting from DB.
	dbPath := filepath.Join(beadsDir, beads.CanonicalDatabaseName)
	if cfg, err := configfile.Load(beadsDir); err == nil && cfg != nil && cfg.Database != "" {
		dbPath = cfg.DatabasePath(beadsDir)
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		// No database available: the user must restore the JSONL manually.
		return DoctorCheck{
			Name:    "JSONL Integrity",
			Status:  StatusError,
			Message: fmt.Sprintf("%s has %d malformed line(s)", filepath.Base(jsonlPath), malformed),
			Detail:  strings.Join(examples, "\n"),
			Fix:     "Restore the JSONL file from git or from a backup (no database available for auto-repair).",
		}
	}
	return DoctorCheck{
		Name:    "JSONL Integrity",
		Status:  StatusError,
		Message: fmt.Sprintf("%s has %d malformed line(s)", filepath.Base(jsonlPath), malformed),
		Detail:  strings.Join(examples, "\n"),
		Fix:     "Run 'bd doctor --fix' to back up the JSONL and regenerate it from the database.",
	}
}
// isSystemJSONLFilename reports whether name is one of bd's internal JSONL
// files (deletions/interactions/molecules), which are never the issues export.
func isSystemJSONLFilename(name string) bool {
	return name == "deletions.jsonl" ||
		name == "interactions.jsonl" ||
		name == "molecules.jsonl"
}

View File

@@ -1,43 +0,0 @@
package doctor
import (
"os"
"path/filepath"
"testing"
)
// TestCheckJSONLIntegrity_MalformedLine verifies that a JSONL file containing
// an invalid line is reported as an error, with Fix guidance present (the
// database file exists, so the check should suggest the auto-repair path).
func TestCheckJSONLIntegrity_MalformedLine(t *testing.T) {
	ws := t.TempDir()
	beadsDir := filepath.Join(ws, ".beads")
	if err := os.MkdirAll(beadsDir, 0755); err != nil {
		t.Fatal(err)
	}
	// One valid record followed by one malformed (non-JSON) line.
	jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
	if err := os.WriteFile(jsonlPath, []byte("{\"id\":\"t-1\"}\n{not json}\n"), 0644); err != nil {
		t.Fatal(err)
	}
	// Ensure DB exists so check suggests auto-repair.
	if err := os.WriteFile(filepath.Join(beadsDir, "beads.db"), []byte("x"), 0644); err != nil {
		t.Fatal(err)
	}
	check := CheckJSONLIntegrity(ws)
	if check.Status != StatusError {
		t.Fatalf("expected StatusError, got %v (%s)", check.Status, check.Message)
	}
	if check.Fix == "" {
		t.Fatalf("expected Fix guidance")
	}
}
// TestCheckJSONLIntegrity_NoJSONL verifies the check is a no-op (OK status)
// when the workspace has a .beads directory but no JSONL file at all.
func TestCheckJSONLIntegrity_NoJSONL(t *testing.T) {
	workspace := t.TempDir()
	if err := os.MkdirAll(filepath.Join(workspace, ".beads"), 0755); err != nil {
		t.Fatal(err)
	}
	result := CheckJSONLIntegrity(workspace)
	if result.Status != StatusOK {
		t.Fatalf("expected StatusOK, got %v (%s)", result.Status, result.Message)
	}
}

View File

@@ -53,7 +53,7 @@ func CheckLegacyBeadsSlashCommands(repoPath string) DoctorCheck {
Name: "Legacy Commands",
Status: "warning",
Message: fmt.Sprintf("Old beads integration detected in %s", strings.Join(filesWithLegacyCommands, ", ")),
Detail: "Found: /beads:* slash command references (deprecated)\n" +
Detail: "Found: /beads:* slash command references (deprecated)\n" +
" These commands are token-inefficient (~10.5k tokens per session)",
Fix: "Migrate to bd prime hooks for better token efficiency:\n" +
"\n" +
@@ -104,7 +104,7 @@ func CheckAgentDocumentation(repoPath string) DoctorCheck {
Name: "Agent Documentation",
Status: "warning",
Message: "No agent documentation found",
Detail: "Missing: AGENTS.md or CLAUDE.md\n" +
Detail: "Missing: AGENTS.md or CLAUDE.md\n" +
" Documenting workflow helps AI agents work more effectively",
Fix: "Add agent documentation:\n" +
" • Run 'bd onboard' to create AGENTS.md with workflow guidance\n" +
@@ -187,10 +187,10 @@ func CheckLegacyJSONLFilename(repoPath string) DoctorCheck {
Name: "JSONL Files",
Status: "warning",
Message: fmt.Sprintf("Multiple JSONL files found: %s", strings.Join(realJSONLFiles, ", ")),
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
Detail: "Having multiple JSONL files can cause sync and merge conflicts.\n" +
" Only one JSONL file should be used per repository.",
Fix: "Determine which file is current and remove the others:\n" +
" 1. Check .beads/metadata.json for 'jsonl_export' setting\n" +
" 1. Check 'bd stats' to see which file is being used\n" +
" 2. Verify with 'git log .beads/*.jsonl' to see commit history\n" +
" 3. Remove the unused file(s): git rm .beads/<unused>.jsonl\n" +
" 4. Commit the change",
@@ -235,7 +235,7 @@ func CheckLegacyJSONLConfig(repoPath string) DoctorCheck {
Name: "JSONL Config",
Status: "warning",
Message: "Using legacy beads.jsonl filename",
Detail: "The canonical filename is now issues.jsonl (bd-6xd).\n" +
Detail: "The canonical filename is now issues.jsonl (bd-6xd).\n" +
" Legacy beads.jsonl is still supported but should be migrated.",
Fix: "Run 'bd doctor --fix' to auto-migrate, or manually:\n" +
" 1. git mv .beads/beads.jsonl .beads/issues.jsonl\n" +
@@ -251,7 +251,7 @@ func CheckLegacyJSONLConfig(repoPath string) DoctorCheck {
Status: "warning",
Message: "Config references beads.jsonl but issues.jsonl exists",
Detail: "metadata.json says beads.jsonl but the actual file is issues.jsonl",
Fix: "Run 'bd doctor --fix' to update the configuration",
Fix: "Run 'bd doctor --fix' to update the configuration",
}
}
}
@@ -303,16 +303,6 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
// Check if configured JSONL exists
if cfg.JSONLExport != "" {
if cfg.JSONLExport == "deletions.jsonl" || cfg.JSONLExport == "interactions.jsonl" || cfg.JSONLExport == "molecules.jsonl" {
return DoctorCheck{
Name: "Database Config",
Status: "error",
Message: fmt.Sprintf("Invalid jsonl_export %q (system file)", cfg.JSONLExport),
Detail: "metadata.json jsonl_export must reference the git-tracked issues export (typically issues.jsonl), not a system log file.",
Fix: "Run 'bd doctor --fix' to reset metadata.json jsonl_export to issues.jsonl, then commit the change.",
}
}
jsonlPath := cfg.JSONLPath(beadsDir)
if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
// Check if other .jsonl files exist
@@ -325,15 +315,7 @@ func CheckDatabaseConfig(repoPath string) DoctorCheck {
lowerName := strings.ToLower(name)
if !strings.Contains(lowerName, "backup") &&
!strings.Contains(lowerName, ".orig") &&
!strings.Contains(lowerName, ".bak") &&
!strings.Contains(lowerName, "~") &&
!strings.HasPrefix(lowerName, "backup_") &&
name != "deletions.jsonl" &&
name != "interactions.jsonl" &&
name != "molecules.jsonl" &&
!strings.Contains(lowerName, ".base.jsonl") &&
!strings.Contains(lowerName, ".left.jsonl") &&
!strings.Contains(lowerName, ".right.jsonl") {
!strings.Contains(lowerName, ".bak") {
otherJSONLs = append(otherJSONLs, name)
}
}
@@ -439,7 +421,7 @@ func CheckFreshClone(repoPath string) DoctorCheck {
Name: "Fresh Clone",
Status: "warning",
Message: fmt.Sprintf("Fresh clone detected (%d issues in %s, no database)", issueCount, jsonlName),
Detail: "This appears to be a freshly cloned repository.\n" +
Detail: "This appears to be a freshly cloned repository.\n" +
" The JSONL file contains issues but no local database exists.\n" +
" Run 'bd init' to create the database and import existing issues.",
Fix: fmt.Sprintf("Run '%s' to initialize the database and import issues", fixCmd),

View File

@@ -410,49 +410,6 @@ func TestCheckLegacyJSONLConfig(t *testing.T) {
}
}
// TestCheckDatabaseConfig_IgnoresSystemJSONLs verifies that when the
// configured issues.jsonl is missing and the only other JSONL present is a
// system log (interactions.jsonl), the check still reports "ok" instead of
// suggesting the system file as a candidate export.
func TestCheckDatabaseConfig_IgnoresSystemJSONLs(t *testing.T) {
    root := t.TempDir()
    dir := filepath.Join(root, ".beads")
    if err := os.Mkdir(dir, 0750); err != nil {
        t.Fatal(err)
    }
    mustWrite := func(name, body string) {
        t.Helper()
        if err := os.WriteFile(filepath.Join(dir, name), []byte(body), 0644); err != nil {
            t.Fatal(err)
        }
    }
    // Configure issues.jsonl, but only create interactions.jsonl.
    mustWrite("metadata.json", `{"database":"beads.db","jsonl_export":"issues.jsonl"}`)
    mustWrite("interactions.jsonl", `{"id":"x"}`)
    result := CheckDatabaseConfig(root)
    if result.Status != "ok" {
        t.Fatalf("expected ok, got %s: %s\n%s", result.Status, result.Message, result.Detail)
    }
}
// TestCheckDatabaseConfig_SystemJSONLExportIsError verifies that pointing
// jsonl_export at a system log file (interactions.jsonl) is flagged as an
// error even when that file exists on disk.
func TestCheckDatabaseConfig_SystemJSONLExportIsError(t *testing.T) {
    root := t.TempDir()
    dir := filepath.Join(root, ".beads")
    if err := os.Mkdir(dir, 0750); err != nil {
        t.Fatal(err)
    }
    mustWrite := func(name, body string) {
        t.Helper()
        if err := os.WriteFile(filepath.Join(dir, name), []byte(body), 0644); err != nil {
            t.Fatal(err)
        }
    }
    // jsonl_export deliberately misconfigured to a system file.
    mustWrite("metadata.json", `{"database":"beads.db","jsonl_export":"interactions.jsonl"}`)
    mustWrite("interactions.jsonl", `{"id":"x"}`)
    result := CheckDatabaseConfig(root)
    if result.Status != "error" {
        t.Fatalf("expected error, got %s: %s", result.Status, result.Message)
    }
}
func TestCheckFreshClone(t *testing.T) {
tests := []struct {
name string

View File

@@ -312,7 +312,7 @@ func CheckCompactionCandidates(path string) DoctorCheck {
// the actual beads directory location.
func resolveBeadsDir(beadsDir string) string {
redirectFile := filepath.Join(beadsDir, "redirect")
data, err := os.ReadFile(redirectFile) //nolint:gosec // redirect file path is constructed from known beadsDir
data, err := os.ReadFile(redirectFile)
if err != nil {
// No redirect file - use original path
return beadsDir

View File

@@ -1,54 +0,0 @@
package doctor
import (
"fmt"
"os"
"strings"
"time"
)
// sqliteConnString builds a "file:" connection URI for the given SQLite path,
// enabling foreign keys, a busy_timeout (default 30s, overridable via the
// BD_LOCK_TIMEOUT duration env var), and sqlite time formatting. A path that
// is already a "file:" URI keeps its existing parameters; only the missing
// ones are appended. An empty (or all-whitespace) path yields "".
func sqliteConnString(path string, readOnly bool) string {
    path = strings.TrimSpace(path)
    if path == "" {
        return ""
    }
    // Best-effort: honor the same env var viper uses (BD_LOCK_TIMEOUT).
    timeout := 30 * time.Second
    if raw := strings.TrimSpace(os.Getenv("BD_LOCK_TIMEOUT")); raw != "" {
        if parsed, err := time.ParseDuration(raw); err == nil {
            timeout = parsed
        }
    }
    ms := int64(timeout / time.Millisecond)

    if !strings.HasPrefix(path, "file:") {
        // Plain filesystem path: emit a fully-specified URI in one shot.
        if readOnly {
            return fmt.Sprintf("file:%s?mode=ro&_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, ms)
        }
        return fmt.Sprintf("file:%s?_pragma=foreign_keys(ON)&_pragma=busy_timeout(%d)&_time_format=sqlite", path, ms)
    }

    // Already a URI: append only the parameters that are absent.
    conn := path
    sep := "?"
    if strings.Contains(conn, "?") {
        sep = "&"
    }
    addParam := func(param string) {
        conn += sep + param
        sep = "&"
    }
    if readOnly && !strings.Contains(conn, "mode=") {
        addParam("mode=ro")
    }
    if !strings.Contains(conn, "_pragma=busy_timeout") {
        addParam(fmt.Sprintf("_pragma=busy_timeout(%d)", ms))
    }
    if !strings.Contains(conn, "_pragma=foreign_keys") {
        addParam("_pragma=foreign_keys(ON)")
    }
    if !strings.Contains(conn, "_time_format=") {
        addParam("_time_format=sqlite")
    }
    return conn
}

View File

@@ -333,14 +333,12 @@ func CheckChildParentDependencies(path string) DoctorCheck {
}
defer db.Close()
// Query for child→parent BLOCKING dependencies where issue_id starts with depends_on_id + "."
// Only matches blocking types (blocks, conditional-blocks, waits-for) that cause deadlock.
// Excludes 'parent-child' type which is a legitimate structural hierarchy relationship.
// Query for child→parent dependencies where issue_id starts with depends_on_id + "."
// This uses SQLite's LIKE pattern matching
query := `
SELECT d.issue_id, d.depends_on_id
FROM dependencies d
WHERE d.issue_id LIKE d.depends_on_id || '.%'
AND d.type IN ('blocks', 'conditional-blocks', 'waits-for')
`
rows, err := db.Query(query)
if err != nil {

View File

@@ -1,378 +0,0 @@
//go:build chaos
package main
import (
"bytes"
"context"
"database/sql"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
_ "github.com/ncruces/go-sqlite3/driver"
)
// TestDoctorRepair_CorruptDatabase_NotADatabase_RebuildFromJSONL overwrites
// the SQLite file with non-database bytes and verifies that
// `bd doctor --fix --yes` rebuilds a healthy database from the JSONL export,
// after which a plain `bd doctor` passes.
func TestDoctorRepair_CorruptDatabase_NotADatabase_RebuildFromJSONL(t *testing.T) {
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-chaos-*")
    dbPath := filepath.Join(ws, ".beads", "beads.db")
    jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
    // Seed a workspace: init, one issue, then export JSONL to recover from.
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
        t.Fatalf("bd export failed: %v", err)
    }
    // Make the DB unreadable.
    if err := os.WriteFile(dbPath, []byte("not a database"), 0644); err != nil {
        t.Fatalf("corrupt db: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
        t.Fatalf("bd doctor --fix failed: %v", err)
    }
    // The repaired database must now pass a clean doctor run.
    if out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor"); err != nil {
        t.Fatalf("bd doctor after fix failed: %v\n%s", err, out)
    }
}
// TestDoctorRepair_CorruptDatabase_NoJSONL_FixFails corrupts the database
// with no JSONL source of truth available and verifies that the repair
// refuses ("cannot auto-recover") rather than fabricating data, and that the
// failure path does not rewrite jsonl_export to point at a system file.
func TestDoctorRepair_CorruptDatabase_NoJSONL_FixFails(t *testing.T) {
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-chaos-nojsonl-*")
    dbPath := filepath.Join(ws, ".beads", "beads.db")
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    // Some workflows keep JSONL in sync automatically; force it to be missing.
    _ = os.Remove(filepath.Join(ws, ".beads", "issues.jsonl"))
    _ = os.Remove(filepath.Join(ws, ".beads", "beads.jsonl"))
    // Corrupt without providing JSONL source-of-truth.
    if err := os.Truncate(dbPath, 64); err != nil {
        t.Fatalf("truncate db: %v", err)
    }
    out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes")
    if err == nil {
        t.Fatalf("expected bd doctor --fix to fail without JSONL")
    }
    if !strings.Contains(out, "cannot auto-recover") {
        t.Fatalf("expected auto-recover error, got:\n%s", out)
    }
    // Ensure we don't mis-configure jsonl_export to a system file during failure.
    metadata, readErr := os.ReadFile(filepath.Join(ws, ".beads", "metadata.json"))
    if readErr == nil {
        if strings.Contains(string(metadata), "interactions.jsonl") {
            t.Fatalf("unexpected metadata.json jsonl_export set to interactions.jsonl:\n%s", string(metadata))
        }
    }
}
// TestDoctorRepair_CorruptDatabase_BacksUpSidecars verifies that repairing a
// corrupt database leaves a *.corrupt.backup.db file in .beads and, on a
// best-effort basis, moves the SQLite sidecar files (-wal/-shm/-journal)
// alongside the backup.
func TestDoctorRepair_CorruptDatabase_BacksUpSidecars(t *testing.T) {
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-chaos-sidecars-*")
    dbPath := filepath.Join(ws, ".beads", "beads.db")
    jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
    // Seed workspace and export JSONL so the repair has a recovery source.
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
        t.Fatalf("bd export failed: %v", err)
    }
    // Ensure sidecars exist so we can verify they get moved with the backup.
    for _, suffix := range []string{"-wal", "-shm", "-journal"} {
        if err := os.WriteFile(dbPath+suffix, []byte("x"), 0644); err != nil {
            t.Fatalf("write sidecar %s: %v", suffix, err)
        }
    }
    if err := os.Truncate(dbPath, 64); err != nil {
        t.Fatalf("truncate db: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
        t.Fatalf("bd doctor --fix failed: %v", err)
    }
    // Verify a backup exists, and at least one sidecar got moved.
    entries, err := os.ReadDir(filepath.Join(ws, ".beads"))
    if err != nil {
        t.Fatalf("readdir: %v", err)
    }
    var backup string
    for _, e := range entries {
        if strings.Contains(e.Name(), ".corrupt.backup.db") {
            backup = filepath.Join(ws, ".beads", e.Name())
            break
        }
    }
    if backup == "" {
        t.Fatalf("expected backup db in .beads, found none")
    }
    wal := backup + "-wal"
    if _, err := os.Stat(wal); err != nil {
        // At minimum, the backup DB itself should exist; sidecar backup is best-effort.
        if _, err2 := os.Stat(backup); err2 != nil {
            t.Fatalf("backup db missing: %v", err2)
        }
    }
}
// TestDoctorRepair_CorruptDatabase_WithRunningDaemon_FixSucceeds verifies
// that `bd doctor --fix` can repair a corrupt database even while a bd
// daemon is running against the same workspace, and that the daemon remains
// killable afterwards (repair must not wedge it).
func TestDoctorRepair_CorruptDatabase_WithRunningDaemon_FixSucceeds(t *testing.T) {
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-chaos-daemon-*")
    dbPath := filepath.Join(ws, ".beads", "beads.db")
    jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
    // Seed workspace and JSONL recovery source.
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
        t.Fatalf("bd export failed: %v", err)
    }
    cmd := startDaemonForChaosTest(t, bdExe, ws, dbPath)
    // Best-effort kill in case the assertions below bail out early.
    defer func() {
        if cmd.Process != nil && (cmd.ProcessState == nil || !cmd.ProcessState.Exited()) {
            _ = cmd.Process.Kill()
            _, _ = cmd.Process.Wait()
        }
    }()
    // Corrupt the DB.
    if err := os.WriteFile(dbPath, []byte("not a database"), 0644); err != nil {
        t.Fatalf("corrupt db: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
        t.Fatalf("bd doctor --fix failed: %v", err)
    }
    // Ensure we can cleanly stop the daemon afterwards (repair shouldn't wedge it).
    if cmd.Process != nil {
        _ = cmd.Process.Kill()
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        select {
        case <-time.After(3 * time.Second):
            t.Fatalf("expected daemon to exit when killed")
        case <-done:
            // ok
        }
    }
}
// TestDoctorRepair_JSONLIntegrity_MalformedLine_ReexportFromDB appends a
// malformed line to the JSONL export (leaving the database intact) and
// verifies that `bd doctor --fix` regenerates the JSONL from the database,
// dropping the corrupt line.
func TestDoctorRepair_JSONLIntegrity_MalformedLine_ReexportFromDB(t *testing.T) {
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-chaos-jsonl-*")
    dbPath := filepath.Join(ws, ".beads", "beads.db")
    jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
    // Seed workspace with one issue and a clean export.
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
        t.Fatalf("bd export failed: %v", err)
    }
    // Corrupt JSONL (leave DB intact).
    f, err := os.OpenFile(jsonlPath, os.O_APPEND|os.O_WRONLY, 0644)
    if err != nil {
        t.Fatalf("open jsonl: %v", err)
    }
    if _, err := f.WriteString("{not json}\n"); err != nil {
        _ = f.Close()
        t.Fatalf("append corrupt jsonl: %v", err)
    }
    _ = f.Close()
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
        t.Fatalf("bd doctor --fix failed: %v", err)
    }
    // The regenerated file must no longer contain the malformed line.
    data, err := os.ReadFile(jsonlPath)
    if err != nil {
        t.Fatalf("read jsonl: %v", err)
    }
    if strings.Contains(string(data), "{not json}") {
        t.Fatalf("expected JSONL to be regenerated without corrupt line")
    }
}
// TestDoctorRepair_DatabaseIntegrity_DBWriteLocked_ImportFailsFast holds a
// write transaction open on the database in-process and verifies that
// `bd import` with a short BD_LOCK_TIMEOUT fails fast with a lock/busy error
// instead of hanging past the 5s context deadline.
func TestDoctorRepair_DatabaseIntegrity_DBWriteLocked_ImportFailsFast(t *testing.T) {
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-chaos-db-locked-*")
    dbPath := filepath.Join(ws, ".beads", "beads.db")
    jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
    // Seed workspace and export the JSONL we will try to import back.
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
        t.Fatalf("bd export failed: %v", err)
    }
    // Lock the DB for writes in-process.
    db, err := sql.Open("sqlite3", dbPath)
    if err != nil {
        t.Fatalf("open db: %v", err)
    }
    defer db.Close()
    tx, err := db.Begin()
    if err != nil {
        t.Fatalf("begin tx: %v", err)
    }
    // The uncommitted INSERT keeps the write lock held for the whole test.
    if _, err := tx.Exec("INSERT INTO issues (id, title, status) VALUES ('lock-test', 'Lock Test', 'open')"); err != nil {
        _ = tx.Rollback()
        t.Fatalf("insert lock row: %v", err)
    }
    defer func() { _ = tx.Rollback() }()
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    out, err := runBDWithEnv(ctx, bdExe, ws, dbPath, map[string]string{
        "BD_LOCK_TIMEOUT": "200ms",
    }, "import", "-i", jsonlPath, "--force", "--skip-existing", "--no-git-history")
    if err == nil {
        t.Fatalf("expected bd import to fail under DB write lock")
    }
    if ctx.Err() == context.DeadlineExceeded {
        t.Fatalf("import exceeded timeout (likely hung); output:\n%s", out)
    }
    low := strings.ToLower(out)
    if !strings.Contains(low, "locked") && !strings.Contains(low, "busy") && !strings.Contains(low, "timeout") {
        t.Fatalf("expected lock/busy/timeout error, got:\n%s", out)
    }
}
// TestDoctorRepair_CorruptDatabase_ReadOnlyBeadsDir_PermissionsFixMakesWritable
// corrupts the database inside a read-only .beads directory and verifies that
// doctor's permissions auto-fix restores write access so the repair succeeds.
func TestDoctorRepair_CorruptDatabase_ReadOnlyBeadsDir_PermissionsFixMakesWritable(t *testing.T) {
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-chaos-readonly-*")
    beadsDir := filepath.Join(ws, ".beads")
    dbPath := filepath.Join(beadsDir, "beads.db")
    jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
    // Seed workspace and JSONL recovery source.
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
        t.Fatalf("bd export failed: %v", err)
    }
    // Corrupt the DB.
    if err := os.Truncate(dbPath, 64); err != nil {
        t.Fatalf("truncate db: %v", err)
    }
    // Make .beads read-only; the Permissions fix should make it writable again.
    if err := os.Chmod(beadsDir, 0555); err != nil {
        t.Fatalf("chmod beads dir: %v", err)
    }
    // Restore permissions so t.TempDir-style cleanup can remove the tree.
    t.Cleanup(func() { _ = os.Chmod(beadsDir, 0755) })
    if out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes"); err != nil {
        t.Fatalf("expected bd doctor --fix to succeed (permissions auto-fix), got: %v\n%s", err, out)
    }
    info, err := os.Stat(beadsDir)
    if err != nil {
        t.Fatalf("stat beads dir: %v", err)
    }
    // Owner-write bit must be back after the fix.
    if info.Mode().Perm()&0200 == 0 {
        t.Fatalf("expected .beads to be writable after permissions fix, mode=%v", info.Mode().Perm())
    }
}
// startDaemonForChaosTest launches a foreground bd daemon for the workspace
// and blocks until its unix socket appears (up to 8s). On success the
// returned *exec.Cmd is still running and the caller owns its lifecycle; on
// timeout the daemon is killed and the test fails.
func startDaemonForChaosTest(t *testing.T, bdExe, ws, dbPath string) *exec.Cmd {
    t.Helper()
    cmd := exec.Command(bdExe, "--db", dbPath, "daemon", "--start", "--foreground", "--local", "--interval", "10m")
    cmd.Dir = ws
    // Capture output only for the failure diagnostics below.
    var stdout, stderr bytes.Buffer
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr
    // Inherit environment, but explicitly ensure daemon mode is allowed.
    env := make([]string, 0, len(os.Environ())+1)
    for _, e := range os.Environ() {
        if strings.HasPrefix(e, "BEADS_NO_DAEMON=") {
            continue
        }
        env = append(env, e)
    }
    cmd.Env = env
    if err := cmd.Start(); err != nil {
        t.Fatalf("start daemon: %v", err)
    }
    // Wait for socket to appear.
    sock := filepath.Join(ws, ".beads", "bd.sock")
    deadline := time.Now().Add(8 * time.Second)
    for time.Now().Before(deadline) {
        if _, err := os.Stat(sock); err == nil {
            // Put the process back into the caller's control.
            cmd.Stdout = io.Discard
            cmd.Stderr = io.Discard
            return cmd
        }
        time.Sleep(50 * time.Millisecond)
    }
    _ = cmd.Process.Kill()
    _ = cmd.Wait()
    t.Fatalf("daemon failed to start (no socket: %s)\nstdout:\n%s\nstderr:\n%s", sock, stdout.String(), stderr.String())
    return nil
}
// runBDWithEnv runs the bd binary against a specific database with extra
// environment variables layered on top of the inherited environment, under
// the supplied context. It returns combined stdout+stderr and the exec error.
func runBDWithEnv(ctx context.Context, exe, dir, dbPath string, env map[string]string, args ...string) (string, error) {
    argv := make([]string, 0, len(args)+3)
    argv = append(argv, "--db", dbPath)
    // "init" must run without --no-daemon; every other subcommand gets it.
    if len(args) > 0 && args[0] != "init" {
        argv = append(argv, "--no-daemon")
    }
    argv = append(argv, args...)

    cmd := exec.CommandContext(ctx, exe, argv...)
    cmd.Dir = dir
    environ := append(os.Environ(),
        "BEADS_NO_DAEMON=1",
        "BEADS_DIR="+filepath.Join(dir, ".beads"),
    )
    for key, val := range env {
        environ = append(environ, key+"="+val)
    }
    cmd.Env = environ

    combined, err := cmd.CombinedOutput()
    return string(combined), err
}

View File

@@ -1,151 +0,0 @@
package main
import (
"encoding/json"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
)
// buildBDForTest compiles the bd binary from the current package into a
// per-test temp directory and returns the executable path. The binary is
// removed automatically when the test's temp dir is cleaned up.
func buildBDForTest(t *testing.T) string {
    t.Helper()
    name := "bd"
    if runtime.GOOS == "windows" {
        name = "bd.exe"
    }
    exe := filepath.Join(t.TempDir(), name)
    if out, err := exec.Command("go", "build", "-o", exe, ".").CombinedOutput(); err != nil {
        t.Fatalf("go build failed: %v\n%s", err, string(out))
    }
    return exe
}
// mkTmpDirInTmp creates a temporary directory under /tmp, falling back to
// the platform default temp root when /tmp is unavailable (e.g. Windows).
// The directory is removed via t.Cleanup when the test finishes.
func mkTmpDirInTmp(t *testing.T, prefix string) string {
    t.Helper()
    dir, err := os.MkdirTemp("/tmp", prefix)
    if err != nil {
        // Fallback for platforms without /tmp (e.g. Windows).
        if dir, err = os.MkdirTemp("", prefix); err != nil {
            t.Fatalf("failed to create temp dir: %v", err)
        }
    }
    t.Cleanup(func() { _ = os.RemoveAll(dir) })
    return dir
}
// runBDSideDB invokes the bd executable against an explicit --db path with
// the daemon disabled via the environment. Every subcommand except "init"
// additionally gets --no-daemon. Returns combined stdout+stderr plus the
// exec error.
func runBDSideDB(t *testing.T, exe, dir, dbPath string, args ...string) (string, error) {
    t.Helper()
    argv := []string{"--db", dbPath}
    if len(args) > 0 && args[0] != "init" {
        argv = append(argv, "--no-daemon")
    }
    argv = append(argv, args...)
    cmd := exec.Command(exe, argv...)
    cmd.Dir = dir
    cmd.Env = append(os.Environ(),
        "BEADS_NO_DAEMON=1",
        "BEADS_DIR="+filepath.Join(dir, ".beads"),
    )
    raw, err := cmd.CombinedOutput()
    return string(raw), err
}
// TestDoctorRepair_CorruptDatabase_RebuildFromJSONL is the end-to-end repair
// test: truncate the SQLite file, confirm doctor reports a Database
// Integrity error, run `doctor --fix --yes`, then confirm doctor passes and
// the single seeded issue survived the rebuild from JSONL.
func TestDoctorRepair_CorruptDatabase_RebuildFromJSONL(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping slow repair test in short mode")
    }
    bdExe := buildBDForTest(t)
    ws := mkTmpDirInTmp(t, "bd-doctor-repair-*")
    dbPath := filepath.Join(ws, ".beads", "beads.db")
    jsonlPath := filepath.Join(ws, ".beads", "issues.jsonl")
    // Seed a workspace: init, one issue, and a JSONL export to rebuild from.
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "init", "--prefix", "chaos", "--quiet"); err != nil {
        t.Fatalf("bd init failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "create", "Chaos issue", "-p", "1"); err != nil {
        t.Fatalf("bd create failed: %v", err)
    }
    if _, err := runBDSideDB(t, bdExe, ws, dbPath, "export", "-o", jsonlPath, "--force"); err != nil {
        t.Fatalf("bd export failed: %v", err)
    }
    // Corrupt the SQLite file (truncate) and verify doctor reports an integrity error.
    if err := os.Truncate(dbPath, 128); err != nil {
        t.Fatalf("truncate db: %v", err)
    }
    out, err := runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--json")
    if err == nil {
        t.Fatalf("expected bd doctor to fail on corrupt db")
    }
    // Doctor may print human-readable text before the JSON payload; skip to it.
    jsonStart := strings.Index(out, "{")
    if jsonStart < 0 {
        t.Fatalf("doctor output missing JSON: %s", out)
    }
    var before doctorResult
    if err := json.Unmarshal([]byte(out[jsonStart:]), &before); err != nil {
        t.Fatalf("unmarshal doctor json: %v\n%s", err, out)
    }
    var foundIntegrity bool
    for _, c := range before.Checks {
        if c.Name == "Database Integrity" {
            foundIntegrity = true
            if c.Status != statusError {
                t.Fatalf("Database Integrity status=%q want %q", c.Status, statusError)
            }
        }
    }
    if !foundIntegrity {
        t.Fatalf("Database Integrity check not found")
    }
    // Attempt auto-repair.
    out, err = runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--fix", "--yes")
    if err != nil {
        t.Fatalf("bd doctor --fix failed: %v\n%s", err, out)
    }
    // Doctor should now pass.
    out, err = runBDSideDB(t, bdExe, ws, dbPath, "doctor", "--json")
    if err != nil {
        t.Fatalf("bd doctor after fix failed: %v\n%s", err, out)
    }
    jsonStart = strings.Index(out, "{")
    if jsonStart < 0 {
        t.Fatalf("doctor output missing JSON: %s", out)
    }
    var after doctorResult
    if err := json.Unmarshal([]byte(out[jsonStart:]), &after); err != nil {
        t.Fatalf("unmarshal doctor json: %v\n%s", err, out)
    }
    if !after.OverallOK {
        t.Fatalf("expected overall_ok=true after repair")
    }
    // Data should still be present.
    out, err = runBDSideDB(t, bdExe, ws, dbPath, "list", "--json")
    if err != nil {
        t.Fatalf("bd list failed after repair: %v\n%s", err, out)
    }
    jsonStart = strings.Index(out, "[")
    if jsonStart < 0 {
        t.Fatalf("list output missing JSON array: %s", out)
    }
    var issues []map[string]any
    if err := json.Unmarshal([]byte(out[jsonStart:]), &issues); err != nil {
        t.Fatalf("unmarshal list json: %v\n%s", err, out)
    }
    if len(issues) != 1 {
        t.Fatalf("expected 1 issue after repair, got %d", len(issues))
    }
}

View File

@@ -156,7 +156,7 @@ Examples:
_ = daemonClient.Close()
daemonClient = nil
}
// Note: We used to check database file timestamps here, but WAL files
// get created when opening the DB, making timestamp checks unreliable.
// Instead, we check issue counts after loading (see below).
@@ -168,7 +168,7 @@ Examples:
fmt.Fprintf(os.Stderr, "Error: no database path found\n")
os.Exit(1)
}
store, err = sqlite.NewWithTimeout(rootCtx, dbPath, lockTimeout)
store, err = sqlite.New(rootCtx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
@@ -302,20 +302,20 @@ Examples:
// Safety check: prevent exporting stale database that would lose issues
if output != "" && !force {
debug.Logf("Debug: checking staleness - output=%s, force=%v\n", output, force)
// Read existing JSONL to get issue IDs
jsonlIDs, err := getIssueIDsFromJSONL(output)
if err != nil && !os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "Warning: failed to read existing JSONL for staleness check: %v\n", err)
}
if err == nil && len(jsonlIDs) > 0 {
// Build set of DB issue IDs
dbIDs := make(map[string]bool)
for _, issue := range issues {
dbIDs[issue.ID] = true
}
// Check if JSONL has any issues that DB doesn't have
var missingIDs []string
for id := range jsonlIDs {
@@ -323,17 +323,17 @@ Examples:
missingIDs = append(missingIDs, id)
}
}
debug.Logf("Debug: JSONL has %d issues, DB has %d issues, missing %d\n",
debug.Logf("Debug: JSONL has %d issues, DB has %d issues, missing %d\n",
len(jsonlIDs), len(issues), len(missingIDs))
if len(missingIDs) > 0 {
slices.Sort(missingIDs)
fmt.Fprintf(os.Stderr, "Error: refusing to export stale database that would lose issues\n")
fmt.Fprintf(os.Stderr, " Database has %d issues\n", len(issues))
fmt.Fprintf(os.Stderr, " JSONL has %d issues\n", len(jsonlIDs))
fmt.Fprintf(os.Stderr, " Export would lose %d issue(s):\n", len(missingIDs))
// Show first 10 missing issues
showCount := len(missingIDs)
if showCount > 10 {
@@ -345,7 +345,7 @@ Examples:
if len(missingIDs) > 10 {
fmt.Fprintf(os.Stderr, " ... and %d more\n", len(missingIDs)-10)
}
fmt.Fprintf(os.Stderr, "\n")
fmt.Fprintf(os.Stderr, "This usually means:\n")
fmt.Fprintf(os.Stderr, " 1. You need to run 'bd import -i %s' to sync the latest changes\n", output)
@@ -362,7 +362,7 @@ Examples:
// Wisps exist only in SQLite and are shared via .beads/redirect, not JSONL.
filtered := make([]*types.Issue, 0, len(issues))
for _, issue := range issues {
if !issue.Ephemeral {
if !issue.Wisp {
filtered = append(filtered, issue)
}
}
@@ -434,8 +434,8 @@ Examples:
skippedCount := 0
for _, issue := range issues {
if err := encoder.Encode(issue); err != nil {
fmt.Fprintf(os.Stderr, "Error encoding issue %s: %v\n", issue.ID, err)
os.Exit(1)
fmt.Fprintf(os.Stderr, "Error encoding issue %s: %v\n", issue.ID, err)
os.Exit(1)
}
exportedIDs = append(exportedIDs, issue.ID)
@@ -495,19 +495,19 @@ Examples:
}
}
// Verify JSONL file integrity after export
actualCount, err := countIssuesInJSONL(finalPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Export verification failed: %v\n", err)
os.Exit(1)
}
if actualCount != len(exportedIDs) {
fmt.Fprintf(os.Stderr, "Error: Export verification failed\n")
fmt.Fprintf(os.Stderr, " Expected: %d issues\n", len(exportedIDs))
fmt.Fprintf(os.Stderr, " JSONL file: %d lines\n", actualCount)
fmt.Fprintf(os.Stderr, " Mismatch indicates export failed to write all issues\n")
os.Exit(1)
}
// Verify JSONL file integrity after export
actualCount, err := countIssuesInJSONL(finalPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: Export verification failed: %v\n", err)
os.Exit(1)
}
if actualCount != len(exportedIDs) {
fmt.Fprintf(os.Stderr, "Error: Export verification failed\n")
fmt.Fprintf(os.Stderr, " Expected: %d issues\n", len(exportedIDs))
fmt.Fprintf(os.Stderr, " JSONL file: %d lines\n", actualCount)
fmt.Fprintf(os.Stderr, " Mismatch indicates export failed to write all issues\n")
os.Exit(1)
}
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
// Only do this when exporting to default JSONL path (not arbitrary outputs)
@@ -520,9 +520,9 @@ Examples:
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
}
}
}
}
// Output statistics if JSON format requested
// Output statistics if JSON format requested
if jsonOutput {
stats := map[string]interface{}{
"success": true,

View File

@@ -1,15 +1,12 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/BurntSushi/toml"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/formula"
"github.com/steveyegge/beads/internal/ui"
@@ -21,8 +18,8 @@ var formulaCmd = &cobra.Command{
Short: "Manage workflow formulas",
Long: `Manage workflow formulas - the source layer for molecule templates.
Formulas are YAML/JSON files that define workflows with composition rules.
They are "cooked" into proto beads which can then be poured or wisped.
Formulas are JSON files (.formula.json) that define workflows with composition rules.
They are "cooked" into ephemeral protos which can then be poured or wisped.
The Rig → Cook → Run lifecycle:
- Rig: Compose formulas (extends, compose)
@@ -366,7 +363,7 @@ func getFormulaSearchPaths() []string {
return paths
}
// scanFormulaDir scans a directory for formula files (both TOML and JSON).
// scanFormulaDir scans a directory for formula files.
func scanFormulaDir(dir string) ([]*formula.Formula, error) {
entries, err := os.ReadDir(dir)
if err != nil {
@@ -380,13 +377,11 @@ func scanFormulaDir(dir string) ([]*formula.Formula, error) {
if entry.IsDir() {
continue
}
// Support both .formula.toml and .formula.json
name := entry.Name()
if !strings.HasSuffix(name, formula.FormulaExtTOML) && !strings.HasSuffix(name, formula.FormulaExtJSON) {
if !strings.HasSuffix(entry.Name(), formula.FormulaExt) {
continue
}
path := filepath.Join(dir, name)
path := filepath.Join(dir, entry.Name())
f, err := parser.ParseFile(path)
if err != nil {
continue // Skip invalid formulas
@@ -476,297 +471,10 @@ func printFormulaStepsTree(steps []*formula.Step, indent string) {
}
}
// formulaConvertCmd converts JSON formulas to TOML.
// Dispatches to runFormulaConvert; flags are registered in init.
var formulaConvertCmd = &cobra.Command{
    Use:   "convert <formula-name|path> [--all]",
    Short: "Convert formula from JSON to TOML",
    Long: `Convert formula files from JSON to TOML format.

TOML format provides better ergonomics:
- Multi-line strings without \n escaping
- Human-readable diffs
- Comments allowed

The convert command reads a .formula.json file and outputs .formula.toml.
The original JSON file is preserved (use --delete to remove it).

Examples:
  bd formula convert shiny              # Convert shiny.formula.json to .toml
  bd formula convert ./my.formula.json  # Convert specific file
  bd formula convert --all              # Convert all JSON formulas
  bd formula convert shiny --delete     # Convert and remove JSON file
  bd formula convert shiny --stdout     # Print TOML to stdout`,
    Run: runFormulaConvert,
}
// Flag storage for `bd formula convert`.
var (
    convertAll    bool // --all: convert every JSON formula on the search paths
    convertDelete bool // --delete: remove the source JSON after a successful conversion
    convertStdout bool // --stdout: print the TOML instead of writing a file
)
// runFormulaConvert implements `bd formula convert`: it resolves the JSON
// formula (by direct path or by name via the search paths), converts it to
// TOML, and either prints the result (--stdout) or writes a sibling
// .formula.toml file, optionally deleting the JSON source (--delete).
// Exits the process with status 1 on any error.
func runFormulaConvert(cmd *cobra.Command, args []string) {
    if convertAll {
        convertAllFormulas()
        return
    }
    if len(args) == 0 {
        fmt.Fprintf(os.Stderr, "Error: formula name or path required\n")
        fmt.Fprintf(os.Stderr, "Usage: bd formula convert <name|path> [--all]\n")
        os.Exit(1)
    }
    name := args[0]
    // Determine the JSON file path
    var jsonPath string
    if strings.HasSuffix(name, formula.FormulaExtJSON) {
        // Direct path provided
        jsonPath = name
    } else if strings.HasSuffix(name, formula.FormulaExtTOML) {
        fmt.Fprintf(os.Stderr, "Error: %s is already a TOML file\n", name)
        os.Exit(1)
    } else {
        // Search for the formula in search paths
        jsonPath = findFormulaJSON(name)
        if jsonPath == "" {
            fmt.Fprintf(os.Stderr, "Error: JSON formula %q not found\n", name)
            fmt.Fprintf(os.Stderr, "\nSearch paths:\n")
            for _, p := range getFormulaSearchPaths() {
                fmt.Fprintf(os.Stderr, "  %s\n", p)
            }
            os.Exit(1)
        }
    }
    // Parse the JSON file
    parser := formula.NewParser()
    f, err := parser.ParseFile(jsonPath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error parsing %s: %v\n", jsonPath, err)
        os.Exit(1)
    }
    // Convert to TOML
    tomlData, err := formulaToTOML(f)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error converting to TOML: %v\n", err)
        os.Exit(1)
    }
    if convertStdout {
        fmt.Print(string(tomlData))
        return
    }
    // Determine output path: swap the .formula.json suffix for .formula.toml.
    tomlPath := strings.TrimSuffix(jsonPath, formula.FormulaExtJSON) + formula.FormulaExtTOML
    // Write the TOML file
    if err := os.WriteFile(tomlPath, tomlData, 0600); err != nil {
        fmt.Fprintf(os.Stderr, "Error writing %s: %v\n", tomlPath, err)
        os.Exit(1)
    }
    fmt.Printf("✓ Converted: %s\n", tomlPath)
    if convertDelete {
        // Deletion failure is non-fatal: the conversion already succeeded.
        if err := os.Remove(jsonPath); err != nil {
            fmt.Fprintf(os.Stderr, "Warning: could not delete %s: %v\n", jsonPath, err)
        } else {
            fmt.Printf("✓ Deleted: %s\n", jsonPath)
        }
    }
}
// convertAllFormulas converts every .formula.json found on the formula
// search paths to .formula.toml, skipping files whose TOML counterpart
// already exists. Per-file failures are reported and counted but do not
// abort the batch; a summary line is printed at the end.
func convertAllFormulas() {
    converted := 0
    errors := 0
    for _, dir := range getFormulaSearchPaths() {
        entries, err := os.ReadDir(dir)
        if err != nil {
            // Unreadable search path: silently skip, matching lookup behavior.
            continue
        }
        parser := formula.NewParser(dir)
        for _, entry := range entries {
            if entry.IsDir() {
                continue
            }
            if !strings.HasSuffix(entry.Name(), formula.FormulaExtJSON) {
                continue
            }
            jsonPath := filepath.Join(dir, entry.Name())
            tomlPath := strings.TrimSuffix(jsonPath, formula.FormulaExtJSON) + formula.FormulaExtTOML
            // Skip if TOML already exists
            if _, err := os.Stat(tomlPath); err == nil {
                fmt.Printf("⏭ Skipped (TOML exists): %s\n", entry.Name())
                continue
            }
            f, err := parser.ParseFile(jsonPath)
            if err != nil {
                fmt.Fprintf(os.Stderr, "✗ Error parsing %s: %v\n", jsonPath, err)
                errors++
                continue
            }
            tomlData, err := formulaToTOML(f)
            if err != nil {
                fmt.Fprintf(os.Stderr, "✗ Error converting %s: %v\n", jsonPath, err)
                errors++
                continue
            }
            if err := os.WriteFile(tomlPath, tomlData, 0600); err != nil {
                fmt.Fprintf(os.Stderr, "✗ Error writing %s: %v\n", tomlPath, err)
                errors++
                continue
            }
            fmt.Printf("✓ Converted: %s\n", tomlPath)
            converted++
            if convertDelete {
                // Best-effort delete; a failure here does not count as an error.
                if err := os.Remove(jsonPath); err != nil {
                    fmt.Fprintf(os.Stderr, "Warning: could not delete %s: %v\n", jsonPath, err)
                }
            }
        }
    }
    fmt.Printf("\nConverted %d formulas", converted)
    if errors > 0 {
        fmt.Printf(" (%d errors)", errors)
    }
    fmt.Println()
}
// findFormulaJSON searches for a JSON formula file by name.
func findFormulaJSON(name string) string {
for _, dir := range getFormulaSearchPaths() {
path := filepath.Join(dir, name+formula.FormulaExtJSON)
if _, err := os.Stat(path); err == nil {
return path
}
}
return ""
}
// formulaToTOML converts a Formula to TOML bytes.
// Uses a custom structure optimized for TOML readability.
//
// It re-reads the formula's original JSON file (f.Source) rather than
// serializing the parsed struct, because the Formula struct loses some
// ordering/formatting. Returns an error when the formula has no source path
// or the source cannot be read/parsed/encoded.
func formulaToTOML(f *formula.Formula) ([]byte, error) {
    // We need to re-read the original JSON to get the raw structure
    // because the Formula struct loses some ordering/formatting
    if f.Source == "" {
        return nil, fmt.Errorf("formula has no source path")
    }
    // Read the original JSON
    jsonData, err := os.ReadFile(f.Source)
    if err != nil {
        return nil, fmt.Errorf("reading source: %w", err)
    }
    // Parse into a map to preserve structure
    var raw map[string]interface{}
    if err := json.Unmarshal(jsonData, &raw); err != nil {
        return nil, fmt.Errorf("parsing JSON: %w", err)
    }
    // Fix float64 to int for known integer fields
    // (encoding/json decodes all numbers as float64; TOML needs real ints).
    fixIntegerFields(raw)
    // Encode to TOML
    var buf bytes.Buffer
    encoder := toml.NewEncoder(&buf)
    encoder.Indent = ""
    if err := encoder.Encode(raw); err != nil {
        return nil, fmt.Errorf("encoding TOML: %w", err)
    }
    // Post-process to convert escaped \n in strings to multi-line strings
    result := convertToMultiLineStrings(buf.String())
    return []byte(result), nil
}
// convertToMultiLineStrings post-processes TOML to use multi-line strings
// where strings contain newlines. This improves readability for descriptions.
//
// Only lines of the form `description = "..."` are rewritten; all other
// lines pass through untouched. Escaped newlines (\n) in the value become
// real newlines inside a TOML multi-line basic string ("""..."""). Escaped
// backslashes (\\) are preserved, so a literal backslash followed by 'n' in
// the original text is NOT corrupted into a line break (a naive
// ReplaceAll(value, "\\n", "\n") would consume the second half of "\\" + "n").
func convertToMultiLineStrings(input string) string {
	lines := strings.Split(input, "\n")
	var result []string
	for _, line := range lines {
		// Check if this line has a string with escaped newlines
		if strings.Contains(line, "\\n") {
			// Find the key = "..." pattern
			eqIdx := strings.Index(line, " = \"")
			if eqIdx > 0 && strings.HasSuffix(line, "\"") {
				key := strings.TrimSpace(line[:eqIdx])
				// Only convert description fields
				if key == "description" {
					// Extract the value (without quotes) and expand \n
					// escapes without disturbing \\ escapes.
					value := unescapeNewlines(line[eqIdx+4 : len(line)-1])
					// Use multi-line string syntax
					result = append(result, fmt.Sprintf("%s = \"\"\"\n%s\"\"\"", key, value))
					continue
				}
			}
		}
		result = append(result, line)
	}
	return strings.Join(result, "\n")
}

// unescapeNewlines turns each \n escape into a real newline while leaving
// \\ (an escaped backslash) intact, so "a\\nb" keeps its literal backslash
// instead of being split across lines. All other bytes are copied verbatim.
func unescapeNewlines(s string) string {
	var b strings.Builder
	b.Grow(len(s))
	for i := 0; i < len(s); i++ {
		if s[i] == '\\' && i+1 < len(s) {
			switch s[i+1] {
			case 'n':
				// \n escape -> real newline.
				b.WriteByte('\n')
				i++
				continue
			case '\\':
				// \\ escape -> keep both bytes so the backslash stays escaped.
				b.WriteString(`\\`)
				i++
				continue
			}
		}
		b.WriteByte(s[i])
	}
	return b.String()
}
// fixIntegerFields recursively fixes float64 values that should be integers.
// encoding/json decodes every JSON number as float64, but TOML distinguishes
// ints from floats, so known integer-valued keys are coerced to int64.
// Nested maps and maps inside arrays are visited recursively.
func fixIntegerFields(m map[string]interface{}) {
	isIntField := func(key string) bool {
		switch key {
		case "version", "priority", "count", "max":
			return true
		}
		return false
	}
	for key, value := range m {
		switch typed := value.(type) {
		case float64:
			// Coerce only whole numbers on known integer keys; fractional
			// values (and unknown keys) are left untouched.
			if isIntField(key) && typed == float64(int64(typed)) {
				m[key] = int64(typed)
			}
		case map[string]interface{}:
			fixIntegerFields(typed)
		case []interface{}:
			for _, elem := range typed {
				if child, ok := elem.(map[string]interface{}); ok {
					fixIntegerFields(child)
				}
			}
		}
	}
}
// init wires up the formula command tree: per-subcommand flags first, then
// subcommand registration, then the top-level formula command on root.
func init() {
	formulaListCmd.Flags().String("type", "", "Filter by type (workflow, expansion, aspect)")
	// Flags backing the JSON->TOML conversion workflow (bd formula convert).
	formulaConvertCmd.Flags().BoolVar(&convertAll, "all", false, "Convert all JSON formulas")
	formulaConvertCmd.Flags().BoolVar(&convertDelete, "delete", false, "Delete JSON file after conversion")
	formulaConvertCmd.Flags().BoolVar(&convertStdout, "stdout", false, "Print TOML to stdout instead of file")
	formulaCmd.AddCommand(formulaListCmd)
	formulaCmd.AddCommand(formulaShowCmd)
	formulaCmd.AddCommand(formulaConvertCmd)
	rootCmd.AddCommand(formulaCmd)
}

View File

@@ -1,17 +1,14 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
@@ -104,16 +101,6 @@ Examples:
}
}
// For timer gates, the await_id IS the duration - use it as timeout if not explicitly set
if awaitType == "timer" && timeout == 0 {
var err error
timeout, err = time.ParseDuration(awaitID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: invalid timer duration %q: %v\n", awaitID, err)
os.Exit(1)
}
}
// Generate title if not provided
if title == "" {
title = fmt.Sprintf("Gate: %s:%s", awaitType, awaitID)
@@ -157,7 +144,7 @@ Examples:
Status: types.StatusOpen,
Priority: 1, // Gates are typically high priority
// Assignee left empty - orchestrator decides who processes gates
Ephemeral: true, // Gates are wisps (ephemeral)
Wisp: true, // Gates are wisps (ephemeral)
AwaitType: awaitType,
AwaitID: awaitID,
Timeout: timeout,
@@ -447,129 +434,6 @@ var gateCloseCmd = &cobra.Command{
},
}
// gateApproveCmd implements "bd gate approve <gate-id>": the explicit
// approval that closes a human gate (await type "human") and notifies any
// registered waiters. It prefers the daemon RPC path and falls back to
// direct store access when no daemon is available.
var gateApproveCmd = &cobra.Command{
	Use:   "approve <gate-id>",
	Short: "Approve a human gate",
	Long: `Approve a human gate, closing it and notifying waiters.
Human gates (created with --await human:<prompt>) require explicit approval
to close. This is the command that provides that approval.
Example:
bd gate create --await human:approve-deploy --notify gastown/witness
# ... later, when ready to approve ...
bd gate approve <gate-id>
bd gate approve <gate-id> --comment "Reviewed and approved by Steve"`,
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		CheckReadonly("gate approve")
		ctx := rootCtx
		comment, _ := cmd.Flags().GetString("comment")
		var closedGate *types.Issue
		var gateID string
		// Try daemon first, fall back to direct store access
		if daemonClient != nil {
			// First get the gate to verify it's a human gate
			showResp, err := daemonClient.GateShow(&rpc.GateShowArgs{ID: args[0]})
			if err != nil {
				FatalError("gate approve: %v", err)
			}
			var gate types.Issue
			if err := json.Unmarshal(showResp.Data, &gate); err != nil {
				FatalError("failed to parse gate: %v", err)
			}
			if gate.AwaitType != "human" {
				fmt.Fprintf(os.Stderr, "Error: %s is not a human gate (type: %s:%s)\n", args[0], gate.AwaitType, gate.AwaitID)
				os.Exit(1)
			}
			if gate.Status == types.StatusClosed {
				fmt.Fprintf(os.Stderr, "Error: gate %s is already closed\n", args[0])
				os.Exit(1)
			}
			// Close with approval reason
			reason := fmt.Sprintf("Human approval granted: %s", gate.AwaitID)
			if comment != "" {
				reason = fmt.Sprintf("Human approval granted: %s (%s)", gate.AwaitID, comment)
			}
			resp, err := daemonClient.GateClose(&rpc.GateCloseArgs{
				ID:     args[0],
				Reason: reason,
			})
			if err != nil {
				FatalError("gate approve: %v", err)
			}
			if err := json.Unmarshal(resp.Data, &closedGate); err != nil {
				FatalError("failed to parse gate: %v", err)
			}
			gateID = closedGate.ID
		} else if store != nil {
			var err error
			// Allow partial IDs in direct-store mode.
			gateID, err = utils.ResolvePartialID(ctx, store, args[0])
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error: %v\n", err)
				os.Exit(1)
			}
			// Get gate and verify it's a human gate
			gate, err := store.GetIssue(ctx, gateID)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error: %v\n", err)
				os.Exit(1)
			}
			if gate == nil {
				fmt.Fprintf(os.Stderr, "Error: gate %s not found\n", gateID)
				os.Exit(1)
			}
			if gate.IssueType != types.TypeGate {
				fmt.Fprintf(os.Stderr, "Error: %s is not a gate (type: %s)\n", gateID, gate.IssueType)
				os.Exit(1)
			}
			if gate.AwaitType != "human" {
				fmt.Fprintf(os.Stderr, "Error: %s is not a human gate (type: %s:%s)\n", gateID, gate.AwaitType, gate.AwaitID)
				os.Exit(1)
			}
			if gate.Status == types.StatusClosed {
				fmt.Fprintf(os.Stderr, "Error: gate %s is already closed\n", gateID)
				os.Exit(1)
			}
			// Close with approval reason
			reason := fmt.Sprintf("Human approval granted: %s", gate.AwaitID)
			if comment != "" {
				reason = fmt.Sprintf("Human approval granted: %s (%s)", gate.AwaitID, comment)
			}
			if err := store.CloseIssue(ctx, gateID, reason, actor); err != nil {
				fmt.Fprintf(os.Stderr, "Error closing gate: %v\n", err)
				os.Exit(1)
			}
			// Schedule a JSONL flush so the close is persisted.
			markDirtyAndScheduleFlush()
			// Best-effort re-read for display; errors are ignored since the
			// close already succeeded.
			closedGate, _ = store.GetIssue(ctx, gateID)
		} else {
			fmt.Fprintf(os.Stderr, "Error: no database connection\n")
			os.Exit(1)
		}
		if jsonOutput {
			outputJSON(closedGate)
			return
		}
		fmt.Printf("%s Approved gate: %s\n", ui.RenderPass("✓"), gateID)
		if closedGate != nil && closedGate.CloseReason != "" {
			fmt.Printf(" %s\n", closedGate.CloseReason)
		}
		if closedGate != nil && len(closedGate.Waiters) > 0 {
			fmt.Printf(" Waiters notified: %s\n", strings.Join(closedGate.Waiters, ", "))
		}
	},
}
var gateWaitCmd = &cobra.Command{
Use: "wait <gate-id>",
Short: "Add a waiter to an existing gate",
@@ -700,314 +564,7 @@ var gateWaitCmd = &cobra.Command{
},
}
// gateEvalCmd implements "bd gate eval": sweep all open gates and close the
// ones whose await condition is now satisfied. Human gates are reported but
// never auto-closed; mail gates need direct store access. Idempotent.
var gateEvalCmd = &cobra.Command{
	Use:   "eval",
	Short: "Evaluate pending gates and close elapsed ones",
	Long: `Evaluate all open gates and close those whose conditions are met.
Supported gate types:
- timer gates: closed when elapsed time exceeds timeout
- gh:run gates: closed when GitHub Actions run completes (requires gh CLI)
- gh:pr gates: closed when PR is merged/closed (requires gh CLI)
This command is idempotent and safe to run repeatedly.`,
	Run: func(cmd *cobra.Command, args []string) {
		CheckReadonly("gate eval")
		ctx := rootCtx
		dryRun, _ := cmd.Flags().GetBool("dry-run")
		var gates []*types.Issue
		// Get all open gates
		if daemonClient != nil {
			resp, err := daemonClient.GateList(&rpc.GateListArgs{All: false})
			if err != nil {
				FatalError("gate eval: %v", err)
			}
			if err := json.Unmarshal(resp.Data, &gates); err != nil {
				FatalError("failed to parse gates: %v", err)
			}
		} else if store != nil {
			gateType := types.TypeGate
			openStatus := types.StatusOpen
			filter := types.IssueFilter{
				IssueType: &gateType,
				Status:    &openStatus,
			}
			var err error
			gates, err = store.SearchIssues(ctx, "", filter)
			if err != nil {
				FatalError("listing gates: %v", err)
			}
		} else {
			FatalError("no database connection")
		}
		if len(gates) == 0 {
			if !jsonOutput {
				fmt.Println("No open gates to evaluate")
			}
			return
		}
		// Bucket results for the summary: closed (or would-close in dry-run),
		// unsupported types, and gates that cannot auto-close here.
		var closed []string
		var skipped []string
		var awaitingHuman []string
		var awaitingMail []string
		now := time.Now()
		for _, gate := range gates {
			var shouldClose bool
			var reason string
			switch gate.AwaitType {
			case "timer":
				shouldClose, reason = evalTimerGate(gate, now)
			case "gh:run":
				shouldClose, reason = evalGHRunGate(gate)
			case "gh:pr":
				shouldClose, reason = evalGHPRGate(gate)
			case "human":
				// Human gates require explicit approval via 'bd gate approve'
				awaitingHuman = append(awaitingHuman, gate.ID)
				continue
			case "mail":
				// Mail gates check for messages matching the pattern
				if store != nil {
					shouldClose, reason = evalMailGate(ctx, store, gate)
				} else {
					// Daemon mode - can't evaluate mail gates without store access
					awaitingMail = append(awaitingMail, gate.ID)
					continue
				}
			default:
				// Unsupported gate type - skip
				skipped = append(skipped, gate.ID)
				continue
			}
			if !shouldClose {
				continue
			}
			// Gate condition met - close it
			if dryRun {
				fmt.Printf("Would close gate %s (%s)\n", gate.ID, reason)
				closed = append(closed, gate.ID)
				continue
			}
			if daemonClient != nil {
				_, err := daemonClient.GateClose(&rpc.GateCloseArgs{
					ID:     gate.ID,
					Reason: reason,
				})
				if err != nil {
					// A single failure shouldn't abort the sweep; warn and move on.
					fmt.Fprintf(os.Stderr, "Warning: failed to close gate %s: %v\n", gate.ID, err)
					continue
				}
			} else if store != nil {
				if err := store.CloseIssue(ctx, gate.ID, reason, actor); err != nil {
					fmt.Fprintf(os.Stderr, "Warning: failed to close gate %s: %v\n", gate.ID, err)
					continue
				}
				markDirtyAndScheduleFlush()
			}
			closed = append(closed, gate.ID)
		}
		if jsonOutput {
			outputJSON(map[string]interface{}{
				"evaluated":      len(gates),
				"closed":         closed,
				"awaiting_human": awaitingHuman,
				"awaiting_mail":  awaitingMail,
				"skipped":        skipped,
			})
			return
		}
		if len(closed) == 0 {
			fmt.Printf("Evaluated %d gates, none ready to close\n", len(gates))
		} else {
			action := "Closed"
			if dryRun {
				action = "Would close"
			}
			fmt.Printf("%s %s %d gate(s)\n", ui.RenderPass("✓"), action, len(closed))
			for _, id := range closed {
				fmt.Printf(" %s\n", id)
			}
		}
		if len(awaitingHuman) > 0 {
			fmt.Printf("Awaiting human approval: %s\n", strings.Join(awaitingHuman, ", "))
			fmt.Printf(" Use 'bd gate approve <id>' to approve\n")
		}
		if len(awaitingMail) > 0 {
			fmt.Printf("Awaiting mail: %s\n", strings.Join(awaitingMail, ", "))
		}
	},
}
// evalTimerGate reports whether a timer gate's configured duration has
// elapsed as of now, returning a human-readable close reason when it has.
// Gates without a positive Timeout never fire.
func evalTimerGate(gate *types.Issue, now time.Time) (bool, string) {
	if gate.Timeout <= 0 {
		// No timeout set; nothing to evaluate.
		return false, ""
	}
	if now.Sub(gate.CreatedAt) < gate.Timeout {
		// Still counting down.
		return false, ""
	}
	return true, fmt.Sprintf("Timer elapsed (%v)", gate.Timeout)
}
// ghRunStatus represents the JSON output of `gh run view --json`
type ghRunStatus struct {
	Status     string `json:"status"`     // queued, in_progress, completed
	Conclusion string `json:"conclusion"` // success, failure, canceled, skipped, etc.
}
// evalGHRunGate checks whether the GitHub Actions run a gate is awaiting has
// finished, by shelling out to `gh run view <run_id> --json status,conclusion`.
// Any gh failure (CLI missing, network error, bad run ID) or parse error
// leaves the gate open; the gate only closes when status is "completed".
func evalGHRunGate(gate *types.Issue) (bool, string) {
	runID := gate.AwaitID
	if runID == "" {
		return false, ""
	}
	ghCmd := exec.Command("gh", "run", "view", runID, "--json", "status,conclusion") //nolint:gosec // runID is from trusted issue.AwaitID field
	raw, err := ghCmd.Output()
	if err != nil {
		// gh CLI failed - don't close the gate, just skip it this round.
		return false, ""
	}
	var run ghRunStatus
	if err := json.Unmarshal(raw, &run); err != nil {
		return false, ""
	}
	if run.Status != "completed" {
		// Still queued or in progress.
		return false, ""
	}
	// Run finished - include the conclusion in the reason when present.
	if run.Conclusion == "" {
		return true, fmt.Sprintf("GitHub Actions run %s completed", runID)
	}
	return true, fmt.Sprintf("GitHub Actions run %s: %s", runID, run.Conclusion)
}
// ghPRStatus represents the JSON output of `gh pr view --json`
type ghPRStatus struct {
	State    string `json:"state"`    // OPEN, CLOSED, MERGED
	MergedAt string `json:"mergedAt"` // non-empty if merged
}
// evalGHPRGate checks whether the GitHub PR a gate is awaiting has been
// merged or closed, via `gh pr view <pr_number> --json state,mergedAt`.
// Any gh failure or parse error leaves the gate open; the gate closes as
// soon as the PR is no longer OPEN, with a reason that distinguishes a
// merge from a close-without-merge.
func evalGHPRGate(gate *types.Issue) (bool, string) {
	prNumber := gate.AwaitID
	if prNumber == "" {
		return false, ""
	}
	ghCmd := exec.Command("gh", "pr", "view", prNumber, "--json", "state,mergedAt") //nolint:gosec // prNumber is from trusted issue.AwaitID field
	raw, err := ghCmd.Output()
	if err != nil {
		// gh CLI failed - don't close the gate, just skip it this round.
		return false, ""
	}
	var pr ghPRStatus
	if err := json.Unmarshal(raw, &pr); err != nil {
		return false, ""
	}
	if pr.State == "MERGED" {
		return true, fmt.Sprintf("PR #%s merged", prNumber)
	}
	if pr.State == "CLOSED" {
		return true, fmt.Sprintf("PR #%s closed without merge", prNumber)
	}
	// Still OPEN.
	return false, ""
}
// evalMailGate reports whether an open message exists that satisfies a mail
// gate. The gate's await_id is matched as a case-insensitive substring of
// message subjects; when the gate has waiters, only messages addressed
// (via Assignee) to one of those waiters count. Search errors leave the
// gate open.
func evalMailGate(ctx context.Context, store storage.Storage, gate *types.Issue) (bool, string) {
	if gate.AwaitID == "" {
		return false, ""
	}
	// Lowercase once for case-insensitive matching below.
	pattern := strings.ToLower(gate.AwaitID)
	msgType := types.TypeMessage
	openStatus := types.StatusOpen
	messages, err := store.SearchIssues(ctx, "", types.IssueFilter{
		IssueType: &msgType,
		Status:    &openStatus,
	})
	if err != nil {
		return false, ""
	}
	// Waiter set for O(1) recipient checks (empty set = any recipient).
	recipients := make(map[string]bool, len(gate.Waiters))
	for _, waiter := range gate.Waiters {
		recipients[waiter] = true
	}
	for _, msg := range messages {
		if !strings.Contains(strings.ToLower(msg.Title), pattern) {
			continue
		}
		// Messages use Assignee as the recipient field.
		if len(recipients) > 0 && !recipients[msg.Assignee] {
			continue
		}
		return true, fmt.Sprintf("Mail received: %s", msg.Title)
	}
	return false, ""
}
func init() {
// Gate eval flags
gateEvalCmd.Flags().Bool("dry-run", false, "Show what would be closed without actually closing")
gateEvalCmd.Flags().Bool("json", false, "Output JSON format")
// Gate create flags
gateCreateCmd.Flags().String("await", "", "Await spec: gh:run:<id>, gh:pr:<id>, timer:<duration>, human:<prompt>, mail:<pattern> (required)")
gateCreateCmd.Flags().String("timeout", "", "Timeout duration (e.g., 30m, 1h)")
@@ -1026,10 +583,6 @@ func init() {
gateCloseCmd.Flags().StringP("reason", "r", "", "Reason for closing")
gateCloseCmd.Flags().Bool("json", false, "Output JSON format")
// Gate approve flags
gateApproveCmd.Flags().String("comment", "", "Optional approval comment")
gateApproveCmd.Flags().Bool("json", false, "Output JSON format")
// Gate wait flags
gateWaitCmd.Flags().StringSlice("notify", nil, "Mail addresses to add as waiters (repeatable, required)")
gateWaitCmd.Flags().Bool("json", false, "Output JSON format")
@@ -1039,9 +592,7 @@ func init() {
gateCmd.AddCommand(gateShowCmd)
gateCmd.AddCommand(gateListCmd)
gateCmd.AddCommand(gateCloseCmd)
gateCmd.AddCommand(gateApproveCmd)
gateCmd.AddCommand(gateWaitCmd)
gateCmd.AddCommand(gateEvalCmd)
// Add gate command to root
rootCmd.AddCommand(gateCmd)

View File

@@ -26,36 +26,36 @@ func TestGitPullSyncIntegration(t *testing.T) {
// Create temp directory for test repositories
tempDir := t.TempDir()
// Create "remote" repository
remoteDir := filepath.Join(tempDir, "remote")
if err := os.MkdirAll(remoteDir, 0750); err != nil {
t.Fatalf("Failed to create remote dir: %v", err)
}
// Initialize remote git repo
runGitCmd(t, remoteDir, "init", "--bare", "-b", "master")
runGitCmd(t, remoteDir, "init", "--bare")
// Create "clone1" repository
clone1Dir := filepath.Join(tempDir, "clone1")
runGitCmd(t, tempDir, "clone", remoteDir, clone1Dir)
configureGit(t, clone1Dir)
// Initialize beads in clone1
clone1BeadsDir := filepath.Join(clone1Dir, ".beads")
if err := os.MkdirAll(clone1BeadsDir, 0750); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err)
}
clone1DBPath := filepath.Join(clone1BeadsDir, "test.db")
clone1Store := newTestStore(t, clone1DBPath)
defer clone1Store.Close()
ctx := context.Background()
if err := clone1Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Create and close an issue in clone1
issue := &types.Issue{
Title: "Test sync issue",
@@ -69,80 +69,80 @@ func TestGitPullSyncIntegration(t *testing.T) {
t.Fatalf("Failed to create issue: %v", err)
}
issueID := issue.ID
// Close the issue
if err := clone1Store.CloseIssue(ctx, issueID, "Test completed", "test-user"); err != nil {
t.Fatalf("Failed to close issue: %v", err)
}
// Export to JSONL
jsonlPath := filepath.Join(clone1BeadsDir, "issues.jsonl")
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
// Commit and push from clone1
runGitCmd(t, clone1Dir, "add", ".beads")
runGitCmd(t, clone1Dir, "commit", "-m", "Add closed issue")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Create "clone2" repository
clone2Dir := filepath.Join(tempDir, "clone2")
runGitCmd(t, tempDir, "clone", remoteDir, clone2Dir)
configureGit(t, clone2Dir)
// Initialize empty database in clone2
clone2BeadsDir := filepath.Join(clone2Dir, ".beads")
clone2DBPath := filepath.Join(clone2BeadsDir, "test.db")
clone2Store := newTestStore(t, clone2DBPath)
defer clone2Store.Close()
if err := clone2Store.SetMetadata(ctx, "issue_prefix", "test"); err != nil {
t.Fatalf("Failed to set prefix: %v", err)
}
// Import the existing JSONL (simulating initial sync)
clone2JSONLPath := filepath.Join(clone2BeadsDir, "issues.jsonl")
if err := importJSONLToStore(ctx, clone2Store, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import: %v", err)
}
// Verify issue exists and is closed
verifyIssueClosed(t, clone2Store, issueID)
// Note: We don't commit in clone2 - it stays clean as a read-only consumer
// Now test git pull scenario: Clone1 makes a change (update priority)
if err := clone1Store.UpdateIssue(ctx, issueID, map[string]interface{}{
"priority": 0,
}, "test-user"); err != nil {
t.Fatalf("Failed to update issue: %v", err)
}
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export after update: %v", err)
}
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Clone2 pulls the change
runGitCmd(t, clone2Dir, "pull")
// Test auto-import in non-daemon mode
t.Run("NonDaemonAutoImport", func(t *testing.T) {
// Use a temporary local store for this test
localStore := newTestStore(t, clone2DBPath)
defer localStore.Close()
// Manually import to simulate auto-import behavior
startTime := time.Now()
if err := importJSONLToStore(ctx, localStore, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to auto-import: %v", err)
}
elapsed := time.Since(startTime)
// Verify priority was updated
issue, err := localStore.GetIssue(ctx, issueID)
if err != nil {
@@ -151,13 +151,13 @@ func TestGitPullSyncIntegration(t *testing.T) {
if issue.Priority != 0 {
t.Errorf("Expected priority 0 after auto-import, got %d", issue.Priority)
}
// Verify performance: import should be fast
if elapsed > 100*time.Millisecond {
t.Logf("Info: import took %v", elapsed)
}
})
// Test bd sync --import-only command
t.Run("BdSyncCommand", func(t *testing.T) {
// Make another change in clone1 (change priority back to 1)
@@ -166,27 +166,27 @@ func TestGitPullSyncIntegration(t *testing.T) {
}, "test-user"); err != nil {
t.Fatalf("Failed to update issue: %v", err)
}
if err := exportIssuesToJSONL(ctx, clone1Store, jsonlPath); err != nil {
t.Fatalf("Failed to export: %v", err)
}
runGitCmd(t, clone1Dir, "add", ".beads/issues.jsonl")
runGitCmd(t, clone1Dir, "commit", "-m", "Update priority")
runGitCmd(t, clone1Dir, "push", "origin", "master")
// Clone2 pulls
runGitCmd(t, clone2Dir, "pull")
// Use a fresh store for import
syncStore := newTestStore(t, clone2DBPath)
defer syncStore.Close()
// Manually trigger import via in-process equivalent
if err := importJSONLToStore(ctx, syncStore, clone2DBPath, clone2JSONLPath); err != nil {
t.Fatalf("Failed to import via sync: %v", err)
}
// Verify priority was updated back to 1
issue, err := syncStore.GetIssue(ctx, issueID)
if err != nil {
@@ -214,7 +214,7 @@ func configureGit(t *testing.T, dir string) {
runGitCmd(t, dir, "config", "user.email", "test@example.com")
runGitCmd(t, dir, "config", "user.name", "Test User")
runGitCmd(t, dir, "config", "pull.rebase", "false")
// Create .gitignore to prevent test database files from being tracked
gitignorePath := filepath.Join(dir, ".gitignore")
gitignoreContent := `# Test database files
@@ -233,7 +233,7 @@ func exportIssuesToJSONL(ctx context.Context, store *sqlite.SQLiteStorage, jsonl
if err != nil {
return err
}
// Populate dependencies
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
@@ -244,20 +244,20 @@ func exportIssuesToJSONL(ctx context.Context, store *sqlite.SQLiteStorage, jsonl
labels, _ := store.GetLabels(ctx, issue.ID)
issue.Labels = labels
}
f, err := os.Create(jsonlPath)
if err != nil {
return err
}
defer f.Close()
encoder := json.NewEncoder(f)
for _, issue := range issues {
if err := encoder.Encode(issue); err != nil {
return err
}
}
return nil
}
@@ -266,7 +266,7 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
if err != nil {
return err
}
// Use the autoimport package's AutoImportIfNewer function
// For testing, we'll directly parse and import
var issues []*types.Issue
@@ -278,7 +278,7 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
}
issues = append(issues, &issue)
}
// Import each issue
for _, issue := range issues {
existing, _ := store.GetIssue(ctx, issue.ID)
@@ -298,12 +298,12 @@ func importJSONLToStore(ctx context.Context, store *sqlite.SQLiteStorage, dbPath
}
}
}
// Set last_import_time metadata so staleness check works
if err := store.SetMetadata(ctx, "last_import_time", time.Now().Format(time.RFC3339)); err != nil {
return err
}
return nil
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/rpc"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/ui"
"github.com/steveyegge/beads/internal/utils"
@@ -81,17 +80,6 @@ Colors indicate status:
os.Exit(1)
}
// If daemon is running but doesn't support this command, use direct storage
if daemonClient != nil && store == nil {
var err error
store, err = sqlite.New(ctx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
}
defer func() { _ = store.Close() }()
}
// Load the subgraph
subgraph, err := loadGraphSubgraph(ctx, store, issueID)
if err != nil {

View File

@@ -87,8 +87,8 @@ func runHook(cmd *cobra.Command, args []string) {
for _, issue := range issues {
phase := "mol"
if issue.Ephemeral {
phase = "ephemeral"
if issue.Wisp {
phase = "wisp"
}
fmt.Printf(" 📌 %s (%s) - %s\n", issue.ID, phase, issue.Status)
fmt.Printf(" %s\n", issue.Title)

View File

@@ -1,107 +0,0 @@
package main
import (
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// TestTouchDatabaseFile_UsesJSONLMtime verifies that TouchDatabaseFile bumps
// the database file's mtime to at least the JSONL file's mtime. The JSONL
// mtime is pushed 2s into the future so a naive touch-to-now would fail.
func TestTouchDatabaseFile_UsesJSONLMtime(t *testing.T) {
	tmp := t.TempDir()
	dbPath := filepath.Join(tmp, "beads.db")
	jsonlPath := filepath.Join(tmp, "issues.jsonl")
	if err := os.WriteFile(dbPath, []byte(""), 0o600); err != nil {
		t.Fatalf("WriteFile db: %v", err)
	}
	if err := os.WriteFile(jsonlPath, []byte("{}\n"), 0o600); err != nil {
		t.Fatalf("WriteFile jsonl: %v", err)
	}
	// Future timestamp makes the >= comparison below unambiguous.
	jsonlTime := time.Now().Add(2 * time.Second)
	if err := os.Chtimes(jsonlPath, jsonlTime, jsonlTime); err != nil {
		t.Fatalf("Chtimes jsonl: %v", err)
	}
	if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
		t.Fatalf("TouchDatabaseFile: %v", err)
	}
	info, err := os.Stat(dbPath)
	if err != nil {
		t.Fatalf("Stat db: %v", err)
	}
	if info.ModTime().Before(jsonlTime) {
		t.Fatalf("db mtime %v should be >= jsonl mtime %v", info.ModTime(), jsonlTime)
	}
}
// TestImportDetectPrefixFromIssues checks detectPrefixFromIssues: nil input
// yields "", and a mixed set where "test" is the most common ID prefix is
// expected to detect "test" (exact tie-breaking rule not visible here).
func TestImportDetectPrefixFromIssues(t *testing.T) {
	if detectPrefixFromIssues(nil) != "" {
		t.Fatalf("expected empty")
	}
	issues := []*types.Issue{
		{ID: "test-1"},
		{ID: "test-2"},
		{ID: "other-1"},
	}
	if got := detectPrefixFromIssues(issues); got != "test" {
		t.Fatalf("got %q, want %q", got, "test")
	}
}
// TestCountLines verifies that countLines counts newline-terminated lines,
// including an empty middle line ("a\n\nb\n" -> 3).
func TestCountLines(t *testing.T) {
	tmp := t.TempDir()
	p := filepath.Join(tmp, "f.txt")
	if err := os.WriteFile(p, []byte("a\n\nb\n"), 0o600); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}
	if got := countLines(p); got != 3 {
		t.Fatalf("countLines=%d, want 3", got)
	}
}
// TestCheckUncommittedChanges_Warns exercises checkUncommittedChanges: a
// dirty issues.jsonl together with an empty ImportResult should produce an
// "uncommitted changes" warning on stderr, while a result that imported
// something (Created: 1) should produce no warning at all.
func TestCheckUncommittedChanges_Warns(t *testing.T) {
	_, cleanup := setupGitRepo(t)
	defer cleanup()
	if err := os.WriteFile("issues.jsonl", []byte("{\"id\":\"test-1\"}\n"), 0o600); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}
	_ = execCmd(t, "git", "add", "issues.jsonl")
	_ = execCmd(t, "git", "commit", "-m", "add issues")
	// Modify without committing.
	if err := os.WriteFile("issues.jsonl", []byte("{\"id\":\"test-1\"}\n{\"id\":\"test-2\"}\n"), 0o600); err != nil {
		t.Fatalf("WriteFile: %v", err)
	}
	warn := captureStderr(t, func() {
		checkUncommittedChanges("issues.jsonl", &ImportResult{})
	})
	if !strings.Contains(warn, "uncommitted changes") {
		t.Fatalf("expected warning, got: %q", warn)
	}
	// With actual imports recorded, the dirty file should not trigger a warning.
	noWarn := captureStderr(t, func() {
		checkUncommittedChanges("issues.jsonl", &ImportResult{Created: 1})
	})
	if noWarn != "" {
		t.Fatalf("expected no warning, got: %q", noWarn)
	}
}
// execCmd runs an external command and returns its combined stdout/stderr,
// failing the calling test immediately if the command errors.
func execCmd(t *testing.T, name string, args ...string) string {
	t.Helper()
	combined, runErr := exec.Command(name, args...).CombinedOutput()
	if runErr != nil {
		t.Fatalf("%s %v failed: %v\n%s", name, args, runErr, combined)
	}
	return string(combined)
}

View File

@@ -288,47 +288,10 @@ type VersionChange struct {
// versionChanges contains agent-actionable changes for recent versions
var versionChanges = []VersionChange{
{
Version: "0.38.0",
Date: "2025-12-27",
Changes: []string{
"NEW: Prefix-based routing (bd-9gvf) - bd commands auto-route to correct rig via routes.jsonl",
"NEW: Cross-rig ID auto-resolve (bd-lfiu) - bd dep add auto-resolves IDs across rigs",
"NEW: bd mol pour/wisp moved under bd mol subcommand (bd-2fs7) - cleaner command hierarchy",
"NEW: bd show displays comments (GH#177) - Comments now visible in issue details",
"NEW: created_by field on issues (GH#748) - Track issue creator for audit trail",
"NEW: Database corruption recovery in bd doctor --fix (GH#753) - Auto-repair corrupted databases",
"NEW: JSONL integrity check in bd doctor (GH#753) - Detect and fix malformed JSONL",
"NEW: Git hygiene checks in bd doctor - Detect stale branches and sync issues",
"NEW: pre-commit config for local lint enforcement - Consistent code quality",
"NEW: Chaos testing flag for release script (bd-kx1j) - --run-chaos-tests for thorough validation",
"CHANGED: Sync backoff and tips consolidation (GH#753) - Smarter daemon sync timing",
"CHANGED: Wisp/Ephemeral name finalized as 'wisp' - bd mol wisp is the canonical command",
"FIX: Comments display outside dependents block (GH#756) - Proper formatting",
"FIX: no-db mode storeActive initialization (GH#761) - JSONL-only mode works correctly",
"FIX: --resolution alias restored for bd close (GH#746) - Backwards compatibility",
"FIX: bd graph works with daemon running (GH#751) - Graph generation no longer conflicts",
"FIX: created_by field in RPC path (GH#754) - Daemon correctly propagates creator",
"FIX: Migration 028 idempotency (GH#757) - Migration handles partial/re-runs",
"FIX: Routed IDs bypass daemon in show command (bd-uu8p) - Cross-rig show works correctly",
"FIX: Storage connections closed per iteration (bd-uu8p) - Prevents resource leaks",
"FIX: Modern git init compatibility (GH#753) - Tests use --initial-branch=main",
"FIX: golangci-lint errors resolved (GH#753) - Clean lint on all platforms",
"IMPROVED: Test coverage - doctor, daemon, storage, RPC client paths covered",
},
},
{
Version: "0.37.0",
Date: "2025-12-26",
Date: "2025-12-25",
Changes: []string{
"BREAKING: Ephemeral API rename (bd-o18s) - Wisp→Ephemeral: JSON 'wisp'→'ephemeral', bd wisp→bd ephemeral",
"NEW: bd gate create/show/list/close/wait (bd-udsi) - Async coordination primitives for agent workflows",
"NEW: bd gate eval (gt-twjr5.2) - Evaluate timer gates and GitHub gates (gh:run, gh:pr, mail)",
"NEW: bd gate approve (gt-twjr5.4) - Human gate approval command",
"NEW: bd close --suggest-next (GH#679) - Show newly unblocked issues after close",
"NEW: bd ready/blocked --parent (GH#743) - Scope by epic or parent bead",
"NEW: TOML support for formulas (gt-xmyha) - .formula.toml files alongside JSON",
"NEW: Fork repo auto-detection (GH#742) - Offer to configure .git/info/exclude",
"NEW: Control flow operators (gt-8tmz.4) - loop and gate operators for formula composition",
"NEW: Aspect composition (gt-8tmz.5) - Cross-cutting concerns via aspects field in formulas",
"NEW: Runtime expansion (gt-8tmz.8) - on_complete and for-each dynamic step generation",
@@ -341,9 +304,6 @@ var versionChanges = []VersionChange{
"CHANGED: Formula format YAML→JSON - Formulas now use .formula.json extension",
"CHANGED: bd mol run removed - Orchestration moved to gt commands",
"CHANGED: Wisp architecture simplified (bd-bkul) - Single DB with Wisp=true flag",
"FIX: Gate await fields preserved during upsert (bd-gr4q) - Multirepo sync fix",
"FIX: Tombstones retain closed_at timestamp - Preserves close time in soft deletes",
"FIX: Git detection caching (bd-7di) - Eliminates worktree slowness",
"FIX: installed_plugins.json v2 format (GH#741) - bd doctor handles new Claude Code format",
"FIX: git.IsWorktree() hang on Windows (GH#727) - bd init no longer hangs outside git repos",
"FIX: Skill files deleted by bd sync (GH#738) - .claude/ files now preserved",
@@ -352,8 +312,6 @@ var versionChanges = []VersionChange{
"FIX: Aspect self-matching recursion (gt-8tmz.16) - Prevents infinite loops",
"FIX: Map expansion nested matching (gt-8tmz.33) - Correctly matches child steps",
"FIX: Content-level merge for divergence (bd-kpy) - Better conflict resolution",
"FIX: Windows MCP graceful fallback (GH#387) - Daemon mode on Windows",
"FIX: Windows npm postinstall file locking (GH#670) - Install reliability",
},
},
{

View File

@@ -426,24 +426,6 @@ With --stealth: configures per-repository git settings for invisible beads usage
fmt.Fprintf(os.Stderr, "Warning: failed to close database: %v\n", err)
}
// Fork detection: offer to configure .git/info/exclude (GH#742)
setupExclude, _ := cmd.Flags().GetBool("setup-exclude")
if setupExclude {
// Manual flag - always configure
if err := setupForkExclude(!quiet); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to configure git exclude: %v\n", err)
}
} else if !stealth && isGitRepo() {
// Auto-detect fork and prompt (skip if stealth - it handles exclude already)
if isFork, upstreamURL := detectForkSetup(); isFork {
if promptForkExclude(upstreamURL, quiet) {
if err := setupForkExclude(!quiet); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to configure git exclude: %v\n", err)
}
}
}
}
// Check if we're in a git repo and hooks aren't installed
// Install by default unless --skip-hooks is passed
if !skipHooks && isGitRepo() && !hooksInstalled() {
@@ -509,7 +491,6 @@ func init() {
initCmd.Flags().Bool("contributor", false, "Run OSS contributor setup wizard")
initCmd.Flags().Bool("team", false, "Run team workflow setup wizard")
initCmd.Flags().Bool("stealth", false, "Enable stealth mode: global gitattributes and gitignore, no local repo tracking")
initCmd.Flags().Bool("setup-exclude", false, "Configure .git/info/exclude to keep beads files local (for forks)")
initCmd.Flags().Bool("skip-hooks", false, "Skip git hooks installation")
initCmd.Flags().Bool("skip-merge-driver", false, "Skip git merge driver setup")
initCmd.Flags().Bool("force", false, "Force re-initialization even if JSONL already has issues (may cause data loss)")
@@ -1482,103 +1463,6 @@ func setupGitExclude(verbose bool) error {
return nil
}
// setupForkExclude configures .git/info/exclude for fork workflows (GH#742)
// Adds beads files and Claude artifacts to keep PRs to upstream clean.
// This is separate from stealth mode - fork protection is specifically about
// preventing beads/Claude files from appearing in upstream PRs.
//
// verbose controls progress output. The function is idempotent: patterns
// already present as exact lines are never appended twice. Errors are
// returned rather than printed so callers decide how to report them.
func setupForkExclude(verbose bool) error {
	// Ask git for the real git dir rather than assuming ".git"
	// (the location can differ, e.g. in worktrees — presumably why
	// rev-parse is used here; confirm against git docs).
	gitDir, err := exec.Command("git", "rev-parse", "--git-dir").Output()
	if err != nil {
		return fmt.Errorf("not a git repository")
	}
	gitDirPath := strings.TrimSpace(string(gitDir))
	excludePath := filepath.Join(gitDirPath, "info", "exclude")
	// Ensure info directory exists
	if err := os.MkdirAll(filepath.Join(gitDirPath, "info"), 0755); err != nil {
		return fmt.Errorf("failed to create git info directory: %w", err)
	}
	// Read existing content.
	// A missing exclude file is fine: treated as empty, created on write below.
	var existingContent string
	// #nosec G304 - git config path
	if content, err := os.ReadFile(excludePath); err == nil {
		existingContent = string(content)
	}
	// Patterns to add for fork protection
	patterns := []string{".beads/", "**/RECOVERY*.md", "**/SESSION*.md"}
	var toAdd []string
	for _, p := range patterns {
		// Check for exact line match (pattern alone on a line)
		// This avoids false positives like ".beads/issues.jsonl" matching ".beads/"
		if !containsExactPattern(existingContent, p) {
			toAdd = append(toAdd, p)
		}
	}
	if len(toAdd) == 0 {
		// Nothing new to write: already configured.
		if verbose {
			fmt.Printf("%s Git exclude already configured\n", ui.RenderPass("✓"))
		}
		return nil
	}
	// Append patterns, keeping all existing content intact and making sure
	// our section starts on a fresh line.
	newContent := existingContent
	if !strings.HasSuffix(newContent, "\n") && len(newContent) > 0 {
		newContent += "\n"
	}
	newContent += "\n# Beads fork protection (bd init)\n"
	for _, p := range toAdd {
		newContent += p + "\n"
	}
	// #nosec G306 - config file needs 0644
	if err := os.WriteFile(excludePath, []byte(newContent), 0644); err != nil {
		return fmt.Errorf("failed to write git exclude: %w", err)
	}
	if verbose {
		fmt.Printf("\n%s Added to .git/info/exclude:\n", ui.RenderPass("✓"))
		for _, p := range toAdd {
			fmt.Printf(" %s\n", p)
		}
		fmt.Println("\nNote: .git/info/exclude is local-only and won't affect upstream.")
	}
	return nil
}
// containsExactPattern reports whether pattern occurs as a whole line of
// content, ignoring surrounding whitespace on that line. Requiring an exact
// line match avoids false positives such as ".beads/issues.jsonl" being
// treated as a match for ".beads/".
func containsExactPattern(content, pattern string) bool {
	lines := strings.Split(content, "\n")
	for i := 0; i < len(lines); i++ {
		candidate := strings.TrimSpace(lines[i])
		if candidate == pattern {
			return true
		}
	}
	return false
}
// promptForkExclude asks if user wants to configure .git/info/exclude for fork workflow (GH#742)
// Returns true when the user accepts; pressing Enter (empty answer) defaults
// to yes. In quiet mode no prompt is shown and false is returned, so quiet
// runs never modify git configuration implicitly.
func promptForkExclude(upstreamURL string, quiet bool) bool {
	if quiet {
		return false // Don't prompt in quiet mode
	}
	fmt.Printf("\n%s Detected fork (upstream: %s)\n\n", ui.RenderAccent("▶"), upstreamURL)
	fmt.Println("Would you like to configure .git/info/exclude to keep beads files local?")
	fmt.Println("This prevents beads from appearing in PRs to upstream.")
	fmt.Print("\n[Y/n]: ")
	reader := bufio.NewReader(os.Stdin)
	// Read error deliberately ignored: a failed read yields an empty
	// response, which falls through to the "yes" default below.
	response, _ := reader.ReadString('\n')
	response = strings.TrimSpace(strings.ToLower(response))
	// Default to yes (empty or "y" or "yes")
	return response == "" || response == "y" || response == "yes"
}
// setupGlobalGitIgnore configures global gitignore to ignore beads and claude files for a specific project
// DEPRECATED: This function uses absolute paths which don't work in gitignore (GitHub #704).
// Use setupGitExclude instead for new code.

View File

@@ -1,116 +0,0 @@
package main
import (
"strings"
"testing"
"time"
"github.com/steveyegge/beads/internal/types"
)
// TestListParseTimeFlag verifies that parseTimeFlag accepts every supported
// timestamp layout and rejects garbage input.
func TestListParseTimeFlag(t *testing.T) {
	cases := []string{
		"2025-12-26",
		"2025-12-26T12:34:56",
		"2025-12-26 12:34:56",
		time.DateOnly,
		time.RFC3339,
	}
	for _, c := range cases {
		// Just make sure we accept the expected formats.
		// Layout constants double as markers: map them to a concrete sample
		// string; the remaining cases are already literal samples.
		var s string
		switch c {
		case time.DateOnly:
			s = "2025-12-26"
		case time.RFC3339:
			s = "2025-12-26T12:34:56Z"
		default:
			s = c
		}
		got, err := parseTimeFlag(s)
		if err != nil {
			t.Fatalf("parseTimeFlag(%q) error: %v", s, err)
		}
		// Year check is a cheap sanity assertion that parsing read the date.
		if got.Year() != 2025 {
			t.Fatalf("parseTimeFlag(%q) year=%d, want 2025", s, got.Year())
		}
	}
	// Invalid input must produce an error, not a zero time.
	if _, err := parseTimeFlag("not-a-date"); err == nil {
		t.Fatalf("expected error")
	}
}
// TestListPinIndicator verifies that pinIndicator returns a non-empty marker
// for pinned issues and an empty string for unpinned ones.
func TestListPinIndicator(t *testing.T) {
	if pinIndicator(&types.Issue{Pinned: true}) == "" {
		t.Fatalf("expected pin indicator")
	}
	if pinIndicator(&types.Issue{Pinned: false}) != "" {
		t.Fatalf("expected empty pin indicator")
	}
}
// TestListFormatPrettyIssue_BadgesAndDefaults checks that formatPrettyIssue
// renders the ID, title, and type badge. Status "wat" and priority 99 are
// intentionally invalid to exercise the formatter's fallback rendering.
func TestListFormatPrettyIssue_BadgesAndDefaults(t *testing.T) {
	iss := &types.Issue{ID: "bd-1", Title: "Hello", Status: "wat", Priority: 99, IssueType: "bug"}
	out := formatPrettyIssue(iss)
	if !strings.Contains(out, "bd-1") || !strings.Contains(out, "Hello") {
		t.Fatalf("unexpected output: %q", out)
	}
	if !strings.Contains(out, "[BUG]") {
		t.Fatalf("expected BUG badge: %q", out)
	}
}
// TestListBuildIssueTree_ParentChildByDotID verifies dotted-ID parenting:
// "bd-1.1" nests under "bd-1", while "bd-2.1" (whose parent "bd-2" is absent)
// is promoted to a root rather than dropped.
func TestListBuildIssueTree_ParentChildByDotID(t *testing.T) {
	parent := &types.Issue{ID: "bd-1", Title: "Parent", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	child := &types.Issue{ID: "bd-1.1", Title: "Child", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	orphan := &types.Issue{ID: "bd-2.1", Title: "Orphan", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	// Child is listed before parent on purpose: order must not matter.
	roots, children := buildIssueTree([]*types.Issue{child, parent, orphan})
	if len(children["bd-1"]) != 1 || children["bd-1"][0].ID != "bd-1.1" {
		t.Fatalf("expected bd-1 to have bd-1.1 child: %+v", children)
	}
	if len(roots) != 2 {
		t.Fatalf("expected 2 roots (parent + orphan), got %d", len(roots))
	}
}
// TestListSortIssues_ClosedNilLast verifies sorting by "closed": most recently
// closed first, and issues with a nil ClosedAt (still open) sorted last.
func TestListSortIssues_ClosedNilLast(t *testing.T) {
	t1 := time.Now().Add(-2 * time.Hour)
	t2 := time.Now().Add(-1 * time.Hour)
	closedOld := &types.Issue{ID: "bd-1", ClosedAt: &t1}
	closedNew := &types.Issue{ID: "bd-2", ClosedAt: &t2}
	open := &types.Issue{ID: "bd-3", ClosedAt: nil}
	issues := []*types.Issue{open, closedOld, closedNew}
	sortIssues(issues, "closed", false)
	// Expected order: newest close time, older close time, then open (nil).
	if issues[0].ID != "bd-2" || issues[1].ID != "bd-1" || issues[2].ID != "bd-3" {
		t.Fatalf("unexpected order: %s, %s, %s", issues[0].ID, issues[1].ID, issues[2].ID)
	}
}
// TestListDisplayPrettyList captures stdout to verify displayPrettyList's two
// paths: the "No issues found" message for an empty list, and the full listing
// (including nested dotted-ID children and a Total line) otherwise.
func TestListDisplayPrettyList(t *testing.T) {
	out := captureStdout(t, func() error {
		displayPrettyList(nil, false)
		return nil
	})
	if !strings.Contains(out, "No issues found") {
		t.Fatalf("unexpected output: %q", out)
	}
	issues := []*types.Issue{
		{ID: "bd-1", Title: "A", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask},
		{ID: "bd-2", Title: "B", Status: types.StatusInProgress, Priority: 1, IssueType: types.TypeFeature},
		{ID: "bd-1.1", Title: "C", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask},
	}
	out = captureStdout(t, func() error {
		displayPrettyList(issues, false)
		return nil
	})
	if !strings.Contains(out, "bd-1") || !strings.Contains(out, "bd-1.1") || !strings.Contains(out, "Total:") {
		t.Fatalf("unexpected output: %q", out)
	}
}

View File

@@ -2,7 +2,6 @@ package main
import (
"context"
"encoding/json"
"fmt"
"os"
"os/signal"
@@ -313,9 +312,6 @@ var rootCmd = &cobra.Command{
// Set up signal-aware context for graceful cancellation
rootCtx, rootCancel = signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
// Signal Gas Town daemon about bd activity (best-effort, for exponential backoff)
defer signalGasTownActivity()
// Apply verbosity flags early (before any output)
debug.SetVerbose(verboseFlag)
debug.SetQuiet(quietFlag)
@@ -627,13 +623,6 @@ var rootCmd = &cobra.Command{
FallbackReason: FallbackNone,
}
// Doctor should always run in direct mode. It's specifically used to diagnose and
// repair daemon/DB issues, so attempting to connect to (or auto-start) a daemon
// can add noise and timeouts.
if cmd.Name() == "doctor" {
noDaemon = true
}
// Try to connect to daemon first (unless --no-daemon flag is set or worktree safety check fails)
if noDaemon {
daemonStatus.FallbackReason = FallbackFlagNoDaemon
@@ -875,9 +864,6 @@ var rootCmd = &cobra.Command{
debug.Logf("loaded %d molecules from %v", result.Loaded, result.Sources)
}
}
// Tips (including sync conflict proactive checks) are shown via maybeShowTip()
// after successful command execution, not in PreRun
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
// Handle --no-db mode: write memory storage back to JSONL
@@ -931,14 +917,8 @@ var rootCmd = &cobra.Command{
if store != nil {
_ = store.Close()
}
if profileFile != nil {
pprof.StopCPUProfile()
_ = profileFile.Close()
}
if traceFile != nil {
trace.Stop()
_ = traceFile.Close()
}
if profileFile != nil { pprof.StopCPUProfile(); _ = profileFile.Close() }
if traceFile != nil { trace.Stop(); _ = traceFile.Close() }
// Cancel the signal context to clean up resources
if rootCancel != nil {
@@ -951,80 +931,6 @@ var rootCmd = &cobra.Command{
// Configurable via config file or BEADS_FLUSH_DEBOUNCE env var (e.g., "500ms", "10s")
// Defaults to 5 seconds if not set or invalid
// signalGasTownActivity writes an activity signal for Gas Town daemon.
// This enables exponential backoff based on bd usage detection (gt-ws8ol).
// Best-effort: silent on any failure, never affects bd operation.
//
// The signal is a small JSON file at <townRoot>/daemon/activity.json holding
// the last bd command line, the acting user, and a UTC timestamp.
func signalGasTownActivity() {
	// Determine town root
	// Priority: GT_ROOT env > detect from cwd path > skip
	townRoot := os.Getenv("GT_ROOT")
	if townRoot == "" {
		// Try to detect from cwd - if under ~/gt/, use that as town root
		home, err := os.UserHomeDir()
		if err != nil {
			return
		}
		gtRoot := filepath.Join(home, "gt")
		cwd, err := os.Getwd()
		if err != nil {
			return
		}
		// Separator suffix prevents matching sibling dirs like ~/gtools.
		if strings.HasPrefix(cwd, gtRoot+string(os.PathSeparator)) {
			townRoot = gtRoot
		}
	}
	if townRoot == "" {
		return // Not in Gas Town, skip
	}
	// Ensure daemon directory exists
	daemonDir := filepath.Join(townRoot, "daemon")
	if err := os.MkdirAll(daemonDir, 0755); err != nil {
		return
	}
	// Build command line from os.Args
	cmdLine := strings.Join(os.Args, " ")
	// Determine actor (use package-level var if set, else fall back to env)
	actorName := actor
	if actorName == "" {
		if bdActor := os.Getenv("BD_ACTOR"); bdActor != "" {
			actorName = bdActor
		} else if user := os.Getenv("USER"); user != "" {
			actorName = user
		} else {
			actorName = "unknown"
		}
	}
	// Build activity signal.
	// Anonymous struct keeps the wire format local to this function.
	activity := struct {
		LastCommand string `json:"last_command"`
		Actor       string `json:"actor"`
		Timestamp   string `json:"timestamp"`
	}{
		LastCommand: cmdLine,
		Actor:       actorName,
		Timestamp:   time.Now().UTC().Format(time.RFC3339),
	}
	data, err := json.Marshal(activity)
	if err != nil {
		return
	}
	// Write atomically (write to temp, rename) so a concurrent reader never
	// observes a partially written activity.json.
	activityPath := filepath.Join(daemonDir, "activity.json")
	tmpPath := activityPath + ".tmp"
	// nolint:gosec // G306: 0644 is appropriate for a status file
	if err := os.WriteFile(tmpPath, data, 0644); err != nil {
		return
	}
	// Rename error ignored: best-effort signal, failure must stay silent.
	_ = os.Rename(tmpPath, activityPath)
}
func main() {
if err := rootCmd.Execute(); err != nil {
os.Exit(1)

View File

@@ -18,9 +18,10 @@ import (
// - Compound: Result of bonding
//
// Usage:
// bd mol catalog # List available protos
// bd mol show <id> # Show proto/molecule structure
// bd mol pour <id> --var key=value # Instantiate proto → persistent mol
// bd mol wisp <id> --var key=value # Instantiate proto → ephemeral wisp
// bd pour <id> --var key=value # Instantiate proto → persistent mol
// bd wisp create <id> --var key=value # Instantiate proto → ephemeral wisp
// MoleculeLabel is the label used to identify molecules (templates)
// Molecules use the same label as templates - they ARE templates with workflow semantics
@@ -47,15 +48,14 @@ The molecule metaphor:
- Distilling extracts a proto from an ad-hoc epic
Commands:
show Show proto/molecule structure and variables
pour Instantiate proto as persistent mol (liquid phase)
wisp Instantiate proto as ephemeral wisp (vapor phase)
bond Polymorphic combine: proto+proto, proto+mol, mol+mol
squash Condense molecule to digest
burn Discard wisp
distill Extract proto from ad-hoc epic
catalog List available protos
show Show proto/molecule structure and variables
bond Polymorphic combine: proto+proto, proto+mol, mol+mol
distill Extract proto from ad-hoc epic
Use "bd formula list" to list available formulas.`,
See also:
bd pour <proto> # Instantiate as persistent mol (liquid phase)
bd wisp create <proto> # Instantiate as ephemeral wisp (vapor phase)`,
}
// =============================================================================
@@ -72,7 +72,7 @@ func spawnMolecule(ctx context.Context, s storage.Storage, subgraph *MoleculeSub
Vars: vars,
Assignee: assignee,
Actor: actorName,
Ephemeral: ephemeral,
Wisp: ephemeral,
Prefix: prefix,
}
return cloneSubgraph(ctx, s, subgraph, opts)

View File

@@ -40,12 +40,12 @@ Bond types:
Phase control:
By default, spawned protos follow the target's phase:
- Attaching to mol (Ephemeral=false) → spawns as persistent (Ephemeral=false)
- Attaching to ephemeral issue (Ephemeral=true) → spawns as ephemeral (Ephemeral=true)
- Attaching to mol (Wisp=false) → spawns as persistent (Wisp=false)
- Attaching to wisp (Wisp=true) → spawns as ephemeral (Wisp=true)
Override with:
--pour Force spawn as liquid (persistent, Ephemeral=false)
--ephemeral Force spawn as vapor (ephemeral, Ephemeral=true, excluded from JSONL export)
--pour Force spawn as liquid (persistent, Wisp=false)
--wisp Force spawn as vapor (ephemeral, Wisp=true, excluded from JSONL export)
Dynamic bonding (Christmas Ornament pattern):
Use --ref to specify a custom child reference with variable substitution.
@@ -57,7 +57,7 @@ Dynamic bonding (Christmas Ornament pattern):
Use cases:
- Found important bug during patrol? Use --pour to persist it
- Need ephemeral diagnostic on persistent feature? Use --ephemeral
- Need ephemeral diagnostic on persistent feature? Use --wisp
- Spawning per-worker arms on a patrol? Use --ref for readable IDs
Examples:
@@ -66,7 +66,7 @@ Examples:
bd mol bond mol-feature bd-abc123 # Attach proto to molecule
bd mol bond bd-abc123 bd-def456 # Join two molecules
bd mol bond mol-critical-bug wisp-patrol --pour # Persist found bug
bd mol bond mol-temp-check bd-feature --ephemeral # Ephemeral diagnostic
bd mol bond mol-temp-check bd-feature --wisp # Ephemeral diagnostic
bd mol bond mol-arm bd-patrol --ref arm-{{name}} --var name=ace # Dynamic child ID`,
Args: cobra.ExactArgs(2),
Run: runMolBond,
@@ -102,20 +102,20 @@ func runMolBond(cmd *cobra.Command, args []string) {
customTitle, _ := cmd.Flags().GetString("as")
dryRun, _ := cmd.Flags().GetBool("dry-run")
varFlags, _ := cmd.Flags().GetStringSlice("var")
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
wisp, _ := cmd.Flags().GetBool("wisp")
pour, _ := cmd.Flags().GetBool("pour")
childRef, _ := cmd.Flags().GetString("ref")
// Validate phase flags are not both set
if ephemeral && pour {
fmt.Fprintf(os.Stderr, "Error: cannot use both --ephemeral and --pour\n")
if wisp && pour {
fmt.Fprintf(os.Stderr, "Error: cannot use both --wisp and --pour\n")
os.Exit(1)
}
// All issues go in the main store; ephemeral vs pour determines the Wisp flag
// --ephemeral: create with Ephemeral=true (ephemeral, excluded from JSONL export)
// --pour: create with Ephemeral=false (persistent, exported to JSONL)
// Default: follow target's phase (ephemeral if target is ephemeral, otherwise persistent)
// All issues go in the main store; wisp vs pour determines the Wisp flag
// --wisp: create with Wisp=true (ephemeral, excluded from JSONL export)
// --pour: create with Wisp=false (persistent, exported to JSONL)
// Default: follow target's phase (wisp if target is wisp, otherwise persistent)
// Validate bond type
if bondType != types.BondTypeSequential && bondType != types.BondTypeParallel && bondType != types.BondTypeConditional {
@@ -181,8 +181,8 @@ func runMolBond(cmd *cobra.Command, args []string) {
fmt.Printf(" B: %s (%s)\n", issueB.Title, operandType(bIsProto))
}
fmt.Printf(" Bond type: %s\n", bondType)
if ephemeral {
fmt.Printf(" Phase override: vapor (--ephemeral)\n")
if wisp {
fmt.Printf(" Phase override: vapor (--wisp)\n")
} else if pour {
fmt.Printf(" Phase override: liquid (--pour)\n")
}
@@ -240,16 +240,16 @@ func runMolBond(cmd *cobra.Command, args []string) {
case aIsProto && !bIsProto:
// Pass subgraph directly if cooked from formula
if cookedA {
result, err = bondProtoMolWithSubgraph(ctx, store, subgraphA, issueA, issueB, bondType, vars, childRef, actor, ephemeral, pour)
result, err = bondProtoMolWithSubgraph(ctx, store, subgraphA, issueA, issueB, bondType, vars, childRef, actor, wisp, pour)
} else {
result, err = bondProtoMol(ctx, store, issueA, issueB, bondType, vars, childRef, actor, ephemeral, pour)
result, err = bondProtoMol(ctx, store, issueA, issueB, bondType, vars, childRef, actor, wisp, pour)
}
case !aIsProto && bIsProto:
// Pass subgraph directly if cooked from formula
if cookedB {
result, err = bondProtoMolWithSubgraph(ctx, store, subgraphB, issueB, issueA, bondType, vars, childRef, actor, ephemeral, pour)
result, err = bondProtoMolWithSubgraph(ctx, store, subgraphB, issueB, issueA, bondType, vars, childRef, actor, wisp, pour)
} else {
result, err = bondMolProto(ctx, store, issueA, issueB, bondType, vars, childRef, actor, ephemeral, pour)
result, err = bondMolProto(ctx, store, issueA, issueB, bondType, vars, childRef, actor, wisp, pour)
}
default:
result, err = bondMolMol(ctx, store, issueA, issueB, bondType, actor)
@@ -273,10 +273,10 @@ func runMolBond(cmd *cobra.Command, args []string) {
if result.Spawned > 0 {
fmt.Printf(" Spawned: %d issues\n", result.Spawned)
}
if ephemeral {
fmt.Printf(" Phase: vapor (ephemeral, Ephemeral=true)\n")
if wisp {
fmt.Printf(" Phase: vapor (ephemeral, Wisp=true)\n")
} else if pour {
fmt.Printf(" Phase: liquid (persistent, Ephemeral=false)\n")
fmt.Printf(" Phase: liquid (persistent, Wisp=false)\n")
}
}
@@ -386,12 +386,12 @@ func bondProtoProto(ctx context.Context, s storage.Storage, protoA, protoB *type
// bondProtoMol bonds a proto to an existing molecule by spawning the proto.
// If childRef is provided, generates custom IDs like "parent.childref" (dynamic bonding).
// protoSubgraph can be nil if proto is from DB (will be loaded), or pre-loaded for formulas.
func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, ephemeralFlag, pourFlag bool) (*BondResult, error) {
return bondProtoMolWithSubgraph(ctx, s, nil, proto, mol, bondType, vars, childRef, actorName, ephemeralFlag, pourFlag)
func bondProtoMol(ctx context.Context, s storage.Storage, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, wispFlag, pourFlag bool) (*BondResult, error) {
return bondProtoMolWithSubgraph(ctx, s, nil, proto, mol, bondType, vars, childRef, actorName, wispFlag, pourFlag)
}
// bondProtoMolWithSubgraph is the internal implementation that accepts a pre-loaded subgraph.
func bondProtoMolWithSubgraph(ctx context.Context, s storage.Storage, protoSubgraph *TemplateSubgraph, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, ephemeralFlag, pourFlag bool) (*BondResult, error) {
func bondProtoMolWithSubgraph(ctx context.Context, s storage.Storage, protoSubgraph *TemplateSubgraph, proto, mol *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, wispFlag, pourFlag bool) (*BondResult, error) {
// Use provided subgraph or load from DB
subgraph := protoSubgraph
if subgraph == nil {
@@ -414,20 +414,20 @@ func bondProtoMolWithSubgraph(ctx context.Context, s storage.Storage, protoSubgr
return nil, fmt.Errorf("missing required variables: %s (use --var)", strings.Join(missingVars, ", "))
}
// Determine ephemeral flag based on explicit flags or target's phase
// --ephemeral: force ephemeral=true, --pour: force ephemeral=false, neither: follow target
makeEphemeral := mol.Ephemeral // Default: follow target's phase
if ephemeralFlag {
makeEphemeral = true
// Determine wisp flag based on explicit flags or target's phase
// --wisp: force wisp=true, --pour: force wisp=false, neither: follow target
makeWisp := mol.Wisp // Default: follow target's phase
if wispFlag {
makeWisp = true
} else if pourFlag {
makeEphemeral = false
makeWisp = false
}
// Build CloneOptions for spawning
opts := CloneOptions{
Vars: vars,
Actor: actorName,
Ephemeral: makeEphemeral,
Wisp: makeWisp,
}
// Dynamic bonding: use custom IDs if childRef is provided
@@ -482,9 +482,9 @@ func bondProtoMolWithSubgraph(ctx context.Context, s storage.Storage, protoSubgr
}
// bondMolProto bonds a molecule to a proto (symmetric with bondProtoMol)
func bondMolProto(ctx context.Context, s storage.Storage, mol, proto *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, ephemeralFlag, pourFlag bool) (*BondResult, error) {
func bondMolProto(ctx context.Context, s storage.Storage, mol, proto *types.Issue, bondType string, vars map[string]string, childRef string, actorName string, wispFlag, pourFlag bool) (*BondResult, error) {
// Same as bondProtoMol but with arguments swapped
return bondProtoMol(ctx, s, proto, mol, bondType, vars, childRef, actorName, ephemeralFlag, pourFlag)
return bondProtoMol(ctx, s, proto, mol, bondType, vars, childRef, actorName, wispFlag, pourFlag)
}
// bondMolMol bonds two molecules together
@@ -630,8 +630,8 @@ func init() {
molBondCmd.Flags().String("as", "", "Custom title for compound proto (proto+proto only)")
molBondCmd.Flags().Bool("dry-run", false, "Preview what would be created")
molBondCmd.Flags().StringSlice("var", []string{}, "Variable substitution for spawned protos (key=value)")
molBondCmd.Flags().Bool("ephemeral", false, "Force spawn as vapor (ephemeral, Ephemeral=true)")
molBondCmd.Flags().Bool("pour", false, "Force spawn as liquid (persistent, Ephemeral=false)")
molBondCmd.Flags().Bool("wisp", false, "Force spawn as vapor (ephemeral, Wisp=true)")
molBondCmd.Flags().Bool("pour", false, "Force spawn as liquid (persistent, Wisp=false)")
molBondCmd.Flags().String("ref", "", "Custom child reference with {{var}} substitution (e.g., arm-{{polecat_name}})")
molCmd.AddCommand(molBondCmd)

View File

@@ -23,8 +23,8 @@ completely removes the wisp with no trace. Use this for:
- Test/debug wisps you don't want to preserve
The burn operation:
1. Verifies the molecule has Ephemeral=true (is ephemeral)
2. Deletes the molecule and all its ephemeral children
1. Verifies the molecule has Wisp=true (is ephemeral)
2. Deletes the molecule and all its wisp children
3. No digest is created (use 'bd mol squash' if you want a digest)
CAUTION: This is a destructive operation. The wisp's data will be
@@ -81,8 +81,8 @@ func runMolBurn(cmd *cobra.Command, args []string) {
}
// Verify it's a wisp
if !rootIssue.Ephemeral {
fmt.Fprintf(os.Stderr, "Error: molecule %s is not a wisp (Ephemeral=false)\n", resolvedID)
if !rootIssue.Wisp {
fmt.Fprintf(os.Stderr, "Error: molecule %s is not a wisp (Wisp=false)\n", resolvedID)
fmt.Fprintf(os.Stderr, "Hint: mol burn only works with wisp molecules\n")
fmt.Fprintf(os.Stderr, " Use 'bd delete' to remove non-wisp issues\n")
os.Exit(1)
@@ -98,7 +98,7 @@ func runMolBurn(cmd *cobra.Command, args []string) {
// Collect wisp issue IDs to delete (only delete wisps, not regular children)
var wispIDs []string
for _, issue := range subgraph.Issues {
if issue.Ephemeral {
if issue.Wisp {
wispIDs = append(wispIDs, issue.ID)
}
}
@@ -120,7 +120,7 @@ func runMolBurn(cmd *cobra.Command, args []string) {
fmt.Printf("Root: %s\n", subgraph.Root.Title)
fmt.Printf("\nWisp issues to delete (%d total):\n", len(wispIDs))
for _, issue := range subgraph.Issues {
if !issue.Ephemeral {
if !issue.Wisp {
continue
}
status := string(issue.Status)
@@ -166,7 +166,7 @@ func runMolBurn(cmd *cobra.Command, args []string) {
}
fmt.Printf("%s Burned wisp: %d issues deleted\n", ui.RenderPass("✓"), result.DeletedCount)
fmt.Printf(" Ephemeral: %s\n", resolvedID)
fmt.Printf(" Wisp: %s\n", resolvedID)
fmt.Printf(" No digest created.\n")
}

134
cmd/bd/mol_catalog.go Normal file
View File

@@ -0,0 +1,134 @@
package main
import (
"fmt"
"sort"
"strings"
"github.com/spf13/cobra"
"github.com/steveyegge/beads/internal/ui"
)
// CatalogEntry represents a formula in the catalog.
// It is the JSON-serializable row emitted for each formula found on the
// search paths (used by both the table and --json outputs).
type CatalogEntry struct {
	Name        string   `json:"name"`           // formula name (f.Formula)
	Type        string   `json:"type"`           // formula kind, e.g. workflow/expansion/aspect
	Description string   `json:"description"`    // description, truncated for display
	Source      string   `json:"source"`         // where the formula was loaded from
	Steps       int      `json:"steps"`          // step count via countSteps
	Vars        []string `json:"vars,omitempty"` // sorted variable names, omitted when none
}
// molCatalogCmd lists every formula visible on the formula search paths.
// Earlier paths shadow later ones (project > user > Gas Town), so each
// formula name appears at most once. Supports --type filtering and --json.
var molCatalogCmd = &cobra.Command{
	Use:     "catalog",
	Aliases: []string{"list", "ls"},
	Short:   "List available molecule formulas",
	Long: `List formulas available for bd pour / bd wisp create.
Formulas are ephemeral proto definitions stored as .formula.json files.
They are cooked inline when pouring, never stored as database beads.
Search paths (in priority order):
1. .beads/formulas/ (project-level)
2. ~/.beads/formulas/ (user-level)
3. ~/gt/.beads/formulas/ (Gas Town level)`,
	Run: func(cmd *cobra.Command, args []string) {
		typeFilter, _ := cmd.Flags().GetString("type")
		// Get all search paths and scan for formulas
		searchPaths := getFormulaSearchPaths()
		seen := make(map[string]bool)
		var entries []CatalogEntry
		for _, dir := range searchPaths {
			formulas, err := scanFormulaDir(dir)
			if err != nil {
				continue // Skip inaccessible directories
			}
			for _, f := range formulas {
				if seen[f.Formula] {
					continue // Skip shadowed formulas
				}
				seen[f.Formula] = true
				// Apply type filter
				if typeFilter != "" && string(f.Type) != typeFilter {
					continue
				}
				// Extract variable names (sorted for stable output)
				var varNames []string
				for name := range f.Vars {
					varNames = append(varNames, name)
				}
				sort.Strings(varNames)
				entries = append(entries, CatalogEntry{
					Name:        f.Formula,
					Type:        string(f.Type),
					Description: truncateDescription(f.Description, 60),
					Source:      f.Source,
					Steps:       countSteps(f.Steps),
					Vars:        varNames,
				})
			}
		}
		// Sort by name
		sort.Slice(entries, func(i, j int) bool {
			return entries[i].Name < entries[j].Name
		})
		if jsonOutput {
			outputJSON(entries)
			return
		}
		if len(entries) == 0 {
			// Empty catalog: print a short how-to instead of nothing.
			fmt.Println("No formulas found.")
			fmt.Println("\nTo create a formula, write a .formula.json file:")
			fmt.Println(" .beads/formulas/my-workflow.formula.json")
			fmt.Println("\nOr distill from existing work:")
			fmt.Println(" bd mol distill <epic-id> my-workflow")
			fmt.Println("\nTo instantiate from formula:")
			fmt.Println(" bd pour <formula-name> --var key=value # persistent mol")
			fmt.Println(" bd wisp create <formula-name> --var key=value # ephemeral wisp")
			return
		}
		fmt.Printf("%s\n\n", ui.RenderPass("Formulas (for bd pour / bd wisp create):"))
		// Group by type for display
		byType := make(map[string][]CatalogEntry)
		for _, e := range entries {
			byType[e.Type] = append(byType[e.Type], e)
		}
		// Print workflow types first (most common for pour/wisp)
		typeOrder := []string{"workflow", "expansion", "aspect"}
		for _, t := range typeOrder {
			typeEntries := byType[t]
			if len(typeEntries) == 0 {
				continue
			}
			typeIcon := getTypeIcon(t)
			// strings.Title is deprecated (Go 1.18+); the type names here are
			// plain lowercase ASCII, so capitalizing the first byte is exact.
			label := t
			if label != "" {
				label = strings.ToUpper(label[:1]) + label[1:]
			}
			fmt.Printf("%s %s:\n", typeIcon, label)
			for _, e := range typeEntries {
				varInfo := ""
				if len(e.Vars) > 0 {
					varInfo = fmt.Sprintf(" (vars: %s)", strings.Join(e.Vars, ", "))
				}
				fmt.Printf(" %s: %s%s\n", ui.RenderAccent(e.Name), e.Description, varInfo)
			}
			fmt.Println()
		}
	},
}
// init registers the catalog command (plus its list/ls aliases) under bd mol.
func init() {
	// --type narrows the listing to a single formula kind.
	molCatalogCmd.Flags().String("type", "", "Filter by formula type (workflow, expansion, aspect)")
	molCmd.AddCommand(molCatalogCmd)
}

View File

@@ -100,7 +100,7 @@ The output shows all steps with status indicators:
}
fmt.Println(".")
fmt.Println("\nTo start work on a molecule:")
fmt.Println(" bd mol pour <proto-id> # Instantiate a molecule from template")
fmt.Println(" bd pour <proto-id> # Instantiate a molecule from template")
fmt.Println(" bd update <step-id> --status in_progress # Claim a step")
return
}

View File

@@ -15,21 +15,22 @@ import (
)
var molDistillCmd = &cobra.Command{
Use: "distill <epic-id> [formula-name]",
Short: "Extract a formula from an existing epic",
Long: `Distill a molecule by extracting a reusable formula from an existing epic.
Use: "distill <id> [formula-name]",
Short: "Extract a formula from a mol, wisp, or epic",
Long: `Extract a reusable formula from completed work.
This is the reverse of pour: instead of formula → molecule, it's molecule → formula.
This is the reverse of pour: instead of formula → mol, it's mol → formula.
Works with any hierarchical work: mols, wisps, or plain epics.
The distill command:
1. Loads the existing epic and all its children
1. Loads the work item and all its children
2. Converts the structure to a .formula.json file
3. Replaces concrete values with {{variable}} placeholders (via --var flags)
Use cases:
- Team develops good workflow organically, wants to reuse it
- Capture tribal knowledge as executable templates
- Create starting point for similar future work
- Emergent patterns: structured work manually, want to templatize
- Modified execution: poured formula, added steps, want to capture
- Learning from success: extract what made a workflow succeed
Variable syntax (both work - we detect which side is the concrete value):
--var branch=feature-auth Spawn-style: variable=value (recommended)
@@ -40,8 +41,10 @@ Output locations (first writable wins):
2. ~/.beads/formulas/ (user-level, if project not writable)
Examples:
bd mol distill bd-o5xe my-workflow
bd mol distill bd-abc release-workflow --var feature_name=auth-refactor`,
bd mol distill bd-mol-xyz my-workflow
bd mol distill bd-wisp-abc patrol-template
bd mol distill bd-epic-123 release-workflow --var version=1.2.3
bd mol distill bd-xyz workflow -o ./formulas/`,
Args: cobra.RangeArgs(1, 2),
Run: runMolDistill,
}
@@ -102,14 +105,9 @@ func parseDistillVar(varFlag, searchableText string) (string, string, error) {
func runMolDistill(cmd *cobra.Command, args []string) {
ctx := rootCtx
// mol distill requires direct store access for reading the epic
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: mol distill requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon mol distill %s ...\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
// Check we have some database access
if store == nil && daemonClient == nil {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
os.Exit(1)
}
@@ -117,17 +115,23 @@ func runMolDistill(cmd *cobra.Command, args []string) {
dryRun, _ := cmd.Flags().GetBool("dry-run")
outputDir, _ := cmd.Flags().GetString("output")
// Resolve epic ID
epicID, err := utils.ResolvePartialID(ctx, store, args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "Error: '%s' not found\n", args[0])
os.Exit(1)
// Load the subgraph (works with daemon or direct)
// Show/GetIssue handle partial ID resolution
var subgraph *TemplateSubgraph
var err error
if daemonClient != nil {
subgraph, err = loadTemplateSubgraphViaDaemon(daemonClient, args[0])
} else {
// Resolve ID for direct access
issueID, resolveErr := utils.ResolvePartialID(ctx, store, args[0])
if resolveErr != nil {
fmt.Fprintf(os.Stderr, "Error: '%s' not found\n", args[0])
os.Exit(1)
}
subgraph, err = loadTemplateSubgraph(ctx, store, issueID)
}
// Load the epic subgraph
subgraph, err := loadTemplateSubgraph(ctx, store, epicID)
if err != nil {
fmt.Fprintf(os.Stderr, "Error loading epic: %v\n", err)
fmt.Fprintf(os.Stderr, "Error loading issue: %v\n", err)
os.Exit(1)
}
@@ -172,7 +176,7 @@ func runMolDistill(cmd *cobra.Command, args []string) {
}
if dryRun {
fmt.Printf("\nDry run: would distill %d steps from %s into formula\n\n", countSteps(f.Steps), epicID)
fmt.Printf("\nDry run: would distill %d steps from %s into formula\n\n", countSteps(f.Steps), subgraph.Root.ID)
fmt.Printf("Formula: %s\n", formulaName)
fmt.Printf("Output: %s\n", outputPath)
if len(replacements) > 0 {
@@ -225,7 +229,7 @@ func runMolDistill(cmd *cobra.Command, args []string) {
fmt.Printf(" Variables: %s\n", strings.Join(result.Variables, ", "))
}
fmt.Printf("\nTo instantiate:\n")
fmt.Printf(" bd mol pour %s", result.FormulaName)
fmt.Printf(" bd pour %s", result.FormulaName)
for _, v := range result.Variables {
fmt.Printf(" --var %s=<value>", v)
}
@@ -254,9 +258,9 @@ func findWritableFormulaDir(formulaName string) string {
if err := os.MkdirAll(dir, 0755); err == nil {
// Check if we can write to it
testPath := filepath.Join(dir, ".write-test")
if f, err := os.Create(testPath); err == nil { //nolint:gosec // testPath is constructed from known search paths
_ = f.Close()
_ = os.Remove(testPath)
if f, err := os.Create(testPath); err == nil {
f.Close()
os.Remove(testPath)
return filepath.Join(dir, formulaName+formula.FormulaExt)
}
}
@@ -365,7 +369,7 @@ func subgraphToFormula(subgraph *TemplateSubgraph, name string, replacements map
func init() {
molDistillCmd.Flags().StringSlice("var", []string{}, "Replace value with {{variable}} placeholder (variable=value)")
molDistillCmd.Flags().Bool("dry-run", false, "Preview what would be created")
molDistillCmd.Flags().String("output", "", "Output directory for formula file")
molDistillCmd.Flags().StringP("output", "o", "", "Output directory for formula file")
molCmd.AddCommand(molDistillCmd)
}

View File

@@ -18,17 +18,17 @@ import (
var molSquashCmd = &cobra.Command{
Use: "squash <molecule-id>",
Short: "Compress molecule execution into a digest",
Long: `Squash a molecule's ephemeral children into a single digest issue.
Long: `Squash a molecule's wisp children into a single digest issue.
This command collects all ephemeral child issues of a molecule (Ephemeral=true),
This command collects all wisp child issues of a molecule (Wisp=true),
generates a summary digest, and promotes the wisps to persistent by
clearing their Wisp flag (or optionally deletes them).
The squash operation:
1. Loads the molecule and all its children
2. Filters to only wisps (ephemeral issues with Ephemeral=true)
2. Filters to only wisps (ephemeral issues with Wisp=true)
3. Generates a digest (summary of work done)
4. Creates a permanent digest issue (Ephemeral=false)
4. Creates a permanent digest issue (Wisp=false)
5. Clears Wisp flag on children (promotes to persistent)
OR deletes them with --delete-children
@@ -95,13 +95,13 @@ func runMolSquash(cmd *cobra.Command, args []string) {
os.Exit(1)
}
// Filter to only ephemeral children (exclude root)
// Filter to only wisp children (exclude root)
var wispChildren []*types.Issue
for _, issue := range subgraph.Issues {
if issue.ID == subgraph.Root.ID {
continue // Skip root
}
if issue.Ephemeral {
if issue.Wisp {
wispChildren = append(wispChildren, issue)
}
}
@@ -113,13 +113,13 @@ func runMolSquash(cmd *cobra.Command, args []string) {
SquashedCount: 0,
})
} else {
fmt.Printf("No ephemeral children found for molecule %s\n", moleculeID)
fmt.Printf("No wisp children found for molecule %s\n", moleculeID)
}
return
}
if dryRun {
fmt.Printf("\nDry run: would squash %d ephemeral children of %s\n\n", len(wispChildren), moleculeID)
fmt.Printf("\nDry run: would squash %d wisp children of %s\n\n", len(wispChildren), moleculeID)
fmt.Printf("Root: %s\n", subgraph.Root.Title)
fmt.Printf("\nWisp children to squash:\n")
for _, issue := range wispChildren {
@@ -247,7 +247,7 @@ func squashMolecule(ctx context.Context, s storage.Storage, root *types.Issue, c
CloseReason: fmt.Sprintf("Squashed from %d wisps", len(children)),
Priority: root.Priority,
IssueType: types.TypeTask,
Ephemeral: false, // Digest is permanent, not a wisp
Wisp: false, // Digest is permanent, not a wisp
ClosedAt: &now,
}
@@ -283,7 +283,7 @@ func squashMolecule(ctx context.Context, s storage.Storage, root *types.Issue, c
return nil, err
}
// Delete ephemeral children (outside transaction for better error handling)
// Delete wisp children (outside transaction for better error handling)
if !keepChildren {
deleted, err := deleteWispChildren(ctx, s, childIDs)
if err != nil {
@@ -319,7 +319,7 @@ func deleteWispChildren(ctx context.Context, s storage.Storage, ids []string) (i
func init() {
molSquashCmd.Flags().Bool("dry-run", false, "Preview what would be squashed")
molSquashCmd.Flags().Bool("keep-children", false, "Don't delete ephemeral children after squash")
molSquashCmd.Flags().Bool("keep-children", false, "Don't delete wisp children after squash")
molSquashCmd.Flags().String("summary", "", "Agent-provided summary (bypasses auto-generation)")
molCmd.AddCommand(molSquashCmd)

View File

@@ -136,7 +136,7 @@ func findStaleMolecules(ctx context.Context, s storage.Storage, blockingOnly, un
}
// Get blocked issues to find what each stale molecule is blocking
blockedIssues, err := s.GetBlockedIssues(ctx, types.WorkFilter{})
blockedIssues, err := s.GetBlockedIssues(ctx)
if err != nil {
return nil, fmt.Errorf("querying blocked issues: %w", err)
}

View File

@@ -489,7 +489,7 @@ func TestSquashMolecule(t *testing.T) {
Status: types.StatusClosed,
Priority: 2,
IssueType: types.TypeTask,
Ephemeral: true,
Wisp: true,
CloseReason: "Completed design",
}
child2 := &types.Issue{
@@ -498,7 +498,7 @@ func TestSquashMolecule(t *testing.T) {
Status: types.StatusClosed,
Priority: 2,
IssueType: types.TypeTask,
Ephemeral: true,
Wisp: true,
CloseReason: "Code merged",
}
@@ -547,7 +547,7 @@ func TestSquashMolecule(t *testing.T) {
if err != nil {
t.Fatalf("Failed to get digest: %v", err)
}
if digest.Ephemeral {
if digest.Wisp {
t.Error("Digest should NOT be ephemeral")
}
if digest.Status != types.StatusClosed {
@@ -595,7 +595,7 @@ func TestSquashMoleculeWithDelete(t *testing.T) {
Status: types.StatusClosed,
Priority: 2,
IssueType: types.TypeTask,
Ephemeral: true,
Wisp: true,
}
if err := s.CreateIssue(ctx, child, "test"); err != nil {
t.Fatalf("Failed to create child: %v", err)
@@ -705,7 +705,7 @@ func TestSquashMoleculeWithAgentSummary(t *testing.T) {
Status: types.StatusClosed,
Priority: 2,
IssueType: types.TypeTask,
Ephemeral: true,
Wisp: true,
CloseReason: "Done",
}
if err := s.CreateIssue(ctx, child, "test"); err != nil {
@@ -1304,14 +1304,14 @@ func TestWispFilteringFromExport(t *testing.T) {
Status: types.StatusOpen,
Priority: 1,
IssueType: types.TypeTask,
Ephemeral: false,
Wisp: false,
}
wispIssue := &types.Issue{
Title: "Wisp Issue",
Status: types.StatusOpen,
Priority: 2,
IssueType: types.TypeTask,
Ephemeral: true,
Wisp: true,
}
if err := s.CreateIssue(ctx, normalIssue, "test"); err != nil {
@@ -1333,7 +1333,7 @@ func TestWispFilteringFromExport(t *testing.T) {
// Filter wisp issues (simulating export behavior)
exportableIssues := make([]*types.Issue, 0)
for _, issue := range allIssues {
if !issue.Ephemeral {
if !issue.Wisp {
exportableIssues = append(exportableIssues, issue)
}
}
@@ -2364,166 +2364,3 @@ func TestCalculateBlockingDepths(t *testing.T) {
t.Errorf("step3 depth = %d, want 3", depths["step3"])
}
}
// TestSpawnMoleculeEphemeralFlag verifies that spawnMolecule with ephemeral=true
// creates issues with the Ephemeral flag set (bd-phin)
func TestSpawnMoleculeEphemeralFlag(t *testing.T) {
	ctx := context.Background()
	// Each test gets an isolated on-disk SQLite store under a temp dir.
	dbPath := t.TempDir() + "/test.db"
	s, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer s.Close()
	// The prefix drives generated issue IDs ("test-..."); combined with the
	// "eph" infix passed to spawnMolecule below, spawned IDs become "test-eph-*".
	if err := s.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set config: %v", err)
	}
	// Create a template with a child (IDs will be auto-generated)
	root := &types.Issue{
		Title:     "Template Epic",
		Status:    types.StatusOpen,
		Priority:  2,
		IssueType: types.TypeEpic,
		Labels:    []string{MoleculeLabel}, // Required for loadTemplateSubgraph
	}
	child := &types.Issue{
		Title:     "Template Task",
		Status:    types.StatusOpen,
		Priority:  2,
		IssueType: types.TypeTask,
	}
	if err := s.CreateIssue(ctx, root, "test"); err != nil {
		t.Fatalf("Failed to create template root: %v", err)
	}
	if err := s.CreateIssue(ctx, child, "test"); err != nil {
		t.Fatalf("Failed to create template child: %v", err)
	}
	// Add parent-child dependency so the child is part of the root's subgraph.
	if err := s.AddDependency(ctx, &types.Dependency{
		IssueID:     child.ID,
		DependsOnID: root.ID,
		Type:        types.DepParentChild,
	}, "test"); err != nil {
		t.Fatalf("Failed to add parent-child dependency: %v", err)
	}
	// Load subgraph
	subgraph, err := loadTemplateSubgraph(ctx, s, root.ID)
	if err != nil {
		t.Fatalf("Failed to load subgraph: %v", err)
	}
	// Spawn with ephemeral=true
	// NOTE(review): presumed arg meanings — vars=nil, assignee="", actor="test",
	// ephemeral=true, id infix="eph"; confirm against spawnMolecule's signature.
	result, err := spawnMolecule(ctx, s, subgraph, nil, "", "test", true, "eph")
	if err != nil {
		t.Fatalf("spawnMolecule failed: %v", err)
	}
	// Verify all spawned issues have Ephemeral=true.
	// IDMapping maps template (old) IDs to spawned (new) IDs.
	for oldID, newID := range result.IDMapping {
		spawned, err := s.GetIssue(ctx, newID)
		if err != nil {
			t.Fatalf("Failed to get spawned issue %s: %v", newID, err)
		}
		if !spawned.Ephemeral {
			t.Errorf("Spawned issue %s (from %s) should have Ephemeral=true, got false", newID, oldID)
		}
	}
	// Verify spawned issues have the correct prefix
	for _, newID := range result.IDMapping {
		if !strings.HasPrefix(newID, "test-eph-") {
			t.Errorf("Spawned issue ID %s should have prefix 'test-eph-'", newID)
		}
	}
}
// TestSpawnMoleculeFromFormulaEphemeral verifies that spawning from a cooked formula
// with ephemeral=true creates issues with the Ephemeral flag set (bd-phin)
func TestSpawnMoleculeFromFormulaEphemeral(t *testing.T) {
	ctx := context.Background()
	dbPath := t.TempDir() + "/test.db"
	s, err := sqlite.New(ctx, dbPath)
	if err != nil {
		t.Fatalf("Failed to create store: %v", err)
	}
	defer s.Close()
	// Prefix drives generated IDs; with the "eph" infix below, spawned IDs
	// are expected to look like "test-eph-*".
	if err := s.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("Failed to set config: %v", err)
	}
	// Create a minimal in-memory subgraph (simulating cookFormulaToSubgraph output).
	// Unlike the DB-backed test above, these template issues are never persisted.
	root := &types.Issue{
		ID:         "test-formula",
		Title:      "Test Formula",
		Status:     types.StatusOpen,
		Priority:   2,
		IssueType:  types.TypeEpic,
		IsTemplate: true,
	}
	step := &types.Issue{
		ID:         "test-formula.step1",
		Title:      "Step 1",
		Status:     types.StatusOpen,
		Priority:   2,
		IssueType:  types.TypeTask,
		IsTemplate: true,
	}
	subgraph := &TemplateSubgraph{
		Root:   root,
		Issues: []*types.Issue{root, step},
		Dependencies: []*types.Dependency{
			{
				IssueID:     step.ID,
				DependsOnID: root.ID,
				Type:        types.DepParentChild,
			},
		},
		IssueMap: map[string]*types.Issue{
			root.ID: root,
			step.ID: step,
		},
	}
	// Spawn with ephemeral=true (simulating bd mol wisp <formula>)
	result, err := spawnMolecule(ctx, s, subgraph, nil, "", "test", true, "eph")
	if err != nil {
		t.Fatalf("spawnMolecule failed: %v", err)
	}
	// Verify all spawned issues have Ephemeral=true
	for oldID, newID := range result.IDMapping {
		spawned, err := s.GetIssue(ctx, newID)
		if err != nil {
			t.Fatalf("Failed to get spawned issue %s: %v", newID, err)
		}
		if !spawned.Ephemeral {
			t.Errorf("Spawned issue %s (from %s) should have Ephemeral=true, got false", newID, oldID)
		}
		t.Logf("Issue %s: Ephemeral=%v", newID, spawned.Ephemeral)
	}
	// Verify they have the correct prefix
	for _, newID := range result.IDMapping {
		if !strings.HasPrefix(newID, "test-eph-") {
			t.Errorf("Spawned issue ID %s should have prefix 'test-eph-'", newID)
		}
	}
	// Verify ephemeral issues are excluded from ready work
	readyWork, err := s.GetReadyWork(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("GetReadyWork failed: %v", err)
	}
	for _, issue := range readyWork {
		for _, spawnedID := range result.IDMapping {
			if issue.ID == spawnedID {
				t.Errorf("Ephemeral issue %s should not appear in ready work", spawnedID)
			}
		}
	}
}

View File

@@ -72,11 +72,8 @@ func initializeNoDbMode() error {
debug.Logf("using prefix '%s'", prefix)
// Set global store and mark as active (fixes bd comment --no-db)
storeMutex.Lock()
// Set global store
store = memStore
storeActive = true
storeMutex.Unlock()
return nil
}
@@ -221,7 +218,7 @@ func writeIssuesToJSONL(memStore *memory.MemoryStorage, beadsDir string) error {
// Wisps exist only in SQLite and are shared via .beads/redirect, not JSONL.
filtered := make([]*types.Issue, 0, len(issues))
for _, issue := range issues {
if !issue.Ephemeral {
if !issue.Wisp {
filtered = append(filtered, issue)
}
}

View File

@@ -158,90 +158,6 @@ func TestDetectPrefix(t *testing.T) {
})
}
func TestInitializeNoDbMode_SetsStoreActive(t *testing.T) {
// This test verifies the fix for bd comment --no-db not working.
// The bug was that initializeNoDbMode() set `store` but not `storeActive`,
// so ensureStoreActive() would try to find a SQLite database.
tempDir := t.TempDir()
beadsDir := filepath.Join(tempDir, ".beads")
if err := os.MkdirAll(beadsDir, 0o755); err != nil {
t.Fatalf("Failed to create .beads dir: %v", err)
}
// Create a minimal JSONL file with one issue
jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
content := `{"id":"bd-1","title":"Test Issue","status":"open"}
`
if err := os.WriteFile(jsonlPath, []byte(content), 0o600); err != nil {
t.Fatalf("Failed to write JSONL: %v", err)
}
// Save and restore global state
oldStore := store
oldStoreActive := storeActive
oldCwd, _ := os.Getwd()
defer func() {
storeMutex.Lock()
store = oldStore
storeActive = oldStoreActive
storeMutex.Unlock()
_ = os.Chdir(oldCwd)
}()
// Change to temp dir so initializeNoDbMode finds .beads
if err := os.Chdir(tempDir); err != nil {
t.Fatalf("Failed to chdir: %v", err)
}
// Reset global state
storeMutex.Lock()
store = nil
storeActive = false
storeMutex.Unlock()
// Initialize no-db mode
if err := initializeNoDbMode(); err != nil {
t.Fatalf("initializeNoDbMode failed: %v", err)
}
// Verify storeActive is now true
storeMutex.Lock()
active := storeActive
s := store
storeMutex.Unlock()
if !active {
t.Error("storeActive should be true after initializeNoDbMode")
}
if s == nil {
t.Fatal("store should not be nil after initializeNoDbMode")
}
// ensureStoreActive should now return immediately without error
if err := ensureStoreActive(); err != nil {
t.Errorf("ensureStoreActive should succeed after initializeNoDbMode: %v", err)
}
// Verify comments work (this was the failing case)
ctx := rootCtx
comment, err := s.AddIssueComment(ctx, "bd-1", "testuser", "Test comment")
if err != nil {
t.Fatalf("AddIssueComment failed: %v", err)
}
if comment.Text != "Test comment" {
t.Errorf("Expected 'Test comment', got %s", comment.Text)
}
comments, err := s.GetIssueComments(ctx, "bd-1")
if err != nil {
t.Fatalf("GetIssueComments failed: %v", err)
}
if len(comments) != 1 {
t.Errorf("Expected 1 comment, got %d", len(comments))
}
}
func TestWriteIssuesToJSONL(t *testing.T) {
tempDir := t.TempDir()
beadsDir := filepath.Join(tempDir, ".beads")

View File

@@ -32,9 +32,9 @@ Use pour for:
- Anything you might need to reference later
Examples:
bd mol pour mol-feature --var name=auth # Create persistent mol from proto
bd mol pour mol-release --var version=1.0 # Release workflow
bd mol pour mol-review --var pr=123 # Code review workflow`,
bd pour mol-feature --var name=auth # Create persistent mol from proto
bd pour mol-release --var version=1.0 # Release workflow
bd pour mol-review --var pr=123 # Code review workflow`,
Args: cobra.ExactArgs(1),
Run: runPour,
}
@@ -260,5 +260,5 @@ func init() {
pourCmd.Flags().StringSlice("attach", []string{}, "Proto to attach after spawning (repeatable)")
pourCmd.Flags().String("attach-type", types.BondTypeSequential, "Bond type for attachments: sequential, parallel, or conditional")
molCmd.AddCommand(pourCmd)
rootCmd.AddCommand(pourCmd)
}

View File

@@ -39,7 +39,6 @@ This is useful for agents executing molecules to see which steps can run next.`,
labels, _ := cmd.Flags().GetStringSlice("label")
labelsAny, _ := cmd.Flags().GetStringSlice("label-any")
issueType, _ := cmd.Flags().GetString("type")
parentID, _ := cmd.Flags().GetString("parent")
// Use global jsonOutput set by PersistentPreRun (respects config.yaml + env vars)
// Normalize labels: trim, dedupe, remove empty
@@ -70,9 +69,6 @@ This is useful for agents executing molecules to see which steps can run next.`,
if assignee != "" && !unassigned {
filter.Assignee = &assignee
}
if parentID != "" {
filter.ParentID = &parentID
}
// Validate sort policy
if !filter.SortPolicy.IsValid() {
fmt.Fprintf(os.Stderr, "Error: invalid sort policy '%s'. Valid values: hybrid, priority, oldest\n", sortPolicy)
@@ -88,7 +84,6 @@ This is useful for agents executing molecules to see which steps can run next.`,
SortPolicy: sortPolicy,
Labels: labels,
LabelsAny: labelsAny,
ParentID: parentID,
}
if cmd.Flags().Changed("priority") {
priority, _ := cmd.Flags().GetInt("priority")
@@ -234,17 +229,12 @@ var blockedCmd = &cobra.Command{
var err error
store, err = sqlite.New(ctx, dbPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
os.Exit(1)
}
defer func() { _ = store.Close() }()
}
parentID, _ := cmd.Flags().GetString("parent")
var blockedFilter types.WorkFilter
if parentID != "" {
blockedFilter.ParentID = &parentID
}
blocked, err := store.GetBlockedIssues(ctx, blockedFilter)
}
blocked, err := store.GetBlockedIssues(ctx)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
@@ -420,8 +410,6 @@ func init() {
readyCmd.Flags().StringSlice("label-any", []string{}, "Filter by labels (OR: must have AT LEAST ONE). Can combine with --label")
readyCmd.Flags().StringP("type", "t", "", "Filter by issue type (task, bug, feature, epic, merge-request)")
readyCmd.Flags().String("mol", "", "Filter to steps within a specific molecule")
readyCmd.Flags().String("parent", "", "Filter to descendants of this bead/epic")
rootCmd.AddCommand(readyCmd)
blockedCmd.Flags().String("parent", "", "Filter to descendants of this bead/epic")
rootCmd.AddCommand(blockedCmd)
}

View File

@@ -9,7 +9,6 @@ import (
"runtime"
"testing"
"github.com/steveyegge/beads/internal/git"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
@@ -92,11 +91,6 @@ func testFreshCloneAutoImport(t *testing.T) {
// Test checkGitForIssues detects issues.jsonl
t.Chdir(dir)
git.ResetCaches() // Reset git caches after changing directory
git.ResetCaches()
count, path, gitRef := checkGitForIssues()
if count != 1 {
t.Errorf("Expected 1 issue in git, got %d", count)
@@ -176,11 +170,6 @@ func testDatabaseRemovalScenario(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches() // Reset git caches after changing directory
git.ResetCaches()
// Test checkGitForIssues finds issues.jsonl (canonical name)
count, path, gitRef := checkGitForIssues()
if count != 2 {
@@ -259,11 +248,6 @@ func testLegacyFilenameSupport(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches() // Reset git caches after changing directory
git.ResetCaches()
// Test checkGitForIssues finds issues.jsonl
count, path, gitRef := checkGitForIssues()
if count != 1 {
@@ -340,11 +324,6 @@ func testPrecedenceTest(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches() // Reset git caches after changing directory
git.ResetCaches()
// Test checkGitForIssues prefers issues.jsonl
count, path, _ := checkGitForIssues()
if count != 2 {
@@ -391,11 +370,6 @@ func testInitSafetyCheck(t *testing.T) {
// Change to test directory
t.Chdir(dir)
git.ResetCaches() // Reset git caches after changing directory
git.ResetCaches()
// Create empty database (simulating failed import)
dbPath := filepath.Join(beadsDir, "test.db")
store, err := sqlite.New(context.Background(), dbPath)
@@ -435,14 +409,8 @@ func testInitSafetyCheck(t *testing.T) {
// Helper functions
// runCmd runs a command and fails the test if it returns an error
// If the command is "git init", it automatically adds --initial-branch=main
// for modern git compatibility.
func runCmd(t *testing.T, dir string, name string, args ...string) {
t.Helper()
// Add --initial-branch=main to git init for modern git compatibility
if name == "git" && len(args) > 0 && args[0] == "init" {
args = append(args, "--initial-branch=main")
}
cmd := exec.Command(name, args...)
cmd.Dir = dir
if output, err := cmd.CombinedOutput(); err != nil {

View File

@@ -204,7 +204,7 @@ func TestRelateCommand(t *testing.T) {
}
// Issue1 should NOT be blocked (relates-to doesn't block)
blocked, err := s.GetBlockedIssues(ctx, types.WorkFilter{})
blocked, err := s.GetBlockedIssues(ctx)
if err != nil {
t.Fatalf("GetBlockedIssues failed: %v", err)
}

View File

@@ -1,183 +0,0 @@
package main
import (
"context"
"path/filepath"
"github.com/steveyegge/beads/internal/routing"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
"github.com/steveyegge/beads/internal/utils"
)
// RoutedResult contains the result of a routed issue lookup.
// Callers MUST invoke Close when done; it releases any routed storage
// connection and is a safe no-op for local (non-routed) lookups.
type RoutedResult struct {
	Issue      *types.Issue    // The issue that was found (may be nil on some fallback paths)
	Store      storage.Storage // The store that contains this issue (may be routed)
	Routed     bool            // true if the issue was found via routing
	ResolvedID string          // The resolved (full) issue ID
	closeFn    func()          // Function to close routed storage (if any)
}
// Close releases the routed storage connection, if one was opened for this
// lookup. It is a no-op when the result came from the local store, so it is
// always safe to call (even when Routed is false).
func (r *RoutedResult) Close() {
	if r.closeFn == nil {
		return
	}
	r.closeFn()
}
// resolveAndGetIssueWithRouting resolves a partial ID and gets the issue,
// using routes.jsonl for prefix-based routing if needed.
// This enables cross-repo issue lookups (e.g., `bd show gt-xyz` from ~/gt).
//
// The resolution happens in the correct store based on the ID prefix.
// Returns a RoutedResult containing the issue, resolved ID, and the store to use.
// The caller MUST call result.Close() when done to release any routed storage.
func resolveAndGetIssueWithRouting(ctx context.Context, localStore storage.Storage, id string) (*RoutedResult, error) {
	// Step 1: Check if routing is needed based on ID prefix
	if dbPath == "" {
		// No routing without a database path - use local store
		return resolveAndGetFromStore(ctx, localStore, id, false)
	}
	beadsDir := filepath.Dir(dbPath)
	routedStorage, err := routing.GetRoutedStorageForID(ctx, id, beadsDir)
	if err != nil {
		return nil, err
	}
	if routedStorage != nil {
		// Step 2: Resolve and get from routed store
		result, err := resolveAndGetFromStore(ctx, routedStorage.Storage, id, true)
		if err != nil {
			// Routed lookup failed: release the routed connection before bailing.
			_ = routedStorage.Close()
			return nil, err
		}
		if result != nil {
			// Transfer ownership of the routed connection to the result;
			// the caller's result.Close() will release it.
			result.closeFn = func() { _ = routedStorage.Close() }
			return result, nil
		}
		// Not found in the routed store (nil result, nil error): release the
		// connection and fall through to the local store.
		_ = routedStorage.Close()
	}
	// Step 3: Fall back to local store
	return resolveAndGetFromStore(ctx, localStore, id, false)
}
// resolveAndGetFromStore resolves a partial ID against a specific store and
// fetches the matching issue. It returns (nil, nil) when resolution succeeds
// but the issue does not exist; the routed flag is recorded verbatim on the
// result so callers know which store produced it.
func resolveAndGetFromStore(ctx context.Context, s storage.Storage, id string, routed bool) (*RoutedResult, error) {
	// Expand a possibly-partial ID to its full form within this store.
	fullID, err := utils.ResolvePartialID(ctx, s, id)
	if err != nil {
		return nil, err
	}
	// Fetch the issue itself.
	iss, err := s.GetIssue(ctx, fullID)
	if err != nil {
		return nil, err
	}
	if iss == nil {
		// Resolved but missing: signal "not found" without an error.
		return nil, nil
	}
	res := &RoutedResult{
		Issue:      iss,
		Store:      s,
		Routed:     routed,
		ResolvedID: fullID,
	}
	return res, nil
}
// getIssueWithRouting tries to get an issue from the local store first,
// then falls back to checking routes.jsonl for prefix-based routing.
// This enables cross-repo issue lookups (e.g., `bd show gt-xyz` from ~/gt).
//
// Returns a RoutedResult containing the issue and the store to use for related queries.
// The caller MUST call result.Close() when done to release any routed storage.
func getIssueWithRouting(ctx context.Context, localStore storage.Storage, id string) (*RoutedResult, error) {
	// Step 1: Try local store first (current behavior)
	issue, err := localStore.GetIssue(ctx, id)
	if err == nil && issue != nil {
		return &RoutedResult{
			Issue:      issue,
			Store:      localStore,
			Routed:     false,
			ResolvedID: id,
		}, nil
	}
	// Step 2: Check routes.jsonl for prefix-based routing
	if dbPath == "" {
		// No routing without a database path - return original result
		// (issue may be nil here; err carries the local lookup failure, if any)
		return &RoutedResult{
			Issue:      issue,
			Store:      localStore,
			Routed:     false,
			ResolvedID: id,
		}, err
	}
	beadsDir := filepath.Dir(dbPath)
	routedStorage, routeErr := routing.GetRoutedStorageForID(ctx, id, beadsDir)
	if routeErr != nil || routedStorage == nil {
		// No routing found or error - return original result
		// (routeErr is intentionally dropped; the local lookup's err wins)
		return &RoutedResult{
			Issue:      issue,
			Store:      localStore,
			Routed:     false,
			ResolvedID: id,
		}, err
	}
	// Step 3: Try the routed storage
	routedIssue, routedErr := routedStorage.Storage.GetIssue(ctx, id)
	if routedErr != nil || routedIssue == nil {
		// Routed lookup failed too - release the connection before returning.
		_ = routedStorage.Close()
		// Return the original error if routing also failed
		if err != nil {
			return nil, err
		}
		return nil, routedErr
	}
	// Return the issue with the routed store. closeFn hands ownership of the
	// routed connection to the caller, who must invoke result.Close().
	return &RoutedResult{
		Issue:      routedIssue,
		Store:      routedStorage.Storage,
		Routed:     true,
		ResolvedID: id,
		closeFn: func() {
			_ = routedStorage.Close()
		},
	}, nil
}
// getRoutedStoreForID returns a storage connection for an issue ID if routing
// is needed, or nil when the issue should live in the local store. The caller
// owns — and must close — any non-nil storage returned.
func getRoutedStoreForID(ctx context.Context, id string) (*routing.RoutedStorage, error) {
	if dbPath == "" {
		// Without a database path there is no .beads dir to read routes from.
		return nil, nil
	}
	return routing.GetRoutedStorageForID(ctx, id, filepath.Dir(dbPath))
}
// needsRouting reports whether the given ID resolves to a beads directory
// other than the current one, i.e. whether a cross-repo lookup would bypass
// the daemon. Resolution failures and unrouted IDs both count as "no".
func needsRouting(id string) bool {
	if dbPath == "" {
		return false
	}
	localDir := filepath.Dir(dbPath)
	resolvedDir, wasRouted, err := routing.ResolveBeadsDirForID(context.Background(), id, localDir)
	switch {
	case err != nil, !wasRouted:
		return false
	default:
		// Routed, but only "needs" routing if it points somewhere else.
		return resolvedDir != localDir
	}
}

View File

@@ -37,17 +37,11 @@ var showCmd = &cobra.Command{
}
}
// Resolve partial IDs first (daemon mode only - direct mode uses routed resolution)
// Resolve partial IDs first
var resolvedIDs []string
var routedArgs []string // IDs that need cross-repo routing (bypass daemon)
if daemonClient != nil {
// In daemon mode, resolve via RPC - but check routing first
// In daemon mode, resolve via RPC
for _, id := range args {
// Check if this ID needs routing to a different beads directory
if needsRouting(id) {
routedArgs = append(routedArgs, id)
continue
}
resolveArgs := &rpc.ResolveIDArgs{ID: id}
resp, err := daemonClient.ResolveID(resolveArgs)
if err != nil {
@@ -59,87 +53,25 @@ var showCmd = &cobra.Command{
}
resolvedIDs = append(resolvedIDs, resolvedID)
}
} else {
// In direct mode, resolve via storage
var err error
resolvedIDs, err = utils.ResolvePartialIDs(ctx, store, args)
if err != nil {
FatalErrorRespectJSON("%v", err)
}
}
// Note: Direct mode uses resolveAndGetIssueWithRouting for prefix-based routing
// Handle --thread flag: show full conversation thread
if showThread {
if daemonClient != nil && len(resolvedIDs) > 0 {
showMessageThread(ctx, resolvedIDs[0], jsonOutput)
return
} else if len(args) > 0 {
// Direct mode - resolve first arg with routing
result, err := resolveAndGetIssueWithRouting(ctx, store, args[0])
if result != nil {
defer result.Close()
}
if err == nil && result != nil && result.ResolvedID != "" {
showMessageThread(ctx, result.ResolvedID, jsonOutput)
return
}
}
if showThread && len(resolvedIDs) > 0 {
showMessageThread(ctx, resolvedIDs[0], jsonOutput)
return
}
// If daemon is running, use RPC (but fall back to direct mode for routed IDs)
// If daemon is running, use RPC
if daemonClient != nil {
allDetails := []interface{}{}
displayIdx := 0
// First, handle routed IDs via direct mode
for _, id := range routedArgs {
result, err := resolveAndGetIssueWithRouting(ctx, store, id)
if err != nil {
if result != nil {
result.Close()
}
fmt.Fprintf(os.Stderr, "Error fetching %s: %v\n", id, err)
continue
}
if result == nil || result.Issue == nil {
if result != nil {
result.Close()
}
fmt.Fprintf(os.Stderr, "Issue %s not found\n", id)
continue
}
issue := result.Issue
issueStore := result.Store
if jsonOutput {
// Get labels and deps for JSON output
type IssueDetails struct {
*types.Issue
Labels []string `json:"labels,omitempty"`
Dependencies []*types.IssueWithDependencyMetadata `json:"dependencies,omitempty"`
Dependents []*types.IssueWithDependencyMetadata `json:"dependents,omitempty"`
Comments []*types.Comment `json:"comments,omitempty"`
}
details := &IssueDetails{Issue: issue}
details.Labels, _ = issueStore.GetLabels(ctx, issue.ID)
if sqliteStore, ok := issueStore.(*sqlite.SQLiteStorage); ok {
details.Dependencies, _ = sqliteStore.GetDependenciesWithMetadata(ctx, issue.ID)
details.Dependents, _ = sqliteStore.GetDependentsWithMetadata(ctx, issue.ID)
}
details.Comments, _ = issueStore.GetIssueComments(ctx, issue.ID)
allDetails = append(allDetails, details)
} else {
if displayIdx > 0 {
fmt.Println("\n" + strings.Repeat("─", 60))
}
fmt.Printf("\n%s: %s\n", ui.RenderAccent(issue.ID), issue.Title)
fmt.Printf("Status: %s\n", issue.Status)
fmt.Printf("Priority: P%d\n", issue.Priority)
fmt.Printf("Type: %s\n", issue.IssueType)
if issue.Description != "" {
fmt.Printf("\nDescription:\n%s\n", issue.Description)
}
fmt.Println()
displayIdx++
}
result.Close() // Close immediately after processing each routed ID
}
// Then, handle local IDs via daemon
for _, id := range resolvedIDs {
for idx, id := range resolvedIDs {
showArgs := &rpc.ShowArgs{ID: id}
resp, err := daemonClient.Show(showArgs)
if err != nil {
@@ -153,7 +85,6 @@ var showCmd = &cobra.Command{
Labels []string `json:"labels,omitempty"`
Dependencies []*types.IssueWithDependencyMetadata `json:"dependencies,omitempty"`
Dependents []*types.IssueWithDependencyMetadata `json:"dependents,omitempty"`
Comments []*types.Comment `json:"comments,omitempty"`
}
var details IssueDetails
if err := json.Unmarshal(resp.Data, &details); err == nil {
@@ -165,10 +96,9 @@ var showCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Issue %s not found\n", id)
continue
}
if displayIdx > 0 {
if idx > 0 {
fmt.Println("\n" + strings.Repeat("─", 60))
}
displayIdx++
// Parse response and use existing formatting code
type IssueDetails struct {
@@ -176,7 +106,6 @@ var showCmd = &cobra.Command{
Labels []string `json:"labels,omitempty"`
Dependencies []*types.IssueWithDependencyMetadata `json:"dependencies,omitempty"`
Dependents []*types.IssueWithDependencyMetadata `json:"dependents,omitempty"`
Comments []*types.Comment `json:"comments,omitempty"`
}
var details IssueDetails
if err := json.Unmarshal(resp.Data, &details); err != nil {
@@ -211,9 +140,6 @@ var showCmd = &cobra.Command{
fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
if issue.CreatedBy != "" {
fmt.Printf("Created by: %s\n", issue.CreatedBy)
}
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
// Show compaction status
@@ -307,17 +233,6 @@ var showCmd = &cobra.Command{
}
}
if len(details.Comments) > 0 {
fmt.Printf("\nComments (%d):\n", len(details.Comments))
for _, comment := range details.Comments {
fmt.Printf(" [%s] %s\n", comment.Author, comment.CreatedAt.Format("2006-01-02 15:04"))
commentLines := strings.Split(comment.Text, "\n")
for _, line := range commentLines {
fmt.Printf(" %s\n", line)
}
}
}
fmt.Println()
}
}
@@ -328,28 +243,18 @@ var showCmd = &cobra.Command{
return
}
// Direct mode - use routed resolution for cross-repo lookups
// Direct mode
allDetails := []interface{}{}
for idx, id := range args {
// Resolve and get issue with routing (e.g., gt-xyz routes to gastown)
result, err := resolveAndGetIssueWithRouting(ctx, store, id)
for idx, id := range resolvedIDs {
issue, err := store.GetIssue(ctx, id)
if err != nil {
if result != nil {
result.Close()
}
fmt.Fprintf(os.Stderr, "Error fetching %s: %v\n", id, err)
continue
}
if result == nil || result.Issue == nil {
if result != nil {
result.Close()
}
if issue == nil {
fmt.Fprintf(os.Stderr, "Issue %s not found\n", id)
continue
}
issue := result.Issue
issueStore := result.Store // Use the store that contains this issue
// Note: result.Close() called at end of loop iteration
if jsonOutput {
// Include labels, dependencies (with metadata), dependents (with metadata), and comments in JSON output
@@ -361,27 +266,26 @@ var showCmd = &cobra.Command{
Comments []*types.Comment `json:"comments,omitempty"`
}
details := &IssueDetails{Issue: issue}
details.Labels, _ = issueStore.GetLabels(ctx, issue.ID)
details.Labels, _ = store.GetLabels(ctx, issue.ID)
// Get dependencies with metadata (dependency_type field)
if sqliteStore, ok := issueStore.(*sqlite.SQLiteStorage); ok {
if sqliteStore, ok := store.(*sqlite.SQLiteStorage); ok {
details.Dependencies, _ = sqliteStore.GetDependenciesWithMetadata(ctx, issue.ID)
details.Dependents, _ = sqliteStore.GetDependentsWithMetadata(ctx, issue.ID)
} else {
// Fallback to regular methods without metadata for other storage backends
deps, _ := issueStore.GetDependencies(ctx, issue.ID)
deps, _ := store.GetDependencies(ctx, issue.ID)
for _, dep := range deps {
details.Dependencies = append(details.Dependencies, &types.IssueWithDependencyMetadata{Issue: *dep})
}
dependents, _ := issueStore.GetDependents(ctx, issue.ID)
dependents, _ := store.GetDependents(ctx, issue.ID)
for _, dependent := range dependents {
details.Dependents = append(details.Dependents, &types.IssueWithDependencyMetadata{Issue: *dependent})
}
}
details.Comments, _ = issueStore.GetIssueComments(ctx, issue.ID)
details.Comments, _ = store.GetIssueComments(ctx, issue.ID)
allDetails = append(allDetails, details)
result.Close() // Close before continuing to next iteration
continue
}
@@ -415,9 +319,6 @@ var showCmd = &cobra.Command{
fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
}
fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
if issue.CreatedBy != "" {
fmt.Printf("Created by: %s\n", issue.CreatedBy)
}
fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
// Show compaction status footer
@@ -459,13 +360,13 @@ var showCmd = &cobra.Command{
}
// Show labels
labels, _ := issueStore.GetLabels(ctx, issue.ID)
labels, _ := store.GetLabels(ctx, issue.ID)
if len(labels) > 0 {
fmt.Printf("\nLabels: %v\n", labels)
}
// Show dependencies
deps, _ := issueStore.GetDependencies(ctx, issue.ID)
deps, _ := store.GetDependencies(ctx, issue.ID)
if len(deps) > 0 {
fmt.Printf("\nDepends on (%d):\n", len(deps))
for _, dep := range deps {
@@ -475,7 +376,7 @@ var showCmd = &cobra.Command{
// Show dependents - grouped by dependency type for clarity
// Use GetDependentsWithMetadata to get the dependency type
sqliteStore, ok := issueStore.(*sqlite.SQLiteStorage)
sqliteStore, ok := store.(*sqlite.SQLiteStorage)
if ok {
dependentsWithMeta, _ := sqliteStore.GetDependentsWithMetadata(ctx, issue.ID)
if len(dependentsWithMeta) > 0 {
@@ -523,7 +424,7 @@ var showCmd = &cobra.Command{
}
} else {
// Fallback for non-SQLite storage
dependents, _ := issueStore.GetDependents(ctx, issue.ID)
dependents, _ := store.GetDependents(ctx, issue.ID)
if len(dependents) > 0 {
fmt.Printf("\nBlocks (%d):\n", len(dependents))
for _, dep := range dependents {
@@ -533,7 +434,7 @@ var showCmd = &cobra.Command{
}
// Show comments
comments, _ := issueStore.GetIssueComments(ctx, issue.ID)
comments, _ := store.GetIssueComments(ctx, issue.ID)
if len(comments) > 0 {
fmt.Printf("\nComments (%d):\n", len(comments))
for _, comment := range comments {
@@ -542,7 +443,6 @@ var showCmd = &cobra.Command{
}
fmt.Println()
result.Close() // Close routed storage after each iteration
}
if jsonOutput && len(allDetails) > 0 {
@@ -763,8 +663,8 @@ var updateCmd = &cobra.Command{
fmt.Fprintf(os.Stderr, "Error getting %s: %v\n", id, err)
continue
}
if err := validateIssueUpdatable(id, issue); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
if issue != nil && issue.IsTemplate {
fmt.Fprintf(os.Stderr, "Error: cannot update template %s: templates are read-only; use 'bd molecule instantiate' to create a work item\n", id)
continue
}
@@ -783,21 +683,48 @@ var updateCmd = &cobra.Command{
}
// Handle label operations
var setLabels, addLabels, removeLabels []string
if v, ok := updates["set_labels"].([]string); ok {
setLabels = v
}
if v, ok := updates["add_labels"].([]string); ok {
addLabels = v
}
if v, ok := updates["remove_labels"].([]string); ok {
removeLabels = v
}
if len(setLabels) > 0 || len(addLabels) > 0 || len(removeLabels) > 0 {
if err := applyLabelUpdates(ctx, store, id, actor, setLabels, addLabels, removeLabels); err != nil {
fmt.Fprintf(os.Stderr, "Error updating labels for %s: %v\n", id, err)
// Set labels (replaces all existing labels)
if setLabels, ok := updates["set_labels"].([]string); ok && len(setLabels) > 0 {
// Get current labels
currentLabels, err := store.GetLabels(ctx, id)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting labels for %s: %v\n", id, err)
continue
}
// Remove all current labels
for _, label := range currentLabels {
if err := store.RemoveLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error removing label %s from %s: %v\n", label, id, err)
continue
}
}
// Add new labels
for _, label := range setLabels {
if err := store.AddLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error setting label %s on %s: %v\n", label, id, err)
continue
}
}
}
// Add labels
if addLabels, ok := updates["add_labels"].([]string); ok {
for _, label := range addLabels {
if err := store.AddLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error adding label %s to %s: %v\n", label, id, err)
continue
}
}
}
// Remove labels
if removeLabels, ok := updates["remove_labels"].([]string); ok {
for _, label := range removeLabels {
if err := store.RemoveLabel(ctx, id, label, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error removing label %s from %s: %v\n", label, id, err)
continue
}
}
}
// Run update hook (bd-kwro.8)
@@ -1019,17 +946,12 @@ var closeCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
CheckReadonly("close")
reason, _ := cmd.Flags().GetString("reason")
if reason == "" {
// Check --resolution alias (Jira CLI convention)
reason, _ = cmd.Flags().GetString("resolution")
}
if reason == "" {
reason = "Closed"
}
force, _ := cmd.Flags().GetBool("force")
continueFlag, _ := cmd.Flags().GetBool("continue")
noAuto, _ := cmd.Flags().GetBool("no-auto")
suggestNext, _ := cmd.Flags().GetBool("suggest-next")
ctx := rootCtx
@@ -1038,11 +960,6 @@ var closeCmd = &cobra.Command{
FatalErrorRespectJSON("--continue only works when closing a single issue")
}
// --suggest-next only works with a single issue
if suggestNext && len(args) > 1 {
FatalErrorRespectJSON("--suggest-next only works when closing a single issue")
}
// Resolve partial IDs first
var resolvedIDs []string
if daemonClient != nil {
@@ -1076,17 +993,22 @@ var closeCmd = &cobra.Command{
if showErr == nil {
var issue types.Issue
if json.Unmarshal(showResp.Data, &issue) == nil {
if err := validateIssueClosable(id, &issue, force); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
// Check if issue is a template (beads-1ra): templates are read-only
if issue.IsTemplate {
fmt.Fprintf(os.Stderr, "Error: cannot close template %s: templates are read-only\n", id)
continue
}
// Check if issue is pinned (bd-6v2)
if !force && issue.Status == types.StatusPinned {
fmt.Fprintf(os.Stderr, "Error: cannot close pinned issue %s (use --force to override)\n", id)
continue
}
}
}
closeArgs := &rpc.CloseArgs{
ID: id,
Reason: reason,
SuggestNext: suggestNext,
ID: id,
Reason: reason,
}
resp, err := daemonClient.CloseIssue(closeArgs)
if err != nil {
@@ -1094,45 +1016,19 @@ var closeCmd = &cobra.Command{
continue
}
// Handle response based on whether SuggestNext was requested (GH#679)
if suggestNext {
var result rpc.CloseResult
if err := json.Unmarshal(resp.Data, &result); err == nil {
if result.Closed != nil {
// Run close hook (bd-kwro.8)
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, result.Closed)
}
if jsonOutput {
closedIssues = append(closedIssues, result.Closed)
}
}
if !jsonOutput {
fmt.Printf("%s Closed %s: %s\n", ui.RenderPass("✓"), id, reason)
// Display newly unblocked issues (GH#679)
if len(result.Unblocked) > 0 {
fmt.Printf("\nNewly unblocked:\n")
for _, issue := range result.Unblocked {
fmt.Printf(" • %s %q (P%d)\n", issue.ID, issue.Title, issue.Priority)
}
}
}
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err == nil {
// Run close hook (bd-kwro.8)
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, &issue)
}
} else {
var issue types.Issue
if err := json.Unmarshal(resp.Data, &issue); err == nil {
// Run close hook (bd-kwro.8)
if hookRunner != nil {
hookRunner.Run(hooks.EventClose, &issue)
}
if jsonOutput {
closedIssues = append(closedIssues, &issue)
}
}
if !jsonOutput {
fmt.Printf("%s Closed %s: %s\n", ui.RenderPass("✓"), id, reason)
if jsonOutput {
closedIssues = append(closedIssues, &issue)
}
}
if !jsonOutput {
fmt.Printf("%s Closed %s: %s\n", ui.RenderPass("✓"), id, reason)
}
}
// Handle --continue flag in daemon mode (bd-ieyy)
@@ -1155,11 +1051,20 @@ var closeCmd = &cobra.Command{
// Get issue for checks
issue, _ := store.GetIssue(ctx, id)
if err := validateIssueClosable(id, issue, force); err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err)
// Check if issue is a template (beads-1ra): templates are read-only
if issue != nil && issue.IsTemplate {
fmt.Fprintf(os.Stderr, "Error: cannot close template %s: templates are read-only\n", id)
continue
}
// Check if issue is pinned (bd-6v2)
if !force {
if issue != nil && issue.Status == types.StatusPinned {
fmt.Fprintf(os.Stderr, "Error: cannot close pinned issue %s (use --force to override)\n", id)
continue
}
}
if err := store.CloseIssue(ctx, id, reason, actor); err != nil {
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
continue
@@ -1182,24 +1087,6 @@ var closeCmd = &cobra.Command{
}
}
// Handle --suggest-next flag in direct mode (GH#679)
if suggestNext && len(resolvedIDs) == 1 && closedCount > 0 {
unblocked, err := store.GetNewlyUnblockedByClose(ctx, resolvedIDs[0])
if err == nil && len(unblocked) > 0 {
if jsonOutput {
outputJSON(map[string]interface{}{
"closed": closedIssues,
"unblocked": unblocked,
})
return
}
fmt.Printf("\nNewly unblocked:\n")
for _, issue := range unblocked {
fmt.Printf(" • %s %q (P%d)\n", issue.ID, issue.Title, issue.Priority)
}
}
}
// Schedule auto-flush if any issues were closed
if len(args) > 0 {
markDirtyAndScheduleFlush()
@@ -1404,13 +1291,15 @@ func findRepliesTo(ctx context.Context, issueID string, daemonClient *rpc.Client
return ""
}
// Direct mode - query storage
deps, err := store.GetDependencyRecords(ctx, issueID)
if err != nil {
return ""
}
for _, dep := range deps {
if dep.Type == types.DepRepliesTo {
return dep.DependsOnID
if sqliteStore, ok := store.(*sqlite.SQLiteStorage); ok {
deps, err := sqliteStore.GetDependenciesWithMetadata(ctx, issueID)
if err != nil {
return ""
}
for _, dep := range deps {
if dep.DependencyType == types.DepRepliesTo {
return dep.ID
}
}
}
return ""
@@ -1459,25 +1348,7 @@ func findReplies(ctx context.Context, issueID string, daemonClient *rpc.Client,
}
return replies
}
allDeps, err := store.GetAllDependencyRecords(ctx)
if err != nil {
return nil
}
var replies []*types.Issue
for childID, deps := range allDeps {
for _, dep := range deps {
if dep.Type == types.DepRepliesTo && dep.DependsOnID == issueID {
issue, _ := store.GetIssue(ctx, childID)
if issue != nil {
replies = append(replies, issue)
}
}
}
}
return replies
return nil
}
func init() {
@@ -1506,11 +1377,8 @@ func init() {
rootCmd.AddCommand(editCmd)
closeCmd.Flags().StringP("reason", "r", "", "Reason for closing")
closeCmd.Flags().String("resolution", "", "Alias for --reason (Jira CLI convention)")
_ = closeCmd.Flags().MarkHidden("resolution") // Hidden alias for agent/CLI ergonomics
closeCmd.Flags().BoolP("force", "f", false, "Force close pinned issues")
closeCmd.Flags().Bool("continue", false, "Auto-advance to next step in molecule")
closeCmd.Flags().Bool("no-auto", false, "With --continue, show next step but don't claim it")
closeCmd.Flags().Bool("suggest-next", false, "Show newly unblocked issues after closing (GH#679)")
rootCmd.AddCommand(closeCmd)
}

View File

@@ -1,68 +0,0 @@
package main
import (
"context"
"fmt"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
// validateIssueUpdatable reports whether the issue identified by id may be
// updated. A nil issue is treated as updatable (missing issues are handled
// by the caller); templates are read-only and are rejected with a
// user-facing error message.
func validateIssueUpdatable(id string, issue *types.Issue) error {
	if issue != nil && issue.IsTemplate {
		return fmt.Errorf("Error: cannot update template %s: templates are read-only; use 'bd molecule instantiate' to create a work item", id)
	}
	return nil
}
// validateIssueClosable reports whether the issue identified by id may be
// closed. A nil issue is permitted (the caller handles missing issues).
// Templates are always rejected; pinned issues are rejected unless force
// is set.
func validateIssueClosable(id string, issue *types.Issue, force bool) error {
	if issue == nil {
		return nil
	}
	switch {
	case issue.IsTemplate:
		return fmt.Errorf("Error: cannot close template %s: templates are read-only", id)
	case issue.Status == types.StatusPinned && !force:
		return fmt.Errorf("Error: cannot close pinned issue %s (use --force to override)", id)
	}
	return nil
}
// applyLabelUpdates applies label changes to the issue identified by issueID,
// in three phases: when setLabels is non-empty the full label set is replaced
// (every current label removed, then the new set added), then addLabels are
// added, then removeLabels are removed. The first storage error aborts the
// sequence and is returned.
func applyLabelUpdates(ctx context.Context, st storage.Storage, issueID, actor string, setLabels, addLabels, removeLabels []string) error {
	// Phase 1: replace the whole label set when set-labels was requested.
	if len(setLabels) > 0 {
		existing, err := st.GetLabels(ctx, issueID)
		if err != nil {
			return err
		}
		for _, l := range existing {
			if err := st.RemoveLabel(ctx, issueID, l, actor); err != nil {
				return err
			}
		}
		for _, l := range setLabels {
			if err := st.AddLabel(ctx, issueID, l, actor); err != nil {
				return err
			}
		}
	}
	// Phase 2: additive labels.
	for _, l := range addLabels {
		if err := st.AddLabel(ctx, issueID, l, actor); err != nil {
			return err
		}
	}
	// Phase 3: removals run last, so a label in both add and remove ends up removed.
	for _, l := range removeLabels {
		if err := st.RemoveLabel(ctx, issueID, l, actor); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -1,139 +0,0 @@
package main
import (
"context"
"testing"
"github.com/steveyegge/beads/internal/storage/memory"
"github.com/steveyegge/beads/internal/types"
)
func TestValidateIssueUpdatable(t *testing.T) {
if err := validateIssueUpdatable("x", nil); err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if err := validateIssueUpdatable("x", &types.Issue{IsTemplate: false}); err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if err := validateIssueUpdatable("bd-1", &types.Issue{IsTemplate: true}); err == nil {
t.Fatalf("expected error")
}
}
func TestValidateIssueClosable(t *testing.T) {
if err := validateIssueClosable("x", nil, false); err != nil {
t.Fatalf("expected nil error, got %v", err)
}
if err := validateIssueClosable("bd-1", &types.Issue{IsTemplate: true}, false); err == nil {
t.Fatalf("expected template close error")
}
if err := validateIssueClosable("bd-2", &types.Issue{Status: types.StatusPinned}, false); err == nil {
t.Fatalf("expected pinned close error")
}
if err := validateIssueClosable("bd-2", &types.Issue{Status: types.StatusPinned}, true); err != nil {
t.Fatalf("expected pinned close to succeed with force, got %v", err)
}
}
// TestApplyLabelUpdates_SetAddRemove exercises all three phases at once:
// set replaces {old1, old2} with {a, b}, add contributes {b, c}, and remove
// drops a — leaving exactly {b, c}.
func TestApplyLabelUpdates_SetAddRemove(t *testing.T) {
	ctx := context.Background()
	st := memory.New("")
	if err := st.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	issue := &types.Issue{Title: "x", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	if err := st.CreateIssue(ctx, issue, "tester"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	_ = st.AddLabel(ctx, issue.ID, "old1", "tester")
	_ = st.AddLabel(ctx, issue.ID, "old2", "tester")
	if err := applyLabelUpdates(ctx, st, issue.ID, "tester", []string{"a", "b"}, []string{"b", "c"}, []string{"a"}); err != nil {
		t.Fatalf("applyLabelUpdates: %v", err)
	}
	labels, _ := st.GetLabels(ctx, issue.ID)
	if len(labels) != 2 {
		t.Fatalf("expected 2 labels, got %v", labels)
	}
	// Label order is not guaranteed, so check membership via a set.
	seen := make(map[string]bool, len(labels))
	for _, l := range labels {
		switch l {
		case "old1", "old2", "a":
			t.Fatalf("unexpected label %q in %v", l, labels)
		}
		seen[l] = true
	}
	if !seen["b"] || !seen["c"] {
		t.Fatalf("expected labels b and c, got %v", labels)
	}
}
// TestApplyLabelUpdates_AddRemoveOnly verifies the add/remove phases with no
// set-labels replacement: starting from {a}, adding b and removing a must
// leave exactly {b}.
func TestApplyLabelUpdates_AddRemoveOnly(t *testing.T) {
	ctx := context.Background()
	st := memory.New("")
	iss := &types.Issue{Title: "x", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask}
	if err := st.CreateIssue(ctx, iss, "tester"); err != nil {
		t.Fatalf("CreateIssue: %v", err)
	}
	_ = st.AddLabel(ctx, iss.ID, "a", "tester")
	err := applyLabelUpdates(ctx, st, iss.ID, "tester", nil, []string{"b"}, []string{"a"})
	if err != nil {
		t.Fatalf("applyLabelUpdates: %v", err)
	}
	got, _ := st.GetLabels(ctx, iss.ID)
	if len(got) != 1 || got[0] != "b" {
		t.Fatalf("expected [b], got %v", got)
	}
}
// TestFindRepliesToAndReplies_WorksWithMemoryStorage builds a three-message
// thread (root <- reply1 <- reply2 via replies-to dependencies) in memory
// storage and verifies both traversal directions in direct (non-daemon) mode.
func TestFindRepliesToAndReplies_WorksWithMemoryStorage(t *testing.T) {
	ctx := context.Background()
	st := memory.New("")
	if err := st.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		t.Fatalf("SetConfig: %v", err)
	}
	// newMsg builds a message-type issue; sender/assignee alternate per hop.
	newMsg := func(title, sender, assignee string) *types.Issue {
		return &types.Issue{Title: title, Status: types.StatusOpen, Priority: 2, IssueType: types.TypeMessage, Sender: sender, Assignee: assignee}
	}
	root := newMsg("root", "a", "b")
	reply1 := newMsg("r1", "b", "a")
	reply2 := newMsg("r2", "a", "b")
	if err := st.CreateIssue(ctx, root, "tester"); err != nil {
		t.Fatalf("CreateIssue(root): %v", err)
	}
	if err := st.CreateIssue(ctx, reply1, "tester"); err != nil {
		t.Fatalf("CreateIssue(reply1): %v", err)
	}
	if err := st.CreateIssue(ctx, reply2, "tester"); err != nil {
		t.Fatalf("CreateIssue(reply2): %v", err)
	}
	if err := st.AddDependency(ctx, &types.Dependency{IssueID: reply1.ID, DependsOnID: root.ID, Type: types.DepRepliesTo}, "tester"); err != nil {
		t.Fatalf("AddDependency(reply1->root): %v", err)
	}
	if err := st.AddDependency(ctx, &types.Dependency{IssueID: reply2.ID, DependsOnID: reply1.ID, Type: types.DepRepliesTo}, "tester"); err != nil {
		t.Fatalf("AddDependency(reply2->reply1): %v", err)
	}
	// Upward traversal: root has no parent; reply2's parent is reply1.
	if got := findRepliesTo(ctx, root.ID, nil, st); got != "" {
		t.Fatalf("expected root replies-to to be empty, got %q", got)
	}
	if got := findRepliesTo(ctx, reply2.ID, nil, st); got != reply1.ID {
		t.Fatalf("expected reply2 parent %q, got %q", reply1.ID, got)
	}
	// Downward traversal: each node has exactly one direct reply.
	rootReplies := findReplies(ctx, root.ID, nil, st)
	if len(rootReplies) != 1 || rootReplies[0].ID != reply1.ID {
		t.Fatalf("expected root replies [%s], got %+v", reply1.ID, rootReplies)
	}
	r1Replies := findReplies(ctx, reply1.ID, nil, st)
	if len(r1Replies) != 1 || r1Replies[0].ID != reply2.ID {
		t.Fatalf("expected reply1 replies [%s], got %+v", reply2.ID, r1Replies)
	}
}

View File

@@ -361,9 +361,6 @@ Use --merge to merge the sync branch back to main branch.`,
}
}
// Clear sync state on successful sync (daemon backoff/hints)
_ = ClearSyncState(beadsDir)
fmt.Println("\n✓ Sync complete")
return
}
@@ -714,11 +711,6 @@ Use --merge to merge the sync branch back to main branch.`,
skipFinalFlush = true
}
// Clear sync state on successful sync (daemon backoff/hints)
if bd := beads.FindBeadsDir(); bd != "" {
_ = ClearSyncState(bd)
}
fmt.Println("\n✓ Sync complete")
}
},

View File

@@ -65,7 +65,7 @@ func exportToJSONL(ctx context.Context, jsonlPath string) error {
// This prevents "zombie" issues that resurrect after mol squash deletes them.
filteredIssues := make([]*types.Issue, 0, len(issues))
for _, issue := range issues {
if issue.Ephemeral {
if issue.Wisp {
continue
}
filteredIssues = append(filteredIssues, issue)

View File

@@ -1,71 +0,0 @@
package main
import (
"context"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/steveyegge/beads/internal/config"
)
// TestBuildGitCommitArgs_ConfigOptions checks that the git.author and
// git.no-gpg-sign config options, plus the commit message, all appear in the
// argument list produced by buildGitCommitArgs.
func TestBuildGitCommitArgs_ConfigOptions(t *testing.T) {
	if err := config.Initialize(); err != nil {
		t.Fatalf("config.Initialize: %v", err)
	}
	config.Set("git.author", "Test User <test@example.com>")
	config.Set("git.no-gpg-sign", true)
	args := buildGitCommitArgs("/repo", "hello", "--", ".beads")
	joined := strings.Join(args, " ")
	for _, want := range []string{"--author", "--no-gpg-sign", "-m hello"} {
		if !strings.Contains(joined, want) {
			t.Fatalf("expected %s in args: %v", want, args)
		}
	}
}
// TestGitCommitBeadsDir_PathspecDoesNotCommitOtherStagedFiles verifies that
// gitCommitBeadsDir commits only the .beads pathspec: an unrelated file that
// was already staged before the call must remain staged (i.e. not be swept
// into the beads commit).
func TestGitCommitBeadsDir_PathspecDoesNotCommitOtherStagedFiles(t *testing.T) {
	// setupGitRepo chdirs into a fresh temp repo; cleanup restores the cwd.
	_, cleanup := setupGitRepo(t)
	defer cleanup()
	if err := config.Initialize(); err != nil {
		t.Fatalf("config.Initialize: %v", err)
	}
	if err := os.MkdirAll(".beads", 0o755); err != nil {
		t.Fatalf("MkdirAll: %v", err)
	}
	// Stage an unrelated file before running gitCommitBeadsDir.
	if err := os.WriteFile("other.txt", []byte("x\n"), 0o600); err != nil {
		t.Fatalf("WriteFile other: %v", err)
	}
	_ = exec.Command("git", "add", "other.txt").Run()
	// Create a beads sync file to commit.
	issuesPath := filepath.Join(".beads", "issues.jsonl")
	if err := os.WriteFile(issuesPath, []byte("{\"id\":\"test-1\"}\n"), 0o600); err != nil {
		t.Fatalf("WriteFile issues: %v", err)
	}
	ctx := context.Background()
	if err := gitCommitBeadsDir(ctx, "beads commit"); err != nil {
		t.Fatalf("gitCommitBeadsDir: %v", err)
	}
	// other.txt should still be staged after the beads-only commit.
	out, err := exec.Command("git", "diff", "--cached", "--name-only").CombinedOutput()
	if err != nil {
		t.Fatalf("git diff --cached: %v\n%s", err, out)
	}
	// Exactly other.txt must remain in the index; anything else means the
	// pathspec leaked or the beads files were left staged.
	if strings.TrimSpace(string(out)) != "other.txt" {
		t.Fatalf("expected other.txt still staged, got: %q", out)
	}
}

View File

@@ -42,10 +42,10 @@ type InstantiateResult struct {
// CloneOptions controls how the subgraph is cloned during spawn/bond
type CloneOptions struct {
Vars map[string]string // Variable substitutions for {{key}} placeholders
Assignee string // Assign the root epic to this agent/user
Actor string // Actor performing the operation
Ephemeral bool // If true, spawned issues are marked for bulk deletion
Vars map[string]string // Variable substitutions for {{key}} placeholders
Assignee string // Assign the root epic to this agent/user
Actor string // Actor performing the operation
Wisp bool // If true, spawned issues are marked for bulk deletion
Prefix string // Override prefix for ID generation (bd-hobo: distinct prefixes)
// Dynamic bonding fields (for Christmas Ornament pattern)
@@ -60,7 +60,7 @@ var templateCmd = &cobra.Command{
Use: "template",
GroupID: "setup",
Short: "Manage issue templates",
Deprecated: "use 'bd mol' instead (formula list, mol show, mol bond)",
Deprecated: "use 'bd mol' instead (mol catalog, mol show, mol bond)",
Long: `Manage Beads templates for creating issue hierarchies.
Templates are epics with the "template" label. They can have child issues
@@ -78,7 +78,7 @@ To use a template:
var templateListCmd = &cobra.Command{
Use: "list",
Short: "List available templates",
Deprecated: "use 'bd formula list' instead",
Deprecated: "use 'bd mol catalog' instead",
Run: func(cmd *cobra.Command, args []string) {
ctx := rootCtx
var beadsTemplates []*types.Issue
@@ -327,7 +327,7 @@ Example:
Vars: vars,
Assignee: assignee,
Actor: actor,
Ephemeral: false,
Wisp: false,
}
var result *InstantiateResult
if daemonClient != nil {
@@ -713,7 +713,7 @@ func cloneSubgraphViaDaemon(client *rpc.Client, subgraph *TemplateSubgraph, opts
AcceptanceCriteria: substituteVariables(oldIssue.AcceptanceCriteria, opts.Vars),
Assignee: issueAssignee,
EstimatedMinutes: oldIssue.EstimatedMinutes,
Ephemeral: opts.Ephemeral,
Wisp: opts.Wisp,
IDPrefix: opts.Prefix, // bd-hobo: distinct prefixes for mols/wisps
}
@@ -960,7 +960,7 @@ func cloneSubgraph(ctx context.Context, s storage.Storage, subgraph *TemplateSub
IssueType: oldIssue.IssueType,
Assignee: issueAssignee,
EstimatedMinutes: oldIssue.EstimatedMinutes,
Ephemeral: opts.Ephemeral, // bd-2vh3: mark for cleanup when closed
Wisp: opts.Wisp, // bd-2vh3: mark for cleanup when closed
IDPrefix: opts.Prefix, // bd-hobo: distinct prefixes for mols/wisps
CreatedAt: time.Now(),
UpdatedAt: time.Now(),

View File

@@ -1,6 +1,6 @@
#!/bin/sh
# bd-shim v1
# bd-hooks-version: 0.38.0
# bd-hooks-version: 0.36.0
#
# bd (beads) post-checkout hook - thin shim
#

View File

@@ -1,6 +1,6 @@
#!/bin/sh
# bd-shim v1
# bd-hooks-version: 0.38.0
# bd-hooks-version: 0.36.0
#
# bd (beads) post-merge hook - thin shim
#

View File

@@ -1,6 +1,6 @@
#!/bin/sh
# bd-shim v1
# bd-hooks-version: 0.38.0
# bd-hooks-version: 0.36.0
#
# bd (beads) pre-commit hook - thin shim
#

View File

@@ -1,6 +1,6 @@
#!/bin/sh
# bd-shim v1
# bd-hooks-version: 0.38.0
# bd-hooks-version: 0.36.0
#
# bd (beads) pre-push hook - thin shim
#

View File

@@ -1,118 +0,0 @@
package main
import (
"fmt"
"os"
"path/filepath"
"testing"
"time"
)
// Guardrail: ensure the cmd/bd test suite does not touch the real repo .beads state.
// Disable with BEADS_TEST_GUARD_DISABLE=1 (useful when running tests while actively using beads).
// TestMain is a guardrail ensuring the cmd/bd test suite does not touch the
// real repo's .beads state. It snapshots a fixed set of files before the run
// and compares afterwards; any change fails the suite. Disable with
// BEADS_TEST_GUARD_DISABLE=1 (useful when running tests while actively
// using beads).
func TestMain(m *testing.M) {
	if os.Getenv("BEADS_TEST_GUARD_DISABLE") != "" {
		os.Exit(m.Run())
	}
	root := findRepoRoot()
	if root == "" {
		// Not inside the repo: nothing to guard.
		os.Exit(m.Run())
	}
	beadsDir := filepath.Join(root, ".beads")
	if _, err := os.Stat(beadsDir); err != nil {
		// No .beads directory: nothing to guard.
		os.Exit(m.Run())
	}
	watched := []string{
		"beads.db",
		"beads.db-wal",
		"beads.db-shm",
		"beads.db-journal",
		"issues.jsonl",
		"beads.jsonl",
		"metadata.json",
		"interactions.jsonl",
		"deletions.jsonl",
		"molecules.jsonl",
		"daemon.lock",
		"daemon.pid",
		"bd.sock",
	}
	pre := snapshotFiles(beadsDir, watched)
	code := m.Run()
	post := snapshotFiles(beadsDir, watched)
	if diff := diffSnapshots(pre, post); diff != "" {
		fmt.Fprintf(os.Stderr, "ERROR: test suite modified repo .beads state:\n%s\n", diff)
		// Preserve a failing exit code from the run; otherwise force failure.
		if code == 0 {
			code = 1
		}
	}
	os.Exit(code)
}
// fileSnap records the observed state of one watched file: presence plus
// size and modification time, enough to detect any change between two
// snapshots. size and modUnix are meaningful only when exists is true.
type fileSnap struct {
	exists  bool  // file was present when the snapshot was taken
	size    int64 // size in bytes
	modUnix int64 // modification time in Unix nanoseconds
}
// snapshotFiles stats each named file under dir and returns a map from name
// to its observed state. Files that cannot be stat'd (missing or otherwise)
// are recorded as non-existent.
func snapshotFiles(dir string, names []string) map[string]fileSnap {
	snaps := make(map[string]fileSnap, len(names))
	for _, n := range names {
		info, err := os.Stat(filepath.Join(dir, n))
		if err != nil {
			snaps[n] = fileSnap{}
			continue
		}
		snaps[n] = fileSnap{exists: true, size: info.Size(), modUnix: info.ModTime().UnixNano()}
	}
	return snaps
}
// diffSnapshots compares two snapshots taken over the same file names and
// returns a human-readable report of every difference (existence flips, or
// size/mtime changes for files present in both). An empty string means the
// snapshots are identical.
func diffSnapshots(before, after map[string]fileSnap) string {
	var report string
	for name, prev := range before {
		cur := after[name]
		switch {
		case prev.exists != cur.exists:
			report += fmt.Sprintf("- %s: exists %v → %v\n", name, prev.exists, cur.exists)
		case !prev.exists:
			// Absent in both snapshots: nothing to compare.
		case prev.size != cur.size || prev.modUnix != cur.modUnix:
			report += fmt.Sprintf("- %s: size %d → %d, mtime %s → %s\n",
				name,
				prev.size,
				cur.size,
				time.Unix(0, prev.modUnix).UTC().Format(time.RFC3339Nano),
				time.Unix(0, cur.modUnix).UTC().Format(time.RFC3339Nano),
			)
		}
	}
	return report
}
// findRepoRoot walks upward from the working directory looking for a go.mod
// file and returns the first directory containing one. It gives up after 25
// levels or at the filesystem root, returning "" when no module root is
// found (or the working directory cannot be determined).
func findRepoRoot() string {
	dir, err := os.Getwd()
	if err != nil {
		return ""
	}
	for depth := 0; depth < 25; depth++ {
		if _, statErr := os.Stat(filepath.Join(dir, "go.mod")); statErr == nil {
			return dir
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			// Reached the filesystem root without finding go.mod.
			return ""
		}
		dir = parent
	}
	return ""
}

View File

@@ -5,8 +5,6 @@ import (
"os/exec"
"testing"
"time"
"github.com/steveyegge/beads/internal/git"
)
// waitFor repeatedly evaluates pred until it returns true or timeout expires.
@@ -39,15 +37,11 @@ func setupGitRepo(t *testing.T) (repoPath string, cleanup func()) {
t.Fatalf("failed to change to temp directory: %v", err)
}
// Reset git caches after changing directory
git.ResetCaches()
// Initialize git repo with 'main' as default branch (modern git convention)
if err := exec.Command("git", "init", "--initial-branch=main").Run(); err != nil {
// Initialize git repo
if err := exec.Command("git", "init").Run(); err != nil {
_ = os.Chdir(originalWd)
t.Fatalf("failed to init git repo: %v", err)
}
git.ResetCaches()
// Configure git
_ = exec.Command("git", "config", "user.email", "test@test.com").Run()
@@ -66,7 +60,6 @@ func setupGitRepo(t *testing.T) (repoPath string, cleanup func()) {
cleanup = func() {
_ = os.Chdir(originalWd)
git.ResetCaches()
}
return tmpDir, cleanup
@@ -87,15 +80,11 @@ func setupGitRepoWithBranch(t *testing.T, branch string) (repoPath string, clean
t.Fatalf("failed to change to temp directory: %v", err)
}
// Reset git caches after changing directory
git.ResetCaches()
// Initialize git repo with specific branch
if err := exec.Command("git", "init", "-b", branch).Run(); err != nil {
_ = os.Chdir(originalWd)
t.Fatalf("failed to init git repo: %v", err)
}
git.ResetCaches()
// Configure git
_ = exec.Command("git", "config", "user.email", "test@test.com").Run()
@@ -114,7 +103,6 @@ func setupGitRepoWithBranch(t *testing.T, branch string) (repoPath string, clean
cleanup = func() {
_ = os.Chdir(originalWd)
git.ResetCaches()
}
return tmpDir, cleanup
@@ -135,11 +123,8 @@ func setupMinimalGitRepo(t *testing.T) (repoPath string, cleanup func()) {
t.Fatalf("failed to change to temp directory: %v", err)
}
// Reset git caches after changing directory
git.ResetCaches()
// Initialize git repo with 'main' as default branch (modern git convention)
if err := exec.Command("git", "init", "--initial-branch=main").Run(); err != nil {
// Initialize git repo
if err := exec.Command("git", "init").Run(); err != nil {
_ = os.Chdir(originalWd)
t.Fatalf("failed to init git repo: %v", err)
}
@@ -150,7 +135,6 @@ func setupMinimalGitRepo(t *testing.T) (repoPath string, cleanup func()) {
cleanup = func() {
_ = os.Chdir(originalWd)
git.ResetCaches()
}
return tmpDir, cleanup

View File

@@ -27,7 +27,7 @@ func TestThreadTraversal(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "worker",
Sender: "manager",
Ephemeral: true,
Wisp: true,
CreatedAt: now,
UpdatedAt: now,
}
@@ -43,7 +43,7 @@ func TestThreadTraversal(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "manager",
Sender: "worker",
Ephemeral: true,
Wisp: true,
CreatedAt: now.Add(time.Minute),
UpdatedAt: now.Add(time.Minute),
}
@@ -59,7 +59,7 @@ func TestThreadTraversal(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "worker",
Sender: "manager",
Ephemeral: true,
Wisp: true,
CreatedAt: now.Add(2 * time.Minute),
UpdatedAt: now.Add(2 * time.Minute),
}
@@ -190,7 +190,7 @@ func TestThreadTraversalEmptyThread(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "user",
Sender: "sender",
Ephemeral: true,
Wisp: true,
CreatedAt: now,
UpdatedAt: now,
}
@@ -228,7 +228,7 @@ func TestThreadTraversalBranching(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "user",
Sender: "sender",
Ephemeral: true,
Wisp: true,
CreatedAt: now,
UpdatedAt: now,
}
@@ -245,7 +245,7 @@ func TestThreadTraversalBranching(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "sender",
Sender: "user",
Ephemeral: true,
Wisp: true,
CreatedAt: now.Add(time.Minute),
UpdatedAt: now.Add(time.Minute),
}
@@ -261,7 +261,7 @@ func TestThreadTraversalBranching(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "sender",
Sender: "another-user",
Ephemeral: true,
Wisp: true,
CreatedAt: now.Add(2 * time.Minute),
UpdatedAt: now.Add(2 * time.Minute),
}
@@ -364,7 +364,7 @@ func TestThreadTraversalOnlyRepliesTo(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "user",
Sender: "sender",
Ephemeral: true,
Wisp: true,
CreatedAt: now,
UpdatedAt: now,
}
@@ -380,7 +380,7 @@ func TestThreadTraversalOnlyRepliesTo(t *testing.T) {
IssueType: types.TypeMessage,
Assignee: "user",
Sender: "sender",
Ephemeral: true,
Wisp: true,
CreatedAt: now.Add(time.Minute),
UpdatedAt: now.Add(time.Minute),
}

View File

@@ -14,7 +14,6 @@ import (
"sync"
"time"
"github.com/steveyegge/beads/internal/beads"
"github.com/steveyegge/beads/internal/storage"
)
@@ -354,30 +353,6 @@ func initDefaultTips() {
return isClaudeDetected() && !isClaudeSetupComplete()
},
)
// Sync conflict tip - ALWAYS show when sync has failed and needs manual intervention
// This is a proactive health check that trumps educational tips (ox-cli pattern)
InjectTip(
"sync_conflict",
"Run 'bd sync' to resolve sync conflict",
200, // Higher than Claude setup - sync issues are urgent
0, // No frequency limit - always show when applicable
1.0, // 100% probability - always show when condition is true
syncConflictCondition,
)
}
// syncConflictCondition checks if there's a sync conflict that needs manual resolution.
// This is the condition function for the sync_conflict tip.
func syncConflictCondition() bool {
// Find beads directory to check sync state
beadsDir := beads.FindBeadsDir()
if beadsDir == "" {
return false
}
state := LoadSyncState(beadsDir)
return state.NeedsManualSync
}
// init initializes the tip system with default tips

View File

@@ -14,7 +14,7 @@ import (
var (
// Version is the current version of bd (overridden by ldflags at build time)
Version = "0.38.0"
Version = "0.36.0"
// Build can be set via ldflags at compile time
Build = "dev"
// Commit and branch the git revision the binary was built from (optional ldflag)

View File

@@ -18,43 +18,33 @@ import (
// Wisp commands - manage ephemeral molecules
//
// Wisps are ephemeral issues with Ephemeral=true in the main database.
// Wisps are ephemeral issues with Wisp=true in the main database.
// They're used for patrol cycles and operational loops that shouldn't
// be exported to JSONL (and thus not synced via git).
//
// Commands:
// bd mol wisp list - List all wisps in current context
// bd mol wisp gc - Garbage collect orphaned wisps
// bd wisp list - List all wisps in current context
// bd wisp gc - Garbage collect orphaned wisps
var wispCmd = &cobra.Command{
Use: "wisp [proto-id]",
Short: "Create or manage wisps (ephemeral molecules)",
Long: `Create or manage wisps - ephemeral molecules for operational workflows.
Use: "wisp",
Short: "Manage ephemeral molecules (wisps)",
Long: `Manage wisps - ephemeral molecules for operational workflows.
When called with a proto-id argument, creates a wisp from that proto.
When called with a subcommand (list, gc), manages existing wisps.
Wisps are issues with Ephemeral=true in the main database. They're stored
Wisps are issues with Wisp=true in the main database. They're stored
locally but NOT exported to JSONL (and thus not synced via git).
They're used for patrol cycles, operational loops, and other workflows
that shouldn't accumulate in the shared issue database.
The wisp lifecycle:
1. Create: bd mol wisp <proto> or bd create --ephemeral
2. Execute: Normal bd operations work on wisp issues
3. Squash: bd mol squash <id> (clears Ephemeral flag, promotes to persistent)
4. Or burn: bd mol burn <id> (deletes without creating digest)
1. Create: bd wisp create <proto> or bd create --wisp
2. Execute: Normal bd operations work on wisps
3. Squash: bd mol squash <id> (clears Wisp flag, promotes to persistent)
4. Or burn: bd mol burn <id> (deletes wisp without creating digest)
Examples:
bd mol wisp mol-patrol # Create wisp from proto
bd mol wisp list # List all wisps
bd mol wisp gc # Garbage collect old wisps
Subcommands:
Commands:
list List all wisps in current context
gc Garbage collect orphaned wisps`,
Args: cobra.MaximumNArgs(1),
Run: runWisp,
}
// WispListItem represents a wisp in list output
@@ -78,44 +68,32 @@ type WispListResult struct {
// OldThreshold is how old a wisp must be to be flagged as old (time-based, for ephemeral cleanup)
const OldThreshold = 24 * time.Hour
// runWisp handles the wisp command when called directly with a proto-id
// It delegates to runWispCreate for the actual work
func runWisp(cmd *cobra.Command, args []string) {
if len(args) == 0 {
// No proto-id provided, show help
cmd.Help()
return
}
// Delegate to the create logic
runWispCreate(cmd, args)
}
// wispCreateCmd instantiates a proto as an ephemeral wisp (kept for backwards compat)
// wispCreateCmd instantiates a proto as an ephemeral wisp
var wispCreateCmd = &cobra.Command{
Use: "create <proto-id>",
Short: "Instantiate a proto as a wisp (solid -> vapor)",
Short: "Instantiate a proto as an ephemeral wisp (solid -> vapor)",
Long: `Create a wisp from a proto - sublimation from solid to vapor.
This is the chemistry-inspired command for creating ephemeral work from templates.
The resulting wisp is stored in the main database with Ephemeral=true and NOT exported to JSONL.
The resulting wisp is stored in the main database with Wisp=true and NOT exported to JSONL.
Phase transition: Proto (solid) -> Wisp (vapor)
Use wisp for:
Use wisp create for:
- Patrol cycles (deacon, witness)
- Health checks and monitoring
- One-shot orchestration runs
- Routine operations with no audit value
The wisp will:
- Be stored in main database with Ephemeral=true flag
- Be stored in main database with Wisp=true flag
- NOT be exported to JSONL (and thus not synced via git)
- Either evaporate (burn) or condense to digest (squash)
Examples:
bd mol wisp create mol-patrol # Ephemeral patrol cycle
bd mol wisp create mol-health-check # One-time health check
bd mol wisp create mol-diagnostics --var target=db # Diagnostic run`,
bd wisp create mol-patrol # Ephemeral patrol cycle
bd wisp create mol-health-check # One-time health check
bd wisp create mol-diagnostics --var target=db # Diagnostic run`,
Args: cobra.ExactArgs(1),
Run: runWispCreate,
}
@@ -129,7 +107,7 @@ func runWispCreate(cmd *cobra.Command, args []string) {
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: wisp create requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon mol wisp %s ...\n", args[0])
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon wisp create %s ...\n", args[0])
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
@@ -237,7 +215,7 @@ func runWispCreate(cmd *cobra.Command, args []string) {
if dryRun {
fmt.Printf("\nDry run: would create wisp with %d issues from proto %s\n\n", len(subgraph.Issues), protoID)
fmt.Printf("Storage: main database (ephemeral=true, not exported to JSONL)\n\n")
fmt.Printf("Storage: main database (wisp=true, not exported to JSONL)\n\n")
for _, issue := range subgraph.Issues {
newTitle := substituteVariables(issue.Title, vars)
fmt.Printf(" - %s (from %s)\n", newTitle, issue.ID)
@@ -245,15 +223,15 @@ func runWispCreate(cmd *cobra.Command, args []string) {
return
}
// Spawn as ephemeral in main database (Ephemeral=true, skips JSONL export)
// bd-hobo: Use "eph" prefix for distinct visual recognition
result, err := spawnMolecule(ctx, store, subgraph, vars, "", actor, true, "eph")
// Spawn as wisp in main database (ephemeral=true sets Wisp flag, skips JSONL export)
// bd-hobo: Use "wisp" prefix for distinct visual recognition
result, err := spawnMolecule(ctx, store, subgraph, vars, "", actor, true, "wisp")
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating wisp: %v\n", err)
os.Exit(1)
}
// Wisp issues are in main db but don't trigger JSONL export (Ephemeral flag excludes them)
// Wisps are in main db but don't trigger JSONL export (Wisp flag excludes them)
if jsonOutput {
type wispCreateResult struct {
@@ -308,9 +286,9 @@ func resolvePartialIDDirect(ctx context.Context, partial string) (string, error)
var wispListCmd = &cobra.Command{
Use: "list",
Short: "List all wisps in current context",
Long: `List all wisps (ephemeral molecules) in the current context.
Long: `List all ephemeral molecules (wisps) in the current context.
Wisps are issues with Ephemeral=true in the main database. They are stored
Wisps are issues with Wisp=true in the main database. They are stored
locally but not exported to JSONL (and thus not synced via git).
The list shows:
@@ -322,12 +300,12 @@ The list shows:
Old wisp detection:
- Old wisps haven't been updated in 24+ hours
- Use 'bd mol wisp gc' to clean up old/abandoned wisps
- Use 'bd wisp gc' to clean up old/abandoned wisps
Examples:
bd mol wisp list # List all wisps
bd mol wisp list --json # JSON output for programmatic use
bd mol wisp list --all # Include closed wisps`,
bd wisp list # List all wisps
bd wisp list --json # JSON output for programmatic use
bd wisp list --all # Include closed wisps`,
Run: runWispList,
}
@@ -349,15 +327,15 @@ func runWispList(cmd *cobra.Command, args []string) {
return
}
// Query wisps from main database using Ephemeral filter
ephemeralFlag := true
// Query wisps from main database using Wisp filter
wispFlag := true
var issues []*types.Issue
var err error
if daemonClient != nil {
// Use daemon RPC
resp, rpcErr := daemonClient.List(&rpc.ListArgs{
Ephemeral: &ephemeralFlag,
Wisp: &wispFlag,
})
if rpcErr != nil {
err = rpcErr
@@ -369,7 +347,7 @@ func runWispList(cmd *cobra.Command, args []string) {
} else {
// Direct database access
filter := types.IssueFilter{
Ephemeral: &ephemeralFlag,
Wisp: &wispFlag,
}
issues, err = store.SearchIssues(ctx, "", filter)
}
@@ -466,7 +444,7 @@ func runWispList(cmd *cobra.Command, args []string) {
if oldCount > 0 {
fmt.Printf("\n%s %d old wisp(s) (not updated in 24+ hours)\n",
ui.RenderWarn("⚠"), oldCount)
fmt.Println(" Hint: Use 'bd mol wisp gc' to clean up old wisps")
fmt.Println(" Hint: Use 'bd wisp gc' to clean up old wisps")
}
}
@@ -515,10 +493,10 @@ Note: This uses time-based cleanup, appropriate for ephemeral wisps.
For graph-pressure staleness detection (blocking other work), see 'bd mol stale'.
Examples:
bd mol wisp gc # Clean abandoned wisps (default: 1h threshold)
bd mol wisp gc --dry-run # Preview what would be cleaned
bd mol wisp gc --age 24h # Custom age threshold
bd mol wisp gc --all # Also clean closed wisps older than threshold`,
bd wisp gc # Clean abandoned wisps (default: 1h threshold)
bd wisp gc --dry-run # Preview what would be cleaned
bd wisp gc --age 24h # Custom age threshold
bd wisp gc --all # Also clean closed wisps older than threshold`,
Run: runWispGC,
}
@@ -554,17 +532,17 @@ func runWispGC(cmd *cobra.Command, args []string) {
if store == nil {
if daemonClient != nil {
fmt.Fprintf(os.Stderr, "Error: wisp gc requires direct database access\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon mol wisp gc\n")
fmt.Fprintf(os.Stderr, "Hint: use --no-daemon flag: bd --no-daemon wisp gc\n")
} else {
fmt.Fprintf(os.Stderr, "Error: no database connection\n")
}
os.Exit(1)
}
// Query wisps from main database using Ephemeral filter
ephemeralFlag := true
// Query wisps from main database using Wisp filter
wispFlag := true
filter := types.IssueFilter{
Ephemeral: &ephemeralFlag,
Wisp: &wispFlag,
}
issues, err := store.SearchIssues(ctx, "", filter)
if err != nil {
@@ -656,11 +634,7 @@ func runWispGC(cmd *cobra.Command, args []string) {
}
func init() {
// Wisp command flags (for direct create: bd mol wisp <proto>)
wispCmd.Flags().StringSlice("var", []string{}, "Variable substitution (key=value)")
wispCmd.Flags().Bool("dry-run", false, "Preview what would be created")
// Wisp create command flags (kept for backwards compat: bd mol wisp create <proto>)
// Wisp create command flags
wispCreateCmd.Flags().StringSlice("var", []string{}, "Variable substitution (key=value)")
wispCreateCmd.Flags().Bool("dry-run", false, "Preview what would be created")
@@ -673,5 +647,5 @@ func init() {
wispCmd.AddCommand(wispCreateCmd)
wispCmd.AddCommand(wispListCmd)
wispCmd.AddCommand(wispGCCmd)
molCmd.AddCommand(wispCmd)
rootCmd.AddCommand(wispCmd)
}

View File

@@ -7,7 +7,6 @@ import (
"testing"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/git"
// Import SQLite driver for test database creation
_ "github.com/ncruces/go-sqlite3/driver"
@@ -71,20 +70,14 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
// Change to the worktree directory
origDir, _ := os.Getwd()
defer func() {
defer func() {
_ = os.Chdir(origDir)
// Reset git caches after changing directory
git.ResetCaches()
// Reinitialize config to restore original state
_ = config.Initialize()
}()
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reset git caches after changing directory (required for IsWorktree to re-detect)
git.ResetCaches()
// No sync-branch configured
os.Unsetenv("BEADS_SYNC_BRANCH")
@@ -113,18 +106,13 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
// Change to the worktree directory
origDir, _ := os.Getwd()
defer func() {
defer func() {
_ = os.Chdir(origDir)
git.ResetCaches()
_ = config.Initialize()
}()
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reset git caches after changing directory
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {
@@ -149,18 +137,13 @@ func TestShouldDisableDaemonForWorktree(t *testing.T) {
// Change to the worktree directory
origDir, _ := os.Getwd()
defer func() {
defer func() {
_ = os.Chdir(origDir)
git.ResetCaches()
_ = config.Initialize()
}()
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reset git caches after changing directory
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {
@@ -204,18 +187,13 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
// Change to the worktree directory
origDir, _ := os.Getwd()
defer func() {
defer func() {
_ = os.Chdir(origDir)
git.ResetCaches()
_ = config.Initialize()
}()
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reset git caches after changing directory
git.ResetCaches()
// Clear all daemon-related env vars
os.Unsetenv("BEADS_NO_DAEMON")
@@ -242,18 +220,13 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
// Change to the worktree directory
origDir, _ := os.Getwd()
defer func() {
defer func() {
_ = os.Chdir(origDir)
git.ResetCaches()
_ = config.Initialize()
}()
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reset git caches after changing directory
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {
@@ -280,18 +253,13 @@ func TestShouldAutoStartDaemonWorktreeIntegration(t *testing.T) {
// Change to the worktree directory
origDir, _ := os.Getwd()
defer func() {
defer func() {
_ = os.Chdir(origDir)
git.ResetCaches()
_ = config.Initialize()
}()
if err := os.Chdir(worktreeDir); err != nil {
t.Fatalf("Failed to change to worktree dir: %v", err)
}
git.ResetCaches()
// Reset git caches after changing directory
git.ResetCaches()
// Reinitialize config to pick up the new directory's config.yaml
if err := config.Initialize(); err != nil {
@@ -334,8 +302,8 @@ func setupWorktreeTestRepo(t *testing.T) (mainDir, worktreeDir string) {
// Create main repo directory
mainDir = t.TempDir()
// Initialize git repo with 'main' as default branch (modern git convention)
cmd := exec.Command("git", "init", "--initial-branch=main")
// Initialize git repo
cmd := exec.Command("git", "init")
cmd.Dir = mainDir
if output, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("Failed to init git repo: %v\n%s", err, output)