Merge branch 'main' of https://github.com/steveyegge/beads
This commit is contained in:
@@ -314,6 +314,14 @@ func markDirtyAndScheduleFullExport() {
|
||||
|
||||
// clearAutoFlushState cancels pending flush and marks DB as clean (after manual export)
|
||||
func clearAutoFlushState() {
|
||||
// With FlushManager, clearing state is unnecessary (new path)
|
||||
// If a flush is pending and fires after manual export, flushToJSONLWithState()
|
||||
// will detect nothing is dirty and skip the flush. This is harmless.
|
||||
if flushManager != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Legacy path for backward compatibility with tests
|
||||
flushMutex.Lock()
|
||||
defer flushMutex.Unlock()
|
||||
|
||||
|
||||
@@ -9,213 +9,14 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// TestAutoFlushDirtyMarking tests that markDirtyAndScheduleFlush() correctly marks DB as dirty
|
||||
func TestAutoFlushDirtyMarking(t *testing.T) {
|
||||
// Reset auto-flush state
|
||||
autoFlushEnabled = true
|
||||
isDirty = false
|
||||
if flushTimer != nil {
|
||||
flushTimer.Stop()
|
||||
flushTimer = nil
|
||||
}
|
||||
|
||||
// Call markDirtyAndScheduleFlush
|
||||
markDirtyAndScheduleFlush()
|
||||
|
||||
// Verify dirty flag is set
|
||||
flushMutex.Lock()
|
||||
dirty := isDirty
|
||||
hasTimer := flushTimer != nil
|
||||
flushMutex.Unlock()
|
||||
|
||||
if !dirty {
|
||||
t.Error("Expected isDirty to be true after markDirtyAndScheduleFlush()")
|
||||
}
|
||||
|
||||
if !hasTimer {
|
||||
t.Error("Expected flushTimer to be set after markDirtyAndScheduleFlush()")
|
||||
}
|
||||
|
||||
// Clean up
|
||||
flushMutex.Lock()
|
||||
if flushTimer != nil {
|
||||
flushTimer.Stop()
|
||||
flushTimer = nil
|
||||
}
|
||||
isDirty = false
|
||||
flushMutex.Unlock()
|
||||
}
|
||||
|
||||
// TestAutoFlushDisabled tests that --no-auto-flush flag disables the feature
|
||||
func TestAutoFlushDisabled(t *testing.T) {
|
||||
// Disable auto-flush
|
||||
autoFlushEnabled = false
|
||||
isDirty = false
|
||||
if flushTimer != nil {
|
||||
flushTimer.Stop()
|
||||
flushTimer = nil
|
||||
}
|
||||
|
||||
// Call markDirtyAndScheduleFlush
|
||||
markDirtyAndScheduleFlush()
|
||||
|
||||
// Verify dirty flag is NOT set
|
||||
flushMutex.Lock()
|
||||
dirty := isDirty
|
||||
hasTimer := flushTimer != nil
|
||||
flushMutex.Unlock()
|
||||
|
||||
if dirty {
|
||||
t.Error("Expected isDirty to remain false when autoFlushEnabled=false")
|
||||
}
|
||||
|
||||
if hasTimer {
|
||||
t.Error("Expected flushTimer to remain nil when autoFlushEnabled=false")
|
||||
}
|
||||
|
||||
// Re-enable for other tests
|
||||
autoFlushEnabled = true
|
||||
}
|
||||
|
||||
// TestAutoFlushDebounce tests that rapid operations result in a single flush.
//
// NOTE(review): permanently skipped below — debouncing moved into FlushManager
// and is covered by flush_manager_test.go (bd-159). Everything after t.Skip is
// unreachable; it is retained for reference only.
func TestAutoFlushDebounce(t *testing.T) {
	// NOTE(bd-159): This test is obsolete - debouncing is now tested in flush_manager_test.go
	// The codebase moved from module-level autoFlushEnabled/flushTimer to FlushManager
	t.Skip("Test obsolete - debouncing tested in flush_manager_test.go (see bd-159)")

	// Create temp directory for test database
	tmpDir, err := os.MkdirTemp("", "bd-test-autoflush-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			t.Logf("Warning: cleanup failed: %v", err)
		}
	}()

	// Point the package-level dbPath at the temp DB; JSONL lands next to it.
	dbPath = filepath.Join(tmpDir, "test.db")
	jsonlPath := filepath.Join(tmpDir, "issues.jsonl")

	// Create store
	testStore := newTestStore(t, dbPath)

	store = testStore
	storeMutex.Lock()
	storeActive = true
	storeMutex.Unlock()

	// Reset auto-flush state
	autoFlushEnabled = true
	isDirty = false
	if flushTimer != nil {
		flushTimer.Stop()
		flushTimer = nil
	}

	ctx := context.Background()

	// Create initial issue to have something in the DB
	issue := &types.Issue{
		ID:        "test-1",
		Title:     "Test issue",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := testStore.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}

	// Simulate rapid CRUD operations by marking the issue as dirty in the DB
	for i := 0; i < 5; i++ {
		// Mark issue dirty in database (not just global flag)
		if err := testStore.MarkIssueDirty(ctx, issue.ID); err != nil {
			t.Fatalf("Failed to mark dirty: %v", err)
		}
		markDirtyAndScheduleFlush()
		time.Sleep(10 * time.Millisecond) // Small delay between marks (< debounce)
	}

	// Wait for debounce to complete
	time.Sleep(20 * time.Millisecond) // 10x faster, still reliable

	// Check that JSONL file was created (flush happened)
	if _, err := os.Stat(jsonlPath); os.IsNotExist(err) {
		t.Error("Expected JSONL file to be created after debounce period")
	}

	// Verify only one flush occurred by checking file content
	// (should have exactly 1 issue)
	f, err := os.Open(jsonlPath)
	if err != nil {
		t.Fatalf("Failed to open JSONL file: %v", err)
	}
	defer f.Close()

	// One JSONL line per issue; more than one line would mean extra flushes
	// appended or the debounce collapsed incorrectly.
	scanner := bufio.NewScanner(f)
	lineCount := 0
	for scanner.Scan() {
		lineCount++
	}

	if lineCount != 1 {
		t.Errorf("Expected 1 issue in JSONL, got %d (debounce may have failed)", lineCount)
	}

	// Clean up
	storeMutex.Lock()
	storeActive = false
	storeMutex.Unlock()
}
|
||||
|
||||
// TestAutoFlushClearState tests that clearAutoFlushState() properly resets state
|
||||
func TestAutoFlushClearState(t *testing.T) {
|
||||
// Set up dirty state
|
||||
autoFlushEnabled = true
|
||||
isDirty = true
|
||||
flushTimer = time.AfterFunc(5*time.Second, func() {})
|
||||
|
||||
// Clear state
|
||||
clearAutoFlushState()
|
||||
|
||||
// Verify state is cleared
|
||||
flushMutex.Lock()
|
||||
dirty := isDirty
|
||||
hasTimer := flushTimer != nil
|
||||
failCount := flushFailureCount
|
||||
lastErr := lastFlushError
|
||||
flushMutex.Unlock()
|
||||
|
||||
if dirty {
|
||||
t.Error("Expected isDirty to be false after clearAutoFlushState()")
|
||||
}
|
||||
|
||||
if hasTimer {
|
||||
t.Error("Expected flushTimer to be nil after clearAutoFlushState()")
|
||||
}
|
||||
|
||||
if failCount != 0 {
|
||||
t.Errorf("Expected flushFailureCount to be 0, got %d", failCount)
|
||||
}
|
||||
|
||||
if lastErr != nil {
|
||||
t.Errorf("Expected lastFlushError to be nil, got %v", lastErr)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAutoFlushOnExit tests that flush happens on program exit
|
||||
// TestAutoFlushOnExit tests that PersistentPostRun performs final flush before exit
|
||||
func TestAutoFlushOnExit(t *testing.T) {
|
||||
// FIX: Initialize rootCtx for flush operations
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
@@ -352,96 +153,7 @@ func TestAutoFlushOnExit(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestAutoFlushConcurrency tests that concurrent operations don't cause races
|
||||
func TestAutoFlushConcurrency(t *testing.T) {
|
||||
// Reset auto-flush state
|
||||
autoFlushEnabled = true
|
||||
isDirty = false
|
||||
if flushTimer != nil {
|
||||
flushTimer.Stop()
|
||||
flushTimer = nil
|
||||
}
|
||||
|
||||
// Run multiple goroutines calling markDirtyAndScheduleFlush
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < 10; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for j := 0; j < 100; j++ {
|
||||
markDirtyAndScheduleFlush()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
// Verify no panic and state is valid
|
||||
flushMutex.Lock()
|
||||
dirty := isDirty
|
||||
hasTimer := flushTimer != nil
|
||||
flushMutex.Unlock()
|
||||
|
||||
if !dirty {
|
||||
t.Error("Expected isDirty to be true after concurrent marks")
|
||||
}
|
||||
|
||||
if !hasTimer {
|
||||
t.Error("Expected flushTimer to be set after concurrent marks")
|
||||
}
|
||||
|
||||
// Clean up
|
||||
flushMutex.Lock()
|
||||
if flushTimer != nil {
|
||||
flushTimer.Stop()
|
||||
flushTimer = nil
|
||||
}
|
||||
isDirty = false
|
||||
flushMutex.Unlock()
|
||||
}
|
||||
|
||||
// TestAutoFlushStoreInactive tests that flush doesn't run when store is inactive
|
||||
func TestAutoFlushStoreInactive(t *testing.T) {
|
||||
// Create temp directory for test database
|
||||
tmpDir, err := os.MkdirTemp("", "bd-test-inactive-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmpDir); err != nil {
|
||||
t.Logf("Warning: cleanup failed: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
dbPath = filepath.Join(tmpDir, "test.db")
|
||||
jsonlPath := filepath.Join(tmpDir, "issues.jsonl")
|
||||
|
||||
// Create store
|
||||
testStore := newTestStore(t, dbPath)
|
||||
|
||||
store = testStore
|
||||
|
||||
// Set store as INACTIVE (simulating closed store)
|
||||
storeMutex.Lock()
|
||||
storeActive = false
|
||||
storeMutex.Unlock()
|
||||
|
||||
// Reset auto-flush state
|
||||
autoFlushEnabled = true
|
||||
flushMutex.Lock()
|
||||
isDirty = true
|
||||
flushMutex.Unlock()
|
||||
|
||||
// Call flushToJSONL (should return early due to inactive store)
|
||||
flushToJSONL()
|
||||
|
||||
// Verify JSONL was NOT created (flush was skipped)
|
||||
if _, err := os.Stat(jsonlPath); !os.IsNotExist(err) {
|
||||
t.Error("Expected JSONL file to NOT be created when store is inactive")
|
||||
}
|
||||
|
||||
testStore.Close()
|
||||
}
|
||||
|
||||
// TestAutoFlushJSONLContent tests that flushed JSONL has correct content
|
||||
func TestAutoFlushJSONLContent(t *testing.T) {
|
||||
// FIX: Initialize rootCtx for flush operations
|
||||
@@ -578,125 +290,6 @@ func TestAutoFlushJSONLContent(t *testing.T) {
|
||||
storeMutex.Unlock()
|
||||
}
|
||||
|
||||
// TestAutoFlushErrorHandling tests error scenarios in flush operations:
// a flush whose JSONL target cannot be created must increment
// flushFailureCount and record lastFlushError.
func TestAutoFlushErrorHandling(t *testing.T) {
	if runtime.GOOS == windowsOS {
		t.Skip("chmod-based read-only directory behavior is not reliable on Windows")
	}

	// FIX: Initialize rootCtx for flush operations
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Swap in a test rootCtx and restore the original on exit.
	oldRootCtx := rootCtx
	rootCtx = ctx
	defer func() { rootCtx = oldRootCtx }()

	// Note: We create issues.jsonl as a directory to force os.Create() to fail,
	// which works even when running as root (unlike chmod-based approaches)

	// Create temp directory for test database
	tmpDir, err := os.MkdirTemp("", "bd-test-error-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			t.Logf("Warning: cleanup failed: %v", err)
		}
	}()

	dbPath = filepath.Join(tmpDir, "test.db")

	// Create store
	testStore := newTestStore(t, dbPath)

	store = testStore
	storeMutex.Lock()
	storeActive = true
	storeMutex.Unlock()

	// ctx already declared above for rootCtx initialization

	// Create test issue
	issue := &types.Issue{
		ID:        "test-error-1",
		Title:     "Error test issue",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	if err := testStore.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}

	// Mark issue as dirty so flushToJSONL will try to export it
	if err := testStore.MarkIssueDirty(ctx, issue.ID); err != nil {
		t.Fatalf("Failed to mark issue dirty: %v", err)
	}

	// Create a directory where the JSONL file should be, to force write failure
	// os.Create() will fail when trying to create a file with a path that's already a directory
	failDir := filepath.Join(tmpDir, "faildir")
	if err := os.MkdirAll(failDir, 0755); err != nil {
		t.Fatalf("Failed to create fail dir: %v", err)
	}

	// Create issues.jsonl as a directory (not a file) to force Create() to fail
	jsonlAsDir := filepath.Join(failDir, "issues.jsonl")
	if err := os.MkdirAll(jsonlAsDir, 0755); err != nil {
		t.Fatalf("Failed to create issues.jsonl as directory: %v", err)
	}

	// Set dbPath to point to faildir (restored before the test returns below);
	// the flusher derives the JSONL path from dbPath's directory.
	originalDBPath := dbPath
	dbPath = filepath.Join(failDir, "test.db")

	// Verify issue is actually marked as dirty
	dirtyIDs, err := testStore.GetDirtyIssues(ctx)
	if err != nil {
		t.Fatalf("Failed to get dirty issues: %v", err)
	}
	t.Logf("Dirty issues before flush: %v", dirtyIDs)

	// Reset failure counter so the assertion below sees exactly one failure.
	flushMutex.Lock()
	flushFailureCount = 0
	lastFlushError = nil
	isDirty = true
	flushMutex.Unlock()

	t.Logf("dbPath set to: %s", dbPath)
	t.Logf("Expected JSONL path (which is a directory): %s", filepath.Join(failDir, "issues.jsonl"))

	// Attempt flush (should fail)
	flushToJSONL()

	// Verify failure was recorded
	flushMutex.Lock()
	failCount := flushFailureCount
	hasError := lastFlushError != nil
	flushMutex.Unlock()

	if failCount != 1 {
		t.Errorf("Expected flushFailureCount to be 1, got %d", failCount)
	}

	if !hasError {
		t.Error("Expected lastFlushError to be set after flush failure")
	}

	// Restore dbPath
	dbPath = originalDBPath

	// Clean up
	storeMutex.Lock()
	storeActive = false
	storeMutex.Unlock()
}
|
||||
|
||||
// TestAutoImportIfNewer tests that auto-import triggers when JSONL is newer than DB
|
||||
func TestAutoImportIfNewer(t *testing.T) {
|
||||
// FIX: Initialize rootCtx for auto-import operations
|
||||
|
||||
@@ -9,6 +9,139 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// copilotInstructionsContent is the full Markdown body for
// .github/copilot-instructions.md, printed by the onboarding flow so GitHub
// Copilot automatically picks up project conventions. Backquotes cannot
// appear inside a Go raw string literal, so inline-code spans are spliced in
// via concatenated interpreted strings.
const copilotInstructionsContent = `# GitHub Copilot Instructions for Beads

## Project Overview

**beads** (command: ` + "`bd`" + `) is a Git-backed issue tracker designed for AI-supervised coding workflows. We dogfood our own tool for all task tracking.

**Key Features:**
- Dependency-aware issue tracking
- Auto-sync with Git via JSONL
- AI-optimized CLI with JSON output
- Built-in daemon for background operations
- MCP server integration for Claude and other AI assistants

## Tech Stack

- **Language**: Go 1.21+
- **Storage**: SQLite (internal/storage/sqlite/)
- **CLI Framework**: Cobra
- **Testing**: Go standard testing + table-driven tests
- **CI/CD**: GitHub Actions
- **MCP Server**: Python (integrations/beads-mcp/)

## Coding Guidelines

### Testing
- Always write tests for new features
- Use ` + "`BEADS_DB=/tmp/test.db`" + ` to avoid polluting production database
- Run ` + "`go test -short ./...`" + ` before committing
- Never create test issues in production DB (use temporary DB)

### Code Style
- Run ` + "`golangci-lint run ./...`" + ` before committing
- Follow existing patterns in ` + "`cmd/bd/`" + ` for new commands
- Add ` + "`--json`" + ` flag to all commands for programmatic use
- Update docs when changing behavior

### Git Workflow
- Always commit ` + "`.beads/issues.jsonl`" + ` with code changes
- Run ` + "`bd sync`" + ` at end of work sessions
- Install git hooks: ` + "`bd hooks install`" + ` (ensures DB ↔ JSONL consistency)

## Issue Tracking with bd

**CRITICAL**: This project uses **bd** for ALL task tracking. Do NOT create markdown TODO lists.

### Essential Commands

` + "```bash" + `
# Find work
bd ready --json # Unblocked issues
bd stale --days 30 --json # Forgotten issues

# Create and manage
bd create "Title" -t bug|feature|task -p 0-4 --json
bd update <id> --status in_progress --json
bd close <id> --reason "Done" --json

# Search
bd list --status open --priority 1 --json
bd show <id> --json

# Sync (CRITICAL at end of session!)
bd sync # Force immediate export/commit/push
` + "```" + `

### Workflow

1. **Check ready work**: ` + "`bd ready --json`" + `
2. **Claim task**: ` + "`bd update <id> --status in_progress`" + `
3. **Work on it**: Implement, test, document
4. **Discover new work?** ` + "`bd create \"Found bug\" -p 1 --deps discovered-from:<parent-id> --json`" + `
5. **Complete**: ` + "`bd close <id> --reason \"Done\" --json`" + `
6. **Sync**: ` + "`bd sync`" + ` (flushes changes to git immediately)

### Priorities

- ` + "`0`" + ` - Critical (security, data loss, broken builds)
- ` + "`1`" + ` - High (major features, important bugs)
- ` + "`2`" + ` - Medium (default, nice-to-have)
- ` + "`3`" + ` - Low (polish, optimization)
- ` + "`4`" + ` - Backlog (future ideas)

## Project Structure

` + "```" + `
beads/
├── cmd/bd/ # CLI commands (add new commands here)
├── internal/
│ ├── types/ # Core data types
│ └── storage/ # Storage layer
│ └── sqlite/ # SQLite implementation
├── integrations/
│ └── beads-mcp/ # MCP server (Python)
├── examples/ # Integration examples
├── docs/ # Documentation
└── .beads/
├── beads.db # SQLite database (DO NOT COMMIT)
└── issues.jsonl # Git-synced issue storage
` + "```" + `

## Available Resources

### MCP Server (Recommended)
Use the beads MCP server for native function calls instead of shell commands:
- Install: ` + "`pip install beads-mcp`" + `
- Functions: ` + "`mcp__beads__ready()`" + `, ` + "`mcp__beads__create()`" + `, etc.
- See ` + "`integrations/beads-mcp/README.md`" + `

### Scripts
- ` + "`./scripts/bump-version.sh <version> --commit`" + ` - Update all version files atomically
- ` + "`./scripts/release.sh <version>`" + ` - Complete release workflow
- ` + "`./scripts/update-homebrew.sh <version>`" + ` - Update Homebrew formula

### Key Documentation
- **AGENTS.md** - Comprehensive AI agent guide (detailed workflows, advanced features)
- **AGENT_INSTRUCTIONS.md** - Development procedures, testing, releases
- **README.md** - User-facing documentation
- **docs/CLI_REFERENCE.md** - Complete command reference

## Important Rules

- ✅ Use bd for ALL task tracking
- ✅ Always use ` + "`--json`" + ` flag for programmatic use
- ✅ Run ` + "`bd sync`" + ` at end of sessions
- ✅ Test with ` + "`BEADS_DB=/tmp/test.db`" + `
- ❌ Do NOT create markdown TODO lists
- ❌ Do NOT create test issues in production DB
- ❌ Do NOT commit ` + "`.beads/beads.db`" + ` (JSONL only)

---

**For detailed workflows and advanced features, see [AGENTS.md](../AGENTS.md)**`
|
||||
|
||||
const agentsContent = `## Issue Tracking with bd (beads)
|
||||
|
||||
**IMPORTANT**: This project uses **bd (beads)** for ALL issue tracking. Do NOT use markdown TODOs, task lists, or other tracking methods.
|
||||
@@ -77,6 +210,11 @@ bd automatically syncs with git:
|
||||
- Imports from JSONL when newer (e.g., after ` + "`git pull`" + `)
|
||||
- No manual export/import needed!
|
||||
|
||||
### GitHub Copilot Integration
|
||||
|
||||
If using GitHub Copilot, also create ` + "`.github/copilot-instructions.md`" + ` for automatic instruction loading.
|
||||
Run ` + "`bd onboard`" + ` to get the content, or see step 2 of the onboard instructions.
|
||||
|
||||
### MCP Server (Recommended)
|
||||
|
||||
If using Claude or MCP-compatible clients, install the beads MCP server:
|
||||
@@ -190,7 +328,30 @@ func renderOnboardInstructions(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := writef("%s\n", bold("2. Update CLAUDE.md (if present)")); err != nil {
|
||||
if err := writef("%s\n", bold("2. Create .github/copilot-instructions.md (for GitHub Copilot)")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeln(" GitHub Copilot automatically loads instructions from .github/copilot-instructions.md"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeln(" Create the .github directory if it doesn't exist, then add this file:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeBlank(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := writef("%s\n", cyan("--- BEGIN .GITHUB/COPILOT-INSTRUCTIONS.MD CONTENT ---")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeln(copilotInstructionsContent); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writef("%s\n\n", cyan("--- END .GITHUB/COPILOT-INSTRUCTIONS.MD CONTENT ---")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := writef("%s\n", bold("3. Update CLAUDE.md (if present)")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeln(" If CLAUDE.md exists in this directory, add this note at the top:"); err != nil {
|
||||
@@ -212,7 +373,7 @@ func renderOnboardInstructions(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := writef("%s\n", bold("3. Remove bootstrap instruction")); err != nil {
|
||||
if err := writef("%s\n", bold("4. Remove bootstrap instruction")); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeln(" If AGENTS.md or CLAUDE.md contains a line like:"); err != nil {
|
||||
|
||||
Reference in New Issue
Block a user