Merge: use remote version

This commit is contained in:
Steve Yegge
2025-10-29 22:56:19 -07:00
16 changed files with 848 additions and 65 deletions

File diff suppressed because one or more lines are too long

View File

@@ -9,7 +9,7 @@
"name": "beads", "name": "beads",
"source": "./", "source": "./",
"description": "AI-supervised issue tracker for coding workflows", "description": "AI-supervised issue tracker for coding workflows",
"version": "0.18.0" "version": "0.19.0"
} }
] ]
} }

View File

@@ -1,7 +1,7 @@
{ {
"name": "beads", "name": "beads",
"description": "AI-supervised issue tracker for coding workflows. Manage tasks, discover work, and maintain context with simple CLI commands.", "description": "AI-supervised issue tracker for coding workflows. Manage tasks, discover work, and maintain context with simple CLI commands.",
"version": "0.18.0", "version": "0.19.0",
"author": { "author": {
"name": "Steve Yegge", "name": "Steve Yegge",
"url": "https://github.com/steveyegge" "url": "https://github.com/steveyegge"

View File

@@ -160,6 +160,12 @@ func autoImportIfNewer() {
return return
} }
// Clear export_hashes before import to prevent staleness (bd-160)
// Import operations may add/update issues, so export_hashes entries become invalid
if err := store.ClearAllExportHashes(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to clear export_hashes before import: %v\n", err)
}
// Use shared import logic (bd-157) // Use shared import logic (bd-157)
opts := ImportOptions{ opts := ImportOptions{
ResolveCollisions: true, // Auto-import always resolves collisions ResolveCollisions: true, // Auto-import always resolves collisions
@@ -433,6 +439,54 @@ func shouldSkipExport(ctx context.Context, issue *types.Issue) (bool, error) {
return currentHash == storedHash, nil return currentHash == storedHash, nil
} }
// validateJSONLIntegrity checks if the JSONL file's SHA-256 hash matches the
// hash stored in the database by the last export.
// If a mismatch is detected (e.g. the JSONL was rewritten by a git operation),
// it clears export_hashes and logs a warning so the next export performs a
// full re-export instead of incorrectly skipping "unchanged" issues (bd-160).
//
// Returns an error only for unexpected failures (store access, unreadable
// file); a detected mismatch or a missing JSONL file is handled internally
// and returns nil.
func validateJSONLIntegrity(ctx context.Context, jsonlPath string) error {
	// Get the JSONL file hash recorded at the last export.
	storedHash, err := store.GetJSONLFileHash(ctx)
	if err != nil {
		return fmt.Errorf("failed to get stored JSONL hash: %w", err)
	}

	// If no hash stored, this is first export - skip validation.
	if storedHash == "" {
		return nil
	}

	// Read current JSONL file.
	jsonlData, err := os.ReadFile(jsonlPath)
	if err != nil {
		if os.IsNotExist(err) {
			// JSONL doesn't exist but we have a stored hash - clear export_hashes
			// so every issue is re-exported when the file is recreated.
			fmt.Fprintf(os.Stderr, "⚠️ WARNING: JSONL file missing but export_hashes exist. Clearing export_hashes.\n")
			if err := store.ClearAllExportHashes(ctx); err != nil {
				return fmt.Errorf("failed to clear export_hashes: %w", err)
			}
			return nil
		}
		return fmt.Errorf("failed to read JSONL file: %w", err)
	}

	// Compute current JSONL hash. sha256.Sum256 is the idiomatic one-shot
	// form, equivalent to the New/Write/Sum(nil) sequence.
	sum := sha256.Sum256(jsonlData)
	currentHash := hex.EncodeToString(sum[:])

	// Compare hashes; a mismatch means JSONL and export_hashes diverged
	// (e.g. after a git checkout/reset rewrote the JSONL underneath us).
	if currentHash != storedHash {
		fmt.Fprintf(os.Stderr, "⚠️ WARNING: JSONL file hash mismatch detected (bd-160)\n")
		fmt.Fprintf(os.Stderr, " This indicates JSONL and export_hashes are out of sync.\n")
		fmt.Fprintf(os.Stderr, " Clearing export_hashes to force full re-export.\n")

		// Clear export_hashes to force full re-export.
		if err := store.ClearAllExportHashes(ctx); err != nil {
			return fmt.Errorf("failed to clear export_hashes: %w", err)
		}
	}
	return nil
}
func writeJSONLAtomic(jsonlPath string, issues []*types.Issue) ([]string, error) { func writeJSONLAtomic(jsonlPath string, issues []*types.Issue) ([]string, error) {
// Sort issues by ID for consistent output // Sort issues by ID for consistent output
sort.Slice(issues, func(i, j int) bool { sort.Slice(issues, func(i, j int) bool {
@@ -600,6 +654,13 @@ func flushToJSONL() {
} }
ctx := context.Background() ctx := context.Background()
// Validate JSONL integrity before export (bd-160)
// This detects if JSONL and export_hashes are out of sync (e.g., after git operations)
if err := validateJSONLIntegrity(ctx, jsonlPath); err != nil {
recordFailure(fmt.Errorf("JSONL integrity check failed: %w", err))
return
}
// Determine which issues to export // Determine which issues to export
var dirtyIDs []string var dirtyIDs []string
@@ -711,6 +772,11 @@ func flushToJSONL() {
if err := store.SetMetadata(ctx, "last_import_hash", exportedHash); err != nil { if err := store.SetMetadata(ctx, "last_import_hash", exportedHash); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_hash after export: %v\n", err) fmt.Fprintf(os.Stderr, "Warning: failed to update last_import_hash after export: %v\n", err)
} }
// Store JSONL file hash for integrity validation (bd-160)
if err := store.SetJSONLFileHash(ctx, exportedHash); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_file_hash after export: %v\n", err)
}
} }
// Success! // Success!

View File

@@ -12,6 +12,7 @@ type Debouncer struct {
timer *time.Timer timer *time.Timer
duration time.Duration duration time.Duration
action func() action func()
seq uint64 // Sequence number to prevent stale timer fires
} }
// NewDebouncer creates a new debouncer with the given duration and action. // NewDebouncer creates a new debouncer with the given duration and action.
@@ -34,11 +35,21 @@ func (d *Debouncer) Trigger() {
d.timer.Stop() d.timer.Stop()
} }
// Increment sequence number to invalidate any pending timers
d.seq++
currentSeq := d.seq
d.timer = time.AfterFunc(d.duration, func() { d.timer = time.AfterFunc(d.duration, func() {
d.action()
d.mu.Lock() d.mu.Lock()
d.timer = nil defer d.mu.Unlock()
d.mu.Unlock()
// Only fire if this is still the latest trigger
if d.seq == currentSeq {
d.timer = nil
d.mu.Unlock() // Unlock before calling action to avoid holding lock during callback
d.action()
d.mu.Lock() // Re-lock for defer
}
}) })
} }

View File

@@ -13,16 +13,20 @@ import (
// FileWatcher monitors JSONL and git ref changes using filesystem events or polling. // FileWatcher monitors JSONL and git ref changes using filesystem events or polling.
type FileWatcher struct { type FileWatcher struct {
watcher *fsnotify.Watcher watcher *fsnotify.Watcher
debouncer *Debouncer debouncer *Debouncer
jsonlPath string jsonlPath string
pollingMode bool parentDir string
lastModTime time.Time pollingMode bool
lastExists bool lastModTime time.Time
lastSize int64 lastExists bool
pollInterval time.Duration lastSize int64
gitRefsPath string pollInterval time.Duration
cancel context.CancelFunc gitRefsPath string
gitHeadPath string
lastHeadModTime time.Time
lastHeadExists bool
cancel context.CancelFunc
} }
// NewFileWatcher creates a file watcher for the given JSONL path. // NewFileWatcher creates a file watcher for the given JSONL path.
@@ -31,6 +35,7 @@ type FileWatcher struct {
func NewFileWatcher(jsonlPath string, onChanged func()) (*FileWatcher, error) { func NewFileWatcher(jsonlPath string, onChanged func()) (*FileWatcher, error) {
fw := &FileWatcher{ fw := &FileWatcher{
jsonlPath: jsonlPath, jsonlPath: jsonlPath,
parentDir: filepath.Dir(jsonlPath),
debouncer: NewDebouncer(500*time.Millisecond, onChanged), debouncer: NewDebouncer(500*time.Millisecond, onChanged),
pollInterval: 5 * time.Second, pollInterval: 5 * time.Second,
} }
@@ -46,8 +51,16 @@ func NewFileWatcher(jsonlPath string, onChanged func()) (*FileWatcher, error) {
fallbackEnv := os.Getenv("BEADS_WATCHER_FALLBACK") fallbackEnv := os.Getenv("BEADS_WATCHER_FALLBACK")
fallbackDisabled := fallbackEnv == "false" || fallbackEnv == "0" fallbackDisabled := fallbackEnv == "false" || fallbackEnv == "0"
// Store git refs path for filtering // Store git paths for filtering
fw.gitRefsPath = filepath.Join(filepath.Dir(jsonlPath), "..", ".git", "refs", "heads") gitDir := filepath.Join(fw.parentDir, "..", ".git")
fw.gitRefsPath = filepath.Join(gitDir, "refs", "heads")
fw.gitHeadPath = filepath.Join(gitDir, "HEAD")
// Get initial git HEAD state for polling
if stat, err := os.Stat(fw.gitHeadPath); err == nil {
fw.lastHeadModTime = stat.ModTime()
fw.lastHeadExists = true
}
watcher, err := fsnotify.NewWatcher() watcher, err := fsnotify.NewWatcher()
if err != nil { if err != nil {
@@ -63,22 +76,33 @@ func NewFileWatcher(jsonlPath string, onChanged func()) (*FileWatcher, error) {
fw.watcher = watcher fw.watcher = watcher
// Watch the JSONL file // Watch the parent directory (catches creates/renames)
if err := watcher.Add(jsonlPath); err != nil { if err := watcher.Add(fw.parentDir); err != nil {
watcher.Close() fmt.Fprintf(os.Stderr, "Warning: failed to watch parent directory %s: %v\n", fw.parentDir, err)
if fallbackDisabled {
return nil, fmt.Errorf("failed to watch JSONL and BEADS_WATCHER_FALLBACK is disabled: %w", err)
}
// Fall back to polling mode
fmt.Fprintf(os.Stderr, "Warning: failed to watch JSONL (%v), falling back to polling mode (%v interval)\n", err, fw.pollInterval)
fmt.Fprintf(os.Stderr, "Set BEADS_WATCHER_FALLBACK=false to disable this fallback and require fsnotify\n")
fw.pollingMode = true
fw.watcher = nil
return fw, nil
} }
// Also watch .git/refs/heads for branch changes (best effort) // Watch the JSONL file (may not exist yet)
if err := watcher.Add(jsonlPath); err != nil {
if os.IsNotExist(err) {
// File doesn't exist yet - rely on parent dir watch
fmt.Fprintf(os.Stderr, "Info: JSONL file %s doesn't exist yet, watching parent directory\n", jsonlPath)
} else {
watcher.Close()
if fallbackDisabled {
return nil, fmt.Errorf("failed to watch JSONL and BEADS_WATCHER_FALLBACK is disabled: %w", err)
}
// Fall back to polling mode
fmt.Fprintf(os.Stderr, "Warning: failed to watch JSONL (%v), falling back to polling mode (%v interval)\n", err, fw.pollInterval)
fmt.Fprintf(os.Stderr, "Set BEADS_WATCHER_FALLBACK=false to disable this fallback and require fsnotify\n")
fw.pollingMode = true
fw.watcher = nil
return fw, nil
}
}
// Also watch .git/refs/heads and .git/HEAD for branch changes (best effort)
_ = watcher.Add(fw.gitRefsPath) // Ignore error - not all setups have this _ = watcher.Add(fw.gitRefsPath) // Ignore error - not all setups have this
_ = watcher.Add(fw.gitHeadPath) // Ignore error - not all setups have this
return fw, nil return fw, nil
} }
@@ -97,6 +121,8 @@ func (fw *FileWatcher) Start(ctx context.Context, log daemonLogger) {
} }
go func() { go func() {
jsonlBase := filepath.Base(fw.jsonlPath)
for { for {
select { select {
case event, ok := <-fw.watcher.Events: case event, ok := <-fw.watcher.Events:
@@ -104,30 +130,43 @@ func (fw *FileWatcher) Start(ctx context.Context, log daemonLogger) {
return return
} }
// Handle JSONL write events // Handle parent directory events (file create/replace)
if event.Name == fw.jsonlPath && event.Op&fsnotify.Write != 0 { if event.Name == filepath.Join(fw.parentDir, jsonlBase) && event.Op&fsnotify.Create != 0 {
log.log("File change detected: %s", event.Name) log.log("JSONL file created: %s", event.Name)
// Ensure we're watching the file directly
_ = fw.watcher.Add(fw.jsonlPath)
fw.debouncer.Trigger() fw.debouncer.Trigger()
continue
}
// Handle JSONL write/chmod events
if event.Name == fw.jsonlPath && event.Op&(fsnotify.Write|fsnotify.Create|fsnotify.Chmod) != 0 {
log.log("File change detected: %s (op: %v)", event.Name, event.Op)
fw.debouncer.Trigger()
continue
} }
// Handle JSONL removal/rename (e.g., git checkout) // Handle JSONL removal/rename (e.g., git checkout)
if event.Name == fw.jsonlPath && (event.Op&fsnotify.Remove != 0 || event.Op&fsnotify.Rename != 0) { if event.Name == fw.jsonlPath && (event.Op&fsnotify.Remove != 0 || event.Op&fsnotify.Rename != 0) {
log.log("JSONL removed/renamed, re-establishing watch") log.log("JSONL removed/renamed, re-establishing watch")
fw.watcher.Remove(fw.jsonlPath) fw.watcher.Remove(fw.jsonlPath)
// Brief wait for file to be recreated // Retry with exponential backoff
time.Sleep(100 * time.Millisecond) fw.reEstablishWatch(ctx, log)
if err := fw.watcher.Add(fw.jsonlPath); err != nil { continue
log.log("Failed to re-watch JSONL: %v", err) }
} else {
// File was recreated, trigger to reload // Handle .git/HEAD changes (branch switches)
fw.debouncer.Trigger() if event.Name == fw.gitHeadPath && event.Op&(fsnotify.Write|fsnotify.Create) != 0 {
} log.log("Git HEAD change detected: %s", event.Name)
fw.debouncer.Trigger()
continue
} }
// Handle git ref changes (only events under gitRefsPath) // Handle git ref changes (only events under gitRefsPath)
if event.Op&fsnotify.Write != 0 && strings.HasPrefix(event.Name, fw.gitRefsPath) { if event.Op&fsnotify.Write != 0 && strings.HasPrefix(event.Name, fw.gitRefsPath) {
log.log("Git ref change detected: %s", event.Name) log.log("Git ref change detected: %s", event.Name)
fw.debouncer.Trigger() fw.debouncer.Trigger()
continue
} }
case err, ok := <-fw.watcher.Errors: case err, ok := <-fw.watcher.Errors:
@@ -143,6 +182,32 @@ func (fw *FileWatcher) Start(ctx context.Context, log daemonLogger) {
}() }()
} }
// reEstablishWatch attempts to re-add the JSONL watch with exponential backoff.
// It makes four attempts, doubling the wait each time (50ms, 100ms, 200ms,
// 400ms), and gives up early on cancellation or a non-"not exist" error.
func (fw *FileWatcher) reEstablishWatch(ctx context.Context, log daemonLogger) {
	wait := 50 * time.Millisecond
	for attempt := 0; attempt < 4; attempt++ {
		// Wait out the current backoff interval, bailing if the context ends.
		select {
		case <-ctx.Done():
			return
		case <-time.After(wait):
		}

		err := fw.watcher.Add(fw.jsonlPath)
		if err == nil {
			// Watch restored - trigger a reload in case the file changed
			// while we weren't watching.
			log.log("Successfully re-established JSONL watch after %v", wait)
			fw.debouncer.Trigger()
			return
		}
		if !os.IsNotExist(err) {
			// Unexpected failure - retrying won't help.
			log.log("Failed to re-watch JSONL after %v: %v", wait, err)
			return
		}

		// File hasn't reappeared yet; back off and try again.
		log.log("JSONL still missing after %v, retrying...", wait)
		wait *= 2
	}
	log.log("Failed to re-establish JSONL watch after all retries")
}
// startPolling begins polling for file changes using a ticker. // startPolling begins polling for file changes using a ticker.
func (fw *FileWatcher) startPolling(ctx context.Context, log daemonLogger) { func (fw *FileWatcher) startPolling(ctx context.Context, log daemonLogger) {
log.log("Starting polling mode with %v interval", fw.pollInterval) log.log("Starting polling mode with %v interval", fw.pollInterval)
@@ -152,6 +217,9 @@ func (fw *FileWatcher) startPolling(ctx context.Context, log daemonLogger) {
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
changed := false
// Check JSONL file
stat, err := os.Stat(fw.jsonlPath) stat, err := os.Stat(fw.jsonlPath)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
@@ -161,32 +229,61 @@ func (fw *FileWatcher) startPolling(ctx context.Context, log daemonLogger) {
fw.lastModTime = time.Time{} fw.lastModTime = time.Time{}
fw.lastSize = 0 fw.lastSize = 0
log.log("File missing (polling): %s", fw.jsonlPath) log.log("File missing (polling): %s", fw.jsonlPath)
fw.debouncer.Trigger() changed = true
} }
continue } else {
log.log("Polling error: %v", err)
}
} else {
// File exists
if !fw.lastExists {
// File appeared
fw.lastExists = true
fw.lastModTime = stat.ModTime()
fw.lastSize = stat.Size()
log.log("File appeared (polling): %s", fw.jsonlPath)
changed = true
} else if !stat.ModTime().Equal(fw.lastModTime) || stat.Size() != fw.lastSize {
// File exists and existed before - check for changes
fw.lastModTime = stat.ModTime()
fw.lastSize = stat.Size()
log.log("File change detected (polling): %s", fw.jsonlPath)
changed = true
} }
log.log("Polling error: %v", err)
continue
} }
// File exists // Check .git/HEAD for branch changes
if !fw.lastExists { headStat, err := os.Stat(fw.gitHeadPath)
// File appeared if err != nil {
fw.lastExists = true if os.IsNotExist(err) {
fw.lastModTime = stat.ModTime() if fw.lastHeadExists {
fw.lastSize = stat.Size() fw.lastHeadExists = false
log.log("File appeared (polling): %s", fw.jsonlPath) fw.lastHeadModTime = time.Time{}
fw.debouncer.Trigger() log.log("Git HEAD missing (polling): %s", fw.gitHeadPath)
continue changed = true
}
}
// Ignore other errors for HEAD - it's optional
} else {
// HEAD exists
if !fw.lastHeadExists {
// HEAD appeared
fw.lastHeadExists = true
fw.lastHeadModTime = headStat.ModTime()
log.log("Git HEAD appeared (polling): %s", fw.gitHeadPath)
changed = true
} else if !headStat.ModTime().Equal(fw.lastHeadModTime) {
// HEAD changed (branch switch)
fw.lastHeadModTime = headStat.ModTime()
log.log("Git HEAD change detected (polling): %s", fw.gitHeadPath)
changed = true
}
} }
// File exists and existed before - check for changes if changed {
if !stat.ModTime().Equal(fw.lastModTime) || stat.Size() != fw.lastSize {
fw.lastModTime = stat.ModTime()
fw.lastSize = stat.Size()
log.log("File change detected (polling): %s", fw.jsonlPath)
fw.debouncer.Trigger() fw.debouncer.Trigger()
} }
case <-ctx.Done(): case <-ctx.Done():
return return
} }

View File

@@ -2,6 +2,8 @@ package main
import ( import (
"context" "context"
"crypto/sha256"
"encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
"os" "os"
@@ -273,6 +275,17 @@ Output to stdout by default, or use -o flag for file output.`,
// Clear auto-flush state since we just manually exported // Clear auto-flush state since we just manually exported
// This cancels any pending auto-flush timer and marks DB as clean // This cancels any pending auto-flush timer and marks DB as clean
clearAutoFlushState() clearAutoFlushState()
// Store JSONL file hash for integrity validation (bd-160)
jsonlData, err := os.ReadFile(finalPath)
if err == nil {
hasher := sha256.New()
hasher.Write(jsonlData)
fileHash := hex.EncodeToString(hasher.Sum(nil))
if err := store.SetJSONLFileHash(ctx, fileHash); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_file_hash: %v\n", err)
}
}
} }
// If writing to file, atomically replace the target file // If writing to file, atomically replace the target file

View File

@@ -0,0 +1,294 @@
package main
import (
	"bytes"
	"context"
	"os"
	"path/filepath"
	"strconv"
	"testing"

	"github.com/steveyegge/beads/internal/storage/sqlite"
	"github.com/steveyegge/beads/internal/types"
)
// TestExportIntegrityAfterJSONLTruncation simulates the bd-160 bug scenario.
// This integration test would have caught the export deduplication bug.
//
// Scenario: issues are exported to JSONL, the JSONL is then truncated by an
// external actor (e.g. a git operation), and a subsequent export must detect
// the mismatch and re-export everything instead of skipping "unchanged" issues.
func TestExportIntegrityAfterJSONLTruncation(t *testing.T) {
	// Setup: create a database with multiple issues.
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, ".beads", "beads.db")
	jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")

	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		t.Fatalf("failed to create .beads directory: %v", err)
	}

	testStore, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer testStore.Close()

	ctx := context.Background()

	// Initialize database.
	if err := testStore.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue prefix: %v", err)
	}

	// Create 10 issues.
	// BUG FIX: the original built IDs with string(rune('0'+i)), which
	// produces the invalid ID "bd-:" for i == 10 (':' is '0'+10 in ASCII).
	// strconv.Itoa is correct for any i.
	const numIssues = 10
	var allIssues []*types.Issue
	for i := 1; i <= numIssues; i++ {
		n := strconv.Itoa(i)
		issue := &types.Issue{
			ID:          "bd-" + n,
			Title:       "Test issue " + n,
			Description: "Description " + n,
			Status:      types.StatusOpen,
			Priority:    1,
			IssueType:   types.TypeTask,
		}
		allIssues = append(allIssues, issue)
		if err := testStore.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("failed to create issue %s: %v", issue.ID, err)
		}
	}

	// Step 1: Export all issues.
	exportedIDs, err := writeJSONLAtomic(jsonlPath, allIssues)
	if err != nil {
		t.Fatalf("initial export failed: %v", err)
	}
	if len(exportedIDs) != numIssues {
		t.Fatalf("expected %d exported issues, got %d", numIssues, len(exportedIDs))
	}

	// Record the initial JSONL size so the truncation below can be verified.
	jsonlData, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	initialSize := len(jsonlData)

	// Step 2: Simulate git operation that truncates JSONL (the bd-160 scenario).
	// This simulates: git reset --hard <old-commit>, git checkout <branch>, etc.
	truncatedData := jsonlData[:len(jsonlData)/2] // Keep only first half
	if err := os.WriteFile(jsonlPath, truncatedData, 0644); err != nil {
		t.Fatalf("failed to truncate JSONL: %v", err)
	}

	// Verify JSONL is indeed truncated.
	truncatedSize := len(truncatedData)
	if truncatedSize >= initialSize {
		t.Fatalf("JSONL should be truncated, but size is %d (was %d)", truncatedSize, initialSize)
	}

	// Step 3: Run export integrity validation.
	// Point the global store (used by validateJSONLIntegrity) at the test
	// store, restoring the original on exit.
	oldStore := store
	store = testStore
	defer func() { store = oldStore }()

	// This should detect the mismatch and clear export_hashes.
	if err := validateJSONLIntegrity(ctx, jsonlPath); err != nil {
		t.Fatalf("integrity validation failed: %v", err)
	}

	// Step 4: Export all issues again.
	exportedIDs2, err := writeJSONLAtomic(jsonlPath, allIssues)
	if err != nil {
		t.Fatalf("second export failed: %v", err)
	}

	// Step 5: Verify all issues were exported (not skipped).
	if len(exportedIDs2) != numIssues {
		t.Errorf("INTEGRITY VIOLATION: expected %d exported issues after truncation, got %d",
			numIssues, len(exportedIDs2))
		t.Errorf("This indicates the bug bd-160 would have occurred!")

		data, _ := os.ReadFile(jsonlPath)
		t.Errorf("JSONL has %d lines, DB has %d issues", bytes.Count(data, []byte("\n")), numIssues)
	}

	// Step 6: Verify JSONL has all issues - one newline-terminated line each.
	finalData, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read final JSONL: %v", err)
	}
	if lineCount := bytes.Count(finalData, []byte("\n")); lineCount != numIssues {
		t.Errorf("JSONL should have %d lines (issues), got %d", numIssues, lineCount)
		t.Errorf("Data loss detected - this is the bd-160 bug!")
	}
}
// TestExportIntegrityAfterJSONLDeletion tests recovery when JSONL is deleted.
// The system should detect the missing file, clear export_hashes, and fully
// recreate the JSONL on the next export.
func TestExportIntegrityAfterJSONLDeletion(t *testing.T) {
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, ".beads", "beads.db")
	jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")

	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		t.Fatalf("failed to create .beads directory: %v", err)
	}

	testStore, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer testStore.Close()

	ctx := context.Background()

	if err := testStore.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue prefix: %v", err)
	}

	// Create an issue and export it.
	issue := &types.Issue{
		ID:        "bd-1",
		Title:     "Test issue",
		Status:    types.StatusOpen,
		Priority:  1,
		IssueType: types.TypeTask,
	}
	if err := testStore.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}

	if _, err := writeJSONLAtomic(jsonlPath, []*types.Issue{issue}); err != nil {
		t.Fatalf("export failed: %v", err)
	}

	// Point the global store (used by validateJSONLIntegrity) at the test
	// store, restoring the original on exit.
	oldStore := store
	store = testStore
	defer func() { store = oldStore }()

	// Delete JSONL (simulating user error or git clean).
	if err := os.Remove(jsonlPath); err != nil {
		t.Fatalf("failed to remove JSONL: %v", err)
	}

	// validateJSONLIntegrity handles a missing file internally (it clears
	// export_hashes and returns nil), so any error here is unexpected.
	// BUG FIX: the original filtered errors with os.IsNotExist(err), but
	// validateJSONLIntegrity wraps errors via fmt.Errorf("...: %w", err),
	// so that check could never match; treat every error as fatal instead.
	if err := validateJSONLIntegrity(ctx, jsonlPath); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Export again should recreate JSONL.
	if _, err := writeJSONLAtomic(jsonlPath, []*types.Issue{issue}); err != nil {
		t.Fatalf("export after deletion failed: %v", err)
	}

	// Verify the recreated JSONL exists and is non-empty.
	newData, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read recreated JSONL: %v", err)
	}
	if len(newData) == 0 {
		t.Fatal("Recreated JSONL is empty - data loss!")
	}
}
// TestMultipleExportsStayConsistent tests that repeated exports maintain
// integrity: each of three export rounds must write exactly one JSONL line
// per issue, with no duplication or loss.
func TestMultipleExportsStayConsistent(t *testing.T) {
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, ".beads", "beads.db")
	jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")

	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		t.Fatalf("failed to create .beads directory: %v", err)
	}

	testStore, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer testStore.Close()

	ctx := context.Background()

	if err := testStore.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue prefix: %v", err)
	}

	// Create 5 issues. strconv.Itoa is safe for any count, unlike the
	// string(rune('0'+i)) trick, which silently breaks past i == 9.
	var issues []*types.Issue
	for i := 1; i <= 5; i++ {
		n := strconv.Itoa(i)
		issue := &types.Issue{
			ID:        "bd-" + n,
			Title:     "Issue " + n,
			Status:    types.StatusOpen,
			Priority:  1,
			IssueType: types.TypeTask,
		}
		issues = append(issues, issue)
		if err := testStore.CreateIssue(ctx, issue, "test"); err != nil {
			t.Fatalf("failed to create issue: %v", err)
		}
	}

	// Export multiple times and verify consistency after each round.
	for iteration := 0; iteration < 3; iteration++ {
		exportedIDs, err := writeJSONLAtomic(jsonlPath, issues)
		if err != nil {
			t.Fatalf("export iteration %d failed: %v", iteration, err)
		}
		if len(exportedIDs) != len(issues) {
			t.Errorf("iteration %d: expected %d exports, got %d",
				iteration, len(issues), len(exportedIDs))
		}

		// Every issue must appear as exactly one newline-terminated line.
		data, err := os.ReadFile(jsonlPath)
		if err != nil {
			t.Fatalf("failed to read JSONL: %v", err)
		}
		if lines := bytes.Count(data, []byte("\n")); lines != len(issues) {
			t.Errorf("iteration %d: JSONL has %d lines, expected %d",
				iteration, lines, len(issues))
		}
	}
}

View File

@@ -0,0 +1,237 @@
package main
import (
"context"
"crypto/sha256"
"encoding/hex"
"os"
"path/filepath"
"testing"
"github.com/steveyegge/beads/internal/storage/sqlite"
"github.com/steveyegge/beads/internal/types"
)
const testActor = "test"
// TestJSONLIntegrityValidation tests the JSONL integrity validation (bd-160)
// It exercises three cases via ordered subtests: a matching hash passes, a
// mismatched hash clears export_hashes, and a missing JSONL file also clears
// export_hashes. NOTE: the subtests share store/file state and must run in
// declaration order.
func TestJSONLIntegrityValidation(t *testing.T) {
	// Create temp directory
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, ".beads", "beads.db")
	jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")

	// Ensure .beads directory exists
	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		t.Fatalf("failed to create .beads directory: %v", err)
	}

	// Create database
	testStore, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer testStore.Close()

	// Set global store for validateJSONLIntegrity
	// (restored on exit so other tests see the original value)
	oldStore := store
	store = testStore
	defer func() { store = oldStore }()

	ctx := context.Background()

	// Initialize database with prefix
	if err := testStore.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue prefix: %v", err)
	}

	// Create a test issue
	issue := &types.Issue{
		ID:          "bd-1",
		Title:       "Test issue",
		Description: "Test description",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := testStore.CreateIssue(ctx, issue, testActor); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}

	// Export to JSONL
	issues := []*types.Issue{issue}
	exportedIDs, err := writeJSONLAtomic(jsonlPath, issues)
	if err != nil {
		t.Fatalf("failed to write JSONL: %v", err)
	}
	if len(exportedIDs) != 1 {
		t.Fatalf("expected 1 exported ID, got %d", len(exportedIDs))
	}

	// Compute and store JSONL file hash
	// (mirrors what the real export path records after writing the file)
	jsonlData, err := os.ReadFile(jsonlPath)
	if err != nil {
		t.Fatalf("failed to read JSONL: %v", err)
	}
	hasher := sha256.New()
	hasher.Write(jsonlData)
	fileHash := hex.EncodeToString(hasher.Sum(nil))
	if err := testStore.SetJSONLFileHash(ctx, fileHash); err != nil {
		t.Fatalf("failed to set JSONL file hash: %v", err)
	}

	// Test 1: Validate with matching hash (should succeed)
	t.Run("MatchingHash", func(t *testing.T) {
		if err := validateJSONLIntegrity(ctx, jsonlPath); err != nil {
			t.Fatalf("validation failed with matching hash: %v", err)
		}
	})

	// Test 2: Modify JSONL file (simulating git pull) and validate
	t.Run("MismatchedHash", func(t *testing.T) {
		// Modify the JSONL file
		if err := os.WriteFile(jsonlPath, []byte(`{"id":"bd-1","title":"Modified"}`+"\n"), 0644); err != nil {
			t.Fatalf("failed to modify JSONL: %v", err)
		}

		// Add an export hash to verify it gets cleared
		if err := testStore.SetExportHash(ctx, "bd-1", "dummy-hash"); err != nil {
			t.Fatalf("failed to set export hash: %v", err)
		}

		// Validate should detect mismatch and clear export_hashes
		if err := validateJSONLIntegrity(ctx, jsonlPath); err != nil {
			t.Fatalf("validation failed: %v", err)
		}

		// Verify export_hashes were cleared
		hash, err := testStore.GetExportHash(ctx, "bd-1")
		if err != nil {
			t.Fatalf("failed to get export hash: %v", err)
		}
		if hash != "" {
			t.Fatalf("expected export hash to be cleared, got %q", hash)
		}
	})

	// Test 3: Missing JSONL file
	t.Run("MissingJSONL", func(t *testing.T) {
		// Store a hash to simulate previous export
		if err := testStore.SetJSONLFileHash(ctx, "some-hash"); err != nil {
			t.Fatalf("failed to set JSONL file hash: %v", err)
		}

		// Add an export hash
		if err := testStore.SetExportHash(ctx, "bd-1", "dummy-hash"); err != nil {
			t.Fatalf("failed to set export hash: %v", err)
		}

		// Remove JSONL file
		if err := os.Remove(jsonlPath); err != nil {
			t.Fatalf("failed to remove JSONL: %v", err)
		}

		// Validate should detect missing file and clear export_hashes
		if err := validateJSONLIntegrity(ctx, jsonlPath); err != nil {
			t.Fatalf("validation failed: %v", err)
		}

		// Verify export_hashes were cleared
		hash, err := testStore.GetExportHash(ctx, "bd-1")
		if err != nil {
			t.Fatalf("failed to get export hash: %v", err)
		}
		if hash != "" {
			t.Fatalf("expected export hash to be cleared, got %q", hash)
		}
	})
}
// TestImportClearsExportHashes tests that imports clear export_hashes (bd-160)
// Stale export hashes would cause a later export to skip issues that were in
// fact changed by the import, so importIssuesCore must wipe them all.
func TestImportClearsExportHashes(t *testing.T) {
	// Create temp directory
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, ".beads", "beads.db")

	// Ensure .beads directory exists
	if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
		t.Fatalf("failed to create .beads directory: %v", err)
	}

	// Create database
	testStore, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("failed to create database: %v", err)
	}
	defer testStore.Close()

	ctx := context.Background()

	// Initialize database with prefix
	if err := testStore.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("failed to set issue prefix: %v", err)
	}

	// Create a test issue
	issue := &types.Issue{
		ID:          "bd-1",
		Title:       "Test issue",
		Description: "Test description",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := testStore.CreateIssue(ctx, issue, testActor); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}

	// Set an export hash (the stale state the import must clear)
	if err := testStore.SetExportHash(ctx, "bd-1", "dummy-hash"); err != nil {
		t.Fatalf("failed to set export hash: %v", err)
	}

	// Verify hash is set before the import runs
	hash, err := testStore.GetExportHash(ctx, "bd-1")
	if err != nil {
		t.Fatalf("failed to get export hash: %v", err)
	}
	if hash != "dummy-hash" {
		t.Fatalf("expected hash 'dummy-hash', got %q", hash)
	}

	// Import another issue (should clear export_hashes)
	issue2 := &types.Issue{
		ID:          "bd-2",
		Title:       "Another issue",
		Description: "Another description",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	opts := ImportOptions{
		ResolveCollisions:    false,
		DryRun:               false,
		SkipUpdate:           false,
		Strict:               false,
		SkipPrefixValidation: true,
	}
	_, err = importIssuesCore(ctx, dbPath, testStore, []*types.Issue{issue2}, opts)
	if err != nil {
		t.Fatalf("import failed: %v", err)
	}

	// Verify export_hashes were cleared for the pre-existing issue too
	hash, err = testStore.GetExportHash(ctx, "bd-1")
	if err != nil {
		t.Fatalf("failed to get export hash after import: %v", err)
	}
	if hash != "" {
		t.Fatalf("expected export hash to be cleared after import, got %q", hash)
	}
}

View File

@@ -11,7 +11,7 @@ import (
var ( var (
// Version is the current version of bd (overridden by ldflags at build time) // Version is the current version of bd (overridden by ldflags at build time)
Version = "0.18.0" Version = "0.19.0"
// Build can be set via ldflags at compile time // Build can be set via ldflags at compile time
Build = "dev" Build = "dev"
) )

View File

@@ -1,6 +1,6 @@
[project] [project]
name = "beads-mcp" name = "beads-mcp"
version = "0.18.0" version = "0.19.0"
description = "MCP server for beads issue tracker." description = "MCP server for beads issue tracker."
readme = "README.md" readme = "README.md"
requires-python = ">=3.10" requires-python = ">=3.10"

View File

@@ -4,4 +4,4 @@ This package provides an MCP (Model Context Protocol) server that exposes
beads (bd) issue tracker functionality to MCP Clients. beads (bd) issue tracker functionality to MCP Clients.
""" """
__version__ = "0.18.0" __version__ = "0.19.0"

View File

@@ -76,6 +76,14 @@ func ImportIssues(ctx context.Context, dbPath string, store storage.Storage, iss
if needCloseStore { if needCloseStore {
defer func() { _ = sqliteStore.Close() }() defer func() { _ = sqliteStore.Close() }()
} }
// Clear export_hashes before import to prevent staleness (bd-160)
// Import operations may add/update issues, so export_hashes entries become invalid
if !opts.DryRun {
if err := sqliteStore.ClearAllExportHashes(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Warning: failed to clear export_hashes before import: %v\n", err)
}
}
// Check and handle prefix mismatches // Check and handle prefix mismatches
if err := handlePrefixMismatch(ctx, sqliteStore, issues, opts, result); err != nil { if err := handlePrefixMismatch(ctx, sqliteStore, issues, opts, result); err != nil {

View File

@@ -595,6 +595,24 @@ func (m *MemoryStorage) SetExportHash(ctx context.Context, issueID, hash string)
return nil return nil
} }
// ClearAllExportHashes removes every stored export hash.
// The in-memory backend keeps no export-hash state at all, so there is
// nothing to clear and the call unconditionally succeeds.
func (m *MemoryStorage) ClearAllExportHashes(ctx context.Context) error {
	return nil
}
// GetJSONLFileHash reports the recorded JSONL file hash.
// The in-memory backend never records one, so the result is always the
// empty string with a nil error.
func (m *MemoryStorage) GetJSONLFileHash(ctx context.Context) (string, error) {
	return "", nil
}
// SetJSONLFileHash records the hash of the JSONL file.
// The in-memory backend does not persist this value; the argument is
// discarded and the call unconditionally succeeds.
func (m *MemoryStorage) SetJSONLFileHash(ctx context.Context, fileHash string) error {
	return nil
}
// GetDependencyTree gets the dependency tree for an issue // GetDependencyTree gets the dependency tree for an issue
func (m *MemoryStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool, reverse bool) ([]*types.TreeNode, error) { func (m *MemoryStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool, reverse bool) ([]*types.TreeNode, error) {
// Simplified implementation - just return direct dependencies // Simplified implementation - just return direct dependencies

View File

@@ -50,3 +50,36 @@ func (s *SQLiteStorage) ClearAllExportHashes(ctx context.Context) error {
} }
return nil return nil
} }
// GetJSONLFileHash reads the JSONL file hash previously recorded in the
// metadata table (bd-160). A missing row means no hash has been stored
// yet; that case is reported as an empty string rather than an error so
// callers can treat "never exported" uniformly.
func (s *SQLiteStorage) GetJSONLFileHash(ctx context.Context) (string, error) {
	var stored string
	row := s.db.QueryRowContext(ctx, `
		SELECT value FROM metadata WHERE key = 'jsonl_file_hash'
	`)
	switch err := row.Scan(&stored); {
	case err == sql.ErrNoRows:
		// No row yet — not a failure.
		return "", nil
	case err != nil:
		return "", fmt.Errorf("failed to get jsonl_file_hash: %w", err)
	default:
		return stored, nil
	}
}
// SetJSONLFileHash upserts the JSONL file hash into the metadata table
// after an export (bd-160). Any existing row for the key is overwritten,
// so the table holds at most one current hash.
func (s *SQLiteStorage) SetJSONLFileHash(ctx context.Context, fileHash string) error {
	if _, err := s.db.ExecContext(ctx, `
		INSERT INTO metadata (key, value)
		VALUES ('jsonl_file_hash', ?)
		ON CONFLICT(key) DO UPDATE SET value = excluded.value
	`, fileHash); err != nil {
		return fmt.Errorf("failed to set jsonl_file_hash: %w", err)
	}
	return nil
}

View File

@@ -59,6 +59,11 @@ type Storage interface {
// Export hash tracking (for timestamp-only dedup, bd-164) // Export hash tracking (for timestamp-only dedup, bd-164)
GetExportHash(ctx context.Context, issueID string) (string, error) GetExportHash(ctx context.Context, issueID string) (string, error)
SetExportHash(ctx context.Context, issueID, contentHash string) error SetExportHash(ctx context.Context, issueID, contentHash string) error
ClearAllExportHashes(ctx context.Context) error
// JSONL file integrity (bd-160)
GetJSONLFileHash(ctx context.Context) (string, error)
SetJSONLFileHash(ctx context.Context, fileHash string) error
// Config // Config
SetConfig(ctx context.Context, key, value string) error SetConfig(ctx context.Context, key, value string) error