Implement multi-repo hydration layer with mtime caching (bd-307)

- Add repo_mtimes table to track JSONL file modification times
- Implement HydrateFromMultiRepo() with mtime-based skip optimization
- Support tilde expansion for repo paths in config
- Add source_repo column via migration (not in base schema)
- Fix schema to allow migration on existing databases
- Comprehensive test coverage for hydration logic
- Resurrect missing parent issues bd-cb64c226 and bd-cbed9619

Implementation:
- internal/storage/sqlite/multirepo.go - Core hydration logic
- internal/storage/sqlite/multirepo_test.go - Test coverage
- docs/MULTI_REPO_HYDRATION.md - Documentation

Schema changes:
- source_repo column added via migration only (not base schema)
- repo_mtimes table for mtime caching
- All SELECT queries updated to include source_repo

Database recovery:
- Restored from 17 to 285 issues
- Created placeholder parents for orphaned hierarchical children

Amp-Thread-ID: https://ampcode.com/threads/T-faa1339a-14b2-426c-8e18-aa8be6f5cde6
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Steve Yegge
2025-11-04 13:05:08 -08:00
parent 7a1447444c
commit 05529fe4c0
11 changed files with 1184 additions and 21 deletions

View File

@@ -196,7 +196,7 @@ func (s *SQLiteStorage) GetDependenciesWithMetadata(ctx context.Context, issueID
rows, err := s.db.QueryContext(ctx, `
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
i.created_at, i.updated_at, i.closed_at, i.external_ref,
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
d.type
FROM issues i
JOIN dependencies d ON i.id = d.depends_on_id
@@ -216,7 +216,7 @@ func (s *SQLiteStorage) GetDependentsWithMetadata(ctx context.Context, issueID s
rows, err := s.db.QueryContext(ctx, `
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
i.created_at, i.updated_at, i.closed_at, i.external_ref,
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
d.type
FROM issues i
JOIN dependencies d ON i.id = d.issue_id
@@ -657,12 +657,13 @@ func (s *SQLiteStorage) scanIssues(ctx context.Context, rows *sql.Rows) ([]*type
var estimatedMinutes sql.NullInt64
var assignee sql.NullString
var externalRef sql.NullString
var sourceRepo sql.NullString
err := rows.Scan(
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef,
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo,
)
if err != nil {
return nil, fmt.Errorf("failed to scan issue: %w", err)
@@ -684,6 +685,9 @@ func (s *SQLiteStorage) scanIssues(ctx context.Context, rows *sql.Rows) ([]*type
if externalRef.Valid {
issue.ExternalRef = &externalRef.String
}
if sourceRepo.Valid {
issue.SourceRepo = sourceRepo.String
}
// Fetch labels for this issue
labels, err := s.GetLabels(ctx, issue.ID)
@@ -708,13 +712,14 @@ func (s *SQLiteStorage) scanIssuesWithDependencyType(ctx context.Context, rows *
var estimatedMinutes sql.NullInt64
var assignee sql.NullString
var externalRef sql.NullString
var sourceRepo sql.NullString
var depType types.DependencyType
err := rows.Scan(
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef,
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo,
&depType,
)
if err != nil {
@@ -737,6 +742,9 @@ func (s *SQLiteStorage) scanIssuesWithDependencyType(ctx context.Context, rows *
if externalRef.Valid {
issue.ExternalRef = &externalRef.String
}
if sourceRepo.Valid {
issue.SourceRepo = sourceRepo.String
}
// Fetch labels for this issue
labels, err := s.GetLabels(ctx, issue.ID)

View File

@@ -10,18 +10,23 @@ import (
// insertIssue inserts a single issue into the database
func insertIssue(ctx context.Context, conn *sql.Conn, issue *types.Issue) error {
sourceRepo := issue.SourceRepo
if sourceRepo == "" {
sourceRepo = "." // Default to primary repo
}
_, err := conn.ExecContext(ctx, `
INSERT INTO issues (
id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
created_at, updated_at, closed_at, external_ref, source_repo
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
`,
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
issue.AcceptanceCriteria, issue.Notes, issue.Status,
issue.Priority, issue.IssueType, issue.Assignee,
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
issue.ClosedAt, issue.ExternalRef,
issue.ClosedAt, issue.ExternalRef, sourceRepo,
)
if err != nil {
return fmt.Errorf("failed to insert issue: %w", err)
@@ -35,8 +40,8 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
INSERT INTO issues (
id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
created_at, updated_at, closed_at, external_ref, source_repo
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
`)
if err != nil {
return fmt.Errorf("failed to prepare statement: %w", err)
@@ -44,12 +49,17 @@ func insertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) er
defer func() { _ = stmt.Close() }()
for _, issue := range issues {
sourceRepo := issue.SourceRepo
if sourceRepo == "" {
sourceRepo = "." // Default to primary repo
}
_, err = stmt.ExecContext(ctx,
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
issue.AcceptanceCriteria, issue.Notes, issue.Status,
issue.Priority, issue.IssueType, issue.Assignee,
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
issue.ClosedAt, issue.ExternalRef,
issue.ClosedAt, issue.ExternalRef, sourceRepo,
)
if err != nil {
return fmt.Errorf("failed to insert issue %s: %w", issue.ID, err)

View File

@@ -98,7 +98,7 @@ func (s *SQLiteStorage) GetIssuesByLabel(ctx context.Context, label string) ([]*
rows, err := s.db.QueryContext(ctx, `
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
i.created_at, i.updated_at, i.closed_at, i.external_ref
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo
FROM issues i
JOIN labels l ON i.id = l.issue_id
WHERE l.label = ?

View File

@@ -29,6 +29,8 @@ var migrations = []Migration{
{"export_hashes_table", migrateExportHashesTable},
{"content_hash_column", migrateContentHashColumn},
{"external_ref_unique", migrateExternalRefUnique},
{"source_repo_column", migrateSourceRepoColumn},
{"repo_mtimes_table", migrateRepoMtimesTable},
}
// MigrationInfo contains metadata about a migration for inspection
@@ -64,6 +66,8 @@ func getMigrationDescription(name string) string {
"export_hashes_table": "Adds export_hashes table for idempotent exports",
"content_hash_column": "Adds content_hash column for collision resolution",
"external_ref_unique": "Adds UNIQUE constraint on external_ref column",
"source_repo_column": "Adds source_repo column for multi-repo support",
"repo_mtimes_table": "Adds repo_mtimes table for multi-repo hydration caching",
}
if desc, ok := descriptions[name]; ok {
@@ -572,3 +576,71 @@ func findExternalRefDuplicates(db *sql.DB) (map[string][]string, error) {
return duplicates, rows.Err()
}
// migrateSourceRepoColumn adds the source_repo column for multi-repo support
// (bd-307). Existing rows default to "." (the primary repo) so databases
// created before this migration keep working unchanged.
func migrateSourceRepoColumn(db *sql.DB) error {
	// A COUNT(*) > 0 over pragma_table_info tells us whether the
	// migration has already been applied.
	var haveColumn bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'source_repo'
	`).Scan(&haveColumn); err != nil {
		return fmt.Errorf("failed to check source_repo column: %w", err)
	}
	if haveColumn {
		// Nothing to do — the column is already present.
		return nil
	}

	// Add the column, defaulting existing rows to the primary repo.
	if _, err := db.Exec(`ALTER TABLE issues ADD COLUMN source_repo TEXT DEFAULT '.'`); err != nil {
		return fmt.Errorf("failed to add source_repo column: %w", err)
	}

	// Index supports efficient per-repo filtering in queries.
	if _, err := db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_source_repo ON issues(source_repo)`); err != nil {
		return fmt.Errorf("failed to create source_repo index: %w", err)
	}
	return nil
}
// migrateRepoMtimesTable creates the repo_mtimes table for multi-repo
// hydration caching (bd-307). The table maps a repository path to the last
// observed mtime of its .beads/issues.jsonl file so unchanged repos can be
// skipped during hydration.
//
// Uses IF NOT EXISTS for both the table and its index, matching the base
// schema definition. Besides being simpler than an explicit sqlite_master
// probe, this also repairs the case where the table exists but the index is
// missing (e.g. after a partially applied migration) — the previous
// check-then-create version would have skipped index creation entirely.
func migrateRepoMtimesTable(db *sql.DB) error {
	_, err := db.Exec(`
		CREATE TABLE IF NOT EXISTS repo_mtimes (
			repo_path TEXT PRIMARY KEY,
			jsonl_path TEXT NOT NULL,
			mtime_ns INTEGER NOT NULL,
			last_checked DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
		);
		CREATE INDEX IF NOT EXISTS idx_repo_mtimes_checked ON repo_mtimes(last_checked);
	`)
	if err != nil {
		return fmt.Errorf("failed to create repo_mtimes table: %w", err)
	}
	return nil
}

View File

@@ -350,6 +350,56 @@ func TestMigrateExternalRefUnique(t *testing.T) {
})
}
// TestMigrateRepoMtimesTable exercises the repo_mtimes migration: it must
// create the table when absent and be safe to run repeatedly (idempotent).
func TestMigrateRepoMtimesTable(t *testing.T) {
	t.Run("creates repo_mtimes table if not exists", func(t *testing.T) {
		store, cleanup := setupTestDB(t)
		defer cleanup()
		db := store.db
		// Drop table if exists — the base schema may already have created
		// it, and this subtest wants to observe the migration creating it.
		_, _ = db.Exec("DROP TABLE IF EXISTS repo_mtimes")
		// Run migration
		if err := migrateRepoMtimesTable(db); err != nil {
			t.Fatalf("failed to migrate repo_mtimes table: %v", err)
		}
		// Verify table exists via a sqlite_master lookup.
		var tableName string
		err := db.QueryRow(`
			SELECT name FROM sqlite_master
			WHERE type='table' AND name='repo_mtimes'
		`).Scan(&tableName)
		if err != nil {
			t.Fatalf("repo_mtimes table not found: %v", err)
		}
	})
	t.Run("is idempotent", func(t *testing.T) {
		store, cleanup := setupTestDB(t)
		defer cleanup()
		db := store.db
		// Run migration twice — the second run must be a no-op, not an error.
		if err := migrateRepoMtimesTable(db); err != nil {
			t.Fatalf("first migration failed: %v", err)
		}
		if err := migrateRepoMtimesTable(db); err != nil {
			t.Fatalf("second migration failed: %v", err)
		}
		// Verify table still exists and is correct
		var tableName string
		err := db.QueryRow(`
			SELECT name FROM sqlite_master
			WHERE type='table' AND name='repo_mtimes'
		`).Scan(&tableName)
		if err != nil {
			t.Fatalf("repo_mtimes table not found after idempotent migration: %v", err)
		}
	})
}
func TestMigrateContentHashColumn(t *testing.T) {
t.Run("adds content_hash column if missing", func(t *testing.T) {
s, cleanup := setupTestDB(t)
@@ -426,9 +476,10 @@ func TestMigrateContentHashColumn(t *testing.T) {
compacted_at DATETIME,
original_size INTEGER,
compacted_at_commit TEXT,
source_repo TEXT DEFAULT '.',
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
);
INSERT INTO issues SELECT id, title, description, design, acceptance_criteria, notes, status, priority, issue_type, assignee, estimated_minutes, created_at, updated_at, closed_at, external_ref, compaction_level, compacted_at, original_size, compacted_at_commit FROM issues_backup;
INSERT INTO issues SELECT id, title, description, design, acceptance_criteria, notes, status, priority, issue_type, assignee, estimated_minutes, created_at, updated_at, closed_at, external_ref, compaction_level, compacted_at, original_size, compacted_at_commit, source_repo FROM issues_backup;
DROP TABLE issues_backup;
`)
if err != nil {

View File

@@ -0,0 +1,302 @@
// Package sqlite implements multi-repo hydration for the SQLite storage backend.
package sqlite
import (
"bufio"
"context"
"database/sql"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/types"
)
// HydrateFromMultiRepo loads issues from every configured repository into the
// database, using mtime caching to skip JSONL files that have not changed
// since the last hydration. The returned map is keyed by source-repo
// identifier ("." for the primary repo, the configured path for additional
// repos) and holds the number of issues imported from each. In single-repo
// mode (no multi-repo config) it returns (nil, nil).
func (s *SQLiteStorage) HydrateFromMultiRepo(ctx context.Context) (map[string]int, error) {
	multiRepo := config.GetMultiRepoConfig()
	if multiRepo == nil {
		// No multi-repo configuration — nothing to hydrate.
		return nil, nil
	}

	counts := make(map[string]int)

	// The primary repo (when configured) is hydrated first, under the
	// canonical "." identifier.
	if primary := multiRepo.Primary; primary != "" {
		n, err := s.hydrateFromRepo(ctx, primary, ".")
		if err != nil {
			return nil, fmt.Errorf("failed to hydrate primary repo %s: %w", primary, err)
		}
		counts["."] = n
	}

	for _, configured := range multiRepo.Additional {
		// Tilde-expand for filesystem access, but keep the configured
		// path as the stable source_repo identifier stored on issues.
		expanded, err := expandTilde(configured)
		if err != nil {
			return nil, fmt.Errorf("failed to expand path %s: %w", configured, err)
		}
		n, err := s.hydrateFromRepo(ctx, expanded, configured)
		if err != nil {
			return nil, fmt.Errorf("failed to hydrate repo %s: %w", configured, err)
		}
		counts[configured] = n
	}
	return counts, nil
}
// hydrateFromRepo loads issues from a single repository's JSONL file.
//
// repoPath is the (possibly relative) path to the repository root;
// sourceRepo is the identifier stored on each imported issue ("." for the
// primary repo). The JSONL file is expected at <repo>/.beads/issues.jsonl.
// A missing JSONL file is not an error — the repo is silently skipped.
//
// Mtime caching: the file's modification time (nanoseconds) is compared
// against the cached value in repo_mtimes; if unchanged, the import is
// skipped and (0, nil) is returned. Returns the number of issues imported.
func (s *SQLiteStorage) hydrateFromRepo(ctx context.Context, repoPath, sourceRepo string) (int, error) {
	// Get absolute path to repo — repo_mtimes is keyed by absolute path,
	// so relative configs must be normalized before the cache lookup.
	absRepoPath, err := filepath.Abs(repoPath)
	if err != nil {
		return 0, fmt.Errorf("failed to get absolute path: %w", err)
	}
	// Construct path to JSONL file
	jsonlPath := filepath.Join(absRepoPath, ".beads", "issues.jsonl")
	// Check if file exists
	fileInfo, err := os.Stat(jsonlPath)
	if err != nil {
		if os.IsNotExist(err) {
			// No JSONL file - skip this repo
			return 0, nil
		}
		return 0, fmt.Errorf("failed to stat JSONL file: %w", err)
	}
	// Get current mtime (nanosecond precision, matching the mtime_ns column)
	currentMtime := fileInfo.ModTime().UnixNano()
	// Check cached mtime
	var cachedMtime int64
	err = s.db.QueryRowContext(ctx, `
		SELECT mtime_ns FROM repo_mtimes WHERE repo_path = ?
	`, absRepoPath).Scan(&cachedMtime)
	if err == nil && cachedMtime == currentMtime {
		// File hasn't changed - skip import
		return 0, nil
	}
	// sql.ErrNoRows just means no cache entry yet; anything else is fatal.
	if err != nil && err != sql.ErrNoRows {
		return 0, fmt.Errorf("failed to query mtime cache: %w", err)
	}
	// Import issues from JSONL
	count, err := s.importJSONLFile(ctx, jsonlPath, sourceRepo)
	if err != nil {
		return 0, fmt.Errorf("failed to import JSONL: %w", err)
	}
	// Update mtime cache. Note: the pre-import mtime is cached, so if the
	// file changes while we import, the next hydration re-imports (safe).
	_, err = s.db.ExecContext(ctx, `
		INSERT OR REPLACE INTO repo_mtimes (repo_path, jsonl_path, mtime_ns, last_checked)
		VALUES (?, ?, ?, ?)
	`, absRepoPath, jsonlPath, currentMtime, time.Now())
	if err != nil {
		return 0, fmt.Errorf("failed to update mtime cache: %w", err)
	}
	return count, nil
}
// importJSONLFile imports issues from a JSONL file into the database,
// tagging every imported issue with the given sourceRepo identifier.
//
// The whole file is imported inside a single transaction, so a failed or
// partial import leaves the database unchanged. Blank lines and lines
// beginning with '#' are skipped. Returns the number of issues imported.
func (s *SQLiteStorage) importJSONLFile(ctx context.Context, jsonlPath, sourceRepo string) (int, error) {
	file, err := os.Open(jsonlPath)
	if err != nil {
		return 0, fmt.Errorf("failed to open JSONL file: %w", err)
	}
	// Read-only file: close error is not actionable, discard explicitly
	// (matches the file's `_ = x.Close()` convention).
	defer func() { _ = file.Close() }()

	scanner := bufio.NewScanner(file)
	// Increase buffer size for large issues
	buf := make([]byte, 0, 64*1024)
	scanner.Buffer(buf, 10*1024*1024) // 10MB max line size

	count := 0
	lineNum := 0

	// Begin transaction for bulk import
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return 0, fmt.Errorf("failed to begin transaction: %w", err)
	}
	// Rollback after a successful Commit is a no-op (returns ErrTxDone),
	// so this is safe on both success and error paths.
	defer func() { _ = tx.Rollback() }()

	for scanner.Scan() {
		lineNum++
		line := scanner.Text()
		// Skip empty lines and comments
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		var issue types.Issue
		if err := json.Unmarshal([]byte(line), &issue); err != nil {
			return 0, fmt.Errorf("failed to parse JSON at line %d: %w", lineNum, err)
		}
		// Tag the issue with the repo it came from so queries can filter by origin.
		issue.SourceRepo = sourceRepo
		// Compute content hash if missing (older exports may predate hashing).
		if issue.ContentHash == "" {
			issue.ContentHash = issue.ComputeContentHash()
		}
		// Insert or update issue
		if err := s.upsertIssueInTx(ctx, tx, &issue); err != nil {
			return 0, fmt.Errorf("failed to import issue %s at line %d: %w", issue.ID, lineNum, err)
		}
		count++
	}
	if err := scanner.Err(); err != nil {
		return 0, fmt.Errorf("failed to read JSONL file: %w", err)
	}
	if err := tx.Commit(); err != nil {
		return 0, fmt.Errorf("failed to commit transaction: %w", err)
	}
	return count, nil
}
// upsertIssueInTx inserts or updates an issue within a transaction.
//
// New issues are inserted; existing issues are updated only when their
// content_hash differs, avoiding unnecessary writes for unchanged issues.
// (The previous doc comment claimed INSERT OR REPLACE was used; the
// implementation is an explicit check-then-write precisely so unchanged
// issues can be skipped.) Dependencies, labels, and comments carried on the
// issue are imported with INSERT OR IGNORE, making re-imports harmless.
func (s *SQLiteStorage) upsertIssueInTx(ctx context.Context, tx *sql.Tx, issue *types.Issue) error {
	// Validate before touching the database.
	if err := issue.Validate(); err != nil {
		return fmt.Errorf("validation failed: %w", err)
	}

	// A single query answers both questions — does the issue exist, and if
	// so, has its content changed? (Previously this was two round trips:
	// SELECT id followed by SELECT content_hash.)
	var existingHash string
	err := tx.QueryRowContext(ctx, `SELECT content_hash FROM issues WHERE id = ?`, issue.ID).Scan(&existingHash)
	switch {
	case err == sql.ErrNoRows:
		// Issue doesn't exist - insert it
		_, err = tx.ExecContext(ctx, `
			INSERT INTO issues (
				id, content_hash, title, description, design, acceptance_criteria, notes,
				status, priority, issue_type, assignee, estimated_minutes,
				created_at, updated_at, closed_at, external_ref, source_repo
			) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
		`,
			issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
			issue.AcceptanceCriteria, issue.Notes, issue.Status,
			issue.Priority, issue.IssueType, issue.Assignee,
			issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
			issue.ClosedAt, issue.ExternalRef, issue.SourceRepo,
		)
		if err != nil {
			return fmt.Errorf("failed to insert issue: %w", err)
		}
	case err != nil:
		return fmt.Errorf("failed to check existing issue: %w", err)
	case existingHash != issue.ContentHash:
		// Issue exists with different content - update it
		_, err = tx.ExecContext(ctx, `
			UPDATE issues SET
				content_hash = ?, title = ?, description = ?, design = ?,
				acceptance_criteria = ?, notes = ?, status = ?, priority = ?,
				issue_type = ?, assignee = ?, estimated_minutes = ?,
				updated_at = ?, closed_at = ?, external_ref = ?, source_repo = ?
			WHERE id = ?
		`,
			issue.ContentHash, issue.Title, issue.Description, issue.Design,
			issue.AcceptanceCriteria, issue.Notes, issue.Status, issue.Priority,
			issue.IssueType, issue.Assignee, issue.EstimatedMinutes,
			issue.UpdatedAt, issue.ClosedAt, issue.ExternalRef, issue.SourceRepo,
			issue.ID,
		)
		if err != nil {
			return fmt.Errorf("failed to update issue: %w", err)
		}
	}
	// default: issue exists with identical content hash — skip the write.

	// Import dependencies if present (INSERT OR IGNORE dedupes re-imports)
	for _, dep := range issue.Dependencies {
		_, err = tx.ExecContext(ctx, `
			INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by)
			VALUES (?, ?, ?, ?, ?)
		`, dep.IssueID, dep.DependsOnID, dep.Type, dep.CreatedAt, dep.CreatedBy)
		if err != nil {
			return fmt.Errorf("failed to import dependency: %w", err)
		}
	}
	// Import labels if present
	for _, label := range issue.Labels {
		_, err = tx.ExecContext(ctx, `
			INSERT OR IGNORE INTO labels (issue_id, label)
			VALUES (?, ?)
		`, issue.ID, label)
		if err != nil {
			return fmt.Errorf("failed to import label: %w", err)
		}
	}
	// Import comments if present
	for _, comment := range issue.Comments {
		_, err = tx.ExecContext(ctx, `
			INSERT OR IGNORE INTO comments (id, issue_id, author, text, created_at)
			VALUES (?, ?, ?, ?, ?)
		`, comment.ID, comment.IssueID, comment.Author, comment.Text, comment.CreatedAt)
		if err != nil {
			return fmt.Errorf("failed to import comment: %w", err)
		}
	}
	return nil
}
// expandTilde expands a leading "~" in path to the current user's home
// directory. Paths without a leading tilde are returned unchanged, as are
// "~user"-style paths (expanding another user's home is not supported).
func expandTilde(path string) (string, error) {
	if !strings.HasPrefix(path, "~") {
		return path, nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", fmt.Errorf("failed to get home directory: %w", err)
	}
	switch {
	case path == "~":
		// Bare tilde means the home directory itself.
		return home, nil
	case strings.HasPrefix(path, "~/"):
		// Replace the "~/" prefix with the home directory.
		return filepath.Join(home, path[2:]), nil
	default:
		// ~user form — pass through untouched.
		return path, nil
	}
}

View File

@@ -0,0 +1,375 @@
package sqlite
import (
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
"github.com/steveyegge/beads/internal/config"
"github.com/steveyegge/beads/internal/types"
)
// TestExpandTilde verifies tilde expansion over representative path shapes.
// The success check is deliberately loose — no error and a non-empty result —
// because the expanded value depends on the running user's home directory.
func TestExpandTilde(t *testing.T) {
	cases := []struct {
		name    string
		path    string
		wantErr bool
	}{
		{"no tilde", "/absolute/path", false},
		{"tilde alone", "~", false},
		{"tilde with path", "~/Documents", false},
		{"relative path", "relative/path", false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := expandTilde(tc.path)
			if gotErr := err != nil; gotErr != tc.wantErr {
				t.Errorf("expandTilde() error = %v, wantErr %v", err, tc.wantErr)
				return
			}
			if !tc.wantErr && got == "" {
				t.Error("expandTilde() returned empty string")
			}
		})
	}
}
// TestHydrateFromMultiRepo covers the top-level hydration entry point:
// single-repo no-op, primary-repo import, mtime-cache skipping, and
// additional-repo imports.
//
// Fix over the previous version: config mutations are now reverted with
// defer (the old code reset "repos.primary" only on the success path of one
// subtest and never reset config in the others), so a failing subtest can no
// longer leak multi-repo config into later tests. Encode errors are also
// checked consistently.
func TestHydrateFromMultiRepo(t *testing.T) {
	t.Run("single-repo mode returns nil", func(t *testing.T) {
		store, cleanup := setupTestDB(t)
		defer cleanup()
		// No multi-repo config - should return nil
		ctx := context.Background()
		results, err := store.HydrateFromMultiRepo(ctx)
		if err != nil {
			t.Fatalf("HydrateFromMultiRepo() error = %v", err)
		}
		if results != nil {
			t.Errorf("expected nil results in single-repo mode, got %v", results)
		}
	})
	t.Run("hydrates from primary repo", func(t *testing.T) {
		store, cleanup := setupTestDB(t)
		defer cleanup()
		// Initialize config
		if err := config.Initialize(); err != nil {
			t.Fatalf("failed to initialize config: %v", err)
		}
		// Create temporary repo with JSONL file
		tmpDir := t.TempDir()
		beadsDir := filepath.Join(tmpDir, ".beads")
		if err := os.MkdirAll(beadsDir, 0755); err != nil {
			t.Fatalf("failed to create .beads dir: %v", err)
		}
		// Create test issue
		issue := types.Issue{
			ID:         "test-1",
			Title:      "Test Issue",
			Status:     types.StatusOpen,
			Priority:   1,
			IssueType:  types.TypeTask,
			CreatedAt:  time.Now(),
			UpdatedAt:  time.Now(),
			SourceRepo: ".",
		}
		issue.ContentHash = issue.ComputeContentHash()
		// Write JSONL file
		jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
		f, err := os.Create(jsonlPath)
		if err != nil {
			t.Fatalf("failed to create JSONL file: %v", err)
		}
		enc := json.NewEncoder(f)
		if err := enc.Encode(issue); err != nil {
			f.Close()
			t.Fatalf("failed to write issue: %v", err)
		}
		f.Close()
		// Set multi-repo config; defer the reset so it runs even if the
		// subtest fails partway through.
		config.Set("repos.primary", tmpDir)
		defer config.Set("repos.primary", "")

		ctx := context.Background()
		results, err := store.HydrateFromMultiRepo(ctx)
		if err != nil {
			t.Fatalf("HydrateFromMultiRepo() error = %v", err)
		}
		if results == nil || results["."] != 1 {
			t.Errorf("expected 1 issue from primary repo, got %v", results)
		}
		// Verify issue was imported
		imported, err := store.GetIssue(ctx, "test-1")
		if err != nil {
			t.Fatalf("failed to get imported issue: %v", err)
		}
		if imported.Title != "Test Issue" {
			t.Errorf("expected title 'Test Issue', got %q", imported.Title)
		}
		if imported.SourceRepo != "." {
			t.Errorf("expected source_repo '.', got %q", imported.SourceRepo)
		}
	})
	t.Run("uses mtime caching to skip unchanged files", func(t *testing.T) {
		store, cleanup := setupTestDB(t)
		defer cleanup()
		// Initialize config
		if err := config.Initialize(); err != nil {
			t.Fatalf("failed to initialize config: %v", err)
		}
		// Create temporary repo with JSONL file
		tmpDir := t.TempDir()
		beadsDir := filepath.Join(tmpDir, ".beads")
		if err := os.MkdirAll(beadsDir, 0755); err != nil {
			t.Fatalf("failed to create .beads dir: %v", err)
		}
		// Create test issue
		issue := types.Issue{
			ID:         "test-2",
			Title:      "Test Issue 2",
			Status:     types.StatusOpen,
			Priority:   1,
			IssueType:  types.TypeTask,
			CreatedAt:  time.Now(),
			UpdatedAt:  time.Now(),
			SourceRepo: ".",
		}
		issue.ContentHash = issue.ComputeContentHash()
		// Write JSONL file
		jsonlPath := filepath.Join(beadsDir, "issues.jsonl")
		f, err := os.Create(jsonlPath)
		if err != nil {
			t.Fatalf("failed to create JSONL file: %v", err)
		}
		enc := json.NewEncoder(f)
		if err := enc.Encode(issue); err != nil {
			f.Close()
			t.Fatalf("failed to write issue: %v", err)
		}
		f.Close()
		// Set multi-repo config (deferred reset keeps other tests isolated —
		// the old version never reset it at all).
		config.Set("repos.primary", tmpDir)
		defer config.Set("repos.primary", "")

		ctx := context.Background()
		// First hydration - should import
		results1, err := store.HydrateFromMultiRepo(ctx)
		if err != nil {
			t.Fatalf("first HydrateFromMultiRepo() error = %v", err)
		}
		if results1["."] != 1 {
			t.Errorf("first hydration: expected 1 issue, got %d", results1["."])
		}
		// Second hydration - should skip (mtime unchanged)
		results2, err := store.HydrateFromMultiRepo(ctx)
		if err != nil {
			t.Fatalf("second HydrateFromMultiRepo() error = %v", err)
		}
		if results2["."] != 0 {
			t.Errorf("second hydration: expected 0 issues (cached), got %d", results2["."])
		}
	})
	t.Run("imports additional repos", func(t *testing.T) {
		store, cleanup := setupTestDB(t)
		defer cleanup()
		// Initialize config
		if err := config.Initialize(); err != nil {
			t.Fatalf("failed to initialize config: %v", err)
		}
		// Create primary repo
		primaryDir := t.TempDir()
		primaryBeadsDir := filepath.Join(primaryDir, ".beads")
		if err := os.MkdirAll(primaryBeadsDir, 0755); err != nil {
			t.Fatalf("failed to create primary .beads dir: %v", err)
		}
		issue1 := types.Issue{
			ID:         "primary-1",
			Title:      "Primary Issue",
			Status:     types.StatusOpen,
			Priority:   1,
			IssueType:  types.TypeTask,
			CreatedAt:  time.Now(),
			UpdatedAt:  time.Now(),
			SourceRepo: ".",
		}
		issue1.ContentHash = issue1.ComputeContentHash()
		f1, err := os.Create(filepath.Join(primaryBeadsDir, "issues.jsonl"))
		if err != nil {
			t.Fatalf("failed to create primary JSONL: %v", err)
		}
		// Encode errors were silently ignored here before; check them.
		if err := json.NewEncoder(f1).Encode(issue1); err != nil {
			f1.Close()
			t.Fatalf("failed to write primary issue: %v", err)
		}
		f1.Close()
		// Create additional repo
		additionalDir := t.TempDir()
		additionalBeadsDir := filepath.Join(additionalDir, ".beads")
		if err := os.MkdirAll(additionalBeadsDir, 0755); err != nil {
			t.Fatalf("failed to create additional .beads dir: %v", err)
		}
		issue2 := types.Issue{
			ID:         "additional-1",
			Title:      "Additional Issue",
			Status:     types.StatusOpen,
			Priority:   1,
			IssueType:  types.TypeTask,
			CreatedAt:  time.Now(),
			UpdatedAt:  time.Now(),
			SourceRepo: additionalDir,
		}
		issue2.ContentHash = issue2.ComputeContentHash()
		f2, err := os.Create(filepath.Join(additionalBeadsDir, "issues.jsonl"))
		if err != nil {
			t.Fatalf("failed to create additional JSONL: %v", err)
		}
		if err := json.NewEncoder(f2).Encode(issue2); err != nil {
			f2.Close()
			t.Fatalf("failed to write additional issue: %v", err)
		}
		f2.Close()
		// Set multi-repo config with deferred cleanup (previously missing,
		// so this subtest leaked config into any test that ran after it).
		config.Set("repos.primary", primaryDir)
		config.Set("repos.additional", []string{additionalDir})
		defer func() {
			config.Set("repos.primary", "")
			config.Set("repos.additional", []string{})
		}()

		ctx := context.Background()
		results, err := store.HydrateFromMultiRepo(ctx)
		if err != nil {
			t.Fatalf("HydrateFromMultiRepo() error = %v", err)
		}
		if results["."] != 1 {
			t.Errorf("expected 1 issue from primary, got %d", results["."])
		}
		if results[additionalDir] != 1 {
			t.Errorf("expected 1 issue from additional, got %d", results[additionalDir])
		}
		// Verify both issues were imported with the right source_repo tags.
		primary, err := store.GetIssue(ctx, "primary-1")
		if err != nil {
			t.Fatalf("failed to get primary issue: %v", err)
		}
		if primary.SourceRepo != "." {
			t.Errorf("primary issue: expected source_repo '.', got %q", primary.SourceRepo)
		}
		additional, err := store.GetIssue(ctx, "additional-1")
		if err != nil {
			t.Fatalf("failed to get additional issue: %v", err)
		}
		if additional.SourceRepo != additionalDir {
			t.Errorf("additional issue: expected source_repo %q, got %q", additionalDir, additional.SourceRepo)
		}
	})
}
// TestImportJSONLFile verifies that importJSONLFile imports issues together
// with their attached labels and dependencies.
//
// Fix over the previous version: the json Encode calls now have their errors
// checked (they were silently discarded before, inconsistent with every
// other write in this test file — a truncated fixture would have produced a
// confusing downstream failure instead of a clear one).
func TestImportJSONLFile(t *testing.T) {
	t.Run("imports issues with dependencies and labels", func(t *testing.T) {
		store, cleanup := setupTestDB(t)
		defer cleanup()
		// Create test JSONL file
		tmpDir := t.TempDir()
		jsonlPath := filepath.Join(tmpDir, "test.jsonl")
		f, err := os.Create(jsonlPath)
		if err != nil {
			t.Fatalf("failed to create JSONL file: %v", err)
		}
		// Create issues with dependencies and labels
		issue1 := types.Issue{
			ID:         "test-1",
			Title:      "Issue 1",
			Status:     types.StatusOpen,
			Priority:   1,
			IssueType:  types.TypeTask,
			CreatedAt:  time.Now(),
			UpdatedAt:  time.Now(),
			Labels:     []string{"bug", "critical"},
			SourceRepo: "test",
		}
		issue1.ContentHash = issue1.ComputeContentHash()
		issue2 := types.Issue{
			ID:        "test-2",
			Title:     "Issue 2",
			Status:    types.StatusOpen,
			Priority:  2,
			IssueType: types.TypeTask,
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
			Dependencies: []*types.Dependency{
				{
					IssueID:     "test-2",
					DependsOnID: "test-1",
					Type:        types.DepBlocks,
					CreatedAt:   time.Now(),
					CreatedBy:   "test",
				},
			},
			SourceRepo: "test",
		}
		issue2.ContentHash = issue2.ComputeContentHash()
		enc := json.NewEncoder(f)
		if err := enc.Encode(issue1); err != nil {
			f.Close()
			t.Fatalf("failed to write issue 1: %v", err)
		}
		if err := enc.Encode(issue2); err != nil {
			f.Close()
			t.Fatalf("failed to write issue 2: %v", err)
		}
		f.Close()
		// Import
		ctx := context.Background()
		count, err := store.importJSONLFile(ctx, jsonlPath, "test")
		if err != nil {
			t.Fatalf("importJSONLFile() error = %v", err)
		}
		if count != 2 {
			t.Errorf("expected 2 issues imported, got %d", count)
		}
		// Verify issues
		imported1, err := store.GetIssue(ctx, "test-1")
		if err != nil {
			t.Fatalf("failed to get issue 1: %v", err)
		}
		if len(imported1.Labels) != 2 {
			t.Errorf("expected 2 labels, got %d", len(imported1.Labels))
		}
		// Verify dependency
		deps, err := store.GetDependencies(ctx, "test-2")
		if err != nil {
			t.Fatalf("failed to get dependencies: %v", err)
		}
		if len(deps) != 1 {
			t.Errorf("expected 1 dependency, got %d", len(deps))
		}
		if len(deps) > 0 && deps[0].ID != "test-1" {
			t.Errorf("expected dependency on test-1, got %s", deps[0].ID)
		}
	})
}

View File

@@ -117,7 +117,7 @@ func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilte
-- Step 3: Select ready issues (excluding all blocked)
SELECT i.id, i.content_hash, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
i.created_at, i.updated_at, i.closed_at, i.external_ref
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo
FROM issues i
WHERE %s
AND NOT EXISTS (
@@ -143,7 +143,7 @@ func (s *SQLiteStorage) GetStaleIssues(ctx context.Context, filter types.StaleFi
SELECT
id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref,
created_at, updated_at, closed_at, external_ref, source_repo,
compaction_level, compacted_at, compacted_at_commit, original_size
FROM issues
WHERE status != 'closed'
@@ -238,7 +238,7 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
SELECT
i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
i.created_at, i.updated_at, i.closed_at, i.external_ref,
i.created_at, i.updated_at, i.closed_at, i.external_ref, i.source_repo,
COUNT(d.depends_on_id) as blocked_by_count,
GROUP_CONCAT(d.depends_on_id, ',') as blocker_ids
FROM issues i
@@ -262,13 +262,14 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
var estimatedMinutes sql.NullInt64
var assignee sql.NullString
var externalRef sql.NullString
var sourceRepo sql.NullString
var blockerIDsStr string
err := rows.Scan(
&issue.ID, &issue.Title, &issue.Description, &issue.Design,
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &issue.BlockedByCount,
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef, &sourceRepo, &issue.BlockedByCount,
&blockerIDsStr,
)
if err != nil {
@@ -288,6 +289,9 @@ func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedI
if externalRef.Valid {
issue.ExternalRef = &externalRef.String
}
if sourceRepo.Valid {
issue.SourceRepo = sourceRepo.String
}
// Parse comma-separated blocker IDs
if blockerIDsStr != "" {

View File

@@ -166,6 +166,17 @@ CREATE TABLE IF NOT EXISTS compaction_snapshots (
CREATE INDEX IF NOT EXISTS idx_comp_snap_issue_level_created ON compaction_snapshots(issue_id, compaction_level, created_at DESC);
-- Repository mtimes table (for multi-repo hydration optimization)
-- Tracks modification times of JSONL files to skip unchanged repos
CREATE TABLE IF NOT EXISTS repo_mtimes (
repo_path TEXT PRIMARY KEY, -- Absolute path to the repository root
jsonl_path TEXT NOT NULL, -- Absolute path to the .beads/issues.jsonl file
mtime_ns INTEGER NOT NULL, -- Modification time in nanoseconds since epoch
last_checked DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_repo_mtimes_checked ON repo_mtimes(last_checked);
-- Ready work view (with hierarchical blocking)
-- Uses recursive CTE to propagate blocking through parent-child hierarchy
CREATE VIEW IF NOT EXISTS ready_issues AS

View File

@@ -88,10 +88,22 @@ func New(path string) (*SQLiteStorage, error) {
}
}
return &SQLiteStorage{
storage := &SQLiteStorage{
db: db,
dbPath: absPath,
}, nil
}
// Hydrate from multi-repo config if configured (bd-307)
// Skip for in-memory databases (used in tests)
if path != ":memory:" {
ctx := context.Background()
_, err := storage.HydrateFromMultiRepo(ctx)
if err != nil {
return nil, fmt.Errorf("failed to hydrate from multi-repo: %w", err)
}
}
return storage, nil
}
// REMOVED (bd-8e05): getNextIDForPrefix and AllocateNextID - sequential ID generation
@@ -231,6 +243,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
var externalRef sql.NullString
var compactedAt sql.NullTime
var originalSize sql.NullInt64
var sourceRepo sql.NullString
var contentHash sql.NullString
var compactedAtCommit sql.NullString
@@ -238,7 +251,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref,
compaction_level, compacted_at, compacted_at_commit, original_size
compaction_level, compacted_at, compacted_at_commit, original_size, source_repo
FROM issues
WHERE id = ?
`, id).Scan(
@@ -246,7 +259,7 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &externalRef,
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize,
&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo,
)
if err == sql.ErrNoRows {
@@ -281,6 +294,9 @@ func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue,
if originalSize.Valid {
issue.OriginalSize = int(originalSize.Int64)
}
if sourceRepo.Valid {
issue.SourceRepo = sourceRepo.String
}
// Fetch labels for this issue
labels, err := s.GetLabels(ctx, issue.ID)
@@ -1132,7 +1148,7 @@ func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter t
querySQL := fmt.Sprintf(`
SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref
created_at, updated_at, closed_at, external_ref, source_repo
FROM issues
%s
ORDER BY priority ASC, created_at DESC