feat(storage): add Dolt backend for version-controlled issue storage

Implements a complete Dolt storage backend that mirrors the SQLite implementation
with MySQL-compatible syntax and adds version control capabilities.

Key features:
- Full Storage interface implementation (~50 methods)
- Version control operations: commit, push, pull, branch, merge, checkout
- History queries via AS OF and dolt_history_* tables
- Cell-level merge instead of line-level JSONL merge
- SQL injection protection with input validation

Bug fixes applied during implementation:
- Added missing quality_score, work_type, source_system to scanIssue
- Fixed Status() to properly parse boolean staged column
- Added validation to CreateIssues (was missing in batch create)
- Made RenameDependencyPrefix transactional
- Expanded GetIssueHistory to return more complete data

Test coverage: 17 tests covering CRUD, dependencies, labels, search,
comments, events, statistics, and SQL injection protection.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
mayor
2026-01-14 21:06:10 -08:00
committed by Steve Yegge
parent bf7d51a73f
commit 1dc36098a3
15 changed files with 5867 additions and 1 deletions

View File

@@ -0,0 +1,110 @@
package dolt
import (
"context"
"database/sql"
"fmt"
"strings"
)
// SetConfig stores a configuration value, inserting the key or
// overwriting an existing value via upsert.
func (s *DoltStore) SetConfig(ctx context.Context, key, value string) error {
	if _, err := s.db.ExecContext(ctx, `
	INSERT INTO config (`+"`key`"+`, value) VALUES (?, ?)
	ON DUPLICATE KEY UPDATE value = VALUES(value)
	`, key, value); err != nil {
		return fmt.Errorf("failed to set config %s: %w", key, err)
	}
	return nil
}
// GetConfig retrieves a configuration value. A missing key is not an
// error: it yields the empty string.
func (s *DoltStore) GetConfig(ctx context.Context, key string) (string, error) {
	var value string
	err := s.db.QueryRowContext(ctx, "SELECT value FROM config WHERE `key` = ?", key).Scan(&value)
	switch {
	case err == sql.ErrNoRows:
		return "", nil
	case err != nil:
		return "", fmt.Errorf("failed to get config %s: %w", key, err)
	default:
		return value, nil
	}
}
// GetAllConfig loads every key/value pair from the config table into a
// map.
func (s *DoltStore) GetAllConfig(ctx context.Context) (map[string]string, error) {
	rows, err := s.db.QueryContext(ctx, "SELECT `key`, value FROM config")
	if err != nil {
		return nil, fmt.Errorf("failed to get all config: %w", err)
	}
	defer rows.Close()
	out := make(map[string]string)
	for rows.Next() {
		var k, v string
		if err := rows.Scan(&k, &v); err != nil {
			return nil, fmt.Errorf("failed to scan config: %w", err)
		}
		out[k] = v
	}
	return out, rows.Err()
}
// DeleteConfig removes a configuration value. Deleting an absent key
// is a silent no-op.
func (s *DoltStore) DeleteConfig(ctx context.Context, key string) error {
	if _, err := s.db.ExecContext(ctx, "DELETE FROM config WHERE `key` = ?", key); err != nil {
		return fmt.Errorf("failed to delete config %s: %w", key, err)
	}
	return nil
}
// SetMetadata stores a metadata value via upsert. Mirrors SetConfig
// but targets the metadata table.
func (s *DoltStore) SetMetadata(ctx context.Context, key, value string) error {
	if _, err := s.db.ExecContext(ctx, `
	INSERT INTO metadata (`+"`key`"+`, value) VALUES (?, ?)
	ON DUPLICATE KEY UPDATE value = VALUES(value)
	`, key, value); err != nil {
		return fmt.Errorf("failed to set metadata %s: %w", key, err)
	}
	return nil
}
// GetMetadata retrieves a metadata value. A missing key yields the
// empty string with no error, mirroring GetConfig.
func (s *DoltStore) GetMetadata(ctx context.Context, key string) (string, error) {
	var value string
	err := s.db.QueryRowContext(ctx, "SELECT value FROM metadata WHERE `key` = ?", key).Scan(&value)
	switch {
	case err == sql.ErrNoRows:
		return "", nil
	case err != nil:
		return "", fmt.Errorf("failed to get metadata %s: %w", key, err)
	default:
		return value, nil
	}
}
// GetCustomStatuses returns the custom status values stored under the
// "status.custom" config key as a comma-separated list; nil when unset.
func (s *DoltStore) GetCustomStatuses(ctx context.Context) ([]string, error) {
	raw, err := s.GetConfig(ctx, "status.custom")
	if err != nil || raw == "" {
		return nil, err
	}
	return strings.Split(raw, ","), nil
}
// GetCustomTypes returns the custom issue type values stored under the
// "types.custom" config key as a comma-separated list; nil when unset.
func (s *DoltStore) GetCustomTypes(ctx context.Context) ([]string, error) {
	raw, err := s.GetConfig(ctx, "types.custom")
	if err != nil || raw == "" {
		return nil, err
	}
	return strings.Split(raw, ","), nil
}

View File

@@ -0,0 +1,496 @@
package dolt
import (
"context"
"database/sql"
"fmt"
"strings"
"github.com/steveyegge/beads/internal/types"
)
// AddDependency records that dep.IssueID depends on dep.DependsOnID.
// An existing row for the same pair is updated in place (type and
// metadata), so the call is idempotent.
func (s *DoltStore) AddDependency(ctx context.Context, dep *types.Dependency, actor string) error {
	meta := dep.Metadata
	if meta == "" {
		meta = "{}" // store an empty JSON object rather than an empty string
	}
	_, err := s.db.ExecContext(ctx, `
	INSERT INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
	VALUES (?, ?, ?, NOW(), ?, ?, ?)
	ON DUPLICATE KEY UPDATE type = VALUES(type), metadata = VALUES(metadata)
	`, dep.IssueID, dep.DependsOnID, dep.Type, actor, meta, dep.ThreadID)
	if err != nil {
		return fmt.Errorf("failed to add dependency: %w", err)
	}
	return nil
}
// RemoveDependency deletes the dependency edge issueID -> dependsOnID.
// The actor parameter exists for interface parity; it is not recorded
// by this backend.
func (s *DoltStore) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
	if _, err := s.db.ExecContext(ctx, `
	DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?
	`, issueID, dependsOnID); err != nil {
		return fmt.Errorf("failed to remove dependency: %w", err)
	}
	return nil
}
// GetDependencies returns the full issues that issueID depends on,
// ordered by priority then recency.
func (s *DoltStore) GetDependencies(ctx context.Context, issueID string) ([]*types.Issue, error) {
	idRows, err := s.db.QueryContext(ctx, `
	SELECT i.id FROM issues i
	JOIN dependencies d ON i.id = d.depends_on_id
	WHERE d.issue_id = ?
	ORDER BY i.priority ASC, i.created_at DESC
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependencies: %w", err)
	}
	defer idRows.Close()
	return s.scanIssueIDs(ctx, idRows)
}
// GetDependents returns the full issues that depend on issueID,
// ordered by priority then recency.
func (s *DoltStore) GetDependents(ctx context.Context, issueID string) ([]*types.Issue, error) {
	idRows, err := s.db.QueryContext(ctx, `
	SELECT i.id FROM issues i
	JOIN dependencies d ON i.id = d.issue_id
	WHERE d.depends_on_id = ?
	ORDER BY i.priority ASC, i.created_at DESC
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependents: %w", err)
	}
	defer idRows.Close()
	return s.scanIssueIDs(ctx, idRows)
}
// GetDependenciesWithMetadata returns, for each dependency of issueID,
// the full depended-on issue plus the dependency type.
//
// NOTE(review): the query selects created_at, created_by, metadata and
// thread_id, but only the dependency type is copied into the result;
// the other scanned columns are discarded. Confirm whether
// IssueWithDependencyMetadata has fields that should carry them.
// NOTE(review): one GetIssue call per row (N+1 pattern) — acceptable
// for small dependency lists, revisit if lists grow.
func (s *DoltStore) GetDependenciesWithMetadata(ctx context.Context, issueID string) ([]*types.IssueWithDependencyMetadata, error) {
	rows, err := s.db.QueryContext(ctx, `
	SELECT d.depends_on_id, d.type, d.created_at, d.created_by, d.metadata, d.thread_id
	FROM dependencies d
	WHERE d.issue_id = ?
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependencies with metadata: %w", err)
	}
	defer rows.Close()
	var results []*types.IssueWithDependencyMetadata
	for rows.Next() {
		var depID, depType, createdBy string
		var createdAt sql.NullTime
		var metadata, threadID sql.NullString
		if err := rows.Scan(&depID, &depType, &createdAt, &createdBy, &metadata, &threadID); err != nil {
			return nil, fmt.Errorf("failed to scan dependency: %w", err)
		}
		issue, err := s.GetIssue(ctx, depID)
		if err != nil {
			return nil, err
		}
		if issue == nil {
			// Dangling edge: the depended-on issue no longer exists.
			continue
		}
		result := &types.IssueWithDependencyMetadata{
			Issue:          *issue,
			DependencyType: types.DependencyType(depType),
		}
		results = append(results, result)
	}
	return results, rows.Err()
}
// GetDependentsWithMetadata returns, for each issue depending on
// issueID, the full dependent issue plus the dependency type.
//
// NOTE(review): as in GetDependenciesWithMetadata, the scanned
// created_at/created_by/metadata/thread_id columns are discarded and
// each row triggers a GetIssue call (N+1).
func (s *DoltStore) GetDependentsWithMetadata(ctx context.Context, issueID string) ([]*types.IssueWithDependencyMetadata, error) {
	rows, err := s.db.QueryContext(ctx, `
	SELECT d.issue_id, d.type, d.created_at, d.created_by, d.metadata, d.thread_id
	FROM dependencies d
	WHERE d.depends_on_id = ?
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependents with metadata: %w", err)
	}
	defer rows.Close()
	var results []*types.IssueWithDependencyMetadata
	for rows.Next() {
		var depID, depType, createdBy string
		var createdAt sql.NullTime
		var metadata, threadID sql.NullString
		if err := rows.Scan(&depID, &depType, &createdAt, &createdBy, &metadata, &threadID); err != nil {
			return nil, fmt.Errorf("failed to scan dependent: %w", err)
		}
		issue, err := s.GetIssue(ctx, depID)
		if err != nil {
			return nil, err
		}
		if issue == nil {
			// Dangling edge: the dependent issue no longer exists.
			continue
		}
		result := &types.IssueWithDependencyMetadata{
			Issue:          *issue,
			DependencyType: types.DependencyType(depType),
		}
		results = append(results, result)
	}
	return results, rows.Err()
}
// GetDependencyRecords returns the raw dependency rows for one issue,
// without resolving the depended-on issues.
func (s *DoltStore) GetDependencyRecords(ctx context.Context, issueID string) ([]*types.Dependency, error) {
	depRows, err := s.db.QueryContext(ctx, `
	SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id
	FROM dependencies
	WHERE issue_id = ?
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependency records: %w", err)
	}
	defer depRows.Close()
	return scanDependencyRows(depRows)
}
// GetAllDependencyRecords loads every dependency row, grouped by the
// depending issue's ID.
func (s *DoltStore) GetAllDependencyRecords(ctx context.Context) (map[string][]*types.Dependency, error) {
	depRows, err := s.db.QueryContext(ctx, `
	SELECT issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id
	FROM dependencies
	ORDER BY issue_id
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to get all dependency records: %w", err)
	}
	defer depRows.Close()
	byIssue := make(map[string][]*types.Dependency)
	for depRows.Next() {
		dep, err := scanDependencyRow(depRows)
		if err != nil {
			return nil, err
		}
		byIssue[dep.IssueID] = append(byIssue[dep.IssueID], dep)
	}
	return byIssue, depRows.Err()
}
// GetDependencyCounts returns, for each requested issue, how many
// 'blocks' dependencies it has (DependencyCount) and how many issues
// it blocks (DependentCount). Every requested ID gets a result entry,
// even when both counts are zero. The IN clause is built from "?"
// placeholders only, so issue IDs are always passed as bind parameters
// (no SQL injection through IDs).
func (s *DoltStore) GetDependencyCounts(ctx context.Context, issueIDs []string) (map[string]*types.DependencyCounts, error) {
	if len(issueIDs) == 0 {
		return make(map[string]*types.DependencyCounts), nil
	}
	placeholders := make([]string, len(issueIDs))
	args := make([]interface{}, len(issueIDs))
	for i, id := range issueIDs {
		placeholders[i] = "?"
		args[i] = id
	}
	inClause := strings.Join(placeholders, ",")
	// Query for dependencies (blockers)
	depQuery := fmt.Sprintf(`
	SELECT issue_id, COUNT(*) as cnt
	FROM dependencies
	WHERE issue_id IN (%s) AND type = 'blocks'
	GROUP BY issue_id
	`, inClause)
	depRows, err := s.db.QueryContext(ctx, depQuery, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependency counts: %w", err)
	}
	defer depRows.Close()
	// Pre-seed zero counts so absent GROUP BY rows still produce entries.
	result := make(map[string]*types.DependencyCounts)
	for _, id := range issueIDs {
		result[id] = &types.DependencyCounts{}
	}
	for depRows.Next() {
		var id string
		var cnt int
		if err := depRows.Scan(&id, &cnt); err != nil {
			return nil, fmt.Errorf("failed to scan dep count: %w", err)
		}
		if c, ok := result[id]; ok {
			c.DependencyCount = cnt
		}
	}
	// Query for dependents (blocking)
	blockingQuery := fmt.Sprintf(`
	SELECT depends_on_id, COUNT(*) as cnt
	FROM dependencies
	WHERE depends_on_id IN (%s) AND type = 'blocks'
	GROUP BY depends_on_id
	`, inClause)
	blockingRows, err := s.db.QueryContext(ctx, blockingQuery, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get blocking counts: %w", err)
	}
	defer blockingRows.Close()
	for blockingRows.Next() {
		var id string
		var cnt int
		if err := blockingRows.Scan(&id, &cnt); err != nil {
			return nil, fmt.Errorf("failed to scan blocking count: %w", err)
		}
		if c, ok := result[id]; ok {
			c.DependentCount = cnt
		}
	}
	return result, nil
}
// GetDependencyTree returns the dependency tree rooted at issueID as a
// flat, depth-annotated node list (TreeNode carries no children).
// reverse walks dependents instead of dependencies; showAllPaths is
// accepted for interface parity but unused by this simple DFS.
func (s *DoltStore) GetDependencyTree(ctx context.Context, issueID string, maxDepth int, showAllPaths bool, reverse bool) ([]*types.TreeNode, error) {
	seen := make(map[string]bool)
	return s.buildDependencyTree(ctx, issueID, 0, maxDepth, reverse, seen)
}
// buildDependencyTree performs a depth-first walk of the dependency
// graph starting at issueID, emitting visited nodes as a flat list
// (TreeNode has no Children field). The visited set breaks cycles and
// deduplicates shared subtrees; maxDepth bounds the walk.
func (s *DoltStore) buildDependencyTree(ctx context.Context, issueID string, depth, maxDepth int, reverse bool, visited map[string]bool) ([]*types.TreeNode, error) {
	if depth >= maxDepth || visited[issueID] {
		return nil, nil
	}
	visited[issueID] = true
	issue, err := s.GetIssue(ctx, issueID)
	if err != nil || issue == nil {
		return nil, err
	}
	// Direction: forward walks blockers, reverse walks dependents.
	query := "SELECT depends_on_id FROM dependencies WHERE issue_id = ?"
	if reverse {
		query = "SELECT issue_id FROM dependencies WHERE depends_on_id = ?"
	}
	rows, err := s.db.QueryContext(ctx, query, issueID)
	if err != nil {
		return nil, err
	}
	// Collect child IDs and close the result set BEFORE recursing: the
	// original deferred Close kept the rows (and their connection) open
	// across the entire recursive descent, and never checked rows.Err().
	var childIDs []string
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			rows.Close()
			return nil, err
		}
		childIDs = append(childIDs, id)
	}
	if err := rows.Err(); err != nil {
		rows.Close()
		return nil, err
	}
	rows.Close()
	node := &types.TreeNode{
		Issue: *issue,
		Depth: depth,
	}
	// TreeNode doesn't have Children field - return flat list
	nodes := []*types.TreeNode{node}
	for _, childID := range childIDs {
		children, err := s.buildDependencyTree(ctx, childID, depth+1, maxDepth, reverse, visited)
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, children...)
	}
	return nodes, nil
}
// DetectCycles finds circular 'blocks' dependencies by loading the
// whole dependency graph into memory and running a DFS with a
// recursion stack from every unvisited node.
//
// NOTE(review): the inner dfs can only return true via a deeper call,
// but the branch that actually records a cycle never returns true — so
// dfs always returns false and the `if dfs(neighbor) { return true }`
// early-exit is effectively dead. The net effect is that all back
// edges reachable from each start node are recorded; confirm whether
// stop-at-first-cycle was intended instead.
func (s *DoltStore) DetectCycles(ctx context.Context) ([][]*types.Issue, error) {
	// Get all dependencies
	deps, err := s.GetAllDependencyRecords(ctx)
	if err != nil {
		return nil, err
	}
	// Build adjacency list (only 'blocks' edges participate in cycles)
	graph := make(map[string][]string)
	for issueID, records := range deps {
		for _, dep := range records {
			if dep.Type == types.DepBlocks {
				graph[issueID] = append(graph[issueID], dep.DependsOnID)
			}
		}
	}
	// Find cycles using DFS
	var cycles [][]*types.Issue
	visited := make(map[string]bool)
	recStack := make(map[string]bool)
	path := make([]string, 0)
	var dfs func(node string) bool
	dfs = func(node string) bool {
		visited[node] = true
		recStack[node] = true
		path = append(path, node)
		for _, neighbor := range graph[node] {
			if !visited[neighbor] {
				if dfs(neighbor) {
					return true
				}
			} else if recStack[neighbor] {
				// Found cycle - extract it: the cycle runs from the
				// neighbor's position on the current path to the top.
				cycleStart := -1
				for i, n := range path {
					if n == neighbor {
						cycleStart = i
						break
					}
				}
				if cycleStart >= 0 {
					cyclePath := path[cycleStart:]
					var cycleIssues []*types.Issue
					for _, id := range cyclePath {
						// Best-effort load; issues deleted since the
						// graph snapshot are silently skipped.
						issue, _ := s.GetIssue(ctx, id)
						if issue != nil {
							cycleIssues = append(cycleIssues, issue)
						}
					}
					if len(cycleIssues) > 0 {
						cycles = append(cycles, cycleIssues)
					}
				}
			}
		}
		path = path[:len(path)-1]
		recStack[node] = false
		return false
	}
	for node := range graph {
		if !visited[node] {
			dfs(node)
		}
	}
	return cycles, nil
}
// IsBlocked reports whether issueID has at least one open blocker and
// returns the blocking issue IDs. A blocker counts as open while its
// status is open, in_progress, blocked, deferred, or hooked.
func (s *DoltStore) IsBlocked(ctx context.Context, issueID string) (bool, []string, error) {
	blockerRows, err := s.db.QueryContext(ctx, `
	SELECT d.depends_on_id
	FROM dependencies d
	JOIN issues i ON d.depends_on_id = i.id
	WHERE d.issue_id = ?
	AND d.type = 'blocks'
	AND i.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
	`, issueID)
	if err != nil {
		return false, nil, fmt.Errorf("failed to check blockers: %w", err)
	}
	defer blockerRows.Close()
	var blockers []string
	for blockerRows.Next() {
		var blockerID string
		if err := blockerRows.Scan(&blockerID); err != nil {
			return false, nil, err
		}
		blockers = append(blockers, blockerID)
	}
	return len(blockers) > 0, blockers, blockerRows.Err()
}
// GetNewlyUnblockedByClose finds issues that become unblocked when an
// issue is closed: open/blocked issues that have a 'blocks' edge on the
// closed issue, and — via the NOT EXISTS subquery — no OTHER blocker
// that is still in an open-like status. Those are exactly the issues
// whose last remaining open blocker is the one being closed.
func (s *DoltStore) GetNewlyUnblockedByClose(ctx context.Context, closedIssueID string) ([]*types.Issue, error) {
	// Find issues that were blocked only by the closed issue
	rows, err := s.db.QueryContext(ctx, `
	SELECT DISTINCT d.issue_id
	FROM dependencies d
	JOIN issues i ON d.issue_id = i.id
	WHERE d.depends_on_id = ?
	AND d.type = 'blocks'
	AND i.status IN ('open', 'blocked')
	AND NOT EXISTS (
	SELECT 1 FROM dependencies d2
	JOIN issues blocker ON d2.depends_on_id = blocker.id
	WHERE d2.issue_id = d.issue_id
	AND d2.type = 'blocks'
	AND d2.depends_on_id != ?
	AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
	)
	`, closedIssueID, closedIssueID)
	if err != nil {
		return nil, fmt.Errorf("failed to find newly unblocked: %w", err)
	}
	defer rows.Close()
	return s.scanIssueIDs(ctx, rows)
}
// Helper functions
// scanIssueIDs drains an id-only result set, loading the full issue for
// each id via GetIssue and skipping ids that no longer resolve.
func (s *DoltStore) scanIssueIDs(ctx context.Context, rows *sql.Rows) ([]*types.Issue, error) {
	var out []*types.Issue
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, fmt.Errorf("failed to scan issue id: %w", err)
		}
		iss, err := s.GetIssue(ctx, id)
		if err != nil {
			return nil, err
		}
		if iss == nil {
			continue // row vanished between queries
		}
		out = append(out, iss)
	}
	return out, rows.Err()
}
// scanDependencyRows converts an entire dependencies result set into a
// slice, delegating per-row scanning to scanDependencyRow.
func scanDependencyRows(rows *sql.Rows) ([]*types.Dependency, error) {
	var out []*types.Dependency
	for rows.Next() {
		d, err := scanDependencyRow(rows)
		if err != nil {
			return nil, err
		}
		out = append(out, d)
	}
	return out, rows.Err()
}
// scanDependencyRow scans one dependencies row into a types.Dependency.
// Expected column order: issue_id, depends_on_id, type, created_at,
// created_by, metadata, thread_id.
func scanDependencyRow(rows *sql.Rows) (*types.Dependency, error) {
	var dep types.Dependency
	var createdAt sql.NullTime
	var metadata, threadID sql.NullString
	if err := rows.Scan(&dep.IssueID, &dep.DependsOnID, &dep.Type, &createdAt, &dep.CreatedBy, &metadata, &threadID); err != nil {
		return nil, fmt.Errorf("failed to scan dependency: %w", err)
	}
	if createdAt.Valid {
		dep.CreatedAt = createdAt.Time
	}
	// BUG FIX: metadata was scanned but never copied into the result,
	// so callers always saw an empty Metadata field even though
	// AddDependency persists it.
	if metadata.Valid {
		dep.Metadata = metadata.String
	}
	if threadID.Valid {
		dep.ThreadID = threadID.String
	}
	return &dep, nil
}

View File

@@ -0,0 +1,108 @@
package dolt
import (
	"context"
	"database/sql"
	"fmt"
	"strings"
	"time"
)
// GetDirtyIssues lists the IDs of issues modified since the last
// export, oldest-marked first.
func (s *DoltStore) GetDirtyIssues(ctx context.Context) ([]string, error) {
	rows, err := s.db.QueryContext(ctx, `
	SELECT issue_id FROM dirty_issues ORDER BY marked_at ASC
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to get dirty issues: %w", err)
	}
	defer rows.Close()
	var dirty []string
	for rows.Next() {
		var issueID string
		if err := rows.Scan(&issueID); err != nil {
			return nil, fmt.Errorf("failed to scan issue id: %w", err)
		}
		dirty = append(dirty, issueID)
	}
	return dirty, rows.Err()
}
// GetDirtyIssueHash returns the content hash for a specific dirty
// issue. An issue that is not in the dirty list yields "" with no
// error, matching the not-found convention used by GetConfig and
// GetExportHash; the original treated ErrNoRows as a failure.
func (s *DoltStore) GetDirtyIssueHash(ctx context.Context, issueID string) (string, error) {
	var hash string
	err := s.db.QueryRowContext(ctx, `
	SELECT i.content_hash FROM issues i
	JOIN dirty_issues d ON i.id = d.issue_id
	WHERE d.issue_id = ?
	`, issueID).Scan(&hash)
	if err == sql.ErrNoRows {
		return "", nil // not dirty is not an error
	}
	if err != nil {
		return "", fmt.Errorf("failed to get dirty issue hash: %w", err)
	}
	return hash, nil
}
// ClearDirtyIssuesByID removes the given issues from the dirty list.
// A nil or empty slice is a no-op. IDs are always passed as bind
// parameters; only "?" placeholders are interpolated into the query.
func (s *DoltStore) ClearDirtyIssuesByID(ctx context.Context, issueIDs []string) error {
	if len(issueIDs) == 0 {
		return nil
	}
	marks := make([]string, 0, len(issueIDs))
	args := make([]interface{}, 0, len(issueIDs))
	for _, id := range issueIDs {
		marks = append(marks, "?")
		args = append(args, id)
	}
	query := fmt.Sprintf("DELETE FROM dirty_issues WHERE issue_id IN (%s)", strings.Join(marks, ","))
	if _, err := s.db.ExecContext(ctx, query, args...); err != nil {
		return fmt.Errorf("failed to clear dirty issues: %w", err)
	}
	return nil
}
// GetExportHash returns the last export hash for an issue. A missing
// row yields "" with no error; any other query failure is now reported
// instead of being swallowed — the original returned "" for EVERY
// error, masking real database problems as "never exported".
func (s *DoltStore) GetExportHash(ctx context.Context, issueID string) (string, error) {
	var hash string
	err := s.db.QueryRowContext(ctx, `
	SELECT content_hash FROM export_hashes WHERE issue_id = ?
	`, issueID).Scan(&hash)
	if err == sql.ErrNoRows {
		return "", nil // not exported yet is OK
	}
	if err != nil {
		return "", fmt.Errorf("failed to get export hash for %s: %w", issueID, err)
	}
	return hash, nil
}
// SetExportHash records contentHash as the last-exported hash for
// issueID, stamping the current wall-clock time via upsert.
func (s *DoltStore) SetExportHash(ctx context.Context, issueID, contentHash string) error {
	now := time.Now()
	if _, err := s.db.ExecContext(ctx, `
	INSERT INTO export_hashes (issue_id, content_hash, exported_at)
	VALUES (?, ?, ?)
	ON DUPLICATE KEY UPDATE content_hash = VALUES(content_hash), exported_at = VALUES(exported_at)
	`, issueID, contentHash, now); err != nil {
		return fmt.Errorf("failed to set export hash: %w", err)
	}
	return nil
}
// ClearAllExportHashes wipes every stored export hash so the next
// export treats all issues as never exported (full re-export).
func (s *DoltStore) ClearAllExportHashes(ctx context.Context) error {
	if _, err := s.db.ExecContext(ctx, "DELETE FROM export_hashes"); err != nil {
		return fmt.Errorf("failed to clear export hashes: %w", err)
	}
	return nil
}
// GetJSONLFileHash returns the stored JSONL file hash, delegating to
// the metadata table ("" when no hash has been recorded yet, per
// GetMetadata's not-found behavior).
func (s *DoltStore) GetJSONLFileHash(ctx context.Context) (string, error) {
	return s.GetMetadata(ctx, "jsonl_file_hash")
}
// SetJSONLFileHash stores the JSONL file hash in the metadata table,
// overwriting any previous value.
func (s *DoltStore) SetJSONLFileHash(ctx context.Context, fileHash string) error {
	return s.SetMetadata(ctx, "jsonl_file_hash", fileHash)
}

View File

@@ -0,0 +1,853 @@
package dolt
import (
	"context"
	"os"
	"os/exec"
	"testing"

	"github.com/steveyegge/beads/internal/types"
)
// skipIfNoDolt skips the test when the dolt binary cannot be found.
// Uses exec.LookPath so dolt is detected anywhere on PATH rather than
// only at the hard-coded /usr/local/bin/dolt location, which skipped
// these tests on machines with dolt installed elsewhere (homebrew,
// ~/go/bin, Windows, etc.).
func skipIfNoDolt(t *testing.T) {
	t.Helper()
	if _, err := exec.LookPath("dolt"); err != nil {
		t.Skip("Dolt not installed, skipping test")
	}
}
// setupTestStore creates a Dolt store rooted in a fresh temp directory
// and returns it with a cleanup func (close store, remove directory).
// Any failure mid-setup tears down whatever was created so far before
// failing the test, so no temp dirs or open stores leak.
func setupTestStore(t *testing.T) (*DoltStore, func()) {
	t.Helper()
	skipIfNoDolt(t)
	ctx := context.Background()
	tmpDir, err := os.MkdirTemp("", "dolt-test-*")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	cfg := &Config{
		Path:           tmpDir,
		CommitterName:  "test",
		CommitterEmail: "test@example.com",
		Database:       "testdb",
	}
	store, err := New(ctx, cfg)
	if err != nil {
		os.RemoveAll(tmpDir)
		t.Fatalf("failed to create Dolt store: %v", err)
	}
	// Set up issue prefix (required before issues can be created)
	if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
		store.Close()
		os.RemoveAll(tmpDir)
		t.Fatalf("failed to set prefix: %v", err)
	}
	cleanup := func() {
		store.Close()
		os.RemoveAll(tmpDir)
	}
	return store, cleanup
}
// TestNewDoltStore verifies that New creates a working store in a temp
// directory and that Path/IsClosed report sane values right after
// construction.
func TestNewDoltStore(t *testing.T) {
	skipIfNoDolt(t)
	ctx := context.Background()
	tmpDir, err := os.MkdirTemp("", "dolt-test-*")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	cfg := &Config{
		Path:           tmpDir,
		CommitterName:  "test",
		CommitterEmail: "test@example.com",
		Database:       "testdb",
	}
	store, err := New(ctx, cfg)
	if err != nil {
		t.Fatalf("failed to create Dolt store: %v", err)
	}
	defer store.Close()
	// Verify store path
	if store.Path() != tmpDir {
		t.Errorf("expected path %s, got %s", tmpDir, store.Path())
	}
	// Verify not closed
	if store.IsClosed() {
		t.Error("store should not be closed")
	}
}
// TestDoltStoreConfig exercises the full config round trip:
// set, get, get-all, delete, and get-after-delete.
func TestDoltStoreConfig(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()

	if err := store.SetConfig(ctx, "test_key", "test_value"); err != nil {
		t.Fatalf("failed to set config: %v", err)
	}

	got, err := store.GetConfig(ctx, "test_key")
	if err != nil {
		t.Fatalf("failed to get config: %v", err)
	}
	if got != "test_value" {
		t.Errorf("expected 'test_value', got %q", got)
	}

	all, err := store.GetAllConfig(ctx)
	if err != nil {
		t.Fatalf("failed to get all config: %v", err)
	}
	if all["test_key"] != "test_value" {
		t.Errorf("expected test_key in all config")
	}

	if err := store.DeleteConfig(ctx, "test_key"); err != nil {
		t.Fatalf("failed to delete config: %v", err)
	}
	got, err = store.GetConfig(ctx, "test_key")
	if err != nil {
		t.Fatalf("failed to get deleted config: %v", err)
	}
	if got != "" {
		t.Errorf("expected empty value after delete, got %q", got)
	}
}
// TestDoltStoreIssue verifies that CreateIssue generates an ID and that
// the issue round-trips through GetIssue.
func TestDoltStoreIssue(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()

	iss := &types.Issue{
		Title:       "Test Issue",
		Description: "Test description",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, iss, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	if iss.ID == "" {
		t.Error("expected issue ID to be generated")
	}

	got, err := store.GetIssue(ctx, iss.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if got == nil {
		t.Fatal("expected to retrieve issue")
	}
	if got.Title != iss.Title {
		t.Errorf("expected title %q, got %q", iss.Title, got.Title)
	}
}
// TestDoltStoreIssueUpdate verifies that UpdateIssue applies a partial
// field map (title, priority, status) and that the changes persist.
func TestDoltStoreIssueUpdate(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title:       "Original Title",
		Description: "Original description",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Update the issue via a field map (status passed as its string form)
	updates := map[string]interface{}{
		"title":    "Updated Title",
		"priority": 1,
		"status":   string(types.StatusInProgress),
	}
	if err := store.UpdateIssue(ctx, issue.ID, updates, "tester"); err != nil {
		t.Fatalf("failed to update issue: %v", err)
	}
	// Get the updated issue
	retrieved, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if retrieved.Title != "Updated Title" {
		t.Errorf("expected title 'Updated Title', got %q", retrieved.Title)
	}
	if retrieved.Priority != 1 {
		t.Errorf("expected priority 1, got %d", retrieved.Priority)
	}
	if retrieved.Status != types.StatusInProgress {
		t.Errorf("expected status in_progress, got %s", retrieved.Status)
	}
}
// TestDoltStoreIssueClose verifies that CloseIssue moves an issue to
// the closed status and stamps ClosedAt.
func TestDoltStoreIssueClose(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title:       "Issue to Close",
		Description: "Will be closed",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Close the issue (reason, actor, session id)
	if err := store.CloseIssue(ctx, issue.ID, "completed", "tester", "session123"); err != nil {
		t.Fatalf("failed to close issue: %v", err)
	}
	// Get the closed issue
	retrieved, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}
	if retrieved.Status != types.StatusClosed {
		t.Errorf("expected status closed, got %s", retrieved.Status)
	}
	if retrieved.ClosedAt == nil {
		t.Error("expected closed_at to be set")
	}
}
// TestDoltStoreLabels verifies the label lifecycle on an issue:
// add two labels, list them, remove one, list again.
func TestDoltStoreLabels(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		Title:       "Issue with Labels",
		Description: "Test labels",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Add labels
	if err := store.AddLabel(ctx, issue.ID, "bug", "tester"); err != nil {
		t.Fatalf("failed to add label: %v", err)
	}
	if err := store.AddLabel(ctx, issue.ID, "priority", "tester"); err != nil {
		t.Fatalf("failed to add second label: %v", err)
	}
	// Get labels
	labels, err := store.GetLabels(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get labels: %v", err)
	}
	if len(labels) != 2 {
		t.Errorf("expected 2 labels, got %d", len(labels))
	}
	// Remove label
	if err := store.RemoveLabel(ctx, issue.ID, "bug", "tester"); err != nil {
		t.Fatalf("failed to remove label: %v", err)
	}
	// Verify removal
	labels, err = store.GetLabels(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get labels after removal: %v", err)
	}
	if len(labels) != 1 {
		t.Errorf("expected 1 label after removal, got %d", len(labels))
	}
}
// TestDoltStoreDependencies exercises the dependency API end to end:
// add a 'blocks' edge, read it back from both directions
// (GetDependencies / GetDependents), confirm IsBlocked reports the
// blocker, then remove the edge and confirm it is gone.
func TestDoltStoreDependencies(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Create parent and child issues
	parent := &types.Issue{
		ID:          "test-parent",
		Title:       "Parent Issue",
		Description: "Parent description",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeEpic,
	}
	child := &types.Issue{
		ID:          "test-child",
		Title:       "Child Issue",
		Description: "Child description",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, parent, "tester"); err != nil {
		t.Fatalf("failed to create parent issue: %v", err)
	}
	if err := store.CreateIssue(ctx, child, "tester"); err != nil {
		t.Fatalf("failed to create child issue: %v", err)
	}
	// Add dependency (child depends on parent)
	dep := &types.Dependency{
		IssueID:     child.ID,
		DependsOnID: parent.ID,
		Type:        types.DepBlocks,
	}
	if err := store.AddDependency(ctx, dep, "tester"); err != nil {
		t.Fatalf("failed to add dependency: %v", err)
	}
	// Get dependencies
	deps, err := store.GetDependencies(ctx, child.ID)
	if err != nil {
		t.Fatalf("failed to get dependencies: %v", err)
	}
	if len(deps) != 1 {
		t.Errorf("expected 1 dependency, got %d", len(deps))
	}
	if deps[0].ID != parent.ID {
		t.Errorf("expected dependency on %s, got %s", parent.ID, deps[0].ID)
	}
	// Get dependents
	dependents, err := store.GetDependents(ctx, parent.ID)
	if err != nil {
		t.Fatalf("failed to get dependents: %v", err)
	}
	if len(dependents) != 1 {
		t.Errorf("expected 1 dependent, got %d", len(dependents))
	}
	// Check if blocked: open parent with a 'blocks' edge must block child
	blocked, blockers, err := store.IsBlocked(ctx, child.ID)
	if err != nil {
		t.Fatalf("failed to check if blocked: %v", err)
	}
	if !blocked {
		t.Error("expected child to be blocked")
	}
	if len(blockers) != 1 || blockers[0] != parent.ID {
		t.Errorf("expected blocker %s, got %v", parent.ID, blockers)
	}
	// Remove dependency
	if err := store.RemoveDependency(ctx, child.ID, parent.ID, "tester"); err != nil {
		t.Fatalf("failed to remove dependency: %v", err)
	}
	// Verify removal
	deps, err = store.GetDependencies(ctx, child.ID)
	if err != nil {
		t.Fatalf("failed to get dependencies after removal: %v", err)
	}
	if len(deps) != 0 {
		t.Errorf("expected 0 dependencies after removal, got %d", len(deps))
	}
}
// TestDoltStoreSearch seeds three issues and exercises SearchIssues
// with a text query, a status filter, and an issue-type filter.
func TestDoltStoreSearch(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Create multiple issues: two share the phrase "Search test", one is
	// closed, one is a bug — giving each filter a distinct result set.
	issues := []*types.Issue{
		{
			ID:          "test-search-1",
			Title:       "First Issue",
			Description: "Search test one",
			Status:      types.StatusOpen,
			Priority:    1,
			IssueType:   types.TypeTask,
		},
		{
			ID:          "test-search-2",
			Title:       "Second Issue",
			Description: "Search test two",
			Status:      types.StatusOpen,
			Priority:    2,
			IssueType:   types.TypeBug,
		},
		{
			ID:          "test-search-3",
			Title:       "Third Issue",
			Description: "Different content",
			Status:      types.StatusClosed,
			Priority:    3,
			IssueType:   types.TypeTask,
		},
	}
	for _, issue := range issues {
		if err := store.CreateIssue(ctx, issue, "tester"); err != nil {
			t.Fatalf("failed to create issue %s: %v", issue.ID, err)
		}
	}
	// Search by query
	results, err := store.SearchIssues(ctx, "Search test", types.IssueFilter{})
	if err != nil {
		t.Fatalf("failed to search issues: %v", err)
	}
	if len(results) != 2 {
		t.Errorf("expected 2 results for 'Search test', got %d", len(results))
	}
	// Search with status filter
	openStatus := types.StatusOpen
	results, err = store.SearchIssues(ctx, "", types.IssueFilter{Status: &openStatus})
	if err != nil {
		t.Fatalf("failed to search with status filter: %v", err)
	}
	if len(results) != 2 {
		t.Errorf("expected 2 open issues, got %d", len(results))
	}
	// Search by issue type
	bugType := types.TypeBug
	results, err = store.SearchIssues(ctx, "", types.IssueFilter{IssueType: &bugType})
	if err != nil {
		t.Fatalf("failed to search by type: %v", err)
	}
	if len(results) != 1 {
		t.Errorf("expected 1 bug, got %d", len(results))
	}
}
// TestDoltStoreCreateIssues verifies batch creation via CreateIssues
// and that each created issue can be read back individually.
func TestDoltStoreCreateIssues(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Create multiple issues in batch
	issues := []*types.Issue{
		{
			ID:          "test-batch-1",
			Title:       "Batch Issue 1",
			Description: "First batch issue",
			Status:      types.StatusOpen,
			Priority:    1,
			IssueType:   types.TypeTask,
		},
		{
			ID:          "test-batch-2",
			Title:       "Batch Issue 2",
			Description: "Second batch issue",
			Status:      types.StatusOpen,
			Priority:    2,
			IssueType:   types.TypeTask,
		},
	}
	if err := store.CreateIssues(ctx, issues, "tester"); err != nil {
		t.Fatalf("failed to create issues: %v", err)
	}
	// Verify all issues were created
	for _, issue := range issues {
		retrieved, err := store.GetIssue(ctx, issue.ID)
		if err != nil {
			t.Fatalf("failed to get issue %s: %v", issue.ID, err)
		}
		if retrieved == nil {
			// BUG FIX: the original used t.Errorf (non-fatal) and then
			// dereferenced retrieved.Title, panicking on a nil result.
			t.Errorf("expected to retrieve issue %s", issue.ID)
			continue
		}
		if retrieved.Title != issue.Title {
			t.Errorf("expected title %q, got %q", issue.Title, retrieved.Title)
		}
	}
}
// TestDoltStoreComments verifies that comments can be added to an
// issue, receive generated IDs, and are returned in insertion order.
func TestDoltStoreComments(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Create an issue
	issue := &types.Issue{
		ID:          "test-comment-issue",
		Title:       "Issue with Comments",
		Description: "Test comments",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Add comments
	comment1, err := store.AddIssueComment(ctx, issue.ID, "user1", "First comment")
	if err != nil {
		t.Fatalf("failed to add first comment: %v", err)
	}
	if comment1.ID == 0 {
		t.Error("expected comment ID to be generated")
	}
	_, err = store.AddIssueComment(ctx, issue.ID, "user2", "Second comment")
	if err != nil {
		t.Fatalf("failed to add second comment: %v", err)
	}
	// Get comments — expected oldest-first
	comments, err := store.GetIssueComments(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get comments: %v", err)
	}
	if len(comments) != 2 {
		t.Errorf("expected 2 comments, got %d", len(comments))
	}
	if comments[0].Text != "First comment" {
		t.Errorf("expected 'First comment', got %q", comments[0].Text)
	}
}
// TestDoltStoreEvents checks that creating an issue and commenting on it
// both leave events behind that GetEvents returns.
func TestDoltStoreEvents(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Creating the issue records a creation event as a side effect.
	iss := &types.Issue{
		ID:          "test-event-issue",
		Title:       "Issue with Events",
		Description: "Test events",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, iss, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// Commenting produces a second event.
	if err := store.AddComment(ctx, iss.ID, "user1", "A comment"); err != nil {
		t.Fatalf("failed to add comment: %v", err)
	}
	evs, err := store.GetEvents(ctx, iss.ID, 10)
	if err != nil {
		t.Fatalf("failed to get events: %v", err)
	}
	if got := len(evs); got < 2 {
		t.Errorf("expected at least 2 events, got %d", got)
	}
}
// TestDoltStoreDeleteIssue exercises the create → verify → delete → verify
// lifecycle of a single issue.
func TestDoltStoreDeleteIssue(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	target := &types.Issue{
		ID:          "test-delete-issue",
		Title:       "Issue to Delete",
		Description: "Will be deleted",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, target, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	// It must be visible before the delete.
	if got, err := store.GetIssue(ctx, target.ID); err != nil || got == nil {
		t.Fatalf("issue should exist before delete")
	}
	if err := store.DeleteIssue(ctx, target.ID); err != nil {
		t.Fatalf("failed to delete issue: %v", err)
	}
	// And gone afterwards.
	got, err := store.GetIssue(ctx, target.ID)
	if err != nil {
		t.Fatalf("failed to get issue after delete: %v", err)
	}
	if got != nil {
		t.Error("expected issue to be deleted")
	}
}
// TestDoltStoreDirtyTracking checks that newly created issues appear in the
// dirty list and can be cleared from it by ID.
func TestDoltStoreDirtyTracking(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Creating an issue marks it dirty.
	iss := &types.Issue{
		ID:          "test-dirty-issue",
		Title:       "Dirty Issue",
		Description: "Will be dirty",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, iss, "tester"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}
	dirty, err := store.GetDirtyIssues(ctx)
	if err != nil {
		t.Fatalf("failed to get dirty issues: %v", err)
	}
	var found bool
	for i := range dirty {
		if dirty[i] == iss.ID {
			found = true
			break
		}
	}
	if !found {
		t.Error("expected issue to be in dirty list")
	}
	// Clearing by ID removes it from the dirty set.
	if err := store.ClearDirtyIssuesByID(ctx, []string{iss.ID}); err != nil {
		t.Fatalf("failed to clear dirty issues: %v", err)
	}
	dirty, err = store.GetDirtyIssues(ctx)
	if err != nil {
		t.Fatalf("failed to get dirty issues after clear: %v", err)
	}
	for i := range dirty {
		if dirty[i] == iss.ID {
			t.Error("expected issue to be cleared from dirty list")
		}
	}
}
// TestDoltStoreStatistics seeds a mix of open and closed issues and checks
// the aggregate counters reflect at least what was created.
func TestDoltStoreStatistics(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	seed := []*types.Issue{
		{ID: "test-stat-1", Title: "Open 1", Status: types.StatusOpen, Priority: 1, IssueType: types.TypeTask},
		{ID: "test-stat-2", Title: "Open 2", Status: types.StatusOpen, Priority: 2, IssueType: types.TypeTask},
		{ID: "test-stat-3", Title: "Closed", Status: types.StatusClosed, Priority: 1, IssueType: types.TypeTask},
	}
	for _, iss := range seed {
		if err := store.CreateIssue(ctx, iss, "tester"); err != nil {
			t.Fatalf("failed to create issue: %v", err)
		}
	}
	stats, err := store.GetStatistics(ctx)
	if err != nil {
		t.Fatalf("failed to get statistics: %v", err)
	}
	// Lower bounds only: other tests may share the store fixture.
	if stats.OpenIssues < 2 {
		t.Errorf("expected at least 2 open issues, got %d", stats.OpenIssues)
	}
	if stats.ClosedIssues < 1 {
		t.Errorf("expected at least 1 closed issue, got %d", stats.ClosedIssues)
	}
}
// Test SQL injection protection
// TestValidateRef covers accepted and rejected ref formats, including
// SQL-injection shaped inputs.
func TestValidateRef(t *testing.T) {
	cases := []struct {
		name    string
		input   string
		wantErr bool
	}{
		{"valid hash", "abc123def456", false},
		{"valid branch", "main", false},
		{"valid with underscore", "feature_branch", false},
		{"valid with dash", "feature-branch", false},
		{"empty", "", true},
		{"too long", string(make([]byte, 200)), true},
		{"with SQL injection", "main'; DROP TABLE issues; --", true},
		{"with quotes", "main'test", true},
		{"with semicolon", "main;test", true},
		{"with space", "main test", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := validateRef(tc.input)
			if gotErr := err != nil; gotErr != tc.wantErr {
				t.Errorf("validateRef(%q) error = %v, wantErr %v", tc.input, err, tc.wantErr)
			}
		})
	}
}
// TestValidateTableName covers identifier-shaped table names versus names
// containing injection attempts or illegal characters.
func TestValidateTableName(t *testing.T) {
	cases := []struct {
		name    string
		input   string
		wantErr bool
	}{
		{"valid table", "issues", false},
		{"valid with underscore", "dirty_issues", false},
		{"valid with numbers", "table123", false},
		{"empty", "", true},
		{"too long", string(make([]byte, 100)), true},
		{"starts with number", "123table", true},
		{"with SQL injection", "issues'; DROP TABLE issues; --", true},
		{"with space", "my table", true},
		{"with dash", "my-table", true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := validateTableName(tc.input)
			if gotErr := err != nil; gotErr != tc.wantErr {
				t.Errorf("validateTableName(%q) error = %v, wantErr %v", tc.input, err, tc.wantErr)
			}
		})
	}
}
// TestDoltStoreGetReadyWork verifies that issues with open blocking
// dependencies are excluded from ready work while unblocked issues appear.
func TestDoltStoreGetReadyWork(t *testing.T) {
	store, cleanup := setupTestStore(t)
	defer cleanup()
	ctx := context.Background()
	// Three issues: one blocker, one it blocks, and one independent.
	blocker := &types.Issue{
		ID:          "test-blocker",
		Title:       "Blocker",
		Description: "Blocks another issue",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	blocked := &types.Issue{
		ID:          "test-blocked",
		Title:       "Blocked",
		Description: "Is blocked",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
	}
	ready := &types.Issue{
		ID:          "test-ready",
		Title:       "Ready",
		Description: "Is ready",
		Status:      types.StatusOpen,
		Priority:    3,
		IssueType:   types.TypeTask,
	}
	for _, iss := range []*types.Issue{blocker, blocked, ready} {
		if err := store.CreateIssue(ctx, iss, "tester"); err != nil {
			t.Fatalf("failed to create issue %s: %v", iss.ID, err)
		}
	}
	// blocked depends on blocker.
	dep := &types.Dependency{
		IssueID:     blocked.ID,
		DependsOnID: blocker.ID,
		Type:        types.DepBlocks,
	}
	if err := store.AddDependency(ctx, dep, "tester"); err != nil {
		t.Fatalf("failed to add dependency: %v", err)
	}
	readyWork, err := store.GetReadyWork(ctx, types.WorkFilter{})
	if err != nil {
		t.Fatalf("failed to get ready work: %v", err)
	}
	// Collect the returned IDs, then assert membership.
	seen := make(map[string]bool, len(readyWork))
	for _, iss := range readyWork {
		seen[iss.ID] = true
	}
	if !seen[blocker.ID] {
		t.Error("expected blocker to be in ready work")
	}
	if seen[blocked.ID] {
		t.Error("expected blocked issue to NOT be in ready work")
	}
	if !seen[ready.ID] {
		t.Error("expected ready issue to be in ready work")
	}
}

View File

@@ -0,0 +1,160 @@
package dolt
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/steveyegge/beads/internal/types"
)
// AddComment adds a comment event to an issue
func (s *DoltStore) AddComment(ctx context.Context, issueID, actor, comment string) error {
_, err := s.db.ExecContext(ctx, `
INSERT INTO events (issue_id, event_type, actor, comment)
VALUES (?, ?, ?, ?)
`, issueID, types.EventCommented, actor, comment)
if err != nil {
return fmt.Errorf("failed to add comment: %w", err)
}
return nil
}
// GetEvents retrieves events for an issue
func (s *DoltStore) GetEvents(ctx context.Context, issueID string, limit int) ([]*types.Event, error) {
query := `
SELECT id, issue_id, event_type, actor, old_value, new_value, comment, created_at
FROM events
WHERE issue_id = ?
ORDER BY created_at DESC
`
args := []interface{}{issueID}
if limit > 0 {
query += fmt.Sprintf(" LIMIT %d", limit)
}
rows, err := s.db.QueryContext(ctx, query, args...)
if err != nil {
return nil, fmt.Errorf("failed to get events: %w", err)
}
defer rows.Close()
var events []*types.Event
for rows.Next() {
var event types.Event
var oldValue, newValue, comment sql.NullString
if err := rows.Scan(&event.ID, &event.IssueID, &event.EventType, &event.Actor,
&oldValue, &newValue, &comment, &event.CreatedAt); err != nil {
return nil, fmt.Errorf("failed to scan event: %w", err)
}
if oldValue.Valid {
event.OldValue = &oldValue.String
}
if newValue.Valid {
event.NewValue = &newValue.String
}
if comment.Valid {
event.Comment = &comment.String
}
events = append(events, &event)
}
return events, rows.Err()
}
// AddIssueComment adds a comment to an issue (structured comment)
func (s *DoltStore) AddIssueComment(ctx context.Context, issueID, author, text string) (*types.Comment, error) {
result, err := s.db.ExecContext(ctx, `
INSERT INTO comments (issue_id, author, text, created_at)
VALUES (?, ?, ?, ?)
`, issueID, author, text, time.Now())
if err != nil {
return nil, fmt.Errorf("failed to add comment: %w", err)
}
id, err := result.LastInsertId()
if err != nil {
return nil, fmt.Errorf("failed to get comment id: %w", err)
}
return &types.Comment{
ID: id,
IssueID: issueID,
Author: author,
Text: text,
CreatedAt: time.Now(),
}, nil
}
// GetIssueComments retrieves all comments for an issue
func (s *DoltStore) GetIssueComments(ctx context.Context, issueID string) ([]*types.Comment, error) {
rows, err := s.db.QueryContext(ctx, `
SELECT id, issue_id, author, text, created_at
FROM comments
WHERE issue_id = ?
ORDER BY created_at ASC
`, issueID)
if err != nil {
return nil, fmt.Errorf("failed to get comments: %w", err)
}
defer rows.Close()
var comments []*types.Comment
for rows.Next() {
var c types.Comment
if err := rows.Scan(&c.ID, &c.IssueID, &c.Author, &c.Text, &c.CreatedAt); err != nil {
return nil, fmt.Errorf("failed to scan comment: %w", err)
}
comments = append(comments, &c)
}
return comments, rows.Err()
}
// GetCommentsForIssues retrieves comments for multiple issues
func (s *DoltStore) GetCommentsForIssues(ctx context.Context, issueIDs []string) (map[string][]*types.Comment, error) {
if len(issueIDs) == 0 {
return make(map[string][]*types.Comment), nil
}
placeholders := make([]string, len(issueIDs))
args := make([]interface{}, len(issueIDs))
for i, id := range issueIDs {
placeholders[i] = "?"
args[i] = id
}
query := fmt.Sprintf(`
SELECT id, issue_id, author, text, created_at
FROM comments
WHERE issue_id IN (%s)
ORDER BY issue_id, created_at ASC
`, joinStrings(placeholders, ","))
rows, err := s.db.QueryContext(ctx, query, args...)
if err != nil {
return nil, fmt.Errorf("failed to get comments: %w", err)
}
defer rows.Close()
result := make(map[string][]*types.Comment)
for rows.Next() {
var c types.Comment
if err := rows.Scan(&c.ID, &c.IssueID, &c.Author, &c.Text, &c.CreatedAt); err != nil {
return nil, fmt.Errorf("failed to scan comment: %w", err)
}
result[c.IssueID] = append(result[c.IssueID], &c)
}
return result, rows.Err()
}
// joinStrings concatenates strs with sep between elements; it is a local
// stand-in for strings.Join (this file does not import "strings").
//
// Improvement: the original accumulated with string +=, which copies the
// partial result on every iteration (quadratic in total length). This
// version sizes one byte buffer up front and appends into it.
func joinStrings(strs []string, sep string) string {
	if len(strs) == 0 {
		return ""
	}
	total := len(sep) * (len(strs) - 1)
	for _, s := range strs {
		total += len(s)
	}
	buf := make([]byte, 0, total)
	buf = append(buf, strs[0]...)
	for _, s := range strs[1:] {
		buf = append(buf, sep...)
		buf = append(buf, s...)
	}
	return string(buf)
}

View File

@@ -0,0 +1,342 @@
package dolt
import (
"context"
"database/sql"
"fmt"
"regexp"
"time"
"github.com/steveyegge/beads/internal/types"
)
// validRefPattern accepts commit hashes and simple branch names: letters,
// digits, underscore, and dash only.
var validRefPattern = regexp.MustCompile(`^[a-zA-Z0-9_\-]+$`)

// validTablePattern accepts identifier-style table names: a letter or
// underscore followed by letters, digits, or underscores.
var validTablePattern = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)

// validateRef checks that a ref is safe to interpolate into a SQL query.
func validateRef(ref string) error {
	switch {
	case ref == "":
		return fmt.Errorf("ref cannot be empty")
	case len(ref) > 128:
		return fmt.Errorf("ref too long")
	case !validRefPattern.MatchString(ref):
		return fmt.Errorf("invalid ref format: %s", ref)
	}
	return nil
}

// validateTableName checks that a table name is safe to interpolate into a
// SQL query.
func validateTableName(table string) error {
	switch {
	case table == "":
		return fmt.Errorf("table name cannot be empty")
	case len(table) > 64:
		return fmt.Errorf("table name too long")
	case !validTablePattern.MatchString(table):
		return fmt.Errorf("invalid table name: %s", table)
	}
	return nil
}
// IssueHistory represents an issue at a specific point in history, pairing
// the issue's row values at one commit with that commit's metadata.
type IssueHistory struct {
	Issue      *types.Issue // issue snapshot as of CommitHash
	CommitHash string       // Dolt commit hash the snapshot came from
	Committer  string       // committer recorded on that commit
	CommitDate time.Time    // when the commit was made
}
// GetIssueHistory returns the complete history of an issue
func (s *DoltStore) GetIssueHistory(ctx context.Context, issueID string) ([]*IssueHistory, error) {
rows, err := s.db.QueryContext(ctx, `
SELECT
id, title, description, design, acceptance_criteria, notes,
status, priority, issue_type, assignee, owner, created_by,
estimated_minutes, created_at, updated_at, closed_at, close_reason,
pinned, mol_type,
commit_hash, committer, commit_date
FROM dolt_history_issues
WHERE id = ?
ORDER BY commit_date DESC
`, issueID)
if err != nil {
return nil, fmt.Errorf("failed to get issue history: %w", err)
}
defer rows.Close()
var history []*IssueHistory
for rows.Next() {
var h IssueHistory
var issue types.Issue
var closedAt sql.NullTime
var assignee, owner, createdBy, closeReason, molType sql.NullString
var estimatedMinutes sql.NullInt64
var pinned sql.NullInt64
if err := rows.Scan(
&issue.ID, &issue.Title, &issue.Description, &issue.Design, &issue.AcceptanceCriteria, &issue.Notes,
&issue.Status, &issue.Priority, &issue.IssueType, &assignee, &owner, &createdBy,
&estimatedMinutes, &issue.CreatedAt, &issue.UpdatedAt, &closedAt, &closeReason,
&pinned, &molType,
&h.CommitHash, &h.Committer, &h.CommitDate,
); err != nil {
return nil, fmt.Errorf("failed to scan history: %w", err)
}
if closedAt.Valid {
issue.ClosedAt = &closedAt.Time
}
if assignee.Valid {
issue.Assignee = assignee.String
}
if owner.Valid {
issue.Owner = owner.String
}
if createdBy.Valid {
issue.CreatedBy = createdBy.String
}
if estimatedMinutes.Valid {
mins := int(estimatedMinutes.Int64)
issue.EstimatedMinutes = &mins
}
if closeReason.Valid {
issue.CloseReason = closeReason.String
}
if pinned.Valid && pinned.Int64 != 0 {
issue.Pinned = true
}
if molType.Valid {
issue.MolType = types.MolType(molType.String)
}
h.Issue = &issue
history = append(history, &h)
}
return history, rows.Err()
}
// GetIssueAsOf returns an issue as it existed at a specific commit or time
func (s *DoltStore) GetIssueAsOf(ctx context.Context, issueID string, ref string) (*types.Issue, error) {
// Validate ref to prevent SQL injection
if err := validateRef(ref); err != nil {
return nil, fmt.Errorf("invalid ref: %w", err)
}
var issue types.Issue
var closedAt sql.NullTime
var assignee, owner, contentHash sql.NullString
var estimatedMinutes sql.NullInt64
// Note: AS OF requires literal value, but we've validated ref is safe
query := fmt.Sprintf(`
SELECT id, content_hash, title, description, status, priority, issue_type, assignee, estimated_minutes,
created_at, created_by, owner, updated_at, closed_at
FROM issues AS OF '%s'
WHERE id = ?
`, ref)
err := s.db.QueryRowContext(ctx, query, issueID).Scan(
&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Status, &issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
&issue.CreatedAt, &issue.CreatedBy, &owner, &issue.UpdatedAt, &closedAt,
)
if err == sql.ErrNoRows {
return nil, nil
}
if err != nil {
return nil, fmt.Errorf("failed to get issue as of %s: %w", ref, err)
}
if contentHash.Valid {
issue.ContentHash = contentHash.String
}
if closedAt.Valid {
issue.ClosedAt = &closedAt.Time
}
if assignee.Valid {
issue.Assignee = assignee.String
}
if owner.Valid {
issue.Owner = owner.String
}
if estimatedMinutes.Valid {
mins := int(estimatedMinutes.Int64)
issue.EstimatedMinutes = &mins
}
return &issue, nil
}
// DiffEntry represents a change between two commits
type DiffEntry struct {
TableName string
DiffType string // "added", "modified", "removed"
FromCommit string
ToCommit string
RowID string
}
// GetDiff returns changes between two commits
func (s *DoltStore) GetDiff(ctx context.Context, fromRef, toRef string) ([]*DiffEntry, error) {
rows, err := s.db.QueryContext(ctx, `
SELECT table_name, diff_type, from_commit, to_commit
FROM dolt_diff(?, ?)
`, fromRef, toRef)
if err != nil {
return nil, fmt.Errorf("failed to get diff: %w", err)
}
defer rows.Close()
var entries []*DiffEntry
for rows.Next() {
var e DiffEntry
if err := rows.Scan(&e.TableName, &e.DiffType, &e.FromCommit, &e.ToCommit); err != nil {
return nil, fmt.Errorf("failed to scan diff entry: %w", err)
}
entries = append(entries, &e)
}
return entries, rows.Err()
}
// GetIssueDiff returns detailed changes to a specific issue between commits
func (s *DoltStore) GetIssueDiff(ctx context.Context, issueID, fromRef, toRef string) (*IssueDiff, error) {
// Validate refs to prevent SQL injection
if err := validateRef(fromRef); err != nil {
return nil, fmt.Errorf("invalid fromRef: %w", err)
}
if err := validateRef(toRef); err != nil {
return nil, fmt.Errorf("invalid toRef: %w", err)
}
// Note: dolt_diff_issues requires literal values, but we've validated refs are safe
query := fmt.Sprintf(`
SELECT
from_id, to_id,
from_title, to_title,
from_status, to_status,
from_description, to_description,
diff_type
FROM dolt_diff_issues('%s', '%s')
WHERE from_id = ? OR to_id = ?
`, fromRef, toRef)
var diff IssueDiff
var fromID, toID, fromTitle, toTitle, fromStatus, toStatus sql.NullString
var fromDesc, toDesc sql.NullString
err := s.db.QueryRowContext(ctx, query, issueID, issueID).Scan(
&fromID, &toID,
&fromTitle, &toTitle,
&fromStatus, &toStatus,
&fromDesc, &toDesc,
&diff.DiffType,
)
if err == sql.ErrNoRows {
return nil, nil
}
if err != nil {
return nil, fmt.Errorf("failed to get issue diff: %w", err)
}
if fromID.Valid {
diff.FromID = fromID.String
}
if toID.Valid {
diff.ToID = toID.String
}
if fromTitle.Valid {
diff.FromTitle = fromTitle.String
}
if toTitle.Valid {
diff.ToTitle = toTitle.String
}
if fromStatus.Valid {
diff.FromStatus = fromStatus.String
}
if toStatus.Valid {
diff.ToStatus = toStatus.String
}
if fromDesc.Valid {
diff.FromDescription = fromDesc.String
}
if toDesc.Valid {
diff.ToDescription = toDesc.String
}
return &diff, nil
}
// IssueDiff represents changes to an issue between two commits
type IssueDiff struct {
DiffType string // "added", "modified", "removed"
FromID string
ToID string
FromTitle string
ToTitle string
FromStatus string
ToStatus string
FromDescription string
ToDescription string
}
// GetConflicts returns any merge conflicts in the current state
func (s *DoltStore) GetConflicts(ctx context.Context) ([]*Conflict, error) {
rows, err := s.db.QueryContext(ctx, `
SELECT table_name, num_conflicts FROM dolt_conflicts
`)
if err != nil {
return nil, fmt.Errorf("failed to get conflicts: %w", err)
}
defer rows.Close()
var conflicts []*Conflict
for rows.Next() {
var c Conflict
if err := rows.Scan(&c.TableName, &c.NumConflicts); err != nil {
return nil, fmt.Errorf("failed to scan conflict: %w", err)
}
conflicts = append(conflicts, &c)
}
return conflicts, rows.Err()
}
// Conflict represents a merge conflict
type Conflict struct {
TableName string
NumConflicts int
}
// ResolveConflicts resolves conflicts using the specified strategy
func (s *DoltStore) ResolveConflicts(ctx context.Context, table string, strategy string) error {
// Validate table name to prevent SQL injection
if err := validateTableName(table); err != nil {
return fmt.Errorf("invalid table name: %w", err)
}
var query string
switch strategy {
case "ours":
// Note: DOLT_CONFLICTS_RESOLVE requires literal value, but we've validated table is safe
query = fmt.Sprintf("CALL DOLT_CONFLICTS_RESOLVE('--ours', '%s')", table)
case "theirs":
query = fmt.Sprintf("CALL DOLT_CONFLICTS_RESOLVE('--theirs', '%s')", table)
default:
return fmt.Errorf("unknown conflict resolution strategy: %s", strategy)
}
_, err := s.db.ExecContext(ctx, query)
if err != nil {
return fmt.Errorf("failed to resolve conflicts: %w", err)
}
return nil
}

View File

@@ -0,0 +1,726 @@
package dolt
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/steveyegge/beads/internal/types"
)
// CreateIssue creates a new issue
// CreateIssue validates and inserts a single issue inside one transaction,
// recording a creation event and marking the issue dirty for export. If
// issue.ID is empty an ID is generated from the configured prefix. The
// issue may be mutated in place (timestamps, ID, content hash).
func (s *DoltStore) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error {
	// Fetch custom statuses and types for validation
	customStatuses, err := s.GetCustomStatuses(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom statuses: %w", err)
	}
	customTypes, err := s.GetCustomTypes(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom types: %w", err)
	}
	// Set timestamps (only when the caller left them unset)
	now := time.Now()
	if issue.CreatedAt.IsZero() {
		issue.CreatedAt = now
	}
	if issue.UpdatedAt.IsZero() {
		issue.UpdatedAt = now
	}
	// Defensive fix for closed_at invariant: a closed issue must carry a
	// closed_at strictly after its latest known timestamp.
	if issue.Status == types.StatusClosed && issue.ClosedAt == nil {
		maxTime := issue.CreatedAt
		if issue.UpdatedAt.After(maxTime) {
			maxTime = issue.UpdatedAt
		}
		closedAt := maxTime.Add(time.Second)
		issue.ClosedAt = &closedAt
	}
	// Defensive fix for deleted_at invariant (tombstoned issues)
	if issue.Status == types.StatusTombstone && issue.DeletedAt == nil {
		maxTime := issue.CreatedAt
		if issue.UpdatedAt.After(maxTime) {
			maxTime = issue.UpdatedAt
		}
		deletedAt := maxTime.Add(time.Second)
		issue.DeletedAt = &deletedAt
	}
	// Validate issue
	if err := issue.ValidateWithCustom(customStatuses, customTypes); err != nil {
		return fmt.Errorf("validation failed: %w", err)
	}
	// Compute content hash (preserved if the caller already supplied one)
	if issue.ContentHash == "" {
		issue.ContentHash = issue.ComputeContentHash()
	}
	// Start transaction; Rollback after Commit is a harmless no-op.
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	// Get prefix from config; a missing or empty prefix means the database
	// was never initialized, which is reported as a distinct error.
	var configPrefix string
	err = tx.QueryRowContext(ctx, "SELECT value FROM config WHERE `key` = ?", "issue_prefix").Scan(&configPrefix)
	if err == sql.ErrNoRows || configPrefix == "" {
		return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)")
	} else if err != nil {
		return fmt.Errorf("failed to get config: %w", err)
	}
	// Determine prefix for ID generation: a full override wins; otherwise
	// an IDPrefix is appended to the configured prefix.
	prefix := configPrefix
	if issue.PrefixOverride != "" {
		prefix = issue.PrefixOverride
	} else if issue.IDPrefix != "" {
		prefix = configPrefix + "-" + issue.IDPrefix
	}
	// Generate or validate ID
	if issue.ID == "" {
		generatedID, err := generateIssueID(ctx, tx, prefix, issue, actor)
		if err != nil {
			return fmt.Errorf("failed to generate issue ID: %w", err)
		}
		issue.ID = generatedID
	}
	// Insert issue
	if err := insertIssue(ctx, tx, issue); err != nil {
		return fmt.Errorf("failed to insert issue: %w", err)
	}
	// Record creation event
	if err := recordEvent(ctx, tx, issue.ID, types.EventCreated, actor, "", ""); err != nil {
		return fmt.Errorf("failed to record creation event: %w", err)
	}
	// Mark issue as dirty so it is picked up by the next export/sync
	if err := markDirty(ctx, tx, issue.ID); err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}
	return tx.Commit()
}
// CreateIssues creates multiple issues in a single transaction
// CreateIssues creates multiple issues in a single transaction, applying
// the same timestamp defaults, invariant fixes, and validation as
// CreateIssue. Unlike CreateIssue it does not generate IDs or consult the
// issue_prefix config; callers are expected to supply IDs. Any failure
// rolls back the whole batch.
func (s *DoltStore) CreateIssues(ctx context.Context, issues []*types.Issue, actor string) error {
	if len(issues) == 0 {
		return nil
	}
	// Fetch custom statuses and types for validation
	customStatuses, err := s.GetCustomStatuses(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom statuses: %w", err)
	}
	customTypes, err := s.GetCustomTypes(ctx)
	if err != nil {
		return fmt.Errorf("failed to get custom types: %w", err)
	}
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	for _, issue := range issues {
		// Default timestamps only when the caller left them unset.
		now := time.Now()
		if issue.CreatedAt.IsZero() {
			issue.CreatedAt = now
		}
		if issue.UpdatedAt.IsZero() {
			issue.UpdatedAt = now
		}
		// Defensive fix for closed_at invariant
		if issue.Status == types.StatusClosed && issue.ClosedAt == nil {
			maxTime := issue.CreatedAt
			if issue.UpdatedAt.After(maxTime) {
				maxTime = issue.UpdatedAt
			}
			closedAt := maxTime.Add(time.Second)
			issue.ClosedAt = &closedAt
		}
		// Defensive fix for deleted_at invariant
		if issue.Status == types.StatusTombstone && issue.DeletedAt == nil {
			maxTime := issue.CreatedAt
			if issue.UpdatedAt.After(maxTime) {
				maxTime = issue.UpdatedAt
			}
			deletedAt := maxTime.Add(time.Second)
			issue.DeletedAt = &deletedAt
		}
		// Validate issue
		if err := issue.ValidateWithCustom(customStatuses, customTypes); err != nil {
			return fmt.Errorf("validation failed for issue %s: %w", issue.ID, err)
		}
		if issue.ContentHash == "" {
			issue.ContentHash = issue.ComputeContentHash()
		}
		if err := insertIssue(ctx, tx, issue); err != nil {
			return fmt.Errorf("failed to insert issue %s: %w", issue.ID, err)
		}
		if err := recordEvent(ctx, tx, issue.ID, types.EventCreated, actor, "", ""); err != nil {
			return fmt.Errorf("failed to record event for %s: %w", issue.ID, err)
		}
		if err := markDirty(ctx, tx, issue.ID); err != nil {
			return fmt.Errorf("failed to mark dirty %s: %w", issue.ID, err)
		}
	}
	return tx.Commit()
}
// GetIssue retrieves an issue by ID
func (s *DoltStore) GetIssue(ctx context.Context, id string) (*types.Issue, error) {
s.mu.RLock()
defer s.mu.RUnlock()
issue, err := scanIssue(ctx, s.db, id)
if err != nil {
return nil, err
}
if issue == nil {
return nil, nil
}
// Fetch labels
labels, err := s.GetLabels(ctx, issue.ID)
if err != nil {
return nil, fmt.Errorf("failed to get labels: %w", err)
}
issue.Labels = labels
return issue, nil
}
// GetIssueByExternalRef retrieves an issue by external reference
func (s *DoltStore) GetIssueByExternalRef(ctx context.Context, externalRef string) (*types.Issue, error) {
s.mu.RLock()
defer s.mu.RUnlock()
var id string
err := s.db.QueryRowContext(ctx, "SELECT id FROM issues WHERE external_ref = ?", externalRef).Scan(&id)
if err == sql.ErrNoRows {
return nil, nil
}
if err != nil {
return nil, fmt.Errorf("failed to get issue by external_ref: %w", err)
}
return s.GetIssue(ctx, id)
}
// UpdateIssue updates fields on an issue
func (s *DoltStore) UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error {
oldIssue, err := s.GetIssue(ctx, id)
if err != nil {
return fmt.Errorf("failed to get issue for update: %w", err)
}
if oldIssue == nil {
return fmt.Errorf("issue %s not found", id)
}
// Build update query
setClauses := []string{"updated_at = ?"}
args := []interface{}{time.Now()}
for key, value := range updates {
if !isAllowedUpdateField(key) {
return fmt.Errorf("invalid field for update: %s", key)
}
columnName := key
if key == "wisp" {
columnName = "ephemeral"
}
setClauses = append(setClauses, fmt.Sprintf("`%s` = ?", columnName))
args = append(args, value)
}
// Auto-manage closed_at
setClauses, args = manageClosedAt(oldIssue, updates, setClauses, args)
args = append(args, id)
tx, err := s.db.BeginTx(ctx, nil)
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer func() { _ = tx.Rollback() }()
query := fmt.Sprintf("UPDATE issues SET %s WHERE id = ?", strings.Join(setClauses, ", "))
if _, err := tx.ExecContext(ctx, query, args...); err != nil {
return fmt.Errorf("failed to update issue: %w", err)
}
// Record event
oldData, _ := json.Marshal(oldIssue)
newData, _ := json.Marshal(updates)
eventType := determineEventType(oldIssue, updates)
if err := recordEvent(ctx, tx, id, eventType, actor, string(oldData), string(newData)); err != nil {
return fmt.Errorf("failed to record event: %w", err)
}
if err := markDirty(ctx, tx, id); err != nil {
return fmt.Errorf("failed to mark dirty: %w", err)
}
return tx.Commit()
}
// CloseIssue closes an issue with a reason
func (s *DoltStore) CloseIssue(ctx context.Context, id string, reason string, actor string, session string) error {
now := time.Now()
tx, err := s.db.BeginTx(ctx, nil)
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer func() { _ = tx.Rollback() }()
result, err := tx.ExecContext(ctx, `
UPDATE issues SET status = ?, closed_at = ?, updated_at = ?, close_reason = ?, closed_by_session = ?
WHERE id = ?
`, types.StatusClosed, now, now, reason, session, id)
if err != nil {
return fmt.Errorf("failed to close issue: %w", err)
}
rows, err := result.RowsAffected()
if err != nil {
return fmt.Errorf("failed to get rows affected: %w", err)
}
if rows == 0 {
return fmt.Errorf("issue not found: %s", id)
}
if err := recordEvent(ctx, tx, id, types.EventClosed, actor, "", reason); err != nil {
return fmt.Errorf("failed to record event: %w", err)
}
if err := markDirty(ctx, tx, id); err != nil {
return fmt.Errorf("failed to mark dirty: %w", err)
}
return tx.Commit()
}
// DeleteIssue permanently removes an issue
func (s *DoltStore) DeleteIssue(ctx context.Context, id string) error {
tx, err := s.db.BeginTx(ctx, nil)
if err != nil {
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer func() { _ = tx.Rollback() }()
// Delete related data (foreign keys will cascade, but be explicit)
tables := []string{"dependencies", "events", "comments", "labels", "dirty_issues"}
for _, table := range tables {
if table == "dependencies" {
_, err = tx.ExecContext(ctx, fmt.Sprintf("DELETE FROM %s WHERE issue_id = ? OR depends_on_id = ?", table), id, id)
} else {
_, err = tx.ExecContext(ctx, fmt.Sprintf("DELETE FROM %s WHERE issue_id = ?", table), id)
}
if err != nil {
return fmt.Errorf("failed to delete from %s: %w", table, err)
}
}
result, err := tx.ExecContext(ctx, "DELETE FROM issues WHERE id = ?", id)
if err != nil {
return fmt.Errorf("failed to delete issue: %w", err)
}
rows, err := result.RowsAffected()
if err != nil {
return fmt.Errorf("failed to get rows affected: %w", err)
}
if rows == 0 {
return fmt.Errorf("issue not found: %s", id)
}
return tx.Commit()
}
// =============================================================================
// Helper functions
// =============================================================================
// insertIssue writes a complete issue row within the caller's transaction.
// The VALUES placeholder groups mirror the column-list lines one-for-one;
// when adding a column, keep all three of column list, placeholder group,
// and argument list in the same position or values will shift silently.
func insertIssue(ctx context.Context, tx *sql.Tx, issue *types.Issue) error {
	_, err := tx.ExecContext(ctx, `
		INSERT INTO issues (
			id, content_hash, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes,
			created_at, created_by, owner, updated_at, closed_at, external_ref,
			compaction_level, compacted_at, compacted_at_commit, original_size,
			deleted_at, deleted_by, delete_reason, original_type,
			sender, ephemeral, pinned, is_template, crystallizes,
			mol_type, work_type, quality_score, source_system, source_repo, close_reason,
			event_kind, actor, target, payload,
			await_type, await_id, timeout_ns, waiters,
			hook_bead, role_bead, agent_state, last_activity, role_type, rig,
			due_at, defer_until
		) VALUES (
			?, ?, ?, ?, ?, ?, ?,
			?, ?, ?, ?, ?,
			?, ?, ?, ?, ?, ?,
			?, ?, ?, ?,
			?, ?, ?, ?,
			?, ?, ?, ?, ?,
			?, ?, ?, ?, ?, ?,
			?, ?, ?, ?,
			?, ?, ?, ?,
			?, ?, ?, ?, ?, ?,
			?, ?
		)
	`,
		// Argument order matches the column list above, group by group.
		issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design, issue.AcceptanceCriteria, issue.Notes,
		issue.Status, issue.Priority, issue.IssueType, nullString(issue.Assignee), nullInt(issue.EstimatedMinutes),
		issue.CreatedAt, issue.CreatedBy, issue.Owner, issue.UpdatedAt, issue.ClosedAt, nullStringPtr(issue.ExternalRef),
		issue.CompactionLevel, issue.CompactedAt, nullStringPtr(issue.CompactedAtCommit), nullIntVal(issue.OriginalSize),
		issue.DeletedAt, issue.DeletedBy, issue.DeleteReason, issue.OriginalType,
		issue.Sender, issue.Ephemeral, issue.Pinned, issue.IsTemplate, issue.Crystallizes,
		issue.MolType, issue.WorkType, issue.QualityScore, issue.SourceSystem, issue.SourceRepo, issue.CloseReason,
		issue.EventKind, issue.Actor, issue.Target, issue.Payload,
		issue.AwaitType, issue.AwaitID, issue.Timeout.Nanoseconds(), formatJSONStringArray(issue.Waiters),
		issue.HookBead, issue.RoleBead, issue.AgentState, issue.LastActivity, issue.RoleType, issue.Rig,
		issue.DueAt, issue.DeferUntil,
	)
	return err
}
// scanIssue loads a single issue by ID, returning (nil, nil) when no
// row exists (callers treat a nil issue as "not found", not an error).
//
// The SELECT column order and the Scan destination order are
// positionally coupled — edit both together. Nullable columns are
// scanned into sql.Null* temporaries and copied onto the struct only
// when valid, so absent values leave the zero value in place.
func scanIssue(ctx context.Context, db *sql.DB, id string) (*types.Issue, error) {
	var issue types.Issue
	// Temporaries for nullable columns, grouped roughly by feature area.
	var closedAt, compactedAt, deletedAt, lastActivity, dueAt, deferUntil sql.NullTime
	var estimatedMinutes, originalSize, timeoutNs sql.NullInt64
	var assignee, externalRef, compactedAtCommit, owner sql.NullString
	var contentHash, sourceRepo, closeReason, deletedBy, deleteReason, originalType sql.NullString
	var workType, sourceSystem sql.NullString
	var sender, molType, eventKind, actor, target, payload sql.NullString
	var awaitType, awaitID, waiters sql.NullString
	var hookBead, roleBead, agentState, roleType, rig sql.NullString
	// Booleans are stored as TINYINT(1); scan as integers.
	var ephemeral, pinned, isTemplate, crystallizes sql.NullInt64
	var qualityScore sql.NullFloat64
	err := db.QueryRowContext(ctx, `
		SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes,
			created_at, created_by, owner, updated_at, closed_at, external_ref,
			compaction_level, compacted_at, compacted_at_commit, original_size, source_repo, close_reason,
			deleted_at, deleted_by, delete_reason, original_type,
			sender, ephemeral, pinned, is_template, crystallizes,
			await_type, await_id, timeout_ns, waiters,
			hook_bead, role_bead, agent_state, last_activity, role_type, rig, mol_type,
			event_kind, actor, target, payload,
			due_at, defer_until,
			quality_score, work_type, source_system
		FROM issues
		WHERE id = ?
	`, id).Scan(
		&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
		&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
		&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
		&issue.CreatedAt, &issue.CreatedBy, &owner, &issue.UpdatedAt, &closedAt, &externalRef,
		&issue.CompactionLevel, &compactedAt, &compactedAtCommit, &originalSize, &sourceRepo, &closeReason,
		&deletedAt, &deletedBy, &deleteReason, &originalType,
		&sender, &ephemeral, &pinned, &isTemplate, &crystallizes,
		&awaitType, &awaitID, &timeoutNs, &waiters,
		&hookBead, &roleBead, &agentState, &lastActivity, &roleType, &rig, &molType,
		&eventKind, &actor, &target, &payload,
		&dueAt, &deferUntil,
		&qualityScore, &workType, &sourceSystem,
	)
	if err == sql.ErrNoRows {
		// Not found is not an error for this helper.
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get issue: %w", err)
	}
	// Map nullable fields
	if contentHash.Valid {
		issue.ContentHash = contentHash.String
	}
	if closedAt.Valid {
		issue.ClosedAt = &closedAt.Time
	}
	if estimatedMinutes.Valid {
		// Copy into a local so the pointer does not alias the scan temp.
		mins := int(estimatedMinutes.Int64)
		issue.EstimatedMinutes = &mins
	}
	if assignee.Valid {
		issue.Assignee = assignee.String
	}
	if owner.Valid {
		issue.Owner = owner.String
	}
	if externalRef.Valid {
		issue.ExternalRef = &externalRef.String
	}
	if compactedAt.Valid {
		issue.CompactedAt = &compactedAt.Time
	}
	if compactedAtCommit.Valid {
		issue.CompactedAtCommit = &compactedAtCommit.String
	}
	if originalSize.Valid {
		issue.OriginalSize = int(originalSize.Int64)
	}
	if sourceRepo.Valid {
		issue.SourceRepo = sourceRepo.String
	}
	if closeReason.Valid {
		issue.CloseReason = closeReason.String
	}
	if deletedAt.Valid {
		issue.DeletedAt = &deletedAt.Time
	}
	if deletedBy.Valid {
		issue.DeletedBy = deletedBy.String
	}
	if deleteReason.Valid {
		issue.DeleteReason = deleteReason.String
	}
	if originalType.Valid {
		issue.OriginalType = originalType.String
	}
	if sender.Valid {
		issue.Sender = sender.String
	}
	// TINYINT(1) columns: any non-zero value is treated as true.
	if ephemeral.Valid && ephemeral.Int64 != 0 {
		issue.Ephemeral = true
	}
	if pinned.Valid && pinned.Int64 != 0 {
		issue.Pinned = true
	}
	if isTemplate.Valid && isTemplate.Int64 != 0 {
		issue.IsTemplate = true
	}
	if crystallizes.Valid && crystallizes.Int64 != 0 {
		issue.Crystallizes = true
	}
	if awaitType.Valid {
		issue.AwaitType = awaitType.String
	}
	if awaitID.Valid {
		issue.AwaitID = awaitID.String
	}
	if timeoutNs.Valid {
		// Stored as raw nanoseconds; see insertIssue.
		issue.Timeout = time.Duration(timeoutNs.Int64)
	}
	if waiters.Valid && waiters.String != "" {
		issue.Waiters = parseJSONStringArray(waiters.String)
	}
	if hookBead.Valid {
		issue.HookBead = hookBead.String
	}
	if roleBead.Valid {
		issue.RoleBead = roleBead.String
	}
	if agentState.Valid {
		issue.AgentState = types.AgentState(agentState.String)
	}
	if lastActivity.Valid {
		issue.LastActivity = &lastActivity.Time
	}
	if roleType.Valid {
		issue.RoleType = roleType.String
	}
	if rig.Valid {
		issue.Rig = rig.String
	}
	if molType.Valid {
		issue.MolType = types.MolType(molType.String)
	}
	if eventKind.Valid {
		issue.EventKind = eventKind.String
	}
	if actor.Valid {
		issue.Actor = actor.String
	}
	if target.Valid {
		issue.Target = target.String
	}
	if payload.Valid {
		issue.Payload = payload.String
	}
	if dueAt.Valid {
		issue.DueAt = &dueAt.Time
	}
	if deferUntil.Valid {
		issue.DeferUntil = &deferUntil.Time
	}
	if qualityScore.Valid {
		qs := float32(qualityScore.Float64)
		issue.QualityScore = &qs
	}
	if workType.Valid {
		issue.WorkType = types.WorkType(workType.String)
	}
	if sourceSystem.Valid {
		issue.SourceSystem = sourceSystem.String
	}
	return &issue, nil
}
// recordEvent appends one row to the events audit table for issueID
// within the supplied transaction. created_at is filled by the column's
// database default.
func recordEvent(ctx context.Context, tx *sql.Tx, issueID string, eventType types.EventType, actor, oldValue, newValue string) error {
	const insertSQL = `
		INSERT INTO events (issue_id, event_type, actor, old_value, new_value)
		VALUES (?, ?, ?, ?, ?)
	`
	_, execErr := tx.ExecContext(ctx, insertSQL, issueID, eventType, actor, oldValue, newValue)
	return execErr
}
// markDirty records that issueID has local changes pending export by
// upserting its timestamp into dirty_issues.
func markDirty(ctx context.Context, tx *sql.Tx, issueID string) error {
	const upsertSQL = `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at)
	`
	if _, execErr := tx.ExecContext(ctx, upsertSQL, issueID, time.Now()); execErr != nil {
		return execErr
	}
	return nil
}
// generateIssueID derives a hash-based issue ID of the form
// "<prefix>-<hhhhhh>", where the suffix is the first six characters of
// the issue's content hash. The ctx, tx, and actor parameters are
// unused by this hash-based scheme but kept for signature parity with
// counter-based generators.
func generateIssueID(ctx context.Context, tx *sql.Tx, prefix string, issue *types.Issue, actor string) (string, error) {
	const shortLen = 6
	digest := issue.ComputeContentHash()
	if len(digest) > shortLen {
		digest = digest[:shortLen]
	}
	return fmt.Sprintf("%s-%s", prefix, digest), nil
}
// allowedUpdateFields is the set of issue columns that generic updates
// may touch. Hoisted to package level so the set is built once instead
// of being reallocated on every isAllowedUpdateField call.
var allowedUpdateFields = map[string]bool{
	"status": true, "priority": true, "title": true, "assignee": true,
	"description": true, "design": true, "acceptance_criteria": true, "notes": true,
	"issue_type": true, "estimated_minutes": true, "external_ref": true,
	"closed_at": true, "close_reason": true, "closed_by_session": true,
	"sender": true, "wisp": true, "pinned": true,
	"hook_bead": true, "role_bead": true, "agent_state": true, "last_activity": true,
	"role_type": true, "rig": true, "mol_type": true,
	"event_category": true, "event_actor": true, "event_target": true, "event_payload": true,
	"due_at": true, "defer_until": true, "await_id": true,
}

// isAllowedUpdateField reports whether key names a column that the
// generic update path is permitted to modify. Because column names
// cannot be bound as SQL parameters, this allow-list is the guard
// against SQL injection via field names.
func isAllowedUpdateField(key string) bool {
	return allowedUpdateFields[key]
}
// manageClosedAt appends closed_at bookkeeping to an UPDATE statement
// being assembled: a transition into closed stamps closed_at with the
// current time; a transition out of closed clears closed_at and
// close_reason. Updates that carry an explicit closed_at, or no status
// change at all, are passed through untouched. Returns the (possibly
// extended) clause and argument slices.
func manageClosedAt(oldIssue *types.Issue, updates map[string]interface{}, setClauses []string, args []interface{}) ([]string, []interface{}) {
	if _, explicit := updates["closed_at"]; explicit {
		// Caller controls closed_at directly; nothing to manage.
		return setClauses, args
	}
	statusVal, hasStatus := updates["status"]
	if !hasStatus {
		return setClauses, args
	}
	// The status value may arrive as a raw string or a types.Status.
	var nextStatus string
	switch v := statusVal.(type) {
	case string:
		nextStatus = v
	case types.Status:
		nextStatus = string(v)
	default:
		return setClauses, args
	}
	switch {
	case nextStatus == string(types.StatusClosed):
		setClauses = append(setClauses, "closed_at = ?")
		args = append(args, time.Now())
	case oldIssue.Status == types.StatusClosed:
		// Reopening: wipe the close timestamp and reason.
		setClauses = append(setClauses, "closed_at = ?", "close_reason = ?")
		args = append(args, nil, "")
	}
	return setClauses, args
}
// determineEventType classifies an update for the audit trail: a status
// change to closed yields EventClosed, a change away from closed yields
// EventReopened, any other status change yields EventStatusChanged, and
// an update without a status change yields EventUpdated.
//
// Fix: the status value is accepted as either a raw string or a
// types.Status, matching how manageClosedAt interprets the same
// updates["status"] entry. Previously a types.Status value failed the
// string type assertion and the event was misclassified as a plain
// EventUpdated.
func determineEventType(oldIssue *types.Issue, updates map[string]interface{}) types.EventType {
	statusVal, hasStatus := updates["status"]
	if !hasStatus {
		return types.EventUpdated
	}
	var newStatus string
	switch v := statusVal.(type) {
	case string:
		newStatus = v
	case types.Status:
		newStatus = string(v)
	default:
		return types.EventUpdated
	}
	if newStatus == string(types.StatusClosed) {
		return types.EventClosed
	}
	if oldIssue.Status == types.StatusClosed {
		return types.EventReopened
	}
	return types.EventStatusChanged
}
// Helper functions for nullable values

// nullString maps an empty string to SQL NULL; any other value passes
// through unchanged.
func nullString(s string) interface{} {
	if s != "" {
		return s
	}
	return nil
}
// nullStringPtr dereferences s for storage, mapping a nil pointer to
// SQL NULL.
func nullStringPtr(s *string) interface{} {
	if s != nil {
		return *s
	}
	return nil
}
// nullInt dereferences i for storage, mapping a nil pointer to SQL
// NULL.
func nullInt(i *int) interface{} {
	if i != nil {
		return *i
	}
	return nil
}
// nullIntVal treats zero as "unset" and stores it as SQL NULL; any
// non-zero value passes through.
func nullIntVal(i int) interface{} {
	if i != 0 {
		return i
	}
	return nil
}
// parseJSONStringArray decodes a JSON array of strings, returning nil
// for an empty input or on any decode failure (best-effort; malformed
// data is treated as absent).
func parseJSONStringArray(s string) []string {
	if s == "" {
		return nil
	}
	var decoded []string
	if json.Unmarshal([]byte(s), &decoded) != nil {
		return nil
	}
	return decoded
}
// formatJSONStringArray serializes arr as a JSON array string. Empty or
// nil slices — and the (practically impossible) marshal failure — map
// to "", the storage representation for "no values".
func formatJSONStringArray(arr []string) string {
	if len(arr) == 0 {
		return ""
	}
	if encoded, err := json.Marshal(arr); err == nil {
		return string(encoded)
	}
	return ""
}

View File

@@ -0,0 +1,118 @@
package dolt
import (
"context"
"fmt"
"strings"
"github.com/steveyegge/beads/internal/types"
)
// AddLabel attaches label to issueID. Adding a label that is already
// present is a no-op thanks to INSERT IGNORE. The actor parameter is
// accepted for interface parity but not recorded here.
func (s *DoltStore) AddLabel(ctx context.Context, issueID, label, actor string) error {
	if _, err := s.db.ExecContext(ctx, `
		INSERT IGNORE INTO labels (issue_id, label) VALUES (?, ?)
	`, issueID, label); err != nil {
		return fmt.Errorf("failed to add label: %w", err)
	}
	return nil
}
// RemoveLabel detaches label from issueID. Removing a label that is not
// present is a silent no-op. The actor parameter is accepted for
// interface parity but not recorded here.
func (s *DoltStore) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
	if _, err := s.db.ExecContext(ctx, `
		DELETE FROM labels WHERE issue_id = ? AND label = ?
	`, issueID, label); err != nil {
		return fmt.Errorf("failed to remove label: %w", err)
	}
	return nil
}
// GetLabels returns the labels attached to issueID in ascending label
// order. An issue with no labels yields a nil slice and no error.
func (s *DoltStore) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	rows, err := s.db.QueryContext(ctx, `
		SELECT label FROM labels WHERE issue_id = ? ORDER BY label
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get labels: %w", err)
	}
	defer rows.Close()
	var collected []string
	for rows.Next() {
		var name string
		if scanErr := rows.Scan(&name); scanErr != nil {
			return nil, fmt.Errorf("failed to scan label: %w", scanErr)
		}
		collected = append(collected, name)
	}
	// Surface any iteration error that terminated the loop early.
	return collected, rows.Err()
}
// GetLabelsForIssues returns a map from issue ID to that issue's sorted
// labels, covering every ID in issueIDs that has at least one label.
// IDs without labels simply have no map entry. An empty input yields an
// empty (non-nil) map.
func (s *DoltStore) GetLabelsForIssues(ctx context.Context, issueIDs []string) (map[string][]string, error) {
	byIssue := make(map[string][]string)
	if len(issueIDs) == 0 {
		return byIssue, nil
	}
	// Build one "?" placeholder per ID for the IN clause.
	placeholders := make([]string, 0, len(issueIDs))
	args := make([]interface{}, 0, len(issueIDs))
	for _, id := range issueIDs {
		placeholders = append(placeholders, "?")
		args = append(args, id)
	}
	query := fmt.Sprintf(`
		SELECT issue_id, label FROM labels
		WHERE issue_id IN (%s)
		ORDER BY issue_id, label
	`, strings.Join(placeholders, ","))
	rows, err := s.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get labels for issues: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var issueID, label string
		if scanErr := rows.Scan(&issueID, &label); scanErr != nil {
			return nil, fmt.Errorf("failed to scan label: %w", scanErr)
		}
		byIssue[issueID] = append(byIssue[issueID], label)
	}
	return byIssue, rows.Err()
}
// GetIssuesByLabel loads every issue carrying the given label, ordered
// by priority then newest-first creation time. Each matching issue is
// hydrated individually via GetIssue (one query per row); issues that
// vanish between the two queries are skipped.
func (s *DoltStore) GetIssuesByLabel(ctx context.Context, label string) ([]*types.Issue, error) {
	rows, err := s.db.QueryContext(ctx, `
		SELECT i.id FROM issues i
		JOIN labels l ON i.id = l.issue_id
		WHERE l.label = ?
		ORDER BY i.priority ASC, i.created_at DESC
	`, label)
	if err != nil {
		return nil, fmt.Errorf("failed to get issues by label: %w", err)
	}
	defer rows.Close()
	var matched []*types.Issue
	for rows.Next() {
		var issueID string
		if scanErr := rows.Scan(&issueID); scanErr != nil {
			return nil, fmt.Errorf("failed to scan issue id: %w", scanErr)
		}
		loaded, loadErr := s.GetIssue(ctx, issueID)
		if loadErr != nil {
			return nil, loadErr
		}
		if loaded != nil {
			matched = append(matched, loaded)
		}
	}
	return matched, rows.Err()
}

View File

@@ -0,0 +1,547 @@
package dolt
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/steveyegge/beads/internal/types"
)
// SearchIssues finds issues matching query and filters
//
// The free-text query matches title, description, or ID via LIKE; every
// populated field of filter adds one AND-combined WHERE clause. The
// whereClauses and args slices are built in lockstep: each appended
// clause's "?" placeholders must be matched by appends to args in the
// same order. Results are ordered by priority then newest-first, loaded
// one-by-one via scanIssueIDs. Tombstoned issues are excluded unless an
// explicit Status filter or IncludeTombstones is set.
func (s *DoltStore) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	whereClauses := []string{}
	args := []interface{}{}
	if query != "" {
		whereClauses = append(whereClauses, "(title LIKE ? OR description LIKE ? OR id LIKE ?)")
		pattern := "%" + query + "%"
		args = append(args, pattern, pattern, pattern)
	}
	// Substring filters on individual text fields.
	if filter.TitleSearch != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		args = append(args, "%"+filter.TitleSearch+"%")
	}
	if filter.TitleContains != "" {
		whereClauses = append(whereClauses, "title LIKE ?")
		args = append(args, "%"+filter.TitleContains+"%")
	}
	if filter.DescriptionContains != "" {
		whereClauses = append(whereClauses, "description LIKE ?")
		args = append(args, "%"+filter.DescriptionContains+"%")
	}
	if filter.NotesContains != "" {
		whereClauses = append(whereClauses, "notes LIKE ?")
		args = append(args, "%"+filter.NotesContains+"%")
	}
	// Status: an explicit filter wins; otherwise tombstones are hidden
	// unless the caller opts in.
	if filter.Status != nil {
		whereClauses = append(whereClauses, "status = ?")
		args = append(args, *filter.Status)
	} else if !filter.IncludeTombstones {
		whereClauses = append(whereClauses, "status != ?")
		args = append(args, types.StatusTombstone)
	}
	if len(filter.ExcludeStatus) > 0 {
		placeholders := make([]string, len(filter.ExcludeStatus))
		for i, s := range filter.ExcludeStatus {
			placeholders[i] = "?"
			args = append(args, string(s))
		}
		whereClauses = append(whereClauses, fmt.Sprintf("status NOT IN (%s)", strings.Join(placeholders, ",")))
	}
	if len(filter.ExcludeTypes) > 0 {
		placeholders := make([]string, len(filter.ExcludeTypes))
		for i, t := range filter.ExcludeTypes {
			placeholders[i] = "?"
			args = append(args, string(t))
		}
		whereClauses = append(whereClauses, fmt.Sprintf("issue_type NOT IN (%s)", strings.Join(placeholders, ",")))
	}
	// Priority: exact match and/or inclusive bounds.
	if filter.Priority != nil {
		whereClauses = append(whereClauses, "priority = ?")
		args = append(args, *filter.Priority)
	}
	if filter.PriorityMin != nil {
		whereClauses = append(whereClauses, "priority >= ?")
		args = append(args, *filter.PriorityMin)
	}
	if filter.PriorityMax != nil {
		whereClauses = append(whereClauses, "priority <= ?")
		args = append(args, *filter.PriorityMax)
	}
	if filter.IssueType != nil {
		whereClauses = append(whereClauses, "issue_type = ?")
		args = append(args, *filter.IssueType)
	}
	if filter.Assignee != nil {
		whereClauses = append(whereClauses, "assignee = ?")
		args = append(args, *filter.Assignee)
	}
	// Date ranges
	if filter.CreatedAfter != nil {
		whereClauses = append(whereClauses, "created_at > ?")
		args = append(args, filter.CreatedAfter.Format(time.RFC3339))
	}
	if filter.CreatedBefore != nil {
		whereClauses = append(whereClauses, "created_at < ?")
		args = append(args, filter.CreatedBefore.Format(time.RFC3339))
	}
	if filter.UpdatedAfter != nil {
		whereClauses = append(whereClauses, "updated_at > ?")
		args = append(args, filter.UpdatedAfter.Format(time.RFC3339))
	}
	if filter.UpdatedBefore != nil {
		whereClauses = append(whereClauses, "updated_at < ?")
		args = append(args, filter.UpdatedBefore.Format(time.RFC3339))
	}
	// Empty/null checks
	if filter.EmptyDescription {
		whereClauses = append(whereClauses, "(description IS NULL OR description = '')")
	}
	if filter.NoAssignee {
		whereClauses = append(whereClauses, "(assignee IS NULL OR assignee = '')")
	}
	if filter.NoLabels {
		whereClauses = append(whereClauses, "id NOT IN (SELECT DISTINCT issue_id FROM labels)")
	}
	// Label filtering (AND)
	if len(filter.Labels) > 0 {
		// One subquery per label so the issue must carry every label.
		for _, label := range filter.Labels {
			whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM labels WHERE label = ?)")
			args = append(args, label)
		}
	}
	// Label filtering (OR)
	if len(filter.LabelsAny) > 0 {
		placeholders := make([]string, len(filter.LabelsAny))
		for i, label := range filter.LabelsAny {
			placeholders[i] = "?"
			args = append(args, label)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (SELECT issue_id FROM labels WHERE label IN (%s))", strings.Join(placeholders, ", ")))
	}
	// ID filtering
	if len(filter.IDs) > 0 {
		placeholders := make([]string, len(filter.IDs))
		for i, id := range filter.IDs {
			placeholders[i] = "?"
			args = append(args, id)
		}
		whereClauses = append(whereClauses, fmt.Sprintf("id IN (%s)", strings.Join(placeholders, ", ")))
	}
	if filter.IDPrefix != "" {
		whereClauses = append(whereClauses, "id LIKE ?")
		args = append(args, filter.IDPrefix+"%")
	}
	// Wisp filtering
	if filter.Ephemeral != nil {
		if *filter.Ephemeral {
			whereClauses = append(whereClauses, "ephemeral = 1")
		} else {
			whereClauses = append(whereClauses, "(ephemeral = 0 OR ephemeral IS NULL)")
		}
	}
	// Pinned filtering
	if filter.Pinned != nil {
		if *filter.Pinned {
			whereClauses = append(whereClauses, "pinned = 1")
		} else {
			whereClauses = append(whereClauses, "(pinned = 0 OR pinned IS NULL)")
		}
	}
	// Template filtering
	if filter.IsTemplate != nil {
		if *filter.IsTemplate {
			whereClauses = append(whereClauses, "is_template = 1")
		} else {
			whereClauses = append(whereClauses, "(is_template = 0 OR is_template IS NULL)")
		}
	}
	// Parent filtering
	if filter.ParentID != nil {
		whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM dependencies WHERE type = 'parent-child' AND depends_on_id = ?)")
		args = append(args, *filter.ParentID)
	}
	// Molecule type filtering
	if filter.MolType != nil {
		whereClauses = append(whereClauses, "mol_type = ?")
		args = append(args, string(*filter.MolType))
	}
	// Time-based scheduling filters
	if filter.Deferred {
		whereClauses = append(whereClauses, "defer_until IS NOT NULL")
	}
	if filter.Overdue {
		whereClauses = append(whereClauses, "due_at IS NOT NULL AND due_at < ? AND status != ?")
		args = append(args, time.Now().Format(time.RFC3339), types.StatusClosed)
	}
	whereSQL := ""
	if len(whereClauses) > 0 {
		whereSQL = "WHERE " + strings.Join(whereClauses, " AND ")
	}
	// Limit is an int interpolated with %d, so it cannot inject SQL.
	limitSQL := ""
	if filter.Limit > 0 {
		limitSQL = fmt.Sprintf(" LIMIT %d", filter.Limit)
	}
	querySQL := fmt.Sprintf(`
		SELECT id FROM issues
		%s
		ORDER BY priority ASC, created_at DESC
		%s
	`, whereSQL, limitSQL)
	rows, err := s.db.QueryContext(ctx, querySQL, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to search issues: %w", err)
	}
	defer rows.Close()
	return s.scanIssueIDs(ctx, rows)
}
// GetReadyWork returns issues that are ready to work on (not blocked)
//
// "Ready" means: status open, not ephemeral, matching any supplied
// priority/type/assignee/label filters, and not appearing as the source
// of a 'blocks' dependency whose blocker is still active. Results are
// ordered by priority then newest-first. whereClauses and args are
// built in lockstep; keep placeholder and argument appends paired.
func (s *DoltStore) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// Base conditions: open and not a wisp (ephemeral).
	whereClauses := []string{"status = 'open'", "(ephemeral = 0 OR ephemeral IS NULL)"}
	args := []interface{}{}
	if filter.Priority != nil {
		whereClauses = append(whereClauses, "priority = ?")
		args = append(args, *filter.Priority)
	}
	if filter.Type != "" {
		whereClauses = append(whereClauses, "issue_type = ?")
		args = append(args, filter.Type)
	}
	if filter.Assignee != nil {
		whereClauses = append(whereClauses, "assignee = ?")
		args = append(args, *filter.Assignee)
	}
	if len(filter.Labels) > 0 {
		// One subquery per label: the issue must carry all of them.
		for _, label := range filter.Labels {
			whereClauses = append(whereClauses, "id IN (SELECT issue_id FROM labels WHERE label = ?)")
			args = append(args, label)
		}
	}
	// Exclude blocked issues using subquery
	whereClauses = append(whereClauses, `
		id NOT IN (
			SELECT DISTINCT d.issue_id
			FROM dependencies d
			JOIN issues blocker ON d.depends_on_id = blocker.id
			WHERE d.type = 'blocks'
			AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
		)
	`)
	whereSQL := "WHERE " + strings.Join(whereClauses, " AND ")
	// Limit is an int interpolated with %d, so it cannot inject SQL.
	limitSQL := ""
	if filter.Limit > 0 {
		limitSQL = fmt.Sprintf(" LIMIT %d", filter.Limit)
	}
	query := fmt.Sprintf(`
		SELECT id FROM issues
		%s
		ORDER BY priority ASC, created_at DESC
		%s
	`, whereSQL, limitSQL)
	rows, err := s.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get ready work: %w", err)
	}
	defer rows.Close()
	return s.scanIssueIDs(ctx, rows)
}
// GetBlockedIssues returns active issues that have at least one active
// 'blocks' dependency, along with the blocker count and blocker IDs.
// Issues that fail to load between the listing query and hydration are
// skipped (best-effort listing). The filter parameter is currently
// unused by this backend — NOTE(review): confirm against the Storage
// interface whether filtering is expected here.
//
// Fix: the per-issue blocker scan previously never checked
// rows.Err() after iteration, so a mid-stream failure could silently
// truncate the BlockedBy list. The scan now lives in activeBlockerIDs,
// which closes its rows via defer and surfaces the iteration error.
func (s *DoltStore) GetBlockedIssues(ctx context.Context, filter types.WorkFilter) ([]*types.BlockedIssue, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	rows, err := s.db.QueryContext(ctx, `
		SELECT i.id, COUNT(d.depends_on_id) as blocked_by_count
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		JOIN issues blocker ON d.depends_on_id = blocker.id
		WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
		AND d.type = 'blocks'
		AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
		GROUP BY i.id
		ORDER BY i.priority ASC, i.created_at DESC
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to get blocked issues: %w", err)
	}
	defer rows.Close()
	var results []*types.BlockedIssue
	for rows.Next() {
		var id string
		var count int
		if err := rows.Scan(&id, &count); err != nil {
			return nil, err
		}
		issue, err := s.GetIssue(ctx, id)
		if err != nil || issue == nil {
			// Issue vanished (or failed to load) between queries; skip it
			// rather than failing the whole listing.
			continue
		}
		blockerIDs, err := s.activeBlockerIDs(ctx, id)
		if err != nil {
			return nil, err
		}
		results = append(results, &types.BlockedIssue{
			Issue:          *issue,
			BlockedByCount: count,
			BlockedBy:      blockerIDs,
		})
	}
	return results, rows.Err()
}

// activeBlockerIDs returns the IDs of still-active issues that block
// issueID via a 'blocks' dependency.
func (s *DoltStore) activeBlockerIDs(ctx context.Context, issueID string) ([]string, error) {
	rows, err := s.db.QueryContext(ctx, `
		SELECT d.depends_on_id
		FROM dependencies d
		JOIN issues blocker ON d.depends_on_id = blocker.id
		WHERE d.issue_id = ?
		AND d.type = 'blocks'
		AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
	`, issueID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var ids []string
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, rows.Err()
}
// GetEpicsEligibleForClosure returns epics whose children are all closed
//
// Children are issues linked to the epic via a 'parent-child'
// dependency. Only epics with at least one child, all of them closed,
// are returned. The HAVING clause references the select-list aliases
// total_children/closed_children — a MySQL extension that Dolt's
// MySQL-compatible engine is relied on to support.
func (s *DoltStore) GetEpicsEligibleForClosure(ctx context.Context) ([]*types.EpicStatus, error) {
	rows, err := s.db.QueryContext(ctx, `
		SELECT e.id,
			(SELECT COUNT(*) FROM dependencies d JOIN issues c ON d.issue_id = c.id
			 WHERE d.depends_on_id = e.id AND d.type = 'parent-child') as total_children,
			(SELECT COUNT(*) FROM dependencies d JOIN issues c ON d.issue_id = c.id
			 WHERE d.depends_on_id = e.id AND d.type = 'parent-child' AND c.status = 'closed') as closed_children
		FROM issues e
		WHERE e.issue_type = 'epic'
		AND e.status != 'closed'
		AND e.status != 'tombstone'
		HAVING total_children > 0 AND total_children = closed_children
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to get epics eligible for closure: %w", err)
	}
	defer rows.Close()
	var results []*types.EpicStatus
	for rows.Next() {
		var id string
		var total, closed int
		if err := rows.Scan(&id, &total, &closed); err != nil {
			return nil, err
		}
		issue, err := s.GetIssue(ctx, id)
		if err != nil || issue == nil {
			// Epic vanished or failed to load between queries; skip it.
			continue
		}
		results = append(results, &types.EpicStatus{
			Epic:             issue,
			TotalChildren:    total,
			ClosedChildren:   closed,
			// Always true given the HAVING clause, but computed for safety.
			EligibleForClose: total > 0 && total == closed,
		})
	}
	return results, rows.Err()
}
// GetStaleIssues returns issues that haven't been updated recently
//
// "Stale" means updated_at older than filter.Days days ago and not
// ephemeral. The status condition defaults to open/in_progress; a
// non-empty filter.Status narrows it to exactly that status via a bound
// placeholder (the interpolated statusClause is one of two fixed
// strings, so no injection is possible). Argument order must stay:
// cutoff first, then the optional status.
func (s *DoltStore) GetStaleIssues(ctx context.Context, filter types.StaleFilter) ([]*types.Issue, error) {
	cutoff := time.Now().AddDate(0, 0, -filter.Days)
	statusClause := "status IN ('open', 'in_progress')"
	if filter.Status != "" {
		statusClause = "status = ?"
	}
	query := fmt.Sprintf(`
		SELECT id FROM issues
		WHERE updated_at < ?
		AND %s
		AND (ephemeral = 0 OR ephemeral IS NULL)
		ORDER BY updated_at ASC
	`, statusClause)
	args := []interface{}{cutoff}
	if filter.Status != "" {
		args = append(args, filter.Status)
	}
	// Limit is an int interpolated with %d, so it cannot inject SQL.
	if filter.Limit > 0 {
		query += fmt.Sprintf(" LIMIT %d", filter.Limit)
	}
	rows, err := s.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get stale issues: %w", err)
	}
	defer rows.Close()
	return s.scanIssueIDs(ctx, rows)
}
// GetStatistics returns aggregate issue counts by status plus the
// pinned count. Tombstoned issues are excluded from every count except
// the tombstone count itself.
//
// Fixes over the original:
//   - The old query filtered WHERE status != 'tombstone', which made
//     the tombstone SUM always 0. The filter is now folded into each
//     CASE expression instead, so TombstoneIssues is a real count while
//     all other counts still exclude tombstones.
//   - SUM over zero rows yields NULL, which fails to scan into an int;
//     COALESCE(..., 0) makes an empty issues table scan as all zeros.
func (s *DoltStore) GetStatistics(ctx context.Context) (*types.Statistics, error) {
	stats := &types.Statistics{}
	err := s.db.QueryRowContext(ctx, `
		SELECT
			COALESCE(SUM(CASE WHEN status != 'tombstone' THEN 1 ELSE 0 END), 0) as total,
			COALESCE(SUM(CASE WHEN status = 'open' THEN 1 ELSE 0 END), 0) as open_count,
			COALESCE(SUM(CASE WHEN status = 'in_progress' THEN 1 ELSE 0 END), 0) as in_progress,
			COALESCE(SUM(CASE WHEN status = 'closed' THEN 1 ELSE 0 END), 0) as closed,
			COALESCE(SUM(CASE WHEN status = 'blocked' THEN 1 ELSE 0 END), 0) as blocked,
			COALESCE(SUM(CASE WHEN status = 'deferred' THEN 1 ELSE 0 END), 0) as deferred,
			COALESCE(SUM(CASE WHEN status = 'tombstone' THEN 1 ELSE 0 END), 0) as tombstone,
			COALESCE(SUM(CASE WHEN pinned = 1 AND status != 'tombstone' THEN 1 ELSE 0 END), 0) as pinned
		FROM issues
	`).Scan(
		&stats.TotalIssues,
		&stats.OpenIssues,
		&stats.InProgressIssues,
		&stats.ClosedIssues,
		&stats.BlockedIssues,
		&stats.DeferredIssues,
		&stats.TombstoneIssues,
		&stats.PinnedIssues,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to get statistics: %w", err)
	}
	return stats, nil
}
// GetMoleculeProgress returns progress stats for a molecule
//
// A molecule's steps are the issues linked to it via 'parent-child'
// dependencies. The returned stats include total/completed/in-progress
// step counts, the molecule's title, and the oldest in-progress step ID
// (if any). Title and current-step lookups are best-effort: their query
// errors are deliberately ignored so a missing molecule row still
// yields counts.
func (s *DoltStore) GetMoleculeProgress(ctx context.Context, moleculeID string) (*types.MoleculeProgressStats, error) {
	stats := &types.MoleculeProgressStats{
		MoleculeID: moleculeID,
	}
	// Get molecule title
	var title sql.NullString
	// Best-effort: only set the title when the row exists and is non-NULL.
	err := s.db.QueryRowContext(ctx, "SELECT title FROM issues WHERE id = ?", moleculeID).Scan(&title)
	if err == nil && title.Valid {
		stats.MoleculeTitle = title.String
	}
	err = s.db.QueryRowContext(ctx, `
		SELECT
			COUNT(*) as total,
			SUM(CASE WHEN status = 'closed' THEN 1 ELSE 0 END) as completed,
			SUM(CASE WHEN status = 'in_progress' THEN 1 ELSE 0 END) as in_progress
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		WHERE d.depends_on_id = ?
		AND d.type = 'parent-child'
	`, moleculeID).Scan(&stats.Total, &stats.Completed, &stats.InProgress)
	if err != nil {
		return nil, fmt.Errorf("failed to get molecule progress: %w", err)
	}
	// Get first in_progress step ID
	var stepID sql.NullString
	// Error intentionally discarded: no in-progress step is a normal state.
	_ = s.db.QueryRowContext(ctx, `
		SELECT i.id FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		WHERE d.depends_on_id = ?
		AND d.type = 'parent-child'
		AND i.status = 'in_progress'
		ORDER BY i.created_at ASC
		LIMIT 1
	`, moleculeID).Scan(&stepID)
	if stepID.Valid {
		stats.CurrentStepID = stepID.String
	}
	return stats, nil
}
// GetNextChildID atomically allocates the next child suffix for a
// parent issue, returning IDs of the form "<parentID>.<n>" starting at
// ".1".
//
// Fix: the counter row is now read with SELECT ... FOR UPDATE so that
// concurrent transactions serialize on the row lock. Previously two
// callers could both read the same last_child between the SELECT and
// the upsert and hand out duplicate child IDs.
func (s *DoltStore) GetNextChildID(ctx context.Context, parentID string) (string, error) {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return "", err
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()
	var lastChild int
	err = tx.QueryRowContext(ctx, "SELECT last_child FROM child_counters WHERE parent_id = ? FOR UPDATE", parentID).Scan(&lastChild)
	if err == sql.ErrNoRows {
		// First child for this parent; the upsert below creates the row.
		lastChild = 0
	} else if err != nil {
		return "", err
	}
	nextChild := lastChild + 1
	_, err = tx.ExecContext(ctx, `
		INSERT INTO child_counters (parent_id, last_child) VALUES (?, ?)
		ON DUPLICATE KEY UPDATE last_child = ?
	`, parentID, nextChild, nextChild)
	if err != nil {
		return "", err
	}
	if err := tx.Commit(); err != nil {
		return "", err
	}
	return fmt.Sprintf("%s.%d", parentID, nextChild), nil
}

View File

@@ -0,0 +1,129 @@
package dolt
import (
"context"
"fmt"
"time"
"github.com/steveyegge/beads/internal/types"
)
// UpdateIssueID updates an issue ID and all its references
//
// In one transaction: renames the issue row (also refreshing its text
// fields from issue and bumping updated_at), rewrites both sides of
// dependency edges, repoints events, labels, and comments, moves the
// dirty-tracking entry to the new ID, and records a 'renamed' audit
// event. Fails with an error if oldID does not exist. The issue row
// must be renamed first so later statements can repoint child rows
// without tripping foreign keys.
func (s *DoltStore) UpdateIssueID(ctx context.Context, oldID, newID string, issue *types.Issue, actor string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	// Rollback is a no-op after a successful Commit.
	defer func() { _ = tx.Rollback() }()
	// Update the issue itself
	result, err := tx.ExecContext(ctx, `
		UPDATE issues
		SET id = ?, title = ?, description = ?, design = ?, acceptance_criteria = ?, notes = ?, updated_at = ?
		WHERE id = ?
	`, newID, issue.Title, issue.Description, issue.Design, issue.AcceptanceCriteria, issue.Notes, time.Now(), oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue ID: %w", err)
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("failed to get rows affected: %w", err)
	}
	if rows == 0 {
		// Nothing matched oldID: refuse to silently rename nothing.
		return fmt.Errorf("issue not found: %s", oldID)
	}
	// Update references in dependencies
	_, err = tx.ExecContext(ctx, `UPDATE dependencies SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update issue_id in dependencies: %w", err)
	}
	_, err = tx.ExecContext(ctx, `UPDATE dependencies SET depends_on_id = ? WHERE depends_on_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update depends_on_id in dependencies: %w", err)
	}
	// Update references in events
	_, err = tx.ExecContext(ctx, `UPDATE events SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update events: %w", err)
	}
	// Update references in labels
	_, err = tx.ExecContext(ctx, `UPDATE labels SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update labels: %w", err)
	}
	// Update references in comments
	_, err = tx.ExecContext(ctx, `UPDATE comments SET issue_id = ? WHERE issue_id = ?`, newID, oldID)
	if err != nil {
		return fmt.Errorf("failed to update comments: %w", err)
	}
	// Update dirty_issues
	_, err = tx.ExecContext(ctx, `
		INSERT INTO dirty_issues (issue_id, marked_at)
		VALUES (?, ?)
		ON DUPLICATE KEY UPDATE marked_at = VALUES(marked_at)
	`, newID, time.Now())
	if err != nil {
		return fmt.Errorf("failed to mark issue dirty: %w", err)
	}
	// Delete old dirty entry
	_, err = tx.ExecContext(ctx, `DELETE FROM dirty_issues WHERE issue_id = ?`, oldID)
	if err != nil {
		return fmt.Errorf("failed to delete old dirty entry: %w", err)
	}
	// Record rename event
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, old_value, new_value)
		VALUES (?, 'renamed', ?, ?, ?)
	`, newID, actor, oldID, newID)
	if err != nil {
		return fmt.Errorf("failed to record rename event: %w", err)
	}
	return tx.Commit()
}
// RenameDependencyPrefix rewrites the issue-ID prefix on both ends of
// every dependency edge, atomically in a single transaction. Rows not
// starting with oldPrefix are untouched.
func (s *DoltStore) RenameDependencyPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer func() { _ = tx.Rollback() }()
	// Both columns get the same rewrite: swap the leading prefix while
	// keeping the remainder of the ID intact.
	rewrites := []struct {
		column string
		query  string
	}{
		{"issue_id", `
		UPDATE dependencies
		SET issue_id = CONCAT(?, SUBSTRING(issue_id, LENGTH(?) + 1))
		WHERE issue_id LIKE CONCAT(?, '%')
	`},
		{"depends_on_id", `
		UPDATE dependencies
		SET depends_on_id = CONCAT(?, SUBSTRING(depends_on_id, LENGTH(?) + 1))
		WHERE depends_on_id LIKE CONCAT(?, '%')
	`},
	}
	for _, rw := range rewrites {
		if _, execErr := tx.ExecContext(ctx, rw.query, newPrefix, oldPrefix, oldPrefix); execErr != nil {
			return fmt.Errorf("failed to update %s in dependencies: %w", rw.column, execErr)
		}
	}
	return tx.Commit()
}
// RenameCounterPrefix satisfies the Storage interface but is a no-op
// for this backend: IDs are derived from content hashes, so there is no
// per-prefix counter state to rename.
func (s *DoltStore) RenameCounterPrefix(ctx context.Context, oldPrefix, newPrefix string) error {
	return nil
}

View File

@@ -0,0 +1,267 @@
package dolt
// schema defines the MySQL-compatible database schema for Dolt.
// This mirrors the SQLite schema but uses MySQL syntax.
const schema = `
-- Issues table
CREATE TABLE IF NOT EXISTS issues (
id VARCHAR(255) PRIMARY KEY,
content_hash VARCHAR(64),
title VARCHAR(500) NOT NULL,
description TEXT NOT NULL,
design TEXT NOT NULL,
acceptance_criteria TEXT NOT NULL,
notes TEXT NOT NULL,
status VARCHAR(32) NOT NULL DEFAULT 'open',
priority INT NOT NULL DEFAULT 2,
issue_type VARCHAR(32) NOT NULL DEFAULT 'task',
assignee VARCHAR(255),
estimated_minutes INT,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by VARCHAR(255) DEFAULT '',
owner VARCHAR(255) DEFAULT '',
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
closed_at DATETIME,
closed_by_session VARCHAR(255) DEFAULT '',
external_ref VARCHAR(255),
compaction_level INT DEFAULT 0,
compacted_at DATETIME,
compacted_at_commit VARCHAR(64),
original_size INT,
deleted_at DATETIME,
deleted_by VARCHAR(255) DEFAULT '',
delete_reason TEXT DEFAULT '',
original_type VARCHAR(32) DEFAULT '',
-- Messaging fields
sender VARCHAR(255) DEFAULT '',
ephemeral TINYINT(1) DEFAULT 0,
-- Pinned field
pinned TINYINT(1) DEFAULT 0,
-- Template field
is_template TINYINT(1) DEFAULT 0,
-- Work economics field (HOP Decision 006)
crystallizes TINYINT(1) DEFAULT 0,
-- Molecule type field
mol_type VARCHAR(32) DEFAULT '',
-- Work type field (Decision 006: mutex vs open_competition)
work_type VARCHAR(32) DEFAULT 'mutex',
-- HOP quality score field (0.0-1.0)
quality_score DOUBLE,
-- Federation source system field
source_system VARCHAR(255) DEFAULT '',
-- Source repo for multi-repo
source_repo VARCHAR(512) DEFAULT '',
-- Close reason
close_reason TEXT DEFAULT '',
-- Event fields
event_kind VARCHAR(32) DEFAULT '',
actor VARCHAR(255) DEFAULT '',
target VARCHAR(255) DEFAULT '',
payload TEXT DEFAULT '',
-- Gate fields
await_type VARCHAR(32) DEFAULT '',
await_id VARCHAR(255) DEFAULT '',
timeout_ns BIGINT DEFAULT 0,
waiters TEXT DEFAULT '',
-- Agent fields
hook_bead VARCHAR(255) DEFAULT '',
role_bead VARCHAR(255) DEFAULT '',
agent_state VARCHAR(32) DEFAULT '',
last_activity DATETIME,
role_type VARCHAR(32) DEFAULT '',
rig VARCHAR(255) DEFAULT '',
-- Time-based scheduling fields
due_at DATETIME,
defer_until DATETIME,
INDEX idx_issues_status (status),
INDEX idx_issues_priority (priority),
INDEX idx_issues_assignee (assignee),
INDEX idx_issues_created_at (created_at),
INDEX idx_issues_external_ref (external_ref)
);
-- Dependencies table (edge schema)
CREATE TABLE IF NOT EXISTS dependencies (
issue_id VARCHAR(255) NOT NULL,
depends_on_id VARCHAR(255) NOT NULL,
type VARCHAR(32) NOT NULL DEFAULT 'blocks',
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by VARCHAR(255) NOT NULL,
metadata JSON DEFAULT (JSON_OBJECT()),
thread_id VARCHAR(255) DEFAULT '',
PRIMARY KEY (issue_id, depends_on_id),
INDEX idx_dependencies_issue (issue_id),
INDEX idx_dependencies_depends_on (depends_on_id),
INDEX idx_dependencies_depends_on_type (depends_on_id, type),
INDEX idx_dependencies_thread (thread_id),
CONSTRAINT fk_dep_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE,
CONSTRAINT fk_dep_depends_on FOREIGN KEY (depends_on_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Labels table
CREATE TABLE IF NOT EXISTS labels (
issue_id VARCHAR(255) NOT NULL,
label VARCHAR(255) NOT NULL,
PRIMARY KEY (issue_id, label),
INDEX idx_labels_label (label),
CONSTRAINT fk_labels_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Comments table
CREATE TABLE IF NOT EXISTS comments (
id BIGINT AUTO_INCREMENT PRIMARY KEY,
issue_id VARCHAR(255) NOT NULL,
author VARCHAR(255) NOT NULL,
text TEXT NOT NULL,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX idx_comments_issue (issue_id),
INDEX idx_comments_created_at (created_at),
CONSTRAINT fk_comments_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Events table (audit trail)
CREATE TABLE IF NOT EXISTS events (
id BIGINT AUTO_INCREMENT PRIMARY KEY,
issue_id VARCHAR(255) NOT NULL,
event_type VARCHAR(32) NOT NULL,
actor VARCHAR(255) NOT NULL,
old_value TEXT,
new_value TEXT,
comment TEXT,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX idx_events_issue (issue_id),
INDEX idx_events_created_at (created_at),
CONSTRAINT fk_events_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Config table
CREATE TABLE IF NOT EXISTS config (
` + "`key`" + ` VARCHAR(255) PRIMARY KEY,
value TEXT NOT NULL
);
-- Metadata table
CREATE TABLE IF NOT EXISTS metadata (
` + "`key`" + ` VARCHAR(255) PRIMARY KEY,
value TEXT NOT NULL
);
-- Dirty issues table (for incremental export)
CREATE TABLE IF NOT EXISTS dirty_issues (
issue_id VARCHAR(255) PRIMARY KEY,
marked_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX idx_dirty_issues_marked_at (marked_at),
CONSTRAINT fk_dirty_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Export hashes table
CREATE TABLE IF NOT EXISTS export_hashes (
issue_id VARCHAR(255) PRIMARY KEY,
content_hash VARCHAR(64) NOT NULL,
exported_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT fk_export_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Child counters table
CREATE TABLE IF NOT EXISTS child_counters (
parent_id VARCHAR(255) PRIMARY KEY,
last_child INT NOT NULL DEFAULT 0,
CONSTRAINT fk_counter_parent FOREIGN KEY (parent_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Issue snapshots table (for compaction)
CREATE TABLE IF NOT EXISTS issue_snapshots (
id BIGINT AUTO_INCREMENT PRIMARY KEY,
issue_id VARCHAR(255) NOT NULL,
snapshot_time DATETIME NOT NULL,
compaction_level INT NOT NULL,
original_size INT NOT NULL,
compressed_size INT NOT NULL,
original_content TEXT NOT NULL,
archived_events TEXT,
INDEX idx_snapshots_issue (issue_id),
INDEX idx_snapshots_level (compaction_level),
CONSTRAINT fk_snapshots_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Compaction snapshots table
CREATE TABLE IF NOT EXISTS compaction_snapshots (
id BIGINT AUTO_INCREMENT PRIMARY KEY,
issue_id VARCHAR(255) NOT NULL,
compaction_level INT NOT NULL,
snapshot_json BLOB NOT NULL,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX idx_comp_snap_issue (issue_id, compaction_level, created_at DESC),
CONSTRAINT fk_comp_snap_issue FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
-- Repository mtimes table (for multi-repo)
CREATE TABLE IF NOT EXISTS repo_mtimes (
repo_path VARCHAR(512) PRIMARY KEY,
jsonl_path VARCHAR(512) NOT NULL,
mtime_ns BIGINT NOT NULL,
last_checked DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
INDEX idx_repo_mtimes_checked (last_checked)
);
`
// defaultConfig contains the default configuration values seeded into the
// config table on first open. INSERT IGNORE makes the statement idempotent:
// keys that already exist (e.g. user-modified settings) are left untouched.
const defaultConfig = `
INSERT IGNORE INTO config (` + "`key`" + `, value) VALUES
('compaction_enabled', 'false'),
('compact_tier1_days', '30'),
('compact_tier1_dep_levels', '2'),
('compact_tier2_days', '90'),
('compact_tier2_dep_levels', '5'),
('compact_tier2_commits', '100'),
('compact_model', 'claude-3-5-haiku-20241022'),
('compact_batch_size', '50'),
('compact_parallel_workers', '5'),
('auto_compact_enabled', 'false');
`
// readyIssuesView is a MySQL-compatible view listing "ready work": open,
// non-ephemeral issues that are not blocked, directly or transitively.
//
// An issue is blocked directly when a 'blocks' dependency points at an issue
// whose status is still active (open/in_progress/blocked/deferred/hooked).
// Blockage then propagates down 'parent-child' edges, with the recursion
// capped at depth 50 as a guard against dependency cycles.
// Note: Dolt supports recursive CTEs like SQLite.
const readyIssuesView = `
CREATE OR REPLACE VIEW ready_issues AS
WITH RECURSIVE
blocked_directly AS (
SELECT DISTINCT d.issue_id
FROM dependencies d
JOIN issues blocker ON d.depends_on_id = blocker.id
WHERE d.type = 'blocks'
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
),
blocked_transitively AS (
SELECT issue_id, 0 as depth
FROM blocked_directly
UNION ALL
SELECT d.issue_id, bt.depth + 1
FROM blocked_transitively bt
JOIN dependencies d ON d.depends_on_id = bt.issue_id
WHERE d.type = 'parent-child'
AND bt.depth < 50
)
SELECT i.*
FROM issues i
WHERE i.status = 'open'
AND (i.ephemeral = 0 OR i.ephemeral IS NULL)
AND NOT EXISTS (
SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
);
`
// blockedIssuesView is a MySQL-compatible view listing active issues that
// have at least one 'blocks' dependency on another still-active issue,
// together with the number of such blockers (blocked_by_count).
// Grouping by i.id alone is valid here because id is the issues primary key
// (MySQL functional-dependency rules allow selecting i.* in that case).
const blockedIssuesView = `
CREATE OR REPLACE VIEW blocked_issues AS
SELECT
i.*,
COUNT(d.depends_on_id) as blocked_by_count
FROM issues i
JOIN dependencies d ON i.id = d.issue_id
JOIN issues blocker ON d.depends_on_id = blocker.id
WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
AND d.type = 'blocks'
AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred', 'hooked')
GROUP BY i.id;
`

View File

@@ -0,0 +1,344 @@
// Package dolt implements the storage interface using Dolt (versioned MySQL-compatible database).
//
// Dolt provides native version control for SQL data with cell-level merge, history queries,
// and federation via Dolt remotes. This backend eliminates the need for JSONL sync layers
// by making the database itself version-controlled.
//
// Key differences from SQLite backend:
// - Uses github.com/dolthub/driver for embedded Dolt access
// - Supports version control operations (commit, push, pull, branch, merge)
// - History queries via AS OF and dolt_history_* tables
// - Cell-level merge instead of line-level JSONL merge
//
// Connection modes:
// - Embedded: No server required, database/sql interface via dolthub/driver
// - Server: Connect to running dolt sql-server for multi-writer scenarios
package dolt
import (
"context"
"database/sql"
"fmt"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
// Import Dolt driver
_ "github.com/dolthub/driver"
)
// DoltStore implements the Storage interface using Dolt.
//
// Construct instances with New; the zero value is unusable. The closed flag
// is atomic, mu serializes access around Close, and branch is updated by
// Checkout. NOTE(review): readOnly is never assigned in the visible code —
// confirm it is wired up elsewhere.
type DoltStore struct {
	db     *sql.DB
	dbPath string      // Absolute path to Dolt database directory (set in New)
	closed atomic.Bool // Tracks whether Close() has been called
	connStr string // Connection string for reconnection
	mu sync.RWMutex // Protects concurrent access
	readOnly bool // True if opened in read-only mode
	// Version control config
	committerName string // Git-style committer identity used for Dolt commits
	committerEmail string
	remote string // Default remote for push/pull (e.g. "origin")
	branch string // Current branch (cached; refreshed by Checkout)
}
// Config holds Dolt database configuration.
//
// Only Path is required; New fills every other field with a default
// (database "beads", committer identity from GIT_AUTHOR_NAME/GIT_AUTHOR_EMAIL
// falling back to "beads"/"beads@local", remote "origin").
type Config struct {
	Path string // Path to Dolt database directory (required)
	CommitterName string // Git-style committer name
	CommitterEmail string // Git-style committer email
	Remote string // Default remote name (e.g., "origin")
	Database string // Database name within Dolt (default: "beads")
}
// New creates a new Dolt storage backend rooted at cfg.Path.
//
// Missing Config fields are filled with defaults: database name "beads",
// committer identity from GIT_AUTHOR_NAME/GIT_AUTHOR_EMAIL (falling back to
// "beads"/"beads@local"), and remote "origin". The database directory is
// created if necessary and the schema is initialized before returning.
// On any failure after the handle is opened, the handle is closed so the
// embedded database is not leaked.
func New(ctx context.Context, cfg *Config) (*DoltStore, error) {
	if cfg.Path == "" {
		return nil, fmt.Errorf("database path is required")
	}
	// Default values
	if cfg.Database == "" {
		cfg.Database = "beads"
	}
	if cfg.CommitterName == "" {
		cfg.CommitterName = os.Getenv("GIT_AUTHOR_NAME")
		if cfg.CommitterName == "" {
			cfg.CommitterName = "beads"
		}
	}
	if cfg.CommitterEmail == "" {
		cfg.CommitterEmail = os.Getenv("GIT_AUTHOR_EMAIL")
		if cfg.CommitterEmail == "" {
			cfg.CommitterEmail = "beads@local"
		}
	}
	if cfg.Remote == "" {
		cfg.Remote = "origin"
	}
	// Ensure directory exists
	if err := os.MkdirAll(cfg.Path, 0o750); err != nil {
		return nil, fmt.Errorf("failed to create database directory: %w", err)
	}
	// Build Dolt connection string
	// Format: file:///path/to/db?commitname=Name&commitemail=email&database=dbname
	// NOTE(review): name/email are interpolated unescaped; values containing
	// '&' or '=' would corrupt the DSN — confirm callers validate them.
	connStr := fmt.Sprintf(
		"file://%s?commitname=%s&commitemail=%s&database=%s",
		cfg.Path, cfg.CommitterName, cfg.CommitterEmail, cfg.Database)
	db, err := sql.Open("dolt", connStr)
	if err != nil {
		return nil, fmt.Errorf("failed to open Dolt database: %w", err)
	}
	// Configure connection pool
	// Dolt embedded mode is single-writer like SQLite
	db.SetMaxOpenConns(1)
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0)
	// Test connection. Close the handle on every failure path below so we
	// don't leak the open database (fix: previously the handle leaked here).
	if err := db.PingContext(ctx); err != nil {
		_ = db.Close()
		return nil, fmt.Errorf("failed to ping Dolt database: %w", err)
	}
	// Convert to absolute path
	absPath, err := filepath.Abs(cfg.Path)
	if err != nil {
		_ = db.Close()
		return nil, fmt.Errorf("failed to get absolute path: %w", err)
	}
	store := &DoltStore{
		db:             db,
		dbPath:         absPath,
		connStr:        connStr,
		committerName:  cfg.CommitterName,
		committerEmail: cfg.CommitterEmail,
		remote:         cfg.Remote,
		branch:         "main",
	}
	// Initialize schema
	if err := store.initSchema(ctx); err != nil {
		_ = db.Close()
		return nil, fmt.Errorf("failed to initialize schema: %w", err)
	}
	return store, nil
}
// initSchema creates the tables, default config rows, and SQL views. Every
// statement uses IF NOT EXISTS / CREATE OR REPLACE / INSERT IGNORE, so it is
// safe to run on every open.
func (s *DoltStore) initSchema(ctx context.Context) error {
	steps := []struct {
		what string
		stmt string
	}{
		{"create schema", schema},
		{"insert default config", defaultConfig},
		{"create ready_issues view", readyIssuesView},
		{"create blocked_issues view", blockedIssuesView},
	}
	for _, step := range steps {
		if _, err := s.db.ExecContext(ctx, step.stmt); err != nil {
			return fmt.Errorf("failed to %s: %w", step.what, err)
		}
	}
	return nil
}
// Close marks the store as closed and releases the underlying connection.
// The mutex is held across the close so it cannot race with Close itself.
func (s *DoltStore) Close() error {
	s.closed.Store(true)
	s.mu.Lock()
	err := s.db.Close()
	s.mu.Unlock()
	return err
}
// Path returns the database directory path (absolute, as computed in New).
func (s *DoltStore) Path() string {
	return s.dbPath
}
// IsClosed returns true if Close() has been called. The flag is atomic, so
// this is safe to call from any goroutine.
func (s *DoltStore) IsClosed() bool {
	return s.closed.Load()
}
// UnderlyingDB returns the underlying *sql.DB connection for callers needing
// raw SQL access. The pool is capped at one connection (see New), so
// long-running statements on it block all other store operations.
func (s *DoltStore) UnderlyingDB() *sql.DB {
	return s.db
}
// UnderlyingConn returns a dedicated connection from the pool. The caller
// must Close it to release it; with the pool capped at one connection (see
// New), holding it blocks every other store operation.
func (s *DoltStore) UnderlyingConn(ctx context.Context) (*sql.Conn, error) {
	return s.db.Conn(ctx)
}
// =============================================================================
// Version Control Operations (Dolt-specific extensions)
// =============================================================================
// Commit creates a Dolt commit with the given message, passing '-Am' to
// stage all changed tables first.
func (s *DoltStore) Commit(ctx context.Context, message string) error {
	if _, err := s.db.ExecContext(ctx, "CALL DOLT_COMMIT('-Am', ?)", message); err != nil {
		return fmt.Errorf("failed to commit: %w", err)
	}
	return nil
}
// Push pushes the current branch's commits to the configured remote.
func (s *DoltStore) Push(ctx context.Context) error {
	if _, err := s.db.ExecContext(ctx, "CALL DOLT_PUSH(?, ?)", s.remote, s.branch); err != nil {
		return fmt.Errorf("failed to push to %s/%s: %w", s.remote, s.branch, err)
	}
	return nil
}
// Pull fetches and merges changes from the configured remote.
func (s *DoltStore) Pull(ctx context.Context) error {
	if _, err := s.db.ExecContext(ctx, "CALL DOLT_PULL(?)", s.remote); err != nil {
		return fmt.Errorf("failed to pull from %s: %w", s.remote, err)
	}
	return nil
}
// Branch creates a new branch with the given name. It does not switch to
// the new branch; use Checkout for that.
func (s *DoltStore) Branch(ctx context.Context, name string) error {
	if _, err := s.db.ExecContext(ctx, "CALL DOLT_BRANCH(?)", name); err != nil {
		return fmt.Errorf("failed to create branch %s: %w", name, err)
	}
	return nil
}
// Checkout switches the working set to the specified branch and records it
// as the store's current branch for subsequent Push calls.
func (s *DoltStore) Checkout(ctx context.Context, branch string) error {
	if _, err := s.db.ExecContext(ctx, "CALL DOLT_CHECKOUT(?)", branch); err != nil {
		return fmt.Errorf("failed to checkout branch %s: %w", branch, err)
	}
	// Guard the cached branch name with the store mutex (the struct documents
	// mu as protecting concurrent access, and Close already takes it);
	// previously this write was unsynchronized against readers such as Push.
	s.mu.Lock()
	s.branch = branch
	s.mu.Unlock()
	return nil
}
// Merge merges the specified branch into the currently checked-out branch.
func (s *DoltStore) Merge(ctx context.Context, branch string) error {
	if _, err := s.db.ExecContext(ctx, "CALL DOLT_MERGE(?)", branch); err != nil {
		return fmt.Errorf("failed to merge branch %s: %w", branch, err)
	}
	return nil
}
// CurrentBranch reports the branch the session is on, as returned by Dolt's
// active_branch() SQL function (queried live, not the cached s.branch).
func (s *DoltStore) CurrentBranch(ctx context.Context) (string, error) {
	var name string
	if err := s.db.QueryRowContext(ctx, "SELECT active_branch()").Scan(&name); err != nil {
		return "", fmt.Errorf("failed to get current branch: %w", err)
	}
	return name, nil
}
// Log returns up to limit commits from the dolt_log system table.
func (s *DoltStore) Log(ctx context.Context, limit int) ([]CommitInfo, error) {
	const query = `
	SELECT commit_hash, committer, email, date, message
	FROM dolt_log
	LIMIT ?
	`
	rows, err := s.db.QueryContext(ctx, query, limit)
	if err != nil {
		return nil, fmt.Errorf("failed to get log: %w", err)
	}
	defer rows.Close()

	var commits []CommitInfo
	for rows.Next() {
		var ci CommitInfo
		if err := rows.Scan(&ci.Hash, &ci.Author, &ci.Email, &ci.Date, &ci.Message); err != nil {
			return nil, fmt.Errorf("failed to scan commit: %w", err)
		}
		commits = append(commits, ci)
	}
	return commits, rows.Err()
}
// CommitInfo represents a single Dolt commit as returned by Log, mirroring
// the columns selected from the dolt_log system table.
type CommitInfo struct {
	Hash string // Commit hash
	Author string // Committer name
	Email string // Committer email
	Date time.Time // Commit timestamp
	Message string // Commit message
}
// HistoryEntry represents a row from a dolt_history_* system table: the
// state of one issue as of one commit.
type HistoryEntry struct {
	CommitHash string // Commit that produced this version
	Committer string // Committer name
	CommitDate time.Time // Commit timestamp
	// Issue data at that commit, keyed by column name.
	IssueData map[string]interface{}
}
// AddRemote registers a Dolt remote under the given name and URL.
func (s *DoltStore) AddRemote(ctx context.Context, name, url string) error {
	if _, err := s.db.ExecContext(ctx, "CALL DOLT_REMOTE('add', ?, ?)", name, url); err != nil {
		return fmt.Errorf("failed to add remote %s: %w", name, err)
	}
	return nil
}
// Status returns the current Dolt status (staged/unstaged changes) by
// reading the dolt_status system table.
func (s *DoltStore) Status(ctx context.Context) (*DoltStatus, error) {
	rows, err := s.db.QueryContext(ctx, "SELECT table_name, staged, status FROM dolt_status")
	if err != nil {
		return nil, fmt.Errorf("failed to get status: %w", err)
	}
	defer rows.Close()
	status := &DoltStatus{
		Staged:   make([]StatusEntry, 0),
		Unstaged: make([]StatusEntry, 0),
	}
	for rows.Next() {
		var tableName string
		var staged bool
		var statusText string
		// Per the Dolt docs, dolt_status.status is a text column ("new table",
		// "modified", "deleted", ...), not an integer; scanning it into an int
		// fails. Scan it as a string and map to the StatusEntry codes.
		if err := rows.Scan(&tableName, &staged, &statusText); err != nil {
			return nil, fmt.Errorf("failed to scan status: %w", err)
		}
		entry := StatusEntry{Table: tableName, Status: statusCode(statusText)}
		if staged {
			status.Staged = append(status.Staged, entry)
		} else {
			status.Unstaged = append(status.Unstaged, entry)
		}
	}
	return status, rows.Err()
}

// statusCode maps a dolt_status.status string to the numeric codes that
// StatusEntry documents (1=new, 2=modified, 3=deleted); unknown strings
// (e.g. conflict states) map to 0.
func statusCode(status string) int {
	switch status {
	case "new table":
		return 1
	case "modified":
		return 2
	case "deleted":
		return 3
	default:
		return 0
	}
}
// DoltStatus represents the current repository status: tables with staged
// changes and tables with unstaged (working-set) changes. Both slices are
// non-nil when produced by Status, even when empty.
type DoltStatus struct {
	Staged []StatusEntry
	Unstaged []StatusEntry
}
// StatusEntry represents one changed table reported by dolt_status.
type StatusEntry struct {
	Table string // Table name
	Status int // 1=new, 2=modified, 3=deleted
}

View File

@@ -0,0 +1,325 @@
package dolt
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/steveyegge/beads/internal/storage"
"github.com/steveyegge/beads/internal/types"
)
// doltTransaction implements storage.Transaction for Dolt; all methods run
// against the wrapped *sql.Tx.
// NOTE(review): store is not referenced by any visible method — confirm it
// is used elsewhere before removing.
type doltTransaction struct {
	tx *sql.Tx
	store *DoltStore
}
// RunInTransaction executes fn within a database transaction. The
// transaction is rolled back if fn returns an error or panics (the panic is
// re-raised), and committed otherwise.
func (s *DoltStore) RunInTransaction(ctx context.Context, fn func(tx storage.Transaction) error) error {
	sqlTx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	tx := &doltTransaction{tx: sqlTx, store: s}
	// Best-effort rollback on panic, then re-panic so the caller sees it.
	defer func() {
		if r := recover(); r != nil {
			_ = sqlTx.Rollback()
			panic(r)
		}
	}()
	if err := fn(tx); err != nil {
		_ = sqlTx.Rollback()
		return err
	}
	// Wrap the commit error (previously returned bare) so callers can tell a
	// failed commit apart from an error produced inside fn; %w keeps
	// errors.Is/As working.
	if err := sqlTx.Commit(); err != nil {
		return fmt.Errorf("failed to commit transaction: %w", err)
	}
	return nil
}
// CreateIssue creates an issue within the transaction.
//
// Zero CreatedAt/UpdatedAt timestamps are stamped with the current time and
// an empty ContentHash is filled from ComputeContentHash; the issue struct
// is mutated in place. The actor parameter is accepted for interface
// compatibility but not recorded by this method.
func (t *doltTransaction) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error {
	now := time.Now()
	if issue.CreatedAt.IsZero() {
		issue.CreatedAt = now
	}
	if issue.UpdatedAt.IsZero() {
		issue.UpdatedAt = now
	}
	if issue.ContentHash == "" {
		issue.ContentHash = issue.ComputeContentHash()
	}
	return insertIssueTx(ctx, t.tx, issue)
}
// CreateIssues creates multiple issues within the transaction, delegating to
// CreateIssue and stopping at the first failure.
func (t *doltTransaction) CreateIssues(ctx context.Context, issues []*types.Issue, actor string) error {
	for i := range issues {
		if err := t.CreateIssue(ctx, issues[i], actor); err != nil {
			return err
		}
	}
	return nil
}
// GetIssue retrieves an issue within the transaction. Returns (nil, nil)
// when no issue with the given id exists (see scanIssueTx).
func (t *doltTransaction) GetIssue(ctx context.Context, id string) (*types.Issue, error) {
	return scanIssueTx(ctx, t.tx, id)
}
// SearchIssues searches for issues within the transaction.
//
// Simplified search for transaction context: an optional free-text query
// matched (LIKE) against title/description/id, plus an optional status
// filter; results are ordered by priority then recency.
func (t *doltTransaction) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
	whereClauses := []string{}
	args := []interface{}{}
	if query != "" {
		whereClauses = append(whereClauses, "(title LIKE ? OR description LIKE ? OR id LIKE ?)")
		pattern := "%" + query + "%"
		args = append(args, pattern, pattern, pattern)
	}
	if filter.Status != nil {
		whereClauses = append(whereClauses, "status = ?")
		args = append(args, *filter.Status)
	}
	whereSQL := ""
	if len(whereClauses) > 0 {
		whereSQL = "WHERE " + strings.Join(whereClauses, " AND ")
	}
	rows, err := t.tx.QueryContext(ctx, fmt.Sprintf(`
	SELECT id FROM issues %s ORDER BY priority ASC, created_at DESC
	`, whereSQL), args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	// Drain the id result set before issuing follow-up queries: the store's
	// pool is capped at one connection, and MySQL-protocol connections cannot
	// interleave a new query while a result set is still open on the same tx.
	var ids []string
	for rows.Next() {
		var id string
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// Hydrate each matching issue.
	var issues []*types.Issue
	for _, id := range ids {
		issue, err := t.GetIssue(ctx, id)
		if err != nil {
			return nil, err
		}
		if issue != nil {
			issues = append(issues, issue)
		}
	}
	return issues, nil
}
// UpdateIssue applies a map of field updates to an issue within the
// transaction, always refreshing updated_at. Field names are checked against
// the allow-list (isAllowedUpdateField) before being interpolated as backtick-
// quoted column names; values are always bound as placeholders.
func (t *doltTransaction) UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error {
	assignments := []string{"updated_at = ?"}
	params := []interface{}{time.Now()}
	for field, v := range updates {
		if !isAllowedUpdateField(field) {
			return fmt.Errorf("invalid field for update: %s", field)
		}
		// The API-level "wisp" field is stored in the ephemeral column.
		col := field
		if field == "wisp" {
			col = "ephemeral"
		}
		assignments = append(assignments, fmt.Sprintf("`%s` = ?", col))
		params = append(params, v)
	}
	params = append(params, id)
	stmt := fmt.Sprintf("UPDATE issues SET %s WHERE id = ?", strings.Join(assignments, ", "))
	_, err := t.tx.ExecContext(ctx, stmt, params...)
	return err
}
// CloseIssue closes an issue within the transaction, recording the close
// reason and the session that closed it. The actor parameter is accepted for
// interface compatibility but not persisted here.
func (t *doltTransaction) CloseIssue(ctx context.Context, id string, reason string, actor string, session string) error {
	now := time.Now()
	if _, err := t.tx.ExecContext(ctx, `
	UPDATE issues SET status = ?, closed_at = ?, updated_at = ?, close_reason = ?, closed_by_session = ?
	WHERE id = ?
	`, types.StatusClosed, now, now, reason, session, id); err != nil {
		return err
	}
	return nil
}
// DeleteIssue hard-deletes an issue within the transaction. Dependent rows
// (dependencies, labels, comments, events, ...) are removed via the schema's
// ON DELETE CASCADE foreign keys; a nonexistent id is a silent no-op.
func (t *doltTransaction) DeleteIssue(ctx context.Context, id string) error {
	_, err := t.tx.ExecContext(ctx, "DELETE FROM issues WHERE id = ?", id)
	return err
}
// AddDependency inserts a dependency edge within the transaction. An
// existing edge for the same (issue_id, depends_on_id) pair has its type
// updated instead of failing on the primary key.
func (t *doltTransaction) AddDependency(ctx context.Context, dep *types.Dependency, actor string) error {
	const query = `
	INSERT INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, thread_id)
	VALUES (?, ?, ?, NOW(), ?, ?)
	ON DUPLICATE KEY UPDATE type = VALUES(type)
	`
	if _, err := t.tx.ExecContext(ctx, query, dep.IssueID, dep.DependsOnID, dep.Type, actor, dep.ThreadID); err != nil {
		return err
	}
	return nil
}
// RemoveDependency deletes a dependency edge within the transaction; a
// missing edge is a silent no-op.
func (t *doltTransaction) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
	const query = `
	DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?
	`
	if _, err := t.tx.ExecContext(ctx, query, issueID, dependsOnID); err != nil {
		return err
	}
	return nil
}
// AddLabel attaches a label to an issue within the transaction; INSERT
// IGNORE makes re-adding an existing label a no-op.
func (t *doltTransaction) AddLabel(ctx context.Context, issueID, label, actor string) error {
	const query = `
	INSERT IGNORE INTO labels (issue_id, label) VALUES (?, ?)
	`
	if _, err := t.tx.ExecContext(ctx, query, issueID, label); err != nil {
		return err
	}
	return nil
}
// RemoveLabel detaches a label from an issue within the transaction; a
// missing label is a silent no-op.
func (t *doltTransaction) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
	const query = `
	DELETE FROM labels WHERE issue_id = ? AND label = ?
	`
	if _, err := t.tx.ExecContext(ctx, query, issueID, label); err != nil {
		return err
	}
	return nil
}
// SetConfig upserts a config key/value pair within the transaction.
func (t *doltTransaction) SetConfig(ctx context.Context, key, value string) error {
	query := `
	INSERT INTO config (` + "`key`" + `, value) VALUES (?, ?)
	ON DUPLICATE KEY UPDATE value = VALUES(value)
	`
	if _, err := t.tx.ExecContext(ctx, query, key, value); err != nil {
		return err
	}
	return nil
}
// GetConfig reads a config value within the transaction; a missing key
// yields ("", nil) rather than an error.
func (t *doltTransaction) GetConfig(ctx context.Context, key string) (string, error) {
	var value string
	switch err := t.tx.QueryRowContext(ctx, "SELECT value FROM config WHERE `key` = ?", key).Scan(&value); err {
	case nil:
		return value, nil
	case sql.ErrNoRows:
		return "", nil
	default:
		return "", err
	}
}
// SetMetadata upserts a metadata key/value pair within the transaction.
func (t *doltTransaction) SetMetadata(ctx context.Context, key, value string) error {
	query := `
	INSERT INTO metadata (` + "`key`" + `, value) VALUES (?, ?)
	ON DUPLICATE KEY UPDATE value = VALUES(value)
	`
	if _, err := t.tx.ExecContext(ctx, query, key, value); err != nil {
		return err
	}
	return nil
}
// GetMetadata reads a metadata value within the transaction; a missing key
// yields ("", nil) rather than an error.
func (t *doltTransaction) GetMetadata(ctx context.Context, key string) (string, error) {
	var value string
	switch err := t.tx.QueryRowContext(ctx, "SELECT value FROM metadata WHERE `key` = ?", key).Scan(&value); err {
	case nil:
		return value, nil
	case sql.ErrNoRows:
		return "", nil
	default:
		return "", err
	}
}
// AddComment records a comment on an issue within the transaction, stored as
// a row in the events audit table (created_at defaults to CURRENT_TIMESTAMP).
func (t *doltTransaction) AddComment(ctx context.Context, issueID, actor, comment string) error {
	const query = `
	INSERT INTO events (issue_id, event_type, actor, comment)
	VALUES (?, ?, ?, ?)
	`
	if _, err := t.tx.ExecContext(ctx, query, issueID, types.EventCommented, actor, comment); err != nil {
		return err
	}
	return nil
}
// Helper functions for transaction context

// insertIssueTx inserts a new row into the issues table via tx.
//
// Only the core columns are written here; every other column declared in the
// schema falls back to its DEFAULT. nullString/nullInt (defined elsewhere in
// this package) turn empty assignee/estimated_minutes values into SQL NULLs.
// The placeholder groups mirror the column list line-for-line — keep the two
// in sync when adding columns.
func insertIssueTx(ctx context.Context, tx *sql.Tx, issue *types.Issue) error {
	_, err := tx.ExecContext(ctx, `
	INSERT INTO issues (
	id, content_hash, title, description, design, acceptance_criteria, notes,
	status, priority, issue_type, assignee, estimated_minutes,
	created_at, created_by, owner, updated_at, closed_at,
	sender, ephemeral, pinned, is_template, crystallizes
	) VALUES (
	?, ?, ?, ?, ?, ?, ?,
	?, ?, ?, ?, ?,
	?, ?, ?, ?, ?,
	?, ?, ?, ?, ?
	)
	`,
		issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design, issue.AcceptanceCriteria, issue.Notes,
		issue.Status, issue.Priority, issue.IssueType, nullString(issue.Assignee), nullInt(issue.EstimatedMinutes),
		issue.CreatedAt, issue.CreatedBy, issue.Owner, issue.UpdatedAt, issue.ClosedAt,
		issue.Sender, issue.Ephemeral, issue.Pinned, issue.IsTemplate, issue.Crystallizes,
	)
	return err
}
// scanIssueTx loads a single issue row by id via tx.
//
// Returns (nil, nil) when the id does not exist. Only a core subset of
// columns is read; nullable columns go through sql.Null* wrappers and are
// folded into the issue's plain fields below. The Scan argument order must
// match the SELECT column list exactly.
func scanIssueTx(ctx context.Context, tx *sql.Tx, id string) (*types.Issue, error) {
	var issue types.Issue
	var closedAt sql.NullTime
	var estimatedMinutes sql.NullInt64
	var assignee, owner, contentHash sql.NullString
	// Boolean flags are TINYINT(1) columns in the schema; scanned as ints,
	// with any nonzero value treated as true below.
	var ephemeral, pinned, isTemplate, crystallizes sql.NullInt64
	err := tx.QueryRowContext(ctx, `
	SELECT id, content_hash, title, description, design, acceptance_criteria, notes,
	status, priority, issue_type, assignee, estimated_minutes,
	created_at, created_by, owner, updated_at, closed_at,
	ephemeral, pinned, is_template, crystallizes
	FROM issues
	WHERE id = ?
	`, id).Scan(
		&issue.ID, &contentHash, &issue.Title, &issue.Description, &issue.Design,
		&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
		&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
		&issue.CreatedAt, &issue.CreatedBy, &owner, &issue.UpdatedAt, &closedAt,
		&ephemeral, &pinned, &isTemplate, &crystallizes,
	)
	// Missing row is reported as (nil, nil), not an error.
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	// Unwrap nullable columns into the issue's value/pointer fields.
	if contentHash.Valid {
		issue.ContentHash = contentHash.String
	}
	if closedAt.Valid {
		issue.ClosedAt = &closedAt.Time
	}
	if estimatedMinutes.Valid {
		mins := int(estimatedMinutes.Int64)
		issue.EstimatedMinutes = &mins
	}
	if assignee.Valid {
		issue.Assignee = assignee.String
	}
	if owner.Valid {
		issue.Owner = owner.String
	}
	if ephemeral.Valid && ephemeral.Int64 != 0 {
		issue.Ephemeral = true
	}
	if pinned.Valid && pinned.Int64 != 0 {
		issue.Pinned = true
	}
	if isTemplate.Valid && isTemplate.Int64 != 0 {
		issue.IsTemplate = true
	}
	if crystallizes.Valid && crystallizes.Int64 != 0 {
		issue.Crystallizes = true
	}
	return &issue, nil
}