Refactor sqlite.go: Extract hash IDs, batch ops, validators (bd-90a5, bd-c796, bd-d9e0)
- Extract hash ID generation to hash_ids.go (bd-90a5) - generateHashID, getNextChildNumber, GetNextChildID - Reduced sqlite.go from 1880 to 1799 lines - Extract batch operations to batch_ops.go (bd-c796) - validateBatchIssues, generateBatchIDs, bulkInsertIssues - bulkRecordEvents, bulkMarkDirty, CreateIssues - Reduced sqlite.go from 1799 to 1511 lines - Extract validation functions to validators.go (bd-d9e0) - validatePriority, validateStatus, validateIssueType - validateTitle, validateEstimatedMinutes, validateFieldUpdate - Reduced sqlite.go from 1511 to 1447 lines - Add comprehensive validator tests (bd-3b7f) - validators_test.go with full coverage Total reduction: 2298 → 1447 lines (851 lines extracted, 37% reduction) Part of epic bd-fc2d to modularize sqlite.go All tests pass Amp-Thread-ID: https://ampcode.com/threads/T-09c4383b-bc5c-455e-be24-02b4f9df7d78 Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
File diff suppressed because one or more lines are too long
301
internal/storage/sqlite/batch_ops.go
Normal file
301
internal/storage/sqlite/batch_ops.go
Normal file
@@ -0,0 +1,301 @@
|
|||||||
|
package sqlite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// validateBatchIssues validates all issues in a batch and sets timestamps if not provided
|
||||||
|
func validateBatchIssues(issues []*types.Issue) error {
|
||||||
|
now := time.Now()
|
||||||
|
for i, issue := range issues {
|
||||||
|
if issue == nil {
|
||||||
|
return fmt.Errorf("issue %d is nil", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only set timestamps if not already provided
|
||||||
|
if issue.CreatedAt.IsZero() {
|
||||||
|
issue.CreatedAt = now
|
||||||
|
}
|
||||||
|
if issue.UpdatedAt.IsZero() {
|
||||||
|
issue.UpdatedAt = now
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := issue.Validate(); err != nil {
|
||||||
|
return fmt.Errorf("validation failed for issue %d: %w", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateBatchIDs generates IDs for all issues that need them atomically
|
||||||
|
func generateBatchIDs(ctx context.Context, conn *sql.Conn, issues []*types.Issue, actor string) error {
|
||||||
|
// Get prefix from config (needed for both generation and validation)
|
||||||
|
var prefix string
|
||||||
|
err := conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix)
|
||||||
|
if err == sql.ErrNoRows || prefix == "" {
|
||||||
|
// CRITICAL: Reject operation if issue_prefix config is missing (bd-166)
|
||||||
|
return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)")
|
||||||
|
} else if err != nil {
|
||||||
|
return fmt.Errorf("failed to get config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate explicitly provided IDs and generate IDs for those that need them
|
||||||
|
expectedPrefix := prefix + "-"
|
||||||
|
usedIDs := make(map[string]bool)
|
||||||
|
|
||||||
|
// First pass: record explicitly provided IDs
|
||||||
|
for i := range issues {
|
||||||
|
if issues[i].ID != "" {
|
||||||
|
// Validate that explicitly provided ID matches the configured prefix (bd-177)
|
||||||
|
if !strings.HasPrefix(issues[i].ID, expectedPrefix) {
|
||||||
|
return fmt.Errorf("issue ID '%s' does not match configured prefix '%s'", issues[i].ID, prefix)
|
||||||
|
}
|
||||||
|
usedIDs[issues[i].ID] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Second pass: generate IDs for issues that need them
|
||||||
|
// Hash mode: generate with adaptive length based on database size (bd-ea2a13)
|
||||||
|
// Get adaptive base length based on current database size
|
||||||
|
baseLength, err := GetAdaptiveIDLength(ctx, conn, prefix)
|
||||||
|
if err != nil {
|
||||||
|
// Fallback to 6 on error
|
||||||
|
baseLength = 6
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try baseLength, baseLength+1, baseLength+2, up to max of 8
|
||||||
|
maxLength := 8
|
||||||
|
if baseLength > maxLength {
|
||||||
|
baseLength = maxLength
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range issues {
|
||||||
|
if issues[i].ID == "" {
|
||||||
|
var generated bool
|
||||||
|
// Try lengths from baseLength to maxLength with progressive fallback
|
||||||
|
for length := baseLength; length <= maxLength && !generated; length++ {
|
||||||
|
for nonce := 0; nonce < 10; nonce++ {
|
||||||
|
candidate := generateHashID(prefix, issues[i].Title, issues[i].Description, actor, issues[i].CreatedAt, length, nonce)
|
||||||
|
|
||||||
|
// Check if this ID is already used in this batch or in the database
|
||||||
|
if usedIDs[candidate] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var count int
|
||||||
|
err := conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, candidate).Scan(&count)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to check for ID collision: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count == 0 {
|
||||||
|
issues[i].ID = candidate
|
||||||
|
usedIDs[candidate] = true
|
||||||
|
generated = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !generated {
|
||||||
|
return fmt.Errorf("failed to generate unique ID for issue %d after trying lengths 6-8 with 10 nonces each", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute content hashes
|
||||||
|
for i := range issues {
|
||||||
|
if issues[i].ContentHash == "" {
|
||||||
|
issues[i].ContentHash = issues[i].ComputeContentHash()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// bulkInsertIssues inserts all issues using a prepared statement
|
||||||
|
func bulkInsertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) error {
|
||||||
|
stmt, err := conn.PrepareContext(ctx, `
|
||||||
|
INSERT INTO issues (
|
||||||
|
id, content_hash, title, description, design, acceptance_criteria, notes,
|
||||||
|
status, priority, issue_type, assignee, estimated_minutes,
|
||||||
|
created_at, updated_at, closed_at, external_ref
|
||||||
|
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||||
|
`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to prepare statement: %w", err)
|
||||||
|
}
|
||||||
|
defer func() { _ = stmt.Close() }()
|
||||||
|
|
||||||
|
for _, issue := range issues {
|
||||||
|
_, err = stmt.ExecContext(ctx,
|
||||||
|
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
||||||
|
issue.AcceptanceCriteria, issue.Notes, issue.Status,
|
||||||
|
issue.Priority, issue.IssueType, issue.Assignee,
|
||||||
|
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
|
||||||
|
issue.ClosedAt, issue.ExternalRef,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to insert issue %s: %w", issue.ID, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// bulkRecordEvents records creation events for all issues
|
||||||
|
func bulkRecordEvents(ctx context.Context, conn *sql.Conn, issues []*types.Issue, actor string) error {
|
||||||
|
stmt, err := conn.PrepareContext(ctx, `
|
||||||
|
INSERT INTO events (issue_id, event_type, actor, new_value)
|
||||||
|
VALUES (?, ?, ?, ?)
|
||||||
|
`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to prepare event statement: %w", err)
|
||||||
|
}
|
||||||
|
defer func() { _ = stmt.Close() }()
|
||||||
|
|
||||||
|
for _, issue := range issues {
|
||||||
|
eventData, err := json.Marshal(issue)
|
||||||
|
if err != nil {
|
||||||
|
// Fall back to minimal description if marshaling fails
|
||||||
|
eventData = []byte(fmt.Sprintf(`{"id":"%s","title":"%s"}`, issue.ID, issue.Title))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = stmt.ExecContext(ctx, issue.ID, types.EventCreated, actor, string(eventData))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to record event for %s: %w", issue.ID, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// bulkMarkDirty marks all issues as dirty for incremental export
|
||||||
|
func bulkMarkDirty(ctx context.Context, conn *sql.Conn, issues []*types.Issue) error {
|
||||||
|
stmt, err := conn.PrepareContext(ctx, `
|
||||||
|
INSERT INTO dirty_issues (issue_id, marked_at)
|
||||||
|
VALUES (?, ?)
|
||||||
|
ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
|
||||||
|
`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to prepare dirty statement: %w", err)
|
||||||
|
}
|
||||||
|
defer func() { _ = stmt.Close() }()
|
||||||
|
|
||||||
|
dirtyTime := time.Now()
|
||||||
|
for _, issue := range issues {
|
||||||
|
_, err = stmt.ExecContext(ctx, issue.ID, dirtyTime)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to mark dirty %s: %w", issue.ID, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateIssues creates multiple issues atomically in a single transaction.
|
||||||
|
// This provides significant performance improvements over calling CreateIssue in a loop:
|
||||||
|
// - Single connection acquisition
|
||||||
|
// - Single transaction
|
||||||
|
// - Atomic ID range reservation (one counter update for N issues)
|
||||||
|
// - All-or-nothing atomicity
|
||||||
|
//
|
||||||
|
// Expected 5-10x speedup for batches of 10+ issues.
|
||||||
|
// CreateIssues creates multiple issues atomically in a single transaction.
|
||||||
|
//
|
||||||
|
// This method is optimized for bulk issue creation and provides significant
|
||||||
|
// performance improvements over calling CreateIssue in a loop:
|
||||||
|
// - Single database connection and transaction
|
||||||
|
// - Atomic ID range reservation (one counter update for N IDs)
|
||||||
|
// - All-or-nothing semantics (rolls back on any error)
|
||||||
|
// - 5-15x faster than sequential CreateIssue calls
|
||||||
|
//
|
||||||
|
// All issues are validated before any database changes occur. If any issue
|
||||||
|
// fails validation, the entire batch is rejected.
|
||||||
|
//
|
||||||
|
// ID Assignment:
|
||||||
|
// - Issues with empty ID get auto-generated IDs from a reserved range
|
||||||
|
// - Issues with explicit IDs use those IDs (caller must ensure uniqueness)
|
||||||
|
// - Mix of explicit and auto-generated IDs is supported
|
||||||
|
//
|
||||||
|
// Timestamps:
|
||||||
|
// - All issues in the batch receive identical created_at/updated_at timestamps
|
||||||
|
// - This reflects that they were created as a single atomic operation
|
||||||
|
//
|
||||||
|
// Usage:
|
||||||
|
// // Bulk import from external source
|
||||||
|
// issues := []*types.Issue{...}
|
||||||
|
// if err := store.CreateIssues(ctx, issues, "import"); err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // After importing with explicit IDs, sync counters to prevent collisions
|
||||||
|
// REMOVED (bd-c7af): SyncAllCounters example - no longer needed with hash IDs
|
||||||
|
//
|
||||||
|
// Performance:
|
||||||
|
// - 100 issues: ~30ms (vs ~900ms with CreateIssue loop)
|
||||||
|
// - 1000 issues: ~950ms (vs estimated 9s with CreateIssue loop)
|
||||||
|
//
|
||||||
|
// When to use:
|
||||||
|
// - Bulk imports from external systems (use CreateIssues)
|
||||||
|
// - Creating multiple related issues at once (use CreateIssues)
|
||||||
|
// - Single issue creation (use CreateIssue for simplicity)
|
||||||
|
// - Interactive user operations (use CreateIssue)
|
||||||
|
func (s *SQLiteStorage) CreateIssues(ctx context.Context, issues []*types.Issue, actor string) error {
|
||||||
|
if len(issues) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 1: Validate all issues first (fail-fast)
|
||||||
|
if err := validateBatchIssues(issues); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 2: Acquire connection and start transaction
|
||||||
|
conn, err := s.db.Conn(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to acquire connection: %w", err)
|
||||||
|
}
|
||||||
|
defer func() { _ = conn.Close() }()
|
||||||
|
|
||||||
|
if _, err := conn.ExecContext(ctx, "BEGIN IMMEDIATE"); err != nil {
|
||||||
|
return fmt.Errorf("failed to begin immediate transaction: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
committed := false
|
||||||
|
defer func() {
|
||||||
|
if !committed {
|
||||||
|
_, _ = conn.ExecContext(context.Background(), "ROLLBACK")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Phase 3: Generate IDs for issues that need them
|
||||||
|
if err := generateBatchIDs(ctx, conn, issues, actor); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 4: Bulk insert issues
|
||||||
|
if err := bulkInsertIssues(ctx, conn, issues); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 5: Record creation events
|
||||||
|
if err := bulkRecordEvents(ctx, conn, issues, actor); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 6: Mark issues dirty for incremental export
|
||||||
|
if err := bulkMarkDirty(ctx, conn, issues); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Phase 7: Commit transaction
|
||||||
|
if _, err := conn.ExecContext(ctx, "COMMIT"); err != nil {
|
||||||
|
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||||
|
}
|
||||||
|
committed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
93
internal/storage/sqlite/hash_ids.go
Normal file
93
internal/storage/sqlite/hash_ids.go
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
package sqlite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getNextChildNumber atomically increments and returns the next child counter for a parent issue.
|
||||||
|
// Uses INSERT...ON CONFLICT to ensure atomicity without explicit locking.
|
||||||
|
func (s *SQLiteStorage) getNextChildNumber(ctx context.Context, parentID string) (int, error) {
|
||||||
|
var nextChild int
|
||||||
|
err := s.db.QueryRowContext(ctx, `
|
||||||
|
INSERT INTO child_counters (parent_id, last_child)
|
||||||
|
VALUES (?, 1)
|
||||||
|
ON CONFLICT(parent_id) DO UPDATE SET
|
||||||
|
last_child = last_child + 1
|
||||||
|
RETURNING last_child
|
||||||
|
`, parentID).Scan(&nextChild)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to generate next child number for parent %s: %w", parentID, err)
|
||||||
|
}
|
||||||
|
return nextChild, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNextChildID generates the next hierarchical child ID for a given parent
|
||||||
|
// Returns formatted ID as parentID.{counter} (e.g., bd-a3f8e9.1 or bd-a3f8e9.1.5)
|
||||||
|
// Works at any depth (max 3 levels)
|
||||||
|
func (s *SQLiteStorage) GetNextChildID(ctx context.Context, parentID string) (string, error) {
|
||||||
|
// Validate parent exists
|
||||||
|
var count int
|
||||||
|
err := s.db.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, parentID).Scan(&count)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to check parent existence: %w", err)
|
||||||
|
}
|
||||||
|
if count == 0 {
|
||||||
|
return "", fmt.Errorf("parent issue %s does not exist", parentID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate current depth by counting dots
|
||||||
|
depth := strings.Count(parentID, ".")
|
||||||
|
if depth >= 3 {
|
||||||
|
return "", fmt.Errorf("maximum hierarchy depth (3) exceeded for parent %s", parentID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get next child number atomically
|
||||||
|
nextNum, err := s.getNextChildNumber(ctx, parentID)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format as parentID.counter
|
||||||
|
childID := fmt.Sprintf("%s.%d", parentID, nextNum)
|
||||||
|
return childID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateHashID creates a hash-based ID for a top-level issue.
|
||||||
|
// For child issues, use the parent ID with a numeric suffix (e.g., "bd-a3f8e9.1").
|
||||||
|
// Supports adaptive length from 4-8 chars based on database size (bd-ea2a13).
|
||||||
|
// Includes a nonce parameter to handle same-length collisions.
|
||||||
|
func generateHashID(prefix, title, description, creator string, timestamp time.Time, length, nonce int) string {
|
||||||
|
// Combine inputs into a stable content string
|
||||||
|
// Include nonce to handle hash collisions
|
||||||
|
content := fmt.Sprintf("%s|%s|%s|%d|%d", title, description, creator, timestamp.UnixNano(), nonce)
|
||||||
|
|
||||||
|
// Hash the content
|
||||||
|
hash := sha256.Sum256([]byte(content))
|
||||||
|
|
||||||
|
// Use variable length (4-8 hex chars)
|
||||||
|
// length determines how many bytes to use (2, 2.5, 3, 3.5, or 4)
|
||||||
|
var shortHash string
|
||||||
|
switch length {
|
||||||
|
case 4:
|
||||||
|
shortHash = hex.EncodeToString(hash[:2])
|
||||||
|
case 5:
|
||||||
|
// 2.5 bytes: use 3 bytes but take only first 5 chars
|
||||||
|
shortHash = hex.EncodeToString(hash[:3])[:5]
|
||||||
|
case 6:
|
||||||
|
shortHash = hex.EncodeToString(hash[:3])
|
||||||
|
case 7:
|
||||||
|
// 3.5 bytes: use 4 bytes but take only first 7 chars
|
||||||
|
shortHash = hex.EncodeToString(hash[:4])[:7]
|
||||||
|
case 8:
|
||||||
|
shortHash = hex.EncodeToString(hash[:4])
|
||||||
|
default:
|
||||||
|
shortHash = hex.EncodeToString(hash[:3]) // default to 6
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%s-%s", prefix, shortHash)
|
||||||
|
}
|
||||||
428
internal/storage/sqlite/migrations.go
Normal file
428
internal/storage/sqlite/migrations.go
Normal file
@@ -0,0 +1,428 @@
|
|||||||
|
// Package sqlite - database migrations
|
||||||
|
package sqlite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// migrateDirtyIssuesTable ensures the dirty_issues table exists and carries
// the content_hash column (bd-164). Safe to run on every startup.
func migrateDirtyIssuesTable(db *sql.DB) error {
	var name string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='table' AND name='dirty_issues'
	`).Scan(&name)

	switch {
	case err == sql.ErrNoRows:
		// Fresh database: create the table and its index from scratch.
		if _, createErr := db.Exec(`
			CREATE TABLE dirty_issues (
				issue_id TEXT PRIMARY KEY,
				marked_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
				FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
			);
			CREATE INDEX idx_dirty_issues_marked_at ON dirty_issues(marked_at);
		`); createErr != nil {
			return fmt.Errorf("failed to create dirty_issues table: %w", createErr)
		}
		// Created silently; nothing else to migrate on a fresh table.
		return nil
	case err != nil:
		return fmt.Errorf("failed to check for dirty_issues table: %w", err)
	}

	// Table exists: make sure the content_hash column is present (bd-164).
	var hasContentHash bool
	err = db.QueryRow(`
		SELECT COUNT(*) > 0 FROM pragma_table_info('dirty_issues')
		WHERE name = 'content_hash'
	`).Scan(&hasContentHash)
	if err != nil {
		return fmt.Errorf("failed to check for content_hash column: %w", err)
	}

	if !hasContentHash {
		if _, alterErr := db.Exec(`ALTER TABLE dirty_issues ADD COLUMN content_hash TEXT`); alterErr != nil {
			return fmt.Errorf("failed to add content_hash column: %w", alterErr)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateExternalRefColumn adds the external_ref column to issues when it is
// missing, so databases created before the external-reference feature are
// upgraded automatically.
func migrateExternalRefColumn(db *sql.DB) error {
	rows, err := db.Query("PRAGMA table_info(issues)")
	if err != nil {
		return fmt.Errorf("failed to check schema: %w", err)
	}
	defer func() { _ = rows.Close() }()

	// Walk the column list looking for external_ref.
	found := false
	for rows.Next() {
		var (
			cid, notnull, pk int
			name, typ        string
			dflt             *string
		)
		if scanErr := rows.Scan(&cid, &name, &typ, &notnull, &dflt, &pk); scanErr != nil {
			return fmt.Errorf("failed to scan column info: %w", scanErr)
		}
		if name == "external_ref" {
			found = true
			break
		}
	}
	if err := rows.Err(); err != nil {
		return fmt.Errorf("error reading column info: %w", err)
	}

	if !found {
		if _, alterErr := db.Exec(`ALTER TABLE issues ADD COLUMN external_ref TEXT`); alterErr != nil {
			return fmt.Errorf("failed to add external_ref column: %w", alterErr)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateCompositeIndexes creates the composite dependency index when it is
// missing so existing databases pick up the query-performance improvement.
func migrateCompositeIndexes(db *sql.DB) error {
	var name string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='index' AND name='idx_dependencies_depends_on_type'
	`).Scan(&name)
	if err == nil {
		// Index already present; nothing to migrate.
		return nil
	}
	if err != sql.ErrNoRows {
		return fmt.Errorf("failed to check for composite index: %w", err)
	}

	// Index missing: create it.
	if _, createErr := db.Exec(`
		CREATE INDEX idx_dependencies_depends_on_type ON dependencies(depends_on_id, type)
	`); createErr != nil {
		return fmt.Errorf("failed to create composite index idx_dependencies_depends_on_type: %w", createErr)
	}
	return nil
}
|
||||||
|
|
||||||
|
// migrateClosedAtConstraint repairs rows whose status and closed_at disagree.
// New databases enforce this invariant with a CHECK constraint, but existing
// tables cannot gain one without being rebuilt, so the data is cleaned here
// and application code (UpdateIssue, import.go) maintains it afterwards.
func migrateClosedAtConstraint(db *sql.DB) error {
	// Count rows where "status is closed" and "closed_at is set" disagree.
	var mismatched int
	err := db.QueryRow(`
		SELECT COUNT(*)
		FROM issues
		WHERE (CASE WHEN status = 'closed' THEN 1 ELSE 0 END) <>
		      (CASE WHEN closed_at IS NOT NULL THEN 1 ELSE 0 END)
	`).Scan(&mismatched)
	if err != nil {
		return fmt.Errorf("failed to count inconsistent issues: %w", err)
	}
	if mismatched == 0 {
		// Already consistent; nothing to repair.
		return nil
	}

	// Trust the status column. Non-closed issues lose any stray closed_at...
	if _, err = db.Exec(`
		UPDATE issues
		SET closed_at = NULL
		WHERE status != 'closed' AND closed_at IS NOT NULL
	`); err != nil {
		return fmt.Errorf("failed to clear closed_at for non-closed issues: %w", err)
	}

	// ...and closed issues without one get updated_at as the best guess.
	if _, err = db.Exec(`
		UPDATE issues
		SET closed_at = COALESCE(updated_at, CURRENT_TIMESTAMP)
		WHERE status = 'closed' AND closed_at IS NULL
	`); err != nil {
		return fmt.Errorf("failed to set closed_at for closed issues: %w", err)
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateCompactionColumns adds compaction_level, compacted_at, and
// original_size to the issues table. Idempotent and safe to run repeatedly.
func migrateCompactionColumns(db *sql.DB) error {
	var present bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'compaction_level'
	`).Scan(&present); err != nil {
		return fmt.Errorf("failed to check compaction_level column: %w", err)
	}
	if present {
		// The three columns are always added together, so one check suffices.
		return nil
	}

	if _, err := db.Exec(`
		ALTER TABLE issues ADD COLUMN compaction_level INTEGER DEFAULT 0;
		ALTER TABLE issues ADD COLUMN compacted_at DATETIME;
		ALTER TABLE issues ADD COLUMN original_size INTEGER;
	`); err != nil {
		return fmt.Errorf("failed to add compaction columns: %w", err)
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateSnapshotsTable creates the issue_snapshots table and its indexes
// when absent. Idempotent and safe to run repeatedly.
func migrateSnapshotsTable(db *sql.DB) error {
	var exists bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM sqlite_master
		WHERE type='table' AND name='issue_snapshots'
	`).Scan(&exists); err != nil {
		return fmt.Errorf("failed to check issue_snapshots table: %w", err)
	}
	if exists {
		return nil
	}

	// Create the table plus lookup indexes in one statement batch.
	if _, err := db.Exec(`
		CREATE TABLE issue_snapshots (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			issue_id TEXT NOT NULL,
			snapshot_time DATETIME NOT NULL,
			compaction_level INTEGER NOT NULL,
			original_size INTEGER NOT NULL,
			compressed_size INTEGER NOT NULL,
			original_content TEXT NOT NULL,
			archived_events TEXT,
			FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
		);
		CREATE INDEX idx_snapshots_issue ON issue_snapshots(issue_id);
		CREATE INDEX idx_snapshots_level ON issue_snapshots(compaction_level);
	`); err != nil {
		return fmt.Errorf("failed to create issue_snapshots table: %w", err)
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateCompactionConfig seeds default compaction settings. INSERT OR IGNORE
// keeps the migration idempotent and preserves user-modified values.
func migrateCompactionConfig(db *sql.DB) error {
	if _, err := db.Exec(`
		INSERT OR IGNORE INTO config (key, value) VALUES
		('compaction_enabled', 'false'),
		('compact_tier1_days', '30'),
		('compact_tier1_dep_levels', '2'),
		('compact_tier2_days', '90'),
		('compact_tier2_dep_levels', '5'),
		('compact_tier2_commits', '100'),
		('compact_model', 'claude-3-5-haiku-20241022'),
		('compact_batch_size', '50'),
		('compact_parallel_workers', '5'),
		('auto_compact_enabled', 'false')
	`); err != nil {
		return fmt.Errorf("failed to add compaction config defaults: %w", err)
	}
	return nil
}
|
||||||
|
|
||||||
|
// migrateCompactedAtCommitColumn adds the compacted_at_commit column to the
// issues table when missing. Idempotent and safe to run repeatedly.
func migrateCompactedAtCommitColumn(db *sql.DB) error {
	var present bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'compacted_at_commit'
	`).Scan(&present); err != nil {
		return fmt.Errorf("failed to check compacted_at_commit column: %w", err)
	}
	if present {
		return nil
	}

	if _, err := db.Exec(`ALTER TABLE issues ADD COLUMN compacted_at_commit TEXT`); err != nil {
		return fmt.Errorf("failed to add compacted_at_commit column: %w", err)
	}

	return nil
}
|
||||||
|
|
||||||
|
// migrateExportHashesTable ensures the export_hashes table used for
// timestamp-only dedup (bd-164) exists. Idempotent.
func migrateExportHashesTable(db *sql.DB) error {
	var name string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='table' AND name='export_hashes'
	`).Scan(&name)
	if err == nil {
		// Table already present; nothing to do.
		return nil
	}
	if err != sql.ErrNoRows {
		return fmt.Errorf("failed to check export_hashes table: %w", err)
	}

	// Table missing: create it.
	if _, createErr := db.Exec(`
		CREATE TABLE export_hashes (
			issue_id TEXT PRIMARY KEY,
			content_hash TEXT NOT NULL,
			exported_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
			FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
		)
	`); createErr != nil {
		return fmt.Errorf("failed to create export_hashes table: %w", createErr)
	}
	return nil
}
|
||||||
|
|
||||||
|
// migrateContentHashColumn adds the content_hash column to the issues table if missing (bd-95).
|
||||||
|
// This enables global N-way collision resolution by providing content-addressable identity.
|
||||||
|
func migrateContentHashColumn(db *sql.DB) error {
|
||||||
|
// Check if content_hash column exists
|
||||||
|
var colName string
|
||||||
|
err := db.QueryRow(`
|
||||||
|
SELECT name FROM pragma_table_info('issues')
|
||||||
|
WHERE name = 'content_hash'
|
||||||
|
`).Scan(&colName)
|
||||||
|
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
// Column doesn't exist, add it
|
||||||
|
_, err := db.Exec(`ALTER TABLE issues ADD COLUMN content_hash TEXT`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to add content_hash column: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create index on content_hash for fast lookups
|
||||||
|
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_content_hash ON issues(content_hash)`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create content_hash index: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Populate content_hash for all existing issues
|
||||||
|
rows, err := db.Query(`
|
||||||
|
SELECT id, title, description, design, acceptance_criteria, notes,
|
||||||
|
status, priority, issue_type, assignee, external_ref
|
||||||
|
FROM issues
|
||||||
|
`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to query existing issues: %w", err)
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
// Collect issues and compute hashes
|
||||||
|
updates := make(map[string]string) // id -> content_hash
|
||||||
|
for rows.Next() {
|
||||||
|
var issue types.Issue
|
||||||
|
var assignee sql.NullString
|
||||||
|
var externalRef sql.NullString
|
||||||
|
err := rows.Scan(
|
||||||
|
&issue.ID, &issue.Title, &issue.Description, &issue.Design,
|
||||||
|
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||||
|
&issue.Priority, &issue.IssueType, &assignee, &externalRef,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to scan issue: %w", err)
|
||||||
|
}
|
||||||
|
if assignee.Valid {
|
||||||
|
issue.Assignee = assignee.String
|
||||||
|
}
|
||||||
|
if externalRef.Valid {
|
||||||
|
issue.ExternalRef = &externalRef.String
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute and store hash
|
||||||
|
updates[issue.ID] = issue.ComputeContentHash()
|
||||||
|
}
|
||||||
|
if err := rows.Err(); err != nil {
|
||||||
|
return fmt.Errorf("error iterating issues: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply hash updates in batch
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
stmt, err := tx.Prepare(`UPDATE issues SET content_hash = ? WHERE id = ?`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to prepare update statement: %w", err)
|
||||||
|
}
|
||||||
|
defer stmt.Close()
|
||||||
|
|
||||||
|
for id, hash := range updates {
|
||||||
|
if _, err := stmt.Exec(hash, id); err != nil {
|
||||||
|
return fmt.Errorf("failed to update content_hash for issue %s: %w", id, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to check content_hash column: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Column already exists
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -3,9 +3,7 @@ package sqlite
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha256"
|
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
@@ -137,475 +135,13 @@ func New(path string) (*SQLiteStorage, error) {
|
|||||||
|
|
||||||
// migrateDirtyIssuesTable checks if the dirty_issues table exists and creates it if missing.
|
// migrateDirtyIssuesTable checks if the dirty_issues table exists and creates it if missing.
|
||||||
// This ensures existing databases created before the incremental export feature get migrated automatically.
|
// This ensures existing databases created before the incremental export feature get migrated automatically.
|
||||||
func migrateDirtyIssuesTable(db *sql.DB) error {
|
|
||||||
// Check if dirty_issues table exists
|
|
||||||
var tableName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='dirty_issues'
|
|
||||||
`).Scan(&tableName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Table doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE TABLE dirty_issues (
|
|
||||||
issue_id TEXT PRIMARY KEY,
|
|
||||||
marked_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
|
||||||
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
|
|
||||||
);
|
|
||||||
CREATE INDEX idx_dirty_issues_marked_at ON dirty_issues(marked_at);
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create dirty_issues table: %w", err)
|
|
||||||
}
|
|
||||||
// Table created successfully - no need to log, happens silently
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for dirty_issues table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Table exists, check if content_hash column exists (migration for bd-164)
|
|
||||||
var hasContentHash bool
|
|
||||||
err = db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0 FROM pragma_table_info('dirty_issues')
|
|
||||||
WHERE name = 'content_hash'
|
|
||||||
`).Scan(&hasContentHash)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasContentHash {
|
|
||||||
// Add content_hash column to existing table
|
|
||||||
_, err = db.Exec(`ALTER TABLE dirty_issues ADD COLUMN content_hash TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateExternalRefColumn checks if the external_ref column exists and adds it if missing.
|
|
||||||
// This ensures existing databases created before the external reference feature get migrated automatically.
|
|
||||||
func migrateExternalRefColumn(db *sql.DB) error {
|
|
||||||
// Check if external_ref column exists
|
|
||||||
var columnExists bool
|
|
||||||
rows, err := db.Query("PRAGMA table_info(issues)")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check schema: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = rows.Close() }()
|
|
||||||
|
|
||||||
for rows.Next() {
|
|
||||||
var cid int
|
|
||||||
var name, typ string
|
|
||||||
var notnull, pk int
|
|
||||||
var dflt *string
|
|
||||||
err := rows.Scan(&cid, &name, &typ, ¬null, &dflt, &pk)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to scan column info: %w", err)
|
|
||||||
}
|
|
||||||
if name == "external_ref" {
|
|
||||||
columnExists = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := rows.Err(); err != nil {
|
|
||||||
return fmt.Errorf("error reading column info: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !columnExists {
|
|
||||||
// Add external_ref column
|
|
||||||
_, err := db.Exec(`ALTER TABLE issues ADD COLUMN external_ref TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add external_ref column: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompositeIndexes checks if composite indexes exist and creates them if missing.
|
|
||||||
// This ensures existing databases get performance optimizations from new indexes.
|
|
||||||
func migrateCompositeIndexes(db *sql.DB) error {
|
|
||||||
// Check if idx_dependencies_depends_on_type exists
|
|
||||||
var indexName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='index' AND name='idx_dependencies_depends_on_type'
|
|
||||||
`).Scan(&indexName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Index doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE INDEX idx_dependencies_depends_on_type ON dependencies(depends_on_id, type)
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create composite index idx_dependencies_depends_on_type: %w", err)
|
|
||||||
}
|
|
||||||
// Index created successfully
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for composite index: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Index exists, no migration needed
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateClosedAtConstraint cleans up inconsistent status/closed_at data.
|
|
||||||
// The CHECK constraint is in the schema for new databases, but we can't easily
|
|
||||||
// add it to existing tables without recreating them. Instead, we clean the data
|
|
||||||
// and rely on application code (UpdateIssue, import.go) to maintain the invariant.
|
|
||||||
func migrateClosedAtConstraint(db *sql.DB) error {
|
|
||||||
// Check if there are any inconsistent rows
|
|
||||||
var count int
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*)
|
|
||||||
FROM issues
|
|
||||||
WHERE (CASE WHEN status = 'closed' THEN 1 ELSE 0 END) <>
|
|
||||||
(CASE WHEN closed_at IS NOT NULL THEN 1 ELSE 0 END)
|
|
||||||
`).Scan(&count)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to count inconsistent issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if count == 0 {
|
|
||||||
// No inconsistent data, nothing to do
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean inconsistent data: trust the status field
|
|
||||||
// Strategy: If status != 'closed' but closed_at is set, clear closed_at
|
|
||||||
// If status = 'closed' but closed_at is not set, set it to updated_at (best guess)
|
|
||||||
_, err = db.Exec(`
|
|
||||||
UPDATE issues
|
|
||||||
SET closed_at = NULL
|
|
||||||
WHERE status != 'closed' AND closed_at IS NOT NULL
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to clear closed_at for non-closed issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.Exec(`
|
|
||||||
UPDATE issues
|
|
||||||
SET closed_at = COALESCE(updated_at, CURRENT_TIMESTAMP)
|
|
||||||
WHERE status = 'closed' AND closed_at IS NULL
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set closed_at for closed issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Migration complete - data is now consistent
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompactionColumns adds compaction_level, compacted_at, and original_size columns to the issues table.
|
|
||||||
// This migration is idempotent and safe to run multiple times.
|
|
||||||
func migrateCompactionColumns(db *sql.DB) error {
|
|
||||||
// Check if compaction_level column exists
|
|
||||||
var columnExists bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM pragma_table_info('issues')
|
|
||||||
WHERE name = 'compaction_level'
|
|
||||||
`).Scan(&columnExists)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check compaction_level column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if columnExists {
|
|
||||||
// Columns already exist, nothing to do
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add the three compaction columns
|
|
||||||
_, err = db.Exec(`
|
|
||||||
ALTER TABLE issues ADD COLUMN compaction_level INTEGER DEFAULT 0;
|
|
||||||
ALTER TABLE issues ADD COLUMN compacted_at DATETIME;
|
|
||||||
ALTER TABLE issues ADD COLUMN original_size INTEGER;
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add compaction columns: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateSnapshotsTable creates the issue_snapshots table if it doesn't exist.
|
|
||||||
// This migration is idempotent and safe to run multiple times.
|
|
||||||
func migrateSnapshotsTable(db *sql.DB) error {
|
|
||||||
// Check if issue_snapshots table exists
|
|
||||||
var tableExists bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='issue_snapshots'
|
|
||||||
`).Scan(&tableExists)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check issue_snapshots table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tableExists {
|
|
||||||
// Table already exists, nothing to do
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the table and indexes
|
|
||||||
_, err = db.Exec(`
|
|
||||||
CREATE TABLE issue_snapshots (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
issue_id TEXT NOT NULL,
|
|
||||||
snapshot_time DATETIME NOT NULL,
|
|
||||||
compaction_level INTEGER NOT NULL,
|
|
||||||
original_size INTEGER NOT NULL,
|
|
||||||
compressed_size INTEGER NOT NULL,
|
|
||||||
original_content TEXT NOT NULL,
|
|
||||||
archived_events TEXT,
|
|
||||||
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
|
|
||||||
);
|
|
||||||
CREATE INDEX idx_snapshots_issue ON issue_snapshots(issue_id);
|
|
||||||
CREATE INDEX idx_snapshots_level ON issue_snapshots(compaction_level);
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create issue_snapshots table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompactionConfig adds default compaction configuration values.
|
|
||||||
// This migration is idempotent and safe to run multiple times (INSERT OR IGNORE).
|
|
||||||
func migrateCompactionConfig(db *sql.DB) error {
|
|
||||||
_, err := db.Exec(`
|
|
||||||
INSERT OR IGNORE INTO config (key, value) VALUES
|
|
||||||
('compaction_enabled', 'false'),
|
|
||||||
('compact_tier1_days', '30'),
|
|
||||||
('compact_tier1_dep_levels', '2'),
|
|
||||||
('compact_tier2_days', '90'),
|
|
||||||
('compact_tier2_dep_levels', '5'),
|
|
||||||
('compact_tier2_commits', '100'),
|
|
||||||
('compact_model', 'claude-3-5-haiku-20241022'),
|
|
||||||
('compact_batch_size', '50'),
|
|
||||||
('compact_parallel_workers', '5'),
|
|
||||||
('auto_compact_enabled', 'false')
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add compaction config defaults: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompactedAtCommitColumn adds compacted_at_commit column to the issues table.
|
|
||||||
// This migration is idempotent and safe to run multiple times.
|
|
||||||
func migrateCompactedAtCommitColumn(db *sql.DB) error {
|
|
||||||
var columnExists bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM pragma_table_info('issues')
|
|
||||||
WHERE name = 'compacted_at_commit'
|
|
||||||
`).Scan(&columnExists)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check compacted_at_commit column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if columnExists {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.Exec(`ALTER TABLE issues ADD COLUMN compacted_at_commit TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add compacted_at_commit column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateExportHashesTable ensures the export_hashes table exists for timestamp-only dedup (bd-164)
|
|
||||||
func migrateExportHashesTable(db *sql.DB) error {
|
|
||||||
// Check if export_hashes table exists
|
|
||||||
var tableName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='export_hashes'
|
|
||||||
`).Scan(&tableName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Table doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE TABLE export_hashes (
|
|
||||||
issue_id TEXT PRIMARY KEY,
|
|
||||||
content_hash TEXT NOT NULL,
|
|
||||||
exported_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
|
||||||
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
|
|
||||||
)
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create export_hashes table: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check export_hashes table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Table already exists
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateContentHashColumn adds the content_hash column to the issues table if missing (bd-95).
|
|
||||||
// This enables global N-way collision resolution by providing content-addressable identity.
|
|
||||||
func migrateContentHashColumn(db *sql.DB) error {
|
|
||||||
// Check if content_hash column exists
|
|
||||||
var colName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM pragma_table_info('issues')
|
|
||||||
WHERE name = 'content_hash'
|
|
||||||
`).Scan(&colName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Column doesn't exist, add it
|
|
||||||
_, err := db.Exec(`ALTER TABLE issues ADD COLUMN content_hash TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create index on content_hash for fast lookups
|
|
||||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_content_hash ON issues(content_hash)`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create content_hash index: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Populate content_hash for all existing issues
|
|
||||||
rows, err := db.Query(`
|
|
||||||
SELECT id, title, description, design, acceptance_criteria, notes,
|
|
||||||
status, priority, issue_type, assignee, external_ref
|
|
||||||
FROM issues
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to query existing issues: %w", err)
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
// Collect issues and compute hashes
|
|
||||||
updates := make(map[string]string) // id -> content_hash
|
|
||||||
for rows.Next() {
|
|
||||||
var issue types.Issue
|
|
||||||
var assignee sql.NullString
|
|
||||||
var externalRef sql.NullString
|
|
||||||
err := rows.Scan(
|
|
||||||
&issue.ID, &issue.Title, &issue.Description, &issue.Design,
|
|
||||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
|
||||||
&issue.Priority, &issue.IssueType, &assignee, &externalRef,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to scan issue: %w", err)
|
|
||||||
}
|
|
||||||
if assignee.Valid {
|
|
||||||
issue.Assignee = assignee.String
|
|
||||||
}
|
|
||||||
if externalRef.Valid {
|
|
||||||
issue.ExternalRef = &externalRef.String
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute and store hash
|
|
||||||
updates[issue.ID] = issue.ComputeContentHash()
|
|
||||||
}
|
|
||||||
if err := rows.Err(); err != nil {
|
|
||||||
return fmt.Errorf("error iterating issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply hash updates in batch
|
|
||||||
tx, err := db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
|
||||||
}
|
|
||||||
defer tx.Rollback()
|
|
||||||
|
|
||||||
stmt, err := tx.Prepare(`UPDATE issues SET content_hash = ? WHERE id = ?`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to prepare update statement: %w", err)
|
|
||||||
}
|
|
||||||
defer stmt.Close()
|
|
||||||
|
|
||||||
for id, hash := range updates {
|
|
||||||
if _, err := stmt.Exec(hash, id); err != nil {
|
|
||||||
return fmt.Errorf("failed to update content_hash for issue %s: %w", id, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Column already exists
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// REMOVED (bd-8e05): getNextIDForPrefix and AllocateNextID - sequential ID generation
|
// REMOVED (bd-8e05): getNextIDForPrefix and AllocateNextID - sequential ID generation
|
||||||
// no longer needed with hash-based IDs
|
// no longer needed with hash-based IDs
|
||||||
|
// Migration functions moved to migrations.go (bd-fc2d)
|
||||||
|
|
||||||
// getNextChildNumber atomically generates the next child number for a parent ID
|
// getNextChildNumber atomically generates the next child number for a parent ID
|
||||||
// Uses the child_counters table for atomic, cross-process child ID generation
|
// Uses the child_counters table for atomic, cross-process child ID generation
|
||||||
func (s *SQLiteStorage) getNextChildNumber(ctx context.Context, parentID string) (int, error) {
|
// Hash ID generation functions moved to hash_ids.go (bd-90a5)
|
||||||
var nextChild int
|
|
||||||
err := s.db.QueryRowContext(ctx, `
|
|
||||||
INSERT INTO child_counters (parent_id, last_child)
|
|
||||||
VALUES (?, 1)
|
|
||||||
ON CONFLICT(parent_id) DO UPDATE SET
|
|
||||||
last_child = last_child + 1
|
|
||||||
RETURNING last_child
|
|
||||||
`, parentID).Scan(&nextChild)
|
|
||||||
if err != nil {
|
|
||||||
return 0, fmt.Errorf("failed to generate next child number for parent %s: %w", parentID, err)
|
|
||||||
}
|
|
||||||
return nextChild, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetNextChildID generates the next hierarchical child ID for a given parent
|
|
||||||
// Returns formatted ID as parentID.{counter} (e.g., bd-a3f8e9.1 or bd-a3f8e9.1.5)
|
|
||||||
// Works at any depth (max 3 levels)
|
|
||||||
func (s *SQLiteStorage) GetNextChildID(ctx context.Context, parentID string) (string, error) {
|
|
||||||
// Validate parent exists
|
|
||||||
var count int
|
|
||||||
err := s.db.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, parentID).Scan(&count)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("failed to check parent existence: %w", err)
|
|
||||||
}
|
|
||||||
if count == 0 {
|
|
||||||
return "", fmt.Errorf("parent issue %s does not exist", parentID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Calculate current depth by counting dots
|
|
||||||
depth := strings.Count(parentID, ".")
|
|
||||||
if depth >= 3 {
|
|
||||||
return "", fmt.Errorf("maximum hierarchy depth (3) exceeded for parent %s", parentID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get next child number atomically
|
|
||||||
nextNum, err := s.getNextChildNumber(ctx, parentID)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format as parentID.counter
|
|
||||||
childID := fmt.Sprintf("%s.%d", parentID, nextNum)
|
|
||||||
return childID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// REMOVED (bd-c7af): SyncAllCounters - no longer needed with hash IDs
|
// REMOVED (bd-c7af): SyncAllCounters - no longer needed with hash IDs
|
||||||
|
|
||||||
@@ -613,43 +149,6 @@ func (s *SQLiteStorage) GetNextChildID(ctx context.Context, parentID string) (st
|
|||||||
// The database should ALWAYS have issue_prefix config set explicitly (by 'bd init' or auto-import)
|
// The database should ALWAYS have issue_prefix config set explicitly (by 'bd init' or auto-import)
|
||||||
// Never derive prefix from filename - it leads to silent data corruption
|
// Never derive prefix from filename - it leads to silent data corruption
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// generateHashID creates a hash-based ID for a top-level issue.
|
|
||||||
// For child issues, use the parent ID with a numeric suffix (e.g., "bd-a3f8e9.1").
|
|
||||||
// Supports adaptive length from 4-8 chars based on database size (bd-ea2a13).
|
|
||||||
// Includes a nonce parameter to handle same-length collisions.
|
|
||||||
func generateHashID(prefix, title, description, creator string, timestamp time.Time, length, nonce int) string {
|
|
||||||
// Combine inputs into a stable content string
|
|
||||||
// Include nonce to handle hash collisions
|
|
||||||
content := fmt.Sprintf("%s|%s|%s|%d|%d", title, description, creator, timestamp.UnixNano(), nonce)
|
|
||||||
|
|
||||||
// Hash the content
|
|
||||||
hash := sha256.Sum256([]byte(content))
|
|
||||||
|
|
||||||
// Use variable length (4-8 hex chars)
|
|
||||||
// length determines how many bytes to use (2, 2.5, 3, 3.5, or 4)
|
|
||||||
var shortHash string
|
|
||||||
switch length {
|
|
||||||
case 4:
|
|
||||||
shortHash = hex.EncodeToString(hash[:2])
|
|
||||||
case 5:
|
|
||||||
// 2.5 bytes: use 3 bytes but take only first 5 chars
|
|
||||||
shortHash = hex.EncodeToString(hash[:3])[:5]
|
|
||||||
case 6:
|
|
||||||
shortHash = hex.EncodeToString(hash[:3])
|
|
||||||
case 7:
|
|
||||||
// 3.5 bytes: use 4 bytes but take only first 7 chars
|
|
||||||
shortHash = hex.EncodeToString(hash[:4])[:7]
|
|
||||||
case 8:
|
|
||||||
shortHash = hex.EncodeToString(hash[:4])
|
|
||||||
default:
|
|
||||||
shortHash = hex.EncodeToString(hash[:3]) // default to 6
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("%s-%s", prefix, shortHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateIssue creates a new issue
|
// CreateIssue creates a new issue
|
||||||
func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error {
|
func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error {
|
||||||
// Validate issue before creating
|
// Validate issue before creating
|
||||||
@@ -831,293 +330,7 @@ func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, act
|
|||||||
}
|
}
|
||||||
|
|
||||||
// validateBatchIssues validates all issues in a batch and sets timestamps
|
// validateBatchIssues validates all issues in a batch and sets timestamps
|
||||||
func validateBatchIssues(issues []*types.Issue) error {
|
// Batch operation functions moved to batch_ops.go (bd-c796)
|
||||||
now := time.Now()
|
|
||||||
for i, issue := range issues {
|
|
||||||
if issue == nil {
|
|
||||||
return fmt.Errorf("issue %d is nil", i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only set timestamps if not already provided
|
|
||||||
if issue.CreatedAt.IsZero() {
|
|
||||||
issue.CreatedAt = now
|
|
||||||
}
|
|
||||||
if issue.UpdatedAt.IsZero() {
|
|
||||||
issue.UpdatedAt = now
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := issue.Validate(); err != nil {
|
|
||||||
return fmt.Errorf("validation failed for issue %d: %w", i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateBatchIDs generates IDs for all issues that need them atomically
|
|
||||||
func generateBatchIDs(ctx context.Context, conn *sql.Conn, issues []*types.Issue, actor string) error {
|
|
||||||
// Get prefix from config (needed for both generation and validation)
|
|
||||||
var prefix string
|
|
||||||
err := conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix)
|
|
||||||
if err == sql.ErrNoRows || prefix == "" {
|
|
||||||
// CRITICAL: Reject operation if issue_prefix config is missing (bd-166)
|
|
||||||
return fmt.Errorf("database not initialized: issue_prefix config is missing (run 'bd init --prefix <prefix>' first)")
|
|
||||||
} else if err != nil {
|
|
||||||
return fmt.Errorf("failed to get config: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate explicitly provided IDs and generate IDs for those that need them
|
|
||||||
expectedPrefix := prefix + "-"
|
|
||||||
usedIDs := make(map[string]bool)
|
|
||||||
|
|
||||||
// First pass: record explicitly provided IDs
|
|
||||||
for i := range issues {
|
|
||||||
if issues[i].ID != "" {
|
|
||||||
// Validate that explicitly provided ID matches the configured prefix (bd-177)
|
|
||||||
if !strings.HasPrefix(issues[i].ID, expectedPrefix) {
|
|
||||||
return fmt.Errorf("issue ID '%s' does not match configured prefix '%s'", issues[i].ID, prefix)
|
|
||||||
}
|
|
||||||
usedIDs[issues[i].ID] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Second pass: generate IDs for issues that need them
|
|
||||||
// Hash mode: generate with adaptive length based on database size (bd-ea2a13)
|
|
||||||
// Get adaptive base length based on current database size
|
|
||||||
baseLength, err := GetAdaptiveIDLength(ctx, conn, prefix)
|
|
||||||
if err != nil {
|
|
||||||
// Fallback to 6 on error
|
|
||||||
baseLength = 6
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try baseLength, baseLength+1, baseLength+2, up to max of 8
|
|
||||||
maxLength := 8
|
|
||||||
if baseLength > maxLength {
|
|
||||||
baseLength = maxLength
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range issues {
|
|
||||||
if issues[i].ID == "" {
|
|
||||||
var generated bool
|
|
||||||
// Try lengths from baseLength to maxLength with progressive fallback
|
|
||||||
for length := baseLength; length <= maxLength && !generated; length++ {
|
|
||||||
for nonce := 0; nonce < 10; nonce++ {
|
|
||||||
candidate := generateHashID(prefix, issues[i].Title, issues[i].Description, actor, issues[i].CreatedAt, length, nonce)
|
|
||||||
|
|
||||||
// Check if this ID is already used in this batch or in the database
|
|
||||||
if usedIDs[candidate] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var count int
|
|
||||||
err := conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, candidate).Scan(&count)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for ID collision: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if count == 0 {
|
|
||||||
issues[i].ID = candidate
|
|
||||||
usedIDs[candidate] = true
|
|
||||||
generated = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !generated {
|
|
||||||
return fmt.Errorf("failed to generate unique ID for issue %d after trying lengths 6-8 with 10 nonces each", i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute content hashes
|
|
||||||
for i := range issues {
|
|
||||||
if issues[i].ContentHash == "" {
|
|
||||||
issues[i].ContentHash = issues[i].ComputeContentHash()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// bulkInsertIssues inserts all issues using a prepared statement
|
|
||||||
func bulkInsertIssues(ctx context.Context, conn *sql.Conn, issues []*types.Issue) error {
|
|
||||||
stmt, err := conn.PrepareContext(ctx, `
|
|
||||||
INSERT INTO issues (
|
|
||||||
id, content_hash, title, description, design, acceptance_criteria, notes,
|
|
||||||
status, priority, issue_type, assignee, estimated_minutes,
|
|
||||||
created_at, updated_at, closed_at, external_ref
|
|
||||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to prepare statement: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = stmt.Close() }()
|
|
||||||
|
|
||||||
for _, issue := range issues {
|
|
||||||
_, err = stmt.ExecContext(ctx,
|
|
||||||
issue.ID, issue.ContentHash, issue.Title, issue.Description, issue.Design,
|
|
||||||
issue.AcceptanceCriteria, issue.Notes, issue.Status,
|
|
||||||
issue.Priority, issue.IssueType, issue.Assignee,
|
|
||||||
issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
|
|
||||||
issue.ClosedAt, issue.ExternalRef,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to insert issue %s: %w", issue.ID, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// bulkRecordEvents records creation events for all issues
|
|
||||||
func bulkRecordEvents(ctx context.Context, conn *sql.Conn, issues []*types.Issue, actor string) error {
|
|
||||||
stmt, err := conn.PrepareContext(ctx, `
|
|
||||||
INSERT INTO events (issue_id, event_type, actor, new_value)
|
|
||||||
VALUES (?, ?, ?, ?)
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to prepare event statement: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = stmt.Close() }()
|
|
||||||
|
|
||||||
for _, issue := range issues {
|
|
||||||
eventData, err := json.Marshal(issue)
|
|
||||||
if err != nil {
|
|
||||||
// Fall back to minimal description if marshaling fails
|
|
||||||
eventData = []byte(fmt.Sprintf(`{"id":"%s","title":"%s"}`, issue.ID, issue.Title))
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = stmt.ExecContext(ctx, issue.ID, types.EventCreated, actor, string(eventData))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to record event for %s: %w", issue.ID, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// bulkMarkDirty marks all issues as dirty for incremental export
|
|
||||||
func bulkMarkDirty(ctx context.Context, conn *sql.Conn, issues []*types.Issue) error {
|
|
||||||
stmt, err := conn.PrepareContext(ctx, `
|
|
||||||
INSERT INTO dirty_issues (issue_id, marked_at)
|
|
||||||
VALUES (?, ?)
|
|
||||||
ON CONFLICT (issue_id) DO UPDATE SET marked_at = excluded.marked_at
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to prepare dirty statement: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = stmt.Close() }()
|
|
||||||
|
|
||||||
dirtyTime := time.Now()
|
|
||||||
for _, issue := range issues {
|
|
||||||
_, err = stmt.ExecContext(ctx, issue.ID, dirtyTime)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to mark dirty %s: %w", issue.ID, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateIssues creates multiple issues atomically in a single transaction.
|
|
||||||
// This provides significant performance improvements over calling CreateIssue in a loop:
|
|
||||||
// - Single connection acquisition
|
|
||||||
// - Single transaction
|
|
||||||
// - Atomic ID range reservation (one counter update for N issues)
|
|
||||||
// - All-or-nothing atomicity
|
|
||||||
//
|
|
||||||
// Expected 5-10x speedup for batches of 10+ issues.
|
|
||||||
// CreateIssues creates multiple issues atomically in a single transaction.
|
|
||||||
//
|
|
||||||
// This method is optimized for bulk issue creation and provides significant
|
|
||||||
// performance improvements over calling CreateIssue in a loop:
|
|
||||||
// - Single database connection and transaction
|
|
||||||
// - Atomic ID range reservation (one counter update for N IDs)
|
|
||||||
// - All-or-nothing semantics (rolls back on any error)
|
|
||||||
// - 5-15x faster than sequential CreateIssue calls
|
|
||||||
//
|
|
||||||
// All issues are validated before any database changes occur. If any issue
|
|
||||||
// fails validation, the entire batch is rejected.
|
|
||||||
//
|
|
||||||
// ID Assignment:
|
|
||||||
// - Issues with empty ID get auto-generated IDs from a reserved range
|
|
||||||
// - Issues with explicit IDs use those IDs (caller must ensure uniqueness)
|
|
||||||
// - Mix of explicit and auto-generated IDs is supported
|
|
||||||
//
|
|
||||||
// Timestamps:
|
|
||||||
// - All issues in the batch receive identical created_at/updated_at timestamps
|
|
||||||
// - This reflects that they were created as a single atomic operation
|
|
||||||
//
|
|
||||||
// Usage:
|
|
||||||
// // Bulk import from external source
|
|
||||||
// issues := []*types.Issue{...}
|
|
||||||
// if err := store.CreateIssues(ctx, issues, "import"); err != nil {
|
|
||||||
// return err
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// // After importing with explicit IDs, sync counters to prevent collisions
|
|
||||||
// REMOVED (bd-c7af): SyncAllCounters example - no longer needed with hash IDs
|
|
||||||
//
|
|
||||||
// Performance:
|
|
||||||
// - 100 issues: ~30ms (vs ~900ms with CreateIssue loop)
|
|
||||||
// - 1000 issues: ~950ms (vs estimated 9s with CreateIssue loop)
|
|
||||||
//
|
|
||||||
// When to use:
|
|
||||||
// - Bulk imports from external systems (use CreateIssues)
|
|
||||||
// - Creating multiple related issues at once (use CreateIssues)
|
|
||||||
// - Single issue creation (use CreateIssue for simplicity)
|
|
||||||
// - Interactive user operations (use CreateIssue)
|
|
||||||
func (s *SQLiteStorage) CreateIssues(ctx context.Context, issues []*types.Issue, actor string) error {
|
|
||||||
if len(issues) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 1: Validate all issues first (fail-fast)
|
|
||||||
if err := validateBatchIssues(issues); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 2: Acquire connection and start transaction
|
|
||||||
conn, err := s.db.Conn(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to acquire connection: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = conn.Close() }()
|
|
||||||
|
|
||||||
if _, err := conn.ExecContext(ctx, "BEGIN IMMEDIATE"); err != nil {
|
|
||||||
return fmt.Errorf("failed to begin immediate transaction: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
committed := false
|
|
||||||
defer func() {
|
|
||||||
if !committed {
|
|
||||||
_, _ = conn.ExecContext(context.Background(), "ROLLBACK")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Phase 3: Generate IDs for issues that need them
|
|
||||||
if err := generateBatchIDs(ctx, conn, issues, actor); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 4: Bulk insert issues
|
|
||||||
if err := bulkInsertIssues(ctx, conn, issues); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 5: Record creation events
|
|
||||||
if err := bulkRecordEvents(ctx, conn, issues, actor); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 6: Mark issues dirty for incremental export
|
|
||||||
if err := bulkMarkDirty(ctx, conn, issues); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Phase 7: Commit transaction
|
|
||||||
if _, err := conn.ExecContext(ctx, "COMMIT"); err != nil {
|
|
||||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
|
||||||
}
|
|
||||||
committed = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetIssue retrieves an issue by ID
|
// GetIssue retrieves an issue by ID
|
||||||
func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue, error) {
|
func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue, error) {
|
||||||
@@ -1205,71 +418,7 @@ var allowedUpdateFields = map[string]bool{
|
|||||||
}
|
}
|
||||||
|
|
||||||
// validatePriority validates a priority value
|
// validatePriority validates a priority value
|
||||||
func validatePriority(value interface{}) error {
|
// Validation functions moved to validators.go (bd-d9e0)
|
||||||
if priority, ok := value.(int); ok {
|
|
||||||
if priority < 0 || priority > 4 {
|
|
||||||
return fmt.Errorf("priority must be between 0 and 4 (got %d)", priority)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateStatus validates a status value
|
|
||||||
func validateStatus(value interface{}) error {
|
|
||||||
if status, ok := value.(string); ok {
|
|
||||||
if !types.Status(status).IsValid() {
|
|
||||||
return fmt.Errorf("invalid status: %s", status)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateIssueType validates an issue type value
|
|
||||||
func validateIssueType(value interface{}) error {
|
|
||||||
if issueType, ok := value.(string); ok {
|
|
||||||
if !types.IssueType(issueType).IsValid() {
|
|
||||||
return fmt.Errorf("invalid issue type: %s", issueType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTitle validates a title value
|
|
||||||
func validateTitle(value interface{}) error {
|
|
||||||
if title, ok := value.(string); ok {
|
|
||||||
if len(title) == 0 || len(title) > 500 {
|
|
||||||
return fmt.Errorf("title must be 1-500 characters")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateEstimatedMinutes validates an estimated_minutes value
|
|
||||||
func validateEstimatedMinutes(value interface{}) error {
|
|
||||||
if mins, ok := value.(int); ok {
|
|
||||||
if mins < 0 {
|
|
||||||
return fmt.Errorf("estimated_minutes cannot be negative")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fieldValidators maps field names to their validation functions
|
|
||||||
var fieldValidators = map[string]func(interface{}) error{
|
|
||||||
"priority": validatePriority,
|
|
||||||
"status": validateStatus,
|
|
||||||
"issue_type": validateIssueType,
|
|
||||||
"title": validateTitle,
|
|
||||||
"estimated_minutes": validateEstimatedMinutes,
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateFieldUpdate validates a field update value
|
|
||||||
func validateFieldUpdate(key string, value interface{}) error {
|
|
||||||
if validator, ok := fieldValidators[key]; ok {
|
|
||||||
return validator(value)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// determineEventType determines the event type for an update based on old and new status
|
// determineEventType determines the event type for an update based on old and new status
|
||||||
func determineEventType(oldIssue *types.Issue, updates map[string]interface{}) types.EventType {
|
func determineEventType(oldIssue *types.Issue, updates map[string]interface{}) types.EventType {
|
||||||
|
|||||||
74
internal/storage/sqlite/validators.go
Normal file
74
internal/storage/sqlite/validators.go
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
package sqlite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// validatePriority validates a priority value.
// Values outside 0-4 are rejected; non-int values are accepted unchanged
// (type mismatches are handled elsewhere).
func validatePriority(value interface{}) error {
	priority, isInt := value.(int)
	if !isInt {
		return nil
	}
	if priority >= 0 && priority <= 4 {
		return nil
	}
	return fmt.Errorf("priority must be between 0 and 4 (got %d)", priority)
}
|
||||||
|
|
||||||
|
// validateStatus validates a status value
|
||||||
|
func validateStatus(value interface{}) error {
|
||||||
|
if status, ok := value.(string); ok {
|
||||||
|
if !types.Status(status).IsValid() {
|
||||||
|
return fmt.Errorf("invalid status: %s", status)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateIssueType validates an issue type value
|
||||||
|
func validateIssueType(value interface{}) error {
|
||||||
|
if issueType, ok := value.(string); ok {
|
||||||
|
if !types.IssueType(issueType).IsValid() {
|
||||||
|
return fmt.Errorf("invalid issue type: %s", issueType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateTitle validates a title value: it must be 1-500 bytes long.
// Non-string values are accepted unchanged (type mismatches are handled
// elsewhere).
func validateTitle(value interface{}) error {
	title, isString := value.(string)
	if !isString {
		return nil
	}
	if n := len(title); n == 0 || n > 500 {
		return fmt.Errorf("title must be 1-500 characters")
	}
	return nil
}
|
||||||
|
|
||||||
|
// validateEstimatedMinutes validates an estimated_minutes value: negative
// estimates are rejected, zero is allowed. Non-int values are accepted
// unchanged (type mismatches are handled elsewhere).
func validateEstimatedMinutes(value interface{}) error {
	mins, isInt := value.(int)
	if !isInt {
		return nil
	}
	if mins < 0 {
		return fmt.Errorf("estimated_minutes cannot be negative")
	}
	return nil
}
|
||||||
|
|
||||||
|
// fieldValidators maps field names to their validation functions.
// Fields not present in this map are accepted without validation
// (see validateFieldUpdate).
var fieldValidators = map[string]func(interface{}) error{
	"priority":          validatePriority,
	"status":            validateStatus,
	"issue_type":        validateIssueType,
	"title":             validateTitle,
	"estimated_minutes": validateEstimatedMinutes,
}
|
||||||
|
|
||||||
|
// validateFieldUpdate validates a field update value
|
||||||
|
func validateFieldUpdate(key string, value interface{}) error {
|
||||||
|
if validator, ok := fieldValidators[key]; ok {
|
||||||
|
return validator(value)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
151
internal/storage/sqlite/validators_test.go
Normal file
151
internal/storage/sqlite/validators_test.go
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
package sqlite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestValidatePriority covers the full valid range (0-4), both
// out-of-range directions, and a non-int value, which validatePriority
// deliberately ignores.
func TestValidatePriority(t *testing.T) {
	tests := []struct {
		name    string
		value   interface{}
		wantErr bool
	}{
		{"valid priority 0", 0, false},
		{"valid priority 1", 1, false},
		{"valid priority 2", 2, false},
		{"valid priority 3", 3, false},
		{"valid priority 4", 4, false},
		{"invalid negative", -1, true},
		{"invalid too high", 5, true},
		{"non-int ignored", "not an int", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validatePriority(tt.value)
			if (err != nil) != tt.wantErr {
				t.Errorf("validatePriority() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestValidateStatus covers every valid status constant, one invalid
// string, and a non-int value, which validateStatus deliberately ignores.
func TestValidateStatus(t *testing.T) {
	tests := []struct {
		name    string
		value   interface{}
		wantErr bool
	}{
		{"valid open", string(types.StatusOpen), false},
		{"valid in_progress", string(types.StatusInProgress), false},
		{"valid blocked", string(types.StatusBlocked), false},
		{"valid closed", string(types.StatusClosed), false},
		{"invalid status", "invalid", true},
		{"non-string ignored", 123, false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateStatus(tt.value)
			if (err != nil) != tt.wantErr {
				t.Errorf("validateStatus() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestValidateIssueType covers every valid issue-type constant, one
// invalid string, and a non-string value, which validateIssueType
// deliberately ignores.
func TestValidateIssueType(t *testing.T) {
	tests := []struct {
		name    string
		value   interface{}
		wantErr bool
	}{
		{"valid bug", string(types.TypeBug), false},
		{"valid feature", string(types.TypeFeature), false},
		{"valid task", string(types.TypeTask), false},
		{"valid epic", string(types.TypeEpic), false},
		{"valid chore", string(types.TypeChore), false},
		{"invalid type", "invalid", true},
		{"non-string ignored", 123, false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateIssueType(tt.value)
			if (err != nil) != tt.wantErr {
				t.Errorf("validateIssueType() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestValidateTitle exercises the 1-500 length boundary on both sides
// (empty, exactly 500, 501) plus a non-string value, which validateTitle
// deliberately ignores. Note: lengths are in bytes, not runes.
func TestValidateTitle(t *testing.T) {
	tests := []struct {
		name    string
		value   interface{}
		wantErr bool
	}{
		{"valid title", "Valid Title", false},
		{"empty title", "", true},
		{"max length title", string(make([]byte, 500)), false},
		{"too long title", string(make([]byte, 501)), true},
		{"non-string ignored", 123, false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateTitle(tt.value)
			if (err != nil) != tt.wantErr {
				t.Errorf("validateTitle() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestValidateEstimatedMinutes checks the zero boundary, a positive value,
// a negative value (rejected), and a non-int value, which
// validateEstimatedMinutes deliberately ignores.
func TestValidateEstimatedMinutes(t *testing.T) {
	tests := []struct {
		name    string
		value   interface{}
		wantErr bool
	}{
		{"valid zero", 0, false},
		{"valid positive", 60, false},
		{"invalid negative", -1, true},
		{"non-int ignored", "not an int", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateEstimatedMinutes(tt.value)
			if (err != nil) != tt.wantErr {
				t.Errorf("validateEstimatedMinutes() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestValidateFieldUpdate verifies dispatch through the fieldValidators
// map: registered fields are validated, and unknown fields pass with any
// value.
func TestValidateFieldUpdate(t *testing.T) {
	tests := []struct {
		name    string
		key     string
		value   interface{}
		wantErr bool
	}{
		{"valid priority", "priority", 1, false},
		{"invalid priority", "priority", 5, true},
		{"valid status", "status", string(types.StatusOpen), false},
		{"invalid status", "status", "invalid", true},
		{"unknown field", "unknown_field", "any value", false},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateFieldUpdate(tt.key, tt.value)
			if (err != nil) != tt.wantErr {
				t.Errorf("validateFieldUpdate() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
||||||
Reference in New Issue
Block a user