Extract SQLite migrations into separate files (bd-fb95094c.7)
- Created migrations/ subdirectory with 14 individual migration files - Reduced migrations.go from 680 to 98 lines (orchestration only) - Updated test imports to use migrations package - Updated MULTI_REPO_HYDRATION.md documentation - All tests passing
This commit is contained in:
@@ -47,7 +47,7 @@ repos:
|
|||||||
|
|
||||||
**Modified Files:**
|
**Modified Files:**
|
||||||
- `internal/storage/sqlite/schema.go` - Added `repo_mtimes` table
|
- `internal/storage/sqlite/schema.go` - Added `repo_mtimes` table
|
||||||
- `internal/storage/sqlite/migrations.go` - Added migration for `repo_mtimes`
|
- `internal/storage/sqlite/migrations/013_repo_mtimes_table.go` - Migration for `repo_mtimes` table
|
||||||
- `internal/storage/sqlite/sqlite.go` - Integrated hydration into storage initialization
|
- `internal/storage/sqlite/sqlite.go` - Integrated hydration into storage initialization
|
||||||
- `internal/storage/sqlite/ready.go` - Added `source_repo` to all SELECT queries
|
- `internal/storage/sqlite/ready.go` - Added `source_repo` to all SELECT queries
|
||||||
- `internal/storage/sqlite/labels.go` - Added `source_repo` to SELECT query
|
- `internal/storage/sqlite/labels.go` - Added `source_repo` to SELECT query
|
||||||
@@ -239,8 +239,8 @@ repos:
|
|||||||
The `repo_mtimes` table is created via standard migration system:
|
The `repo_mtimes` table is created via standard migration system:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
// internal/storage/sqlite/migrations.go
|
// internal/storage/sqlite/migrations/013_repo_mtimes_table.go
|
||||||
func migrateRepoMtimesTable(db *sql.DB) error {
|
func MigrateRepoMtimesTable(db *sql.DB) error {
|
||||||
// Check if table exists
|
// Check if table exists
|
||||||
var tableName string
|
var tableName string
|
||||||
err := db.QueryRow(`
|
err := db.QueryRow(`
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/storage/sqlite/migrations"
|
||||||
"github.com/steveyegge/beads/internal/types"
|
"github.com/steveyegge/beads/internal/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -51,7 +52,7 @@ func TestMigrateChildCountersTable(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
err = migrateChildCountersTable(db)
|
err = migrations.MigrateChildCountersTable(db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("migration failed: %v", err)
|
t.Fatalf("migration failed: %v", err)
|
||||||
}
|
}
|
||||||
@@ -77,12 +78,12 @@ func TestMigrateChildCountersTable(t *testing.T) {
|
|||||||
db := s.db
|
db := s.db
|
||||||
|
|
||||||
// Run migration twice
|
// Run migration twice
|
||||||
err := migrateChildCountersTable(db)
|
err := migrations.MigrateChildCountersTable(db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("first migration failed: %v", err)
|
t.Fatalf("first migration failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = migrateChildCountersTable(db)
|
err = migrations.MigrateChildCountersTable(db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("second migration failed (not idempotent): %v", err)
|
t.Fatalf("second migration failed (not idempotent): %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,9 +4,8 @@ package sqlite
|
|||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/steveyegge/beads/internal/types"
|
"github.com/steveyegge/beads/internal/storage/sqlite/migrations"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Migration represents a single database migration
|
// Migration represents a single database migration
|
||||||
@@ -17,21 +16,21 @@ type Migration struct {
|
|||||||
|
|
||||||
// migrations is the ordered list of all migrations to run
|
// migrations is the ordered list of all migrations to run
|
||||||
// Migrations are run in order during database initialization
|
// Migrations are run in order during database initialization
|
||||||
var migrations = []Migration{
|
var migrationsList = []Migration{
|
||||||
{"dirty_issues_table", migrateDirtyIssuesTable},
|
{"dirty_issues_table", migrations.MigrateDirtyIssuesTable},
|
||||||
{"external_ref_column", migrateExternalRefColumn},
|
{"external_ref_column", migrations.MigrateExternalRefColumn},
|
||||||
{"composite_indexes", migrateCompositeIndexes},
|
{"composite_indexes", migrations.MigrateCompositeIndexes},
|
||||||
{"closed_at_constraint", migrateClosedAtConstraint},
|
{"closed_at_constraint", migrations.MigrateClosedAtConstraint},
|
||||||
{"compaction_columns", migrateCompactionColumns},
|
{"compaction_columns", migrations.MigrateCompactionColumns},
|
||||||
{"snapshots_table", migrateSnapshotsTable},
|
{"snapshots_table", migrations.MigrateSnapshotsTable},
|
||||||
{"compaction_config", migrateCompactionConfig},
|
{"compaction_config", migrations.MigrateCompactionConfig},
|
||||||
{"compacted_at_commit_column", migrateCompactedAtCommitColumn},
|
{"compacted_at_commit_column", migrations.MigrateCompactedAtCommitColumn},
|
||||||
{"export_hashes_table", migrateExportHashesTable},
|
{"export_hashes_table", migrations.MigrateExportHashesTable},
|
||||||
{"content_hash_column", migrateContentHashColumn},
|
{"content_hash_column", migrations.MigrateContentHashColumn},
|
||||||
{"external_ref_unique", migrateExternalRefUnique},
|
{"external_ref_unique", migrations.MigrateExternalRefUnique},
|
||||||
{"source_repo_column", migrateSourceRepoColumn},
|
{"source_repo_column", migrations.MigrateSourceRepoColumn},
|
||||||
{"repo_mtimes_table", migrateRepoMtimesTable},
|
{"repo_mtimes_table", migrations.MigrateRepoMtimesTable},
|
||||||
{"child_counters_table", migrateChildCountersTable},
|
{"child_counters_table", migrations.MigrateChildCountersTable},
|
||||||
}
|
}
|
||||||
|
|
||||||
// MigrationInfo contains metadata about a migration for inspection
|
// MigrationInfo contains metadata about a migration for inspection
|
||||||
@@ -43,8 +42,8 @@ type MigrationInfo struct {
|
|||||||
// ListMigrations returns list of all registered migrations with descriptions
|
// ListMigrations returns list of all registered migrations with descriptions
|
||||||
// Note: This returns ALL registered migrations, not just pending ones (all are idempotent)
|
// Note: This returns ALL registered migrations, not just pending ones (all are idempotent)
|
||||||
func ListMigrations() []MigrationInfo {
|
func ListMigrations() []MigrationInfo {
|
||||||
result := make([]MigrationInfo, len(migrations))
|
result := make([]MigrationInfo, len(migrationsList))
|
||||||
for i, m := range migrations {
|
for i, m := range migrationsList {
|
||||||
result[i] = MigrationInfo{
|
result[i] = MigrationInfo{
|
||||||
Name: m.Name,
|
Name: m.Name,
|
||||||
Description: getMigrationDescription(m.Name),
|
Description: getMigrationDescription(m.Name),
|
||||||
@@ -80,601 +79,20 @@ func getMigrationDescription(name string) string {
|
|||||||
|
|
||||||
// RunMigrations executes all registered migrations in order with invariant checking
|
// RunMigrations executes all registered migrations in order with invariant checking
|
||||||
func RunMigrations(db *sql.DB) error {
|
func RunMigrations(db *sql.DB) error {
|
||||||
// Capture pre-migration snapshot for validation
|
|
||||||
snapshot, err := captureSnapshot(db)
|
snapshot, err := captureSnapshot(db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to capture pre-migration snapshot: %w", err)
|
return fmt.Errorf("failed to capture pre-migration snapshot: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run migrations (they are already idempotent)
|
for _, migration := range migrationsList {
|
||||||
for _, migration := range migrations {
|
|
||||||
if err := migration.Func(db); err != nil {
|
if err := migration.Func(db); err != nil {
|
||||||
return fmt.Errorf("migration %s failed: %w", migration.Name, err)
|
return fmt.Errorf("migration %s failed: %w", migration.Name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify invariants after migrations complete
|
|
||||||
if err := verifyInvariants(db, snapshot); err != nil {
|
if err := verifyInvariants(db, snapshot); err != nil {
|
||||||
return fmt.Errorf("post-migration validation failed: %w", err)
|
return fmt.Errorf("post-migration validation failed: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func migrateDirtyIssuesTable(db *sql.DB) error {
|
|
||||||
// Check if dirty_issues table exists
|
|
||||||
var tableName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='dirty_issues'
|
|
||||||
`).Scan(&tableName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Table doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE TABLE dirty_issues (
|
|
||||||
issue_id TEXT PRIMARY KEY,
|
|
||||||
marked_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
|
||||||
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
|
|
||||||
);
|
|
||||||
CREATE INDEX idx_dirty_issues_marked_at ON dirty_issues(marked_at);
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create dirty_issues table: %w", err)
|
|
||||||
}
|
|
||||||
// Table created successfully - no need to log, happens silently
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for dirty_issues table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Table exists, check if content_hash column exists (migration for bd-164)
|
|
||||||
var hasContentHash bool
|
|
||||||
err = db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0 FROM pragma_table_info('dirty_issues')
|
|
||||||
WHERE name = 'content_hash'
|
|
||||||
`).Scan(&hasContentHash)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasContentHash {
|
|
||||||
// Add content_hash column to existing table
|
|
||||||
_, err = db.Exec(`ALTER TABLE dirty_issues ADD COLUMN content_hash TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateExternalRefColumn checks if the external_ref column exists and adds it if missing.
|
|
||||||
// This ensures existing databases created before the external reference feature get migrated automatically.
|
|
||||||
func migrateExternalRefColumn(db *sql.DB) error {
|
|
||||||
// Check if external_ref column exists
|
|
||||||
var columnExists bool
|
|
||||||
rows, err := db.Query("PRAGMA table_info(issues)")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check schema: %w", err)
|
|
||||||
}
|
|
||||||
defer func() { _ = rows.Close() }()
|
|
||||||
|
|
||||||
for rows.Next() {
|
|
||||||
var cid int
|
|
||||||
var name, typ string
|
|
||||||
var notnull, pk int
|
|
||||||
var dflt *string
|
|
||||||
err := rows.Scan(&cid, &name, &typ, ¬null, &dflt, &pk)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to scan column info: %w", err)
|
|
||||||
}
|
|
||||||
if name == "external_ref" {
|
|
||||||
columnExists = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := rows.Err(); err != nil {
|
|
||||||
return fmt.Errorf("error reading column info: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !columnExists {
|
|
||||||
// Add external_ref column
|
|
||||||
_, err := db.Exec(`ALTER TABLE issues ADD COLUMN external_ref TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add external_ref column: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompositeIndexes checks if composite indexes exist and creates them if missing.
|
|
||||||
// This ensures existing databases get performance optimizations from new indexes.
|
|
||||||
func migrateCompositeIndexes(db *sql.DB) error {
|
|
||||||
// Check if idx_dependencies_depends_on_type exists
|
|
||||||
var indexName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='index' AND name='idx_dependencies_depends_on_type'
|
|
||||||
`).Scan(&indexName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Index doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE INDEX idx_dependencies_depends_on_type ON dependencies(depends_on_id, type)
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create composite index idx_dependencies_depends_on_type: %w", err)
|
|
||||||
}
|
|
||||||
// Index created successfully
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for composite index: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Index exists, no migration needed
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateClosedAtConstraint cleans up inconsistent status/closed_at data.
|
|
||||||
// The CHECK constraint is in the schema for new databases, but we can't easily
|
|
||||||
// add it to existing tables without recreating them. Instead, we clean the data
|
|
||||||
// and rely on application code (UpdateIssue, import.go) to maintain the invariant.
|
|
||||||
func migrateClosedAtConstraint(db *sql.DB) error {
|
|
||||||
// Check if there are any inconsistent rows
|
|
||||||
var count int
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*)
|
|
||||||
FROM issues
|
|
||||||
WHERE (CASE WHEN status = 'closed' THEN 1 ELSE 0 END) <>
|
|
||||||
(CASE WHEN closed_at IS NOT NULL THEN 1 ELSE 0 END)
|
|
||||||
`).Scan(&count)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to count inconsistent issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if count == 0 {
|
|
||||||
// No inconsistent data, nothing to do
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean inconsistent data: trust the status field
|
|
||||||
// Strategy: If status != 'closed' but closed_at is set, clear closed_at
|
|
||||||
// If status = 'closed' but closed_at is not set, set it to updated_at (best guess)
|
|
||||||
_, err = db.Exec(`
|
|
||||||
UPDATE issues
|
|
||||||
SET closed_at = NULL
|
|
||||||
WHERE status != 'closed' AND closed_at IS NOT NULL
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to clear closed_at for non-closed issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.Exec(`
|
|
||||||
UPDATE issues
|
|
||||||
SET closed_at = COALESCE(updated_at, CURRENT_TIMESTAMP)
|
|
||||||
WHERE status = 'closed' AND closed_at IS NULL
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set closed_at for closed issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Migration complete - data is now consistent
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompactionColumns adds compaction_level, compacted_at, and original_size columns to the issues table.
|
|
||||||
// This migration is idempotent and safe to run multiple times.
|
|
||||||
func migrateCompactionColumns(db *sql.DB) error {
|
|
||||||
// Check if compaction_level column exists
|
|
||||||
var columnExists bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM pragma_table_info('issues')
|
|
||||||
WHERE name = 'compaction_level'
|
|
||||||
`).Scan(&columnExists)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check compaction_level column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if columnExists {
|
|
||||||
// Columns already exist, nothing to do
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add the three compaction columns
|
|
||||||
_, err = db.Exec(`
|
|
||||||
ALTER TABLE issues ADD COLUMN compaction_level INTEGER DEFAULT 0;
|
|
||||||
ALTER TABLE issues ADD COLUMN compacted_at DATETIME;
|
|
||||||
ALTER TABLE issues ADD COLUMN original_size INTEGER;
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add compaction columns: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateSnapshotsTable creates the issue_snapshots table if it doesn't exist.
|
|
||||||
// This migration is idempotent and safe to run multiple times.
|
|
||||||
func migrateSnapshotsTable(db *sql.DB) error {
|
|
||||||
// Check if issue_snapshots table exists
|
|
||||||
var tableExists bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='issue_snapshots'
|
|
||||||
`).Scan(&tableExists)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check issue_snapshots table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if tableExists {
|
|
||||||
// Table already exists, nothing to do
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the table and indexes
|
|
||||||
_, err = db.Exec(`
|
|
||||||
CREATE TABLE issue_snapshots (
|
|
||||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
|
||||||
issue_id TEXT NOT NULL,
|
|
||||||
snapshot_time DATETIME NOT NULL,
|
|
||||||
compaction_level INTEGER NOT NULL,
|
|
||||||
original_size INTEGER NOT NULL,
|
|
||||||
compressed_size INTEGER NOT NULL,
|
|
||||||
original_content TEXT NOT NULL,
|
|
||||||
archived_events TEXT,
|
|
||||||
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
|
|
||||||
);
|
|
||||||
CREATE INDEX idx_snapshots_issue ON issue_snapshots(issue_id);
|
|
||||||
CREATE INDEX idx_snapshots_level ON issue_snapshots(compaction_level);
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create issue_snapshots table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompactionConfig adds default compaction configuration values.
|
|
||||||
// This migration is idempotent and safe to run multiple times (INSERT OR IGNORE).
|
|
||||||
func migrateCompactionConfig(db *sql.DB) error {
|
|
||||||
_, err := db.Exec(`
|
|
||||||
INSERT OR IGNORE INTO config (key, value) VALUES
|
|
||||||
('compaction_enabled', 'false'),
|
|
||||||
('compact_tier1_days', '30'),
|
|
||||||
('compact_tier1_dep_levels', '2'),
|
|
||||||
('compact_tier2_days', '90'),
|
|
||||||
('compact_tier2_dep_levels', '5'),
|
|
||||||
('compact_tier2_commits', '100'),
|
|
||||||
('compact_model', 'claude-3-5-haiku-20241022'),
|
|
||||||
('compact_batch_size', '50'),
|
|
||||||
('compact_parallel_workers', '5'),
|
|
||||||
('auto_compact_enabled', 'false')
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add compaction config defaults: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateCompactedAtCommitColumn adds compacted_at_commit column to the issues table.
|
|
||||||
// This migration is idempotent and safe to run multiple times.
|
|
||||||
func migrateCompactedAtCommitColumn(db *sql.DB) error {
|
|
||||||
var columnExists bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM pragma_table_info('issues')
|
|
||||||
WHERE name = 'compacted_at_commit'
|
|
||||||
`).Scan(&columnExists)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check compacted_at_commit column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if columnExists {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.Exec(`ALTER TABLE issues ADD COLUMN compacted_at_commit TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add compacted_at_commit column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateExportHashesTable ensures the export_hashes table exists for timestamp-only dedup (bd-164)
|
|
||||||
func migrateExportHashesTable(db *sql.DB) error {
|
|
||||||
// Check if export_hashes table exists
|
|
||||||
var tableName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='export_hashes'
|
|
||||||
`).Scan(&tableName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Table doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE TABLE export_hashes (
|
|
||||||
issue_id TEXT PRIMARY KEY,
|
|
||||||
content_hash TEXT NOT NULL,
|
|
||||||
exported_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
|
||||||
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
|
|
||||||
)
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create export_hashes table: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check export_hashes table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Table already exists
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateContentHashColumn adds the content_hash column to the issues table if missing (bd-95).
|
|
||||||
// This enables global N-way collision resolution by providing content-addressable identity.
|
|
||||||
func migrateContentHashColumn(db *sql.DB) error {
|
|
||||||
// Check if content_hash column exists
|
|
||||||
var colName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM pragma_table_info('issues')
|
|
||||||
WHERE name = 'content_hash'
|
|
||||||
`).Scan(&colName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Column doesn't exist, add it
|
|
||||||
_, err := db.Exec(`ALTER TABLE issues ADD COLUMN content_hash TEXT`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create index on content_hash for fast lookups
|
|
||||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_content_hash ON issues(content_hash)`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create content_hash index: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Populate content_hash for all existing issues
|
|
||||||
rows, err := db.Query(`
|
|
||||||
SELECT id, title, description, design, acceptance_criteria, notes,
|
|
||||||
status, priority, issue_type, assignee, external_ref
|
|
||||||
FROM issues
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to query existing issues: %w", err)
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
// Collect issues and compute hashes
|
|
||||||
updates := make(map[string]string) // id -> content_hash
|
|
||||||
for rows.Next() {
|
|
||||||
var issue types.Issue
|
|
||||||
var assignee sql.NullString
|
|
||||||
var externalRef sql.NullString
|
|
||||||
err := rows.Scan(
|
|
||||||
&issue.ID, &issue.Title, &issue.Description, &issue.Design,
|
|
||||||
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
|
||||||
&issue.Priority, &issue.IssueType, &assignee, &externalRef,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to scan issue: %w", err)
|
|
||||||
}
|
|
||||||
if assignee.Valid {
|
|
||||||
issue.Assignee = assignee.String
|
|
||||||
}
|
|
||||||
if externalRef.Valid {
|
|
||||||
issue.ExternalRef = &externalRef.String
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute and store hash
|
|
||||||
updates[issue.ID] = issue.ComputeContentHash()
|
|
||||||
}
|
|
||||||
if err := rows.Err(); err != nil {
|
|
||||||
return fmt.Errorf("error iterating issues: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply hash updates in batch
|
|
||||||
tx, err := db.Begin()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to begin transaction: %w", err)
|
|
||||||
}
|
|
||||||
defer tx.Rollback()
|
|
||||||
|
|
||||||
stmt, err := tx.Prepare(`UPDATE issues SET content_hash = ? WHERE id = ?`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to prepare update statement: %w", err)
|
|
||||||
}
|
|
||||||
defer stmt.Close()
|
|
||||||
|
|
||||||
for id, hash := range updates {
|
|
||||||
if _, err := stmt.Exec(hash, id); err != nil {
|
|
||||||
return fmt.Errorf("failed to update content_hash for issue %s: %w", id, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
|
||||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check content_hash column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Column already exists
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func migrateExternalRefUnique(db *sql.DB) error {
|
|
||||||
var hasConstraint bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM sqlite_master
|
|
||||||
WHERE type = 'index'
|
|
||||||
AND name = 'idx_issues_external_ref_unique'
|
|
||||||
`).Scan(&hasConstraint)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for UNIQUE constraint: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasConstraint {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
existingDuplicates, err := findExternalRefDuplicates(db)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for duplicate external_ref values: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(existingDuplicates) > 0 {
|
|
||||||
return fmt.Errorf("cannot add UNIQUE constraint: found %d duplicate external_ref values (resolve with 'bd duplicates' or manually)", len(existingDuplicates))
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_issues_external_ref_unique ON issues(external_ref) WHERE external_ref IS NOT NULL`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create UNIQUE index on external_ref: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func findExternalRefDuplicates(db *sql.DB) (map[string][]string, error) {
|
|
||||||
rows, err := db.Query(`
|
|
||||||
SELECT external_ref, GROUP_CONCAT(id, ',') as ids
|
|
||||||
FROM issues
|
|
||||||
WHERE external_ref IS NOT NULL
|
|
||||||
GROUP BY external_ref
|
|
||||||
HAVING COUNT(*) > 1
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
duplicates := make(map[string][]string)
|
|
||||||
for rows.Next() {
|
|
||||||
var externalRef, idsCSV string
|
|
||||||
if err := rows.Scan(&externalRef, &idsCSV); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ids := strings.Split(idsCSV, ",")
|
|
||||||
duplicates[externalRef] = ids
|
|
||||||
}
|
|
||||||
|
|
||||||
return duplicates, rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateSourceRepoColumn adds source_repo column for multi-repo support (bd-307).
|
|
||||||
// Defaults to "." (primary repo) for backward compatibility with existing issues.
|
|
||||||
func migrateSourceRepoColumn(db *sql.DB) error {
|
|
||||||
// Check if source_repo column exists
|
|
||||||
var columnExists bool
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT COUNT(*) > 0
|
|
||||||
FROM pragma_table_info('issues')
|
|
||||||
WHERE name = 'source_repo'
|
|
||||||
`).Scan(&columnExists)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check source_repo column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if columnExists {
|
|
||||||
// Column already exists
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add source_repo column with default "." (primary repo)
|
|
||||||
_, err = db.Exec(`ALTER TABLE issues ADD COLUMN source_repo TEXT DEFAULT '.'`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to add source_repo column: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create index on source_repo for efficient filtering
|
|
||||||
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_source_repo ON issues(source_repo)`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create source_repo index: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateRepoMtimesTable creates the repo_mtimes table for multi-repo hydration caching (bd-307)
|
|
||||||
func migrateRepoMtimesTable(db *sql.DB) error {
|
|
||||||
// Check if repo_mtimes table exists
|
|
||||||
var tableName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='repo_mtimes'
|
|
||||||
`).Scan(&tableName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Table doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE TABLE repo_mtimes (
|
|
||||||
repo_path TEXT PRIMARY KEY,
|
|
||||||
jsonl_path TEXT NOT NULL,
|
|
||||||
mtime_ns INTEGER NOT NULL,
|
|
||||||
last_checked DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
|
|
||||||
);
|
|
||||||
CREATE INDEX idx_repo_mtimes_checked ON repo_mtimes(last_checked);
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create repo_mtimes table: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for repo_mtimes table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Table already exists
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// migrateChildCountersTable creates the child_counters table for hierarchical ID generation (bd-bb08)
|
|
||||||
func migrateChildCountersTable(db *sql.DB) error {
|
|
||||||
// Check if child_counters table exists
|
|
||||||
var tableName string
|
|
||||||
err := db.QueryRow(`
|
|
||||||
SELECT name FROM sqlite_master
|
|
||||||
WHERE type='table' AND name='child_counters'
|
|
||||||
`).Scan(&tableName)
|
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
// Table doesn't exist, create it
|
|
||||||
_, err := db.Exec(`
|
|
||||||
CREATE TABLE child_counters (
|
|
||||||
parent_id TEXT PRIMARY KEY,
|
|
||||||
last_child INTEGER NOT NULL DEFAULT 0,
|
|
||||||
FOREIGN KEY (parent_id) REFERENCES issues(id) ON DELETE CASCADE
|
|
||||||
)
|
|
||||||
`)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create child_counters table: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to check for child_counters table: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Table already exists
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|||||||
52
internal/storage/sqlite/migrations/001_dirty_issues_table.go
Normal file
52
internal/storage/sqlite/migrations/001_dirty_issues_table.go
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"errors"
	"fmt"
)

// MigrateDirtyIssuesTable ensures the dirty_issues table exists and carries
// the content_hash column. The migration is idempotent and safe to re-run.
//
// Fix: the original returned immediately after creating a missing table, so a
// freshly created dirty_issues table lacked content_hash until the migration
// ran a second time. The column check now always runs after table creation.
func MigrateDirtyIssuesTable(db *sql.DB) error {
	var tableName string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='table' AND name='dirty_issues'
	`).Scan(&tableName)

	switch {
	case errors.Is(err, sql.ErrNoRows):
		// Table doesn't exist yet: create it along with its lookup index.
		if _, err := db.Exec(`
			CREATE TABLE dirty_issues (
				issue_id TEXT PRIMARY KEY,
				marked_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
				FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
			);
			CREATE INDEX idx_dirty_issues_marked_at ON dirty_issues(marked_at);
		`); err != nil {
			return fmt.Errorf("failed to create dirty_issues table: %w", err)
		}
	case err != nil:
		return fmt.Errorf("failed to check for dirty_issues table: %w", err)
	}

	// Older databases (and the CREATE above) may lack content_hash; add it.
	var hasContentHash bool
	err = db.QueryRow(`
		SELECT COUNT(*) > 0 FROM pragma_table_info('dirty_issues')
		WHERE name = 'content_hash'
	`).Scan(&hasContentHash)
	if err != nil {
		return fmt.Errorf("failed to check for content_hash column: %w", err)
	}

	if !hasContentHash {
		if _, err := db.Exec(`ALTER TABLE dirty_issues ADD COLUMN content_hash TEXT`); err != nil {
			return fmt.Errorf("failed to add content_hash column: %w", err)
		}
	}

	return nil
}
|
||||||
@@ -0,0 +1,43 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateExternalRefColumn adds the issues.external_ref column when it is
// missing. Idempotent: a database that already has the column is untouched.
//
// Simplification: the original iterated every row of PRAGMA table_info by
// hand; this uses the same pragma_table_info existence query that the other
// column migrations in this package use.
func MigrateExternalRefColumn(db *sql.DB) error {
	var columnExists bool
	err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'external_ref'
	`).Scan(&columnExists)
	if err != nil {
		return fmt.Errorf("failed to check schema: %w", err)
	}

	if columnExists {
		return nil
	}

	if _, err := db.Exec(`ALTER TABLE issues ADD COLUMN external_ref TEXT`); err != nil {
		return fmt.Errorf("failed to add external_ref column: %w", err)
	}

	return nil
}
|
||||||
30
internal/storage/sqlite/migrations/003_composite_indexes.go
Normal file
30
internal/storage/sqlite/migrations/003_composite_indexes.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateCompositeIndexes creates the composite index on
// dependencies(depends_on_id, type) used by reverse-dependency lookups.
//
// Simplification: the original queried sqlite_master and branched on
// sql.ErrNoRows before creating the index; CREATE INDEX IF NOT EXISTS makes
// the check-then-create dance unnecessary and is already the idiom used by
// other migrations in this package.
func MigrateCompositeIndexes(db *sql.DB) error {
	_, err := db.Exec(`
		CREATE INDEX IF NOT EXISTS idx_dependencies_depends_on_type
		ON dependencies(depends_on_id, type)
	`)
	if err != nil {
		return fmt.Errorf("failed to create composite index idx_dependencies_depends_on_type: %w", err)
	}
	return nil
}
|
||||||
@@ -0,0 +1,43 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateClosedAtConstraint repairs rows where the closed_at timestamp
// disagrees with the status column: non-closed issues get closed_at cleared,
// and closed issues missing a timestamp get one backfilled from updated_at
// (or the current time).
//
// Robustness fix: the two UPDATEs are now wrapped in a single transaction so
// a failure between them cannot leave the data half-repaired.
func MigrateClosedAtConstraint(db *sql.DB) error {
	var count int
	err := db.QueryRow(`
		SELECT COUNT(*)
		FROM issues
		WHERE (CASE WHEN status = 'closed' THEN 1 ELSE 0 END) <>
		      (CASE WHEN closed_at IS NOT NULL THEN 1 ELSE 0 END)
	`).Scan(&count)
	if err != nil {
		return fmt.Errorf("failed to count inconsistent issues: %w", err)
	}

	// Fast path: nothing to repair.
	if count == 0 {
		return nil
	}

	tx, err := db.Begin()
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	// Rollback is a no-op after a successful Commit.
	defer func() { _ = tx.Rollback() }()

	_, err = tx.Exec(`
		UPDATE issues
		SET closed_at = NULL
		WHERE status != 'closed' AND closed_at IS NOT NULL
	`)
	if err != nil {
		return fmt.Errorf("failed to clear closed_at for non-closed issues: %w", err)
	}

	_, err = tx.Exec(`
		UPDATE issues
		SET closed_at = COALESCE(updated_at, CURRENT_TIMESTAMP)
		WHERE status = 'closed' AND closed_at IS NULL
	`)
	if err != nil {
		return fmt.Errorf("failed to set closed_at for closed issues: %w", err)
	}

	if err := tx.Commit(); err != nil {
		return fmt.Errorf("failed to commit closed_at repair: %w", err)
	}

	return nil
}
|
||||||
33
internal/storage/sqlite/migrations/005_compaction_columns.go
Normal file
33
internal/storage/sqlite/migrations/005_compaction_columns.go
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateCompactionColumns adds the three compaction-related columns to the
// issues table. Idempotent per column.
//
// Robustness fix: the original only checked for compaction_level and then
// added all three columns in one multi-statement Exec, so a database holding
// a partial earlier migration (compaction_level present, the others missing)
// was skipped entirely. Each column is now checked and added individually.
func MigrateCompactionColumns(db *sql.DB) error {
	columns := []struct {
		name string
		ddl  string
	}{
		{"compaction_level", `ALTER TABLE issues ADD COLUMN compaction_level INTEGER DEFAULT 0`},
		{"compacted_at", `ALTER TABLE issues ADD COLUMN compacted_at DATETIME`},
		{"original_size", `ALTER TABLE issues ADD COLUMN original_size INTEGER`},
	}

	for _, col := range columns {
		var exists bool
		err := db.QueryRow(`
			SELECT COUNT(*) > 0
			FROM pragma_table_info('issues')
			WHERE name = ?
		`, col.name).Scan(&exists)
		if err != nil {
			return fmt.Errorf("failed to check %s column: %w", col.name, err)
		}
		if exists {
			continue
		}
		if _, err := db.Exec(col.ddl); err != nil {
			return fmt.Errorf("failed to add compaction columns: %w", err)
		}
	}

	return nil
}
|
||||||
43
internal/storage/sqlite/migrations/006_snapshots_table.go
Normal file
43
internal/storage/sqlite/migrations/006_snapshots_table.go
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateSnapshotsTable creates the issue_snapshots table and its two lookup
// indexes. Idempotent: if the table already exists, nothing is changed.
func MigrateSnapshotsTable(db *sql.DB) error {
	var exists bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM sqlite_master
		WHERE type='table' AND name='issue_snapshots'
	`).Scan(&exists); err != nil {
		return fmt.Errorf("failed to check issue_snapshots table: %w", err)
	}
	if exists {
		// Already migrated.
		return nil
	}

	// Snapshots archive a compacted issue's original content alongside
	// bookkeeping about the compaction pass that produced it.
	const ddl = `
		CREATE TABLE issue_snapshots (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			issue_id TEXT NOT NULL,
			snapshot_time DATETIME NOT NULL,
			compaction_level INTEGER NOT NULL,
			original_size INTEGER NOT NULL,
			compressed_size INTEGER NOT NULL,
			original_content TEXT NOT NULL,
			archived_events TEXT,
			FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
		);
		CREATE INDEX idx_snapshots_issue ON issue_snapshots(issue_id);
		CREATE INDEX idx_snapshots_level ON issue_snapshots(compaction_level);
	`
	if _, err := db.Exec(ddl); err != nil {
		return fmt.Errorf("failed to create issue_snapshots table: %w", err)
	}
	return nil
}
|
||||||
26
internal/storage/sqlite/migrations/007_compaction_config.go
Normal file
26
internal/storage/sqlite/migrations/007_compaction_config.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateCompactionConfig seeds the default compaction settings into the
// config table. INSERT OR IGNORE leaves any value the user has already set
// untouched, so the migration is safe to run repeatedly.
//
// NOTE(review): assumes the config table already exists when this runs —
// confirm the migration ordering guarantees that.
func MigrateCompactionConfig(db *sql.DB) error {
	if _, err := db.Exec(`
		INSERT OR IGNORE INTO config (key, value) VALUES
			('compaction_enabled', 'false'),
			('compact_tier1_days', '30'),
			('compact_tier1_dep_levels', '2'),
			('compact_tier2_days', '90'),
			('compact_tier2_dep_levels', '5'),
			('compact_tier2_commits', '100'),
			('compact_model', 'claude-3-5-haiku-20241022'),
			('compact_batch_size', '50'),
			('compact_parallel_workers', '5'),
			('auto_compact_enabled', 'false')
	`); err != nil {
		return fmt.Errorf("failed to add compaction config defaults: %w", err)
	}
	return nil
}
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateCompactedAtCommitColumn adds the issues.compacted_at_commit column
// when it is missing. Idempotent.
func MigrateCompactedAtCommitColumn(db *sql.DB) error {
	var present bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'compacted_at_commit'
	`).Scan(&present); err != nil {
		return fmt.Errorf("failed to check compacted_at_commit column: %w", err)
	}
	if present {
		// Column already exists; nothing to migrate.
		return nil
	}

	if _, err := db.Exec(`ALTER TABLE issues ADD COLUMN compacted_at_commit TEXT`); err != nil {
		return fmt.Errorf("failed to add compacted_at_commit column: %w", err)
	}
	return nil
}
|
||||||
@@ -0,0 +1,35 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateExportHashesTable creates the export_hashes table, which records the
// content hash of each issue at its last export. Idempotent.
func MigrateExportHashesTable(db *sql.DB) error {
	var existing string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='table' AND name='export_hashes'
	`).Scan(&existing)
	if err == nil {
		// Table is already present.
		return nil
	}
	if err != sql.ErrNoRows {
		return fmt.Errorf("failed to check export_hashes table: %w", err)
	}

	if _, err := db.Exec(`
		CREATE TABLE export_hashes (
			issue_id TEXT PRIMARY KEY,
			content_hash TEXT NOT NULL,
			exported_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
			FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
		)
	`); err != nil {
		return fmt.Errorf("failed to create export_hashes table: %w", err)
	}
	return nil
}
|
||||||
@@ -0,0 +1,94 @@
|
|||||||
|
package migrations
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func MigrateContentHashColumn(db *sql.DB) error {
|
||||||
|
var colName string
|
||||||
|
err := db.QueryRow(`
|
||||||
|
SELECT name FROM pragma_table_info('issues')
|
||||||
|
WHERE name = 'content_hash'
|
||||||
|
`).Scan(&colName)
|
||||||
|
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
_, err := db.Exec(`ALTER TABLE issues ADD COLUMN content_hash TEXT`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to add content_hash column: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_content_hash ON issues(content_hash)`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create content_hash index: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rows, err := db.Query(`
|
||||||
|
SELECT id, title, description, design, acceptance_criteria, notes,
|
||||||
|
status, priority, issue_type, assignee, external_ref
|
||||||
|
FROM issues
|
||||||
|
`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to query existing issues: %w", err)
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
updates := make(map[string]string)
|
||||||
|
for rows.Next() {
|
||||||
|
var issue types.Issue
|
||||||
|
var assignee sql.NullString
|
||||||
|
var externalRef sql.NullString
|
||||||
|
err := rows.Scan(
|
||||||
|
&issue.ID, &issue.Title, &issue.Description, &issue.Design,
|
||||||
|
&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
|
||||||
|
&issue.Priority, &issue.IssueType, &assignee, &externalRef,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to scan issue: %w", err)
|
||||||
|
}
|
||||||
|
if assignee.Valid {
|
||||||
|
issue.Assignee = assignee.String
|
||||||
|
}
|
||||||
|
if externalRef.Valid {
|
||||||
|
issue.ExternalRef = &externalRef.String
|
||||||
|
}
|
||||||
|
|
||||||
|
updates[issue.ID] = issue.ComputeContentHash()
|
||||||
|
}
|
||||||
|
if err := rows.Err(); err != nil {
|
||||||
|
return fmt.Errorf("error iterating issues: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx, err := db.Begin()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to begin transaction: %w", err)
|
||||||
|
}
|
||||||
|
defer tx.Rollback()
|
||||||
|
|
||||||
|
stmt, err := tx.Prepare(`UPDATE issues SET content_hash = ? WHERE id = ?`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to prepare update statement: %w", err)
|
||||||
|
}
|
||||||
|
defer stmt.Close()
|
||||||
|
|
||||||
|
for id, hash := range updates {
|
||||||
|
if _, err := stmt.Exec(hash, id); err != nil {
|
||||||
|
return fmt.Errorf("failed to update content_hash for issue %s: %w", id, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := tx.Commit(); err != nil {
|
||||||
|
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to check content_hash column: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -0,0 +1,66 @@
|
|||||||
|
package migrations
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func MigrateExternalRefUnique(db *sql.DB) error {
|
||||||
|
var hasConstraint bool
|
||||||
|
err := db.QueryRow(`
|
||||||
|
SELECT COUNT(*) > 0
|
||||||
|
FROM sqlite_master
|
||||||
|
WHERE type = 'index'
|
||||||
|
AND name = 'idx_issues_external_ref_unique'
|
||||||
|
`).Scan(&hasConstraint)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to check for UNIQUE constraint: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if hasConstraint {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
existingDuplicates, err := findExternalRefDuplicates(db)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to check for duplicate external_ref values: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(existingDuplicates) > 0 {
|
||||||
|
return fmt.Errorf("cannot add UNIQUE constraint: found %d duplicate external_ref values (resolve with 'bd duplicates' or manually)", len(existingDuplicates))
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = db.Exec(`CREATE UNIQUE INDEX IF NOT EXISTS idx_issues_external_ref_unique ON issues(external_ref) WHERE external_ref IS NOT NULL`)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create UNIQUE index on external_ref: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func findExternalRefDuplicates(db *sql.DB) (map[string][]string, error) {
|
||||||
|
rows, err := db.Query(`
|
||||||
|
SELECT external_ref, GROUP_CONCAT(id, ',') as ids
|
||||||
|
FROM issues
|
||||||
|
WHERE external_ref IS NOT NULL
|
||||||
|
GROUP BY external_ref
|
||||||
|
HAVING COUNT(*) > 1
|
||||||
|
`)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
duplicates := make(map[string][]string)
|
||||||
|
for rows.Next() {
|
||||||
|
var externalRef, idsCSV string
|
||||||
|
if err := rows.Scan(&externalRef, &idsCSV); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ids := strings.Split(idsCSV, ",")
|
||||||
|
duplicates[externalRef] = ids
|
||||||
|
}
|
||||||
|
|
||||||
|
return duplicates, rows.Err()
|
||||||
|
}
|
||||||
34
internal/storage/sqlite/migrations/012_source_repo_column.go
Normal file
34
internal/storage/sqlite/migrations/012_source_repo_column.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateSourceRepoColumn adds the issues.source_repo column (defaulting to
// '.', the current repo) and its index. Idempotent.
func MigrateSourceRepoColumn(db *sql.DB) error {
	var present bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'source_repo'
	`).Scan(&present); err != nil {
		return fmt.Errorf("failed to check source_repo column: %w", err)
	}
	if present {
		// Column already exists; nothing to do.
		return nil
	}

	if _, err := db.Exec(`ALTER TABLE issues ADD COLUMN source_repo TEXT DEFAULT '.'`); err != nil {
		return fmt.Errorf("failed to add source_repo column: %w", err)
	}
	if _, err := db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_source_repo ON issues(source_repo)`); err != nil {
		return fmt.Errorf("failed to create source_repo index: %w", err)
	}
	return nil
}
|
||||||
36
internal/storage/sqlite/migrations/013_repo_mtimes_table.go
Normal file
36
internal/storage/sqlite/migrations/013_repo_mtimes_table.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateRepoMtimesTable creates the repo_mtimes table, which tracks the
// last-seen JSONL mtime per repository for hydration staleness checks.
// Idempotent.
func MigrateRepoMtimesTable(db *sql.DB) error {
	var existing string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='table' AND name='repo_mtimes'
	`).Scan(&existing)
	if err == nil {
		// Table already present.
		return nil
	}
	if err != sql.ErrNoRows {
		return fmt.Errorf("failed to check for repo_mtimes table: %w", err)
	}

	if _, err := db.Exec(`
		CREATE TABLE repo_mtimes (
			repo_path TEXT PRIMARY KEY,
			jsonl_path TEXT NOT NULL,
			mtime_ns INTEGER NOT NULL,
			last_checked DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
		);
		CREATE INDEX idx_repo_mtimes_checked ON repo_mtimes(last_checked);
	`); err != nil {
		return fmt.Errorf("failed to create repo_mtimes table: %w", err)
	}
	return nil
}
|
||||||
@@ -0,0 +1,34 @@
|
|||||||
|
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateChildCountersTable creates the child_counters table, which records
// the highest child suffix handed out per parent issue. Idempotent.
func MigrateChildCountersTable(db *sql.DB) error {
	var existing string
	err := db.QueryRow(`
		SELECT name FROM sqlite_master
		WHERE type='table' AND name='child_counters'
	`).Scan(&existing)
	if err == nil {
		// Table already present.
		return nil
	}
	if err != sql.ErrNoRows {
		return fmt.Errorf("failed to check for child_counters table: %w", err)
	}

	if _, err := db.Exec(`
		CREATE TABLE child_counters (
			parent_id TEXT PRIMARY KEY,
			last_child INTEGER NOT NULL DEFAULT 0,
			FOREIGN KEY (parent_id) REFERENCES issues(id) ON DELETE CASCADE
		)
	`); err != nil {
		return fmt.Errorf("failed to create child_counters table: %w", err)
	}
	return nil
}
|
||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/steveyegge/beads/internal/storage/sqlite/migrations"
|
||||||
"github.com/steveyegge/beads/internal/types"
|
"github.com/steveyegge/beads/internal/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -19,7 +20,7 @@ func TestMigrateDirtyIssuesTable(t *testing.T) {
|
|||||||
_, _ = db.Exec("DROP TABLE IF EXISTS dirty_issues")
|
_, _ = db.Exec("DROP TABLE IF EXISTS dirty_issues")
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateDirtyIssuesTable(db); err != nil {
|
if err := migrations.MigrateDirtyIssuesTable(db); err != nil {
|
||||||
t.Fatalf("failed to migrate dirty_issues table: %v", err)
|
t.Fatalf("failed to migrate dirty_issues table: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -52,7 +53,7 @@ func TestMigrateDirtyIssuesTable(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateDirtyIssuesTable(db); err != nil {
|
if err := migrations.MigrateDirtyIssuesTable(db); err != nil {
|
||||||
t.Fatalf("failed to migrate dirty_issues table: %v", err)
|
t.Fatalf("failed to migrate dirty_issues table: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,7 +78,7 @@ func TestMigrateExternalRefColumn(t *testing.T) {
|
|||||||
db := store.db
|
db := store.db
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateExternalRefColumn(db); err != nil {
|
if err := migrations.MigrateExternalRefColumn(db); err != nil {
|
||||||
t.Fatalf("failed to migrate external_ref column: %v", err)
|
t.Fatalf("failed to migrate external_ref column: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,7 +118,7 @@ func TestMigrateCompositeIndexes(t *testing.T) {
|
|||||||
_, _ = db.Exec("DROP INDEX IF EXISTS idx_dependencies_depends_on_type")
|
_, _ = db.Exec("DROP INDEX IF EXISTS idx_dependencies_depends_on_type")
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateCompositeIndexes(db); err != nil {
|
if err := migrations.MigrateCompositeIndexes(db); err != nil {
|
||||||
t.Fatalf("failed to migrate composite indexes: %v", err)
|
t.Fatalf("failed to migrate composite indexes: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -147,7 +148,7 @@ func TestMigrateClosedAtConstraint(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Run migration (should succeed with no inconsistent data)
|
// Run migration (should succeed with no inconsistent data)
|
||||||
if err := migrateClosedAtConstraint(s.db); err != nil {
|
if err := migrations.MigrateClosedAtConstraint(s.db); err != nil {
|
||||||
t.Fatalf("failed to migrate closed_at constraint: %v", err)
|
t.Fatalf("failed to migrate closed_at constraint: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -172,7 +173,7 @@ func TestMigrateCompactionColumns(t *testing.T) {
|
|||||||
|
|
||||||
// Run migration (will fail since columns don't exist, but that's okay for this test)
|
// Run migration (will fail since columns don't exist, but that's okay for this test)
|
||||||
// The migration should handle this gracefully
|
// The migration should handle this gracefully
|
||||||
_ = migrateCompactionColumns(s.db)
|
_ = migrations.MigrateCompactionColumns(s.db)
|
||||||
|
|
||||||
// Verify at least one column exists by querying
|
// Verify at least one column exists by querying
|
||||||
var exists bool
|
var exists bool
|
||||||
@@ -198,7 +199,7 @@ func TestMigrateSnapshotsTable(t *testing.T) {
|
|||||||
_, _ = db.Exec("DROP TABLE IF EXISTS issue_snapshots")
|
_, _ = db.Exec("DROP TABLE IF EXISTS issue_snapshots")
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateSnapshotsTable(db); err != nil {
|
if err := migrations.MigrateSnapshotsTable(db); err != nil {
|
||||||
t.Fatalf("failed to migrate snapshots table: %v", err)
|
t.Fatalf("failed to migrate snapshots table: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -226,7 +227,7 @@ func TestMigrateCompactionConfig(t *testing.T) {
|
|||||||
_, _ = db.Exec("DELETE FROM config WHERE key LIKE 'compact%'")
|
_, _ = db.Exec("DELETE FROM config WHERE key LIKE 'compact%'")
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateCompactionConfig(db); err != nil {
|
if err := migrations.MigrateCompactionConfig(db); err != nil {
|
||||||
t.Fatalf("failed to migrate compaction config: %v", err)
|
t.Fatalf("failed to migrate compaction config: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -247,7 +248,7 @@ func TestMigrateCompactedAtCommitColumn(t *testing.T) {
|
|||||||
db := store.db
|
db := store.db
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateCompactedAtCommitColumn(db); err != nil {
|
if err := migrations.MigrateCompactedAtCommitColumn(db); err != nil {
|
||||||
t.Fatalf("failed to migrate compacted_at_commit column: %v", err)
|
t.Fatalf("failed to migrate compacted_at_commit column: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -275,7 +276,7 @@ func TestMigrateExportHashesTable(t *testing.T) {
|
|||||||
_, _ = db.Exec("DROP TABLE IF EXISTS export_hashes")
|
_, _ = db.Exec("DROP TABLE IF EXISTS export_hashes")
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateExportHashesTable(db); err != nil {
|
if err := migrations.MigrateExportHashesTable(db); err != nil {
|
||||||
t.Fatalf("failed to migrate export_hashes table: %v", err)
|
t.Fatalf("failed to migrate export_hashes table: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -308,7 +309,7 @@ func TestMigrateExternalRefUnique(t *testing.T) {
|
|||||||
t.Fatalf("failed to create issue2: %v", err)
|
t.Fatalf("failed to create issue2: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := migrateExternalRefUnique(db); err != nil {
|
if err := migrations.MigrateExternalRefUnique(db); err != nil {
|
||||||
t.Fatalf("failed to migrate external_ref unique constraint: %v", err)
|
t.Fatalf("failed to migrate external_ref unique constraint: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -340,7 +341,7 @@ func TestMigrateExternalRefUnique(t *testing.T) {
|
|||||||
t.Fatalf("failed to create duplicate: %v", err)
|
t.Fatalf("failed to create duplicate: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = migrateExternalRefUnique(db)
|
err = migrations.MigrateExternalRefUnique(db)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Error("Expected migration to fail with duplicates present")
|
t.Error("Expected migration to fail with duplicates present")
|
||||||
}
|
}
|
||||||
@@ -360,7 +361,7 @@ func TestMigrateRepoMtimesTable(t *testing.T) {
|
|||||||
_, _ = db.Exec("DROP TABLE IF EXISTS repo_mtimes")
|
_, _ = db.Exec("DROP TABLE IF EXISTS repo_mtimes")
|
||||||
|
|
||||||
// Run migration
|
// Run migration
|
||||||
if err := migrateRepoMtimesTable(db); err != nil {
|
if err := migrations.MigrateRepoMtimesTable(db); err != nil {
|
||||||
t.Fatalf("failed to migrate repo_mtimes table: %v", err)
|
t.Fatalf("failed to migrate repo_mtimes table: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -381,10 +382,10 @@ func TestMigrateRepoMtimesTable(t *testing.T) {
|
|||||||
db := store.db
|
db := store.db
|
||||||
|
|
||||||
// Run migration twice
|
// Run migration twice
|
||||||
if err := migrateRepoMtimesTable(db); err != nil {
|
if err := migrations.MigrateRepoMtimesTable(db); err != nil {
|
||||||
t.Fatalf("first migration failed: %v", err)
|
t.Fatalf("first migration failed: %v", err)
|
||||||
}
|
}
|
||||||
if err := migrateRepoMtimesTable(db); err != nil {
|
if err := migrations.MigrateRepoMtimesTable(db); err != nil {
|
||||||
t.Fatalf("second migration failed: %v", err)
|
t.Fatalf("second migration failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -406,7 +407,7 @@ func TestMigrateContentHashColumn(t *testing.T) {
|
|||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
// Run migration (should be idempotent)
|
// Run migration (should be idempotent)
|
||||||
if err := migrateContentHashColumn(s.db); err != nil {
|
if err := migrations.MigrateContentHashColumn(s.db); err != nil {
|
||||||
t.Fatalf("failed to migrate content_hash column: %v", err)
|
t.Fatalf("failed to migrate content_hash column: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -487,7 +488,7 @@ func TestMigrateContentHashColumn(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Run migration - this should add the column and populate it
|
// Run migration - this should add the column and populate it
|
||||||
if err := migrateContentHashColumn(s.db); err != nil {
|
if err := migrations.MigrateContentHashColumn(s.db); err != nil {
|
||||||
t.Fatalf("failed to migrate content_hash column: %v", err)
|
t.Fatalf("failed to migrate content_hash column: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user