refactor: remove unused bd pin/unpin/hook commands (bd-x0zl)

Analysis found these commands are dead code:
- gt never calls `bd pin` - uses `bd update --status=pinned` instead
- Beads.Pin() wrapper exists but is never called
- bd hook functionality duplicated by gt mol status
- Code comment says "pinned field is cosmetic for bd hook visibility"

Removed:
- cmd/bd/pin.go
- cmd/bd/unpin.go
- cmd/bd/hook.go

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Steve Yegge
2025-12-27 16:02:15 -08:00
parent c8b912cbe6
commit 1611f16751
178 changed files with 10291 additions and 1682 deletions

View File

@@ -20,10 +20,6 @@ func MigrateMessagingFields(db *sql.DB) error {
}{
{"sender", "TEXT DEFAULT ''"},
{"ephemeral", "INTEGER DEFAULT 0"},
{"replies_to", "TEXT DEFAULT ''"},
{"relates_to", "TEXT DEFAULT ''"},
{"duplicate_of", "TEXT DEFAULT ''"},
{"superseded_by", "TEXT DEFAULT ''"},
}
for _, col := range columns {
@@ -59,11 +55,5 @@ func MigrateMessagingFields(db *sql.DB) error {
return fmt.Errorf("failed to create sender index: %w", err)
}
// Add index for replies_to (for efficient thread queries)
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_replies_to ON issues(replies_to) WHERE replies_to != ''`)
if err != nil {
return fmt.Errorf("failed to create replies_to index: %w", err)
}
return nil
}

View File

@@ -21,137 +21,176 @@ import (
func MigrateEdgeFields(db *sql.DB) error {
now := time.Now()
hasColumn := func(name string) (bool, error) {
var exists bool
err := db.QueryRow(`
SELECT COUNT(*) > 0
FROM pragma_table_info('issues')
WHERE name = ?
`, name).Scan(&exists)
return exists, err
}
hasRepliesTo, err := hasColumn("replies_to")
if err != nil {
return fmt.Errorf("failed to check replies_to column: %w", err)
}
hasRelatesTo, err := hasColumn("relates_to")
if err != nil {
return fmt.Errorf("failed to check relates_to column: %w", err)
}
hasDuplicateOf, err := hasColumn("duplicate_of")
if err != nil {
return fmt.Errorf("failed to check duplicate_of column: %w", err)
}
hasSupersededBy, err := hasColumn("superseded_by")
if err != nil {
return fmt.Errorf("failed to check superseded_by column: %w", err)
}
if !hasRepliesTo && !hasRelatesTo && !hasDuplicateOf && !hasSupersededBy {
return nil
}
// Migrate replies_to fields to replies-to edges
// For thread_id, use the parent's ID as the thread root for first-level replies
// (more sophisticated thread detection would require recursive queries)
rows, err := db.Query(`
SELECT id, replies_to
FROM issues
WHERE replies_to != '' AND replies_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query replies_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, repliesTo string
if err := rows.Scan(&issueID, &repliesTo); err != nil {
return fmt.Errorf("failed to scan replies_to row: %w", err)
}
// Use repliesTo as thread_id (the root of the thread)
// This is a simplification - existing threads will have the parent as thread root
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
`, issueID, repliesTo, now, repliesTo)
if hasRepliesTo {
rows, err := db.Query(`
SELECT id, replies_to
FROM issues
WHERE replies_to != '' AND replies_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query replies_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, repliesTo string
if err := rows.Scan(&issueID, &repliesTo); err != nil {
return fmt.Errorf("failed to scan replies_to row: %w", err)
}
// Use repliesTo as thread_id (the root of the thread)
// This is a simplification - existing threads will have the parent as thread root
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'replies-to', ?, 'migration', '{}', ?)
`, issueID, repliesTo, now, repliesTo)
if err != nil {
return fmt.Errorf("failed to create replies-to edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating replies_to rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating replies_to rows: %w", err)
}
// Migrate relates_to fields to relates-to edges
// relates_to is stored as JSON array string
rows, err = db.Query(`
SELECT id, relates_to
FROM issues
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query relates_to fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, relatesTo string
if err := rows.Scan(&issueID, &relatesTo); err != nil {
return fmt.Errorf("failed to scan relates_to row: %w", err)
if hasRelatesTo {
rows, err := db.Query(`
SELECT id, relates_to
FROM issues
WHERE relates_to != '' AND relates_to != '[]' AND relates_to IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query relates_to fields: %w", err)
}
defer rows.Close()
// Parse JSON array
var relatedIDs []string
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
// Skip malformed JSON
continue
}
for rows.Next() {
var issueID, relatesTo string
if err := rows.Scan(&issueID, &relatesTo); err != nil {
return fmt.Errorf("failed to scan relates_to row: %w", err)
}
for _, relatedID := range relatedIDs {
if relatedID == "" {
// Parse JSON array
var relatedIDs []string
if err := json.Unmarshal([]byte(relatesTo), &relatedIDs); err != nil {
// Skip malformed JSON
continue
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
`, issueID, relatedID, now)
if err != nil {
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
for _, relatedID := range relatedIDs {
if relatedID == "" {
continue
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'relates-to', ?, 'migration', '{}', '')
`, issueID, relatedID, now)
if err != nil {
return fmt.Errorf("failed to create relates-to edge for %s -> %s: %w", issueID, relatedID, err)
}
}
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating relates_to rows: %w", err)
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating relates_to rows: %w", err)
}
}
// Migrate duplicate_of fields to duplicates edges
rows, err = db.Query(`
SELECT id, duplicate_of
FROM issues
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, duplicateOf string
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
`, issueID, duplicateOf, now)
if hasDuplicateOf {
rows, err := db.Query(`
SELECT id, duplicate_of
FROM issues
WHERE duplicate_of != '' AND duplicate_of IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query duplicate_of fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, duplicateOf string
if err := rows.Scan(&issueID, &duplicateOf); err != nil {
return fmt.Errorf("failed to scan duplicate_of row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'duplicates', ?, 'migration', '{}', '')
`, issueID, duplicateOf, now)
if err != nil {
return fmt.Errorf("failed to create duplicates edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating duplicate_of rows: %w", err)
}
// Migrate superseded_by fields to supersedes edges
rows, err = db.Query(`
SELECT id, superseded_by
FROM issues
WHERE superseded_by != '' AND superseded_by IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to query superseded_by fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, supersededBy string
if err := rows.Scan(&issueID, &supersededBy); err != nil {
return fmt.Errorf("failed to scan superseded_by row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
`, issueID, supersededBy, now)
if hasSupersededBy {
rows, err := db.Query(`
SELECT id, superseded_by
FROM issues
WHERE superseded_by != '' AND superseded_by IS NOT NULL
`)
if err != nil {
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
return fmt.Errorf("failed to query superseded_by fields: %w", err)
}
defer rows.Close()
for rows.Next() {
var issueID, supersededBy string
if err := rows.Scan(&issueID, &supersededBy); err != nil {
return fmt.Errorf("failed to scan superseded_by row: %w", err)
}
_, err := db.Exec(`
INSERT OR IGNORE INTO dependencies (issue_id, depends_on_id, type, created_at, created_by, metadata, thread_id)
VALUES (?, ?, 'supersedes', ?, 'migration', '{}', '')
`, issueID, supersededBy, now)
if err != nil {
return fmt.Errorf("failed to create supersedes edge for %s: %w", issueID, err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating superseded_by rows: %w", err)
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("error iterating superseded_by rows: %w", err)
}
return nil

View File

@@ -57,6 +57,57 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
return nil
}
// Preserve newer columns if they already exist (migration may run on partially-migrated DBs).
hasPinned, err := checkCol("pinned")
if err != nil {
return fmt.Errorf("failed to check pinned column: %w", err)
}
hasIsTemplate, err := checkCol("is_template")
if err != nil {
return fmt.Errorf("failed to check is_template column: %w", err)
}
hasAwaitType, err := checkCol("await_type")
if err != nil {
return fmt.Errorf("failed to check await_type column: %w", err)
}
hasAwaitID, err := checkCol("await_id")
if err != nil {
return fmt.Errorf("failed to check await_id column: %w", err)
}
hasTimeoutNs, err := checkCol("timeout_ns")
if err != nil {
return fmt.Errorf("failed to check timeout_ns column: %w", err)
}
hasWaiters, err := checkCol("waiters")
if err != nil {
return fmt.Errorf("failed to check waiters column: %w", err)
}
pinnedExpr := "0"
if hasPinned {
pinnedExpr = "pinned"
}
isTemplateExpr := "0"
if hasIsTemplate {
isTemplateExpr = "is_template"
}
awaitTypeExpr := "''"
if hasAwaitType {
awaitTypeExpr = "await_type"
}
awaitIDExpr := "''"
if hasAwaitID {
awaitIDExpr = "await_id"
}
timeoutNsExpr := "0"
if hasTimeoutNs {
timeoutNsExpr = "timeout_ns"
}
waitersExpr := "''"
if hasWaiters {
waitersExpr = "waiters"
}
// SQLite 3.35.0+ supports DROP COLUMN, but we use table recreation for compatibility
// This is idempotent - we recreate the table without the deprecated columns
@@ -117,6 +168,12 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
original_type TEXT DEFAULT '',
sender TEXT DEFAULT '',
ephemeral INTEGER DEFAULT 0,
pinned INTEGER DEFAULT 0,
is_template INTEGER DEFAULT 0,
await_type TEXT,
await_id TEXT,
timeout_ns INTEGER,
waiters TEXT,
close_reason TEXT DEFAULT '',
CHECK ((status = 'closed') = (closed_at IS NOT NULL))
)
@@ -132,7 +189,8 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
notes, status, priority, issue_type, assignee, estimated_minutes,
created_at, updated_at, closed_at, external_ref, source_repo, compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at,
deleted_by, delete_reason, original_type, sender, ephemeral, close_reason
deleted_by, delete_reason, original_type, sender, ephemeral, pinned, is_template,
await_type, await_id, timeout_ns, waiters, close_reason
)
SELECT
id, content_hash, title, description, design, acceptance_criteria,
@@ -140,9 +198,11 @@ func MigrateDropEdgeColumns(db *sql.DB) error {
created_at, updated_at, closed_at, external_ref, COALESCE(source_repo, ''), compaction_level,
compacted_at, compacted_at_commit, original_size, deleted_at,
deleted_by, delete_reason, original_type, sender, ephemeral,
%s, %s,
%s, %s, %s, %s,
COALESCE(close_reason, '')
FROM issues
`)
`, pinnedExpr, isTemplateExpr, awaitTypeExpr, awaitIDExpr, timeoutNsExpr, waitersExpr)
if err != nil {
return fmt.Errorf("failed to copy issues data: %w", err)
}

View File

@@ -20,6 +20,11 @@ func MigratePinnedColumn(db *sql.DB) error {
}
if columnExists {
// Column exists (e.g. created by new schema); ensure index exists.
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`)
if err != nil {
return fmt.Errorf("failed to create pinned index: %w", err)
}
return nil
}

View File

@@ -21,6 +21,11 @@ func MigrateIsTemplateColumn(db *sql.DB) error {
}
if columnExists {
// Column exists (e.g. created by new schema); ensure index exists.
_, err = db.Exec(`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`)
if err != nil {
return fmt.Errorf("failed to create is_template index: %w", err)
}
return nil
}

View File

@@ -0,0 +1,251 @@
package migrations
import (
"database/sql"
"fmt"
"strings"
)
// MigrateTombstoneClosedAt updates the closed_at constraint to allow tombstones
// to retain their closed_at timestamp from before deletion.
//
// Previously: CHECK ((status = 'closed') = (closed_at IS NOT NULL))
//   - This required clearing closed_at when creating tombstones from closed issues
//
// Now: CHECK (closed + tombstone OR non-closed/tombstone with no closed_at)
//   - closed issues must have closed_at
//   - tombstones may have closed_at (from before deletion) or not
//   - other statuses must NOT have closed_at
//
// This allows importing tombstones that were closed before being deleted,
// preserving the historical closed_at timestamp for audit purposes.
func MigrateTombstoneClosedAt(db *sql.DB) error {
	// SQLite doesn't support ALTER TABLE to modify CHECK constraints,
	// so we must recreate the table with the new constraint.
	//
	// Idempotency check: the new CHECK constraint contains "status = 'tombstone'",
	// which the old one didn't, so its presence means the migration already ran.
	var tableSql string
	err := db.QueryRow(`SELECT sql FROM sqlite_master WHERE type='table' AND name='issues'`).Scan(&tableSql)
	if err != nil {
		return fmt.Errorf("failed to get issues table schema: %w", err)
	}
	if strings.Contains(tableSql, "status = 'tombstone'") || strings.Contains(tableSql, `status = "tombstone"`) {
		return nil
	}

	// Step 0: Drop views that depend on the issues table (recreated in Step 6).
	_, err = db.Exec(`DROP VIEW IF EXISTS ready_issues`)
	if err != nil {
		return fmt.Errorf("failed to drop ready_issues view: %w", err)
	}
	_, err = db.Exec(`DROP VIEW IF EXISTS blocked_issues`)
	if err != nil {
		return fmt.Errorf("failed to drop blocked_issues view: %w", err)
	}

	// Step 1: Create new table with the updated constraint.
	_, err = db.Exec(`
		CREATE TABLE IF NOT EXISTS issues_new (
			id TEXT PRIMARY KEY,
			content_hash TEXT,
			title TEXT NOT NULL CHECK(length(title) <= 500),
			description TEXT NOT NULL DEFAULT '',
			design TEXT NOT NULL DEFAULT '',
			acceptance_criteria TEXT NOT NULL DEFAULT '',
			notes TEXT NOT NULL DEFAULT '',
			status TEXT NOT NULL DEFAULT 'open',
			priority INTEGER NOT NULL DEFAULT 2 CHECK(priority >= 0 AND priority <= 4),
			issue_type TEXT NOT NULL DEFAULT 'task',
			assignee TEXT,
			estimated_minutes INTEGER,
			created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
			created_by TEXT DEFAULT '',
			updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
			closed_at DATETIME,
			external_ref TEXT,
			source_repo TEXT DEFAULT '',
			compaction_level INTEGER DEFAULT 0,
			compacted_at DATETIME,
			compacted_at_commit TEXT,
			original_size INTEGER,
			deleted_at DATETIME,
			deleted_by TEXT DEFAULT '',
			delete_reason TEXT DEFAULT '',
			original_type TEXT DEFAULT '',
			sender TEXT DEFAULT '',
			ephemeral INTEGER DEFAULT 0,
			close_reason TEXT DEFAULT '',
			pinned INTEGER DEFAULT 0,
			is_template INTEGER DEFAULT 0,
			await_type TEXT,
			await_id TEXT,
			timeout_ns INTEGER,
			waiters TEXT,
			CHECK (
				(status = 'closed' AND closed_at IS NOT NULL) OR
				(status = 'tombstone') OR
				(status NOT IN ('closed', 'tombstone') AND closed_at IS NULL)
			)
		)
	`)
	if err != nil {
		return fmt.Errorf("failed to create new issues table: %w", err)
	}

	// Step 2: Copy data from old table to new table.
	// Older databases may predate the created_by column; if it is missing,
	// substitute an empty string (matching MigrateCreatedByColumn's default).
	var hasCreatedBy bool
	err = db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'created_by'
	`).Scan(&hasCreatedBy)
	if err != nil {
		return fmt.Errorf("failed to check created_by column: %w", err)
	}
	createdByExpr := "''"
	if hasCreatedBy {
		createdByExpr = "created_by"
	}
	insertSQL := fmt.Sprintf(`
		INSERT INTO issues_new (
			id, content_hash, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes, created_at,
			created_by, updated_at, closed_at, external_ref, source_repo, compaction_level,
			compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
			delete_reason, original_type, sender, ephemeral, close_reason, pinned,
			is_template, await_type, await_id, timeout_ns, waiters
		)
		SELECT
			id, content_hash, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes, created_at,
			%s, updated_at, closed_at, external_ref, source_repo, compaction_level,
			compacted_at, compacted_at_commit, original_size, deleted_at, deleted_by,
			delete_reason, original_type, sender, ephemeral, close_reason, pinned,
			is_template, await_type, await_id, timeout_ns, waiters
		FROM issues
	`, createdByExpr)
	_, err = db.Exec(insertSQL)
	if err != nil {
		return fmt.Errorf("failed to copy issues data: %w", err)
	}

	// Step 3: Drop old table.
	_, err = db.Exec(`DROP TABLE issues`)
	if err != nil {
		return fmt.Errorf("failed to drop old issues table: %w", err)
	}

	// Step 4: Rename new table to original name.
	_, err = db.Exec(`ALTER TABLE issues_new RENAME TO issues`)
	if err != nil {
		return fmt.Errorf("failed to rename new issues table: %w", err)
	}

	// Step 5: Recreate indexes (they were dropped with the table).
	indexes := []string{
		`CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_priority ON issues(priority)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_assignee ON issues(assignee)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_created_at ON issues(created_at)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_external_ref ON issues(external_ref) WHERE external_ref IS NOT NULL`,
		`CREATE INDEX IF NOT EXISTS idx_issues_pinned ON issues(pinned) WHERE pinned = 1`,
		`CREATE INDEX IF NOT EXISTS idx_issues_is_template ON issues(is_template) WHERE is_template = 1`,
		`CREATE INDEX IF NOT EXISTS idx_issues_updated_at ON issues(updated_at)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_status_priority ON issues(status, priority)`,
		`CREATE INDEX IF NOT EXISTS idx_issues_gate ON issues(issue_type) WHERE issue_type = 'gate'`,
	}
	for _, idx := range indexes {
		if _, err := db.Exec(idx); err != nil {
			return fmt.Errorf("failed to create index: %w", err)
		}
	}

	// Step 6: Recreate views that we dropped in Step 0.
	_, err = db.Exec(`
		CREATE VIEW IF NOT EXISTS ready_issues AS
		WITH RECURSIVE
		blocked_directly AS (
			SELECT DISTINCT d.issue_id
			FROM dependencies d
			JOIN issues blocker ON d.depends_on_id = blocker.id
			WHERE d.type = 'blocks'
			AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
		),
		blocked_transitively AS (
			SELECT issue_id, 0 as depth
			FROM blocked_directly
			UNION ALL
			SELECT d.issue_id, bt.depth + 1
			FROM blocked_transitively bt
			JOIN dependencies d ON d.depends_on_id = bt.issue_id
			WHERE d.type = 'parent-child'
			AND bt.depth < 50
		)
		SELECT i.*
		FROM issues i
		WHERE i.status = 'open'
		AND NOT EXISTS (
			SELECT 1 FROM blocked_transitively WHERE issue_id = i.id
		)
	`)
	if err != nil {
		return fmt.Errorf("failed to recreate ready_issues view: %w", err)
	}
	_, err = db.Exec(`
		CREATE VIEW IF NOT EXISTS blocked_issues AS
		SELECT
			i.*,
			COUNT(d.depends_on_id) as blocked_by_count
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		JOIN issues blocker ON d.depends_on_id = blocker.id
		WHERE i.status IN ('open', 'in_progress', 'blocked', 'deferred')
		AND d.type = 'blocks'
		AND blocker.status IN ('open', 'in_progress', 'blocked', 'deferred')
		GROUP BY i.id
	`)
	if err != nil {
		return fmt.Errorf("failed to recreate blocked_issues view: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,34 @@
package migrations

import (
	"database/sql"
	"fmt"
)

// MigrateCreatedByColumn adds the created_by column to the issues table.
// This tracks who created the issue, using the same actor chain as comment authors
// (--actor flag, BD_ACTOR env, or $USER). GH#748.
func MigrateCreatedByColumn(db *sql.DB) error {
	// Idempotency: bail out when the column is already present
	// (e.g. the table was created by a newer schema).
	var exists bool
	if err := db.QueryRow(`
		SELECT COUNT(*) > 0
		FROM pragma_table_info('issues')
		WHERE name = 'created_by'
	`).Scan(&exists); err != nil {
		return fmt.Errorf("failed to check created_by column: %w", err)
	}
	if exists {
		return nil
	}
	// Column is missing: add it with an empty-string default so that
	// pre-existing rows stay valid without a backfill.
	if _, err := db.Exec(`ALTER TABLE issues ADD COLUMN created_by TEXT DEFAULT ''`); err != nil {
		return fmt.Errorf("failed to add created_by column: %w", err)
	}
	return nil
}