Initial commit: Beads issue tracker with security fixes

Core features:
- Dependency-aware issue tracking with SQLite backend
- Ready work detection (issues with no open blockers)
- Dependency tree visualization
- Cycle detection and prevention
- Full audit trail
- CLI with colored output

Security and correctness fixes applied:
- Fixed SQL injection vulnerability in UpdateIssue (whitelisted fields)
- Fixed race condition in ID generation (added mutex)
- Fixed cycle detection to return full paths (not just issue IDs)
- Added cycle prevention in AddDependency (validates before commit)
- Added comprehensive input validation (priority, status, types, etc.)
- Fixed N+1 query in GetBlockedIssues (using GROUP_CONCAT)
- Improved query building in GetReadyWork (proper string joining)
- Fixed P0 priority filter bug (using Changed() instead of value check)

All critical and major issues from code review have been addressed.

🤖 Generated with Claude Code
This commit is contained in:
Steve Yegge
2025-10-11 20:07:36 -07:00
commit 704515125d
19 changed files with 3976 additions and 0 deletions

23
.gitignore vendored Normal file
View File

@@ -0,0 +1,23 @@
# Binaries
beads
*.exe
*.dll
*.so
*.dylib
# Test binaries
*.test
*.out
# Go workspace file
go.work
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# Database (if you want to exclude)
# *.db

784
DESIGN.md Normal file
View File

@@ -0,0 +1,784 @@
# Beads - Dependency-Aware Issue Tracker
**Tagline**: "Issues chained together like beads"
## Vision
A lightweight, standalone issue tracker that makes dependency graphs first-class citizens. The killer feature: automatic detection of "ready work" - issues with no open blockers.
**Philosophy**:
- SQLite by default (zero setup, single binary + database file)
- PostgreSQL for teams/scale (multi-user, better performance)
- CLI-first, with clean output for both humans and scripts
- Dependencies are the core primitive, not an afterthought
- Full audit trail of all changes
---
## Core Data Model
### Issues
```go
type Issue struct {
ID string // "bd-1", "bd-2" (bd- prefix, configurable)
Title string // max 500 chars
Description string // problem statement (what/why)
Design string // solution design (how)
AcceptanceCriteria string // definition of done
Notes string // working notes
Status Status // open, in_progress, blocked, closed
Priority int // 0 (highest) to 4 (lowest), default 2
IssueType IssueType // bug, feature, task, epic, chore
Assignee string // optional
EstimatedMinutes *int // optional
CreatedAt time.Time
UpdatedAt time.Time
ClosedAt *time.Time
}
type Status string
const (
StatusOpen Status = "open"
StatusInProgress Status = "in_progress"
StatusBlocked Status = "blocked"
StatusClosed Status = "closed"
)
type IssueType string
const (
TypeBug IssueType = "bug"
TypeFeature IssueType = "feature"
TypeTask IssueType = "task"
TypeEpic IssueType = "epic"
TypeChore IssueType = "chore"
)
```
### Dependencies
```go
type Dependency struct {
IssueID string // the issue that depends
DependsOnID string // the issue it depends on
Type DependencyType // relationship type
CreatedAt time.Time
CreatedBy string
}
type DependencyType string
const (
DepBlocks DependencyType = "blocks" // hard blocker
DepRelated DependencyType = "related" // soft relationship
DepParentChild DependencyType = "parent-child" // epic/subtask
)
```
### Labels
```go
type Label struct {
IssueID string
Label string // freeform tag
}
```
### Events (Audit Trail)
```go
type Event struct {
ID int64
IssueID string
EventType EventType
Actor string // who made the change
OldValue *string // before state (JSON)
NewValue *string // after state (JSON)
Comment *string // for comment events
CreatedAt time.Time
}
type EventType string
const (
EventCreated EventType = "created"
EventUpdated EventType = "updated"
EventStatusChanged EventType = "status_changed"
EventCommented EventType = "commented"
EventClosed EventType = "closed"
EventReopened EventType = "reopened"
EventDependencyAdded EventType = "dependency_added"
EventDependencyRemoved EventType = "dependency_removed"
EventLabelAdded EventType = "label_added"
EventLabelRemoved EventType = "label_removed"
)
```
---
## Backend Abstraction
### Storage Interface
```go
// Storage defines the interface for issue storage backends
type Storage interface {
// Issues
CreateIssue(ctx context.Context, issue *Issue, actor string) error
GetIssue(ctx context.Context, id string) (*Issue, error)
UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error
CloseIssue(ctx context.Context, id string, reason string, actor string) error
SearchIssues(ctx context.Context, query string, filter IssueFilter) ([]*Issue, error)
// Dependencies
AddDependency(ctx context.Context, dep *Dependency, actor string) error
RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error
GetDependencies(ctx context.Context, issueID string) ([]*Issue, error)
GetDependents(ctx context.Context, issueID string) ([]*Issue, error)
GetDependencyTree(ctx context.Context, issueID string, maxDepth int) ([]*TreeNode, error)
DetectCycles(ctx context.Context) ([][]*Issue, error)
// Labels
AddLabel(ctx context.Context, issueID, label, actor string) error
RemoveLabel(ctx context.Context, issueID, label, actor string) error
GetLabels(ctx context.Context, issueID string) ([]string, error)
GetIssuesByLabel(ctx context.Context, label string) ([]*Issue, error)
// Ready Work & Blocking
GetReadyWork(ctx context.Context, filter WorkFilter) ([]*Issue, error)
GetBlockedIssues(ctx context.Context) ([]*BlockedIssue, error)
// Events
AddComment(ctx context.Context, issueID, actor, comment string) error
GetEvents(ctx context.Context, issueID string, limit int) ([]*Event, error)
// Statistics
GetStatistics(ctx context.Context) (*Statistics, error)
// Lifecycle
Close() error
}
type IssueFilter struct {
Status *Status
Priority *int
IssueType *IssueType
Assignee *string
Labels []string
Limit int
}
type WorkFilter struct {
Status Status // default: open
Priority *int // filter by priority
Assignee *string
Limit int // default: 10
}
type BlockedIssue struct {
Issue
BlockedByCount int
BlockedBy []string // issue IDs
}
type TreeNode struct {
Issue
Depth int
Truncated bool // if hit max depth
}
type Statistics struct {
TotalIssues int
OpenIssues int
InProgressIssues int
ClosedIssues int
BlockedIssues int
ReadyIssues int
AverageLeadTime float64 // hours from open to closed
}
```
### Backend Implementations
```
storage/
storage.go // Interface definition
sqlite/
sqlite.go // SQLite implementation
migrations.go // Schema migrations
postgres/
postgres.go // PostgreSQL implementation
migrations.go // Schema migrations
factory.go // Backend factory
```
### Factory Pattern
```go
type Config struct {
Backend string // "sqlite" or "postgres"
// SQLite config
Path string // default: ~/.beads/beads.db
// PostgreSQL config
Host string
Port int
Database string
User string
Password string
SSLMode string
}
func NewStorage(config Config) (Storage, error) {
switch config.Backend {
case "sqlite":
return sqlite.New(config.Path)
case "postgres":
return postgres.New(config.Host, config.Port, config.Database,
config.User, config.Password, config.SSLMode)
default:
return nil, fmt.Errorf("unknown backend: %s", config.Backend)
}
}
```
---
## Schema Design
### SQLite Schema
```sql
-- Issues table
CREATE TABLE issues (
id TEXT PRIMARY KEY,
title TEXT NOT NULL CHECK(length(title) <= 500),
description TEXT NOT NULL DEFAULT '',
design TEXT NOT NULL DEFAULT '',
acceptance_criteria TEXT NOT NULL DEFAULT '',
notes TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'open',
priority INTEGER NOT NULL DEFAULT 2 CHECK(priority >= 0 AND priority <= 4),
issue_type TEXT NOT NULL DEFAULT 'task',
assignee TEXT,
estimated_minutes INTEGER,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
closed_at DATETIME
);
CREATE INDEX idx_issues_status ON issues(status);
CREATE INDEX idx_issues_priority ON issues(priority);
CREATE INDEX idx_issues_assignee ON issues(assignee);
CREATE INDEX idx_issues_created_at ON issues(created_at);
-- Dependencies table
CREATE TABLE dependencies (
issue_id TEXT NOT NULL,
depends_on_id TEXT NOT NULL,
type TEXT NOT NULL DEFAULT 'blocks',
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_by TEXT NOT NULL,
PRIMARY KEY (issue_id, depends_on_id),
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE,
FOREIGN KEY (depends_on_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX idx_dependencies_issue ON dependencies(issue_id);
CREATE INDEX idx_dependencies_depends_on ON dependencies(depends_on_id);
-- Labels table (many-to-many)
CREATE TABLE labels (
issue_id TEXT NOT NULL,
label TEXT NOT NULL,
PRIMARY KEY (issue_id, label),
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX idx_labels_label ON labels(label);
-- Events table (audit trail)
CREATE TABLE events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
issue_id TEXT NOT NULL,
event_type TEXT NOT NULL,
actor TEXT NOT NULL,
old_value TEXT,
new_value TEXT,
comment TEXT,
created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX idx_events_issue ON events(issue_id);
CREATE INDEX idx_events_created_at ON events(created_at);
-- Ready work view (materialized via trigger)
-- Issues with no open dependencies
CREATE VIEW ready_issues AS
SELECT i.*
FROM issues i
WHERE i.status = 'open'
AND NOT EXISTS (
SELECT 1 FROM dependencies d
JOIN issues blocked ON d.depends_on_id = blocked.id
WHERE d.issue_id = i.id
AND d.type = 'blocks'
AND blocked.status IN ('open', 'in_progress', 'blocked')
);
-- Blocked issues view
CREATE VIEW blocked_issues AS
SELECT
i.*,
COUNT(d.depends_on_id) as blocked_by_count
FROM issues i
JOIN dependencies d ON i.id = d.issue_id
JOIN issues blocker ON d.depends_on_id = blocker.id
WHERE i.status IN ('open', 'in_progress', 'blocked')
AND d.type = 'blocks'
AND blocker.status IN ('open', 'in_progress', 'blocked')
GROUP BY i.id;
```
### PostgreSQL Schema Extensions
PostgreSQL can leverage more advanced features:
```sql
-- Use JSONB for flexible metadata
ALTER TABLE issues ADD COLUMN metadata JSONB;
CREATE INDEX idx_issues_metadata ON issues USING GIN(metadata);
-- Use array type for labels (alternative to junction table)
-- (Keep junction table for compatibility, but could optimize)
-- Use recursive CTEs for dependency trees (more efficient)
-- Implement as stored function:
CREATE OR REPLACE FUNCTION get_dependency_tree(root_issue_id TEXT, max_depth INT DEFAULT 50)
RETURNS TABLE (
issue_id TEXT,
title TEXT,
status TEXT,
priority INT,
depth INT,
path TEXT[]
) AS $$
WITH RECURSIVE tree AS (
SELECT
i.id as issue_id,
i.title,
i.status,
i.priority,
0 as depth,
ARRAY[i.id] as path
FROM issues i
WHERE i.id = root_issue_id
UNION ALL
SELECT
i.id,
i.title,
i.status,
i.priority,
t.depth + 1,
t.path || i.id
FROM issues i
JOIN dependencies d ON i.id = d.depends_on_id
JOIN tree t ON d.issue_id = t.issue_id
WHERE t.depth < max_depth
AND NOT (i.id = ANY(t.path)) -- cycle detection
)
SELECT * FROM tree ORDER BY depth, priority;
$$ LANGUAGE SQL;
-- Cycle detection using CTEs
CREATE OR REPLACE FUNCTION detect_dependency_cycles()
RETURNS TABLE (cycle_path TEXT[]) AS $$
WITH RECURSIVE paths AS (
SELECT
issue_id,
depends_on_id,
ARRAY[issue_id, depends_on_id] as path,
false as is_cycle
FROM dependencies
UNION ALL
SELECT
p.issue_id,
d.depends_on_id,
p.path || d.depends_on_id,
d.depends_on_id = ANY(p.path)
FROM paths p
JOIN dependencies d ON p.depends_on_id = d.issue_id
WHERE NOT p.is_cycle
AND array_length(p.path, 1) < 100
)
SELECT DISTINCT path
FROM paths
WHERE is_cycle
ORDER BY path;
$$ LANGUAGE SQL;
```
---
## CLI Design
### Command Structure
```
beads [global options] <command> [command options]
Global Options:
--db <path> Database path (default: ~/.beads/beads.db)
--backend <type> Backend type: sqlite, postgres (default: sqlite)
--config <path> Config file path (default: ~/.beads/config.yaml)
--format <format> Output format: text, json, yaml (default: text)
--no-color Disable colored output
Commands:
init Initialize a new beads database
create Create a new issue
update Update an issue
show Show issue details
list List issues
search Search issues by text
close Close one or more issues
reopen Reopen a closed issue
comment Add a comment to an issue
dep add Add a dependency
dep remove Remove a dependency
dep tree Show dependency tree
dep cycles Detect dependency cycles
label add Add a label
label remove Remove a label
label list List all labels
label issues List issues with label
ready Show ready work (no blockers)
blocked Show blocked issues
stats Show statistics
config Manage configuration
export Export database to JSON/YAML
import Import from JSON/YAML
migrate Migrate from other issue trackers
help Show help
version Show version
```
### Example Commands
```bash
# Initialize
beads init # Creates ~/.beads/beads.db
beads init --db ./project.db # Project-local database
beads init --backend postgres # Interactive setup for PostgreSQL
# Create
beads create "Fix login bug" \
--description "Users can't log in with Google OAuth" \
--priority 1 \
--type bug \
--label "auth,critical"
# Update
beads update bd-1 --status in_progress --assignee "alice"
# Show
beads show bd-1 # Full details with dependencies
beads show bd-1 --format json # JSON output
# List
beads list # All open issues
beads list --status closed # Closed issues
beads list --priority 1 # P1 issues
beads list --label "auth" # Issues with label
# Dependencies
beads dep add bd-2 bd-1 # bd-2 depends on bd-1
beads dep tree bd-2 # Show full tree
beads dep cycles # Check for cycles
# Ready work
beads ready # Top 10 ready issues
beads ready --limit 20 --assignee alice
# Comments
beads comment bd-1 "Started investigation"
beads comment bd-1 --file notes.md # From file
# Close
beads close bd-1 "Fixed in commit abc123"
beads close bd-1 bd-2 bd-3 --reason "Duplicate"
# Search
beads search "oauth" # Full-text search
beads search "oauth" --status open # With filters
# Stats
beads stats # Overall statistics
beads stats --format json # Machine-readable
# Export/Import
beads export --output backup.json
beads import --input backup.json
beads migrate --from github --repo owner/repo
```
---
## Configuration
### Config File (~/.beads/config.yaml)
```yaml
# Default backend
backend: sqlite
# SQLite config
sqlite:
path: ~/.beads/beads.db
# PostgreSQL config
postgres:
host: localhost
port: 5432
database: beads
user: beads
password: ""
sslmode: prefer
# Display preferences
display:
color: true
format: text # text, json, yaml
date_format: "2006-01-02 15:04"
# Issue defaults
defaults:
priority: 2
type: task
status: open
# ID prefix (default: "bd-")
id_prefix: "bd-"
# Actor name (for audit trail)
actor: $USER
```
---
## Implementation Roadmap
### Phase 1: Core Foundation
- [ ] Project setup (go.mod, directory structure)
- [ ] Data model (types, interfaces)
- [ ] SQLite storage implementation
- [ ] Basic CLI (create, list, show, update, close)
- [ ] Dependency management (add, remove, tree)
- [ ] Ready work detection
- [ ] Tests
### Phase 2: Polish & Features
- [ ] Labels support
- [ ] Comments and events
- [ ] Full-text search
- [ ] Cycle detection
- [ ] Statistics
- [ ] Colored CLI output
- [ ] JSON/YAML output formats
### Phase 3: PostgreSQL
- [ ] PostgreSQL storage implementation
- [ ] Config file support
- [ ] Backend switching
- [ ] Migration utilities
- [ ] Performance optimization
### Phase 4: Advanced
- [ ] Export/import
- [ ] GitHub/GitLab/Jira migration tools
- [ ] TUI (bubble tea?)
- [ ] Web UI (templ?)
- [ ] API server mode
- [ ] Multi-user workflows
---
## Key Design Decisions
### Why SQLite Default?
1. **Zero setup**: Single binary + database file
2. **Portability**: Database is a file, easy to backup/share
3. **Performance**: More than enough for <100k issues
4. **Simplicity**: No server to run
5. **Git-friendly**: Can commit database file for small teams
### Why Support PostgreSQL?
1. **Scale**: Better for large teams (>10 people)
2. **Concurrency**: Better multi-user support
3. **Features**: Recursive CTEs, JSONB, full-text search
4. **Existing infrastructure**: Teams already running PostgreSQL
### ID Prefix: "bd-" vs "beads-"
- **bd-**: Shorter, easier to type
- **beads-**: More explicit
- **Configurable**: Let users choose in config
I lean toward **bd-** for brevity.
### Dependency Types
- **blocks**: Hard blocker (affects ready work calculation)
- **related**: Soft relationship (just for context)
- **parent-child**: Epic/subtask hierarchy
Only "blocks" affects ready work detection.
### Status vs. Blocked Field
Should we have a separate `blocked` status, or compute it dynamically?
**Decision**: Compute dynamically
- `blocked` status is redundant with dependency graph
- Auto-blocking based on dependencies is error-prone
- Let users manually set `blocked` if they want (e.g., blocked on external dependency)
- `ready` command shows what's actually unblocked
### Event Storage
Full audit trail in `events` table. This enables:
- Change history for issues
- Comment threads
- "Who changed what when" debugging
- Potential undo/revert functionality
---
## What to Port from VibeCoder
### ✅ Keep
- Core data model (issues, dependencies, labels, events)
- Ready work detection algorithm
- Dependency tree traversal
- Cycle detection
- CLI structure (create, update, show, list, etc.)
- Priority system (0-4, 0=highest)
- Issue types (bug, feature, task, epic, chore)
### ❌ Leave Behind
- MCP server (can add later as separate project)
- VibeCoder-specific concepts (missions, campaigns, amps)
- Temporal workflows
- Web portal integration
- Mission tracking
- Campaign aggregation views
### 🤔 Maybe Later
- Web UI (keep CLI-first)
- API server mode
- TUI with bubble tea
- GitHub/GitLab sync
- Email notifications
- Webhooks
---
## Go Dependencies
Minimal dependencies:
```go
// Core
database/sql
github.com/mattn/go-sqlite3 // SQLite driver
github.com/lib/pq // PostgreSQL driver
// CLI
github.com/spf13/cobra // CLI framework
github.com/spf13/viper // Config management
github.com/fatih/color // Terminal colors
// Serialization
gopkg.in/yaml.v3 // YAML support
// Testing
github.com/stretchr/testify // Test assertions
```
No frameworks, no ORMs. Keep it simple.
---
## Open Questions
1. **Multi-database support**: Should one beads installation manage multiple databases?
- Probably yes: `beads --db project1.db` vs `beads --db project2.db`
2. **Git integration**: Should beads auto-commit the database?
- Probably no: Let users manage their own git workflow
- But provide hooks/examples
3. **Web UI**: Build one, or keep it CLI-only?
- Start CLI-only
- Web UI as separate project later (beads-web?)
4. **API server**: Should beads run as a server?
- Start as CLI tool
- Add `beads serve` command later for HTTP API
5. **Migrations**: How to handle schema changes?
- Embed migrations in binary
- Track schema version in database
- Auto-migrate on startup (with backup)
6. **Concurrency**: SQLite WAL mode for better concurrency?
- Yes, enable by default
- Document limitations (single writer at a time)
7. **Full-text search**: SQLite FTS5 or simple LIKE queries?
- Start with LIKE queries (simpler)
- Add FTS5 in phase 2
8. **Transactions**: Where do we need them?
- Issue creation (issue + labels + event)
- Dependency changes (dep + event + cycle check)
- Bulk operations (close multiple issues)
---
## Success Metrics
Beads is successful if:
1. **Installation**: `go install github.com/user/beads@latest` just works
2. **First use**: `beads init && beads create "test"` works in <10 seconds
3. **Performance**: Can handle 10k issues with instant CLI responses
4. **Portability**: Database file can be moved between machines, checked into git
5. **Adoption**: Used by at least 3 other developers/teams within 6 months
---
## Next Steps
1. Create `~/src/beads` directory structure
2. Initialize Go module
3. Implement core types (Issue, Dependency, Storage interface)
4. Build SQLite storage implementation
5. Build basic CLI (create, show, list)
6. Test with VibeCoder's issue data (export and import)
7. Iterate toward parity with scripts/issue.ts
8. Release v0.1.0
Let's build something beautiful. 🔗✨

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Beads Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

104
QUICKSTART.md Normal file
View File

@@ -0,0 +1,104 @@
# Beads Quickstart
Get up and running with Beads in 2 minutes.
## Installation
```bash
cd ~/src/beads
go build -o beads ./cmd/beads
./beads --help
```
## Your First Issues
```bash
# Create a few issues
./beads create "Set up database" -p 1 -t task
./beads create "Create API" -p 2 -t feature
./beads create "Add authentication" -p 2 -t feature
# List them
./beads list
```
## Add Dependencies
```bash
# API depends on database
./beads dep add bd-2 bd-1
# Auth depends on API
./beads dep add bd-3 bd-2
# View the tree
./beads dep tree bd-3
```
Output:
```
🌲 Dependency tree for bd-3:
→ bd-3: Add authentication [P2] (open)
→ bd-2: Create API [P2] (open)
→ bd-1: Set up database [P1] (open)
```
## Find Ready Work
```bash
./beads ready
```
Output:
```
📋 Ready work (1 issues with no blockers):
1. [P1] bd-1: Set up database
```
Only bd-1 is ready because bd-2 and bd-3 are blocked!
## Work the Queue
```bash
# Start working on bd-1
./beads update bd-1 --status in_progress
# Complete it
./beads close bd-1 --reason "Database setup complete"
# Check ready work again
./beads ready
```
Now bd-2 is ready! 🎉
## Track Progress
```bash
# See blocked issues
./beads blocked
# View statistics
./beads stats
```
## Database Location
By default: `~/.beads/beads.db`
You can use project-specific databases:
```bash
./beads --db ./my-project.db create "Task"
```
## Next Steps
- Add labels: `./beads create "Task" -l "backend,urgent"`
- Filter ready work: `./beads ready --priority 1`
- Search issues: `./beads list --status open`
- Detect cycles: `./beads dep cycles`
See [README.md](README.md) for full documentation.

247
README.md Normal file
View File

@@ -0,0 +1,247 @@
# Beads 🔗
**Issues chained together like beads.**
A lightweight, dependency-aware issue tracker with first-class support for tracking blockers and finding ready work.
## Features
- **Zero setup** - Single binary + SQLite database file
- 🔗 **Dependency tracking** - First-class support for issue dependencies
- 📋 **Ready work detection** - Automatically finds issues with no open blockers
- 🌲 **Dependency trees** - Visualize full dependency graphs
- 🚫 **Blocker analysis** - See what's blocking your issues
- 📊 **Statistics** - Track progress and lead times
- 🎨 **Colored CLI** - Beautiful terminal output
- 💾 **Full audit trail** - Every change is logged
## Installation
```bash
go install github.com/steveyackey/beads/cmd/beads@latest
```
Or build from source:
```bash
git clone https://github.com/steveyackey/beads
cd beads
go build -o beads ./cmd/beads
```
## Quick Start
```bash
# Create your first issue
beads create "Build login page" -d "Need user authentication" -p 1 -t feature
# Create another issue that depends on it
beads create "Add OAuth support" -p 2
beads dep add bd-2 bd-1 # bd-2 depends on bd-1
# See what's ready to work on
beads ready
# Show dependency tree
beads dep tree bd-2
```
## Usage
### Creating Issues
```bash
beads create "Fix bug" -d "Description" -p 1 -t bug
beads create "Add feature" --description "Long description" --priority 2 --type feature
beads create "Task" -l "backend,urgent" --assignee alice
```
Options:
- `-d, --description` - Issue description
- `-p, --priority` - Priority (0-4, 0=highest)
- `-t, --type` - Type (bug|feature|task|epic|chore)
- `-a, --assignee` - Assign to user
- `-l, --labels` - Comma-separated labels
### Viewing Issues
```bash
beads show bd-1 # Show full details
beads list # List all issues
beads list --status open # Filter by status
beads list --priority 1 # Filter by priority
beads list --assignee alice # Filter by assignee
```
### Updating Issues
```bash
beads update bd-1 --status in_progress
beads update bd-1 --priority 2
beads update bd-1 --assignee bob
beads close bd-1 --reason "Completed"
beads close bd-1 bd-2 bd-3 # Close multiple
```
### Dependencies
```bash
# Add dependency (bd-2 depends on bd-1)
beads dep add bd-2 bd-1
beads dep add bd-3 bd-1 --type blocks
# Remove dependency
beads dep remove bd-2 bd-1
# Show dependency tree
beads dep tree bd-2
# Detect cycles
beads dep cycles
```
### Finding Work
```bash
# Show ready work (no blockers)
beads ready
beads ready --limit 20
beads ready --priority 1
beads ready --assignee alice
# Show blocked issues
beads blocked
# Statistics
beads stats
```
## Database
By default, Beads stores data in `~/.beads/beads.db` using SQLite.
You can use a different database:
```bash
beads --db ./project.db create "Issue"
```
Or set it via environment:
```bash
export BEADS_DB=/path/to/db
beads create "Issue"
```
## Dependency Model
Beads has three types of dependencies:
1. **blocks** - Hard blocker (affects ready work calculation)
2. **related** - Soft relationship (just for context)
3. **parent-child** - Epic/subtask hierarchy
Only `blocks` dependencies affect the ready work queue.
## Ready Work Algorithm
An issue is "ready" if:
- Status is `open`
- It has NO open `blocks` dependencies
- All blockers are either closed or non-existent
Example:
```
bd-1 [open] ← blocks ← bd-2 [open] ← blocks ← bd-3 [open]
```
Ready work: `[bd-1]`
Blocked: `[bd-2, bd-3]`
## Issue Lifecycle
```
open → in_progress → closed
blocked (manually set, or has open blockers)
```
## Architecture
```
beads/
├── cmd/beads/ # CLI entry point
├── internal/
│ ├── types/ # Core data types
│ ├── storage/ # Storage interface
│ │ └── sqlite/ # SQLite implementation
│ └── ...
└── DESIGN.md # Full design doc
```
## Comparison to Other Tools
| Feature | Beads | GitHub Issues | Jira | Linear |
|---------|-------|---------------|------|--------|
| Zero setup | ✅ | ❌ | ❌ | ❌ |
| Dependency tracking | ✅ | ⚠️ | ✅ | ✅ |
| Ready work detection | ✅ | ❌ | ❌ | ❌ |
| Offline first | ✅ | ❌ | ❌ | ❌ |
| Git-friendly | ✅ | ❌ | ❌ | ❌ |
| Self-hosted | ✅ | ⚠️ | ⚠️ | ❌ |
## Future Plans
- [ ] PostgreSQL backend for teams
- [ ] Config file support
- [ ] Export/import (JSON, CSV)
- [ ] GitHub/Jira migration tools
- [ ] TUI with bubble tea
- [ ] Web UI (optional)
- [ ] API server mode
## Why Beads?
We built Beads after getting frustrated with heavyweight issue trackers that:
- Required complex setup
- Didn't treat dependencies as first-class citizens
- Couldn't easily show "what's ready to work on"
- Required internet connectivity
- Weren't git-friendly for small teams
Beads is designed for developers who want:
- **Zero setup** - Just run a binary
- **Dependency awareness** - Built-in from day one
- **Offline first** - Local SQLite database
- **Git-friendly** - Check in your database with your code
- **Simple** - No complicated workflows or ceremony
## Documentation
- **[README.md](README.md)** - You are here! Quick reference
- **[QUICKSTART.md](QUICKSTART.md)** - 2-minute tutorial
- **[WORKFLOW.md](WORKFLOW.md)** - Complete workflow guide (vibe coding, database structure, git workflow)
- **[DESIGN.md](DESIGN.md)** - Full technical design document
## Development
```bash
# Run tests
go test ./...
# Build
go build -o beads ./cmd/beads
# Run
./beads create "Test issue"
```
## License
MIT
## Credits
Built with ❤️ by developers who love tracking dependencies and finding ready work.
Inspired by the need for a simpler, dependency-aware issue tracker.

620
WORKFLOW.md Normal file
View File

@@ -0,0 +1,620 @@
# Beads Workflow Guide
Complete guide to using Beads for solo development and with AI coding assistants like Claude Code.
## Table of Contents
- [Vibe Coding with Claude Code](#vibe-coding-with-claude-code)
- [Database Structure](#database-structure)
- [Git Workflow](#git-workflow)
- [Advanced Usage](#advanced-usage)
---
## Vibe Coding with Claude Code
### The "Let's Continue" Protocol
**Start of every session:**
```bash
# 1. Check for abandoned work
beads list --status in_progress
# 2. If none, get ready work
beads ready --limit 5
# 3. Show top priority
beads show bd-X
```
Tell Claude: **"Let's continue"** and it runs these commands.
### Full Project Workflow
#### Session 1: Project Kickoff
**You:** "Starting a new e-commerce project. Help me plan it."
**Claude creates issues:**
```bash
cd ~/my-project
alias beads="~/src/beads/beads --db ./project.db"
beads create "Set up Next.js project" -p 0 -t task
beads create "Design database schema" -p 0 -t task
beads create "Build authentication system" -p 1 -t feature
beads create "Create API routes" -p 1 -t feature
beads create "Build UI components" -p 2 -t feature
beads create "Add tests" -p 2 -t task
beads create "Deploy to production" -p 3 -t task
```
**Map dependencies:**
```bash
beads dep add bd-4 bd-2 # API depends on schema
beads dep add bd-3 bd-2 # Auth depends on schema
beads dep add bd-5 bd-4 # UI depends on API
beads dep add bd-6 bd-3 # Tests depend on auth
beads dep add bd-6 bd-5 # Tests depend on UI
beads dep add bd-7 bd-6 # Deploy depends on tests
```
**Visualize:**
```bash
beads dep tree bd-7
```
Output:
```
🌲 Dependency tree for bd-7:
→ bd-7: Deploy to production [P3] (open)
→ bd-6: Add tests [P2] (open)
→ bd-3: Build authentication system [P1] (open)
→ bd-2: Design database schema [P0] (open)
→ bd-5: Build UI components [P2] (open)
→ bd-4: Create API routes [P1] (open)
→ bd-2: Design database schema [P0] (open)
```
**Check ready work:**
```bash
beads ready
```
```
📋 Ready work (2 issues with no blockers):
1. [P0] bd-1: Set up Next.js project
2. [P0] bd-2: Design database schema
```
#### Session 2: Foundation
**You:** "Let's continue"
**Claude:**
```bash
beads ready
# Shows: bd-1, bd-2
```
**You:** "Work on bd-2"
**Claude:**
```bash
beads update bd-2 --status in_progress
beads show bd-2
# ... designs schema, creates migrations ...
beads close bd-2 --reason "Schema designed with Prisma, migrations created"
beads ready
```
Now shows:
```
📋 Ready work (3 issues):
1. [P0] bd-1: Set up Next.js project
2. [P1] bd-3: Build authentication system ← Unblocked!
3. [P1] bd-4: Create API routes ← Unblocked!
```
#### Session 3: Building Features
**You:** "Let's continue, work on bd-3"
**Claude:**
```bash
beads ready # Confirms bd-3 is ready
beads update bd-3 --status in_progress
# ... implements JWT auth, middleware ...
beads close bd-3 --reason "Auth complete with JWT tokens and protected routes"
```
#### Session 4: Discovering Blockers
**You:** "Let's continue, work on bd-4"
**Claude starts working, then:**
**You:** "We need to add OAuth before we can finish the API properly"
**Claude:**
```bash
beads create "Set up OAuth providers (Google, GitHub)" -p 1 -t task
beads dep add bd-4 bd-8 # API now depends on OAuth
beads update bd-4 --status blocked
beads ready
```
Shows:
```
📋 Ready work (2 issues):
1. [P0] bd-1: Set up Next.js project
2. [P1] bd-8: Set up OAuth providers ← New blocker must be done first
```
**Claude:** "I've blocked bd-4 and created bd-8 as a prerequisite. Should I work on OAuth setup now?"
#### Session 5: Unblocking
**You:** "Yes, do bd-8"
**Claude completes OAuth setup:**
```bash
beads close bd-8 --reason "OAuth configured for Google and GitHub"
beads update bd-4 --status open # Manually unblock
beads ready
```
Now bd-4 is ready again!
### Pro Tips for AI Pairing
**1. Add context with comments:**
```bash
beads update bd-5 --status in_progress
# Work session ends mid-task
beads comment bd-5 "Implemented navbar and footer, still need shopping cart icon"
```
Next session, Claude reads the comment and continues.
**2. Break down epics when too big:**
```bash
beads create "Epic: User Management" -p 1 -t epic
beads create "User registration flow" -p 1 -t task
beads create "User login/logout" -p 1 -t task
beads create "Password reset" -p 2 -t task
beads dep add bd-10 bd-9 --type parent-child
beads dep add bd-11 bd-9 --type parent-child
beads dep add bd-12 bd-9 --type parent-child
```
**3. Use labels for filtering:**
```bash
beads create "Fix login timeout" -p 0 -l "bug,auth,urgent"
beads create "Add loading spinner" -p 2 -l "ui,polish"
# Later
beads list --status open | grep urgent
```
**4. Track estimates:**
```bash
beads create "Refactor user service" -p 2 --estimated-minutes 120
beads ready # Shows estimates for planning
```
---
## Database Structure
### What's Inside project.db?
A single **SQLite database file** (typically 72KB-1MB) containing:
#### Tables
**1. `issues` - Core issue data**
```sql
CREATE TABLE issues (
id TEXT PRIMARY KEY, -- "bd-1", "bd-2", etc.
title TEXT NOT NULL,
description TEXT,
design TEXT, -- Solution design
acceptance_criteria TEXT, -- Definition of done
notes TEXT, -- Working notes
status TEXT DEFAULT 'open', -- open|in_progress|blocked|closed
priority INTEGER DEFAULT 2, -- 0-4 (0=highest)
issue_type TEXT DEFAULT 'task', -- bug|feature|task|epic|chore
assignee TEXT,
estimated_minutes INTEGER,
created_at DATETIME,
updated_at DATETIME,
closed_at DATETIME
);
```
**2. `dependencies` - Relationship graph**
```sql
CREATE TABLE dependencies (
issue_id TEXT NOT NULL, -- "bd-2"
depends_on_id TEXT NOT NULL, -- "bd-1" (bd-2 depends on bd-1)
type TEXT DEFAULT 'blocks', -- blocks|related|parent-child
created_at DATETIME,
created_by TEXT,
PRIMARY KEY (issue_id, depends_on_id)
);
```
**3. `labels` - Tags for categorization**
```sql
CREATE TABLE labels (
issue_id TEXT NOT NULL,
label TEXT NOT NULL,
PRIMARY KEY (issue_id, label)
);
```
**4. `events` - Complete audit trail**
```sql
CREATE TABLE events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
issue_id TEXT NOT NULL,
event_type TEXT NOT NULL, -- created|updated|commented|closed|etc
actor TEXT NOT NULL, -- who made the change
old_value TEXT, -- before (JSON)
new_value TEXT, -- after (JSON)
comment TEXT, -- for comments and close reasons
created_at DATETIME
);
```
**5. `ready_issues` - VIEW (auto-computed)**
```sql
-- Shows issues with NO open blockers
-- This is the magic that powers "beads ready"
CREATE VIEW ready_issues AS
SELECT i.*
FROM issues i
WHERE i.status = 'open'
AND NOT EXISTS (
SELECT 1 FROM dependencies d
JOIN issues blocked ON d.depends_on_id = blocked.id
WHERE d.issue_id = i.id
AND d.type = 'blocks'
AND blocked.status IN ('open', 'in_progress', 'blocked')
);
```
**6. `blocked_issues` - VIEW (auto-computed)**
```sql
-- Shows issues WITH open blockers
CREATE VIEW blocked_issues AS
SELECT
i.*,
COUNT(d.depends_on_id) as blocked_by_count
FROM issues i
JOIN dependencies d ON i.id = d.issue_id
JOIN issues blocker ON d.depends_on_id = blocker.id
WHERE i.status IN ('open', 'in_progress', 'blocked')
AND d.type = 'blocks'
AND blocker.status IN ('open', 'in_progress', 'blocked')
GROUP BY i.id;
```
### Example Data
**Issues table:**
```
bd-1|Critical bug|Fix login timeout|||open|0|bug|||2025-10-11 19:23:10|2025-10-11 19:23:10|
bd-2|High priority||Need auth first||open|1|feature|||2025-10-11 19:23:11|2025-10-11 19:23:11|
```
**Dependencies table:**
```
bd-2|bd-1|blocks|2025-10-11 19:23:16|stevey
```
Translation: "bd-2 depends on bd-1 (blocks type), created by stevey"
**Events table:**
```
1|bd-1|created|stevey||{"id":"bd-1","title":"Critical bug",...}||2025-10-11 19:23:10
2|bd-2|created|stevey||{"id":"bd-2","title":"High priority",...}||2025-10-11 19:23:11
3|bd-2|dependency_added|stevey|||Added dependency: bd-2 blocks bd-1|2025-10-11 19:23:16
```
### Inspecting the Database
**Show all tables:**
```bash
sqlite3 project.db ".tables"
```
**View schema:**
```bash
sqlite3 project.db ".schema issues"
```
**Query directly:**
```bash
# Find all P0 issues
sqlite3 project.db "SELECT id, title FROM issues WHERE priority = 0;"
# See dependency graph
sqlite3 project.db "SELECT issue_id, depends_on_id FROM dependencies;"
# View audit trail for an issue
sqlite3 project.db "SELECT * FROM events WHERE issue_id = 'bd-5' ORDER BY created_at;"
# Who's working on what?
sqlite3 project.db "SELECT assignee, COUNT(*) FROM issues WHERE status = 'in_progress' GROUP BY assignee;"
# See what's ready (same as beads ready)
sqlite3 project.db "SELECT id, title, priority FROM ready_issues ORDER BY priority;"
```
**Export to CSV:**
```bash
sqlite3 project.db -header -csv "SELECT * FROM issues;" > issues.csv
```
**Database size:**
```bash
ls -lh project.db
# Typically: 72KB (empty) to ~1MB (1000 issues)
```
---
## Git Workflow
### Committing the Database
**The database IS your project state.** Commit it!
```bash
# Add database to git
git add project.db
# Commit with meaningful message
git commit -m "Updated tracker: completed auth (bd-3), ready for API work"
# Push
git push
```
### Multi-Machine Workflow
**Machine 1:**
```bash
beads create "New task" -p 1
beads update bd-5 --status in_progress
git add project.db
git commit -m "Started working on bd-5"
git push
```
**Machine 2:**
```bash
git pull
beads ready # Sees bd-5 is in progress
beads list --status in_progress # See what you were working on
```
### Team Workflow
**Each developer has their own database:**
```bash
# Alice's machine
beads --db alice.db create "Fix bug"
# Bob's machine
beads --db bob.db create "Add feature"
# Merge by convention:
# - Alice handles backend issues (bd-1 to bd-50)
# - Bob handles frontend issues (bd-51 to bd-100)
```
Or use **PostgreSQL** for shared state (future feature).
### Branching Strategy
**Option 1: Database per branch**
```bash
git checkout -b feature/auth
cp main.db auth.db
beads --db auth.db create "Add OAuth" -p 1
# Work on branch...
git add auth.db
git commit -m "Auth implementation progress"
```
**Option 2: Single database, label by branch**
```bash
beads create "Add OAuth" -p 1 -l "branch:feature/auth"
beads list | grep "branch:feature/auth"
```
---
## Advanced Usage
### Alias Setup
Add to `~/.bashrc` or `~/.zshrc`:
```bash
# Project-specific
alias b="~/src/beads/beads --db ./project.db"
# Usage
b create "Task" -p 1
b ready
b show bd-5
```
### Scripting Beads
**Find all unassigned P0 issues:**
```bash
#!/bin/bash
# `beads list` prints multi-line records, so a plain `grep -v` only drops
# the "Assignee:" lines — it cannot filter whole issues. Query directly:
sqlite3 project.db "SELECT id, title FROM issues
  WHERE priority = 0 AND status = 'open'
  AND (assignee IS NULL OR assignee = '');"
```
**Auto-close issues from git commits:**
```bash
#!/bin/bash
# In git hook: .git/hooks/post-commit (a commit-msg hook runs before HEAD
# moves, so the hash below would point at the *previous* commit)
COMMIT_MSG=$(git log -1 --pretty=%B)
if [[ $COMMIT_MSG =~ bd-([0-9]+) ]]; then
ISSUE_ID="bd-${BASH_REMATCH[1]}"
~/src/beads/beads --db ./project.db close "$ISSUE_ID" \
--reason "Auto-closed from commit: $(git rev-parse --short HEAD)"
fi
```
**Weekly report:**
```bash
#!/bin/bash
echo "Issues closed this week:"
sqlite3 project.db "
SELECT id, title, closed_at
FROM issues
WHERE closed_at > date('now', '-7 days')
ORDER BY closed_at DESC;
"
```
### Multi-Project Management
**Use different databases:**
```bash
# Personal projects
beads --db ~/personal.db create "Task"
# Work projects
beads --db ~/work.db create "Task"
# Client A
beads --db ~/clients/client-a.db create "Task"
```
**Or use labels:**
```bash
beads create "Task" -l "project:website"
beads create "Task" -l "project:mobile-app"
# Filter by project
sqlite3 ~/.beads/beads.db "
SELECT i.id, i.title
FROM issues i
JOIN labels l ON i.id = l.issue_id
WHERE l.label = 'project:website';
"
```
### Export/Import
**Export issues to JSON:**
```bash
sqlite3 project.db -json "SELECT * FROM issues;" > backup.json
```
**Export dependency graph:**
```bash
# DOT format for Graphviz
sqlite3 project.db "
SELECT 'digraph G {'
UNION ALL
SELECT ' \"' || issue_id || '\" -> \"' || depends_on_id || '\";'
FROM dependencies
UNION ALL
SELECT '}';
" > graph.dot
dot -Tpng graph.dot -o graph.png
```
### Performance Tips
**Vacuum regularly for large databases:**
```bash
sqlite3 project.db "VACUUM;"
```
**Add custom indexes:**
```bash
sqlite3 project.db "CREATE INDEX idx_labels_custom ON labels(label) WHERE label LIKE 'project:%';"
```
**Archive old issues:**
```bash
sqlite3 project.db "
-- No foreign-key cascade is defined, so delete related rows first to
-- avoid orphaned dependencies, labels, and events.
CREATE TEMP TABLE archived AS
  SELECT id FROM issues
  WHERE status = 'closed' AND closed_at < date('now', '-6 months');
DELETE FROM dependencies WHERE issue_id IN (SELECT id FROM archived)
  OR depends_on_id IN (SELECT id FROM archived);
DELETE FROM labels WHERE issue_id IN (SELECT id FROM archived);
DELETE FROM events WHERE issue_id IN (SELECT id FROM archived);
DELETE FROM issues WHERE id IN (SELECT id FROM archived);
"
```
---
## Troubleshooting
**Database locked:**
```bash
# Another process is using it
lsof project.db
# Kill the process or wait for it to finish
```
**Corrupted database:**
```bash
# Check integrity
sqlite3 project.db "PRAGMA integrity_check;"
# Recover
sqlite3 project.db ".dump" | sqlite3 recovered.db
```
**Reset everything:**
```bash
rm ~/.beads/beads.db
beads create "Fresh start" -p 1
```
---
## Summary
**Beads is:**
- A single binary
- A single database file
- Simple commands
- Powerful dependency tracking
- Perfect for solo dev or AI pairing
**The workflow:**
1. Brain dump all tasks → `beads create`
2. Map dependencies → `beads dep add`
3. Find ready work → `beads ready`
4. Work on it → `beads update --status in_progress`
5. Complete it → `beads close`
6. Commit database → `git add project.db`
7. Repeat
**The magic:**
- Database knows what's ready
- Git tracks your progress
- AI can query and update
- You never lose track of "what's next"

138
cmd/beads/dep.go Normal file
View File

@@ -0,0 +1,138 @@
package main
import (
"context"
"fmt"
"os"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyackey/beads/internal/types"
)
// depCmd is the parent "dep" command; the dependency subcommands
// (add/remove/tree/cycles) are registered under it in init below.
var depCmd = &cobra.Command{
	Use:   "dep",
	Short: "Manage dependencies",
}
// depAddCmd wires "dep add [issue-id] [depends-on-id]": it builds a
// Dependency record from the positional arguments and the --type flag
// and hands it to the storage layer, which validates and persists it.
var depAddCmd = &cobra.Command{
	Use:   "add [issue-id] [depends-on-id]",
	Short: "Add a dependency",
	Args:  cobra.ExactArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		kind, _ := cmd.Flags().GetString("type")
		issueID, dependsOnID := args[0], args[1]
		link := &types.Dependency{
			IssueID:     issueID,
			DependsOnID: dependsOnID,
			Type:        types.DependencyType(kind),
		}
		if err := store.AddDependency(context.Background(), link, actor); err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		green := color.New(color.FgGreen).SprintFunc()
		fmt.Printf("%s Added dependency: %s depends on %s (%s)\n",
			green("✓"), issueID, dependsOnID, kind)
	},
}
// depRemoveCmd wires "dep remove [issue-id] [depends-on-id]", deleting
// the dependency edge between the two issues.
var depRemoveCmd = &cobra.Command{
	Use:   "remove [issue-id] [depends-on-id]",
	Short: "Remove a dependency",
	Args:  cobra.ExactArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		issueID, dependsOnID := args[0], args[1]
		err := store.RemoveDependency(context.Background(), issueID, dependsOnID, actor)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		green := color.New(color.FgGreen).SprintFunc()
		fmt.Printf("%s Removed dependency: %s no longer depends on %s\n",
			green("✓"), issueID, dependsOnID)
	},
}
// depTreeCmd wires "dep tree [issue-id]": prints the transitive
// dependency tree for one issue, indented two spaces per level, up to a
// safety limit of 50 levels (a warning is shown when the tree was cut).
var depTreeCmd = &cobra.Command{
	Use:   "tree [issue-id]",
	Short: "Show dependency tree",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		id := args[0]
		nodes, err := store.GetDependencyTree(context.Background(), id, 50)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		if len(nodes) == 0 {
			fmt.Printf("\n%s has no dependencies\n", id)
			return
		}
		cyan := color.New(color.FgCyan).SprintFunc()
		fmt.Printf("\n%s Dependency tree for %s:\n\n", cyan("🌲"), id)
		truncated := false
		for _, n := range nodes {
			var pad string
			for level := 0; level < n.Depth; level++ {
				pad += "  "
			}
			fmt.Printf("%s→ %s: %s [P%d] (%s)\n",
				pad, n.ID, n.Title, n.Priority, n.Status)
			truncated = truncated || n.Truncated
		}
		if truncated {
			yellow := color.New(color.FgYellow).SprintFunc()
			fmt.Printf("\n%s Warning: Tree truncated at depth 50 (safety limit)\n",
				yellow("⚠"))
		}
		fmt.Println()
	},
}
// depCyclesCmd wires "dep cycles": runs cycle detection over the whole
// dependency graph and lists the member issues of each cycle found.
var depCyclesCmd = &cobra.Command{
	Use:   "cycles",
	Short: "Detect dependency cycles",
	Run: func(cmd *cobra.Command, args []string) {
		cycles, err := store.DetectCycles(context.Background())
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		if len(cycles) == 0 {
			green := color.New(color.FgGreen).SprintFunc()
			fmt.Printf("\n%s No dependency cycles detected\n\n", green("✓"))
			return
		}
		red := color.New(color.FgRed).SprintFunc()
		fmt.Printf("\n%s Found %d dependency cycles:\n\n", red("⚠"), len(cycles))
		for n, cycle := range cycles {
			fmt.Printf("%d. Cycle involving:\n", n+1)
			for _, member := range cycle {
				fmt.Printf("  - %s: %s\n", member.ID, member.Title)
			}
			fmt.Println()
		}
	},
}
// init declares depAddCmd's flag and registers the dep subcommand family
// with the root command.
func init() {
	depAddCmd.Flags().StringP("type", "t", "blocks", "Dependency type (blocks|related|parent-child)")
	for _, sub := range []*cobra.Command{depAddCmd, depRemoveCmd, depTreeCmd, depCyclesCmd} {
		depCmd.AddCommand(sub)
	}
	rootCmd.AddCommand(depCmd)
}

325
cmd/beads/main.go Normal file
View File

@@ -0,0 +1,325 @@
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyackey/beads/internal/storage"
"github.com/steveyackey/beads/internal/storage/sqlite"
"github.com/steveyackey/beads/internal/types"
)
var (
	dbPath string          // --db flag: path to the SQLite database file
	actor  string          // --actor flag: name recorded in the audit trail
	store  storage.Storage // opened in rootCmd's PersistentPreRun, closed in PersistentPostRun
)
// rootCmd is the top-level "beads" command. Its PersistentPreRun opens
// the storage backend (so every subcommand can use the package-level
// store) and resolves the audit-trail actor; PersistentPostRun closes
// the store again.
var rootCmd = &cobra.Command{
	Use:   "beads",
	Short: "Beads - Dependency-aware issue tracker",
	Long:  `Issues chained together like beads. A lightweight issue tracker with first-class dependency support.`,
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		// Default the database to ~/.beads/beads.db when --db was not
		// given. Fail loudly if the home directory cannot be resolved
		// instead of silently falling back to a relative ".beads" path
		// (the original ignored os.UserHomeDir's error).
		if dbPath == "" {
			home, err := os.UserHomeDir()
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error: cannot determine home directory (use --db): %v\n", err)
				os.Exit(1)
			}
			dbPath = filepath.Join(home, ".beads", "beads.db")
		}
		var err error
		store, err = sqlite.New(dbPath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
			os.Exit(1)
		}
		// Set actor from env or default
		if actor == "" {
			actor = os.Getenv("USER")
			if actor == "" {
				actor = "unknown"
			}
		}
	},
	PersistentPostRun: func(cmd *cobra.Command, args []string) {
		if store != nil {
			store.Close()
		}
	},
}
// init registers the global flags shared by every subcommand.
func init() {
	rootCmd.PersistentFlags().StringVar(&dbPath, "db", "", "Database path (default: ~/.beads/beads.db)")
	rootCmd.PersistentFlags().StringVar(&actor, "actor", "", "Actor name for audit trail (default: $USER)")
}
// createCmd wires "create [title]": it assembles a new Issue from the
// title argument and the optional flags, persists it, then attaches any
// requested labels (a label failure is a warning, not fatal — the issue
// itself is already created).
var createCmd = &cobra.Command{
	Use:   "create [title]",
	Short: "Create a new issue",
	Args:  cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		flags := cmd.Flags()
		desc, _ := flags.GetString("description")
		design, _ := flags.GetString("design")
		acceptance, _ := flags.GetString("acceptance")
		prio, _ := flags.GetInt("priority")
		kind, _ := flags.GetString("type")
		owner, _ := flags.GetString("assignee")
		labels, _ := flags.GetStringSlice("labels")
		issue := &types.Issue{
			Title:              args[0],
			Description:        desc,
			Design:             design,
			AcceptanceCriteria: acceptance,
			Status:             types.StatusOpen,
			Priority:           prio,
			IssueType:          types.IssueType(kind),
			Assignee:           owner,
		}
		ctx := context.Background()
		if err := store.CreateIssue(ctx, issue, actor); err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		for _, label := range labels {
			if err := store.AddLabel(ctx, issue.ID, label, actor); err != nil {
				fmt.Fprintf(os.Stderr, "Warning: failed to add label %s: %v\n", label, err)
			}
		}
		green := color.New(color.FgGreen).SprintFunc()
		fmt.Printf("%s Created issue: %s\n", green("✓"), issue.ID)
		fmt.Printf("  Title: %s\n", issue.Title)
		fmt.Printf("  Priority: P%d\n", issue.Priority)
		fmt.Printf("  Status: %s\n", issue.Status)
	},
}
// init declares createCmd's flags and attaches it to the root command.
func init() {
	flags := createCmd.Flags()
	flags.StringP("description", "d", "", "Issue description")
	flags.String("design", "", "Design notes")
	flags.String("acceptance", "", "Acceptance criteria")
	flags.IntP("priority", "p", 2, "Priority (0-4, 0=highest)")
	flags.StringP("type", "t", "task", "Issue type (bug|feature|task|epic|chore)")
	flags.StringP("assignee", "a", "", "Assignee")
	flags.StringSliceP("labels", "l", []string{}, "Labels (comma-separated)")
	rootCmd.AddCommand(createCmd)
}
// showCmd wires "show [id]": prints the full detail view of one issue —
// core fields, optional long-form text sections, labels, and both sides
// of the dependency graph (what it depends on and what it blocks).
var showCmd = &cobra.Command{
	Use:   "show [id]",
	Short: "Show issue details",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		ctx := context.Background()
		issue, err := store.GetIssue(ctx, args[0])
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		if issue == nil {
			fmt.Fprintf(os.Stderr, "Issue %s not found\n", args[0])
			os.Exit(1)
		}
		cyan := color.New(color.FgCyan).SprintFunc()
		fmt.Printf("\n%s: %s\n", cyan(issue.ID), issue.Title)
		fmt.Printf("Status: %s\n", issue.Status)
		fmt.Printf("Priority: P%d\n", issue.Priority)
		fmt.Printf("Type: %s\n", issue.IssueType)
		if issue.Assignee != "" {
			fmt.Printf("Assignee: %s\n", issue.Assignee)
		}
		if issue.EstimatedMinutes != nil {
			fmt.Printf("Estimated: %d minutes\n", *issue.EstimatedMinutes)
		}
		fmt.Printf("Created: %s\n", issue.CreatedAt.Format("2006-01-02 15:04"))
		fmt.Printf("Updated: %s\n", issue.UpdatedAt.Format("2006-01-02 15:04"))
		if issue.Description != "" {
			fmt.Printf("\nDescription:\n%s\n", issue.Description)
		}
		if issue.Design != "" {
			fmt.Printf("\nDesign:\n%s\n", issue.Design)
		}
		if issue.AcceptanceCriteria != "" {
			fmt.Printf("\nAcceptance Criteria:\n%s\n", issue.AcceptanceCriteria)
		}
		// Secondary lookups are non-fatal, but surface their failures as
		// warnings instead of silently dropping the sections (the
		// original discarded these errors with `_`).
		labels, err := store.GetLabels(ctx, issue.ID)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Warning: failed to load labels: %v\n", err)
		}
		if len(labels) > 0 {
			fmt.Printf("\nLabels: %v\n", labels)
		}
		deps, err := store.GetDependencies(ctx, issue.ID)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Warning: failed to load dependencies: %v\n", err)
		}
		if len(deps) > 0 {
			fmt.Printf("\nDepends on (%d):\n", len(deps))
			for _, dep := range deps {
				fmt.Printf("  → %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
			}
		}
		dependents, err := store.GetDependents(ctx, issue.ID)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Warning: failed to load dependents: %v\n", err)
		}
		if len(dependents) > 0 {
			fmt.Printf("\nBlocks (%d):\n", len(dependents))
			for _, dep := range dependents {
				fmt.Printf("  ← %s: %s [P%d]\n", dep.ID, dep.Title, dep.Priority)
			}
		}
		fmt.Println()
	},
}
// init attaches showCmd to the root command.
func init() {
	rootCmd.AddCommand(showCmd)
}
// listCmd wires "list": searches issues with optional status, priority,
// assignee, type, and limit filters, then prints a short summary of each.
var listCmd = &cobra.Command{
	Use:   "list",
	Short: "List issues",
	Run: func(cmd *cobra.Command, args []string) {
		flags := cmd.Flags()
		statusStr, _ := flags.GetString("status")
		owner, _ := flags.GetString("assignee")
		kindStr, _ := flags.GetString("type")
		limit, _ := flags.GetInt("limit")
		filter := types.IssueFilter{Limit: limit}
		if statusStr != "" {
			status := types.Status(statusStr)
			filter.Status = &status
		}
		// Use Changed() to properly handle P0 (priority=0)
		if flags.Changed("priority") {
			prio, _ := flags.GetInt("priority")
			filter.Priority = &prio
		}
		if owner != "" {
			filter.Assignee = &owner
		}
		if kindStr != "" {
			kind := types.IssueType(kindStr)
			filter.IssueType = &kind
		}
		issues, err := store.SearchIssues(context.Background(), "", filter)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("\nFound %d issues:\n\n", len(issues))
		for _, issue := range issues {
			fmt.Printf("%s [P%d] %s\n", issue.ID, issue.Priority, issue.Status)
			fmt.Printf("  %s\n", issue.Title)
			if issue.Assignee != "" {
				fmt.Printf("  Assignee: %s\n", issue.Assignee)
			}
			fmt.Println()
		}
	},
}
// init declares listCmd's filter flags and attaches it to the root
// command.
func init() {
	flags := listCmd.Flags()
	flags.StringP("status", "s", "", "Filter by status")
	flags.IntP("priority", "p", 0, "Filter by priority")
	flags.StringP("assignee", "a", "", "Filter by assignee")
	flags.StringP("type", "t", "", "Filter by type")
	flags.IntP("limit", "n", 0, "Limit results")
	rootCmd.AddCommand(listCmd)
}
// updateCmd wires "update [id]": it collects only the flags the user
// actually set (via Changed, so an untouched flag never clobbers a
// field) into a field→value map and applies them with one UpdateIssue
// call.
var updateCmd = &cobra.Command{
	Use:   "update [id]",
	Short: "Update an issue",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		flags := cmd.Flags()
		updates := map[string]interface{}{}
		if flags.Changed("status") {
			v, _ := flags.GetString("status")
			updates["status"] = v
		}
		if flags.Changed("priority") {
			v, _ := flags.GetInt("priority")
			updates["priority"] = v
		}
		if flags.Changed("title") {
			v, _ := flags.GetString("title")
			updates["title"] = v
		}
		if flags.Changed("assignee") {
			v, _ := flags.GetString("assignee")
			updates["assignee"] = v
		}
		if len(updates) == 0 {
			fmt.Println("No updates specified")
			return
		}
		if err := store.UpdateIssue(context.Background(), args[0], updates, actor); err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		green := color.New(color.FgGreen).SprintFunc()
		fmt.Printf("%s Updated issue: %s\n", green("✓"), args[0])
	},
}
// init declares updateCmd's field flags and attaches it to the root
// command.
func init() {
	flags := updateCmd.Flags()
	flags.StringP("status", "s", "", "New status")
	flags.IntP("priority", "p", 0, "New priority")
	flags.String("title", "", "New title")
	flags.StringP("assignee", "a", "", "New assignee")
	rootCmd.AddCommand(updateCmd)
}
// closeCmd wires "close [id...]": closes each listed issue with the
// given --reason (default "Closed"). Failures are reported per issue and
// the process exits non-zero when any close failed, so scripts (e.g. git
// hooks) can detect partial failure — the original always exited 0.
var closeCmd = &cobra.Command{
	Use:   "close [id...]",
	Short: "Close one or more issues",
	Args:  cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		reason, _ := cmd.Flags().GetString("reason")
		if reason == "" {
			reason = "Closed"
		}
		ctx := context.Background()
		// Build the color printer once instead of once per iteration.
		green := color.New(color.FgGreen).SprintFunc()
		failed := false
		for _, id := range args {
			if err := store.CloseIssue(ctx, id, reason, actor); err != nil {
				fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
				failed = true
				continue
			}
			fmt.Printf("%s Closed %s: %s\n", green("✓"), id, reason)
		}
		if failed {
			os.Exit(1)
		}
	},
}
// init declares closeCmd's --reason flag and attaches it to the root
// command.
func init() {
	closeCmd.Flags().StringP("reason", "r", "", "Reason for closing")
	rootCmd.AddCommand(closeCmd)
}
// main dispatches to cobra. Command errors already print their own
// message, so this only translates them into a non-zero exit status.
func main() {
	err := rootCmd.Execute()
	if err != nil {
		os.Exit(1)
	}
}

129
cmd/beads/ready.go Normal file
View File

@@ -0,0 +1,129 @@
package main
import (
"context"
"fmt"
"os"
"github.com/fatih/color"
"github.com/spf13/cobra"
"github.com/steveyackey/beads/internal/types"
)
// readyCmd wires "ready": lists open issues with no open blockers, with
// optional priority/assignee filters and a result limit. This is the
// command backed by the ready-work query in the storage layer.
var readyCmd = &cobra.Command{
	Use:   "ready",
	Short: "Show ready work (no blockers)",
	Run: func(cmd *cobra.Command, args []string) {
		flags := cmd.Flags()
		limit, _ := flags.GetInt("limit")
		owner, _ := flags.GetString("assignee")
		filter := types.WorkFilter{
			Status: types.StatusOpen,
			Limit:  limit,
		}
		// Use Changed() to properly handle P0 (priority=0)
		if flags.Changed("priority") {
			prio, _ := flags.GetInt("priority")
			filter.Priority = &prio
		}
		if owner != "" {
			filter.Assignee = &owner
		}
		issues, err := store.GetReadyWork(context.Background(), filter)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		if len(issues) == 0 {
			yellow := color.New(color.FgYellow).SprintFunc()
			fmt.Printf("\n%s No ready work found (all issues have blocking dependencies)\n\n",
				yellow("✨"))
			return
		}
		cyan := color.New(color.FgCyan).SprintFunc()
		fmt.Printf("\n%s Ready work (%d issues with no blockers):\n\n", cyan("📋"), len(issues))
		for n, issue := range issues {
			fmt.Printf("%d. [P%d] %s: %s\n", n+1, issue.Priority, issue.ID, issue.Title)
			if issue.EstimatedMinutes != nil {
				fmt.Printf("   Estimate: %d min\n", *issue.EstimatedMinutes)
			}
			if issue.Assignee != "" {
				fmt.Printf("   Assignee: %s\n", issue.Assignee)
			}
		}
		fmt.Println()
	},
}
// blockedCmd wires "blocked": lists issues that still have open blocking
// dependencies, along with the count and IDs of their blockers.
var blockedCmd = &cobra.Command{
	Use:   "blocked",
	Short: "Show blocked issues",
	Run: func(cmd *cobra.Command, args []string) {
		blocked, err := store.GetBlockedIssues(context.Background())
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		if len(blocked) == 0 {
			green := color.New(color.FgGreen).SprintFunc()
			fmt.Printf("\n%s No blocked issues\n\n", green("✨"))
			return
		}
		red := color.New(color.FgRed).SprintFunc()
		fmt.Printf("\n%s Blocked issues (%d):\n\n", red("🚫"), len(blocked))
		for _, item := range blocked {
			fmt.Printf("[P%d] %s: %s\n", item.Priority, item.ID, item.Title)
			fmt.Printf("  Blocked by %d open dependencies: %v\n",
				item.BlockedByCount, item.BlockedBy)
			fmt.Println()
		}
	},
}
// statsCmd wires "stats": prints aggregate issue counts (total, open, in
// progress, closed, blocked, ready) plus average lead time when known.
var statsCmd = &cobra.Command{
	Use:   "stats",
	Short: "Show statistics",
	Run: func(cmd *cobra.Command, args []string) {
		stats, err := store.GetStatistics(context.Background())
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error: %v\n", err)
			os.Exit(1)
		}
		cyan := color.New(color.FgCyan).SprintFunc()
		green := color.New(color.FgGreen).SprintFunc()
		yellow := color.New(color.FgYellow).SprintFunc()
		// Small helper: render a count through a color painter.
		colored := func(paint func(a ...interface{}) string, n int) string {
			return paint(fmt.Sprintf("%d", n))
		}
		fmt.Printf("\n%s Beads Statistics:\n\n", cyan("📊"))
		fmt.Printf("Total Issues: %d\n", stats.TotalIssues)
		fmt.Printf("Open: %s\n", colored(green, stats.OpenIssues))
		fmt.Printf("In Progress: %s\n", colored(yellow, stats.InProgressIssues))
		fmt.Printf("Closed: %d\n", stats.ClosedIssues)
		fmt.Printf("Blocked: %d\n", stats.BlockedIssues)
		fmt.Printf("Ready: %s\n", colored(green, stats.ReadyIssues))
		if stats.AverageLeadTime > 0 {
			fmt.Printf("Avg Lead Time: %.1f hours\n", stats.AverageLeadTime)
		}
		fmt.Println()
	},
}
// init declares readyCmd's flags and registers the ready, blocked, and
// stats commands with the root command.
func init() {
	readyCmd.Flags().IntP("limit", "n", 10, "Maximum issues to show")
	readyCmd.Flags().IntP("priority", "p", 0, "Filter by priority")
	readyCmd.Flags().StringP("assignee", "a", "", "Filter by assignee")
	for _, sub := range []*cobra.Command{readyCmd, blockedCmd, statsCmd} {
		rootCmd.AddCommand(sub)
	}
}

25
go.mod Normal file
View File

@@ -0,0 +1,25 @@
module github.com/steveyackey/beads
go 1.25.2
require (
	github.com/fatih/color v1.18.0
	github.com/mattn/go-sqlite3 v1.14.32
	github.com/spf13/cobra v1.10.1
)

require (
	github.com/fsnotify/fsnotify v1.9.0 // indirect
	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
	github.com/sagikazarmark/locafero v0.11.0 // indirect
	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
	github.com/spf13/afero v1.15.0 // indirect
	github.com/spf13/cast v1.10.0 // indirect
	github.com/spf13/pflag v1.0.10 // indirect
	github.com/spf13/viper v1.21.0 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/sys v0.29.0 // indirect
	golang.org/x/text v0.28.0 // indirect
)

46
go.sum Normal file
View File

@@ -0,0 +1,46 @@
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -0,0 +1,362 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/steveyackey/beads/internal/types"
)
// AddDependency adds a dependency edge (dep.IssueID depends on
// dep.DependsOnID) with validation and cycle prevention.
//
// It rejects self-dependencies, verifies both endpoints exist, and — for
// 'blocks' edges — checks inside the same transaction that the new edge
// does not close a cycle (walking the existing 'blocks' graph from
// DependsOnID via a recursive CTE capped at depth 100). The insert, the
// cycle check, and the audit event commit atomically, so a detected
// cycle rolls the insert back.
func (s *SQLiteStorage) AddDependency(ctx context.Context, dep *types.Dependency, actor string) error {
	// Cheapest check first: the original did two DB round-trips before
	// rejecting a self-edge.
	if dep.IssueID == dep.DependsOnID {
		return fmt.Errorf("issue cannot depend on itself")
	}
	// Validate that both issues exist
	issueExists, err := s.GetIssue(ctx, dep.IssueID)
	if err != nil {
		return fmt.Errorf("failed to check issue %s: %w", dep.IssueID, err)
	}
	if issueExists == nil {
		return fmt.Errorf("issue %s not found", dep.IssueID)
	}
	dependsOnExists, err := s.GetIssue(ctx, dep.DependsOnID)
	if err != nil {
		return fmt.Errorf("failed to check dependency %s: %w", dep.DependsOnID, err)
	}
	if dependsOnExists == nil {
		return fmt.Errorf("dependency target %s not found", dep.DependsOnID)
	}
	dep.CreatedAt = time.Now()
	dep.CreatedBy = actor
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()
	// Insert dependency
	_, err = tx.ExecContext(ctx, `
		INSERT INTO dependencies (issue_id, depends_on_id, type, created_at, created_by)
		VALUES (?, ?, ?, ?, ?)
	`, dep.IssueID, dep.DependsOnID, dep.Type, dep.CreatedAt, dep.CreatedBy)
	if err != nil {
		return fmt.Errorf("failed to add dependency: %w", err)
	}
	// Check if this creates a cycle (only for 'blocks' type dependencies).
	// We check whether IssueID is reachable from DependsOnID; if so,
	// adding "IssueID depends on DependsOnID" would close a cycle.
	if dep.Type == types.DepBlocks {
		var cycleExists bool
		err = tx.QueryRowContext(ctx, `
			WITH RECURSIVE paths AS (
				SELECT
					issue_id,
					depends_on_id,
					1 as depth
				FROM dependencies
				WHERE type = 'blocks'
				AND issue_id = ?
				UNION ALL
				SELECT
					d.issue_id,
					d.depends_on_id,
					p.depth + 1
				FROM dependencies d
				JOIN paths p ON d.issue_id = p.depends_on_id
				WHERE d.type = 'blocks'
				AND p.depth < 100
			)
			SELECT EXISTS(
				SELECT 1 FROM paths
				WHERE depends_on_id = ?
			)
		`, dep.DependsOnID, dep.IssueID).Scan(&cycleExists)
		if err != nil {
			return fmt.Errorf("failed to check for cycles: %w", err)
		}
		if cycleExists {
			return fmt.Errorf("cannot add dependency: would create a cycle (%s → %s → ... → %s)",
				dep.IssueID, dep.DependsOnID, dep.IssueID)
		}
	}
	// Record event
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, dep.IssueID, types.EventDependencyAdded, actor,
		fmt.Sprintf("Added dependency: %s %s %s", dep.IssueID, dep.Type, dep.DependsOnID))
	if err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}
	return tx.Commit()
}
// RemoveDependency deletes the dependency edge issueID→dependsOnID and
// records an audit event, in one transaction. It returns an error when
// no such edge exists, so the audit trail never gets a "removed" event
// for a dependency that was never there (the original silently succeeded
// and logged a bogus event).
func (s *SQLiteStorage) RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()
	res, err := tx.ExecContext(ctx, `
		DELETE FROM dependencies WHERE issue_id = ? AND depends_on_id = ?
	`, issueID, dependsOnID)
	if err != nil {
		return fmt.Errorf("failed to remove dependency: %w", err)
	}
	// RowsAffected is supported by the sqlite3 driver; a zero count means
	// the edge did not exist.
	if n, raErr := res.RowsAffected(); raErr == nil && n == 0 {
		return fmt.Errorf("dependency %s → %s not found", issueID, dependsOnID)
	}
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, issueID, types.EventDependencyRemoved, actor,
		fmt.Sprintf("Removed dependency on %s", dependsOnID))
	if err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}
	return tx.Commit()
}
// GetDependencies returns the issues that issueID directly depends on,
// ordered highest priority first.
func (s *SQLiteStorage) GetDependencies(ctx context.Context, issueID string) ([]*types.Issue, error) {
	const query = `
		SELECT i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
		       i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
		       i.created_at, i.updated_at, i.closed_at
		FROM issues i
		JOIN dependencies d ON i.id = d.depends_on_id
		WHERE d.issue_id = ?
		ORDER BY i.priority ASC
	`
	rs, err := s.db.QueryContext(ctx, query, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependencies: %w", err)
	}
	defer rs.Close()
	return scanIssues(rs)
}
// GetDependents returns issues that depend on this issue (the sources of
// incoming dependency edges), ordered by ascending priority number.
func (s *SQLiteStorage) GetDependents(ctx context.Context, issueID string) ([]*types.Issue, error) {
	const query = `
		SELECT i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
			i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
			i.created_at, i.updated_at, i.closed_at
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		WHERE d.depends_on_id = ?
		ORDER BY i.priority ASC
	`
	rows, err := s.db.QueryContext(ctx, query, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependents: %w", err)
	}
	defer rows.Close()
	return scanIssues(rows)
}
// GetDependencyTree returns the dependency tree rooted at issueID, flattened
// into a slice ordered by depth (root first, depth 0) then priority. Traversal
// stops at maxDepth (defaulting to 50 when maxDepth <= 0); nodes at exactly
// maxDepth are marked Truncated since their children were not expanded.
func (s *SQLiteStorage) GetDependencyTree(ctx context.Context, issueID string, maxDepth int) ([]*types.TreeNode, error) {
	if maxDepth <= 0 {
		maxDepth = 50
	}
	// Recursive CTE: seed with the root issue, then repeatedly join each
	// frontier node's outgoing dependency edges until maxDepth.
	rows, err := s.db.QueryContext(ctx, `
		WITH RECURSIVE tree AS (
			SELECT
				i.id, i.title, i.status, i.priority, i.description, i.design,
				i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
				i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
				0 as depth
			FROM issues i
			WHERE i.id = ?
			UNION ALL
			SELECT
				i.id, i.title, i.status, i.priority, i.description, i.design,
				i.acceptance_criteria, i.notes, i.issue_type, i.assignee,
				i.estimated_minutes, i.created_at, i.updated_at, i.closed_at,
				t.depth + 1
			FROM issues i
			JOIN dependencies d ON i.id = d.depends_on_id
			JOIN tree t ON d.issue_id = t.id
			WHERE t.depth < ?
		)
		SELECT * FROM tree
		ORDER BY depth, priority
	`, issueID, maxDepth)
	if err != nil {
		return nil, fmt.Errorf("failed to get dependency tree: %w", err)
	}
	defer rows.Close()
	var nodes []*types.TreeNode
	for rows.Next() {
		var node types.TreeNode
		var closedAt sql.NullTime
		var estimatedMinutes sql.NullInt64
		var assignee sql.NullString
		err := rows.Scan(
			&node.ID, &node.Title, &node.Status, &node.Priority,
			&node.Description, &node.Design, &node.AcceptanceCriteria,
			&node.Notes, &node.IssueType, &assignee, &estimatedMinutes,
			&node.CreatedAt, &node.UpdatedAt, &closedAt, &node.Depth,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan tree node: %w", err)
		}
		// Translate nullable columns into optional Go fields.
		if closedAt.Valid {
			node.ClosedAt = &closedAt.Time
		}
		if estimatedMinutes.Valid {
			mins := int(estimatedMinutes.Int64)
			node.EstimatedMinutes = &mins
		}
		if assignee.Valid {
			node.Assignee = assignee.String
		}
		node.Truncated = node.Depth == maxDepth
		nodes = append(nodes, &node)
	}
	// Fix: propagate iteration errors instead of silently returning a
	// truncated tree when the row stream fails mid-way.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate dependency tree: %w", err)
	}
	return nodes, nil
}
// DetectCycles finds circular dependencies and returns the actual cycle
// paths, each as an ordered slice of the issues in the cycle. Paths are
// tracked as delimited strings to work around SQLite's lack of arrays;
// traversal is capped at depth 100.
//
// NOTE(review): rotations of the same cycle (A→B→A vs B→A→B) produce
// distinct path strings, so the `seen` set only deduplicates exact
// repeats — confirm whether callers need rotation-invariant dedup.
func (s *SQLiteStorage) DetectCycles(ctx context.Context) ([][]*types.Issue, error) {
	rows, err := s.db.QueryContext(ctx, `
		WITH RECURSIVE paths AS (
			SELECT
				issue_id,
				depends_on_id,
				issue_id as start_id,
				issue_id || '→' || depends_on_id as path,
				0 as depth
			FROM dependencies
			UNION ALL
			SELECT
				d.issue_id,
				d.depends_on_id,
				p.start_id,
				p.path || '→' || d.depends_on_id,
				p.depth + 1
			FROM dependencies d
			JOIN paths p ON d.issue_id = p.depends_on_id
			WHERE p.depth < 100
			AND p.path NOT LIKE '%' || d.depends_on_id || '→%'
		)
		SELECT DISTINCT path || '→' || start_id as cycle_path
		FROM paths
		WHERE depends_on_id = start_id
		ORDER BY cycle_path
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to detect cycles: %w", err)
	}
	defer rows.Close()
	var cycles [][]*types.Issue
	seen := make(map[string]bool)
	for rows.Next() {
		var pathStr string
		if err := rows.Scan(&pathStr); err != nil {
			return nil, err
		}
		// Skip if we've already seen this cycle (can happen with different entry points)
		if seen[pathStr] {
			continue
		}
		seen[pathStr] = true
		// Parse the path string: "bd-1→bd-2→bd-3→bd-1"
		issueIDs := strings.Split(pathStr, "→")
		// Remove the duplicate last element (cycle closes back to start)
		if len(issueIDs) > 1 && issueIDs[0] == issueIDs[len(issueIDs)-1] {
			issueIDs = issueIDs[:len(issueIDs)-1]
		}
		// Fetch full issue details for each ID in the cycle
		var cycleIssues []*types.Issue
		for _, issueID := range issueIDs {
			issue, err := s.GetIssue(ctx, issueID)
			if err != nil {
				return nil, fmt.Errorf("failed to get issue %s: %w", issueID, err)
			}
			if issue != nil {
				cycleIssues = append(cycleIssues, issue)
			}
		}
		if len(cycleIssues) > 0 {
			cycles = append(cycles, cycleIssues)
		}
	}
	// Fix: surface iteration errors instead of returning a partial cycle list.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate cycle paths: %w", err)
	}
	return cycles, nil
}
// scanIssues converts a result set whose columns match the canonical issue
// SELECT list (id..closed_at, 14 columns) into a slice of Issues. The caller
// owns rows and remains responsible for closing it.
func scanIssues(rows *sql.Rows) ([]*types.Issue, error) {
	var issues []*types.Issue
	for rows.Next() {
		var issue types.Issue
		var closedAt sql.NullTime
		var estimatedMinutes sql.NullInt64
		var assignee sql.NullString
		err := rows.Scan(
			&issue.ID, &issue.Title, &issue.Description, &issue.Design,
			&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
			&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
			&issue.CreatedAt, &issue.UpdatedAt, &closedAt,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan issue: %w", err)
		}
		// Map nullable columns onto the optional struct fields.
		if closedAt.Valid {
			issue.ClosedAt = &closedAt.Time
		}
		if estimatedMinutes.Valid {
			mins := int(estimatedMinutes.Int64)
			issue.EstimatedMinutes = &mins
		}
		if assignee.Valid {
			issue.Assignee = assignee.String
		}
		issues = append(issues, &issue)
	}
	// Fix: report iteration errors (e.g. connection loss mid-stream) that
	// would otherwise silently truncate the result set.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate issues: %w", err)
	}
	return issues, nil
}

View File

@@ -0,0 +1,147 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"github.com/steveyackey/beads/internal/types"
)
// AddComment adds a comment event to an issue and bumps the issue's
// updated_at timestamp.
//
// Fix: both writes now run in one transaction, matching every other mutator
// in this package. Previously the comment could be recorded while the
// timestamp update failed (or vice versa), leaving the two out of sync.
func (s *SQLiteStorage) AddComment(ctx context.Context, issueID, actor, comment string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, issueID, types.EventCommented, actor, comment)
	if err != nil {
		return fmt.Errorf("failed to add comment: %w", err)
	}
	// Keep the issue's updated_at in step with its latest activity.
	_, err = tx.ExecContext(ctx, `
		UPDATE issues SET updated_at = CURRENT_TIMESTAMP WHERE id = ?
	`, issueID)
	if err != nil {
		return fmt.Errorf("failed to update timestamp: %w", err)
	}
	return tx.Commit()
}
// GetEvents returns the event history for an issue, newest first.
// limit > 0 caps the number of rows; limit <= 0 returns everything.
func (s *SQLiteStorage) GetEvents(ctx context.Context, issueID string, limit int) ([]*types.Event, error) {
	// Fix: bind the limit as a parameter instead of interpolating it with
	// Sprintf, consistent with how the rest of the package builds queries.
	query := `
		SELECT id, issue_id, event_type, actor, old_value, new_value, comment, created_at
		FROM events
		WHERE issue_id = ?
		ORDER BY created_at DESC
	`
	args := []interface{}{issueID}
	if limit > 0 {
		query += " LIMIT ?"
		args = append(args, limit)
	}
	rows, err := s.db.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to get events: %w", err)
	}
	defer rows.Close()
	var events []*types.Event
	for rows.Next() {
		var event types.Event
		var oldValue, newValue, comment sql.NullString
		err := rows.Scan(
			&event.ID, &event.IssueID, &event.EventType, &event.Actor,
			&oldValue, &newValue, &comment, &event.CreatedAt,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan event: %w", err)
		}
		// Map nullable columns onto optional fields.
		if oldValue.Valid {
			event.OldValue = &oldValue.String
		}
		if newValue.Valid {
			event.NewValue = &newValue.String
		}
		if comment.Valid {
			event.Comment = &comment.String
		}
		events = append(events, &event)
	}
	// Fix: surface iteration errors rather than returning a partial history.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate events: %w", err)
	}
	return events, nil
}
// GetStatistics returns aggregate statistics across all issues: counts by
// status, how many are blocked, how many are ready (open with no open
// blockers), and the average lead time in hours for closed issues.
// Four independent queries are run sequentially; no transaction is used, so
// the numbers are a best-effort snapshot, not a consistent point-in-time view.
func (s *SQLiteStorage) GetStatistics(ctx context.Context) (*types.Statistics, error) {
	var stats types.Statistics
	// Status counts in a single pass over the issues table.
	err := s.db.QueryRowContext(ctx, `
		SELECT
			COUNT(*) as total,
			SUM(CASE WHEN status = 'open' THEN 1 ELSE 0 END) as open,
			SUM(CASE WHEN status = 'in_progress' THEN 1 ELSE 0 END) as in_progress,
			SUM(CASE WHEN status = 'closed' THEN 1 ELSE 0 END) as closed
		FROM issues
	`).Scan(&stats.TotalIssues, &stats.OpenIssues, &stats.InProgressIssues, &stats.ClosedIssues)
	if err != nil {
		return nil, fmt.Errorf("failed to get issue counts: %w", err)
	}
	// Blocked: not-yet-closed issues with at least one 'blocks' edge whose
	// blocker is itself not closed. DISTINCT because an issue may have
	// several open blockers.
	err = s.db.QueryRowContext(ctx, `
		SELECT COUNT(DISTINCT i.id)
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		JOIN issues blocker ON d.depends_on_id = blocker.id
		WHERE i.status IN ('open', 'in_progress', 'blocked')
		AND d.type = 'blocks'
		AND blocker.status IN ('open', 'in_progress', 'blocked')
	`).Scan(&stats.BlockedIssues)
	if err != nil {
		return nil, fmt.Errorf("failed to get blocked count: %w", err)
	}
	// Ready: open issues with no open 'blocks' dependency — the same
	// predicate used by GetReadyWork and the ready_issues view.
	err = s.db.QueryRowContext(ctx, `
		SELECT COUNT(*)
		FROM issues i
		WHERE i.status = 'open'
		AND NOT EXISTS (
			SELECT 1 FROM dependencies d
			JOIN issues blocked ON d.depends_on_id = blocked.id
			WHERE d.issue_id = i.id
			AND d.type = 'blocks'
			AND blocked.status IN ('open', 'in_progress', 'blocked')
		)
	`).Scan(&stats.ReadyIssues)
	if err != nil {
		return nil, fmt.Errorf("failed to get ready count: %w", err)
	}
	// Average lead time in hours: julianday difference is in days, so
	// multiply by 24. NULL (no closed issues) leaves the field at zero.
	var avgLeadTime sql.NullFloat64
	err = s.db.QueryRowContext(ctx, `
		SELECT AVG(
			(julianday(closed_at) - julianday(created_at)) * 24
		)
		FROM issues
		WHERE closed_at IS NOT NULL
	`).Scan(&avgLeadTime)
	if err != nil && err != sql.ErrNoRows {
		return nil, fmt.Errorf("failed to get lead time: %w", err)
	}
	if avgLeadTime.Valid {
		stats.AverageLeadTime = avgLeadTime.Float64
	}
	return &stats, nil
}

View File

@@ -0,0 +1,102 @@
package sqlite
import (
"context"
"fmt"
"github.com/steveyackey/beads/internal/types"
)
// AddLabel attaches a label to an issue and records an audit event, both in
// one transaction. INSERT OR IGNORE makes re-adding an existing label a
// no-op on the labels table (the event is still written).
func (s *SQLiteStorage) AddLabel(ctx context.Context, issueID, label, actor string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()

	if _, err = tx.ExecContext(ctx, `
		INSERT OR IGNORE INTO labels (issue_id, label)
		VALUES (?, ?)
	`, issueID, label); err != nil {
		return fmt.Errorf("failed to add label: %w", err)
	}

	if _, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, issueID, types.EventLabelAdded, actor, fmt.Sprintf("Added label: %s", label)); err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}
	return tx.Commit()
}
// RemoveLabel detaches a label from an issue and records an audit event,
// both in one transaction. Removing an absent label is a no-op on the
// labels table but still logs the event.
func (s *SQLiteStorage) RemoveLabel(ctx context.Context, issueID, label, actor string) error {
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()

	if _, err = tx.ExecContext(ctx, `
		DELETE FROM labels WHERE issue_id = ? AND label = ?
	`, issueID, label); err != nil {
		return fmt.Errorf("failed to remove label: %w", err)
	}

	if _, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, issueID, types.EventLabelRemoved, actor, fmt.Sprintf("Removed label: %s", label)); err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}
	return tx.Commit()
}
// GetLabels returns all labels attached to an issue, sorted alphabetically.
func (s *SQLiteStorage) GetLabels(ctx context.Context, issueID string) ([]string, error) {
	rows, err := s.db.QueryContext(ctx, `
		SELECT label FROM labels WHERE issue_id = ? ORDER BY label
	`, issueID)
	if err != nil {
		return nil, fmt.Errorf("failed to get labels: %w", err)
	}
	defer rows.Close()
	var labels []string
	for rows.Next() {
		var label string
		if err := rows.Scan(&label); err != nil {
			return nil, err
		}
		labels = append(labels, label)
	}
	// Fix: report iteration errors instead of silently returning a
	// truncated label list.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate labels: %w", err)
	}
	return labels, nil
}
// GetIssuesByLabel returns all issues carrying the given label, ordered by
// ascending priority and then newest first.
func (s *SQLiteStorage) GetIssuesByLabel(ctx context.Context, label string) ([]*types.Issue, error) {
	const query = `
		SELECT i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
			i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
			i.created_at, i.updated_at, i.closed_at
		FROM issues i
		JOIN labels l ON i.id = l.issue_id
		WHERE l.label = ?
		ORDER BY i.priority ASC, i.created_at DESC
	`
	rows, err := s.db.QueryContext(ctx, query, label)
	if err != nil {
		return nil, fmt.Errorf("failed to get issues by label: %w", err)
	}
	defer rows.Close()
	return scanIssues(rows)
}

View File

@@ -0,0 +1,135 @@
package sqlite
import (
"context"
"database/sql"
"fmt"
"strings"
"github.com/steveyackey/beads/internal/types"
)
// GetReadyWork returns issues that have no open blockers — work that can be
// started immediately. The filter narrows by status (defaulting to open),
// priority, assignee, and an optional row limit. Results are ordered by
// ascending priority, then newest first.
func (s *SQLiteStorage) GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error) {
	// Default to open status if not specified
	if filter.Status == "" {
		filter.Status = types.StatusOpen
	}

	// Accumulate WHERE conditions and their bind parameters in lockstep.
	conds := []string{"i.status = ?"}
	params := []interface{}{filter.Status}
	if filter.Priority != nil {
		conds = append(conds, "i.priority = ?")
		params = append(params, *filter.Priority)
	}
	if filter.Assignee != nil {
		conds = append(conds, "i.assignee = ?")
		params = append(params, *filter.Assignee)
	}

	// LIMIT is bound as a parameter when requested.
	limitClause := ""
	if filter.Limit > 0 {
		limitClause = " LIMIT ?"
		params = append(params, filter.Limit)
	}

	query := fmt.Sprintf(`
		SELECT i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
			i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
			i.created_at, i.updated_at, i.closed_at
		FROM issues i
		WHERE %s
		AND NOT EXISTS (
			SELECT 1 FROM dependencies d
			JOIN issues blocked ON d.depends_on_id = blocked.id
			WHERE d.issue_id = i.id
			AND d.type = 'blocks'
			AND blocked.status IN ('open', 'in_progress', 'blocked')
		)
		ORDER BY i.priority ASC, i.created_at DESC
		%s
	`, strings.Join(conds, " AND "), limitClause)

	rows, err := s.db.QueryContext(ctx, query, params...)
	if err != nil {
		return nil, fmt.Errorf("failed to get ready work: %w", err)
	}
	defer rows.Close()
	return scanIssues(rows)
}
// GetBlockedIssues returns issues that are blocked by at least one open
// 'blocks' dependency, each annotated with its blocker count and the list of
// blocker IDs. GROUP_CONCAT collects all blocker IDs in a single query
// (avoiding an N+1 pattern).
func (s *SQLiteStorage) GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error) {
	rows, err := s.db.QueryContext(ctx, `
		SELECT
			i.id, i.title, i.description, i.design, i.acceptance_criteria, i.notes,
			i.status, i.priority, i.issue_type, i.assignee, i.estimated_minutes,
			i.created_at, i.updated_at, i.closed_at,
			COUNT(d.depends_on_id) as blocked_by_count,
			GROUP_CONCAT(d.depends_on_id, ',') as blocker_ids
		FROM issues i
		JOIN dependencies d ON i.id = d.issue_id
		JOIN issues blocker ON d.depends_on_id = blocker.id
		WHERE i.status IN ('open', 'in_progress', 'blocked')
		AND d.type = 'blocks'
		AND blocker.status IN ('open', 'in_progress', 'blocked')
		GROUP BY i.id
		ORDER BY i.priority ASC
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to get blocked issues: %w", err)
	}
	defer rows.Close()
	var blocked []*types.BlockedIssue
	for rows.Next() {
		var issue types.BlockedIssue
		var closedAt sql.NullTime
		var estimatedMinutes sql.NullInt64
		var assignee sql.NullString
		var blockerIDsStr string
		err := rows.Scan(
			&issue.ID, &issue.Title, &issue.Description, &issue.Design,
			&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
			&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
			&issue.CreatedAt, &issue.UpdatedAt, &closedAt, &issue.BlockedByCount,
			&blockerIDsStr,
		)
		if err != nil {
			return nil, fmt.Errorf("failed to scan blocked issue: %w", err)
		}
		// Map nullable columns onto optional fields.
		if closedAt.Valid {
			issue.ClosedAt = &closedAt.Time
		}
		if estimatedMinutes.Valid {
			mins := int(estimatedMinutes.Int64)
			issue.EstimatedMinutes = &mins
		}
		if assignee.Valid {
			issue.Assignee = assignee.String
		}
		// Parse comma-separated blocker IDs
		if blockerIDsStr != "" {
			issue.BlockedBy = strings.Split(blockerIDsStr, ",")
		}
		blocked = append(blocked, &issue)
	}
	// Fix: surface iteration errors instead of returning a partial list.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate blocked issues: %w", err)
	}
	return blocked, nil
}

View File

@@ -0,0 +1,93 @@
package sqlite
// schema is the DDL applied on every startup. All statements use IF NOT
// EXISTS so initialization is idempotent against an existing database.
// It defines the issues, dependencies, labels, and events tables (with
// cascading deletes back to issues), their lookup indexes, and two
// convenience views: ready_issues (open issues with no open 'blocks'
// dependency) and blocked_issues (not-closed issues with open blockers,
// annotated with a blocker count).
const schema = `
-- Issues table
CREATE TABLE IF NOT EXISTS issues (
    id TEXT PRIMARY KEY,
    title TEXT NOT NULL CHECK(length(title) <= 500),
    description TEXT NOT NULL DEFAULT '',
    design TEXT NOT NULL DEFAULT '',
    acceptance_criteria TEXT NOT NULL DEFAULT '',
    notes TEXT NOT NULL DEFAULT '',
    status TEXT NOT NULL DEFAULT 'open',
    priority INTEGER NOT NULL DEFAULT 2 CHECK(priority >= 0 AND priority <= 4),
    issue_type TEXT NOT NULL DEFAULT 'task',
    assignee TEXT,
    estimated_minutes INTEGER,
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    closed_at DATETIME
);
CREATE INDEX IF NOT EXISTS idx_issues_status ON issues(status);
CREATE INDEX IF NOT EXISTS idx_issues_priority ON issues(priority);
CREATE INDEX IF NOT EXISTS idx_issues_assignee ON issues(assignee);
CREATE INDEX IF NOT EXISTS idx_issues_created_at ON issues(created_at);
-- Dependencies table
CREATE TABLE IF NOT EXISTS dependencies (
    issue_id TEXT NOT NULL,
    depends_on_id TEXT NOT NULL,
    type TEXT NOT NULL DEFAULT 'blocks',
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    created_by TEXT NOT NULL,
    PRIMARY KEY (issue_id, depends_on_id),
    FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE,
    FOREIGN KEY (depends_on_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_dependencies_issue ON dependencies(issue_id);
CREATE INDEX IF NOT EXISTS idx_dependencies_depends_on ON dependencies(depends_on_id);
-- Labels table
CREATE TABLE IF NOT EXISTS labels (
    issue_id TEXT NOT NULL,
    label TEXT NOT NULL,
    PRIMARY KEY (issue_id, label),
    FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_labels_label ON labels(label);
-- Events table (audit trail)
CREATE TABLE IF NOT EXISTS events (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    issue_id TEXT NOT NULL,
    event_type TEXT NOT NULL,
    actor TEXT NOT NULL,
    old_value TEXT,
    new_value TEXT,
    comment TEXT,
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (issue_id) REFERENCES issues(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_events_issue ON events(issue_id);
CREATE INDEX IF NOT EXISTS idx_events_created_at ON events(created_at);
-- Ready work view
CREATE VIEW IF NOT EXISTS ready_issues AS
SELECT i.*
FROM issues i
WHERE i.status = 'open'
AND NOT EXISTS (
    SELECT 1 FROM dependencies d
    JOIN issues blocked ON d.depends_on_id = blocked.id
    WHERE d.issue_id = i.id
    AND d.type = 'blocks'
    AND blocked.status IN ('open', 'in_progress', 'blocked')
);
-- Blocked issues view
CREATE VIEW IF NOT EXISTS blocked_issues AS
SELECT
    i.*,
    COUNT(d.depends_on_id) as blocked_by_count
FROM issues i
JOIN dependencies d ON i.id = d.issue_id
JOIN issues blocker ON d.depends_on_id = blocker.id
WHERE i.status IN ('open', 'in_progress', 'blocked')
AND d.type = 'blocks'
AND blocker.status IN ('open', 'in_progress', 'blocked')
GROUP BY i.id;
`

View File

@@ -0,0 +1,424 @@
package sqlite
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
_ "github.com/mattn/go-sqlite3"
"github.com/steveyackey/beads/internal/types"
)
// SQLiteStorage implements the Storage interface using SQLite.
// The zero value is not usable; construct instances with New.
type SQLiteStorage struct {
	db     *sql.DB
	nextID int        // next numeric suffix for generated "bd-N" issue IDs
	idMu   sync.Mutex // Protects nextID from concurrent access
}
// New creates a new SQLite storage backend at the given file path, creating
// the parent directory if needed. The database is opened in WAL mode with
// foreign keys enabled, the schema is applied idempotently, and the next
// issue-ID counter is seeded from existing rows.
func New(path string) (*SQLiteStorage, error) {
	// Ensure directory exists
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, fmt.Errorf("failed to create directory: %w", err)
	}
	// Open database with WAL mode for better concurrency
	db, err := sql.Open("sqlite3", path+"?_journal_mode=WAL&_foreign_keys=ON")
	if err != nil {
		return nil, fmt.Errorf("failed to open database: %w", err)
	}
	// Test connection. Fix: close the handle on every failure path below —
	// previously the *sql.DB (and its file handles) leaked on error.
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to ping database: %w", err)
	}
	// Initialize schema
	if _, err := db.Exec(schema); err != nil {
		db.Close()
		return nil, fmt.Errorf("failed to initialize schema: %w", err)
	}
	// Get next ID
	nextID, err := getNextID(db)
	if err != nil {
		db.Close()
		return nil, err
	}
	return &SQLiteStorage{
		db:     db,
		nextID: nextID,
	}, nil
}
// getNextID determines the next issue ID suffix to use by finding the
// largest numeric suffix among existing "bd-N" IDs and adding one.
// An empty table (or IDs with no parseable suffix) yields 1.
//
// Fix: the previous implementation used MAX(id) on the TEXT column, which
// compares lexicographically — "bd-9" > "bd-10" — so once ten issues existed
// the counter could repeat IDs and collide on insert. The numeric suffix is
// now extracted and compared as an INTEGER in SQL. Query errors are also
// propagated instead of being silently swallowed.
func getNextID(db *sql.DB) (int, error) {
	// CAST of a non-numeric suffix yields 0, which harmlessly maps to 1.
	var maxNum sql.NullInt64
	err := db.QueryRow(`
		SELECT MAX(CAST(substr(id, instr(id, '-') + 1) AS INTEGER))
		FROM issues
	`).Scan(&maxNum)
	if err != nil {
		if err == sql.ErrNoRows {
			return 1, nil
		}
		return 0, fmt.Errorf("failed to determine next issue id: %w", err)
	}
	// MAX over zero rows returns NULL: start counting from 1.
	if !maxNum.Valid || maxNum.Int64 < 1 {
		return 1, nil
	}
	return int(maxNum.Int64) + 1, nil
}
// CreateIssue validates and inserts a new issue, recording a creation event
// in the same transaction. When issue.ID is empty, a sequential "bd-N" ID is
// assigned under the mutex so concurrent creators never share a number.
// CreatedAt/UpdatedAt are set to the current time on the passed-in issue.
func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error {
	if err := issue.Validate(); err != nil {
		return fmt.Errorf("validation failed: %w", err)
	}

	// Allocate an ID only when the caller did not supply one.
	if issue.ID == "" {
		s.idMu.Lock()
		issue.ID = fmt.Sprintf("bd-%d", s.nextID)
		s.nextID++
		s.idMu.Unlock()
	}

	now := time.Now()
	issue.CreatedAt = now
	issue.UpdatedAt = now

	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()

	if _, err = tx.ExecContext(ctx, `
		INSERT INTO issues (
			id, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes,
			created_at, updated_at
		) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
	`,
		issue.ID, issue.Title, issue.Description, issue.Design,
		issue.AcceptanceCriteria, issue.Notes, issue.Status,
		issue.Priority, issue.IssueType, issue.Assignee,
		issue.EstimatedMinutes, issue.CreatedAt, issue.UpdatedAt,
	); err != nil {
		return fmt.Errorf("failed to insert issue: %w", err)
	}

	// Audit trail: store a full JSON snapshot of the issue as created.
	snapshot, _ := json.Marshal(issue)
	if _, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, new_value)
		VALUES (?, ?, ?, ?)
	`, issue.ID, types.EventCreated, actor, string(snapshot)); err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}
	return tx.Commit()
}
// GetIssue retrieves a single issue by ID. It returns (nil, nil) when the
// issue does not exist, so callers must check for a nil issue as well as
// the error.
func (s *SQLiteStorage) GetIssue(ctx context.Context, id string) (*types.Issue, error) {
	const query = `
		SELECT id, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes,
			created_at, updated_at, closed_at
		FROM issues
		WHERE id = ?
	`
	var (
		issue            types.Issue
		closedAt         sql.NullTime
		estimatedMinutes sql.NullInt64
		assignee         sql.NullString
	)
	err := s.db.QueryRowContext(ctx, query, id).Scan(
		&issue.ID, &issue.Title, &issue.Description, &issue.Design,
		&issue.AcceptanceCriteria, &issue.Notes, &issue.Status,
		&issue.Priority, &issue.IssueType, &assignee, &estimatedMinutes,
		&issue.CreatedAt, &issue.UpdatedAt, &closedAt,
	)
	switch {
	case err == sql.ErrNoRows:
		// Missing issue is not an error for this API.
		return nil, nil
	case err != nil:
		return nil, fmt.Errorf("failed to get issue: %w", err)
	}
	// Map nullable columns onto optional fields.
	if closedAt.Valid {
		issue.ClosedAt = &closedAt.Time
	}
	if estimatedMinutes.Valid {
		mins := int(estimatedMinutes.Int64)
		issue.EstimatedMinutes = &mins
	}
	if assignee.Valid {
		issue.Assignee = assignee.String
	}
	return &issue, nil
}
// allowedUpdateFields whitelists the column names UpdateIssue may touch.
// Keys must be actual issues-table column names, since UpdateIssue splices
// them directly into the SET clause; the whitelist is what prevents SQL
// injection through caller-supplied update keys.
var allowedUpdateFields = map[string]bool{
	"status":              true,
	"priority":            true,
	"title":               true,
	"assignee":            true,
	"description":         true,
	"design":              true,
	"acceptance_criteria": true,
	"notes":               true,
	"issue_type":          true,
	"estimated_minutes":   true,
}
// UpdateIssue applies a partial update to an issue. updates maps column
// names (restricted to allowedUpdateFields) to new values; updated_at is
// always bumped. The old issue snapshot and the update map are stored as
// JSON in the audit event. The event type is EventClosed when status is set
// to closed, EventStatusChanged for any other status change, otherwise
// EventUpdated.
//
// NOTE(review): the value validation switch only fires when the type
// assertion matches (e.g. priority as int) — a caller passing an int64 or
// float64 bypasses the range check and hits the DB CHECK constraint
// instead. Confirm callers always pass the asserted types.
func (s *SQLiteStorage) UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error {
	// Get old issue for event
	oldIssue, err := s.GetIssue(ctx, id)
	if err != nil {
		return err
	}
	if oldIssue == nil {
		return fmt.Errorf("issue %s not found", id)
	}
	// Build update query with validated field names; updated_at always leads.
	setClauses := []string{"updated_at = ?"}
	args := []interface{}{time.Now()}
	for key, value := range updates {
		// Prevent SQL injection by validating field names
		if !allowedUpdateFields[key] {
			return fmt.Errorf("invalid field for update: %s", key)
		}
		// Validate field values
		switch key {
		case "priority":
			if priority, ok := value.(int); ok {
				if priority < 0 || priority > 4 {
					return fmt.Errorf("priority must be between 0 and 4 (got %d)", priority)
				}
			}
		case "status":
			if status, ok := value.(string); ok {
				if !types.Status(status).IsValid() {
					return fmt.Errorf("invalid status: %s", status)
				}
			}
		case "issue_type":
			if issueType, ok := value.(string); ok {
				if !types.IssueType(issueType).IsValid() {
					return fmt.Errorf("invalid issue type: %s", issueType)
				}
			}
		case "title":
			if title, ok := value.(string); ok {
				if len(title) == 0 || len(title) > 500 {
					return fmt.Errorf("title must be 1-500 characters")
				}
			}
		case "estimated_minutes":
			if mins, ok := value.(int); ok {
				if mins < 0 {
					return fmt.Errorf("estimated_minutes cannot be negative")
				}
			}
		}
		// Key is whitelisted above, so interpolating it is safe.
		setClauses = append(setClauses, fmt.Sprintf("%s = ?", key))
		args = append(args, value)
	}
	// Final bind parameter is the WHERE id.
	args = append(args, id)
	// Start transaction
	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()
	// Update issue
	query := fmt.Sprintf("UPDATE issues SET %s WHERE id = ?", strings.Join(setClauses, ", "))
	_, err = tx.ExecContext(ctx, query, args...)
	if err != nil {
		return fmt.Errorf("failed to update issue: %w", err)
	}
	// Record event: full old-issue snapshot plus the raw update map.
	oldData, _ := json.Marshal(oldIssue)
	newData, _ := json.Marshal(updates)
	oldDataStr := string(oldData)
	newDataStr := string(newData)
	// Pick the most specific event type for status transitions.
	eventType := types.EventUpdated
	if statusVal, ok := updates["status"]; ok {
		if statusVal == string(types.StatusClosed) {
			eventType = types.EventClosed
		} else {
			eventType = types.EventStatusChanged
		}
	}
	_, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, old_value, new_value)
		VALUES (?, ?, ?, ?, ?)
	`, id, eventType, actor, oldDataStr, newDataStr)
	if err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}
	return tx.Commit()
}
// CloseIssue marks an issue closed, stamping closed_at/updated_at with the
// current time and recording an EventClosed entry carrying the reason, all
// in one transaction. Closing an unknown ID is a silent no-op on the issues
// table (the event row is still written).
func (s *SQLiteStorage) CloseIssue(ctx context.Context, id string, reason string, actor string) error {
	closedAt := time.Now()

	tx, err := s.db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	defer tx.Rollback()

	if _, err = tx.ExecContext(ctx, `
		UPDATE issues SET status = ?, closed_at = ?, updated_at = ?
		WHERE id = ?
	`, types.StatusClosed, closedAt, closedAt, id); err != nil {
		return fmt.Errorf("failed to close issue: %w", err)
	}

	if _, err = tx.ExecContext(ctx, `
		INSERT INTO events (issue_id, event_type, actor, comment)
		VALUES (?, ?, ?, ?)
	`, id, types.EventClosed, actor, reason); err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}
	return tx.Commit()
}
// SearchIssues finds issues matching query and filters. A non-empty query
// is matched as a case-blind LIKE substring against title, description, and
// id; filter fields are ANDed on top. Results are ordered by ascending
// priority, then newest first, optionally limited.
//
// Fix: the scan loop duplicated scanIssues verbatim — reuse the shared
// helper instead (consistent with GetReadyWork/GetIssuesByLabel). The LIMIT
// is now bound as a parameter rather than interpolated with Sprintf.
func (s *SQLiteStorage) SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error) {
	whereClauses := []string{}
	args := []interface{}{}
	if query != "" {
		whereClauses = append(whereClauses, "(title LIKE ? OR description LIKE ? OR id LIKE ?)")
		pattern := "%" + query + "%"
		args = append(args, pattern, pattern, pattern)
	}
	if filter.Status != nil {
		whereClauses = append(whereClauses, "status = ?")
		args = append(args, *filter.Status)
	}
	if filter.Priority != nil {
		whereClauses = append(whereClauses, "priority = ?")
		args = append(args, *filter.Priority)
	}
	if filter.IssueType != nil {
		whereClauses = append(whereClauses, "issue_type = ?")
		args = append(args, *filter.IssueType)
	}
	if filter.Assignee != nil {
		whereClauses = append(whereClauses, "assignee = ?")
		args = append(args, *filter.Assignee)
	}
	whereSQL := ""
	if len(whereClauses) > 0 {
		whereSQL = "WHERE " + strings.Join(whereClauses, " AND ")
	}
	limitSQL := ""
	if filter.Limit > 0 {
		limitSQL = " LIMIT ?"
		args = append(args, filter.Limit)
	}
	querySQL := fmt.Sprintf(`
		SELECT id, title, description, design, acceptance_criteria, notes,
			status, priority, issue_type, assignee, estimated_minutes,
			created_at, updated_at, closed_at
		FROM issues
		%s
		ORDER BY priority ASC, created_at DESC
		%s
	`, whereSQL, limitSQL)
	rows, err := s.db.QueryContext(ctx, querySQL, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to search issues: %w", err)
	}
	defer rows.Close()
	return scanIssues(rows)
}
// Close closes the underlying database connection pool. The storage must
// not be used after Close returns.
func (s *SQLiteStorage) Close() error {
	return s.db.Close()
}

View File

@@ -0,0 +1,61 @@
package storage
import (
"context"
"github.com/steveyackey/beads/internal/types"
)
// Storage defines the interface for issue storage backends.
// Every mutating method takes an actor string that is recorded in the
// audit-trail events table alongside the change.
type Storage interface {
	// Issues.
	// GetIssue returns (nil, nil) when the ID does not exist.
	// UpdateIssue accepts a map of column name -> new value.
	CreateIssue(ctx context.Context, issue *types.Issue, actor string) error
	GetIssue(ctx context.Context, id string) (*types.Issue, error)
	UpdateIssue(ctx context.Context, id string, updates map[string]interface{}, actor string) error
	CloseIssue(ctx context.Context, id string, reason string, actor string) error
	SearchIssues(ctx context.Context, query string, filter types.IssueFilter) ([]*types.Issue, error)
	// Dependencies between issues; DetectCycles returns each cycle as an
	// ordered slice of the issues participating in it.
	AddDependency(ctx context.Context, dep *types.Dependency, actor string) error
	RemoveDependency(ctx context.Context, issueID, dependsOnID string, actor string) error
	GetDependencies(ctx context.Context, issueID string) ([]*types.Issue, error)
	GetDependents(ctx context.Context, issueID string) ([]*types.Issue, error)
	GetDependencyTree(ctx context.Context, issueID string, maxDepth int) ([]*types.TreeNode, error)
	DetectCycles(ctx context.Context) ([][]*types.Issue, error)
	// Labels (free-form tags on issues).
	AddLabel(ctx context.Context, issueID, label, actor string) error
	RemoveLabel(ctx context.Context, issueID, label, actor string) error
	GetLabels(ctx context.Context, issueID string) ([]string, error)
	GetIssuesByLabel(ctx context.Context, label string) ([]*types.Issue, error)
	// Ready Work & Blocking: ready work is issues with no open blockers.
	GetReadyWork(ctx context.Context, filter types.WorkFilter) ([]*types.Issue, error)
	GetBlockedIssues(ctx context.Context) ([]*types.BlockedIssue, error)
	// Events (audit trail); limit <= 0 returns all events.
	AddComment(ctx context.Context, issueID, actor, comment string) error
	GetEvents(ctx context.Context, issueID string, limit int) ([]*types.Event, error)
	// Statistics.
	GetStatistics(ctx context.Context) (*types.Statistics, error)
	// Lifecycle.
	Close() error
}
// Config holds database configuration for constructing a Storage backend.
// Only the fields for the selected Backend are consulted.
type Config struct {
	Backend string // "sqlite" or "postgres"

	// SQLite config
	Path string // database file path

	// PostgreSQL config
	Host     string
	Port     int
	Database string
	User     string
	Password string
	SSLMode  string
}

190
internal/types/types.go Normal file
View File

@@ -0,0 +1,190 @@
package types
import (
"fmt"
"time"
)
// Issue represents a trackable work item.
// Priority is 0-4 where lower numbers sort first (see Validate); pointer
// fields are optional and omitted from JSON when unset.
type Issue struct {
	ID                 string     `json:"id"`
	Title              string     `json:"title"` // required, max 500 chars
	Description        string     `json:"description"`
	Design             string     `json:"design,omitempty"`
	AcceptanceCriteria string     `json:"acceptance_criteria,omitempty"`
	Notes              string     `json:"notes,omitempty"`
	Status             Status     `json:"status"`
	Priority           int        `json:"priority"` // 0 (highest) .. 4
	IssueType          IssueType  `json:"issue_type"`
	Assignee           string     `json:"assignee,omitempty"`
	EstimatedMinutes   *int       `json:"estimated_minutes,omitempty"` // nil = no estimate
	CreatedAt          time.Time  `json:"created_at"`
	UpdatedAt          time.Time  `json:"updated_at"`
	ClosedAt           *time.Time `json:"closed_at,omitempty"` // nil until closed
}
// Validate checks the issue's field values, returning the first violation
// found: title presence and length (<= 500), priority range (0-4), status
// and issue-type validity, and a non-negative estimate when one is set.
func (i *Issue) Validate() error {
	switch {
	case len(i.Title) == 0:
		return fmt.Errorf("title is required")
	case len(i.Title) > 500:
		return fmt.Errorf("title must be 500 characters or less (got %d)", len(i.Title))
	case i.Priority < 0 || i.Priority > 4:
		return fmt.Errorf("priority must be between 0 and 4 (got %d)", i.Priority)
	case !i.Status.IsValid():
		return fmt.Errorf("invalid status: %s", i.Status)
	case !i.IssueType.IsValid():
		return fmt.Errorf("invalid issue type: %s", i.IssueType)
	case i.EstimatedMinutes != nil && *i.EstimatedMinutes < 0:
		return fmt.Errorf("estimated_minutes cannot be negative")
	}
	return nil
}
// Status represents the current state of an issue.
// "blocked" is a manually-set state; blocking via dependencies is computed
// separately by the storage layer's ready/blocked queries.
type Status string

const (
	StatusOpen       Status = "open"
	StatusInProgress Status = "in_progress"
	StatusBlocked    Status = "blocked"
	StatusClosed     Status = "closed"
)
// IsValid checks if the status value is valid
func (s Status) IsValid() bool {
switch s {
case StatusOpen, StatusInProgress, StatusBlocked, StatusClosed:
return true
}
return false
}
// IssueType categorizes the kind of work an issue tracks.
type IssueType string

// Recognized work categories.
const (
	TypeBug     IssueType = "bug"
	TypeFeature IssueType = "feature"
	TypeTask    IssueType = "task"
	TypeEpic    IssueType = "epic"
	TypeChore   IssueType = "chore"
)

// IsValid reports whether t names one of the recognized issue types.
func (t IssueType) IsValid() bool {
	for _, known := range []IssueType{TypeBug, TypeFeature, TypeTask, TypeEpic, TypeChore} {
		if t == known {
			return true
		}
	}
	return false
}
// Dependency represents a directed relationship between two issues:
// IssueID depends on DependsOnID, with Type describing the nature of
// the edge. CreatedBy records the actor that added the link, for the
// audit trail.
type Dependency struct {
	IssueID     string         `json:"issue_id"`      // the dependent issue
	DependsOnID string         `json:"depends_on_id"` // the issue it depends on
	Type        DependencyType `json:"type"`
	CreatedAt   time.Time      `json:"created_at"`
	CreatedBy   string         `json:"created_by"` // actor who created the link
}
// DependencyType categorizes the relationship carried by a dependency
// edge between two issues.
type DependencyType string

// Recognized dependency relationships.
const (
	DepBlocks      DependencyType = "blocks"
	DepRelated     DependencyType = "related"
	DepParentChild DependencyType = "parent-child"
)

// IsValid reports whether d is one of the recognized dependency types.
func (d DependencyType) IsValid() bool {
	return d == DepBlocks || d == DepRelated || d == DepParentChild
}
// Label represents a single tag attached to an issue. One row per
// (issue, label) pair.
type Label struct {
	IssueID string `json:"issue_id"`
	Label   string `json:"label"`
}
// Event represents one audit-trail entry recorded against an issue:
// who (Actor) did what (EventType) and, where applicable, the value
// transition and any comment text.
type Event struct {
	ID        int64     `json:"id"`
	IssueID   string    `json:"issue_id"`
	EventType EventType `json:"event_type"`
	Actor     string    `json:"actor"`
	OldValue  *string   `json:"old_value,omitempty"` // nil when the event carries no prior value
	NewValue  *string   `json:"new_value,omitempty"` // nil when the event carries no new value
	Comment   *string   `json:"comment,omitempty"`   // nil unless the event includes comment text
	CreatedAt time.Time `json:"created_at"`
}
// EventType categorizes audit trail events.
type EventType string

// The kinds of events recorded in an issue's audit trail.
const (
	EventCreated           EventType = "created"
	EventUpdated           EventType = "updated"
	EventStatusChanged     EventType = "status_changed"
	EventCommented         EventType = "commented"
	EventClosed            EventType = "closed"
	EventReopened          EventType = "reopened"
	EventDependencyAdded   EventType = "dependency_added"
	EventDependencyRemoved EventType = "dependency_removed"
	EventLabelAdded        EventType = "label_added"
	EventLabelRemoved      EventType = "label_removed"
)
// BlockedIssue extends Issue with information about what is blocking it.
// BlockedBy lists the blockers -- presumably their issue IDs, and
// presumably BlockedByCount == len(BlockedBy); confirm against the
// storage query that populates it.
type BlockedIssue struct {
	Issue
	BlockedByCount int      `json:"blocked_by_count"`
	BlockedBy      []string `json:"blocked_by"`
}
// TreeNode represents a node in a dependency tree: an issue annotated
// with its depth in the tree. Truncated marks nodes where traversal
// stopped early -- presumably at a depth limit; confirm against the
// tree-building code.
type TreeNode struct {
	Issue
	Depth     int  `json:"depth"`
	Truncated bool `json:"truncated"`
}
// Statistics provides aggregate metrics over the issue database.
// AverageLeadTime is expressed in hours (per its JSON tag).
type Statistics struct {
	TotalIssues      int     `json:"total_issues"`
	OpenIssues       int     `json:"open_issues"`
	InProgressIssues int     `json:"in_progress_issues"`
	ClosedIssues     int     `json:"closed_issues"`
	BlockedIssues    int     `json:"blocked_issues"`
	ReadyIssues      int     `json:"ready_issues"` // issues with no open blockers
	AverageLeadTime  float64 `json:"average_lead_time_hours"`
}
// IssueFilter is used to filter issue queries. Pointer fields appear
// intended to mean "no constraint" when nil -- confirm against the
// storage implementation. Labels and Limit semantics (match-all vs
// match-any; 0 = unlimited?) are likewise defined by the query builder.
type IssueFilter struct {
	Status    *Status
	Priority  *int
	IssueType *IssueType
	Assignee  *string
	Labels    []string
	Limit     int
}
// WorkFilter is used to filter ready-work queries. Unlike IssueFilter,
// Status is a plain value here -- presumably the zero value "" means
// "any status"; confirm against the storage implementation.
type WorkFilter struct {
	Status   Status
	Priority *int    // nil means no priority constraint -- TODO confirm
	Assignee *string // nil means no assignee constraint -- TODO confirm
	Limit    int
}