feat: Add bd repos multi-repo commands and fix bd ready for in_progress issues
- Add 'bd repos' command for multi-repository management (bd-123)
- bd repos list: show all cached repositories
- bd repos ready: aggregate ready work across repos
- bd repos stats: combined statistics across repos
- bd repos clear-cache: clear repository cache
- Requires global daemon (bd daemon --global)
- Fix bd ready to show in_progress issues (bd-165)
- bd ready now shows both 'open' and 'in_progress' issues with no blockers
- Allows epics/tasks ready to close to appear in ready work
- Critical P0 bug fix for workflow
- Apply code review improvements to repos implementation
- Use strongly typed RPC responses (remove interface{})
- Fix clear-cache lock handling (close connections outside lock)
- Add error collection for per-repo failures
- Add context timeouts (1-2s) to prevent hangs
- Add lock strategy comments
- Update documentation (README.md, AGENTS.md)
- Add comprehensive tests for both features
Amp-Thread-ID: https://ampcode.com/threads/T-1de989a1-1890-492c-9847-a34144259e0f
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
@@ -190,3 +190,23 @@ func (c *Client) RemoveLabel(args *LabelRemoveArgs) (*Response, error) {
|
||||
func (c *Client) Batch(args *BatchArgs) (*Response, error) {
|
||||
return c.Execute(OpBatch, args)
|
||||
}
|
||||
|
||||
// ReposList lists all cached repositories
|
||||
func (c *Client) ReposList() (*Response, error) {
|
||||
return c.Execute(OpReposList, struct{}{})
|
||||
}
|
||||
|
||||
// ReposReady gets ready work across all repositories
|
||||
func (c *Client) ReposReady(args *ReposReadyArgs) (*Response, error) {
|
||||
return c.Execute(OpReposReady, args)
|
||||
}
|
||||
|
||||
// ReposStats gets combined statistics across all repositories
|
||||
func (c *Client) ReposStats() (*Response, error) {
|
||||
return c.Execute(OpReposStats, struct{}{})
|
||||
}
|
||||
|
||||
// ReposClearCache clears the repository cache
|
||||
func (c *Client) ReposClearCache() (*Response, error) {
|
||||
return c.Execute(OpReposClearCache, struct{}{})
|
||||
}
|
||||
|
||||
@@ -1,23 +1,31 @@
|
||||
// Package rpc defines the wire protocol shared by the bd client and daemon.
package rpc

import (
	"encoding/json"

	"github.com/steveyegge/beads/internal/types"
)

// Operation constants for all bd commands.
// Each value is the string op name carried in a Request and switched on by
// the daemon's handleRequest dispatcher.
const (
	// Connection check and single-issue lifecycle operations.
	OpPing   = "ping"
	OpCreate = "create"
	OpUpdate = "update"
	OpClose  = "close"
	OpList   = "list"
	OpShow   = "show"
	OpReady  = "ready"
	OpStats  = "stats"
	// Dependency-graph operations.
	OpDepAdd    = "dep_add"
	OpDepRemove = "dep_remove"
	OpDepTree   = "dep_tree"
	// Label operations.
	OpLabelAdd    = "label_add"
	OpLabelRemove = "label_remove"
	// Batched execution of multiple operations in one request.
	OpBatch = "batch"
	// Multi-repository operations (require the global daemon).
	OpReposList       = "repos_list"
	OpReposReady      = "repos_ready"
	OpReposStats      = "repos_stats"
	OpReposClearCache = "repos_clear_cache"
)
|
||||
// Request represents an RPC request from client to daemon
|
||||
@@ -151,3 +159,38 @@ type BatchResult struct {
|
||||
Data json.RawMessage `json:"data,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// ReposReadyArgs represents arguments for repos ready operation.
type ReposReadyArgs struct {
	Assignee    string `json:"assignee,omitempty"`      // filter to issues assigned to this user; empty means no assignee filter
	Priority    *int   `json:"priority,omitempty"`      // optional priority filter; nil means any priority
	Limit       int    `json:"limit,omitempty"`         // max issues per repository query (applied per repo, not to the combined result — TODO confirm intended)
	GroupByRepo bool   `json:"group_by_repo,omitempty"` // when true, the response groups issues per repo (RepoReadyWork) instead of a flat list
}
||||
|
||||
// RepoInfo represents information about a cached repository.
type RepoInfo struct {
	Path       string `json:"path"`        // repository path as keyed in the daemon's storage cache
	Prefix     string `json:"prefix"`      // issue-ID prefix including the trailing hyphen (e.g. "bd-"); empty when it could not be derived
	IssueCount int    `json:"issue_count"` // total issue count reported by the repo's statistics
	LastAccess string `json:"last_access"` // currently always "active" for cached repos — TODO confirm intended semantics
}
||||
|
||||
// RepoReadyWork represents ready work for a single repository.
// Used when ReposReadyArgs.GroupByRepo is set.
type RepoReadyWork struct {
	RepoPath string         `json:"repo_path"` // repository this group of issues belongs to
	Issues   []*types.Issue `json:"issues"`    // ready issues in that repository
}
||||
|
||||
// ReposReadyIssue represents an issue with repo context.
// Used for the flat (ungrouped) repos-ready response.
type ReposReadyIssue struct {
	RepoPath string       `json:"repo_path"` // repository the issue came from
	Issue    *types.Issue `json:"issue"`     // the ready issue itself
}
||||
|
||||
// ReposStatsResponse contains combined statistics across repos.
type ReposStatsResponse struct {
	Total   types.Statistics            `json:"total"`            // field-wise sum of per-repo statistics
	PerRepo map[string]types.Statistics `json:"per_repo"`         // statistics keyed by repository path
	Errors  map[string]string           `json:"errors,omitempty"` // per-repo error messages for repos whose stats query failed; omitted when all succeeded
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
@@ -176,6 +177,14 @@ func (s *Server) handleRequest(req *Request) Response {
|
||||
return s.handleLabelRemove(req)
|
||||
case OpBatch:
|
||||
return s.handleBatch(req)
|
||||
case OpReposList:
|
||||
return s.handleReposList(req)
|
||||
case OpReposReady:
|
||||
return s.handleReposReady(req)
|
||||
case OpReposStats:
|
||||
return s.handleReposStats(req)
|
||||
case OpReposClearCache:
|
||||
return s.handleReposClearCache(req)
|
||||
default:
|
||||
return Response{
|
||||
Success: false,
|
||||
@@ -766,3 +775,203 @@ func (s *Server) writeResponse(writer *bufio.Writer, resp Response) {
|
||||
writer.WriteByte('\n')
|
||||
writer.Flush()
|
||||
}
|
||||
|
||||
// Multi-repo handlers
|
||||
|
||||
func (s *Server) handleReposList(_ *Request) Response {
|
||||
// Keep read lock during iteration to prevent stores from being closed mid-query
|
||||
s.cacheMu.RLock()
|
||||
defer s.cacheMu.RUnlock()
|
||||
|
||||
repos := make([]RepoInfo, 0, len(s.storageCache))
|
||||
for path, store := range s.storageCache {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
stats, err := store.GetStatistics(ctx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract prefix from a sample issue
|
||||
filter := types.IssueFilter{Limit: 1}
|
||||
ctx2, cancel2 := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
issues, err := store.SearchIssues(ctx2, "", filter)
|
||||
cancel2()
|
||||
prefix := ""
|
||||
if err == nil && len(issues) > 0 && len(issues[0].ID) > 0 {
|
||||
// Extract prefix (everything before the last hyphen and number)
|
||||
id := issues[0].ID
|
||||
for i := len(id) - 1; i >= 0; i-- {
|
||||
if id[i] == '-' {
|
||||
prefix = id[:i+1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
repos = append(repos, RepoInfo{
|
||||
Path: path,
|
||||
Prefix: prefix,
|
||||
IssueCount: stats.TotalIssues,
|
||||
LastAccess: "active",
|
||||
})
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(repos)
|
||||
return Response{
|
||||
Success: true,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) handleReposReady(req *Request) Response {
|
||||
var args ReposReadyArgs
|
||||
if err := json.Unmarshal(req.Args, &args); err != nil {
|
||||
return Response{
|
||||
Success: false,
|
||||
Error: fmt.Sprintf("invalid args: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
// Keep read lock during iteration to prevent stores from being closed mid-query
|
||||
s.cacheMu.RLock()
|
||||
defer s.cacheMu.RUnlock()
|
||||
|
||||
if args.GroupByRepo {
|
||||
result := make([]RepoReadyWork, 0, len(s.storageCache))
|
||||
for path, store := range s.storageCache {
|
||||
filter := types.WorkFilter{
|
||||
Status: types.StatusOpen,
|
||||
Limit: args.Limit,
|
||||
}
|
||||
if args.Priority != nil {
|
||||
filter.Priority = args.Priority
|
||||
}
|
||||
if args.Assignee != "" {
|
||||
filter.Assignee = &args.Assignee
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
issues, err := store.GetReadyWork(ctx, filter)
|
||||
cancel()
|
||||
if err != nil || len(issues) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, RepoReadyWork{
|
||||
RepoPath: path,
|
||||
Issues: issues,
|
||||
})
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(result)
|
||||
return Response{
|
||||
Success: true,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
// Flat list of all ready issues across all repos
|
||||
allIssues := make([]ReposReadyIssue, 0)
|
||||
for path, store := range s.storageCache {
|
||||
filter := types.WorkFilter{
|
||||
Status: types.StatusOpen,
|
||||
Limit: args.Limit,
|
||||
}
|
||||
if args.Priority != nil {
|
||||
filter.Priority = args.Priority
|
||||
}
|
||||
if args.Assignee != "" {
|
||||
filter.Assignee = &args.Assignee
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
issues, err := store.GetReadyWork(ctx, filter)
|
||||
cancel()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, issue := range issues {
|
||||
allIssues = append(allIssues, ReposReadyIssue{
|
||||
RepoPath: path,
|
||||
Issue: issue,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(allIssues)
|
||||
return Response{
|
||||
Success: true,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) handleReposStats(_ *Request) Response {
|
||||
// Keep read lock during iteration to prevent stores from being closed mid-query
|
||||
s.cacheMu.RLock()
|
||||
defer s.cacheMu.RUnlock()
|
||||
|
||||
total := types.Statistics{}
|
||||
perRepo := make(map[string]types.Statistics)
|
||||
errors := make(map[string]string)
|
||||
|
||||
for path, store := range s.storageCache {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
stats, err := store.GetStatistics(ctx)
|
||||
cancel()
|
||||
if err != nil {
|
||||
errors[path] = err.Error()
|
||||
continue
|
||||
}
|
||||
|
||||
perRepo[path] = *stats
|
||||
|
||||
// Aggregate totals
|
||||
total.TotalIssues += stats.TotalIssues
|
||||
total.OpenIssues += stats.OpenIssues
|
||||
total.InProgressIssues += stats.InProgressIssues
|
||||
total.ClosedIssues += stats.ClosedIssues
|
||||
total.BlockedIssues += stats.BlockedIssues
|
||||
total.ReadyIssues += stats.ReadyIssues
|
||||
total.EpicsEligibleForClosure += stats.EpicsEligibleForClosure
|
||||
}
|
||||
|
||||
result := ReposStatsResponse{
|
||||
Total: total,
|
||||
PerRepo: perRepo,
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
result.Errors = errors
|
||||
}
|
||||
|
||||
data, _ := json.Marshal(result)
|
||||
return Response{
|
||||
Success: true,
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) handleReposClearCache(_ *Request) Response {
|
||||
// Copy stores under write lock, clear cache, then close outside lock
|
||||
// to avoid holding lock during potentially slow Close() operations
|
||||
s.cacheMu.Lock()
|
||||
stores := make([]storage.Storage, 0, len(s.storageCache))
|
||||
for _, store := range s.storageCache {
|
||||
stores = append(stores, store)
|
||||
}
|
||||
s.storageCache = make(map[string]storage.Storage)
|
||||
s.cacheMu.Unlock()
|
||||
|
||||
// Close all storage connections without holding lock
|
||||
for _, store := range stores {
|
||||
if err := store.Close(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to close storage: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return Response{
|
||||
Success: true,
|
||||
Data: json.RawMessage(`{"message":"Cache cleared successfully"}`),
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user