Merge feature/hash-ids into main
Amp-Thread-ID: https://ampcode.com/threads/T-ffbe22bb-92f1-4763-aa5d-1bb84697c866 Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
File diff suppressed because one or more lines are too long
@@ -210,6 +210,8 @@ bd merge bd-42 bd-43 --into bd-41 --dry-run # Preview merge
|
||||
bd migrate # Detect and migrate old databases
|
||||
bd migrate --dry-run # Preview migration
|
||||
bd migrate --cleanup --yes # Migrate and remove old files
|
||||
bd migrate --to-hash-ids # Migrate sequential IDs to hash-based IDs
|
||||
bd migrate --to-hash-ids --dry-run # Preview hash ID migration
|
||||
```
|
||||
|
||||
### Managing Daemons
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/beads/internal/rpc"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
var commentsCmd = &cobra.Command{
|
||||
@@ -63,6 +64,13 @@ Examples:
|
||||
os.Exit(1)
|
||||
}
|
||||
ctx := context.Background()
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, issueID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", issueID, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
issueID = fullID
|
||||
|
||||
result, err := store.GetIssueComments(ctx, issueID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error getting comments: %v\n", err)
|
||||
@@ -176,7 +184,14 @@ Examples:
|
||||
os.Exit(1)
|
||||
}
|
||||
ctx := context.Background()
|
||||
var err error
|
||||
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, issueID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", issueID, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
issueID = fullID
|
||||
|
||||
comment, err = store.AddIssueComment(ctx, issueID, author, commentText)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error adding comment: %v\n", err)
|
||||
|
||||
@@ -60,28 +60,54 @@ var createCmd = &cobra.Command{
|
||||
assignee, _ := cmd.Flags().GetString("assignee")
|
||||
labels, _ := cmd.Flags().GetStringSlice("labels")
|
||||
explicitID, _ := cmd.Flags().GetString("id")
|
||||
parentID, _ := cmd.Flags().GetString("parent")
|
||||
externalRef, _ := cmd.Flags().GetString("external-ref")
|
||||
deps, _ := cmd.Flags().GetStringSlice("deps")
|
||||
forceCreate, _ := cmd.Flags().GetBool("force")
|
||||
jsonOutput, _ := cmd.Flags().GetBool("json")
|
||||
|
||||
// Validate explicit ID format if provided (prefix-number)
|
||||
if explicitID != "" {
|
||||
// Check format: must contain hyphen and have numeric suffix
|
||||
parts := strings.Split(explicitID, "-")
|
||||
if len(parts) != 2 {
|
||||
fmt.Fprintf(os.Stderr, "Error: invalid ID format '%s' (expected format: prefix-number, e.g., 'bd-42')\n", explicitID)
|
||||
// Check for conflicting flags
|
||||
if explicitID != "" && parentID != "" {
|
||||
fmt.Fprintf(os.Stderr, "Error: cannot specify both --id and --parent flags\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// If parent is specified, generate child ID
|
||||
if parentID != "" {
|
||||
ctx := context.Background()
|
||||
var childID string
|
||||
var err error
|
||||
|
||||
if daemonClient != nil {
|
||||
// TODO: Add RPC support for GetNextChildID (bd-171)
|
||||
fmt.Fprintf(os.Stderr, "Error: --parent flag not yet supported in daemon mode\n")
|
||||
os.Exit(1)
|
||||
} else {
|
||||
// Direct mode - use storage
|
||||
childID, err = store.GetNextChildID(ctx, parentID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
// Validate numeric suffix
|
||||
if _, err := fmt.Sscanf(parts[1], "%d", new(int)); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: invalid ID format '%s' (numeric suffix required, e.g., 'bd-42')\n", explicitID)
|
||||
explicitID = childID // Set as explicit ID for the rest of the flow
|
||||
}
|
||||
|
||||
// Validate explicit ID format if provided
|
||||
// Supports: prefix-number (bd-42), prefix-hash (bd-a3f8e9), or hierarchical (bd-a3f8e9.1)
|
||||
if explicitID != "" {
|
||||
// Must contain hyphen
|
||||
if !strings.Contains(explicitID, "-") {
|
||||
fmt.Fprintf(os.Stderr, "Error: invalid ID format '%s' (expected format: prefix-hash or prefix-hash.number, e.g., 'bd-a3f8e9' or 'bd-a3f8e9.1')\n", explicitID)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Extract prefix (before the first hyphen)
|
||||
hyphenIdx := strings.Index(explicitID, "-")
|
||||
requestedPrefix := explicitID[:hyphenIdx]
|
||||
|
||||
// Validate prefix matches database prefix (unless --force is used)
|
||||
if !forceCreate {
|
||||
requestedPrefix := parts[0]
|
||||
ctx := context.Background()
|
||||
|
||||
// Get database prefix from config
|
||||
@@ -96,8 +122,7 @@ var createCmd = &cobra.Command{
|
||||
|
||||
if dbPrefix != "" && dbPrefix != requestedPrefix {
|
||||
fmt.Fprintf(os.Stderr, "Error: prefix mismatch detected\n")
|
||||
fmt.Fprintf(os.Stderr, " This database uses prefix '%s-', but you specified '%s-'\n", dbPrefix, requestedPrefix)
|
||||
fmt.Fprintf(os.Stderr, " Did you mean to create '%s-%s'?\n", dbPrefix, parts[1])
|
||||
fmt.Fprintf(os.Stderr, " This database uses prefix '%s', but you specified '%s'\n", dbPrefix, requestedPrefix)
|
||||
fmt.Fprintf(os.Stderr, " Use --force to create with mismatched prefix anyway\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -243,6 +268,7 @@ func init() {
|
||||
createCmd.Flags().StringP("assignee", "a", "", "Assignee")
|
||||
createCmd.Flags().StringSliceP("labels", "l", []string{}, "Labels (comma-separated)")
|
||||
createCmd.Flags().String("id", "", "Explicit issue ID (e.g., 'bd-42' for partitioning)")
|
||||
createCmd.Flags().String("parent", "", "Parent issue ID for hierarchical child (e.g., 'bd-a3f8e9')")
|
||||
createCmd.Flags().String("external-ref", "", "External reference (e.g., 'gh-9', 'jira-ABC')")
|
||||
createCmd.Flags().StringSlice("deps", []string{}, "Dependencies in format 'type:id' or 'id' (e.g., 'discovered-from:bd-20,blocks:bd-15' or 'bd-20')")
|
||||
createCmd.Flags().Bool("force", false, "Force creation even if prefix doesn't match database prefix")
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/steveyegge/beads/internal/rpc"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
var depCmd = &cobra.Command{
|
||||
@@ -51,13 +52,26 @@ var depAddCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
// Direct mode
|
||||
ctx := context.Background()
|
||||
|
||||
fullFromID, err := utils.ResolvePartialID(ctx, store, args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving issue ID %s: %v\n", args[0], err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fullToID, err := utils.ResolvePartialID(ctx, store, args[1])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving dependency ID %s: %v\n", args[1], err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
dep := &types.Dependency{
|
||||
IssueID: args[0],
|
||||
DependsOnID: args[1],
|
||||
IssueID: fullFromID,
|
||||
DependsOnID: fullToID,
|
||||
Type: types.DependencyType(depType),
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if err := store.AddDependency(ctx, dep, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
@@ -94,8 +108,8 @@ var depAddCmd = &cobra.Command{
|
||||
if jsonOutput {
|
||||
outputJSON(map[string]interface{}{
|
||||
"status": "added",
|
||||
"issue_id": args[0],
|
||||
"depends_on_id": args[1],
|
||||
"issue_id": fullFromID,
|
||||
"depends_on_id": fullToID,
|
||||
"type": depType,
|
||||
})
|
||||
return
|
||||
@@ -103,7 +117,7 @@ var depAddCmd = &cobra.Command{
|
||||
|
||||
green := color.New(color.FgGreen).SprintFunc()
|
||||
fmt.Printf("%s Added dependency: %s depends on %s (%s)\n",
|
||||
green("✓"), args[0], args[1], depType)
|
||||
green("✓"), fullFromID, fullToID, depType)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -138,7 +152,20 @@ var depRemoveCmd = &cobra.Command{
|
||||
|
||||
// Direct mode
|
||||
ctx := context.Background()
|
||||
if err := store.RemoveDependency(ctx, args[0], args[1], actor); err != nil {
|
||||
|
||||
fullFromID, err := utils.ResolvePartialID(ctx, store, args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving issue ID %s: %v\n", args[0], err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fullToID, err := utils.ResolvePartialID(ctx, store, args[1])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving dependency ID %s: %v\n", args[1], err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := store.RemoveDependency(ctx, fullFromID, fullToID, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -149,15 +176,15 @@ var depRemoveCmd = &cobra.Command{
|
||||
if jsonOutput {
|
||||
outputJSON(map[string]interface{}{
|
||||
"status": "removed",
|
||||
"issue_id": args[0],
|
||||
"depends_on_id": args[1],
|
||||
"issue_id": fullFromID,
|
||||
"depends_on_id": fullToID,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
green := color.New(color.FgGreen).SprintFunc()
|
||||
fmt.Printf("%s Removed dependency: %s no longer depends on %s\n",
|
||||
green("✓"), args[0], args[1])
|
||||
green("✓"), fullFromID, fullToID)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -187,7 +214,14 @@ var depTreeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tree, err := store.GetDependencyTree(ctx, args[0], maxDepth, showAllPaths, reverse)
|
||||
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, args[0])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", args[0], err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tree, err := store.GetDependencyTree(ctx, fullID, maxDepth, showAllPaths, reverse)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
@@ -204,18 +238,18 @@ var depTreeCmd = &cobra.Command{
|
||||
|
||||
if len(tree) == 0 {
|
||||
if reverse {
|
||||
fmt.Printf("\n%s has no dependents\n", args[0])
|
||||
fmt.Printf("\n%s has no dependents\n", fullID)
|
||||
} else {
|
||||
fmt.Printf("\n%s has no dependencies\n", args[0])
|
||||
fmt.Printf("\n%s has no dependencies\n", fullID)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
cyan := color.New(color.FgCyan).SprintFunc()
|
||||
if reverse {
|
||||
fmt.Printf("\n%s Dependent tree for %s:\n\n", cyan("🌲"), args[0])
|
||||
fmt.Printf("\n%s Dependent tree for %s:\n\n", cyan("🌲"), fullID)
|
||||
} else {
|
||||
fmt.Printf("\n%s Dependency tree for %s:\n\n", cyan("🌲"), args[0])
|
||||
fmt.Printf("\n%s Dependency tree for %s:\n\n", cyan("🌲"), fullID)
|
||||
}
|
||||
|
||||
hasTruncation := false
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/beads/internal/rpc"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
var labelCmd = &cobra.Command{
|
||||
@@ -79,6 +80,22 @@ var labelAddCmd = &cobra.Command{
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
issueIDs, label := parseLabelArgs(args)
|
||||
|
||||
// Resolve partial IDs if in direct mode
|
||||
if daemonClient == nil {
|
||||
ctx := context.Background()
|
||||
resolvedIDs := make([]string, 0, len(issueIDs))
|
||||
for _, id := range issueIDs {
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
resolvedIDs = append(resolvedIDs, fullID)
|
||||
}
|
||||
issueIDs = resolvedIDs
|
||||
}
|
||||
|
||||
processBatchLabelOperation(issueIDs, label, "added",
|
||||
func(issueID, lbl string) error {
|
||||
_, err := daemonClient.AddLabel(&rpc.LabelAddArgs{ID: issueID, Label: lbl})
|
||||
@@ -97,6 +114,22 @@ var labelRemoveCmd = &cobra.Command{
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
issueIDs, label := parseLabelArgs(args)
|
||||
|
||||
// Resolve partial IDs if in direct mode
|
||||
if daemonClient == nil {
|
||||
ctx := context.Background()
|
||||
resolvedIDs := make([]string, 0, len(issueIDs))
|
||||
for _, id := range issueIDs {
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
resolvedIDs = append(resolvedIDs, fullID)
|
||||
}
|
||||
issueIDs = resolvedIDs
|
||||
}
|
||||
|
||||
processBatchLabelOperation(issueIDs, label, "removed",
|
||||
func(issueID, lbl string) error {
|
||||
_, err := daemonClient.RemoveLabel(&rpc.LabelRemoveArgs{ID: issueID, Label: lbl})
|
||||
@@ -117,6 +150,16 @@ var labelListCmd = &cobra.Command{
|
||||
|
||||
ctx := context.Background()
|
||||
var labels []string
|
||||
|
||||
// Resolve partial ID if in direct mode
|
||||
if daemonClient == nil {
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, issueID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", issueID, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
issueID = fullID
|
||||
}
|
||||
|
||||
// Use daemon if available
|
||||
if daemonClient != nil {
|
||||
|
||||
@@ -7,11 +7,13 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/beads"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
|
||||
@@ -25,12 +27,14 @@ This command:
|
||||
- Checks schema versions
|
||||
- Migrates old databases to beads.db
|
||||
- Updates schema version metadata
|
||||
- Migrates sequential IDs to hash-based IDs (with --to-hash-ids)
|
||||
- Removes stale databases (with confirmation)`,
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
autoYes, _ := cmd.Flags().GetBool("yes")
|
||||
cleanup, _ := cmd.Flags().GetBool("cleanup")
|
||||
dryRun, _ := cmd.Flags().GetBool("dry-run")
|
||||
updateRepoID, _ := cmd.Flags().GetBool("update-repo-id")
|
||||
toHashIDs, _ := cmd.Flags().GetBool("to-hash-ids")
|
||||
|
||||
// Handle --update-repo-id first
|
||||
if updateRepoID {
|
||||
@@ -275,6 +279,107 @@ This command:
|
||||
}
|
||||
}
|
||||
|
||||
// Migrate to hash IDs if requested
|
||||
if toHashIDs {
|
||||
if !jsonOutput {
|
||||
fmt.Println("\n→ Migrating to hash-based IDs...")
|
||||
}
|
||||
|
||||
store, err := sqlite.New(targetPath)
|
||||
if err != nil {
|
||||
if jsonOutput {
|
||||
outputJSON(map[string]interface{}{
|
||||
"error": "hash_migration_failed",
|
||||
"message": err.Error(),
|
||||
})
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
|
||||
if err != nil {
|
||||
store.Close()
|
||||
if jsonOutput {
|
||||
outputJSON(map[string]interface{}{
|
||||
"error": "hash_migration_failed",
|
||||
"message": err.Error(),
|
||||
})
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "Error: failed to list issues: %v\n", err)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(issues) > 0 && !isHashID(issues[0].ID) {
|
||||
// Create backup
|
||||
if !dryRun {
|
||||
backupPath := strings.TrimSuffix(targetPath, ".db") + ".backup-pre-hash-" + time.Now().Format("20060102-150405") + ".db"
|
||||
if err := copyFile(targetPath, backupPath); err != nil {
|
||||
store.Close()
|
||||
if jsonOutput {
|
||||
outputJSON(map[string]interface{}{
|
||||
"error": "backup_failed",
|
||||
"message": err.Error(),
|
||||
})
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "Error: failed to create backup: %v\n", err)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
if !jsonOutput {
|
||||
color.Green("✓ Created backup: %s\n", filepath.Base(backupPath))
|
||||
}
|
||||
}
|
||||
|
||||
mapping, err := migrateToHashIDs(ctx, store, issues, dryRun)
|
||||
store.Close()
|
||||
|
||||
if err != nil {
|
||||
if jsonOutput {
|
||||
outputJSON(map[string]interface{}{
|
||||
"error": "hash_migration_failed",
|
||||
"message": err.Error(),
|
||||
})
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "Error: hash ID migration failed: %v\n", err)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if !jsonOutput {
|
||||
if dryRun {
|
||||
fmt.Printf("\nWould migrate %d issues to hash-based IDs\n", len(mapping))
|
||||
} else {
|
||||
color.Green("✓ Migrated %d issues to hash-based IDs\n", len(mapping))
|
||||
}
|
||||
}
|
||||
|
||||
// Set id_mode=hash after successful migration (not in dry-run)
|
||||
if !dryRun {
|
||||
store, err := sqlite.New(targetPath)
|
||||
if err == nil {
|
||||
ctx := context.Background()
|
||||
if err := store.SetConfig(ctx, "id_mode", "hash"); err != nil {
|
||||
if !jsonOutput {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to set id_mode=hash: %v\n", err)
|
||||
}
|
||||
} else if !jsonOutput {
|
||||
color.Green("✓ Switched database to hash ID mode\n")
|
||||
}
|
||||
store.Close()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
store.Close()
|
||||
if !jsonOutput {
|
||||
fmt.Println("Database already uses hash-based IDs")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Final status
|
||||
if jsonOutput {
|
||||
outputJSON(map[string]interface{}{
|
||||
@@ -497,5 +602,6 @@ func init() {
|
||||
migrateCmd.Flags().Bool("cleanup", false, "Remove old database files after migration")
|
||||
migrateCmd.Flags().Bool("dry-run", false, "Show what would be done without making changes")
|
||||
migrateCmd.Flags().Bool("update-repo-id", false, "Update repository ID (use after changing git remote)")
|
||||
migrateCmd.Flags().Bool("to-hash-ids", false, "Migrate sequential IDs to hash-based IDs")
|
||||
rootCmd.AddCommand(migrateCmd)
|
||||
}
|
||||
|
||||
391
cmd/bd/migrate_hash_ids.go
Normal file
391
cmd/bd/migrate_hash_ids.go
Normal file
@@ -0,0 +1,391 @@
|
||||
package main
|
||||
|
||||
import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strings"
	"time"

	"github.com/fatih/color"
	"github.com/spf13/cobra"
	"github.com/steveyegge/beads"
	"github.com/steveyegge/beads/internal/storage/sqlite"
	"github.com/steveyegge/beads/internal/types"
)
|
||||
|
||||
// migrateHashIDsCmd implements the standalone "bd migrate-hash-ids" command.
// It converts a database from sequential issue IDs (bd-1) to hash-based IDs
// (bd-a3f8e9a2): it backs up the database file, computes and applies the
// old→new mapping via migrateToHashIDs, and writes the mapping to a JSON
// file next to the database.
//
// Relies on package globals defined elsewhere in cmd/bd (jsonOutput,
// outputJSON, rootCmd) — presumably set up by the root command; verify
// against root.go.
var migrateHashIDsCmd = &cobra.Command{
	Use:   "migrate-hash-ids",
	Short: "Migrate sequential IDs to hash-based IDs",
	Long: `Migrate database from sequential IDs (bd-1, bd-2) to hash-based IDs (bd-a3f8e9a2).

This command:
- Generates hash IDs for all top-level issues
- Assigns hierarchical child IDs (bd-a3f8e9a2.1) for epic children
- Updates all references (dependencies, comments, external refs)
- Creates mapping file for reference
- Validates all relationships are intact

Use --dry-run to preview changes before applying.`,
	Run: func(cmd *cobra.Command, _ []string) {
		dryRun, _ := cmd.Flags().GetBool("dry-run")

		ctx := context.Background()

		// Find database
		dbPath := beads.FindDatabasePath()
		if dbPath == "" {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"error":   "no_database",
					"message": "No beads database found. Run 'bd init' first.",
				})
			} else {
				fmt.Fprintf(os.Stderr, "Error: no beads database found\n")
				fmt.Fprintf(os.Stderr, "Hint: run 'bd init' to initialize bd\n")
			}
			os.Exit(1)
		}

		// Create backup before migration
		// NOTE(review): the backup is taken before we know whether there is
		// anything to migrate — an empty or already-migrated database still
		// produces a backup file. Confirm this is intended.
		if !dryRun {
			backupPath := strings.TrimSuffix(dbPath, ".db") + ".backup-" + time.Now().Format("20060102-150405") + ".db"
			if err := copyFile(dbPath, backupPath); err != nil {
				if jsonOutput {
					outputJSON(map[string]interface{}{
						"error":   "backup_failed",
						"message": err.Error(),
					})
				} else {
					fmt.Fprintf(os.Stderr, "Error: failed to create backup: %v\n", err)
				}
				os.Exit(1)
			}
			if !jsonOutput {
				color.Green("✓ Created backup: %s\n\n", filepath.Base(backupPath))
			}
		}

		// Open database
		store, err := sqlite.New(dbPath)
		if err != nil {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"error":   "open_failed",
					"message": err.Error(),
				})
			} else {
				fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
			}
			os.Exit(1)
		}
		defer store.Close()

		// Get all issues using SearchIssues with empty query and no filters
		issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
		if err != nil {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"error":   "list_failed",
					"message": err.Error(),
				})
			} else {
				fmt.Fprintf(os.Stderr, "Error: failed to list issues: %v\n", err)
			}
			os.Exit(1)
		}

		if len(issues) == 0 {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"status":  "no_issues",
					"message": "No issues to migrate",
				})
			} else {
				fmt.Println("No issues to migrate")
			}
			return
		}

		// Check if already using hash IDs
		// (Only the first issue is sampled — assumes the database is
		// uniformly sequential or uniformly hashed.)
		if isHashID(issues[0].ID) {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"status":  "already_migrated",
					"message": "Database already uses hash-based IDs",
				})
			} else {
				fmt.Println("Database already uses hash-based IDs")
			}
			return
		}

		// Perform migration
		mapping, err := migrateToHashIDs(ctx, store, issues, dryRun)
		if err != nil {
			if jsonOutput {
				outputJSON(map[string]interface{}{
					"error":   "migration_failed",
					"message": err.Error(),
				})
			} else {
				fmt.Fprintf(os.Stderr, "Error: migration failed: %v\n", err)
			}
			os.Exit(1)
		}

		// Save mapping to file (best-effort: failure only warns, it does not
		// abort — the migration itself has already been applied).
		if !dryRun {
			mappingPath := filepath.Join(filepath.Dir(dbPath), "hash-id-mapping.json")
			if err := saveMappingFile(mappingPath, mapping); err != nil {
				if !jsonOutput {
					color.Yellow("Warning: failed to save mapping file: %v\n", err)
				}
			} else if !jsonOutput {
				color.Green("✓ Saved mapping to: %s\n", filepath.Base(mappingPath))
			}
		}

		// Output results
		if jsonOutput {
			outputJSON(map[string]interface{}{
				"status":          "success",
				"dry_run":         dryRun,
				"issues_migrated": len(mapping),
				"mapping":         mapping,
			})
		} else {
			if dryRun {
				fmt.Println("\nDry run complete - no changes made")
				fmt.Printf("Would migrate %d issues\n\n", len(mapping))
				fmt.Println("Preview of mapping (first 10):")
				count := 0
				// NOTE(review): map iteration order is random, so the
				// preview shows a different sample each run; `new` also
				// shadows the builtin here.
				for old, new := range mapping {
					if count >= 10 {
						fmt.Printf("... and %d more\n", len(mapping)-10)
						break
					}
					fmt.Printf("  %s → %s\n", old, new)
					count++
				}
			} else {
				color.Green("\n✓ Migration complete!\n\n")
				fmt.Printf("Migrated %d issues to hash-based IDs\n", len(mapping))
				fmt.Println("\nNext steps:")
				fmt.Println("  1. Run 'bd export' to update JSONL file")
				fmt.Println("  2. Commit changes to git")
				fmt.Println("  3. Notify team members to pull and re-initialize")
			}
		}
	},
}
|
||||
|
||||
// migrateToHashIDs performs the actual migration.
//
// It computes an old→new ID mapping for every issue in issues and, unless
// dryRun is true, rewrites each issue (including sequential-ID references in
// its text fields) in store via UpdateIssueID. The returned map is
// oldID → newID; on error the database may be partially migrated (no
// transaction is visible at this layer — verify UpdateIssueID semantics).
//
// NOTE(review): top-level hash IDs are derived deterministically from
// title/description/created-at (see generateHashIDForIssue); two issues with
// identical content and creation timestamp would collide — confirm upstream
// uniqueness guarantees.
func migrateToHashIDs(ctx context.Context, store *sqlite.SQLiteStorage, issues []*types.Issue, dryRun bool) (map[string]string, error) {
	// Build dependency graph to determine top-level vs child issues
	parentMap := make(map[string]string) // child ID → parent ID

	// Get all dependencies to find parent-child relationships
	for _, issue := range issues {
		deps, err := store.GetDependencyRecords(ctx, issue.ID)
		if err != nil {
			return nil, fmt.Errorf("failed to get dependencies for %s: %w", issue.ID, err)
		}

		for _, dep := range deps {
			if dep.Type == types.DepParentChild {
				// issue depends on parent
				parentMap[issue.ID] = dep.DependsOnID
			}
		}
	}

	// Get prefix from config or use default
	prefix, err := store.GetConfig(ctx, "issue_prefix")
	if err != nil || prefix == "" {
		prefix = "bd"
	}

	// Generate mapping: old ID → new hash ID
	mapping := make(map[string]string)
	childCounters := make(map[string]int) // parent hash ID → next child number

	// First pass: generate hash IDs for top-level issues (no parent)
	for _, issue := range issues {
		if _, hasParent := parentMap[issue.ID]; !hasParent {
			// Top-level issue - generate hash ID
			hashID := generateHashIDForIssue(prefix, issue)
			mapping[issue.ID] = hashID
		}
	}

	// Second pass: assign hierarchical IDs to child issues
	// NOTE(review): a child whose parent is itself a child (a grandchild) is
	// only mapped if its parent happened to be processed in an earlier
	// iteration of this loop; otherwise the "not yet mapped" error below
	// fires. Confirm nesting depth is at most one level.
	for _, issue := range issues {
		if parentID, hasParent := parentMap[issue.ID]; hasParent {
			// Child issue - use parent's hash ID + sequential number
			parentHashID, ok := mapping[parentID]
			if !ok {
				return nil, fmt.Errorf("parent %s not yet mapped for child %s", parentID, issue.ID)
			}

			// Get next child number for this parent
			childNum := childCounters[parentHashID] + 1
			childCounters[parentHashID] = childNum

			// Assign hierarchical ID
			mapping[issue.ID] = fmt.Sprintf("%s.%d", parentHashID, childNum)
		}
	}

	if dryRun {
		return mapping, nil
	}

	// Apply the migration
	// UpdateIssueID handles updating the issue, dependencies, comments, events, labels, and dirty_issues
	// We need to also update text references in descriptions, notes, design, acceptance criteria

	// Sort issues by ID to process parents before children
	// NOTE(review): this is a lexicographic sort ("bd-10" < "bd-2"), not a
	// numeric one, and string order does not by itself guarantee that
	// parents precede children. Verify UpdateIssueID tolerates children
	// being renamed before their parents.
	sort.Slice(issues, func(i, j int) bool {
		return issues[i].ID < issues[j].ID
	})

	// Update all issues
	for _, issue := range issues {
		newID := mapping[issue.ID]

		// Update text references in this issue
		issue.Description = replaceIDReferences(issue.Description, mapping)
		if issue.Design != "" {
			issue.Design = replaceIDReferences(issue.Design, mapping)
		}
		if issue.Notes != "" {
			issue.Notes = replaceIDReferences(issue.Notes, mapping)
		}
		if issue.AcceptanceCriteria != "" {
			issue.AcceptanceCriteria = replaceIDReferences(issue.AcceptanceCriteria, mapping)
		}
		if issue.ExternalRef != nil {
			updated := replaceIDReferences(*issue.ExternalRef, mapping)
			issue.ExternalRef = &updated
		}

		// Use UpdateIssueID to change the primary key and cascade to all foreign keys
		// This method handles dependencies, comments, events, labels, and dirty_issues
		oldID := issue.ID
		if err := store.UpdateIssueID(ctx, oldID, newID, issue, "migration"); err != nil {
			return nil, fmt.Errorf("failed to update issue %s → %s: %w", oldID, newID, err)
		}
	}

	return mapping, nil
}
|
||||
|
||||
// generateHashIDForIssue generates a hash-based ID for an issue
|
||||
func generateHashIDForIssue(prefix string, issue *types.Issue) string {
|
||||
// Use the same algorithm as generateHashID in sqlite.go
|
||||
// Use "system" as the actor for migration to ensure deterministic IDs
|
||||
content := fmt.Sprintf("%s|%s|%s|%d|%d",
|
||||
issue.Title,
|
||||
issue.Description,
|
||||
"system", // Use consistent actor for migration
|
||||
issue.CreatedAt.UnixNano(),
|
||||
0, // nonce
|
||||
)
|
||||
|
||||
hash := sha256Hash(content)
|
||||
shortHash := hash[:8] // First 8 hex chars
|
||||
|
||||
return fmt.Sprintf("%s-%s", prefix, shortHash)
|
||||
}
|
||||
|
||||
// sha256Hash returns the first 8 hexadecimal characters (4 bytes) of the
// SHA-256 digest of content.
func sha256Hash(content string) string {
	const shortBytes = 4 // 4 bytes encode to 8 hex characters

	digest := sha256.Sum256([]byte(content))
	return hex.EncodeToString(digest[:shortBytes])
}
|
||||
|
||||
// seqIDPattern matches sequential issue references like "bd-123" or
// "bd-123.4" (hierarchical). Compiled once at package scope because
// replaceIDReferences runs on every text field of every migrated issue —
// compiling per call was pure overhead.
//
// NOTE(review): the "bd" prefix is hardcoded here even though the issue
// prefix is configurable (store config "issue_prefix") — confirm whether
// databases with a non-default prefix need their text references migrated.
var seqIDPattern = regexp.MustCompile(`\bbd-\d+(?:\.\d+)*\b`)

// replaceIDReferences replaces all old sequential-ID references in text with
// their new hash IDs from mapping. References not present in mapping are
// left unchanged.
func replaceIDReferences(text string, mapping map[string]string) string {
	return seqIDPattern.ReplaceAllStringFunc(text, func(match string) string {
		if newID, ok := mapping[match]; ok {
			return newID
		}
		return match // keep unchanged if not in mapping
	})
}
|
||||
|
||||
// isHashID reports whether id looks like a hash-based issue ID
// ("bd-a3f8e9") rather than a sequential one ("bd-42").
//
// Heuristic: sequential suffixes are all digits, while hex hash suffixes in
// practice contain at least one of the letters a-f.
//
// NOTE(review): a hash whose 8 hex chars happen to be all digits (e.g.
// "bd-120953") is misclassified as sequential — probably acceptable for
// migration detection, but not a general-purpose check; confirm.
func isHashID(id string) bool {
	parts := strings.SplitN(id, "-", 2)
	if len(parts) != 2 {
		// No prefix-suffix structure at all.
		return false
	}

	suffix := parts[1]
	if len(suffix) == 0 {
		return false
	}

	// Previously this compiled regexp.MustCompile(`[a-f]`) on every call;
	// ContainsAny is equivalent for a single character class and avoids the
	// per-invocation compile.
	return strings.ContainsAny(suffix, "abcdef")
}
|
||||
|
||||
// saveMappingFile saves the ID mapping to a JSON file
|
||||
func saveMappingFile(path string, mapping map[string]string) error {
|
||||
// Convert to sorted array for readability
|
||||
type mappingEntry struct {
|
||||
OldID string `json:"old_id"`
|
||||
NewID string `json:"new_id"`
|
||||
}
|
||||
|
||||
entries := make([]mappingEntry, 0, len(mapping))
|
||||
for old, new := range mapping {
|
||||
entries = append(entries, mappingEntry{
|
||||
OldID: old,
|
||||
NewID: new,
|
||||
})
|
||||
}
|
||||
|
||||
// Sort by old ID for readability
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
return entries[i].OldID < entries[j].OldID
|
||||
})
|
||||
|
||||
data, err := json.MarshalIndent(map[string]interface{}{
|
||||
"migrated_at": time.Now().Format(time.RFC3339),
|
||||
"count": len(entries),
|
||||
"mapping": entries,
|
||||
}, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(path, data, 0644)
|
||||
}
|
||||
|
||||
// copyFile copies a file from src to dst
|
||||
func copyFile(src, dst string) error {
|
||||
data, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(dst, data, 0644)
|
||||
}
|
||||
|
||||
// init registers the migrate-hash-ids command (and its --dry-run flag) on
// the root bd command at package load time.
func init() {
	migrateHashIDsCmd.Flags().Bool("dry-run", false, "Show what would be done without making changes")
	rootCmd.AddCommand(migrateHashIDsCmd)
}
|
||||
296
cmd/bd/migrate_hash_ids_test.go
Normal file
296
cmd/bd/migrate_hash_ids_test.go
Normal file
@@ -0,0 +1,296 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// TestMigrateHashIDs exercises the sequential-to-hash ID migration end to
// end: it seeds a database with sequential IDs ("bd-1", "bd-2") and a
// blocking dependency, checks that a dry run yields a complete old->new
// mapping of hash-style IDs, then runs the real migration and verifies that
// issue content, in-text ID references, and dependency rows were rewritten.
func TestMigrateHashIDs(t *testing.T) {
	// Create temporary directory for test database
	tmpDir := t.TempDir()
	dbPath := filepath.Join(tmpDir, "test.db")

	// Create test database with sequential IDs
	store, err := sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}

	ctx := context.Background()

	// Set ID prefix config
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}

	// Create test issues with sequential IDs
	issue1 := &types.Issue{
		ID:          "bd-1",
		Title:       "First issue",
		Description: "This is issue bd-1",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue1, "test"); err != nil {
		t.Fatalf("Failed to create issue 1: %v", err)
	}

	// issue2's description mentions both its own ID and bd-1, so the
	// migration's text-reference rewriting can be asserted below.
	issue2 := &types.Issue{
		ID:          "bd-2",
		Title:       "Second issue",
		Description: "This is issue bd-2 which references bd-1",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, issue2, "test"); err != nil {
		t.Fatalf("Failed to create issue 2: %v", err)
	}

	// Create a dependency
	dep := &types.Dependency{
		IssueID:     "bd-2",
		DependsOnID: "bd-1",
		Type:        types.DepBlocks,
	}
	if err := store.AddDependency(ctx, dep, "test"); err != nil {
		t.Fatalf("Failed to add dependency: %v", err)
	}

	// Close store before migration
	store.Close()

	// Test dry run: reopen fresh and run with dryRun=true.
	store, err = sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("Failed to reopen database: %v", err)
	}

	issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to get issues: %v", err)
	}

	mapping, err := migrateToHashIDs(ctx, store, issues, true)
	if err != nil {
		t.Fatalf("Dry run failed: %v", err)
	}

	if len(mapping) != 2 {
		t.Errorf("Expected 2 issues in mapping, got %d", len(mapping))
	}

	// Check mapping contains both IDs
	if _, ok := mapping["bd-1"]; !ok {
		t.Error("Mapping missing bd-1")
	}
	if _, ok := mapping["bd-2"]; !ok {
		t.Error("Mapping missing bd-2")
	}

	// Verify new IDs are hash-based
	for old, new := range mapping {
		if !isHashID(new) {
			t.Errorf("New ID %s for %s is not a hash ID", new, old)
		}
	}

	store.Close()

	// Test actual migration: reopen again so the dry run's connection state
	// cannot leak into the real run.
	store, err = sqlite.New(dbPath)
	if err != nil {
		t.Fatalf("Failed to reopen database: %v", err)
	}
	defer store.Close()

	issues, err = store.SearchIssues(ctx, "", types.IssueFilter{})
	if err != nil {
		t.Fatalf("Failed to get issues: %v", err)
	}

	mapping, err = migrateToHashIDs(ctx, store, issues, false)
	if err != nil {
		t.Fatalf("Migration failed: %v", err)
	}

	// Verify migration
	newID1 := mapping["bd-1"]
	newID2 := mapping["bd-2"]

	// Get migrated issues
	migratedIssue1, err := store.GetIssue(ctx, newID1)
	if err != nil {
		t.Fatalf("Failed to get migrated issue 1: %v", err)
	}

	migratedIssue2, err := store.GetIssue(ctx, newID2)
	if err != nil {
		t.Fatalf("Failed to get migrated issue 2: %v", err)
	}

	// Verify content is preserved
	if migratedIssue1.Title != "First issue" {
		t.Errorf("Issue 1 title changed: %s", migratedIssue1.Title)
	}
	if migratedIssue2.Title != "Second issue" {
		t.Errorf("Issue 2 title changed: %s", migratedIssue2.Title)
	}

	// Verify text reference was updated: every old ID embedded in the
	// description should now be the corresponding new hash ID.
	if migratedIssue2.Description != "This is issue "+newID2+" which references "+newID1 {
		t.Errorf("Text references not updated: %s", migratedIssue2.Description)
	}

	// Verify dependency was updated
	deps, err := store.GetDependencyRecords(ctx, newID2)
	if err != nil {
		t.Fatalf("Failed to get dependencies: %v", err)
	}

	if len(deps) != 1 {
		t.Fatalf("Expected 1 dependency, got %d", len(deps))
	}

	if deps[0].IssueID != newID2 {
		t.Errorf("Dependency issue_id not updated: %s", deps[0].IssueID)
	}
	if deps[0].DependsOnID != newID1 {
		t.Errorf("Dependency depends_on_id not updated: %s", deps[0].DependsOnID)
	}
}
|
||||
|
||||
func TestMigrateHashIDsWithParentChild(t *testing.T) {
|
||||
// Create temporary directory for test database
|
||||
tmpDir := t.TempDir()
|
||||
dbPath := filepath.Join(tmpDir, "test.db")
|
||||
|
||||
// Create test database
|
||||
store, err := sqlite.New(dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Set ID prefix config
|
||||
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
|
||||
t.Fatalf("Failed to set prefix: %v", err)
|
||||
}
|
||||
|
||||
// Create epic (parent)
|
||||
epic := &types.Issue{
|
||||
ID: "bd-1",
|
||||
Title: "Epic issue",
|
||||
Description: "This is an epic",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeEpic,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, epic, "test"); err != nil {
|
||||
t.Fatalf("Failed to create epic: %v", err)
|
||||
}
|
||||
|
||||
// Create child issue
|
||||
child := &types.Issue{
|
||||
ID: "bd-2",
|
||||
Title: "Child issue",
|
||||
Description: "This is a child of bd-1",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, child, "test"); err != nil {
|
||||
t.Fatalf("Failed to create child: %v", err)
|
||||
}
|
||||
|
||||
// Create parent-child dependency
|
||||
dep := &types.Dependency{
|
||||
IssueID: "bd-2",
|
||||
DependsOnID: "bd-1",
|
||||
Type: types.DepParentChild,
|
||||
}
|
||||
if err := store.AddDependency(ctx, dep, "test"); err != nil {
|
||||
t.Fatalf("Failed to add dependency: %v", err)
|
||||
}
|
||||
|
||||
// Migrate
|
||||
issues, err := store.SearchIssues(ctx, "", types.IssueFilter{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get issues: %v", err)
|
||||
}
|
||||
|
||||
mapping, err := migrateToHashIDs(ctx, store, issues, false)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify parent got hash ID
|
||||
newEpicID := mapping["bd-1"]
|
||||
if !isHashID(newEpicID) {
|
||||
t.Errorf("Epic ID is not a hash ID: %s", newEpicID)
|
||||
}
|
||||
|
||||
// Verify child got hierarchical ID (parent.1)
|
||||
newChildID := mapping["bd-2"]
|
||||
expectedChildID := newEpicID + ".1"
|
||||
if newChildID != expectedChildID {
|
||||
t.Errorf("Child ID should be %s, got %s", expectedChildID, newChildID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsHashID(t *testing.T) {
|
||||
tests := []struct {
|
||||
id string
|
||||
expected bool
|
||||
}{
|
||||
{"bd-1", false},
|
||||
{"bd-123", false},
|
||||
{"bd-a3f8e9a2", true},
|
||||
{"bd-abc123", true},
|
||||
{"bd-123abc", true},
|
||||
{"bd-a3f8e9a2.1", true},
|
||||
{"bd-a3f8e9a2.1.2", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
result := isHashID(tt.id)
|
||||
if result != tt.expected {
|
||||
t.Errorf("isHashID(%s) = %v, want %v", tt.id, result, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
src := filepath.Join(tmpDir, "source.txt")
|
||||
dst := filepath.Join(tmpDir, "dest.txt")
|
||||
|
||||
// Write test file
|
||||
content := []byte("test content")
|
||||
if err := os.WriteFile(src, content, 0644); err != nil {
|
||||
t.Fatalf("Failed to write source file: %v", err)
|
||||
}
|
||||
|
||||
// Copy file
|
||||
if err := copyFile(src, dst); err != nil {
|
||||
t.Fatalf("copyFile failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify copy
|
||||
copied, err := os.ReadFile(dst)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read destination file: %v", err)
|
||||
}
|
||||
|
||||
if string(copied) != string(content) {
|
||||
t.Errorf("Content mismatch: got %s, want %s", copied, content)
|
||||
}
|
||||
}
|
||||
@@ -44,6 +44,7 @@ func TestRenumberWithGaps(t *testing.T) {
|
||||
|
||||
for _, tc := range testIssues {
|
||||
issue := &types.Issue{
|
||||
ID: tc.id, // Set explicit ID to simulate gaps
|
||||
Title: tc.title,
|
||||
Description: "Test issue for renumbering",
|
||||
Priority: 1,
|
||||
@@ -53,10 +54,6 @@ func TestRenumberWithGaps(t *testing.T) {
|
||||
if err := testStore.CreateIssue(ctx, issue, "test"); err != nil {
|
||||
t.Fatalf("failed to create issue: %v", err)
|
||||
}
|
||||
// Manually update ID to simulate gaps
|
||||
if err := testStore.UpdateIssueID(ctx, issue.ID, tc.id, issue, "test"); err != nil {
|
||||
t.Fatalf("failed to set issue ID to %s: %v", tc.id, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add a dependency to test that it gets updated
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/beads/internal/rpc"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
var reopenCmd = &cobra.Command{
|
||||
@@ -73,24 +74,30 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
|
||||
}
|
||||
|
||||
for _, id := range args {
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// UpdateIssue automatically clears closed_at when status changes from closed
|
||||
updates := map[string]interface{}{
|
||||
"status": string(types.StatusOpen),
|
||||
}
|
||||
if err := store.UpdateIssue(ctx, id, updates, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error reopening %s: %v\n", id, err)
|
||||
if err := store.UpdateIssue(ctx, fullID, updates, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error reopening %s: %v\n", fullID, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add reason as a comment if provided
|
||||
if reason != "" {
|
||||
if err := store.AddComment(ctx, id, actor, reason); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to add comment to %s: %v\n", id, err)
|
||||
if err := store.AddComment(ctx, fullID, actor, reason); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to add comment to %s: %v\n", fullID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if jsonOutput {
|
||||
issue, _ := store.GetIssue(ctx, id)
|
||||
issue, _ := store.GetIssue(ctx, fullID)
|
||||
if issue != nil {
|
||||
reopenedIssues = append(reopenedIssues, issue)
|
||||
}
|
||||
@@ -100,7 +107,7 @@ This is more explicit than 'bd update --status open' and emits a Reopened event.
|
||||
if reason != "" {
|
||||
reasonMsg = ": " + reason
|
||||
}
|
||||
fmt.Printf("%s Reopened %s%s\n", blue("↻"), id, reasonMsg)
|
||||
fmt.Printf("%s Reopened %s%s\n", blue("↻"), fullID, reasonMsg)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/steveyegge/beads/internal/rpc"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
"github.com/steveyegge/beads/internal/utils"
|
||||
)
|
||||
|
||||
var showCmd = &cobra.Command{
|
||||
@@ -160,13 +161,19 @@ var showCmd = &cobra.Command{
|
||||
ctx := context.Background()
|
||||
allDetails := []interface{}{}
|
||||
for idx, id := range args {
|
||||
issue, err := store.GetIssue(ctx, id)
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error fetching %s: %v\n", id, err)
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
|
||||
issue, err := store.GetIssue(ctx, fullID)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error fetching %s: %v\n", fullID, err)
|
||||
continue
|
||||
}
|
||||
if issue == nil {
|
||||
fmt.Fprintf(os.Stderr, "Issue %s not found\n", id)
|
||||
fmt.Fprintf(os.Stderr, "Issue %s not found\n", fullID)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -412,19 +419,25 @@ var updateCmd = &cobra.Command{
|
||||
ctx := context.Background()
|
||||
updatedIssues := []*types.Issue{}
|
||||
for _, id := range args {
|
||||
if err := store.UpdateIssue(ctx, id, updates, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error updating %s: %v\n", id, err)
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := store.UpdateIssue(ctx, fullID, updates, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error updating %s: %v\n", fullID, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if jsonOutput {
|
||||
issue, _ := store.GetIssue(ctx, id)
|
||||
issue, _ := store.GetIssue(ctx, fullID)
|
||||
if issue != nil {
|
||||
updatedIssues = append(updatedIssues, issue)
|
||||
}
|
||||
} else {
|
||||
green := color.New(color.FgGreen).SprintFunc()
|
||||
fmt.Printf("%s Updated issue: %s\n", green("✓"), id)
|
||||
fmt.Printf("%s Updated issue: %s\n", green("✓"), fullID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -456,6 +469,16 @@ Examples:
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
id := args[0]
|
||||
ctx := context.Background()
|
||||
|
||||
// Resolve partial ID if in direct mode
|
||||
if daemonClient == nil {
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
id = fullID
|
||||
}
|
||||
|
||||
// Determine which field to edit
|
||||
fieldToEdit := "description"
|
||||
@@ -670,18 +693,24 @@ var closeCmd = &cobra.Command{
|
||||
ctx := context.Background()
|
||||
closedIssues := []*types.Issue{}
|
||||
for _, id := range args {
|
||||
if err := store.CloseIssue(ctx, id, reason, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", id, err)
|
||||
fullID, err := utils.ResolvePartialID(ctx, store, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error resolving %s: %v\n", id, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := store.CloseIssue(ctx, fullID, reason, actor); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error closing %s: %v\n", fullID, err)
|
||||
continue
|
||||
}
|
||||
if jsonOutput {
|
||||
issue, _ := store.GetIssue(ctx, id)
|
||||
issue, _ := store.GetIssue(ctx, fullID)
|
||||
if issue != nil {
|
||||
closedIssues = append(closedIssues, issue)
|
||||
}
|
||||
} else {
|
||||
green := color.New(color.FgGreen).SprintFunc()
|
||||
fmt.Printf("%s Closed %s: %s\n", green("✓"), id, reason)
|
||||
fmt.Printf("%s Closed %s: %s\n", green("✓"), fullID, reason)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -827,6 +827,32 @@ func (m *MemoryStorage) ClearDirtyIssuesByID(ctx context.Context, issueIDs []str
|
||||
return nil
|
||||
}
|
||||
|
||||
// ID Generation
|
||||
func (m *MemoryStorage) GetNextChildID(ctx context.Context, parentID string) (string, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Validate parent exists
|
||||
if _, exists := m.issues[parentID]; !exists {
|
||||
return "", fmt.Errorf("parent issue %s does not exist", parentID)
|
||||
}
|
||||
|
||||
// Calculate depth (count dots)
|
||||
depth := strings.Count(parentID, ".")
|
||||
if depth >= 3 {
|
||||
return "", fmt.Errorf("maximum hierarchy depth (3) exceeded for parent %s", parentID)
|
||||
}
|
||||
|
||||
// Get or initialize counter for this parent
|
||||
counter := m.counters[parentID]
|
||||
counter++
|
||||
m.counters[parentID] = counter
|
||||
|
||||
// Format as parentID.counter
|
||||
childID := fmt.Sprintf("%s.%d", parentID, counter)
|
||||
return childID, nil
|
||||
}
|
||||
|
||||
// Config
|
||||
func (m *MemoryStorage) SetConfig(ctx context.Context, key, value string) error {
|
||||
m.mu.Lock()
|
||||
|
||||
203
internal/storage/sqlite/child_id_test.go
Normal file
203
internal/storage/sqlite/child_id_test.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// TestGetNextChildID walks the ID hierarchy top to bottom: it numbers two
// children of a hash-ID parent, then one child at depth 2 and one at depth 3,
// asserting the exact generated IDs, and finally confirms that a fourth
// level is rejected with the depth-limit error. Each child must be created
// before deeper IDs are requested, since GetNextChildID validates that the
// parent row exists.
func TestGetNextChildID(t *testing.T) {
	tmpFile := t.TempDir() + "/test.db"
	defer os.Remove(tmpFile)
	store := newTestStore(t, tmpFile)
	defer store.Close()
	ctx := context.Background()

	// Create a parent issue with hash ID
	parent := &types.Issue{
		ID:          "bd-a3f8e9",
		Title:       "Parent Epic",
		Description: "Parent issue",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeEpic,
	}
	if err := store.CreateIssue(ctx, parent, "test"); err != nil {
		t.Fatalf("failed to create parent: %v", err)
	}

	// Test: Generate first child ID
	childID1, err := store.GetNextChildID(ctx, parent.ID)
	if err != nil {
		t.Fatalf("GetNextChildID failed: %v", err)
	}
	expectedID1 := "bd-a3f8e9.1"
	if childID1 != expectedID1 {
		t.Errorf("expected %s, got %s", expectedID1, childID1)
	}

	// Test: Generate second child ID (sequential)
	childID2, err := store.GetNextChildID(ctx, parent.ID)
	if err != nil {
		t.Fatalf("GetNextChildID failed: %v", err)
	}
	expectedID2 := "bd-a3f8e9.2"
	if childID2 != expectedID2 {
		t.Errorf("expected %s, got %s", expectedID2, childID2)
	}

	// Create the first child and test nested hierarchy
	child1 := &types.Issue{
		ID:          childID1,
		Title:       "Child Task 1",
		Description: "First child",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, child1, "test"); err != nil {
		t.Fatalf("failed to create child: %v", err)
	}

	// Test: Generate nested child (depth 2)
	nestedID1, err := store.GetNextChildID(ctx, childID1)
	if err != nil {
		t.Fatalf("GetNextChildID failed for nested: %v", err)
	}
	expectedNested1 := "bd-a3f8e9.1.1"
	if nestedID1 != expectedNested1 {
		t.Errorf("expected %s, got %s", expectedNested1, nestedID1)
	}

	// Create the nested child
	nested1 := &types.Issue{
		ID:          nestedID1,
		Title:       "Nested Task",
		Description: "Nested child",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, nested1, "test"); err != nil {
		t.Fatalf("failed to create nested child: %v", err)
	}

	// Test: Generate third level (depth 3, maximum)
	deepID1, err := store.GetNextChildID(ctx, nestedID1)
	if err != nil {
		t.Fatalf("GetNextChildID failed for depth 3: %v", err)
	}
	expectedDeep1 := "bd-a3f8e9.1.1.1"
	if deepID1 != expectedDeep1 {
		t.Errorf("expected %s, got %s", expectedDeep1, deepID1)
	}

	// Create the deep child
	deep1 := &types.Issue{
		ID:          deepID1,
		Title:       "Deep Task",
		Description: "Third level",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}
	if err := store.CreateIssue(ctx, deep1, "test"); err != nil {
		t.Fatalf("failed to create deep child: %v", err)
	}

	// Test: Attempt to create fourth level (should fail)
	_, err = store.GetNextChildID(ctx, deepID1)
	if err == nil {
		t.Errorf("expected error for depth 4, got nil")
	}
	// The exact error string is asserted so CLI error output stays stable.
	if err != nil && err.Error() != "maximum hierarchy depth (3) exceeded for parent bd-a3f8e9.1.1.1" {
		t.Errorf("unexpected error message: %v", err)
	}
}
|
||||
|
||||
func TestGetNextChildID_ParentNotExists(t *testing.T) {
|
||||
tmpFile := t.TempDir() + "/test.db"
|
||||
defer os.Remove(tmpFile)
|
||||
store := newTestStore(t, tmpFile)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
// Test: Attempt to get child ID for non-existent parent
|
||||
_, err := store.GetNextChildID(ctx, "bd-nonexistent")
|
||||
if err == nil {
|
||||
t.Errorf("expected error for non-existent parent, got nil")
|
||||
}
|
||||
if err != nil && err.Error() != "parent issue bd-nonexistent does not exist" {
|
||||
t.Errorf("unexpected error message: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateIssue_HierarchicalID(t *testing.T) {
|
||||
tmpFile := t.TempDir() + "/test.db"
|
||||
defer os.Remove(tmpFile)
|
||||
store := newTestStore(t, tmpFile)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
// Create parent
|
||||
parent := &types.Issue{
|
||||
ID: "bd-parent1",
|
||||
Title: "Parent",
|
||||
Description: "Parent issue",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeEpic,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, parent, "test"); err != nil {
|
||||
t.Fatalf("failed to create parent: %v", err)
|
||||
}
|
||||
|
||||
// Test: Create child with explicit hierarchical ID
|
||||
child := &types.Issue{
|
||||
ID: "bd-parent1.1",
|
||||
Title: "Child",
|
||||
Description: "Child issue",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, child, "test"); err != nil {
|
||||
t.Fatalf("failed to create child: %v", err)
|
||||
}
|
||||
|
||||
// Verify child was created
|
||||
retrieved, err := store.GetIssue(ctx, child.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to retrieve child: %v", err)
|
||||
}
|
||||
if retrieved.ID != child.ID {
|
||||
t.Errorf("expected ID %s, got %s", child.ID, retrieved.ID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateIssue_HierarchicalID_ParentNotExists(t *testing.T) {
|
||||
tmpFile := t.TempDir() + "/test.db"
|
||||
defer os.Remove(tmpFile)
|
||||
store := newTestStore(t, tmpFile)
|
||||
defer store.Close()
|
||||
ctx := context.Background()
|
||||
|
||||
// Test: Attempt to create child without parent
|
||||
child := &types.Issue{
|
||||
ID: "bd-nonexistent.1",
|
||||
Title: "Child",
|
||||
Description: "Child issue",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
err := store.CreateIssue(ctx, child, "test")
|
||||
if err == nil {
|
||||
t.Errorf("expected error for child without parent, got nil")
|
||||
}
|
||||
if err != nil && err.Error() != "parent issue bd-nonexistent does not exist" {
|
||||
t.Errorf("unexpected error message: %v", err)
|
||||
}
|
||||
}
|
||||
198
internal/storage/sqlite/hash_id_test.go
Normal file
198
internal/storage/sqlite/hash_id_test.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// TestHashIDGeneration verifies that with id_mode set to "hash", an issue
// created without an explicit ID is assigned one of the form
// "bd-" + 8 hex chars (11 bytes total), and that the issue is retrievable
// under that generated ID.
func TestHashIDGeneration(t *testing.T) {
	store, err := New(":memory:")
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer func() { _ = store.Close() }()

	ctx := context.Background()

	// Set up database with prefix and hash mode
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}
	if err := store.SetConfig(ctx, "id_mode", "hash"); err != nil {
		t.Fatalf("Failed to set id_mode: %v", err)
	}

	// Create an issue - should get a hash ID (note: ID is left empty here,
	// so generation happens inside CreateIssue).
	issue := &types.Issue{
		Title:       "Test Issue",
		Description: "Test description",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}

	if err := store.CreateIssue(ctx, issue, "test-actor"); err != nil {
		t.Fatalf("Failed to create issue: %v", err)
	}

	// Verify hash ID format: bd-<8 hex chars>
	if len(issue.ID) != 11 { // "bd-" (3) + 8 hex chars = 11
		t.Errorf("Expected ID length 11, got %d: %s", len(issue.ID), issue.ID)
	}

	if issue.ID[:3] != "bd-" {
		t.Errorf("Expected ID to start with 'bd-', got: %s", issue.ID)
	}

	// Verify we can retrieve the issue
	retrieved, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("Failed to get issue: %v", err)
	}

	if retrieved.Title != issue.Title {
		t.Errorf("Expected title %q, got %q", issue.Title, retrieved.Title)
	}
}
|
||||
|
||||
func TestHashIDDeterministic(t *testing.T) {
|
||||
// Same inputs should produce same hash (with same nonce)
|
||||
prefix := "bd"
|
||||
title := "Test Issue"
|
||||
description := "Test description"
|
||||
actor := "test-actor"
|
||||
timestamp := time.Now()
|
||||
|
||||
id1 := generateHashID(prefix, title, description, actor, timestamp, 0)
|
||||
id2 := generateHashID(prefix, title, description, actor, timestamp, 0)
|
||||
|
||||
if id1 != id2 {
|
||||
t.Errorf("Expected same hash for same inputs, got %s and %s", id1, id2)
|
||||
}
|
||||
}
|
||||
|
||||
// TestHashIDCollisionHandling creates two issues with identical title,
// description, and creation timestamp, and asserts they still end up with
// distinct IDs (the generator bumps a nonce on collision) and are both
// individually retrievable.
//
// NOTE(review): unlike the other hash tests, id_mode is not set to "hash"
// here — presumably collision handling is exercised regardless of the
// configured mode; confirm against CreateIssue's ID-generation path.
func TestHashIDCollisionHandling(t *testing.T) {
	store, err := New(":memory:")
	if err != nil {
		t.Fatalf("Failed to create storage: %v", err)
	}
	defer func() { _ = store.Close() }()

	ctx := context.Background()

	// Set up database with prefix
	if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
		t.Fatalf("Failed to set prefix: %v", err)
	}

	// Create first issue
	issue1 := &types.Issue{
		Title:       "Duplicate Title",
		Description: "Same description",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
	}

	if err := store.CreateIssue(ctx, issue1, "actor"); err != nil {
		t.Fatalf("Failed to create first issue: %v", err)
	}

	// Create second issue with same content at same time
	// This should get a different hash due to nonce increment
	issue2 := &types.Issue{
		Title:       "Duplicate Title",
		Description: "Same description",
		Status:      types.StatusOpen,
		Priority:    1,
		IssueType:   types.TypeTask,
		CreatedAt:   issue1.CreatedAt, // Force same timestamp
	}

	if err := store.CreateIssue(ctx, issue2, "actor"); err != nil {
		t.Fatalf("Failed to create second issue: %v", err)
	}

	// Verify both issues exist with different IDs
	if issue1.ID == issue2.ID {
		t.Errorf("Expected different IDs for duplicate content, both got: %s", issue1.ID)
	}

	// Verify both can be retrieved
	_, err = store.GetIssue(ctx, issue1.ID)
	if err != nil {
		t.Errorf("Failed to retrieve first issue: %v", err)
	}

	_, err = store.GetIssue(ctx, issue2.ID)
	if err != nil {
		t.Errorf("Failed to retrieve second issue: %v", err)
	}
}
|
||||
|
||||
func TestHashIDBatchCreation(t *testing.T) {
|
||||
store, err := New(":memory:")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create storage: %v", err)
|
||||
}
|
||||
defer func() { _ = store.Close() }()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Set up database with prefix and hash mode
|
||||
if err := store.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
|
||||
t.Fatalf("Failed to set prefix: %v", err)
|
||||
}
|
||||
if err := store.SetConfig(ctx, "id_mode", "hash"); err != nil {
|
||||
t.Fatalf("Failed to set id_mode: %v", err)
|
||||
}
|
||||
|
||||
// Create multiple issues with similar content
|
||||
issues := []*types.Issue{
|
||||
{
|
||||
Title: "Issue 1",
|
||||
Description: "Description",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
},
|
||||
{
|
||||
Title: "Issue 1", // Same title
|
||||
Description: "Description",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
},
|
||||
{
|
||||
Title: "Issue 2",
|
||||
Description: "Description",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
},
|
||||
}
|
||||
|
||||
if err := store.CreateIssues(ctx, issues, "actor"); err != nil {
|
||||
t.Fatalf("Failed to create issues: %v", err)
|
||||
}
|
||||
|
||||
// Verify all issues got unique IDs
|
||||
ids := make(map[string]bool)
|
||||
for _, issue := range issues {
|
||||
if ids[issue.ID] {
|
||||
t.Errorf("Duplicate ID found: %s", issue.ID)
|
||||
}
|
||||
ids[issue.ID] = true
|
||||
|
||||
// Verify hash ID format
|
||||
if len(issue.ID) != 11 {
|
||||
t.Errorf("Expected ID length 11, got %d: %s", len(issue.ID), issue.ID)
|
||||
}
|
||||
if issue.ID[:3] != "bd-" {
|
||||
t.Errorf("Expected ID to start with 'bd-', got: %s", issue.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,9 @@ package sqlite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -663,6 +665,37 @@ func (s *SQLiteStorage) getNextChildNumber(ctx context.Context, parentID string)
|
||||
return nextChild, nil
|
||||
}
|
||||
|
||||
// GetNextChildID generates the next hierarchical child ID for a given parent
|
||||
// Returns formatted ID as parentID.{counter} (e.g., bd-a3f8e9.1 or bd-a3f8e9.1.5)
|
||||
// Works at any depth (max 3 levels)
|
||||
func (s *SQLiteStorage) GetNextChildID(ctx context.Context, parentID string) (string, error) {
|
||||
// Validate parent exists
|
||||
var count int
|
||||
err := s.db.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, parentID).Scan(&count)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to check parent existence: %w", err)
|
||||
}
|
||||
if count == 0 {
|
||||
return "", fmt.Errorf("parent issue %s does not exist", parentID)
|
||||
}
|
||||
|
||||
// Calculate current depth by counting dots
|
||||
depth := strings.Count(parentID, ".")
|
||||
if depth >= 3 {
|
||||
return "", fmt.Errorf("maximum hierarchy depth (3) exceeded for parent %s", parentID)
|
||||
}
|
||||
|
||||
// Get next child number atomically
|
||||
nextNum, err := s.getNextChildNumber(ctx, parentID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Format as parentID.counter
|
||||
childID := fmt.Sprintf("%s.%d", parentID, nextNum)
|
||||
return childID, nil
|
||||
}
|
||||
|
||||
// SyncAllCounters synchronizes all ID counters based on existing issues in the database
|
||||
// This scans all issues and updates counters to prevent ID collisions with auto-generated IDs
|
||||
// Note: This unconditionally overwrites counter values, allowing them to decrease after deletions
|
||||
@@ -704,6 +737,61 @@ func (s *SQLiteStorage) SyncAllCounters(ctx context.Context) error {
|
||||
// The database should ALWAYS have issue_prefix config set explicitly (by 'bd init' or auto-import)
|
||||
// Never derive prefix from filename - it leads to silent data corruption
|
||||
|
||||
// getIDMode reports the configured ID generation mode ("sequential" or
// "hash") from the config table. Any lookup failure or empty value falls
// back to "sequential", preserving behavior for databases created before
// hash IDs existed.
func getIDMode(ctx context.Context, conn *sql.Conn) string {
	var mode string
	row := conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "id_mode")
	if err := row.Scan(&mode); err != nil || mode == "" {
		// Missing row and query errors are treated alike: default mode.
		return "sequential"
	}
	return mode
}
|
||||
|
||||
// nextSequentialID atomically increments and returns the next sequential ID number.
// Must be called inside an IMMEDIATE transaction on the same connection.
// Implements lazy initialization: if counter doesn't exist, initializes from existing issues.
func nextSequentialID(ctx context.Context, conn *sql.Conn, prefix string) (int, error) {
	var nextID int

	// The query handles three cases atomically:
	// 1. Counter doesn't exist: initialize from MAX(existing IDs) or 1, then return that + 1
	// 2. Counter exists but lower than max ID: update to max and return max + 1
	// 3. Counter exists and correct: just increment and return next ID
	//
	// NOTE(review): the ON CONFLICT branch only does last_id + 1 — it does
	// not re-sync to MAX(existing IDs), so case 2 as described above appears
	// to rely on counters being synced elsewhere; confirm against SyncAllCounters.
	//
	// The SELECT filters candidate rows to sequential IDs: LIKE pins the
	// "<prefix>-" shape, GLOB '[0-9]*' requires the suffix to start with a
	// digit, and instr(...) = 0 excludes hierarchical child IDs (which
	// contain '.') so they never inflate the counter.
	err := conn.QueryRowContext(ctx, `
		INSERT INTO issue_counters (prefix, last_id)
		SELECT ?, COALESCE(MAX(CAST(substr(id, LENGTH(?) + 2) AS INTEGER)), 0) + 1
		FROM issues
		WHERE id LIKE ? || '-%'
		AND substr(id, LENGTH(?) + 2) GLOB '[0-9]*'
		AND instr(substr(id, LENGTH(?) + 2), '.') = 0
		ON CONFLICT(prefix) DO UPDATE SET
		last_id = last_id + 1
		RETURNING last_id
	`, prefix, prefix, prefix, prefix, prefix).Scan(&nextID)
	if err != nil {
		return 0, fmt.Errorf("failed to generate next sequential ID for prefix %s: %w", prefix, err)
	}
	return nextID, nil
}
|
||||
|
||||
// generateHashID creates a hash-based ID for a top-level issue.
|
||||
// For child issues, use the parent ID with a numeric suffix (e.g., "bd-a3f8e9a2.1").
|
||||
// Includes a nonce parameter to handle collisions.
|
||||
func generateHashID(prefix, title, description, creator string, timestamp time.Time, nonce int) string {
|
||||
// Combine inputs into a stable content string
|
||||
// Include nonce to handle hash collisions
|
||||
content := fmt.Sprintf("%s|%s|%s|%d|%d", title, description, creator, timestamp.UnixNano(), nonce)
|
||||
|
||||
// Hash the content
|
||||
hash := sha256.Sum256([]byte(content))
|
||||
|
||||
// Use first 4 bytes (8 hex chars) for short, readable IDs
|
||||
shortHash := hex.EncodeToString(hash[:4])
|
||||
|
||||
return fmt.Sprintf("%s-%s", prefix, shortHash)
|
||||
}
|
||||
|
||||
// CreateIssue creates a new issue
|
||||
func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, actor string) error {
|
||||
// Validate issue before creating
|
||||
@@ -763,48 +851,64 @@ func (s *SQLiteStorage) CreateIssue(ctx context.Context, issue *types.Issue, act
|
||||
|
||||
// Generate ID if not set (inside transaction to prevent race conditions)
|
||||
if issue.ID == "" {
|
||||
// Atomically initialize counter (if needed) and get next ID (within transaction)
|
||||
// This ensures the counter starts from the max existing ID, not 1
|
||||
// CRITICAL: We rely on BEGIN IMMEDIATE above to serialize this operation across processes
|
||||
//
|
||||
// The query works as follows:
|
||||
// 1. Try to INSERT with last_id = MAX(existing IDs) or 1 if none exist
|
||||
// 2. ON CONFLICT: update last_id to MAX(existing last_id, new calculated last_id) + 1
|
||||
// 3. RETURNING gives us the final incremented value
|
||||
//
|
||||
// This atomically handles three cases:
|
||||
// - Counter doesn't exist: initialize from existing issues and return next ID
|
||||
// - Counter exists but lower than max ID: update to max and return next ID
|
||||
// - Counter exists and correct: just increment and return next ID
|
||||
var nextID int
|
||||
err = conn.QueryRowContext(ctx, `
|
||||
INSERT INTO issue_counters (prefix, last_id)
|
||||
SELECT ?, COALESCE(MAX(CAST(substr(id, LENGTH(?) + 2) AS INTEGER)), 0) + 1
|
||||
FROM issues
|
||||
WHERE id LIKE ? || '-%'
|
||||
AND substr(id, LENGTH(?) + 2) GLOB '[0-9]*'
|
||||
ON CONFLICT(prefix) DO UPDATE SET
|
||||
last_id = MAX(
|
||||
last_id,
|
||||
(SELECT COALESCE(MAX(CAST(substr(id, LENGTH(?) + 2) AS INTEGER)), 0)
|
||||
FROM issues
|
||||
WHERE id LIKE ? || '-%'
|
||||
AND substr(id, LENGTH(?) + 2) GLOB '[0-9]*')
|
||||
) + 1
|
||||
RETURNING last_id
|
||||
`, prefix, prefix, prefix, prefix, prefix, prefix, prefix).Scan(&nextID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate next ID for prefix %s: %w", prefix, err)
|
||||
// Check id_mode config to determine ID generation strategy
|
||||
idMode := getIDMode(ctx, conn)
|
||||
|
||||
if idMode == "hash" {
|
||||
// Generate hash-based ID with collision detection (bd-168)
|
||||
// Try up to 10 times with different nonces to avoid collisions
|
||||
var err error
|
||||
for nonce := 0; nonce < 10; nonce++ {
|
||||
candidate := generateHashID(prefix, issue.Title, issue.Description, actor, issue.CreatedAt, nonce)
|
||||
|
||||
// Check if this ID already exists
|
||||
var count int
|
||||
err = conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, candidate).Scan(&count)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check for ID collision: %w", err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
issue.ID = candidate
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if issue.ID == "" {
|
||||
return fmt.Errorf("failed to generate unique ID after 10 attempts")
|
||||
}
|
||||
} else {
|
||||
// Default: generate sequential ID using counter
|
||||
nextID, err := nextSequentialID(ctx, conn, prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
issue.ID = fmt.Sprintf("%s-%d", prefix, nextID)
|
||||
}
|
||||
|
||||
issue.ID = fmt.Sprintf("%s-%d", prefix, nextID)
|
||||
} else {
|
||||
// Validate that explicitly provided ID matches the configured prefix (bd-177)
|
||||
// This prevents wrong-prefix bugs when IDs are manually specified
|
||||
// Support both top-level (bd-a3f8e9) and hierarchical (bd-a3f8e9.1) IDs
|
||||
expectedPrefix := prefix + "-"
|
||||
if !strings.HasPrefix(issue.ID, expectedPrefix) {
|
||||
return fmt.Errorf("issue ID '%s' does not match configured prefix '%s'", issue.ID, prefix)
|
||||
}
|
||||
|
||||
// For hierarchical IDs (bd-a3f8e9.1), validate parent exists
|
||||
if strings.Contains(issue.ID, ".") {
|
||||
// Extract parent ID (everything before the last dot)
|
||||
lastDot := strings.LastIndex(issue.ID, ".")
|
||||
parentID := issue.ID[:lastDot]
|
||||
|
||||
var parentCount int
|
||||
err = conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, parentID).Scan(&parentCount)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check parent existence: %w", err)
|
||||
}
|
||||
if parentCount == 0 {
|
||||
return fmt.Errorf("parent issue %s does not exist", parentID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Insert issue
|
||||
@@ -882,7 +986,7 @@ func validateBatchIssues(issues []*types.Issue) error {
|
||||
}
|
||||
|
||||
// generateBatchIDs generates IDs for all issues that need them atomically
|
||||
func generateBatchIDs(ctx context.Context, conn *sql.Conn, issues []*types.Issue, dbPath string) error {
|
||||
func generateBatchIDs(ctx context.Context, conn *sql.Conn, issues []*types.Issue, actor string) error {
|
||||
// Get prefix from config (needed for both generation and validation)
|
||||
var prefix string
|
||||
err := conn.QueryRowContext(ctx, `SELECT value FROM config WHERE key = ?`, "issue_prefix").Scan(&prefix)
|
||||
@@ -893,54 +997,73 @@ func generateBatchIDs(ctx context.Context, conn *sql.Conn, issues []*types.Issue
|
||||
return fmt.Errorf("failed to get config: %w", err)
|
||||
}
|
||||
|
||||
// Count how many issues need IDs and validate explicitly provided IDs
|
||||
needIDCount := 0
|
||||
// Check id_mode config to determine ID generation strategy
|
||||
idMode := getIDMode(ctx, conn)
|
||||
|
||||
// Validate explicitly provided IDs and generate IDs for those that need them
|
||||
expectedPrefix := prefix + "-"
|
||||
for _, issue := range issues {
|
||||
if issue.ID == "" {
|
||||
needIDCount++
|
||||
} else {
|
||||
usedIDs := make(map[string]bool)
|
||||
|
||||
// First pass: record explicitly provided IDs
|
||||
for i := range issues {
|
||||
if issues[i].ID != "" {
|
||||
// Validate that explicitly provided ID matches the configured prefix (bd-177)
|
||||
if !strings.HasPrefix(issue.ID, expectedPrefix) {
|
||||
return fmt.Errorf("issue ID '%s' does not match configured prefix '%s'", issue.ID, prefix)
|
||||
if !strings.HasPrefix(issues[i].ID, expectedPrefix) {
|
||||
return fmt.Errorf("issue ID '%s' does not match configured prefix '%s'", issues[i].ID, prefix)
|
||||
}
|
||||
usedIDs[issues[i].ID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Second pass: generate IDs for issues that need them
|
||||
if idMode == "hash" {
|
||||
// Hash mode: generate with collision detection
|
||||
for i := range issues {
|
||||
if issues[i].ID == "" {
|
||||
var generated bool
|
||||
for nonce := 0; nonce < 10; nonce++ {
|
||||
candidate := generateHashID(prefix, issues[i].Title, issues[i].Description, actor, issues[i].CreatedAt, nonce)
|
||||
|
||||
// Check if this ID is already used in this batch or in the database
|
||||
if usedIDs[candidate] {
|
||||
continue
|
||||
}
|
||||
|
||||
var count int
|
||||
err := conn.QueryRowContext(ctx, `SELECT COUNT(*) FROM issues WHERE id = ?`, candidate).Scan(&count)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check for ID collision: %w", err)
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
issues[i].ID = candidate
|
||||
usedIDs[candidate] = true
|
||||
generated = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !generated {
|
||||
return fmt.Errorf("failed to generate unique ID for issue %d after 10 attempts", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Sequential mode: allocate sequential IDs for all issues that need them
|
||||
for i := range issues {
|
||||
if issues[i].ID == "" {
|
||||
nextID, err := nextSequentialID(ctx, conn, prefix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate sequential ID for issue %d: %w", i, err)
|
||||
}
|
||||
issues[i].ID = fmt.Sprintf("%s-%d", prefix, nextID)
|
||||
usedIDs[issues[i].ID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if needIDCount == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Atomically reserve ID range
|
||||
var nextID int
|
||||
err = conn.QueryRowContext(ctx, `
|
||||
INSERT INTO issue_counters (prefix, last_id)
|
||||
SELECT ?, COALESCE(MAX(CAST(substr(id, LENGTH(?) + 2) AS INTEGER)), 0) + ?
|
||||
FROM issues
|
||||
WHERE id LIKE ? || '-%'
|
||||
AND substr(id, LENGTH(?) + 2) GLOB '[0-9]*'
|
||||
ON CONFLICT(prefix) DO UPDATE SET
|
||||
last_id = MAX(
|
||||
last_id,
|
||||
(SELECT COALESCE(MAX(CAST(substr(id, LENGTH(?) + 2) AS INTEGER)), 0)
|
||||
FROM issues
|
||||
WHERE id LIKE ? || '-%'
|
||||
AND substr(id, LENGTH(?) + 2) GLOB '[0-9]*')
|
||||
) + ?
|
||||
RETURNING last_id
|
||||
`, prefix, prefix, needIDCount, prefix, prefix, prefix, prefix, prefix, needIDCount).Scan(&nextID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate ID range: %w", err)
|
||||
}
|
||||
|
||||
// Assign IDs sequentially from the reserved range and compute content hashes
|
||||
currentID := nextID - needIDCount + 1
|
||||
|
||||
// Compute content hashes
|
||||
for i := range issues {
|
||||
if issues[i].ID == "" {
|
||||
issues[i].ID = fmt.Sprintf("%s-%d", prefix, currentID)
|
||||
currentID++
|
||||
}
|
||||
// Compute content hash if not already set (bd-95)
|
||||
if issues[i].ContentHash == "" {
|
||||
issues[i].ContentHash = issues[i].ComputeContentHash()
|
||||
}
|
||||
@@ -1104,7 +1227,7 @@ func (s *SQLiteStorage) CreateIssues(ctx context.Context, issues []*types.Issue,
|
||||
}()
|
||||
|
||||
// Phase 3: Generate IDs for issues that need them
|
||||
if err := generateBatchIDs(ctx, conn, issues, s.dbPath); err != nil {
|
||||
if err := generateBatchIDs(ctx, conn, issues, actor); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -65,6 +65,9 @@ type Storage interface {
|
||||
GetJSONLFileHash(ctx context.Context) (string, error)
|
||||
SetJSONLFileHash(ctx context.Context, fileHash string) error
|
||||
|
||||
// ID Generation
|
||||
GetNextChildID(ctx context.Context, parentID string) (string, error)
|
||||
|
||||
// Config
|
||||
SetConfig(ctx context.Context, key, value string) error
|
||||
GetConfig(ctx context.Context, key string) (string, error)
|
||||
|
||||
95
internal/utils/id_parser.go
Normal file
95
internal/utils/id_parser.go
Normal file
@@ -0,0 +1,95 @@
|
||||
// Package utils provides utility functions for issue ID parsing and resolution.
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
// ParseIssueID ensures an issue ID has the configured prefix.
// If the input already has the prefix (e.g., "bd-a3f8e9"), returns it as-is.
// If the input lacks the prefix (e.g., "a3f8e9"), adds the configured prefix.
// Works with hierarchical IDs too: "a3f8e9.1.2" → "bd-a3f8e9.1.2"
//
// The prefix is accepted with or without its trailing dash ("bd-" or "bd"):
// the issue_prefix config value is stored dash-less elsewhere in the
// codebase, while some callers pass the dashed form, so both are
// normalized to the dashed form here. An empty prefix falls back to "bd-".
func ParseIssueID(input string, prefix string) string {
	if prefix == "" {
		prefix = "bd-"
	}
	// Normalize so "bd" and "bd-" behave identically.
	if !strings.HasSuffix(prefix, "-") {
		prefix += "-"
	}

	if strings.HasPrefix(input, prefix) {
		return input
	}

	return prefix + input
}
|
||||
|
||||
// ResolvePartialID resolves a potentially partial issue ID to a full ID.
|
||||
// Supports:
|
||||
// - Full IDs: "bd-a3f8e9" or "a3f8e9" → "bd-a3f8e9"
|
||||
// - Partial IDs: "a3f8" → "bd-a3f8e9" (if unique match, requires hash IDs)
|
||||
// - Hierarchical: "a3f8e9.1" → "bd-a3f8e9.1"
|
||||
//
|
||||
// Returns an error if:
|
||||
// - No issue found matching the ID
|
||||
// - Multiple issues match (ambiguous prefix)
|
||||
//
|
||||
// Note: Partial ID matching (shorter prefixes) requires hash-based IDs (bd-165).
|
||||
// For now, this primarily handles prefix-optional input (bd-a3f8e9 vs a3f8e9).
|
||||
func ResolvePartialID(ctx context.Context, store storage.Storage, input string) (string, error) {
|
||||
// Get the configured prefix
|
||||
prefix, err := store.GetConfig(ctx, "issue_prefix")
|
||||
if err != nil || prefix == "" {
|
||||
prefix = "bd-"
|
||||
}
|
||||
|
||||
// Ensure the input has the prefix
|
||||
parsedID := ParseIssueID(input, prefix)
|
||||
|
||||
// First try exact match
|
||||
_, err = store.GetIssue(ctx, parsedID)
|
||||
if err == nil {
|
||||
return parsedID, nil
|
||||
}
|
||||
|
||||
// If exact match failed, try prefix search
|
||||
filter := types.IssueFilter{}
|
||||
|
||||
issues, err := store.SearchIssues(ctx, "", filter)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to search issues: %w", err)
|
||||
}
|
||||
|
||||
var matches []string
|
||||
for _, issue := range issues {
|
||||
if strings.HasPrefix(issue.ID, parsedID) {
|
||||
matches = append(matches, issue.ID)
|
||||
}
|
||||
}
|
||||
|
||||
if len(matches) == 0 {
|
||||
return "", fmt.Errorf("no issue found matching %q", input)
|
||||
}
|
||||
|
||||
if len(matches) > 1 {
|
||||
return "", fmt.Errorf("ambiguous ID %q matches %d issues: %v\nUse more characters to disambiguate", input, len(matches), matches)
|
||||
}
|
||||
|
||||
return matches[0], nil
|
||||
}
|
||||
|
||||
// ResolvePartialIDs resolves multiple potentially partial issue IDs.
|
||||
// Returns the resolved IDs and any errors encountered.
|
||||
func ResolvePartialIDs(ctx context.Context, store storage.Storage, inputs []string) ([]string, error) {
|
||||
var resolved []string
|
||||
for _, input := range inputs {
|
||||
fullID, err := ResolvePartialID(ctx, store, input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resolved = append(resolved, fullID)
|
||||
}
|
||||
return resolved, nil
|
||||
}
|
||||
247
internal/utils/id_parser_test.go
Normal file
247
internal/utils/id_parser_test.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package utils
|
||||
|
||||
import (
	"context"
	"strings"
	"testing"

	"github.com/steveyegge/beads/internal/storage/memory"
	"github.com/steveyegge/beads/internal/types"
)
|
||||
|
||||
func TestParseIssueID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
prefix string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "already has prefix",
|
||||
input: "bd-a3f8e9",
|
||||
prefix: "bd-",
|
||||
expected: "bd-a3f8e9",
|
||||
},
|
||||
{
|
||||
name: "missing prefix",
|
||||
input: "a3f8e9",
|
||||
prefix: "bd-",
|
||||
expected: "bd-a3f8e9",
|
||||
},
|
||||
{
|
||||
name: "hierarchical with prefix",
|
||||
input: "bd-a3f8e9.1.2",
|
||||
prefix: "bd-",
|
||||
expected: "bd-a3f8e9.1.2",
|
||||
},
|
||||
{
|
||||
name: "hierarchical without prefix",
|
||||
input: "a3f8e9.1.2",
|
||||
prefix: "bd-",
|
||||
expected: "bd-a3f8e9.1.2",
|
||||
},
|
||||
{
|
||||
name: "custom prefix with ID",
|
||||
input: "ticket-123",
|
||||
prefix: "ticket-",
|
||||
expected: "ticket-123",
|
||||
},
|
||||
{
|
||||
name: "custom prefix without ID",
|
||||
input: "123",
|
||||
prefix: "ticket-",
|
||||
expected: "ticket-123",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ParseIssueID(tt.input, tt.prefix)
|
||||
if result != tt.expected {
|
||||
t.Errorf("ParseIssueID(%q, %q) = %q; want %q", tt.input, tt.prefix, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolvePartialID(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := memory.New("")
|
||||
|
||||
// Create test issues with sequential IDs (current implementation)
|
||||
// When hash IDs (bd-165) are implemented, these can be hash-based
|
||||
issue1 := &types.Issue{
|
||||
ID: "bd-1",
|
||||
Title: "Test Issue 1",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
issue2 := &types.Issue{
|
||||
ID: "bd-2",
|
||||
Title: "Test Issue 2",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
issue3 := &types.Issue{
|
||||
ID: "bd-10",
|
||||
Title: "Test Issue 3",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
|
||||
if err := store.CreateIssue(ctx, issue1, "test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.CreateIssue(ctx, issue2, "test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.CreateIssue(ctx, issue3, "test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Set config for prefix
|
||||
if err := store.SetConfig(ctx, "issue_prefix", "bd-"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected string
|
||||
shouldError bool
|
||||
errorMsg string
|
||||
}{
|
||||
{
|
||||
name: "exact match with prefix",
|
||||
input: "bd-1",
|
||||
expected: "bd-1",
|
||||
},
|
||||
{
|
||||
name: "exact match without prefix",
|
||||
input: "1",
|
||||
expected: "bd-1",
|
||||
},
|
||||
{
|
||||
name: "exact match with prefix (two digits)",
|
||||
input: "bd-10",
|
||||
expected: "bd-10",
|
||||
},
|
||||
{
|
||||
name: "exact match without prefix (two digits)",
|
||||
input: "10",
|
||||
expected: "bd-10",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := ResolvePartialID(ctx, store, tt.input)
|
||||
|
||||
if tt.shouldError {
|
||||
if err == nil {
|
||||
t.Errorf("ResolvePartialID(%q) expected error containing %q, got nil", tt.input, tt.errorMsg)
|
||||
} else if tt.errorMsg != "" && !contains(err.Error(), tt.errorMsg) {
|
||||
t.Errorf("ResolvePartialID(%q) error = %q; want error containing %q", tt.input, err.Error(), tt.errorMsg)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ResolvePartialID(%q) unexpected error: %v", tt.input, err)
|
||||
}
|
||||
if result != tt.expected {
|
||||
t.Errorf("ResolvePartialID(%q) = %q; want %q", tt.input, result, tt.expected)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolvePartialIDs(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := memory.New("")
|
||||
|
||||
// Create test issues
|
||||
issue1 := &types.Issue{
|
||||
ID: "bd-1",
|
||||
Title: "Test Issue 1",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
issue2 := &types.Issue{
|
||||
ID: "bd-2",
|
||||
Title: "Test Issue 2",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
|
||||
if err := store.CreateIssue(ctx, issue1, "test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.CreateIssue(ctx, issue2, "test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.SetConfig(ctx, "issue_prefix", "bd-"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
inputs []string
|
||||
expected []string
|
||||
shouldError bool
|
||||
}{
|
||||
{
|
||||
name: "resolve multiple IDs without prefix",
|
||||
inputs: []string{"1", "2"},
|
||||
expected: []string{"bd-1", "bd-2"},
|
||||
},
|
||||
{
|
||||
name: "resolve mixed full and partial IDs",
|
||||
inputs: []string{"bd-1", "2"},
|
||||
expected: []string{"bd-1", "bd-2"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := ResolvePartialIDs(ctx, store, tt.inputs)
|
||||
|
||||
if tt.shouldError {
|
||||
if err == nil {
|
||||
t.Errorf("ResolvePartialIDs(%v) expected error, got nil", tt.inputs)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("ResolvePartialIDs(%v) unexpected error: %v", tt.inputs, err)
|
||||
}
|
||||
if len(result) != len(tt.expected) {
|
||||
t.Errorf("ResolvePartialIDs(%v) returned %d results; want %d", tt.inputs, len(result), len(tt.expected))
|
||||
}
|
||||
for i := range result {
|
||||
if result[i] != tt.expected[i] {
|
||||
t.Errorf("ResolvePartialIDs(%v)[%d] = %q; want %q", tt.inputs, i, result[i], tt.expected[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// contains reports whether substr occurs anywhere within s.
// Thin wrapper over strings.Contains, replacing a hand-rolled
// prefix/suffix/scan chain that duplicated the standard library.
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
|
||||
|
||||
// findSubstring reports whether substr occurs anywhere within s,
// using a straightforward sliding-window comparison.
func findSubstring(s, substr string) bool {
	limit := len(s) - len(substr)
	for start := 0; start <= limit; start++ {
		if s[start:start+len(substr)] == substr {
			return true
		}
	}
	return false
}
|
||||
Reference in New Issue
Block a user