Fix issue-prefix config fallback to config.yaml (GH#1145) (#1146)
The config.yaml file uses "issue-prefix" (with hyphen), but this setting was only read during bd init. After initialization, all code read from the database's "issue_prefix" key, so subsequent changes to config.yaml were silently ignored.

This fix adds a fallback to config.yaml's "issue-prefix" in the key locations where the prefix is retrieved from the database:

1. autoflush.go: Auto-import now checks config.yaml before falling back to auto-detection from JSONL or directory name
2. autoflush.go: filterByMultiRepoPrefix now checks config.yaml as a fallback
3. create.go: Direct-mode prefix validation now checks config.yaml as a fallback

Priority order is preserved:

1. Database issue_prefix (if set)
2. config.yaml issue-prefix (new fallback)
3. Auto-detection from JSONL/directory (existing fallback)

Fixes #1145

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -253,8 +253,14 @@ func autoImportIfNewer() {
|
|||||||
if store != nil {
|
if store != nil {
|
||||||
prefix, prefixErr := store.GetConfig(ctx, "issue_prefix")
|
prefix, prefixErr := store.GetConfig(ctx, "issue_prefix")
|
||||||
if prefixErr != nil || prefix == "" {
|
if prefixErr != nil || prefix == "" {
|
||||||
// Database needs initialization - detect prefix from JSONL or directory
|
// GH#1145: Check config.yaml for issue-prefix before auto-detecting
|
||||||
detectedPrefix := detectPrefixFromJSONL(jsonlData)
|
detectedPrefix := config.GetString("issue-prefix")
|
||||||
|
|
||||||
|
// If config.yaml doesn't have it, try to detect from JSONL
|
||||||
|
if detectedPrefix == "" {
|
||||||
|
detectedPrefix = detectPrefixFromJSONL(jsonlData)
|
||||||
|
}
|
||||||
|
|
||||||
if detectedPrefix == "" {
|
if detectedPrefix == "" {
|
||||||
// Fallback: detect from directory name
|
// Fallback: detect from directory name
|
||||||
beadsDir := filepath.Dir(jsonlPath)
|
beadsDir := filepath.Dir(jsonlPath)
|
||||||
@@ -338,7 +344,7 @@ func autoImportIfNewer() {
|
|||||||
if err := store.ClearAllExportHashes(ctx); err != nil {
|
if err := store.ClearAllExportHashes(ctx); err != nil {
|
||||||
fmt.Fprintf(os.Stderr, "Warning: failed to clear export_hashes before import: %v\n", err)
|
fmt.Fprintf(os.Stderr, "Warning: failed to clear export_hashes before import: %v\n", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Use shared import logic
|
// Use shared import logic
|
||||||
opts := ImportOptions{
|
opts := ImportOptions{
|
||||||
DryRun: false,
|
DryRun: false,
|
||||||
@@ -418,8 +424,6 @@ func autoImportIfNewer() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// markDirtyAndScheduleFlush marks the database as dirty and schedules a flush
|
// markDirtyAndScheduleFlush marks the database as dirty and schedules a flush
|
||||||
// markDirtyAndScheduleFlush marks the database as dirty and schedules a debounced
|
// markDirtyAndScheduleFlush marks the database as dirty and schedules a debounced
|
||||||
// export to JSONL. Uses FlushManager's event-driven architecture.
|
// export to JSONL. Uses FlushManager's event-driven architecture.
|
||||||
@@ -485,12 +489,12 @@ func validateJSONLIntegrity(ctx context.Context, jsonlPath string) (bool, error)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("failed to get stored JSONL hash: %w", err)
|
return false, fmt.Errorf("failed to get stored JSONL hash: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no hash stored, this is first export - skip validation
|
// If no hash stored, this is first export - skip validation
|
||||||
if storedHash == "" {
|
if storedHash == "" {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read current JSONL file
|
// Read current JSONL file
|
||||||
jsonlData, err := os.ReadFile(jsonlPath)
|
jsonlData, err := os.ReadFile(jsonlPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -508,12 +512,12 @@ func validateJSONLIntegrity(ctx context.Context, jsonlPath string) (bool, error)
|
|||||||
}
|
}
|
||||||
return false, fmt.Errorf("failed to read JSONL file: %w", err)
|
return false, fmt.Errorf("failed to read JSONL file: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute current JSONL hash
|
// Compute current JSONL hash
|
||||||
hasher := sha256.New()
|
hasher := sha256.New()
|
||||||
hasher.Write(jsonlData)
|
hasher.Write(jsonlData)
|
||||||
currentHash := hex.EncodeToString(hasher.Sum(nil))
|
currentHash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
|
||||||
// Compare hashes
|
// Compare hashes
|
||||||
if currentHash != storedHash {
|
if currentHash != storedHash {
|
||||||
fmt.Fprintf(os.Stderr, "⚠️ WARNING: JSONL file hash mismatch detected\n")
|
fmt.Fprintf(os.Stderr, "⚠️ WARNING: JSONL file hash mismatch detected\n")
|
||||||
@@ -530,7 +534,7 @@ func validateJSONLIntegrity(ctx context.Context, jsonlPath string) (bool, error)
|
|||||||
}
|
}
|
||||||
return true, nil // Signal full export needed
|
return true, nil // Signal full export needed
|
||||||
}
|
}
|
||||||
|
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -559,15 +563,15 @@ func writeJSONLAtomic(jsonlPath string, issues []*types.Issue) ([]string, error)
|
|||||||
encoder := json.NewEncoder(f)
|
encoder := json.NewEncoder(f)
|
||||||
skippedCount := 0
|
skippedCount := 0
|
||||||
exportedIDs := make([]string, 0, len(issues))
|
exportedIDs := make([]string, 0, len(issues))
|
||||||
|
|
||||||
for _, issue := range issues {
|
for _, issue := range issues {
|
||||||
if err := encoder.Encode(issue); err != nil {
|
if err := encoder.Encode(issue); err != nil {
|
||||||
return nil, fmt.Errorf("failed to encode issue %s: %w", issue.ID, err)
|
return nil, fmt.Errorf("failed to encode issue %s: %w", issue.ID, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
exportedIDs = append(exportedIDs, issue.ID)
|
exportedIDs = append(exportedIDs, issue.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Report skipped issues if any (helps debugging)
|
// Report skipped issues if any (helps debugging)
|
||||||
if skippedCount > 0 {
|
if skippedCount > 0 {
|
||||||
debug.Logf("auto-flush skipped %d issue(s) with timestamp-only changes", skippedCount)
|
debug.Logf("auto-flush skipped %d issue(s) with timestamp-only changes", skippedCount)
|
||||||
@@ -726,10 +730,13 @@ func filterByMultiRepoPrefix(ctx context.Context, s storage.Storage, issues []*t
|
|||||||
return issues
|
return issues
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get our configured prefix
|
// Get our configured prefix (GH#1145: fallback to config.yaml)
|
||||||
prefix, prefixErr := s.GetConfig(ctx, "issue_prefix")
|
prefix, prefixErr := s.GetConfig(ctx, "issue_prefix")
|
||||||
if prefixErr != nil || prefix == "" {
|
if prefixErr != nil || prefix == "" {
|
||||||
return issues
|
prefix = config.GetString("issue-prefix")
|
||||||
|
if prefix == "" {
|
||||||
|
return issues
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine if we're the primary repo
|
// Determine if we're the primary repo
|
||||||
|
|||||||
@@ -432,8 +432,11 @@ var createCmd = &cobra.Command{
|
|||||||
}
|
}
|
||||||
// If error, continue without validation (non-fatal)
|
// If error, continue without validation (non-fatal)
|
||||||
} else {
|
} else {
|
||||||
// Direct mode - check config
|
// Direct mode - check config (GH#1145: fallback to config.yaml)
|
||||||
dbPrefix, _ = store.GetConfig(ctx, "issue_prefix")
|
dbPrefix, _ = store.GetConfig(ctx, "issue_prefix")
|
||||||
|
if dbPrefix == "" {
|
||||||
|
dbPrefix = config.GetString("issue-prefix")
|
||||||
|
}
|
||||||
allowedPrefixes, _ = store.GetConfig(ctx, "allowed_prefixes")
|
allowedPrefixes, _ = store.GetConfig(ctx, "allowed_prefixes")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user