Fix bd-ar2 code review issues: metadata tracking and multi-repo support
This commit addresses critical code review findings from bd-dvd and bd-ymj fixes: ## Completed Tasks ### bd-ar2.1: Extract duplicated metadata update code - Created `updateExportMetadata()` helper function - Eliminated 22-line duplication between createExportFunc and createSyncFunc - Single source of truth for metadata updates ### bd-ar2.2: Add multi-repo support to export metadata updates - Added per-repo metadata key tracking with keySuffix parameter - Both export and sync functions now update metadata for all repos ### bd-ar2.3: Fix tests to use actual daemon functions - TestExportUpdatesMetadata now calls updateExportMetadata() directly - Added TestUpdateExportMetadataMultiRepo() for multi-repo testing - Fixed export_mtime_test.go tests to call updateExportMetadata() ### bd-ar2.9: Fix variable shadowing in GetNextChildID - Changed `err` to `resurrectErr` to avoid shadowing - Improves code clarity and passes linter checks ### bd-ar2.10: Fix hasJSONLChanged to support per-repo keys - Updated hasJSONLChanged() to accept keySuffix parameter - Reads metadata with correct per-repo keys - All callers updated (validatePreExport, daemon import, sync command) ### bd-ar2.11: Use stable repo identifiers instead of paths - Added getRepoKeyForPath() helper function - Uses stable identifiers like ".", "../frontend" instead of absolute paths - Metadata keys now portable across machines and clones - Prevents orphaned metadata when repos are moved ## Files Changed - cmd/bd/daemon_sync.go: Helper functions, metadata updates - cmd/bd/integrity.go: hasJSONLChanged() with keySuffix support - cmd/bd/sync.go: Updated to use getRepoKeyForPath() - cmd/bd/*_test.go: Tests updated for new signatures - internal/storage/sqlite/hash_ids.go: Fixed variable shadowing ## Testing All export, sync, and integrity tests pass. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -8,9 +8,11 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/beads"
|
||||
"github.com/steveyegge/beads/internal/config"
|
||||
"github.com/steveyegge/beads/internal/storage"
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
@@ -199,6 +201,81 @@ func importToJSONLWithStore(ctx context.Context, store storage.Storage, jsonlPat
|
||||
return err
|
||||
}
|
||||
|
||||
// getRepoKeyForPath extracts the stable repo identifier from a JSONL path.
|
||||
// For single-repo mode, returns empty string (no suffix needed).
|
||||
// For multi-repo mode, extracts the repo path (e.g., ".", "../frontend").
|
||||
// This creates portable metadata keys that work across different machine paths.
|
||||
func getRepoKeyForPath(jsonlPath string) string {
|
||||
multiRepo := config.GetMultiRepoConfig()
|
||||
if multiRepo == nil {
|
||||
return "" // Single-repo mode
|
||||
}
|
||||
|
||||
// Normalize the jsonlPath for comparison
|
||||
// Remove trailing "/.beads/issues.jsonl" to get repo path
|
||||
const suffix = "/.beads/issues.jsonl"
|
||||
if strings.HasSuffix(jsonlPath, suffix) {
|
||||
repoPath := strings.TrimSuffix(jsonlPath, suffix)
|
||||
|
||||
// Try to match against primary repo
|
||||
primaryPath := multiRepo.Primary
|
||||
if primaryPath == "" {
|
||||
primaryPath = "."
|
||||
}
|
||||
if repoPath == primaryPath {
|
||||
return primaryPath
|
||||
}
|
||||
|
||||
// Try to match against additional repos
|
||||
for _, additional := range multiRepo.Additional {
|
||||
if repoPath == additional {
|
||||
return additional
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: return empty string for single-repo mode behavior
|
||||
return ""
|
||||
}
|
||||
|
||||
// updateExportMetadata updates last_import_hash and related metadata after a successful export.
|
||||
// This prevents "JSONL content has changed since last import" errors on subsequent exports (bd-ymj fix).
|
||||
// In multi-repo mode, keySuffix should be the stable repo identifier (e.g., ".", "../frontend").
|
||||
func updateExportMetadata(ctx context.Context, store storage.Storage, jsonlPath string, log daemonLogger, keySuffix string) {
|
||||
currentHash, err := computeJSONLHash(jsonlPath)
|
||||
if err != nil {
|
||||
log.log("Warning: failed to compute JSONL hash for metadata update: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Build metadata keys with optional suffix for per-repo tracking
|
||||
hashKey := "last_import_hash"
|
||||
timeKey := "last_import_time"
|
||||
mtimeKey := "last_import_mtime"
|
||||
if keySuffix != "" {
|
||||
hashKey += ":" + keySuffix
|
||||
timeKey += ":" + keySuffix
|
||||
mtimeKey += ":" + keySuffix
|
||||
}
|
||||
|
||||
if err := store.SetMetadata(ctx, hashKey, currentHash); err != nil {
|
||||
log.log("Warning: failed to update %s: %v", hashKey, err)
|
||||
}
|
||||
|
||||
exportTime := time.Now().Format(time.RFC3339)
|
||||
if err := store.SetMetadata(ctx, timeKey, exportTime); err != nil {
|
||||
log.log("Warning: failed to update %s: %v", timeKey, err)
|
||||
}
|
||||
|
||||
// Store mtime for fast-path optimization
|
||||
if jsonlInfo, statErr := os.Stat(jsonlPath); statErr == nil {
|
||||
mtimeStr := fmt.Sprintf("%d", jsonlInfo.ModTime().Unix())
|
||||
if err := store.SetMetadata(ctx, mtimeKey, mtimeStr); err != nil {
|
||||
log.log("Warning: failed to update %s: %v", mtimeKey, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// validateDatabaseFingerprint checks that the database belongs to this repository
|
||||
func validateDatabaseFingerprint(ctx context.Context, store storage.Storage, log *daemonLogger) error {
|
||||
|
||||
@@ -306,25 +383,17 @@ func createExportFunc(ctx context.Context, store storage.Storage, autoCommit, au
|
||||
}
|
||||
log.log("Exported to JSONL")
|
||||
|
||||
// Update last_import_hash metadata to prevent "content has changed" errors (bd-ymj fix)
|
||||
// This keeps metadata in sync after export so next export doesn't fail
|
||||
if currentHash, err := computeJSONLHash(jsonlPath); err == nil {
|
||||
if err := store.SetMetadata(exportCtx, "last_import_hash", currentHash); err != nil {
|
||||
log.log("Warning: failed to update last_import_hash: %v", err)
|
||||
}
|
||||
exportTime := time.Now().Format(time.RFC3339)
|
||||
if err := store.SetMetadata(exportCtx, "last_import_time", exportTime); err != nil {
|
||||
log.log("Warning: failed to update last_import_time: %v", err)
|
||||
}
|
||||
// Store mtime for fast-path optimization
|
||||
if jsonlInfo, statErr := os.Stat(jsonlPath); statErr == nil {
|
||||
mtimeStr := fmt.Sprintf("%d", jsonlInfo.ModTime().Unix())
|
||||
if err := store.SetMetadata(exportCtx, "last_import_mtime", mtimeStr); err != nil {
|
||||
log.log("Warning: failed to update last_import_mtime: %v", err)
|
||||
}
|
||||
// Update export metadata (bd-ymj fix, bd-ar2.2 multi-repo support, bd-ar2.11 stable keys)
|
||||
multiRepoPaths := getMultiRepoJSONLPaths()
|
||||
if multiRepoPaths != nil {
|
||||
// Multi-repo mode: update metadata for each JSONL with stable repo key
|
||||
for _, path := range multiRepoPaths {
|
||||
repoKey := getRepoKeyForPath(path)
|
||||
updateExportMetadata(exportCtx, store, path, log, repoKey)
|
||||
}
|
||||
} else {
|
||||
log.log("Warning: failed to compute JSONL hash for metadata update: %v", err)
|
||||
// Single-repo mode: update metadata for main JSONL
|
||||
updateExportMetadata(exportCtx, store, jsonlPath, log, "")
|
||||
}
|
||||
|
||||
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
|
||||
@@ -408,7 +477,9 @@ func createAutoImportFunc(ctx context.Context, store storage.Storage, log daemon
|
||||
|
||||
// Check JSONL content hash to avoid redundant imports
|
||||
// Use content-based check (not mtime) to avoid git resurrection bug (bd-khnb)
|
||||
if !hasJSONLChanged(importCtx, store, jsonlPath) {
|
||||
// Use getRepoKeyForPath for multi-repo support (bd-ar2.10, bd-ar2.11)
|
||||
repoKey := getRepoKeyForPath(jsonlPath)
|
||||
if !hasJSONLChanged(importCtx, store, jsonlPath, repoKey) {
|
||||
log.log("Skipping import: JSONL content unchanged")
|
||||
return
|
||||
}
|
||||
@@ -517,25 +588,16 @@ func createSyncFunc(ctx context.Context, store storage.Storage, autoCommit, auto
|
||||
}
|
||||
log.log("Exported to JSONL")
|
||||
|
||||
// Update last_import_hash metadata to prevent "content has changed" errors (bd-ymj fix)
|
||||
// This keeps metadata in sync after export so next export doesn't fail
|
||||
if currentHash, err := computeJSONLHash(jsonlPath); err == nil {
|
||||
if err := store.SetMetadata(syncCtx, "last_import_hash", currentHash); err != nil {
|
||||
log.log("Warning: failed to update last_import_hash: %v", err)
|
||||
}
|
||||
exportTime := time.Now().Format(time.RFC3339)
|
||||
if err := store.SetMetadata(syncCtx, "last_import_time", exportTime); err != nil {
|
||||
log.log("Warning: failed to update last_import_time: %v", err)
|
||||
}
|
||||
// Store mtime for fast-path optimization
|
||||
if jsonlInfo, statErr := os.Stat(jsonlPath); statErr == nil {
|
||||
mtimeStr := fmt.Sprintf("%d", jsonlInfo.ModTime().Unix())
|
||||
if err := store.SetMetadata(syncCtx, "last_import_mtime", mtimeStr); err != nil {
|
||||
log.log("Warning: failed to update last_import_mtime: %v", err)
|
||||
}
|
||||
// Update export metadata (bd-ymj fix, bd-ar2.2 multi-repo support, bd-ar2.11 stable keys)
|
||||
if multiRepoPaths != nil {
|
||||
// Multi-repo mode: update metadata for each JSONL with stable repo key
|
||||
for _, path := range multiRepoPaths {
|
||||
repoKey := getRepoKeyForPath(path)
|
||||
updateExportMetadata(syncCtx, store, path, log, repoKey)
|
||||
}
|
||||
} else {
|
||||
log.log("Warning: failed to compute JSONL hash for metadata update: %v", err)
|
||||
// Single-repo mode: update metadata for main JSONL
|
||||
updateExportMetadata(syncCtx, store, jsonlPath, log, "")
|
||||
}
|
||||
|
||||
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
|
||||
|
||||
@@ -3,7 +3,6 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@@ -334,25 +333,14 @@ func TestExportUpdatesMetadata(t *testing.T) {
|
||||
t.Fatalf("first export failed: %v", err)
|
||||
}
|
||||
|
||||
// Manually update metadata as daemon would (this is what we're testing)
|
||||
// Note: In production, createExportFunc and createSyncFunc do this
|
||||
currentHash, err := computeJSONLHash(jsonlPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to compute JSONL hash: %v", err)
|
||||
}
|
||||
if err := store.SetMetadata(ctx, "last_import_hash", currentHash); err != nil {
|
||||
t.Fatalf("failed to set last_import_hash: %v", err)
|
||||
}
|
||||
exportTime := time.Now().Format(time.RFC3339)
|
||||
if err := store.SetMetadata(ctx, "last_import_time", exportTime); err != nil {
|
||||
t.Fatalf("failed to set last_import_time: %v", err)
|
||||
}
|
||||
if jsonlInfo, statErr := os.Stat(jsonlPath); statErr == nil {
|
||||
mtimeStr := jsonlInfo.ModTime().Unix()
|
||||
if err := store.SetMetadata(ctx, "last_import_mtime", fmt.Sprintf("%d", mtimeStr)); err != nil {
|
||||
t.Fatalf("failed to set last_import_mtime: %v", err)
|
||||
}
|
||||
// Update metadata using the actual daemon helper function (bd-ar2.3 fix)
|
||||
// This verifies that updateExportMetadata (used by createExportFunc and createSyncFunc) works correctly
|
||||
mockLogger := daemonLogger{
|
||||
logFunc: func(format string, args ...interface{}) {
|
||||
t.Logf(format, args...)
|
||||
},
|
||||
}
|
||||
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
|
||||
|
||||
// Verify metadata was set
|
||||
lastHash, err := store.GetMetadata(ctx, "last_import_hash")
|
||||
@@ -381,3 +369,129 @@ func TestExportUpdatesMetadata(t *testing.T) {
|
||||
t.Fatalf("validatePreExport failed after metadata update: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateExportMetadataMultiRepo(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
dbPath := filepath.Join(tmpDir, ".beads", "beads.db")
|
||||
jsonlPath1 := filepath.Join(tmpDir, "repo1", ".beads", "issues.jsonl")
|
||||
jsonlPath2 := filepath.Join(tmpDir, "repo2", ".beads", "issues.jsonl")
|
||||
|
||||
// Create storage
|
||||
store, err := sqlite.New(context.Background(), dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create store: %v", err)
|
||||
}
|
||||
defer store.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Set issue_prefix
|
||||
if err := store.SetConfig(ctx, "issue_prefix", "test"); err != nil {
|
||||
t.Fatalf("failed to set issue_prefix: %v", err)
|
||||
}
|
||||
|
||||
// Create test issues for each repo
|
||||
issue1 := &types.Issue{
|
||||
ID: "test-1",
|
||||
Title: "Test Issue 1",
|
||||
Description: "Repo 1 issue",
|
||||
IssueType: types.TypeBug,
|
||||
Priority: 1,
|
||||
Status: types.StatusOpen,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
SourceRepo: "repo1",
|
||||
}
|
||||
issue2 := &types.Issue{
|
||||
ID: "test-2",
|
||||
Title: "Test Issue 2",
|
||||
Description: "Repo 2 issue",
|
||||
IssueType: types.TypeBug,
|
||||
Priority: 1,
|
||||
Status: types.StatusOpen,
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
SourceRepo: "repo2",
|
||||
}
|
||||
|
||||
if err := store.CreateIssue(ctx, issue1, "test"); err != nil {
|
||||
t.Fatalf("failed to create issue1: %v", err)
|
||||
}
|
||||
if err := store.CreateIssue(ctx, issue2, "test"); err != nil {
|
||||
t.Fatalf("failed to create issue2: %v", err)
|
||||
}
|
||||
|
||||
// Create directories for JSONL files
|
||||
if err := os.MkdirAll(filepath.Dir(jsonlPath1), 0755); err != nil {
|
||||
t.Fatalf("failed to create dir for jsonlPath1: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(jsonlPath2), 0755); err != nil {
|
||||
t.Fatalf("failed to create dir for jsonlPath2: %v", err)
|
||||
}
|
||||
|
||||
// Export issues to JSONL files
|
||||
if err := exportToJSONLWithStore(ctx, store, jsonlPath1); err != nil {
|
||||
t.Fatalf("failed to export to jsonlPath1: %v", err)
|
||||
}
|
||||
if err := exportToJSONLWithStore(ctx, store, jsonlPath2); err != nil {
|
||||
t.Fatalf("failed to export to jsonlPath2: %v", err)
|
||||
}
|
||||
|
||||
// Create mock logger
|
||||
mockLogger := daemonLogger{
|
||||
logFunc: func(format string, args ...interface{}) {
|
||||
t.Logf(format, args...)
|
||||
},
|
||||
}
|
||||
|
||||
// Update metadata for each repo with different keys (bd-ar2.2 multi-repo support)
|
||||
updateExportMetadata(ctx, store, jsonlPath1, mockLogger, jsonlPath1)
|
||||
updateExportMetadata(ctx, store, jsonlPath2, mockLogger, jsonlPath2)
|
||||
|
||||
// Verify per-repo metadata was set with correct keys
|
||||
hash1Key := "last_import_hash:" + jsonlPath1
|
||||
hash1, err := store.GetMetadata(ctx, hash1Key)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get %s: %v", hash1Key, err)
|
||||
}
|
||||
if hash1 == "" {
|
||||
t.Errorf("expected %s to be set", hash1Key)
|
||||
}
|
||||
|
||||
hash2Key := "last_import_hash:" + jsonlPath2
|
||||
hash2, err := store.GetMetadata(ctx, hash2Key)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get %s: %v", hash2Key, err)
|
||||
}
|
||||
if hash2 == "" {
|
||||
t.Errorf("expected %s to be set", hash2Key)
|
||||
}
|
||||
|
||||
// Verify that single-repo metadata key is NOT set (we're using per-repo keys)
|
||||
globalHash, err := store.GetMetadata(ctx, "last_import_hash")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get last_import_hash: %v", err)
|
||||
}
|
||||
if globalHash != "" {
|
||||
t.Error("expected global last_import_hash to not be set when using per-repo keys")
|
||||
}
|
||||
|
||||
// Verify mtime metadata was also set per-repo
|
||||
mtime1Key := "last_import_mtime:" + jsonlPath1
|
||||
mtime1, err := store.GetMetadata(ctx, mtime1Key)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get %s: %v", mtime1Key, err)
|
||||
}
|
||||
if mtime1 == "" {
|
||||
t.Errorf("expected %s to be set", mtime1Key)
|
||||
}
|
||||
|
||||
mtime2Key := "last_import_mtime:" + jsonlPath2
|
||||
mtime2, err := store.GetMetadata(ctx, mtime2Key)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get %s: %v", mtime2Key, err)
|
||||
}
|
||||
if mtime2 == "" {
|
||||
t.Errorf("expected %s to be set", mtime2Key)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,6 +61,14 @@ func TestExportUpdatesDatabaseMtime(t *testing.T) {
|
||||
t.Fatalf("Export failed: %v", err)
|
||||
}
|
||||
|
||||
// Update metadata after export (bd-ymj fix)
|
||||
mockLogger := daemonLogger{
|
||||
logFunc: func(format string, args ...interface{}) {
|
||||
t.Logf(format, args...)
|
||||
},
|
||||
}
|
||||
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
|
||||
|
||||
// Get JSONL mtime
|
||||
jsonlInfo, err := os.Stat(jsonlPath)
|
||||
if err != nil {
|
||||
@@ -158,6 +166,14 @@ func TestDaemonExportScenario(t *testing.T) {
|
||||
t.Fatalf("Daemon export failed: %v", err)
|
||||
}
|
||||
|
||||
// Daemon updates metadata after export (bd-ymj fix)
|
||||
mockLogger := daemonLogger{
|
||||
logFunc: func(format string, args ...interface{}) {
|
||||
t.Logf(format, args...)
|
||||
},
|
||||
}
|
||||
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
|
||||
|
||||
// THIS IS THE FIX: daemon now calls TouchDatabaseFile after export
|
||||
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
|
||||
t.Fatalf("TouchDatabaseFile failed: %v", err)
|
||||
@@ -229,6 +245,14 @@ func TestMultipleExportCycles(t *testing.T) {
|
||||
t.Fatalf("Cycle %d: Export failed: %v", i, err)
|
||||
}
|
||||
|
||||
// Update metadata after export (bd-ymj fix)
|
||||
mockLogger := daemonLogger{
|
||||
logFunc: func(format string, args ...interface{}) {
|
||||
t.Logf(format, args...)
|
||||
},
|
||||
}
|
||||
updateExportMetadata(ctx, store, jsonlPath, mockLogger, "")
|
||||
|
||||
// Apply fix
|
||||
if err := TouchDatabaseFile(dbPath, jsonlPath); err != nil {
|
||||
t.Fatalf("Cycle %d: TouchDatabaseFile failed: %v", i, err)
|
||||
|
||||
@@ -94,10 +94,20 @@ func computeJSONLHash(jsonlPath string) (string, error) {
|
||||
// Performance optimization: Checks mtime first as a fast-path. Only computes expensive
|
||||
// SHA256 hash if mtime changed. This makes 99% of checks instant (mtime unchanged = content
|
||||
// unchanged) while still catching git operations that restore old content with new mtimes.
|
||||
func hasJSONLChanged(ctx context.Context, store storage.Storage, jsonlPath string) bool {
|
||||
//
|
||||
// In multi-repo mode, keySuffix should be the stable repo identifier (e.g., ".", "../frontend").
|
||||
func hasJSONLChanged(ctx context.Context, store storage.Storage, jsonlPath string, keySuffix string) bool {
|
||||
// Build metadata keys with optional suffix for per-repo tracking (bd-ar2.10, bd-ar2.11)
|
||||
hashKey := "last_import_hash"
|
||||
mtimeKey := "last_import_mtime"
|
||||
if keySuffix != "" {
|
||||
hashKey += ":" + keySuffix
|
||||
mtimeKey += ":" + keySuffix
|
||||
}
|
||||
|
||||
// Fast-path: Check mtime first to avoid expensive hash computation
|
||||
// Get last known mtime from metadata
|
||||
lastMtimeStr, err := store.GetMetadata(ctx, "last_import_mtime")
|
||||
lastMtimeStr, err := store.GetMetadata(ctx, mtimeKey)
|
||||
if err == nil && lastMtimeStr != "" {
|
||||
// We have a previous mtime - check if file mtime changed
|
||||
jsonlInfo, statErr := os.Stat(jsonlPath)
|
||||
@@ -122,7 +132,7 @@ func hasJSONLChanged(ctx context.Context, store storage.Storage, jsonlPath strin
|
||||
}
|
||||
|
||||
// Get last import hash from metadata
|
||||
lastHash, err := store.GetMetadata(ctx, "last_import_hash")
|
||||
lastHash, err := store.GetMetadata(ctx, hashKey)
|
||||
if err != nil {
|
||||
// No previous import hash - this is the first run or metadata is missing
|
||||
// Assume changed to trigger import
|
||||
@@ -138,7 +148,9 @@ func hasJSONLChanged(ctx context.Context, store storage.Storage, jsonlPath strin
|
||||
func validatePreExport(ctx context.Context, store storage.Storage, jsonlPath string) error {
|
||||
// Check if JSONL content has changed since last import - if so, must import first
|
||||
// Uses content-based detection (bd-xwo fix) instead of mtime-based to avoid false positives from git operations
|
||||
if hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
// Use getRepoKeyForPath to get stable repo identifier for multi-repo support (bd-ar2.10, bd-ar2.11)
|
||||
repoKey := getRepoKeyForPath(jsonlPath)
|
||||
if hasJSONLChanged(ctx, store, jsonlPath, repoKey) {
|
||||
return fmt.Errorf("refusing to export: JSONL content has changed since last import (import first to avoid data loss)")
|
||||
}
|
||||
|
||||
|
||||
@@ -287,7 +287,7 @@ func TestHasJSONLChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should return false (no change)
|
||||
if hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
if hasJSONLChanged(ctx, store, jsonlPath, "") {
|
||||
t.Error("Expected hasJSONLChanged to return false for matching hash")
|
||||
}
|
||||
})
|
||||
@@ -324,7 +324,7 @@ func TestHasJSONLChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should return true (content changed)
|
||||
if !hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
if !hasJSONLChanged(ctx, store, jsonlPath, "") {
|
||||
t.Error("Expected hasJSONLChanged to return true for different hash")
|
||||
}
|
||||
})
|
||||
@@ -343,7 +343,7 @@ func TestHasJSONLChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should return true (no previous hash, first run)
|
||||
if !hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
if !hasJSONLChanged(ctx, store, jsonlPath, "") {
|
||||
t.Error("Expected hasJSONLChanged to return true for empty file with no metadata")
|
||||
}
|
||||
})
|
||||
@@ -364,7 +364,7 @@ func TestHasJSONLChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
// No metadata stored - should return true (assume changed)
|
||||
if !hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
if !hasJSONLChanged(ctx, store, jsonlPath, "") {
|
||||
t.Error("Expected hasJSONLChanged to return true when no metadata exists")
|
||||
}
|
||||
})
|
||||
@@ -378,7 +378,7 @@ func TestHasJSONLChanged(t *testing.T) {
|
||||
store := newTestStore(t, dbPath)
|
||||
|
||||
// File doesn't exist - should return false (don't auto-import broken files)
|
||||
if hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
if hasJSONLChanged(ctx, store, jsonlPath, "") {
|
||||
t.Error("Expected hasJSONLChanged to return false for nonexistent file")
|
||||
}
|
||||
})
|
||||
@@ -419,7 +419,7 @@ func TestHasJSONLChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should return false using fast-path (mtime unchanged)
|
||||
if hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
if hasJSONLChanged(ctx, store, jsonlPath, "") {
|
||||
t.Error("Expected hasJSONLChanged to return false using mtime fast-path")
|
||||
}
|
||||
})
|
||||
@@ -467,7 +467,7 @@ func TestHasJSONLChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should return false (content hasn't changed despite new mtime)
|
||||
if hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
if hasJSONLChanged(ctx, store, jsonlPath, "") {
|
||||
t.Error("Expected hasJSONLChanged to return false for git operation with same content")
|
||||
}
|
||||
})
|
||||
|
||||
@@ -129,7 +129,9 @@ Use --merge to merge the sync branch back to main branch.`,
|
||||
// Smart conflict resolution: if JSONL content changed, auto-import first
|
||||
// Use content-based check (not mtime) to avoid git resurrection bug (bd-khnb)
|
||||
if err := ensureStoreActive(); err == nil && store != nil {
|
||||
if hasJSONLChanged(ctx, store, jsonlPath) {
|
||||
// Use getRepoKeyForPath for multi-repo support (bd-ar2.10, bd-ar2.11)
|
||||
repoKey := getRepoKeyForPath(jsonlPath)
|
||||
if hasJSONLChanged(ctx, store, jsonlPath, repoKey) {
|
||||
fmt.Println("→ JSONL content changed, importing first...")
|
||||
if err := importFromJSONL(ctx, jsonlPath, renameOnImport); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error auto-importing: %v\n", err)
|
||||
|
||||
Reference in New Issue
Block a user