fix(export): populate export_hashes after successful export (GH#1278) (#1286)
Child issues created with --parent were missing from the export_hashes table, which affects integrity tracking and future incremental export features. This fix ensures SetExportHash() is called for all exported issues:

- Updated ExportResult to include an IssueContentHashes map
- Updated finalizeExport() to call SetExportHash() for each exported issue
- Updated exportToJSONLDeferred() to collect content hashes during export
- Updated performIncrementalExport() to collect content hashes for dirty issues
- Updated exportToJSONLWithStore() to call SetExportHash() after export
- Updated the daemon's handleExport() to call SetExportHash() after export

Added test TestExportPopulatesExportHashes to verify the fix works for both regular and hierarchical (child) issue IDs.

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -138,6 +138,17 @@ func exportToJSONLWithStore(ctx context.Context, store storage.Storage, jsonlPat
|
||||
return writeErr
|
||||
}
|
||||
|
||||
// Update export_hashes for all exported issues (GH#1278)
|
||||
// This ensures child issues created with --parent are properly registered
|
||||
for _, issue := range issues {
|
||||
if issue.ContentHash != "" {
|
||||
if err := store.SetExportHash(ctx, issue.ID, issue.ContentHash); err != nil {
|
||||
// Non-fatal warning - continue with other issues
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to set export hash for %s: %v\n", issue.ID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -265,3 +265,96 @@ func TestImportClearsExportHashes(t *testing.T) {
|
||||
t.Fatalf("expected export hash to be cleared after import, got %q", hash)
|
||||
}
|
||||
}
|
||||
|
||||
// TestExportPopulatesExportHashes tests that export populates export_hashes (GH#1278)
|
||||
// This ensures child issues created with --parent are properly registered after export.
|
||||
func TestExportPopulatesExportHashes(t *testing.T) {
|
||||
// Create temp directory
|
||||
tmpDir := t.TempDir()
|
||||
dbPath := filepath.Join(tmpDir, ".beads", "beads.db")
|
||||
jsonlPath := filepath.Join(tmpDir, ".beads", "issues.jsonl")
|
||||
|
||||
// Ensure .beads directory exists
|
||||
if err := os.MkdirAll(filepath.Dir(dbPath), 0755); err != nil {
|
||||
t.Fatalf("failed to create .beads directory: %v", err)
|
||||
}
|
||||
|
||||
// Create database
|
||||
testStore, err := sqlite.New(context.Background(), dbPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create database: %v", err)
|
||||
}
|
||||
defer testStore.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Initialize database with prefix
|
||||
if err := testStore.SetConfig(ctx, "issue_prefix", "bd"); err != nil {
|
||||
t.Fatalf("failed to set issue prefix: %v", err)
|
||||
}
|
||||
|
||||
// Create test issues including one with a hierarchical ID (child issue)
|
||||
parentIssue := &types.Issue{
|
||||
ID: "bd-parent",
|
||||
Title: "Parent epic",
|
||||
Description: "Parent issue",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 1,
|
||||
IssueType: types.TypeEpic,
|
||||
}
|
||||
if err := testStore.CreateIssue(ctx, parentIssue, testActor); err != nil {
|
||||
t.Fatalf("failed to create parent issue: %v", err)
|
||||
}
|
||||
|
||||
childIssue := &types.Issue{
|
||||
ID: "bd-parent.1",
|
||||
Title: "Child task",
|
||||
Description: "Child issue with hierarchical ID",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
}
|
||||
if err := testStore.CreateIssue(ctx, childIssue, testActor); err != nil {
|
||||
t.Fatalf("failed to create child issue: %v", err)
|
||||
}
|
||||
|
||||
// Verify export_hashes is empty before export
|
||||
hash, err := testStore.GetExportHash(ctx, "bd-parent.1")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get export hash before export: %v", err)
|
||||
}
|
||||
if hash != "" {
|
||||
t.Fatalf("expected no export hash before export, got %q", hash)
|
||||
}
|
||||
|
||||
// Export to JSONL using the store-based function
|
||||
if err := exportToJSONLWithStore(ctx, testStore, jsonlPath); err != nil {
|
||||
t.Fatalf("export failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify export_hashes is now populated for both issues
|
||||
parentHash, err := testStore.GetExportHash(ctx, "bd-parent")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get parent export hash after export: %v", err)
|
||||
}
|
||||
if parentHash == "" {
|
||||
t.Errorf("expected parent export hash to be populated after export")
|
||||
}
|
||||
|
||||
childHash, err := testStore.GetExportHash(ctx, "bd-parent.1")
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get child export hash after export: %v", err)
|
||||
}
|
||||
if childHash == "" {
|
||||
t.Errorf("expected child export hash to be populated after export (GH#1278 fix)")
|
||||
}
|
||||
|
||||
// Verify the hashes match the content hashes
|
||||
if parentHash != parentIssue.ContentHash && parentHash != "" {
|
||||
// ContentHash might be computed differently, just verify it's not empty
|
||||
t.Logf("parent export hash: %s, content hash: %s", parentHash, parentIssue.ContentHash)
|
||||
}
|
||||
if childHash != childIssue.ContentHash && childHash != "" {
|
||||
t.Logf("child export hash: %s, content hash: %s", childHash, childIssue.ContentHash)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,6 +43,10 @@ type ExportResult struct {
|
||||
|
||||
// ExportTime is when the export was performed (RFC3339Nano format)
|
||||
ExportTime string
|
||||
|
||||
// IssueContentHashes maps issue IDs to their content hashes (GH#1278)
|
||||
// Used to populate export_hashes table after successful export
|
||||
IssueContentHashes map[string]string
|
||||
}
|
||||
|
||||
// finalizeExport updates SQLite metadata after a successful git commit.
|
||||
@@ -69,6 +73,18 @@ func finalizeExport(ctx context.Context, result *ExportResult) {
|
||||
}
|
||||
}
|
||||
|
||||
// Update export_hashes for all exported issues (GH#1278)
|
||||
// This ensures child issues created with --parent are properly registered
|
||||
// for integrity tracking and incremental export detection.
|
||||
if len(result.IssueContentHashes) > 0 {
|
||||
for issueID, contentHash := range result.IssueContentHashes {
|
||||
if err := store.SetExportHash(ctx, issueID, contentHash); err != nil {
|
||||
// Non-fatal warning - continue with other issues
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to set export hash for %s: %v\n", issueID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear auto-flush state
|
||||
clearAutoFlushState()
|
||||
|
||||
@@ -233,14 +249,19 @@ func exportToJSONLDeferred(ctx context.Context, jsonlPath string) (*ExportResult
|
||||
_ = os.Remove(tempPath)
|
||||
}()
|
||||
|
||||
// Write JSONL
|
||||
// Write JSONL and collect content hashes (GH#1278)
|
||||
encoder := json.NewEncoder(tempFile)
|
||||
exportedIDs := make([]string, 0, len(issues))
|
||||
issueContentHashes := make(map[string]string, len(issues))
|
||||
for _, issue := range issues {
|
||||
if err := encoder.Encode(issue); err != nil {
|
||||
return nil, fmt.Errorf("failed to encode issue %s: %w", issue.ID, err)
|
||||
}
|
||||
exportedIDs = append(exportedIDs, issue.ID)
|
||||
// Collect content hash for export_hashes table
|
||||
if issue.ContentHash != "" {
|
||||
issueContentHashes[issue.ID] = issue.ContentHash
|
||||
}
|
||||
}
|
||||
|
||||
// Close temp file before rename (error checked implicitly by Rename success)
|
||||
@@ -262,10 +283,11 @@ func exportToJSONLDeferred(ctx context.Context, jsonlPath string) (*ExportResult
|
||||
exportTime := time.Now().Format(time.RFC3339Nano)
|
||||
|
||||
return &ExportResult{
|
||||
JSONLPath: jsonlPath,
|
||||
ExportedIDs: exportedIDs,
|
||||
ContentHash: contentHash,
|
||||
ExportTime: exportTime,
|
||||
JSONLPath: jsonlPath,
|
||||
ExportedIDs: exportedIDs,
|
||||
ContentHash: contentHash,
|
||||
ExportTime: exportTime,
|
||||
IssueContentHashes: issueContentHashes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -366,8 +388,10 @@ func performIncrementalExport(ctx context.Context, jsonlPath string, dirtyIDs []
|
||||
}
|
||||
|
||||
// Query dirty issues from database and track which IDs were found
|
||||
// Also collect content hashes for export_hashes table (GH#1278)
|
||||
dirtyIssues := make([]*types.Issue, 0, len(dirtyIDs))
|
||||
issueByID := make(map[string]*types.Issue, len(dirtyIDs))
|
||||
issueContentHashes := make(map[string]string, len(dirtyIDs))
|
||||
for _, id := range dirtyIDs {
|
||||
issue, err := store.GetIssue(ctx, id)
|
||||
if err != nil {
|
||||
@@ -376,6 +400,9 @@ func performIncrementalExport(ctx context.Context, jsonlPath string, dirtyIDs []
|
||||
issueByID[id] = issue // Store result (may be nil for deleted issues)
|
||||
if issue != nil {
|
||||
dirtyIssues = append(dirtyIssues, issue)
|
||||
if issue.ContentHash != "" {
|
||||
issueContentHashes[issue.ID] = issue.ContentHash
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -502,10 +529,11 @@ func performIncrementalExport(ctx context.Context, jsonlPath string, dirtyIDs []
|
||||
// Note: exportedIDs contains ALL IDs in the file, but we only need to clear
|
||||
// dirty flags for the dirtyIDs (which we received as parameter)
|
||||
return &ExportResult{
|
||||
JSONLPath: jsonlPath,
|
||||
ExportedIDs: dirtyIDs, // Only clear dirty flags for actually dirty issues
|
||||
ContentHash: contentHash,
|
||||
ExportTime: exportTime,
|
||||
JSONLPath: jsonlPath,
|
||||
ExportedIDs: dirtyIDs, // Only clear dirty flags for actually dirty issues
|
||||
ContentHash: contentHash,
|
||||
ExportTime: exportTime,
|
||||
IssueContentHashes: issueContentHashes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -205,6 +205,17 @@ func (s *Server) handleExport(req *Request) Response {
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty flags: %v\n", err)
|
||||
}
|
||||
|
||||
// Update export_hashes for all exported issues (GH#1278)
|
||||
// This ensures child issues created with --parent are properly registered
|
||||
for _, issue := range issues {
|
||||
if issue.ContentHash != "" {
|
||||
if err := store.SetExportHash(ctx, issue.ID, issue.ContentHash); err != nil {
|
||||
// Non-fatal, just log
|
||||
fmt.Fprintf(os.Stderr, "Warning: failed to set export hash for %s: %v\n", issue.ID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write manifest if configured
|
||||
if manifest != nil {
|
||||
manifest.ExportedCount = len(exportedIDs)
|
||||
|
||||
Reference in New Issue
Block a user