After daemon auto-export, the JSONL mtime could be newer than the database mtime because SQLite WAL mode does not update beads.db until a checkpoint. This caused validatePreExport to incorrectly block subsequent exports with a "JSONL is newer than database" error, leading to daemon shutdown. Solution: call TouchDatabaseFile after all export operations to ensure the database mtime >= the JSONL mtime. This prevents false positives in validation.
427 lines
13 KiB
Go
427 lines
13 KiB
Go
package main
|
|
|
|
import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/spf13/cobra"
	"github.com/steveyegge/beads/internal/debug"
	"github.com/steveyegge/beads/internal/storage/sqlite"
	"github.com/steveyegge/beads/internal/types"
)
|
|
|
|
// countIssuesInJSONL counts the number of issues in a JSONL file
|
|
func countIssuesInJSONL(path string) (int, error) {
|
|
// #nosec G304 - controlled path from config
|
|
file, err := os.Open(path)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
defer func() {
|
|
if err := file.Close(); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to close file: %v\n", err)
|
|
}
|
|
}()
|
|
|
|
count := 0
|
|
decoder := json.NewDecoder(file)
|
|
for {
|
|
var issue types.Issue
|
|
if err := decoder.Decode(&issue); err != nil {
|
|
if err.Error() == "EOF" {
|
|
break
|
|
}
|
|
// Return error for corrupt/invalid JSON
|
|
return count, fmt.Errorf("invalid JSON at issue %d: %w", count+1, err)
|
|
}
|
|
count++
|
|
}
|
|
return count, nil
|
|
}
|
|
|
|
// getIssueIDsFromJSONL reads a JSONL file and returns a set of issue IDs
|
|
func getIssueIDsFromJSONL(path string) (map[string]bool, error) {
|
|
// #nosec G304 - controlled path from config
|
|
file, err := os.Open(path)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer func() {
|
|
if err := file.Close(); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to close file: %v\n", err)
|
|
}
|
|
}()
|
|
|
|
ids := make(map[string]bool)
|
|
decoder := json.NewDecoder(file)
|
|
lineNum := 0
|
|
for {
|
|
var issue types.Issue
|
|
if err := decoder.Decode(&issue); err != nil {
|
|
if err.Error() == "EOF" {
|
|
break
|
|
}
|
|
// Return error for corrupt/invalid JSON
|
|
return ids, fmt.Errorf("invalid JSON at line %d: %w", lineNum+1, err)
|
|
}
|
|
ids[issue.ID] = true
|
|
lineNum++
|
|
}
|
|
return ids, nil
|
|
}
|
|
|
|
// validateExportPath checks if the output path is safe to write to
|
|
func validateExportPath(path string) error {
|
|
// Get absolute path to normalize it
|
|
absPath, err := filepath.Abs(path)
|
|
if err != nil {
|
|
return fmt.Errorf("invalid path: %v", err)
|
|
}
|
|
|
|
// Convert to lowercase for case-insensitive comparison on Windows
|
|
absPathLower := strings.ToLower(absPath)
|
|
|
|
// List of sensitive system directories to avoid
|
|
sensitiveDirs := []string{
|
|
"c:\\windows",
|
|
"c:\\program files",
|
|
"c:\\program files (x86)",
|
|
"c:\\programdata",
|
|
"c:\\system volume information",
|
|
"c:\\$recycle.bin",
|
|
"c:\\boot",
|
|
"c:\\recovery",
|
|
}
|
|
|
|
for _, dir := range sensitiveDirs {
|
|
if strings.HasPrefix(absPathLower, strings.ToLower(dir)) {
|
|
return fmt.Errorf("cannot write to sensitive system directory: %s", dir)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
var exportCmd = &cobra.Command{
|
|
Use: "export",
|
|
Short: "Export issues to JSONL format",
|
|
Long: `Export all issues to JSON Lines format (one JSON object per line).
|
|
Issues are sorted by ID for consistent diffs.
|
|
|
|
Output to stdout by default, or use -o flag for file output.`,
|
|
Run: func(cmd *cobra.Command, args []string) {
|
|
format, _ := cmd.Flags().GetString("format")
|
|
output, _ := cmd.Flags().GetString("output")
|
|
statusFilter, _ := cmd.Flags().GetString("status")
|
|
force, _ := cmd.Flags().GetBool("force")
|
|
|
|
debug.Logf("Debug: export flags - output=%q, force=%v\n", output, force)
|
|
|
|
if format != "jsonl" {
|
|
fmt.Fprintf(os.Stderr, "Error: only 'jsonl' format is currently supported\n")
|
|
os.Exit(1)
|
|
}
|
|
|
|
// Export command requires direct database access for consistent snapshot
|
|
// If daemon is connected, close it and open direct connection
|
|
if daemonClient != nil {
|
|
debug.Logf("Debug: export command forcing direct mode (closes daemon connection)\n")
|
|
_ = daemonClient.Close()
|
|
daemonClient = nil
|
|
}
|
|
|
|
// Note: We used to check database file timestamps here, but WAL files
|
|
// get created when opening the DB, making timestamp checks unreliable.
|
|
// Instead, we check issue counts after loading (see below).
|
|
|
|
// Ensure we have a direct store connection
|
|
if store == nil {
|
|
var err error
|
|
if dbPath == "" {
|
|
fmt.Fprintf(os.Stderr, "Error: no database path found\n")
|
|
os.Exit(1)
|
|
}
|
|
store, err = sqlite.New(dbPath)
|
|
if err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error: failed to open database: %v\n", err)
|
|
os.Exit(1)
|
|
}
|
|
defer func() { _ = store.Close() }()
|
|
}
|
|
|
|
// Build filter
|
|
filter := types.IssueFilter{}
|
|
if statusFilter != "" {
|
|
status := types.Status(statusFilter)
|
|
filter.Status = &status
|
|
}
|
|
|
|
// Get all issues
|
|
ctx := context.Background()
|
|
issues, err := store.SearchIssues(ctx, "", filter)
|
|
if err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
|
os.Exit(1)
|
|
}
|
|
|
|
// Safety check: prevent exporting empty database over non-empty JSONL
|
|
if len(issues) == 0 && output != "" && !force {
|
|
existingCount, err := countIssuesInJSONL(output)
|
|
if err != nil {
|
|
// If we can't read the file, it might not exist yet, which is fine
|
|
if !os.IsNotExist(err) {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to read existing JSONL: %v\n", err)
|
|
}
|
|
} else if existingCount > 0 {
|
|
fmt.Fprintf(os.Stderr, "Error: refusing to export empty database over non-empty JSONL file\n")
|
|
fmt.Fprintf(os.Stderr, " Database has 0 issues, JSONL has %d issues\n", existingCount)
|
|
fmt.Fprintf(os.Stderr, " This would result in data loss!\n")
|
|
fmt.Fprintf(os.Stderr, "Hint: Use --force to override this safety check, or delete the JSONL file first:\n")
|
|
fmt.Fprintf(os.Stderr, " bd export -o %s --force\n", output)
|
|
fmt.Fprintf(os.Stderr, " rm %s\n", output)
|
|
os.Exit(1)
|
|
}
|
|
}
|
|
|
|
// Safety check: prevent exporting stale database that would lose issues
|
|
if output != "" && !force {
|
|
debug.Logf("Debug: checking staleness - output=%s, force=%v\n", output, force)
|
|
|
|
// Read existing JSONL to get issue IDs
|
|
jsonlIDs, err := getIssueIDsFromJSONL(output)
|
|
if err != nil && !os.IsNotExist(err) {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to read existing JSONL for staleness check: %v\n", err)
|
|
}
|
|
|
|
if err == nil && len(jsonlIDs) > 0 {
|
|
// Build set of DB issue IDs
|
|
dbIDs := make(map[string]bool)
|
|
for _, issue := range issues {
|
|
dbIDs[issue.ID] = true
|
|
}
|
|
|
|
// Check if JSONL has any issues that DB doesn't have
|
|
var missingIDs []string
|
|
for id := range jsonlIDs {
|
|
if !dbIDs[id] {
|
|
missingIDs = append(missingIDs, id)
|
|
}
|
|
}
|
|
|
|
debug.Logf("Debug: JSONL has %d issues, DB has %d issues, missing %d\n",
|
|
len(jsonlIDs), len(issues), len(missingIDs))
|
|
|
|
if len(missingIDs) > 0 {
|
|
sort.Strings(missingIDs)
|
|
fmt.Fprintf(os.Stderr, "Error: refusing to export stale database that would lose issues\n")
|
|
fmt.Fprintf(os.Stderr, " Database has %d issues\n", len(issues))
|
|
fmt.Fprintf(os.Stderr, " JSONL has %d issues\n", len(jsonlIDs))
|
|
fmt.Fprintf(os.Stderr, " Export would lose %d issue(s):\n", len(missingIDs))
|
|
|
|
// Show first 10 missing issues
|
|
showCount := len(missingIDs)
|
|
if showCount > 10 {
|
|
showCount = 10
|
|
}
|
|
for i := 0; i < showCount; i++ {
|
|
fmt.Fprintf(os.Stderr, " - %s\n", missingIDs[i])
|
|
}
|
|
if len(missingIDs) > 10 {
|
|
fmt.Fprintf(os.Stderr, " ... and %d more\n", len(missingIDs)-10)
|
|
}
|
|
|
|
fmt.Fprintf(os.Stderr, "\n")
|
|
fmt.Fprintf(os.Stderr, "This usually means:\n")
|
|
fmt.Fprintf(os.Stderr, " 1. You need to run 'bd import -i %s' to sync the latest changes\n", output)
|
|
fmt.Fprintf(os.Stderr, " 2. Or another workspace added issues that weren't synced to this database\n")
|
|
fmt.Fprintf(os.Stderr, "\n")
|
|
fmt.Fprintf(os.Stderr, "To force export anyway (will lose these issues):\n")
|
|
fmt.Fprintf(os.Stderr, " bd export -o %s --force\n", output)
|
|
os.Exit(1)
|
|
}
|
|
}
|
|
}
|
|
|
|
// Sort by ID for consistent output
|
|
sort.Slice(issues, func(i, j int) bool {
|
|
return issues[i].ID < issues[j].ID
|
|
})
|
|
|
|
// Populate dependencies for all issues in one query (avoids N+1 problem)
|
|
allDeps, err := store.GetAllDependencyRecords(ctx)
|
|
if err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error getting dependencies: %v\n", err)
|
|
os.Exit(1)
|
|
}
|
|
for _, issue := range issues {
|
|
issue.Dependencies = allDeps[issue.ID]
|
|
}
|
|
|
|
// Populate labels for all issues
|
|
for _, issue := range issues {
|
|
labels, err := store.GetLabels(ctx, issue.ID)
|
|
if err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error getting labels for %s: %v\n", issue.ID, err)
|
|
os.Exit(1)
|
|
}
|
|
issue.Labels = labels
|
|
}
|
|
|
|
// Open output
|
|
out := os.Stdout
|
|
var tempFile *os.File
|
|
var tempPath string
|
|
var finalPath string
|
|
if output != "" {
|
|
// Validate output path before creating files
|
|
if err := validateExportPath(output); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
|
os.Exit(1)
|
|
}
|
|
|
|
// Create temporary file in same directory for atomic rename
|
|
dir := filepath.Dir(output)
|
|
base := filepath.Base(output)
|
|
var err error
|
|
tempFile, err = os.CreateTemp(dir, base+".tmp.*")
|
|
if err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error creating temporary file: %v\n", err)
|
|
os.Exit(1)
|
|
}
|
|
tempPath = tempFile.Name()
|
|
finalPath = output
|
|
|
|
// Ensure cleanup on failure
|
|
defer func() {
|
|
if tempFile != nil {
|
|
_ = tempFile.Close()
|
|
_ = os.Remove(tempPath) // Clean up temp file if we haven't renamed it
|
|
}
|
|
}()
|
|
|
|
out = tempFile
|
|
}
|
|
|
|
// Write JSONL (timestamp-only deduplication DISABLED due to bd-160)
|
|
encoder := json.NewEncoder(out)
|
|
exportedIDs := make([]string, 0, len(issues))
|
|
skippedCount := 0
|
|
for _, issue := range issues {
|
|
if err := encoder.Encode(issue); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error encoding issue %s: %v\n", issue.ID, err)
|
|
os.Exit(1)
|
|
}
|
|
|
|
exportedIDs = append(exportedIDs, issue.ID)
|
|
}
|
|
|
|
// Report skipped issues if any (helps debugging bd-159)
|
|
if skippedCount > 0 && (output == "" || output == findJSONLPath()) {
|
|
fmt.Fprintf(os.Stderr, "Skipped %d issue(s) with timestamp-only changes\n", skippedCount)
|
|
}
|
|
|
|
// Only clear dirty issues and auto-flush state if exporting to the default JSONL path
|
|
// This prevents clearing dirty flags when exporting to custom paths (e.g., bd export -o backup.jsonl)
|
|
if output == "" || output == findJSONLPath() {
|
|
// Clear only the issues that were actually exported (fixes bd-52 race condition)
|
|
if err := store.ClearDirtyIssuesByID(ctx, exportedIDs); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to clear dirty issues: %v\n", err)
|
|
}
|
|
|
|
// Clear auto-flush state since we just manually exported
|
|
// This cancels any pending auto-flush timer and marks DB as clean
|
|
clearAutoFlushState()
|
|
|
|
// Store JSONL file hash for integrity validation (bd-160)
|
|
// nolint:gosec // G304: finalPath is validated JSONL export path
|
|
jsonlData, err := os.ReadFile(finalPath)
|
|
if err == nil {
|
|
hasher := sha256.New()
|
|
hasher.Write(jsonlData)
|
|
fileHash := hex.EncodeToString(hasher.Sum(nil))
|
|
if err := store.SetJSONLFileHash(ctx, fileHash); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to update jsonl_file_hash: %v\n", err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// If writing to file, atomically replace the target file
|
|
if tempFile != nil {
|
|
// Close the temp file before renaming
|
|
if err := tempFile.Close(); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to close temporary file: %v\n", err)
|
|
}
|
|
tempFile = nil // Prevent cleanup
|
|
|
|
// Atomically replace the target file
|
|
if err := os.Rename(tempPath, finalPath); err != nil {
|
|
_ = os.Remove(tempPath) // Clean up on failure
|
|
fmt.Fprintf(os.Stderr, "Error replacing output file: %v\n", err)
|
|
os.Exit(1)
|
|
}
|
|
|
|
// Set appropriate file permissions (0600: rw-------)
|
|
if err := os.Chmod(finalPath, 0600); err != nil {
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to set file permissions: %v\n", err)
|
|
}
|
|
|
|
// Verify JSONL file integrity after export
|
|
actualCount, err := countIssuesInJSONL(finalPath)
|
|
if err != nil {
|
|
fmt.Fprintf(os.Stderr, "Error: Export verification failed: %v\n", err)
|
|
os.Exit(1)
|
|
}
|
|
if actualCount != len(exportedIDs) {
|
|
fmt.Fprintf(os.Stderr, "Error: Export verification failed\n")
|
|
fmt.Fprintf(os.Stderr, " Expected: %d issues\n", len(exportedIDs))
|
|
fmt.Fprintf(os.Stderr, " JSONL file: %d lines\n", actualCount)
|
|
fmt.Fprintf(os.Stderr, " Mismatch indicates export failed to write all issues\n")
|
|
os.Exit(1)
|
|
}
|
|
|
|
// Update database mtime to be >= JSONL mtime (fixes #278, #301, #321)
|
|
// Only do this when exporting to default JSONL path (not arbitrary outputs)
|
|
// This prevents validatePreExport from incorrectly blocking on next export
|
|
if output == "" || output == findJSONLPath() {
|
|
beadsDir := filepath.Dir(finalPath)
|
|
dbPath := filepath.Join(beadsDir, "beads.db")
|
|
if err := TouchDatabaseFile(dbPath, finalPath); err != nil {
|
|
// Log warning but don't fail export
|
|
fmt.Fprintf(os.Stderr, "Warning: failed to update database mtime: %v\n", err)
|
|
}
|
|
}
|
|
}
|
|
|
|
// Output statistics if JSON format requested
|
|
if jsonOutput {
|
|
stats := map[string]interface{}{
|
|
"success": true,
|
|
"exported": len(exportedIDs),
|
|
"skipped": skippedCount,
|
|
"total_issues": len(issues),
|
|
}
|
|
if output != "" {
|
|
stats["output_file"] = output
|
|
}
|
|
data, _ := json.MarshalIndent(stats, "", " ")
|
|
fmt.Fprintln(os.Stderr, string(data))
|
|
}
|
|
},
|
|
}
|
|
|
|
func init() {
|
|
exportCmd.Flags().StringP("format", "f", "jsonl", "Export format (jsonl)")
|
|
exportCmd.Flags().StringP("output", "o", "", "Output file (default: stdout)")
|
|
exportCmd.Flags().StringP("status", "s", "", "Filter by status")
|
|
exportCmd.Flags().Bool("force", false, "Force export even if database is empty")
|
|
exportCmd.Flags().BoolVar(&jsonOutput, "json", false, "Output export statistics in JSON format")
|
|
rootCmd.AddCommand(exportCmd)
|
|
}
|