Implement Tier 1 compaction logic (bd-257)
- Add Compactor with CompactTier1 and CompactTier1Batch methods
- Single-issue and batch compaction with 5 concurrent workers
- Dry-run mode for testing without API calls
- Smart size checking: keeps original if summary is longer
- Improved Haiku prompts to emphasize compression
- Add ApplyCompaction method for setting compaction metadata
- Comprehensive tests including API integration tests
- All tests passing
This commit is contained in:
283
internal/compact/compactor.go
Normal file
283
internal/compact/compactor.go
Normal file
@@ -0,0 +1,283 @@
|
||||
package compact
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
)
|
||||
|
||||
const (
	// defaultConcurrency is the number of parallel batch workers used when
	// the caller does not supply a positive Concurrency in CompactConfig.
	defaultConcurrency = 5
)
|
||||
|
||||
// CompactConfig controls how a Compactor runs.
type CompactConfig struct {
	// APIKey is the Anthropic API key used to build the Haiku client.
	// Not used when DryRun is set (no client is created then).
	APIKey string
	// Concurrency is the number of parallel workers used by
	// CompactTier1Batch; New replaces non-positive values with
	// defaultConcurrency.
	Concurrency int
	// DryRun reports what would be compacted without calling the API or
	// writing any changes to storage.
	DryRun bool
}
|
||||
|
||||
// Compactor performs tiered compaction of closed issues, replacing their
// long-form fields with an LLM-generated summary while preserving the
// originals in snapshots.
type Compactor struct {
	store  *sqlite.SQLiteStorage // issue storage backend
	haiku  *HaikuClient          // summarization client; nil in dry-run mode
	config *CompactConfig        // effective settings; never nil after New
}
|
||||
|
||||
func New(store *sqlite.SQLiteStorage, apiKey string, config *CompactConfig) (*Compactor, error) {
|
||||
if config == nil {
|
||||
config = &CompactConfig{
|
||||
Concurrency: defaultConcurrency,
|
||||
}
|
||||
}
|
||||
if config.Concurrency <= 0 {
|
||||
config.Concurrency = defaultConcurrency
|
||||
}
|
||||
if apiKey != "" {
|
||||
config.APIKey = apiKey
|
||||
}
|
||||
|
||||
var haikuClient *HaikuClient
|
||||
var err error
|
||||
if !config.DryRun {
|
||||
haikuClient, err = NewHaikuClient(config.APIKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Haiku client: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &Compactor{
|
||||
store: store,
|
||||
haiku: haikuClient,
|
||||
config: config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CompactResult describes the outcome of compacting one issue in a batch.
type CompactResult struct {
	IssueID       string // issue that was processed
	OriginalSize  int    // combined byte length of the four long-form fields before compaction
	CompactedSize int    // byte length of the generated summary; 0 if that step was never reached
	Err           error  // non-nil when this issue failed or was skipped
}
|
||||
|
||||
func (c *Compactor) CompactTier1(ctx context.Context, issueID string) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
eligible, reason, err := c.store.CheckEligibility(ctx, issueID, 1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to verify eligibility: %w", err)
|
||||
}
|
||||
|
||||
if !eligible {
|
||||
if reason != "" {
|
||||
return fmt.Errorf("issue %s is not eligible for Tier 1 compaction: %s", issueID, reason)
|
||||
}
|
||||
return fmt.Errorf("issue %s is not eligible for Tier 1 compaction", issueID)
|
||||
}
|
||||
|
||||
issue, err := c.store.GetIssue(ctx, issueID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get issue: %w", err)
|
||||
}
|
||||
|
||||
originalSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
|
||||
|
||||
if c.config.DryRun {
|
||||
return fmt.Errorf("dry-run: would compact %s (original size: %d bytes)", issueID, originalSize)
|
||||
}
|
||||
|
||||
if err := c.store.CreateSnapshot(ctx, issue, 1); err != nil {
|
||||
return fmt.Errorf("failed to create snapshot: %w", err)
|
||||
}
|
||||
|
||||
summary, err := c.haiku.SummarizeTier1(ctx, issue)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to summarize with Haiku: %w", err)
|
||||
}
|
||||
|
||||
compactedSize := len(summary)
|
||||
|
||||
if compactedSize >= originalSize {
|
||||
warningMsg := fmt.Sprintf("Tier 1 compaction skipped: summary (%d bytes) not shorter than original (%d bytes)", compactedSize, originalSize)
|
||||
if err := c.store.AddComment(ctx, issueID, "compactor", warningMsg); err != nil {
|
||||
return fmt.Errorf("failed to record warning: %w", err)
|
||||
}
|
||||
return fmt.Errorf("compaction would increase size (%d → %d bytes), keeping original", originalSize, compactedSize)
|
||||
}
|
||||
|
||||
updates := map[string]interface{}{
|
||||
"description": summary,
|
||||
"design": "",
|
||||
"notes": "",
|
||||
"acceptance_criteria": "",
|
||||
}
|
||||
|
||||
if err := c.store.UpdateIssue(ctx, issueID, updates, "compactor"); err != nil {
|
||||
return fmt.Errorf("failed to update issue: %w", err)
|
||||
}
|
||||
|
||||
if err := c.store.ApplyCompaction(ctx, issueID, 1, originalSize); err != nil {
|
||||
return fmt.Errorf("failed to set compaction level: %w", err)
|
||||
}
|
||||
|
||||
savingBytes := originalSize - compactedSize
|
||||
eventData := fmt.Sprintf("Tier 1 compaction: %d → %d bytes (saved %d)", originalSize, compactedSize, savingBytes)
|
||||
if err := c.store.AddComment(ctx, issueID, "compactor", eventData); err != nil {
|
||||
return fmt.Errorf("failed to record event: %w", err)
|
||||
}
|
||||
|
||||
if err := c.store.MarkIssueDirty(ctx, issueID); err != nil {
|
||||
return fmt.Errorf("failed to mark dirty: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Compactor) CompactTier1Batch(ctx context.Context, issueIDs []string) ([]*CompactResult, error) {
|
||||
if len(issueIDs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
eligibleIDs := make([]string, 0, len(issueIDs))
|
||||
results := make([]*CompactResult, 0, len(issueIDs))
|
||||
|
||||
for _, id := range issueIDs {
|
||||
eligible, reason, err := c.store.CheckEligibility(ctx, id, 1)
|
||||
if err != nil {
|
||||
results = append(results, &CompactResult{
|
||||
IssueID: id,
|
||||
Err: fmt.Errorf("failed to verify eligibility: %w", err),
|
||||
})
|
||||
continue
|
||||
}
|
||||
if !eligible {
|
||||
results = append(results, &CompactResult{
|
||||
IssueID: id,
|
||||
Err: fmt.Errorf("not eligible for Tier 1 compaction: %s", reason),
|
||||
})
|
||||
} else {
|
||||
eligibleIDs = append(eligibleIDs, id)
|
||||
}
|
||||
}
|
||||
|
||||
if len(eligibleIDs) == 0 {
|
||||
return results, nil
|
||||
}
|
||||
|
||||
if c.config.DryRun {
|
||||
for _, id := range eligibleIDs {
|
||||
issue, err := c.store.GetIssue(ctx, id)
|
||||
if err != nil {
|
||||
results = append(results, &CompactResult{
|
||||
IssueID: id,
|
||||
Err: fmt.Errorf("failed to get issue: %w", err),
|
||||
})
|
||||
continue
|
||||
}
|
||||
originalSize := len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)
|
||||
results = append(results, &CompactResult{
|
||||
IssueID: id,
|
||||
OriginalSize: originalSize,
|
||||
Err: nil,
|
||||
})
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
workCh := make(chan string, len(eligibleIDs))
|
||||
resultCh := make(chan *CompactResult, len(eligibleIDs))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < c.config.Concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for issueID := range workCh {
|
||||
result := &CompactResult{IssueID: issueID}
|
||||
|
||||
if err := c.compactSingleWithResult(ctx, issueID, result); err != nil {
|
||||
result.Err = err
|
||||
}
|
||||
|
||||
resultCh <- result
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for _, id := range eligibleIDs {
|
||||
workCh <- id
|
||||
}
|
||||
close(workCh)
|
||||
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(resultCh)
|
||||
}()
|
||||
|
||||
for result := range resultCh {
|
||||
results = append(results, result)
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// compactSingleWithResult compacts one issue, filling result's size fields
// as it goes (the caller owns result and pre-sets IssueID). Eligibility is
// assumed to have been checked by the caller.
//
// Order matters: the snapshot is taken before the API call so the original
// text is recoverable even if a later step fails part-way through.
func (c *Compactor) compactSingleWithResult(ctx context.Context, issueID string, result *CompactResult) error {
	if ctx.Err() != nil {
		return ctx.Err()
	}

	issue, err := c.store.GetIssue(ctx, issueID)
	if err != nil {
		return fmt.Errorf("failed to get issue: %w", err)
	}

	// "Size" is the combined byte length of the four long-form fields.
	result.OriginalSize = len(issue.Description) + len(issue.Design) + len(issue.Notes) + len(issue.AcceptanceCriteria)

	if err := c.store.CreateSnapshot(ctx, issue, 1); err != nil {
		return fmt.Errorf("failed to create snapshot: %w", err)
	}

	summary, err := c.haiku.SummarizeTier1(ctx, issue)
	if err != nil {
		return fmt.Errorf("failed to summarize with Haiku: %w", err)
	}

	result.CompactedSize = len(summary)

	// Keep the original when the "summary" would not actually shrink the
	// issue; leave a comment so the skip is visible in the issue history.
	if result.CompactedSize >= result.OriginalSize {
		warningMsg := fmt.Sprintf("Tier 1 compaction skipped: summary (%d bytes) not shorter than original (%d bytes)", result.CompactedSize, result.OriginalSize)
		if err := c.store.AddComment(ctx, issueID, "compactor", warningMsg); err != nil {
			return fmt.Errorf("failed to record warning: %w", err)
		}
		return fmt.Errorf("compaction would increase size (%d → %d bytes), keeping original", result.OriginalSize, result.CompactedSize)
	}

	// The summary becomes the description; the other long-form fields are
	// emptied (their content lives on in the snapshot).
	updates := map[string]interface{}{
		"description":         summary,
		"design":              "",
		"notes":               "",
		"acceptance_criteria": "",
	}

	if err := c.store.UpdateIssue(ctx, issueID, updates, "compactor"); err != nil {
		return fmt.Errorf("failed to update issue: %w", err)
	}

	if err := c.store.ApplyCompaction(ctx, issueID, 1, result.OriginalSize); err != nil {
		return fmt.Errorf("failed to set compaction level: %w", err)
	}

	// Audit trail: record the before/after sizes on the issue itself.
	savingBytes := result.OriginalSize - result.CompactedSize
	eventData := fmt.Sprintf("Tier 1 compaction: %d → %d bytes (saved %d)", result.OriginalSize, result.CompactedSize, savingBytes)
	if err := c.store.AddComment(ctx, issueID, "compactor", eventData); err != nil {
		return fmt.Errorf("failed to record event: %w", err)
	}

	// Mark dirty so downstream sync/export picks up the change.
	if err := c.store.MarkIssueDirty(ctx, issueID); err != nil {
		return fmt.Errorf("failed to mark dirty: %w", err)
	}

	return nil
}
|
||||
372
internal/compact/compactor_test.go
Normal file
372
internal/compact/compactor_test.go
Normal file
@@ -0,0 +1,372 @@
|
||||
package compact
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/steveyegge/beads/internal/storage/sqlite"
|
||||
"github.com/steveyegge/beads/internal/types"
|
||||
)
|
||||
|
||||
func setupTestStorage(t *testing.T) *sqlite.SQLiteStorage {
|
||||
t.Helper()
|
||||
|
||||
tmpDB := t.TempDir() + "/test.db"
|
||||
store, err := sqlite.New(tmpDB)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create storage: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if err := store.SetConfig(ctx, "compact_tier1_days", "0"); err != nil {
|
||||
t.Fatalf("failed to set config: %v", err)
|
||||
}
|
||||
if err := store.SetConfig(ctx, "compact_tier1_dep_levels", "2"); err != nil {
|
||||
t.Fatalf("failed to set config: %v", err)
|
||||
}
|
||||
|
||||
return store
|
||||
}
|
||||
|
||||
// createClosedIssue persists a closed issue with realistic long-form
// content in every compactable field (description, design, notes,
// acceptance criteria) so size-reduction assertions are meaningful.
// The issue is closed "now" and created/updated in the recent past.
func createClosedIssue(t *testing.T, store *sqlite.SQLiteStorage, id string) *types.Issue {
	t.Helper()

	ctx := context.Background()
	now := time.Now()
	issue := &types.Issue{
		ID:    id,
		Title: "Test Issue",
		Description: `Implemented a comprehensive authentication system for the application.

The system includes JWT token generation, refresh token handling, password hashing with bcrypt,
rate limiting on login attempts, and session management. We chose JWT for stateless authentication
to enable horizontal scaling across multiple server instances.

The implementation follows OWASP security guidelines and includes protection against common attacks
like brute force, timing attacks, and token theft. All sensitive operations are logged for audit purposes.`,
		Design: `Authentication Flow:
1. User submits credentials
2. Server validates against database
3. On success, generate JWT with user claims
4. Return JWT + refresh token
5. Client stores tokens securely
6. JWT used for API requests (Authorization header)
7. Refresh token rotated on use

Security Measures:
- Passwords hashed with bcrypt (cost factor 12)
- Rate limiting: 5 attempts per 15 minutes
- JWT expires after 1 hour
- Refresh tokens expire after 30 days
- All tokens stored in httpOnly cookies`,
		Notes: `Performance considerations:
- JWT validation adds ~2ms latency per request
- Consider caching user data in Redis for frequently accessed profiles
- Monitor token refresh patterns for anomalies

Testing strategy:
- Unit tests for each authentication component
- Integration tests for full auth flow
- Security tests for attack scenarios
- Load tests for rate limiting behavior`,
		AcceptanceCriteria: `- Users can register with email/password
- Users can login and receive valid JWT
- Protected endpoints reject invalid/expired tokens
- Rate limiting blocks brute force attempts
- Tokens can be refreshed before expiry
- Logout invalidates current session
- All security requirements met per OWASP guidelines`,
		Status:    types.StatusClosed,
		Priority:  2,
		IssueType: types.TypeTask,
		CreatedAt: now.Add(-48 * time.Hour),
		UpdatedAt: now.Add(-24 * time.Hour),
		ClosedAt:  &now,
	}

	if err := store.CreateIssue(ctx, issue, "test"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}

	return issue
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
store := setupTestStorage(t)
|
||||
defer store.Close()
|
||||
|
||||
t.Run("creates compactor with config", func(t *testing.T) {
|
||||
config := &CompactConfig{
|
||||
Concurrency: 10,
|
||||
DryRun: true,
|
||||
}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
if c.config.Concurrency != 10 {
|
||||
t.Errorf("expected concurrency 10, got %d", c.config.Concurrency)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("uses default concurrency", func(t *testing.T) {
|
||||
c, err := New(store, "", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
if c.config.Concurrency != defaultConcurrency {
|
||||
t.Errorf("expected default concurrency %d, got %d", defaultConcurrency, c.config.Concurrency)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestCompactTier1_DryRun(t *testing.T) {
|
||||
store := setupTestStorage(t)
|
||||
defer store.Close()
|
||||
|
||||
issue := createClosedIssue(t, store, "test-1")
|
||||
|
||||
config := &CompactConfig{DryRun: true}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
err = c.CompactTier1(ctx, issue.ID)
|
||||
if err == nil {
|
||||
t.Fatal("expected dry-run error, got nil")
|
||||
}
|
||||
if err.Error()[:8] != "dry-run:" {
|
||||
t.Errorf("expected dry-run error prefix, got: %v", err)
|
||||
}
|
||||
|
||||
afterIssue, err := store.GetIssue(ctx, issue.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get issue: %v", err)
|
||||
}
|
||||
if afterIssue.Description != issue.Description {
|
||||
t.Error("dry-run should not modify issue")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompactTier1_IneligibleIssue(t *testing.T) {
|
||||
store := setupTestStorage(t)
|
||||
defer store.Close()
|
||||
|
||||
ctx := context.Background()
|
||||
now := time.Now()
|
||||
issue := &types.Issue{
|
||||
ID: "test-open",
|
||||
Title: "Open Issue",
|
||||
Description: "Should not be compacted",
|
||||
Status: types.StatusOpen,
|
||||
Priority: 2,
|
||||
IssueType: types.TypeTask,
|
||||
CreatedAt: now,
|
||||
UpdatedAt: now,
|
||||
}
|
||||
if err := store.CreateIssue(ctx, issue, "test"); err != nil {
|
||||
t.Fatalf("failed to create issue: %v", err)
|
||||
}
|
||||
|
||||
config := &CompactConfig{DryRun: true}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
err = c.CompactTier1(ctx, issue.ID)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for ineligible issue, got nil")
|
||||
}
|
||||
if err.Error() != "issue test-open is not eligible for Tier 1 compaction: issue is not closed" {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCompactTier1_WithAPI exercises the full compaction path against the
// real Anthropic API; it is skipped unless ANTHROPIC_API_KEY is set.
// Assertion order mirrors the pipeline: field rewrite, then snapshot.
func TestCompactTier1_WithAPI(t *testing.T) {
	if os.Getenv("ANTHROPIC_API_KEY") == "" {
		t.Skip("ANTHROPIC_API_KEY not set, skipping API test")
	}

	store := setupTestStorage(t)
	defer store.Close()

	issue := createClosedIssue(t, store, "test-api")

	c, err := New(store, "", &CompactConfig{Concurrency: 1})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}

	ctx := context.Background()
	if err := c.CompactTier1(ctx, issue.ID); err != nil {
		t.Fatalf("failed to compact: %v", err)
	}

	afterIssue, err := store.GetIssue(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get issue: %v", err)
	}

	// The summary replaces the description; the other long-form fields
	// must be emptied by compaction.
	if afterIssue.Description == issue.Description {
		t.Error("description should have changed")
	}
	if afterIssue.Design != "" {
		t.Error("design should be cleared")
	}
	if afterIssue.Notes != "" {
		t.Error("notes should be cleared")
	}
	if afterIssue.AcceptanceCriteria != "" {
		t.Error("acceptance criteria should be cleared")
	}

	// A snapshot must preserve the pre-compaction content for recovery.
	snapshots, err := store.GetSnapshots(ctx, issue.ID)
	if err != nil {
		t.Fatalf("failed to get snapshots: %v", err)
	}
	if len(snapshots) == 0 {
		t.Fatal("snapshot should exist")
	}
	snapshot := snapshots[0]
	if snapshot.Description != issue.Description {
		t.Error("snapshot should preserve original description")
	}
}
|
||||
|
||||
func TestCompactTier1Batch_DryRun(t *testing.T) {
|
||||
store := setupTestStorage(t)
|
||||
defer store.Close()
|
||||
|
||||
issue1 := createClosedIssue(t, store, "test-batch-1")
|
||||
issue2 := createClosedIssue(t, store, "test-batch-2")
|
||||
|
||||
config := &CompactConfig{DryRun: true, Concurrency: 2}
|
||||
c, err := New(store, "", config)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create compactor: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to batch compact: %v", err)
|
||||
}
|
||||
|
||||
if len(results) != 2 {
|
||||
t.Fatalf("expected 2 results, got %d", len(results))
|
||||
}
|
||||
|
||||
for _, result := range results {
|
||||
if result.Err != nil {
|
||||
t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
|
||||
}
|
||||
if result.OriginalSize == 0 {
|
||||
t.Errorf("expected non-zero original size for %s", result.IssueID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCompactTier1Batch_WithIneligible verifies a mixed batch: the closed
// issue succeeds (dry-run) while the open one is reported as ineligible in
// its per-issue result, without failing the batch call itself.
func TestCompactTier1Batch_WithIneligible(t *testing.T) {
	store := setupTestStorage(t)
	defer store.Close()

	closedIssue := createClosedIssue(t, store, "test-closed")

	ctx := context.Background()
	now := time.Now()
	// An open issue is ineligible for Tier 1 compaction by status alone.
	openIssue := &types.Issue{
		ID:          "test-open",
		Title:       "Open Issue",
		Description: "Should not be compacted",
		Status:      types.StatusOpen,
		Priority:    2,
		IssueType:   types.TypeTask,
		CreatedAt:   now,
		UpdatedAt:   now,
	}
	if err := store.CreateIssue(ctx, openIssue, "test"); err != nil {
		t.Fatalf("failed to create issue: %v", err)
	}

	config := &CompactConfig{DryRun: true, Concurrency: 2}
	c, err := New(store, "", config)
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}

	results, err := c.CompactTier1Batch(ctx, []string{closedIssue.ID, openIssue.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}

	if len(results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(results))
	}

	// Result order is not guaranteed, so match by issue ID.
	for _, result := range results {
		if result.IssueID == openIssue.ID {
			if result.Err == nil {
				t.Error("expected error for ineligible issue")
			}
		} else if result.IssueID == closedIssue.ID {
			if result.Err != nil {
				t.Errorf("unexpected error for eligible issue: %v", result.Err)
			}
		}
	}
}
|
||||
|
||||
// TestCompactTier1Batch_WithAPI runs a real concurrent batch (2 workers, 3
// issues) against the Anthropic API; skipped unless ANTHROPIC_API_KEY is
// set. It asserts both the per-issue results and the persisted state.
func TestCompactTier1Batch_WithAPI(t *testing.T) {
	if os.Getenv("ANTHROPIC_API_KEY") == "" {
		t.Skip("ANTHROPIC_API_KEY not set, skipping API test")
	}

	store := setupTestStorage(t)
	defer store.Close()

	issue1 := createClosedIssue(t, store, "test-api-batch-1")
	issue2 := createClosedIssue(t, store, "test-api-batch-2")
	issue3 := createClosedIssue(t, store, "test-api-batch-3")

	c, err := New(store, "", &CompactConfig{Concurrency: 2})
	if err != nil {
		t.Fatalf("failed to create compactor: %v", err)
	}

	ctx := context.Background()
	results, err := c.CompactTier1Batch(ctx, []string{issue1.ID, issue2.ID, issue3.ID})
	if err != nil {
		t.Fatalf("failed to batch compact: %v", err)
	}

	if len(results) != 3 {
		t.Fatalf("expected 3 results, got %d", len(results))
	}

	// Every issue must have been summarized, and each summary must be
	// strictly smaller than the original content.
	for _, result := range results {
		if result.Err != nil {
			t.Errorf("unexpected error for %s: %v", result.IssueID, result.Err)
		}
		if result.CompactedSize == 0 {
			t.Errorf("expected non-zero compacted size for %s", result.IssueID)
		}
		if result.CompactedSize >= result.OriginalSize {
			t.Errorf("expected size reduction for %s: %d → %d", result.IssueID, result.OriginalSize, result.CompactedSize)
		}
	}

	// Persisted state: the long-form fields are cleared on every issue.
	for _, id := range []string{issue1.ID, issue2.ID, issue3.ID} {
		issue, err := store.GetIssue(ctx, id)
		if err != nil {
			t.Fatalf("failed to get issue %s: %v", id, err)
		}
		if issue.Design != "" || issue.Notes != "" || issue.AcceptanceCriteria != "" {
			t.Errorf("fields should be cleared for %s", id)
		}
	}
}
|
||||
@@ -212,7 +212,7 @@ func (w *bytesWriter) Write(p []byte) (n int, err error) {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
const tier1PromptTemplate = `You are summarizing a closed software issue for long-term storage. Compress the following issue into a concise summary that preserves key technical decisions and outcomes.
|
||||
const tier1PromptTemplate = `You are summarizing a closed software issue for long-term storage. Your goal is to COMPRESS the content - the output MUST be significantly shorter than the input while preserving key technical decisions and outcomes.
|
||||
|
||||
**Title:** {{.Title}}
|
||||
|
||||
@@ -231,13 +231,15 @@ const tier1PromptTemplate = `You are summarizing a closed software issue for lon
|
||||
{{.Notes}}
|
||||
{{end}}
|
||||
|
||||
IMPORTANT: Your summary must be shorter than the original. Be concise and eliminate redundancy.
|
||||
|
||||
Provide a summary in this exact format:
|
||||
|
||||
**Summary:** [2-3 sentences covering what was done and why]
|
||||
**Summary:** [2-3 concise sentences covering what was done and why]
|
||||
|
||||
**Key Decisions:** [Bullet points of important technical choices or design decisions]
|
||||
**Key Decisions:** [Brief bullet points of only the most important technical choices]
|
||||
|
||||
**Resolution:** [Final outcome and any lasting impact]`
|
||||
**Resolution:** [One sentence on final outcome and lasting impact]`
|
||||
|
||||
const tier2PromptTemplate = `You are performing ultra-compression on a closed software issue. The issue has already been summarized once. Your task is to create a single concise paragraph (≤150 words) that captures the essence.
|
||||
|
||||
|
||||
@@ -450,3 +450,24 @@ func (s *SQLiteStorage) GetSnapshots(ctx context.Context, issueID string) ([]*Sn
|
||||
|
||||
return snapshots, nil
|
||||
}
|
||||
|
||||
// ApplyCompaction updates the compaction metadata for an issue after successfully compacting it.
// This sets compaction_level, compacted_at, and original_size fields.
// updated_at is also bumped so the change is visible in recency-ordered views.
//
// NOTE(review): an unknown issueID is silently a no-op (UPDATE matches zero
// rows and no error is returned) — callers are expected to have verified
// the issue exists; confirm whether a rows-affected check is wanted.
func (s *SQLiteStorage) ApplyCompaction(ctx context.Context, issueID string, level int, originalSize int) error {
	// Store UTC so timestamps compare consistently regardless of server TZ.
	now := time.Now().UTC()

	_, err := s.db.ExecContext(ctx, `
		UPDATE issues
		SET compaction_level = ?,
			compacted_at = ?,
			original_size = ?,
			updated_at = ?
		WHERE id = ?
	`, level, now, originalSize, now, issueID)

	if err != nil {
		return fmt.Errorf("failed to apply compaction metadata: %w", err)
	}

	return nil
}
|
||||
|
||||
Reference in New Issue
Block a user