Implementing a streaming setup with Usenet

Mukhtar Akere
2025-08-01 15:27:24 +01:00
parent afe577bf2f
commit f9861e3b54
65 changed files with 9437 additions and 924 deletions

pkg/usenet/cache.go Normal file

@@ -0,0 +1,141 @@
package usenet
import (
"sort"
"sync/atomic"
"time"

"github.com/chrisfarms/yenc"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
)
// SegmentCache provides intelligent caching for NNTP segments
type SegmentCache struct {
cache *xsync.Map[string, *CachedSegment]
logger zerolog.Logger
maxSize int64
currentSize atomic.Int64
}
// CachedSegment represents a cached segment with metadata
type CachedSegment struct {
MessageID string `json:"message_id"`
Data []byte `json:"data"`
DecodedSize int64 `json:"decoded_size"` // Actual size after yEnc decoding
DeclaredSize int64 `json:"declared_size"` // Size declared in NZB
CachedAt time.Time `json:"cached_at"`
AccessCount int64 `json:"access_count"`
LastAccess time.Time `json:"last_access"`
FileBegin int64 `json:"file_begin"` // Start byte offset in the file
FileEnd int64 `json:"file_end"` // End byte offset in the file
}
// NewSegmentCache creates a new segment cache
func NewSegmentCache(logger zerolog.Logger) *SegmentCache {
sc := &SegmentCache{
cache: xsync.NewMap[string, *CachedSegment](),
logger: logger.With().Str("component", "segment_cache").Logger(),
maxSize: 50 * 1024 * 1024, // Default max size 50MB
}
return sc
}
// Get retrieves a segment from cache
func (sc *SegmentCache) Get(messageID string) (*CachedSegment, bool) {
segment, found := sc.cache.Load(messageID)
if !found {
return nil, false
}
atomic.AddInt64(&segment.AccessCount, 1) // atomic: Get may be called concurrently
segment.LastAccess = time.Now() // best-effort; a slightly stale timestamp only skews LRU ordering
return segment, true
}
// Put stores a segment in cache with intelligent size management
func (sc *SegmentCache) Put(messageID string, data *yenc.Part, declaredSize int64) {
dataSize := int64(len(data.Body)) // account by the decoded body length, which is what we actually store
currentSize := sc.currentSize.Load()
// Check if we need to make room
wouldExceed := (currentSize + dataSize) > sc.maxSize
if wouldExceed {
sc.evictLRU(dataSize)
}
segment := &CachedSegment{
MessageID: messageID,
Data: make([]byte, len(data.Body)),
DecodedSize: dataSize,
DeclaredSize: declaredSize,
CachedAt: time.Now(),
AccessCount: 1,
LastAccess: time.Now(),
}
copy(segment.Data, data.Body)
sc.cache.Store(messageID, segment)
sc.currentSize.Add(dataSize)
}
// evictLRU evicts least recently used segments to make room
func (sc *SegmentCache) evictLRU(neededSpace int64) {
if neededSpace <= 0 {
return // No need to evict if no space is needed
}
if sc.cache.Size() == 0 {
return // Nothing to evict
}
// Create a sorted list of segments by last access time
type segmentInfo struct {
key string
segment *CachedSegment
lastAccess time.Time
}
segments := make([]segmentInfo, 0, sc.cache.Size())
sc.cache.Range(func(key string, value *CachedSegment) bool {
segments = append(segments, segmentInfo{
key: key,
segment: value,
lastAccess: value.LastAccess,
})
return true // continue iteration
})
// Sort by last access time (oldest first)
sort.Slice(segments, func(i, j int) bool {
return segments[i].lastAccess.Before(segments[j].lastAccess)
})
// Evict segments until we have enough space
freedSpace := int64(0)
for _, seg := range segments {
if freedSpace >= neededSpace {
break
}
sc.cache.Delete(seg.key)
size := int64(len(seg.segment.Data))
freedSpace += size
sc.currentSize.Add(-size) // keep the size accounting in sync with evictions
}
}
// Clear removes all cached segments
func (sc *SegmentCache) Clear() {
sc.cache.Clear()
sc.currentSize.Store(0)
}
// Delete removes a specific segment from cache
func (sc *SegmentCache) Delete(messageID string) {
sc.cache.Delete(messageID)
}
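
A minimal usage sketch of the cache above (the message ID, body bytes, and declared size are illustrative; the yenc.Part fields match the chrisfarms/yenc import):

package main

import (
	"fmt"

	"github.com/chrisfarms/yenc"
	"github.com/rs/zerolog"

	"github.com/sirrobot01/decypharr/pkg/usenet"
)

func main() {
	cache := usenet.NewSegmentCache(zerolog.Nop())

	body := []byte("decoded segment bytes") // stand-in for a yEnc-decoded body
	part := &yenc.Part{Body: body, Size: int64(len(body))}

	// The declared size would normally come from the NZB's <segment bytes="..."> attribute
	cache.Put("<abc123@example.invalid>", part, 768000)

	if seg, ok := cache.Get("<abc123@example.invalid>"); ok {
		fmt.Println(len(seg.Data)) // Get also bumps AccessCount/LastAccess for LRU eviction
	}
}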

pkg/usenet/downloader.go Normal file

@@ -0,0 +1,281 @@
package usenet
import (
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/utils"
"golang.org/x/sync/errgroup"
"os"
"path/filepath"
"time"
)
// DownloadWorker manages concurrent NZB downloads
type DownloadWorker struct {
client *nntp.Client
processor *Processor
logger zerolog.Logger
skipPreCache bool // Skip pre-caching for faster processing
mountFolder string // Folder where downloads are mounted
}
// DownloadJob represents a download job for an NZB
type DownloadJob struct {
NZB *NZB
Action string
Priority int
Callback func(*NZB, error)
DownloadDir string
}
// NewDownloadWorker creates a new download worker
func NewDownloadWorker(config *config.Usenet, client *nntp.Client, processor *Processor) *DownloadWorker {
dw := &DownloadWorker{
processor: processor,
client: client,
logger: logger.New("usenet-download-worker"),
skipPreCache: config.SkipPreCache,
mountFolder: config.MountFolder,
}
return dw
}
func (dw *DownloadWorker) CheckAvailability(ctx context.Context, job *DownloadJob) error {
dw.logger.Debug().
Str("nzb_id", job.NZB.ID).
Msg("Checking NZB availability")
// Grab first file to extract message IDs
if len(job.NZB.Files) == 0 {
return fmt.Errorf("NZB has no files")
}
firstFile := job.NZB.Files[0]
if len(firstFile.Segments) == 0 {
return fmt.Errorf("no segments found in first file of NZB")
}
segments := firstFile.Segments
// Smart sampling: check the first and last segments
samplesToCheck := dw.getSampleSegments(segments)
// Create error group for concurrent checking
g, gCtx := errgroup.WithContext(ctx)
// Limit concurrent goroutines to prevent overwhelming the NNTP server
maxConcurrency := len(samplesToCheck)
if maxConns := dw.client.MinimumMaxConns(); maxConns < maxConcurrency {
maxConcurrency = maxConns
}
g.SetLimit(maxConcurrency)
// Check each segment concurrently
for i, segment := range samplesToCheck {
segment := segment // capture loop variable
segmentNum := i + 1
g.Go(func() error {
select {
case <-gCtx.Done():
return gCtx.Err() // Return if context is canceled
default:
}
conn, cleanup, err := dw.client.GetConnection(gCtx)
if err != nil {
return fmt.Errorf("failed to get NNTP connection: %w", err)
}
defer cleanup() // Ensure connection is returned to the pool
// Check segment availability
seg, err := conn.GetSegment(segment.MessageID, segmentNum)
if err != nil {
return fmt.Errorf("failed to check segment %d availability: %w", segmentNum, err)
}
if seg == nil {
return fmt.Errorf("segment %d not found", segmentNum)
}
return nil
})
}
// Wait for all checks to complete
if err := g.Wait(); err != nil {
return fmt.Errorf("availability check failed: %w", err)
}
// Update storage with availability info
if err := dw.processor.store.Update(job.NZB); err != nil {
dw.logger.Warn().Err(err).Msg("Failed to update NZB with availability info")
}
return nil
}
func (dw *DownloadWorker) Process(ctx context.Context, job *DownloadJob) error {
var (
finalPath string
err error
)
defer func() {
if job.Callback != nil {
job.Callback(job.NZB, err) // closure: reports the final err, not its nil value at defer time
}
}()
switch job.Action {
case "download":
finalPath, err = dw.downloadNZB(ctx, job)
case "symlink":
finalPath, err = dw.symlinkNZB(ctx, job)
case "none":
return nil
default:
// Use symlink as default action
finalPath, err = dw.symlinkNZB(ctx, job)
}
if err != nil {
return err
}
if finalPath == "" {
err = fmt.Errorf("final path is empty after processing job: %s", job.Action)
return err
}
// Use atomic transition to completed state
return dw.processor.store.MarkAsCompleted(job.NZB.ID, finalPath)
}
// downloadNZB downloads an NZB to the specified directory
func (dw *DownloadWorker) downloadNZB(ctx context.Context, job *DownloadJob) (string, error) {
dw.logger.Info().
Str("nzb_id", job.NZB.ID).
Str("download_dir", job.DownloadDir).
Msg("Starting NZB download")
// TODO: implement download logic
return job.DownloadDir, nil
}
// getSampleSegments returns a small sample of segments to check (the first and last)
func (dw *DownloadWorker) getSampleSegments(segments []NZBSegment) []NZBSegment {
totalSegments := len(segments)
// For small NZBs, check all segments
if totalSegments <= 2 {
return segments
}
var samplesToCheck []NZBSegment
// Always check the first and last segments
samplesToCheck = append(samplesToCheck, segments[0]) // First segment
samplesToCheck = append(samplesToCheck, segments[totalSegments-1]) // Last segment
return samplesToCheck
}
func (dw *DownloadWorker) symlinkNZB(ctx context.Context, job *DownloadJob) (string, error) {
dw.logger.Info().
Str("nzb_id", job.NZB.ID).
Str("symlink_dir", job.DownloadDir).
Msg("Creating symlinks for NZB")
if job.NZB == nil {
return "", fmt.Errorf("NZB is nil")
}
if dw.mountFolder == "" {
return "", fmt.Errorf("mount folder is empty")
}
mountFolder := filepath.Join(dw.mountFolder, job.NZB.Name) // e.g. /mnt/rclone/usenet/__all__/TV_SHOW
symlinkPath := filepath.Join(job.DownloadDir, job.NZB.Name) // e.g. /mnt/symlinks/usenet/sonarr/TV_SHOW
if err := os.MkdirAll(symlinkPath, 0755); err != nil {
return "", fmt.Errorf("failed to create symlink directory: %w", err)
}
return dw.createSymlinksWebdav(job.NZB, mountFolder, symlinkPath)
}
func (dw *DownloadWorker) createSymlinksWebdav(nzb *NZB, mountPath, symlinkPath string) (string, error) {
files := nzb.GetFiles()
remainingFiles := make(map[string]NZBFile)
for _, file := range files {
remainingFiles[file.Name] = file
}
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
timeout := time.After(30 * time.Minute)
filePaths := make([]string, 0, len(files))
maxLogCount := 10 // Limit the number of log messages to avoid flooding
for len(remainingFiles) > 0 {
select {
case <-ticker.C:
entries, err := os.ReadDir(mountPath)
if err != nil {
if maxLogCount > 0 && !errors.Is(err, os.ErrNotExist) {
// Only log if it's not a "not found" error
// This is due to the fact the mount path may not exist YET
dw.logger.Warn().
Err(err).
Str("mount_path", mountPath).
Msg("Failed to read directory, retrying")
maxLogCount--
}
continue
}
// Check which files exist in this batch
for _, entry := range entries {
filename := entry.Name()
dw.logger.Debug().
Str("filename", filename).
Msg("Checking file existence in mount path")
if file, exists := remainingFiles[filename]; exists {
fullFilePath := filepath.Join(mountPath, filename)
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
dw.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(remainingFiles, filename)
dw.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
dw.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
return symlinkPath, fmt.Errorf("timeout waiting for files")
}
}
if dw.skipPreCache {
return symlinkPath, nil
}
// Pre-cache the start of each file (e.g. the first 256KB-1MB) in the background
go func() {
defer func() {
if r := recover(); r != nil {
dw.logger.Error().
Interface("panic", r).
Str("nzbName", nzb.Name).
Msg("Recovered from panic in pre-cache goroutine")
}
}()
if err := utils.PreCacheFile(filePaths); err != nil {
dw.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
dw.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
}
}()
return symlinkPath, nil
}
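
A sketch of handing a job to the worker (paths and the wrapper function are illustrative). The deferred closure in Process guarantees the callback receives the job's final error:

package example

import (
	"context"
	"log"

	"github.com/sirrobot01/decypharr/pkg/usenet"
)

func submit(ctx context.Context, worker *usenet.DownloadWorker, nzb *usenet.NZB) {
	job := &usenet.DownloadJob{
		NZB:         nzb,
		Action:      "symlink", // "download", "symlink", or "none"
		DownloadDir: "/mnt/symlinks/usenet/sonarr",
		Callback: func(n *usenet.NZB, err error) {
			if err != nil {
				log.Printf("job %s failed: %v", n.ID, err)
				return
			}
			log.Printf("job %s completed", n.ID)
		},
	}
	if err := worker.Process(ctx, job); err != nil {
		log.Printf("process: %v", err)
	}
}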

pkg/usenet/errors.go Normal file

@@ -0,0 +1,353 @@
package usenet
import (
"errors"
"fmt"
"math"
"net"
"strings"
"sync"
"time"
)
var (
ErrConnectionFailed = errors.New("failed to connect to NNTP server")
ErrServerUnavailable = errors.New("NNTP server unavailable")
ErrRateLimitExceeded = errors.New("rate limit exceeded")
ErrDownloadTimeout = errors.New("download timeout")
)
// ErrInvalidNZBf creates a formatted error for NZB validation failures
func ErrInvalidNZBf(format string, args ...interface{}) error {
return fmt.Errorf("invalid NZB: "+format, args...)
}
// Error represents a structured usenet error
type Error struct {
Code string
Message string
Err error
ServerAddr string
Timestamp time.Time
Retryable bool
}
func (e *Error) Error() string {
if e.ServerAddr != "" {
return fmt.Sprintf("usenet error [%s] on %s: %s", e.Code, e.ServerAddr, e.Message)
}
return fmt.Sprintf("usenet error [%s]: %s", e.Code, e.Message)
}
func (e *Error) Unwrap() error {
return e.Err
}
func (e *Error) Is(target error) bool {
if target == nil {
return false
}
return e.Err != nil && errors.Is(e.Err, target)
}
// NewUsenetError creates a new UsenetError
func NewUsenetError(code, message string, err error) *Error {
return &Error{
Code: code,
Message: message,
Err: err,
Timestamp: time.Now(),
Retryable: isRetryableError(err),
}
}
// NewServerError creates a new UsenetError with server address
func NewServerError(code, message, serverAddr string, err error) *Error {
return &Error{
Code: code,
Message: message,
Err: err,
ServerAddr: serverAddr,
Timestamp: time.Now(),
Retryable: isRetryableError(err),
}
}
// isRetryableError determines if an error is retryable
func isRetryableError(err error) bool {
if err == nil {
return false
}
// Network timeouts are retryable
var netErr net.Error
if errors.As(err, &netErr) {
return netErr.Timeout()
}
}
// DNS errors are retryable
var dnsErr *net.DNSError
if errors.As(err, &dnsErr) {
return dnsErr.Temporary()
}
// Operations on closed connections are retryable (reconnect and retry)
if errors.Is(err, net.ErrClosed) {
return true
}
}
// Check error message for retryable conditions
errMsg := strings.ToLower(err.Error())
retryableMessages := []string{
"connection refused",
"connection reset",
"connection timed out",
"network is unreachable",
"host is unreachable",
"temporary failure",
"service unavailable",
"server overloaded",
"rate limit",
"too many connections",
}
for _, msg := range retryableMessages {
if strings.Contains(errMsg, msg) {
return true
}
}
return false
}
// RetryConfig defines retry behavior
type RetryConfig struct {
MaxRetries int
InitialDelay time.Duration
MaxDelay time.Duration
BackoffFactor float64
RetryableErrors []error
}
// DefaultRetryConfig returns a default retry configuration
func DefaultRetryConfig() *RetryConfig {
return &RetryConfig{
MaxRetries: 3,
InitialDelay: 1 * time.Second,
MaxDelay: 30 * time.Second,
BackoffFactor: 2.0,
RetryableErrors: []error{
ErrConnectionFailed,
ErrServerUnavailable,
ErrRateLimitExceeded,
ErrDownloadTimeout,
},
}
}
// ShouldRetry determines if an error should be retried
func (rc *RetryConfig) ShouldRetry(err error, attempt int) bool {
if attempt >= rc.MaxRetries {
return false
}
// Check if it's a retryable UsenetError
var usenetErr *Error
if errors.As(err, &usenetErr) {
return usenetErr.Retryable
}
// Check if it's in the list of retryable errors
for _, retryableErr := range rc.RetryableErrors {
if errors.Is(err, retryableErr) {
return true
}
}
return isRetryableError(err)
}
// GetDelay calculates the delay for the next retry using exponential backoff
func (rc *RetryConfig) GetDelay(attempt int) time.Duration {
if attempt <= 0 {
return rc.InitialDelay
}
delay := time.Duration(float64(rc.InitialDelay) * math.Pow(rc.BackoffFactor, float64(attempt)))
if delay > rc.MaxDelay {
delay = rc.MaxDelay
}
return delay
}
// RetryWithBackoff retries a function with exponential backoff
func RetryWithBackoff(config *RetryConfig, operation func() error) error {
var lastErr error
for attempt := 0; attempt <= config.MaxRetries; attempt++ {
if attempt > 0 {
delay := config.GetDelay(attempt)
time.Sleep(delay)
}
err := operation()
if err == nil {
return nil
}
lastErr = err
if !config.ShouldRetry(err, attempt) {
break
}
}
return lastErr
}
// CircuitBreakerConfig defines circuit breaker behavior
type CircuitBreakerConfig struct {
MaxFailures int
ResetTimeout time.Duration
CheckInterval time.Duration
FailureCallback func(error)
}
// CircuitBreaker implements a circuit breaker pattern for NNTP connections
type CircuitBreaker struct {
config *CircuitBreakerConfig
failures int
lastFailure time.Time
state string // "closed", "open", "half-open"
mu *sync.RWMutex
}
// NewCircuitBreaker creates a new circuit breaker
func NewCircuitBreaker(config *CircuitBreakerConfig) *CircuitBreaker {
if config == nil {
config = &CircuitBreakerConfig{
MaxFailures: 5,
ResetTimeout: 60 * time.Second,
CheckInterval: 10 * time.Second,
}
}
return &CircuitBreaker{
config: config,
state: "closed",
mu: &sync.RWMutex{},
}
}
// Execute executes an operation through the circuit breaker
func (cb *CircuitBreaker) Execute(operation func() error) error {
cb.mu.RLock()
state := cb.state
failures := cb.failures
lastFailure := cb.lastFailure
cb.mu.RUnlock()
// Check if we should attempt reset
if state == "open" && time.Since(lastFailure) > cb.config.ResetTimeout {
cb.mu.Lock()
cb.state = "half-open"
cb.mu.Unlock()
state = "half-open"
}
if state == "open" {
return NewUsenetError("circuit_breaker_open",
fmt.Sprintf("circuit breaker is open (failures: %d)", failures),
ErrServerUnavailable)
}
err := operation()
cb.mu.Lock()
defer cb.mu.Unlock()
if err != nil {
cb.failures++
cb.lastFailure = time.Now()
if cb.failures >= cb.config.MaxFailures {
cb.state = "open"
}
if cb.config.FailureCallback != nil {
go func() {
cb.config.FailureCallback(err)
}()
}
return err
}
// Success - reset if we were in half-open state
if cb.state == "half-open" {
cb.state = "closed"
cb.failures = 0
}
return nil
}
// GetState returns the current circuit breaker state
func (cb *CircuitBreaker) GetState() string {
cb.mu.RLock()
defer cb.mu.RUnlock()
return cb.state
}
// Reset manually resets the circuit breaker
func (cb *CircuitBreaker) Reset() {
cb.mu.Lock()
defer cb.mu.Unlock()
cb.state = "closed"
cb.failures = 0
}
// ValidationError represents validation errors
type ValidationError struct {
Field string
Value interface{}
Message string
}
func (e *ValidationError) Error() string {
return fmt.Sprintf("validation error for field '%s': %s", e.Field, e.Message)
}
// ValidateNZBContent validates NZB content
func ValidateNZBContent(content []byte) error {
if len(content) == 0 {
return &ValidationError{
Field: "content",
Value: len(content),
Message: "NZB content cannot be empty",
}
}
if len(content) > 100*1024*1024 { // 100MB limit
return &ValidationError{
Field: "content",
Value: len(content),
Message: "NZB content exceeds maximum size limit (100MB)",
}
}
contentStr := string(content)
if !strings.Contains(contentStr, "<nzb") {
maxLen := 100
if len(contentStr) < maxLen {
maxLen = len(contentStr)
}
return &ValidationError{
Field: "content",
Value: contentStr[:maxLen],
Message: "content does not appear to be valid NZB format",
}
}
return nil
}
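
Tying these pieces together: a sketch that runs a hypothetical NNTP operation through the circuit breaker inside the retry loop, so persistent failures trip the breaker while transient ones get doubling delays (capped at 30s with the defaults). Sharing one breaker per server keeps the failure count meaningful across calls:

package example

import (
	"github.com/sirrobot01/decypharr/pkg/usenet"
)

// fetch is a hypothetical stand-in for a real NNTP call.
func fetchWithResilience(cb *usenet.CircuitBreaker, fetch func() error) error {
	cfg := usenet.DefaultRetryConfig()
	return usenet.RetryWithBackoff(cfg, func() error {
		return cb.Execute(fetch)
	})
}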

pkg/usenet/misc.go Normal file

@@ -0,0 +1,83 @@
package usenet
import (
"io"
"strings"
)
func (s *Streamer) isSkippableError(err error) bool {
if err == nil {
return false
}
// EOF is usually expected/skippable
if err == io.EOF {
return true
}
errMsg := strings.ToLower(err.Error())
// Client disconnection errors
if strings.Contains(errMsg, "client disconnected") ||
strings.Contains(errMsg, "broken pipe") ||
strings.Contains(errMsg, "connection reset") ||
strings.Contains(errMsg, "write failed") ||
strings.Contains(errMsg, "writer is nil") ||
strings.Contains(errMsg, "closed pipe") ||
strings.Contains(errMsg, "context canceled") ||
strings.Contains(errMsg, "operation timed out") ||
strings.Contains(errMsg, "eof") {
return true
}
return false
}
func RecalculateSegmentBoundaries(
segments []NZBSegment,
actualSizes map[string]int64,
) []NZBSegment {
if len(segments) == 0 {
return segments
}
result := make([]NZBSegment, len(segments))
var currentOffset int64
for i, seg := range segments {
// Copy original segment metadata
result[i] = seg
result[i].StartOffset = currentOffset
// Determine which size to use: actual decoded size, or fall back
var size int64
if actual, ok := actualSizes[seg.MessageID]; ok {
size = actual
} else {
// decoded size as computed by parser (EndOffset-StartOffset)
size = seg.EndOffset - seg.StartOffset
}
result[i].EndOffset = currentOffset + size
currentOffset += size
}
return result
}
// GetSegmentActualSizes extracts actual decoded sizes from cache
func GetSegmentActualSizes(segments []NZBSegment, cache *SegmentCache) map[string]int64 {
actualSizes := make(map[string]int64)
if cache == nil {
return actualSizes
}
for _, segment := range segments {
if cached, found := cache.Get(segment.MessageID); found {
actualSizes[segment.MessageID] = int64(len(cached.Data))
}
}
return actualSizes
}
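
A sketch of the intended flow for the two helpers above (the wrapper name is illustrative): once some segments have been decoded and cached, swap the parser's ratio-based offset estimates for exact decoded sizes:

package example

import (
	"github.com/sirrobot01/decypharr/pkg/usenet"
)

func refineOffsets(file *usenet.NZBFile, cache *usenet.SegmentCache) {
	// Exact decoded sizes for whichever segments happen to be cached
	sizes := usenet.GetSegmentActualSizes(file.Segments, cache)
	// Rebuild Start/End offsets; uncached segments keep their estimates
	file.Segments = usenet.RecalculateSegmentBoundaries(file.Segments, sizes)
}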

pkg/usenet/nzb.go Normal file

@@ -0,0 +1,152 @@
package usenet
import (
"fmt"
"strings"
)
type SegmentRange struct {
Segment NZBSegment // Reference to the segment
ByteStart int64 // Start offset within this segment
ByteEnd int64 // End offset within this segment
TotalStart int64 // Absolute start position in file
TotalEnd int64 // Absolute end position in file
}
func (nzb *NZB) GetFileByName(name string) *NZBFile {
for i := range nzb.Files {
f := &nzb.Files[i]
if f.IsDeleted {
continue
}
if f.Name == name {
return f
}
}
return nil
}
func (nzb *NZB) MarkFileAsRemoved(fileName string) error {
for i, file := range nzb.Files {
if file.Name == fileName {
// Mark the file as deleted
nzb.Files[i].IsDeleted = true
return nil
}
}
return fmt.Errorf("file %s not found in NZB %s", fileName, nzb.ID)
}
func (nf *NZBFile) GetSegmentsInRange(segmentSize int64, start, end int64) []SegmentRange {
if end == -1 {
end = nf.Size - 1
}
var segmentRanges []SegmentRange
var cumulativeSize int64
for i, segment := range nf.Segments {
// Use the file's segment size (uniform)
if segmentSize <= 0 {
segmentSize = segment.Bytes // Fallback to actual segment size if not set
}
// Handle last segment which might be smaller
if i == len(nf.Segments)-1 {
segmentSize = segment.Bytes // Last segment uses actual size
}
cumulativeSize += segmentSize
// Skip segments that end before our start position
if cumulativeSize <= start {
continue
}
// Calculate this segment's boundaries
segmentStart := cumulativeSize - segmentSize
segmentEnd := cumulativeSize - 1
// Calculate intersection with requested range
rangeStart := max(start, segmentStart)
rangeEnd := min(end, segmentEnd)
segmentRange := SegmentRange{
Segment: segment,
ByteStart: rangeStart - segmentStart, // Offset within segment
ByteEnd: rangeEnd - segmentStart, // End offset within segment
TotalStart: rangeStart, // Absolute position
TotalEnd: rangeEnd, // Absolute position
}
segmentRanges = append(segmentRanges, segmentRange)
// Stop if we've covered the entire requested range
if cumulativeSize >= end+1 {
break
}
}
return segmentRanges
}
func (nf *NZBFile) ConvertToSegmentRanges(segments []NZBSegment) []SegmentRange {
var segmentRanges []SegmentRange
var cumulativeSize int64
for i, segment := range segments {
// Use the file's segment size (uniform)
segmentSize := nf.SegmentSize
// Handle last segment which might be smaller
if i == len(segments)-1 {
segmentSize = segment.Bytes // Last segment uses actual size
}
cumulativeSize += segmentSize
segmentRange := SegmentRange{
Segment: segment,
ByteStart: 0, // Always starts at 0 within the segment
ByteEnd: segmentSize - 1, // Ends at segment size - 1
TotalStart: cumulativeSize - segmentSize, // Absolute start position
TotalEnd: cumulativeSize - 1, // Absolute end position
}
segmentRanges = append(segmentRanges, segmentRange)
}
return segmentRanges
}
func (nf *NZBFile) GetCacheKey() string {
return fmt.Sprintf("rar_%s_%d", nf.Name, nf.Size)
}
func (nzb *NZB) GetFiles() []NZBFile {
files := make([]NZBFile, 0, len(nzb.Files))
for _, file := range nzb.Files {
if !file.IsDeleted {
files = append(files, file)
}
}
return files[:len(files):len(files)] // Full slice expression caps capacity so callers' appends can't share memory
}
// ValidateNZB performs basic validation on NZB content
func ValidateNZB(content []byte) error {
if len(content) == 0 {
return fmt.Errorf("empty NZB content")
}
// Check for basic XML structure
if !strings.Contains(string(content), "<nzb") {
return fmt.Errorf("invalid NZB format: missing <nzb> tag")
}
if !strings.Contains(string(content), "<file") {
return fmt.Errorf("invalid NZB format: no files found")
}
return nil
}
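
A sketch of mapping an HTTP byte range onto segments with GetSegmentsInRange (offsets illustrative):

package example

import (
	"fmt"

	"github.com/sirrobot01/decypharr/pkg/usenet"
)

func planRange(file *usenet.NZBFile) {
	// Plan a read of decoded bytes 1,000,000-2,499,999 of the file
	for _, r := range file.GetSegmentsInRange(file.SegmentSize, 1_000_000, 2_499_999) {
		// Fetch r.Segment.MessageID, yEnc-decode it, then emit
		// bytes [r.ByteStart, r.ByteEnd] of the decoded body.
		fmt.Printf("%s: %d-%d\n", r.Segment.MessageID, r.ByteStart, r.ByteEnd)
	}
}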

pkg/usenet/parser.go Normal file

@@ -0,0 +1,863 @@
package usenet
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"github.com/Tensai75/nzbparser"
"github.com/chrisfarms/yenc"
"github.com/nwaples/rardecode/v2"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sourcegraph/conc/pool"
"io"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
)
// NZBParser provides a simplified, robust NZB parser
type NZBParser struct {
logger zerolog.Logger
client *nntp.Client
cache *SegmentCache
}
type FileGroup struct {
BaseName string
ActualFilename string
Type FileType
Files []nzbparser.NzbFile
Groups map[string]struct{}
}
type FileInfo struct {
Size int64
ChunkSize int64
Name string
}
// NewNZBParser creates a new simplified NZB parser
func NewNZBParser(client *nntp.Client, cache *SegmentCache, logger zerolog.Logger) *NZBParser {
return &NZBParser{
logger: logger.With().Str("component", "nzb_parser").Logger(),
client: client,
cache: cache,
}
}
type FileType int
const (
FileTypeMedia FileType = iota // Direct media files (.mkv, .mp4, etc.) // Check internal/utils.IsMediaFile
FileTypeRar // RAR archives (.rar, .r00, .r01, etc.)
FileTypeArchive // Other archives (.7z, .zip, etc.)
FileTypeIgnore // Files to ignore (.nfo, .txt, par2 etc.)
FileTypeUnknown
)
var (
// RAR file patterns - simplified and more accurate
rarMainPattern = regexp.MustCompile(`\.rar$`)
rarPartPattern = regexp.MustCompile(`\.r\d{2}$`) // .r00, .r01, etc.
rarVolumePattern = regexp.MustCompile(`\.part\d+\.rar$`)
ignoreExtensions = []string{".par2", ".sfv", ".nfo", ".jpg", ".png", ".txt", ".srt", ".idx", ".sub"}
sevenZMainPattern = regexp.MustCompile(`\.7z$`)
sevenZPartPattern = regexp.MustCompile(`\.7z\.\d{3}$`)
extWithNumberPattern = regexp.MustCompile(`\.[^ "\.]*\.\d+$`)
volPar2Pattern = regexp.MustCompile(`(?i)\.vol\d+\+\d+\.par2?$`)
partPattern = regexp.MustCompile(`(?i)\.part\d+\.[^ "\.]*$`)
regularExtPattern = regexp.MustCompile(`\.[^ "\.]*$`)
)
type PositionTracker struct {
reader io.Reader
position int64
}
func (pt *PositionTracker) Read(p []byte) (n int, err error) {
n, err = pt.reader.Read(p)
pt.position += int64(n)
return n, err
}
func (pt *PositionTracker) Position() int64 {
return pt.position
}
func (p *NZBParser) Parse(ctx context.Context, filename string, category string, content []byte) (*NZB, error) {
// Parse raw XML
raw, err := nzbparser.Parse(bytes.NewReader(content))
if err != nil {
return nil, fmt.Errorf("failed to parse NZB content: %w", err)
}
// Create base NZB structure
nzb := &NZB{
Files: []NZBFile{},
Status: "parsed",
Category: category,
Name: determineNZBName(filename, raw.Meta),
Title: raw.Meta["title"],
Password: raw.Meta["password"],
}
// Group files by base name and type
fileGroups := p.groupFiles(ctx, raw.Files)
// Process each group
files := p.processFileGroups(ctx, fileGroups, nzb.Password)
if len(files) == 0 {
return nil, fmt.Errorf("no valid files found in NZB")
}
// Calculate total size before generating the ID, since the ID hashes TotalSize
for _, file := range files {
nzb.TotalSize += file.Size
}
nzb.ID = generateID(nzb)
for i := range files {
files[i].NzbID = nzb.ID
nzb.Files = append(nzb.Files, files[i])
}
return nzb, nil
}
func (p *NZBParser) groupFiles(ctx context.Context, files nzbparser.NzbFiles) map[string]*FileGroup {
var unknownFiles []nzbparser.NzbFile
var knownFiles []struct {
file nzbparser.NzbFile
fileType FileType
}
for _, file := range files {
if len(file.Segments) == 0 {
continue
}
fileType := p.detectFileType(file.Filename)
if fileType == FileTypeUnknown {
unknownFiles = append(unknownFiles, file)
} else {
knownFiles = append(knownFiles, struct {
file nzbparser.NzbFile
fileType FileType
}{file, fileType})
}
}
p.logger.Info().
Int("known_files", len(knownFiles)).
Int("unknown_files", len(unknownFiles)).
Msg("File type detection")
unknownResults := p.batchDetectContentTypes(ctx, unknownFiles)
allFiles := make([]struct {
file nzbparser.NzbFile
fileType FileType
actualFilename string
}, 0, len(knownFiles)+len(unknownResults))
// Add known files
for _, known := range knownFiles {
allFiles = append(allFiles, struct {
file nzbparser.NzbFile
fileType FileType
actualFilename string
}{known.file, known.fileType, known.file.Filename})
}
// Add unknown results
allFiles = append(allFiles, unknownResults...)
return p.groupProcessedFiles(allFiles)
}
// Batch process unknown files in parallel
func (p *NZBParser) batchDetectContentTypes(ctx context.Context, unknownFiles []nzbparser.NzbFile) []struct {
file nzbparser.NzbFile
fileType FileType
actualFilename string
} {
if len(unknownFiles) == 0 {
return nil
}
// Use worker pool for parallel processing
workers := min(len(unknownFiles), 10) // Max 10 concurrent downloads
workerPool := pool.New().WithMaxGoroutines(workers).WithContext(ctx)
type result struct {
index int
file nzbparser.NzbFile
fileType FileType
actualFilename string
}
results := make([]result, len(unknownFiles))
// Process each unknown file
for i, file := range unknownFiles {
i, file := i, file // Capture loop variables
workerPool.Go(func(ctx context.Context) error {
detectedType, actualFilename := p.detectFileTypeByContent(ctx, file)
// Each goroutine writes a distinct index, so no locking is needed
results[i] = result{
index: i,
file: file,
fileType: detectedType,
actualFilename: actualFilename,
}
return nil // Don't fail the entire batch for one file
})
}
// Wait for all to complete
if err := workerPool.Wait(); err != nil {
p.logger.Warn().Err(err).Msg("Content type detection batch aborted")
return nil
}
// Convert results
processedFiles := make([]struct {
file nzbparser.NzbFile
fileType FileType
actualFilename string
}, 0, len(results))
for _, result := range results {
if result.fileType != FileTypeUnknown {
processedFiles = append(processedFiles, struct {
file nzbparser.NzbFile
fileType FileType
actualFilename string
}{result.file, result.fileType, result.actualFilename})
}
}
return processedFiles
}
// Group already processed files (fast)
func (p *NZBParser) groupProcessedFiles(allFiles []struct {
file nzbparser.NzbFile
fileType FileType
actualFilename string
}) map[string]*FileGroup {
groups := make(map[string]*FileGroup)
for _, item := range allFiles {
// Skip unwanted files
if item.fileType == FileTypeIgnore || item.fileType == FileTypeArchive {
continue
}
var groupKey string
if item.actualFilename != "" && item.actualFilename != item.file.Filename {
groupKey = p.getBaseFilename(item.actualFilename)
} else {
groupKey = item.file.Basefilename
}
group, exists := groups[groupKey]
if !exists {
group = &FileGroup{
ActualFilename: item.actualFilename,
BaseName: groupKey,
Type: item.fileType,
Files: []nzbparser.NzbFile{},
Groups: make(map[string]struct{}),
}
groups[groupKey] = group
}
// Update filename
item.file.Filename = item.actualFilename
group.Files = append(group.Files, item.file)
for _, g := range item.file.Groups {
group.Groups[g] = struct{}{}
}
}
return groups
}
func (p *NZBParser) getBaseFilename(filename string) string {
if filename == "" {
return ""
}
// Strip surrounding quotes, spaces, and hyphens
cleaned := strings.Trim(filename, `" -`)
// Check for vol\d+\+\d+\.par2? (PAR2 volume files)
if volPar2Pattern.MatchString(cleaned) {
return volPar2Pattern.ReplaceAllString(cleaned, "")
}
// Check for part\d+\.[^ "\.]* (part files like .part01.rar)
if partPattern.MatchString(cleaned) {
return partPattern.ReplaceAllString(cleaned, "")
}
// Check for [^ "\.]*\.\d+ (extensions with numbers like .7z.001, .r01, etc.)
if extWithNumberPattern.MatchString(cleaned) {
return extWithNumberPattern.ReplaceAllString(cleaned, "")
}
// Check for regular extensions [^ "\.]*
if regularExtPattern.MatchString(cleaned) {
return regularExtPattern.ReplaceAllString(cleaned, "")
}
return cleaned
}
// Simplified file type detection
func (p *NZBParser) detectFileType(filename string) FileType {
lower := strings.ToLower(filename)
// Check for media first
if p.isMediaFile(lower) {
return FileTypeMedia
}
// Check rar next
if p.isRarFile(lower) {
return FileTypeRar
}
// Check for 7z files
if sevenZMainPattern.MatchString(lower) || sevenZPartPattern.MatchString(lower) {
return FileTypeArchive
}
if strings.HasSuffix(lower, ".zip") || strings.HasSuffix(lower, ".tar") ||
strings.HasSuffix(lower, ".gz") || strings.HasSuffix(lower, ".bz2") {
return FileTypeArchive
}
// Check for ignored file types
for _, ext := range ignoreExtensions {
if strings.HasSuffix(lower, ext) {
return FileTypeIgnore
}
}
// Default to unknown type
return FileTypeUnknown
}
// Simplified RAR detection
func (p *NZBParser) isRarFile(filename string) bool {
return rarMainPattern.MatchString(filename) ||
rarPartPattern.MatchString(filename) ||
rarVolumePattern.MatchString(filename)
}
func (p *NZBParser) isMediaFile(filename string) bool {
return utils.IsMediaFile(filename)
}
func (p *NZBParser) processFileGroups(ctx context.Context, groups map[string]*FileGroup, password string) []NZBFile {
if len(groups) == 0 {
return nil
}
// Channel to collect results
results := make(chan *NZBFile, len(groups))
var wg sync.WaitGroup
// Process each group concurrently
for _, group := range groups {
wg.Add(1)
go func(g *FileGroup) {
defer wg.Done()
file := p.processFileGroup(ctx, g, password)
results <- file // nil values are fine, we'll filter later
}(group)
}
// Close results channel when all goroutines complete
go func() {
wg.Wait()
close(results)
}()
// Collect results
var files []NZBFile
for file := range results {
if file != nil {
files = append(files, *file)
}
}
return files
}
// Simplified individual group processing
func (p *NZBParser) processFileGroup(ctx context.Context, group *FileGroup, password string) *NZBFile {
switch group.Type {
case FileTypeMedia:
return p.processMediaFile(ctx, group, password)
case FileTypeRar:
return p.processRarArchive(ctx, group, password)
case FileTypeArchive:
return nil
default:
// Treat unknown files as media files with conservative estimation
return p.processMediaFile(ctx, group, password)
}
}
// Process regular media files
func (p *NZBParser) processMediaFile(ctx context.Context, group *FileGroup, password string) *NZBFile {
if len(group.Files) == 0 {
return nil
}
// Sort files for consistent ordering
sort.Slice(group.Files, func(i, j int) bool {
return group.Files[i].Number < group.Files[j].Number
})
// Determine extension
ext := p.determineExtension(group)
file := &NZBFile{
Name: group.BaseName + ext,
Groups: p.getGroupsList(group.Groups),
Segments: []NZBSegment{},
Password: password,
IsRarArchive: false,
}
currentOffset := int64(0)
ratio := 0.968 // ~3.2% yEnc overhead: estimate decoded size from the raw article size
for _, nzbFile := range group.Files {
sort.Slice(nzbFile.Segments, func(i, j int) bool {
return nzbFile.Segments[i].Number < nzbFile.Segments[j].Number
})
for _, segment := range nzbFile.Segments {
decodedSize := int64(float64(segment.Bytes) * ratio)
seg := NZBSegment{
Number: segment.Number,
MessageID: segment.Id,
Bytes: int64(segment.Bytes),
StartOffset: currentOffset,
EndOffset: currentOffset + decodedSize,
Group: file.Groups[0],
}
file.Segments = append(file.Segments, seg)
currentOffset += decodedSize
}
}
fileInfo, err := p.getFileInfo(ctx, group) // honor the caller's context instead of context.Background()
if err != nil {
p.logger.Warn().Err(err).Msg("Failed to get file info, using fallback")
file.Size = currentOffset
file.SegmentSize = currentOffset / int64(len(file.Segments)) // Average segment size
} else {
file.Size = fileInfo.Size
file.SegmentSize = fileInfo.ChunkSize
}
return file
}
func (p *NZBParser) processRarArchive(ctx context.Context, group *FileGroup, password string) *NZBFile {
if len(group.Files) == 0 {
return nil
}
// Sort RAR files by part number
sort.Slice(group.Files, func(i, j int) bool {
return group.Files[i].Filename < group.Files[j].Filename
})
// Try to extract RAR info during parsing for better accuracy
extractedInfo := p.extractRarInfo(ctx, group, password)
filename := group.BaseName + ".mkv" // Default extension
if extractedInfo != nil && extractedInfo.FileName != "" {
filename = extractedInfo.FileName
}
filename = utils.RemoveInvalidChars(path.Base(filename))
file := &NZBFile{
Name: filename,
Groups: p.getGroupsList(group.Groups),
Segments: []NZBSegment{},
Password: password,
IsRarArchive: true,
}
// Build segments
ratio := 0.968 // same yEnc overhead estimate as processMediaFile
currentOffset := int64(0)
for _, nzbFile := range group.Files {
sort.Slice(nzbFile.Segments, func(i, j int) bool {
return nzbFile.Segments[i].Number < nzbFile.Segments[j].Number
})
for _, segment := range nzbFile.Segments {
decodedSize := int64(float64(segment.Bytes) * ratio)
seg := NZBSegment{
Number: segment.Number,
MessageID: segment.Id,
Bytes: int64(segment.Bytes),
StartOffset: currentOffset,
EndOffset: currentOffset + decodedSize,
Group: file.Groups[0],
}
file.Segments = append(file.Segments, seg)
currentOffset += decodedSize
}
}
if extractedInfo != nil {
file.Size = extractedInfo.FileSize
file.SegmentSize = extractedInfo.SegmentSize
file.StartOffset = extractedInfo.EstimatedStartOffset
} else {
file.Size = currentOffset
file.SegmentSize = currentOffset / int64(len(file.Segments)) // Average segment size
file.StartOffset = 0 // No accurate start offset available
}
return file
}
func (p *NZBParser) getFileInfo(ctx context.Context, group *FileGroup) (*FileInfo, error) {
if len(group.Files) == 0 {
return nil, fmt.Errorf("no files in group %s", group.BaseName)
}
// Sort files
sort.Slice(group.Files, func(i, j int) bool {
return group.Files[i].Filename < group.Files[j].Filename
})
firstFile := group.Files[0]
lastFile := group.Files[len(group.Files)-1]
firstInfo, err := p.client.DownloadHeader(ctx, firstFile.Segments[0].Id)
if err != nil {
return nil, err
}
lastInfo, err := p.client.DownloadHeader(ctx, lastFile.Segments[len(lastFile.Segments)-1].Id)
if err != nil {
p.logger.Warn().Err(err).Msg("Failed to download last segment header")
return nil, err
}
chunkSize := firstInfo.End - (firstInfo.Begin - 1) // inclusive byte range: End - Begin + 1
totalFileSize := (int64(len(group.Files)-1) * firstInfo.Size) + lastInfo.Size
return &FileInfo{
Size: totalFileSize,
ChunkSize: chunkSize,
Name: firstInfo.Name,
}, nil
}
func (p *NZBParser) extractRarInfo(ctx context.Context, group *FileGroup, password string) *ExtractedFileInfo {
if len(group.Files) == 0 || len(group.Files[0].Segments) == 0 {
return nil
}
firstRarFile := group.Files[0]
segmentsToDownload := min(5, len(firstRarFile.Segments))
headerBuffer, err := p.downloadRarHeaders(ctx, firstRarFile.Segments[:segmentsToDownload])
if err != nil {
p.logger.Warn().Err(err).Msg("Failed to download RAR headers")
return nil
}
fileInfo, err := p.getFileInfo(ctx, group)
if err != nil {
p.logger.Warn().Err(err).Msg("Failed to get file info for RAR group")
return nil
}
// Pass the actual RAR size to the analysis function
return p.analyzeRarStructure(headerBuffer, password, fileInfo)
}
func (p *NZBParser) analyzeRarStructure(headerData []byte, password string, fileInfo *FileInfo) *ExtractedFileInfo {
reader := bytes.NewReader(headerData)
tracker := &PositionTracker{reader: reader, position: 0}
rarReader, err := rardecode.NewReader(tracker, rardecode.Password(password))
if err != nil {
return nil
}
for {
header, err := rarReader.Next()
if err != nil {
break
}
if !header.IsDir && p.isMediaFile(header.Name) {
compressionRatio := float64(fileInfo.Size) / float64(header.UnPackedSize)
if compressionRatio > 0.95 {
fileDataOffset := tracker.Position()
p.logger.Info().
Str("file", header.Name).
Int64("accurate_offset", fileDataOffset).
Float64("compression_ratio", compressionRatio).
Msg("Found accurate store RAR offset using position tracking")
return &ExtractedFileInfo{
FileName: header.Name,
FileSize: header.UnPackedSize,
SegmentSize: fileInfo.ChunkSize,
EstimatedStartOffset: fileDataOffset,
}
}
break
}
// Skip file content - this advances the tracker position
_, _ = io.Copy(io.Discard, rarReader)
}
return nil
}
func (p *NZBParser) determineExtension(group *FileGroup) string {
// Try to determine extension from filenames
for _, file := range group.Files {
ext := filepath.Ext(file.Filename)
if ext != "" {
return ext
}
}
return ".mkv" // Default
}
func (p *NZBParser) getGroupsList(groups map[string]struct{}) []string {
result := make([]string, 0, len(groups))
for g := range groups {
result = append(result, g)
}
return result
}
// Download RAR headers from segments
func (p *NZBParser) downloadRarHeaders(ctx context.Context, segments []nzbparser.NzbSegment) ([]byte, error) {
var headerBuffer bytes.Buffer
for _, segment := range segments {
conn, cleanup, err := p.client.GetConnection(ctx)
if err != nil {
continue
}
data, err := conn.GetBody(segment.Id)
cleanup()
if err != nil {
if !nntp.IsRetryableError(err) {
return nil, err
}
continue
}
if len(data) == 0 {
continue
}
// yEnc decode
part, err := nntp.DecodeYenc(bytes.NewReader(data))
if err != nil || part == nil || len(part.Body) == 0 {
p.logger.Warn().Err(err).Str("segment_id", segment.Id).Msg("Failed to decode RAR header segment")
continue
}
headerBuffer.Write(part.Body)
// Stop if we have enough data (typically first segment is enough for headers)
if headerBuffer.Len() > 32768 { // 32KB should be plenty for RAR headers
break
}
}
if headerBuffer.Len() == 0 {
return nil, fmt.Errorf("no valid header data downloaded")
}
return headerBuffer.Bytes(), nil
}
func (p *NZBParser) detectFileTypeByContent(ctx context.Context, file nzbparser.NzbFile) (FileType, string) {
if len(file.Segments) == 0 {
return FileTypeUnknown, ""
}
// Download first segment to check file signature
firstSegment := file.Segments[0]
data, err := p.downloadFirstSegment(ctx, firstSegment)
if err != nil {
p.logger.Warn().Err(err).Msg("Failed to download first segment for content detection")
return FileTypeUnknown, ""
}
if data.Name != "" {
fileType := p.detectFileType(data.Name)
if fileType != FileTypeUnknown {
return fileType, data.Name
}
}
return p.detectFileTypeFromContent(data.Body), data.Name
}
func (p *NZBParser) detectFileTypeFromContent(data []byte) FileType {
if len(data) == 0 {
return FileTypeUnknown
}
// Check for RAR signatures (both RAR 4.x and 5.x)
if len(data) >= 7 {
// RAR 4.x signature
if bytes.Equal(data[:7], []byte("Rar!\x1A\x07\x00")) {
return FileTypeRar
}
}
if len(data) >= 8 {
// RAR 5.x signature
if bytes.Equal(data[:8], []byte("Rar!\x1A\x07\x01\x00")) {
return FileTypeRar
}
}
// Check for ZIP signature
if len(data) >= 4 && bytes.Equal(data[:4], []byte{0x50, 0x4B, 0x03, 0x04}) {
return FileTypeArchive
}
// Check for 7z signature
if len(data) >= 6 && bytes.Equal(data[:6], []byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C}) {
return FileTypeArchive
}
// Check for common media file signatures
if len(data) >= 4 {
// Matroska (MKV/WebM)
if bytes.Equal(data[:4], []byte{0x1A, 0x45, 0xDF, 0xA3}) {
return FileTypeMedia
}
// MP4/MOV (check for 'ftyp' at offset 4)
if len(data) >= 8 && bytes.Equal(data[4:8], []byte("ftyp")) {
return FileTypeMedia
}
// AVI
if len(data) >= 12 && bytes.Equal(data[:4], []byte("RIFF")) &&
bytes.Equal(data[8:12], []byte("AVI ")) {
return FileTypeMedia
}
}
// MPEG checks need more specific patterns
if len(data) >= 4 {
// MPEG-1/2 Program Stream
if bytes.Equal(data[:4], []byte{0x00, 0x00, 0x01, 0xBA}) {
return FileTypeMedia
}
// MPEG-1/2 Video Stream
if bytes.Equal(data[:4], []byte{0x00, 0x00, 0x01, 0xB3}) {
return FileTypeMedia
}
}
// Check for Transport Stream (TS files): a 0x47 sync byte repeating every 188 bytes
if len(data) >= 1 && data[0] == 0x47 {
// Additional validation: the next packet should also start with the sync byte
if len(data) > 188 && data[188] == 0x47 {
return FileTypeMedia
}
}
return FileTypeUnknown
}
func (p *NZBParser) downloadFirstSegment(ctx context.Context, segment nzbparser.NzbSegment) (*yenc.Part, error) {
conn, cleanup, err := p.client.GetConnection(ctx)
if err != nil {
return nil, err
}
defer cleanup()
data, err := conn.GetBody(segment.Id)
if err != nil {
return nil, err
}
// yEnc decode
part, err := nntp.DecodeYenc(bytes.NewReader(data))
if err != nil || part == nil {
return nil, fmt.Errorf("failed to decode segment")
}
// Return both the filename and decoded data
return part, nil
}
// Calculate total archive size from all RAR parts in the group
func (p *NZBParser) calculateTotalArchiveSize(group *FileGroup) int64 {
var total int64
for _, file := range group.Files {
for _, segment := range file.Segments {
total += int64(segment.Bytes)
}
}
return total
}
func determineNZBName(filename string, meta map[string]string) string {
// Prefer filename if it exists
if filename != "" {
filename = strings.TrimSuffix(filename, filepath.Ext(filename)) // strip only the trailing extension
} else {
if name := meta["name"]; name != "" {
filename = name
} else if title := meta["title"]; title != "" {
filename = title
}
}
return utils.RemoveInvalidChars(filename)
}
func generateID(nzb *NZB) string {
h := sha256.New()
h.Write([]byte(nzb.Name))
h.Write([]byte(fmt.Sprintf("%d", nzb.TotalSize)))
h.Write([]byte(nzb.Category))
h.Write([]byte(nzb.Password))
return hex.EncodeToString(h.Sum(nil))[:16]
}
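
A sketch of driving the parser end to end (the file path and category are illustrative; client construction is assumed):

package example

import (
	"context"
	"log"
	"os"

	"github.com/rs/zerolog"

	"github.com/sirrobot01/decypharr/internal/nntp"
	"github.com/sirrobot01/decypharr/pkg/usenet"
)

func parseOne(ctx context.Context, client *nntp.Client) (*usenet.NZB, error) {
	content, err := os.ReadFile("example.nzb") // hypothetical path
	if err != nil {
		return nil, err
	}
	parser := usenet.NewNZBParser(client, usenet.NewSegmentCache(zerolog.Nop()), zerolog.Nop())
	nzb, err := parser.Parse(ctx, "example.nzb", "sonarr", content)
	if err != nil {
		return nil, err
	}
	log.Printf("parsed %s: %d files, %d bytes", nzb.Name, len(nzb.Files), nzb.TotalSize)
	return nzb, nil
}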

pkg/usenet/processor.go Normal file

@@ -0,0 +1,145 @@
package usenet
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/pkg/arr"
"path/filepath"
"time"
)
// Processor handles NZB processing and download orchestration
type Processor struct {
store Store
parser *NZBParser
downloadWorker *DownloadWorker
logger zerolog.Logger
client *nntp.Client
}
// ProcessRequest represents a request to process an NZB
type ProcessRequest struct {
NZBContent []byte
Name string
Arr *arr.Arr
Action string // "download", "symlink", "none"
DownloadDir string
}
// NewProcessor creates a new usenet processor
func NewProcessor(config *config.Usenet, logger zerolog.Logger, store Store, client *nntp.Client) (*Processor, error) {
processor := &Processor{
store: store,
logger: logger.With().Str("component", "usenet-processor").Logger(),
client: client,
}
// Initialize download worker
processor.downloadWorker = NewDownloadWorker(config, client, processor)
processor.parser = NewNZBParser(client, nil, processor.logger)
return processor, nil
}
// Process processes an NZB for download/streaming
func (p *Processor) Process(ctx context.Context, req *ProcessRequest) (*NZB, error) {
if len(req.NZBContent) == 0 {
return nil, fmt.Errorf("NZB content is empty")
}
// Validate NZB content
if err := ValidateNZB(req.NZBContent); err != nil {
return nil, fmt.Errorf("invalid NZB content: %w", err)
}
nzb, err := p.process(ctx, req)
if err != nil {
p.logger.Error().
Err(err).
Msg("Failed to process NZB content")
return nil, fmt.Errorf("failed to process NZB content: %w", err)
}
return nzb, nil
}
func (p *Processor) process(ctx context.Context, req *ProcessRequest) (*NZB, error) {
category := ""
if req.Arr != nil {
category = req.Arr.Name // guard: Arr may be nil, as the DownloadDir logic below assumes
}
nzb, err := p.parser.Parse(ctx, req.Name, category, req.NZBContent)
if err != nil {
p.logger.Error().
Err(err).
Msg("Failed to parse NZB content")
return nil, fmt.Errorf("failed to parse NZB content: %w", err)
}
if nzb == nil {
p.logger.Error().
Msg("Parsed NZB is nil")
return nil, fmt.Errorf("parsed NZB is nil")
}
p.logger.Info().
Str("nzb_id", nzb.ID).
Msg("Successfully parsed NZB content")
if existing := p.store.Get(nzb.ID); existing != nil {
p.logger.Info().Str("nzb_id", nzb.ID).Msg("NZB already exists")
return existing, nil
}
p.logger.Info().
Str("nzb_id", nzb.ID).
Msg("Creating new NZB download job")
downloadDir := req.DownloadDir
if req.Arr != nil {
downloadDir = filepath.Join(downloadDir, req.Arr.Name)
}
job := &DownloadJob{
NZB: nzb,
Action: req.Action,
DownloadDir: downloadDir,
Callback: func(completedNZB *NZB, err error) {
if err != nil {
p.logger.Error().
Err(err).
Str("nzb_id", completedNZB.ID).
Msg("Download job failed")
return
}
p.logger.Info().
Str("nzb_id", completedNZB.ID).
Msg("Download job completed successfully")
},
}
// Check availability before submitting the job
//if err := p.downloadWorker.CheckAvailability(ctx, job); err != nil {
// p.logger.Error().
// Err(err).
// Str("nzb_id", nzb.ID).
// Msg("NZB availability check failed")
// return nil, fmt.Errorf("availability check failed for NZB %s: %w", nzb.ID, err)
//}
// Mark NZB as downloaded but not completed
nzb.Downloaded = true
nzb.AddedOn = time.Now()
p.store.AddToQueue(nzb)
if err := p.store.Add(nzb); err != nil {
return nil, err
} // Persist the NZB to the store before kicking off the background job
p.logger.Info().
Str("nzb_id", nzb.ID).
Msg("NZB added to queue")
go func() {
// Note: this reuses the request context; if the caller cancels it,
// the background job is canceled too.
if err := p.downloadWorker.Process(ctx, job); err != nil {
p.logger.Error().
Err(err).
Str("nzb_id", nzb.ID).
Msg("Download job failed")
}
}()
return nzb, nil
}
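
A sketch of handing an uploaded NZB to the processor (the arr instance, release name, and directory are illustrative):

package example

import (
	"context"

	"github.com/sirrobot01/decypharr/pkg/arr"
	"github.com/sirrobot01/decypharr/pkg/usenet"
)

func enqueue(ctx context.Context, p *usenet.Processor, a *arr.Arr, content []byte) (*usenet.NZB, error) {
	req := &usenet.ProcessRequest{
		NZBContent:  content,
		Name:        "Some.Release.1080p.nzb",
		Arr:         a,
		Action:      "symlink", // "download", "symlink", or "none"
		DownloadDir: "/mnt/symlinks/usenet",
	}
	return p.Process(ctx, req)
}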

pkg/usenet/rar.go Normal file

@@ -0,0 +1,336 @@
package usenet
import (
"bytes"
"context"
"fmt"
"github.com/nwaples/rardecode/v2"
"github.com/sirrobot01/decypharr/internal/utils"
"io"
"strings"
"time"
)
type RarParser struct {
streamer *Streamer
}
func NewRarParser(s *Streamer) *RarParser {
return &RarParser{streamer: s}
}
func (p *RarParser) ExtractFileRange(ctx context.Context, file *NZBFile, password string, start, end int64, writer io.Writer) error {
info, err := p.getFileInfo(ctx, file, password)
if err != nil {
return fmt.Errorf("failed to get file info: %w", err)
}
requiredSegments := p.calculateSmartSegmentRanges(file, info, start, end)
return p.extract(ctx, requiredSegments, password, info.FileName, start, end, writer)
}
func (p *RarParser) calculateSmartSegmentRanges(file *NZBFile, fileInfo *ExtractedFileInfo, start, end int64) []SegmentRange {
totalSegments := len(file.Segments)
// For store compression, be more conservative with seeking
compressionOverhead := 1.1 // assume up to 10% overhead
estimatedArchiveStart := int64(float64(start) * compressionOverhead)
estimatedArchiveEnd := int64(float64(end) * compressionOverhead)
if fileInfo.ArchiveSize <= 0 || totalSegments == 0 {
return file.ConvertToSegmentRanges(file.Segments) // no size info; fall back to all segments
}
startSegmentIndex := int(float64(estimatedArchiveStart) / float64(fileInfo.ArchiveSize) * float64(totalSegments))
endSegmentIndex := int(float64(estimatedArchiveEnd) / float64(fileInfo.ArchiveSize) * float64(totalSegments))
// More conservative buffers for seeking
if start > 0 {
// For seeking, always include more context
headerBuffer := min(10, startSegmentIndex) // Up to 10 segments back
startSegmentIndex = max(0, startSegmentIndex-headerBuffer)
} else {
startSegmentIndex = 0
}
// Larger end buffer for segment boundaries and RAR footer
endBuffer := 10 + int(float64(totalSegments)*0.02) // 2% of total segments as buffer
endSegmentIndex = min(totalSegments-1, endSegmentIndex+endBuffer)
// Ensure minimum segment count for seeking
minSegmentsForSeek := 20
if endSegmentIndex-startSegmentIndex < minSegmentsForSeek {
endSegmentIndex = min(totalSegments-1, startSegmentIndex+minSegmentsForSeek)
}
return convertSegmentIndicesToRanges(file, startSegmentIndex, endSegmentIndex)
}
func (p *RarParser) extract(ctx context.Context, segmentRanges []SegmentRange, password, targetFileName string, start, end int64, writer io.Writer) error {
pipeReader, pipeWriter := io.Pipe()
extractionErr := make(chan error, 1)
streamingErr := make(chan error, 1)
// RAR extraction goroutine
go func() {
defer func() {
pipeReader.Close()
if r := recover(); r != nil {
extractionErr <- fmt.Errorf("extraction panic: %v", r)
}
}()
rarReader, err := rardecode.NewReader(pipeReader, rardecode.Password(password))
if err != nil {
extractionErr <- fmt.Errorf("failed to create RAR reader: %w", err)
return
}
found := false
for {
select {
case <-ctx.Done():
extractionErr <- ctx.Err()
return
default:
}
header, err := rarReader.Next()
if err == io.EOF {
if !found {
extractionErr <- fmt.Errorf("target file %s not found in downloaded segments", targetFileName)
} else {
extractionErr <- fmt.Errorf("reached EOF before completing range extraction")
}
return
}
if err != nil {
extractionErr <- fmt.Errorf("failed to read RAR header: %w", err)
return
}
if header.Name == targetFileName || utils.IsMediaFile(header.Name) {
found = true
err = p.extractRangeFromReader(ctx, rarReader, start, end, writer)
extractionErr <- err
return
} else if !header.IsDir {
err = p.skipFileEfficiently(ctx, rarReader)
if err != nil && ctx.Err() == nil {
extractionErr <- fmt.Errorf("failed to skip file %s: %w", header.Name, err)
return
}
}
}
}()
// Streaming goroutine
go func() {
defer pipeWriter.Close()
err := p.streamer.stream(ctx, segmentRanges, pipeWriter)
streamingErr <- err
}()
// Wait with longer timeout for seeking operations
select {
case err := <-extractionErr:
return err
case err := <-streamingErr:
if err != nil && !p.isSkippableError(err) {
return fmt.Errorf("segment streaming failed: %w", err)
}
// Longer timeout for seeking operations
select {
case err := <-extractionErr:
return err
case <-time.After(30 * time.Second): // Increased from 5 seconds
return fmt.Errorf("extraction timeout after 30 seconds")
}
case <-ctx.Done():
return ctx.Err()
}
}
func (p *RarParser) extractRangeFromReader(ctx context.Context, reader io.Reader, start, end int64, writer io.Writer) error {
// Skip to start position efficiently
if start > 0 {
skipped, err := p.smartSkip(ctx, reader, start)
if err != nil {
return fmt.Errorf("failed to skip to position %d (skipped %d): %w", start, skipped, err)
}
}
// Copy requested range
bytesToCopy := end - start + 1
copied, err := p.smartCopy(ctx, writer, reader, bytesToCopy)
if err != nil && err != io.EOF {
return fmt.Errorf("failed to copy range (copied %d/%d): %w", copied, bytesToCopy, err)
}
return nil
}
func (p *RarParser) smartSkip(ctx context.Context, reader io.Reader, bytesToSkip int64) (int64, error) {
const skipBufferSize = 64 * 1024 // Larger buffer for skipping
buffer := make([]byte, skipBufferSize)
var totalSkipped int64
for totalSkipped < bytesToSkip {
select {
case <-ctx.Done():
return totalSkipped, ctx.Err()
default:
}
toRead := skipBufferSize
if remaining := bytesToSkip - totalSkipped; remaining < int64(toRead) {
toRead = int(remaining)
}
n, err := reader.Read(buffer[:toRead])
if n > 0 {
totalSkipped += int64(n)
}
if err != nil {
if err == io.EOF {
break
}
return totalSkipped, err
}
}
return totalSkipped, nil
}
func (p *RarParser) smartCopy(ctx context.Context, dst io.Writer, src io.Reader, bytesToCopy int64) (int64, error) {
const copyBufferSize = 32 * 1024
buffer := make([]byte, copyBufferSize)
var totalCopied int64
for totalCopied < bytesToCopy {
select {
case <-ctx.Done():
return totalCopied, ctx.Err()
default:
}
toRead := copyBufferSize
if remaining := bytesToCopy - totalCopied; remaining < int64(toRead) {
toRead = int(remaining)
}
n, err := src.Read(buffer[:toRead])
if n > 0 {
written, writeErr := dst.Write(buffer[:n])
if writeErr != nil {
return totalCopied, writeErr
}
totalCopied += int64(written)
}
if err != nil {
if err == io.EOF {
break
}
return totalCopied, err
}
}
return totalCopied, nil
}
func (p *RarParser) skipFileEfficiently(ctx context.Context, reader io.Reader) error {
_, err := p.smartSkip(ctx, reader, 1<<62) // Very large number
if err == io.EOF {
return nil // EOF is expected when skipping
}
return err
}
func (p *RarParser) getFileInfo(ctx context.Context, file *NZBFile, password string) (*ExtractedFileInfo, error) {
headerSegments := p.getMinimalHeaders(file)
var headerBuffer bytes.Buffer
err := p.streamer.stream(ctx, headerSegments, &headerBuffer)
if err != nil {
return nil, fmt.Errorf("failed to download headers: %w", err)
}
reader := bytes.NewReader(headerBuffer.Bytes())
rarReader, err := rardecode.NewReader(reader, rardecode.Password(password))
if err != nil {
return nil, fmt.Errorf("failed to create RAR reader (check password): %w", err)
}
totalArchiveSize := p.calculateTotalSize(file.SegmentSize, file.Segments)
for {
header, err := rarReader.Next()
if err == io.EOF {
break
}
if err != nil {
continue
}
if !header.IsDir && utils.IsMediaFile(header.Name) {
return &ExtractedFileInfo{
FileName: header.Name,
FileSize: header.UnPackedSize,
ArchiveSize: totalArchiveSize,
}, nil
}
}
return nil, fmt.Errorf("no media file found in RAR archive")
}
func (p *RarParser) getMinimalHeaders(file *NZBFile) []SegmentRange {
headerCount := min(len(file.Segments), 4) // Minimal for password+headers
return file.ConvertToSegmentRanges(file.Segments[:headerCount])
}
func (p *RarParser) calculateTotalSize(segmentSize int64, segments []NZBSegment) int64 {
total := int64(0)
for i, seg := range segments {
if segmentSize <= 0 {
segmentSize = seg.Bytes // Fallback to actual segment size if not set
}
if i == len(segments)-1 {
segmentSize = seg.Bytes // Last segment uses actual size
}
total += segmentSize
}
return total
}
func (p *RarParser) isSkippableError(err error) bool {
if err == nil {
return false // consistent with Streamer.isSkippableError
}
errStr := err.Error()
return strings.Contains(errStr, "client disconnected") ||
strings.Contains(errStr, "broken pipe") ||
strings.Contains(errStr, "connection reset")
}
func convertSegmentIndicesToRanges(file *NZBFile, startIndex, endIndex int) []SegmentRange {
var segmentRanges []SegmentRange
for i := startIndex; i <= endIndex && i < len(file.Segments); i++ {
segment := file.Segments[i]
// For RAR files, we want the entire segment (no partial byte ranges)
segmentRange := SegmentRange{
Segment: segment,
ByteStart: 0, // Always start at beginning of segment
ByteEnd: segment.Bytes - 1, // Always go to end of segment
TotalStart: 0, // Not used for this approach
TotalEnd: segment.Bytes - 1, // Not used for this approach
}
segmentRanges = append(segmentRanges, segmentRange)
}
return segmentRanges
}
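
Taken together, smartSkip and smartCopy give the parser bounded, cancellable I/O over the decoded archive stream, and isSkippableError keeps ordinary client disconnects from surfacing as failures. A minimal sketch of how a caller might serve bytes [start, end] of a packed file (hypothetical usage, not part of this commit; rr is assumed to be a rardecode reader already positioned at the start of the file data):

	if _, err := p.smartSkip(ctx, rr, start); err != nil {
		return err
	}
	if _, err := p.smartCopy(ctx, w, rr, end-start+1); err != nil && !p.isSkippableError(err) {
		return err
	}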

619
pkg/usenet/store.go Normal file
View File

@@ -0,0 +1,619 @@
package usenet
import (
"context"
"encoding/json"
"fmt"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sourcegraph/conc/pool"
"io"
"io/fs"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
)
type fileInfo struct {
id string
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
}
func (fi *fileInfo) Name() string { return fi.name }
func (fi *fileInfo) Size() int64 { return fi.size }
func (fi *fileInfo) Mode() os.FileMode { return fi.mode }
func (fi *fileInfo) ModTime() time.Time { return fi.modTime }
func (fi *fileInfo) IsDir() bool { return fi.isDir }
func (fi *fileInfo) ID() string { return fi.id }
func (fi *fileInfo) Sys() interface{} { return nil }
type Store interface {
Add(nzb *NZB) error
Get(nzoID string) *NZB
GetByName(name string) *NZB
Update(nzb *NZB) error
UpdateFile(nzoID string, file *NZBFile) error
Delete(nzoID string) error
Count() int
Filter(category string, limit int, status ...string) []*NZB
GetHistory(category string, limit int) []*NZB
UpdateStatus(nzoID string, status string) error
Close() error
GetListing(folder string) []os.FileInfo
Load() error
// Queue management
GetQueueItem(nzoID string) *NZB
AddToQueue(nzb *NZB)
RemoveFromQueue(nzoID string)
GetQueue() []*NZB
AtomicDelete(nzoID string) error
RemoveFile(nzoID string, filename string) error
MarkAsCompleted(nzoID string, storage string) error
}
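// Typical Store call order (a sketch inferred from the interface above,
// not a documented contract):
//
//	s := NewStore(cfg, logger)              // constructor defined below
//	_ = s.Load()                            // hydrate listings from disk
//	s.AddToQueue(nzb)                       // while the NZB is downloading
//	_ = s.MarkAsCompleted(nzb.ID, "usenet") // move it from queue to storage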
type store struct {
storePath string
listing atomic.Value
badListing atomic.Value
queue *xsync.Map[string, *NZB]
titles *xsync.Map[string, string] // title -> nzoID
config *config.Usenet
logger zerolog.Logger
}
func NewStore(config *config.Config, logger zerolog.Logger) Store {
if err := os.MkdirAll(config.NZBsPath(), 0755); err != nil {
logger.Error().Err(err).Msg("Failed to create NZB store directory")
return nil
}
s := &store{
storePath: config.NZBsPath(),
queue: xsync.NewMap[string, *NZB](),
titles: xsync.NewMap[string, string](),
config: config.Usenet,
logger: logger,
}
return s
}
func (ns *store) Load() error {
ids, err := ns.getAllIDs()
if err != nil {
return err
}
listing := make([]os.FileInfo, 0)
badListing := make([]os.FileInfo, 0)
for _, id := range ids {
nzb, err := ns.loadFromFile(id)
if err != nil {
continue // Skip if file cannot be loaded
}
ns.titles.Store(nzb.Name, nzb.ID)
fileInfo := &fileInfo{
id: nzb.ID,
name: nzb.Name,
size: nzb.TotalSize,
mode: 0644,
modTime: nzb.AddedOn,
isDir: true,
}
listing = append(listing, fileInfo)
if nzb.IsBad {
badListing = append(badListing, fileInfo)
}
}
ns.listing.Store(listing)
ns.badListing.Store(badListing)
return nil
}
// getFilePath returns the file path for an NZB
func (ns *store) getFilePath(nzoID string) string {
return filepath.Join(ns.storePath, nzoID+".json")
}
func (ns *store) loadFromFile(nzoID string) (*NZB, error) {
filePath := ns.getFilePath(nzoID)
data, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
var compact CompactNZB
if err := json.Unmarshal(data, &compact); err != nil {
return nil, err
}
return compact.toNZB(), nil
}
// saveToFile saves an NZB to file
func (ns *store) saveToFile(nzb *NZB) error {
filePath := ns.getFilePath(nzb.ID)
// Ensure directory exists
dir := filepath.Dir(filePath)
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
compact := nzb.toCompact()
data, err := json.Marshal(compact) // Use compact JSON
if err != nil {
return err
}
return os.WriteFile(filePath, data, 0644)
}
func (ns *store) refreshListing() error {
ids, err := ns.getAllIDs()
if err != nil {
return err
}
listing := make([]os.FileInfo, 0, len(ids))
badListing := make([]os.FileInfo, 0, len(ids))
for _, id := range ids {
nzb, err := ns.loadFromFile(id)
if err != nil {
continue // Skip if file cannot be loaded
}
fileInfo := &fileInfo{
id: nzb.ID,
name: nzb.Name,
size: nzb.TotalSize,
mode: 0644,
modTime: nzb.AddedOn,
isDir: true,
}
listing = append(listing, fileInfo)
ns.titles.Store(nzb.Name, nzb.ID)
if nzb.IsBad {
badListing = append(badListing, fileInfo)
}
}
// Update all structures atomically
ns.listing.Store(listing)
ns.badListing.Store(badListing)
// Refresh rclone if configured
go func() {
if err := ns.refreshRclone(); err != nil {
ns.logger.Error().Err(err).Msg("Failed to refresh rclone")
}
}()
return nil
}
func (ns *store) Add(nzb *NZB) error {
if nzb == nil {
return fmt.Errorf("nzb cannot be nil")
}
if err := ns.saveToFile(nzb); err != nil {
return err
}
ns.titles.Store(nzb.Name, nzb.ID)
go func() {
_ = ns.refreshListing()
}()
return nil
}
func (ns *store) GetByName(name string) *NZB {
if nzoID, exists := ns.titles.Load(name); exists {
return ns.Get(nzoID)
}
return nil
}
func (ns *store) GetQueueItem(nzoID string) *NZB {
if item, exists := ns.queue.Load(nzoID); exists {
return item
}
return nil
}
func (ns *store) AddToQueue(nzb *NZB) {
if nzb == nil {
return
}
ns.queue.Store(nzb.ID, nzb)
}
func (ns *store) RemoveFromQueue(nzoID string) {
if nzoID == "" {
return
}
ns.queue.Delete(nzoID)
}
func (ns *store) GetQueue() []*NZB {
var queueItems []*NZB
ns.queue.Range(func(_ string, value *NZB) bool {
queueItems = append(queueItems, value)
return true // continue iteration
})
return queueItems
}
func (ns *store) Get(nzoID string) *NZB {
nzb, err := ns.loadFromFile(nzoID)
if err != nil {
return nil
}
return nzb
}
func (ns *store) Update(nzb *NZB) error {
if err := ns.saveToFile(nzb); err != nil {
return err
}
return nil
}
func (ns *store) Delete(nzoID string) error {
return ns.AtomicDelete(nzoID)
}
// AtomicDelete performs an atomic delete operation across all data structures
func (ns *store) AtomicDelete(nzoID string) error {
if nzoID == "" {
return fmt.Errorf("nzoID cannot be empty")
}
filePath := ns.getFilePath(nzoID)
// Get NZB info before deletion for cleanup
nzb := ns.Get(nzoID)
if nzb == nil {
// Check if file exists on disk even if not in cache
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return nil // Already deleted
}
}
ns.queue.Delete(nzoID)
if nzb != nil {
ns.titles.Delete(nzb.Name)
}
if currentListing := ns.listing.Load(); currentListing != nil {
oldListing := currentListing.([]os.FileInfo)
newListing := make([]os.FileInfo, 0, len(oldListing))
for _, fi := range oldListing {
if fileInfo, ok := fi.(*fileInfo); ok && fileInfo.id != nzoID {
newListing = append(newListing, fi)
}
}
ns.listing.Store(newListing)
}
if currentListing := ns.badListing.Load(); currentListing != nil {
oldListing := currentListing.([]os.FileInfo)
newListing := make([]os.FileInfo, 0, len(oldListing))
for _, fi := range oldListing {
if fileInfo, ok := fi.(*fileInfo); ok && fileInfo.id != nzoID {
newListing = append(newListing, fi)
}
}
ns.badListing.Store(newListing)
}
// Remove file from disk
return os.Remove(filePath)
}
func (ns *store) RemoveFile(nzoID string, filename string) error {
if nzoID == "" || filename == "" {
return fmt.Errorf("nzoID and filename cannot be empty")
}
nzb := ns.Get(nzoID)
if nzb == nil {
return fmt.Errorf("nzb with nzoID %s not found", nzoID)
}
err := nzb.MarkFileAsRemoved(filename)
if err != nil {
return err
}
if err := ns.Update(nzb); err != nil {
return fmt.Errorf("failed to update nzb after removing file %s: %w", filename, err)
}
// Refresh listing after file removal
_ = ns.refreshListing()
// Remove file from rclone cache if configured
return nil
}
func (ns *store) getAllIDs() ([]string, error) {
var ids []string
err := filepath.WalkDir(ns.storePath, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.IsDir() && strings.HasSuffix(d.Name(), ".json") {
id := strings.TrimSuffix(d.Name(), ".json")
ids = append(ids, id)
}
return nil
})
return ids, err
}
func (ns *store) Filter(category string, limit int, status ...string) []*NZB {
ids, err := ns.getAllIDs()
if err != nil {
return nil
}
statusSet := make(map[string]struct{})
for _, s := range status {
statusSet[s] = struct{}{}
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
p := pool.New().WithContext(ctx).WithMaxGoroutines(10)
var results []*NZB
var mu sync.Mutex
var found atomic.Int32
for _, id := range ids {
id := id
p.Go(func(ctx context.Context) error {
// Early exit if limit reached
if limit > 0 && found.Load() >= int32(limit) {
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
default:
nzb := ns.Get(id)
if nzb == nil {
return nil
}
// Apply filters
if category != "" && nzb.Category != category {
return nil
}
if len(statusSet) > 0 {
if _, exists := statusSet[nzb.Status]; !exists {
return nil
}
}
// Add to results with limit check
mu.Lock()
if limit == 0 || len(results) < limit {
results = append(results, nzb)
found.Add(1)
// Cancel if we hit the limit
if limit > 0 && len(results) >= limit {
cancel()
}
}
mu.Unlock()
return nil
}
})
}
// The pool is cancelled deliberately once the limit is reached, so a
// context.Canceled from Wait is expected and must not discard the results
if err := p.Wait(); err != nil && !errors.Is(err, context.Canceled) {
return nil
}
return results
}
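// Example (hypothetical caller): fetch up to 50 completed items in the
// "movies" category; the variadic tail is the allowed status set:
//
//	nzbs := ns.Filter("movies", 50, "completed")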
func (ns *store) Count() int {
ids, err := ns.getAllIDs()
if err != nil {
return 0
}
return len(ids)
}
func (ns *store) GetHistory(category string, limit int) []*NZB {
return ns.Filter(category, limit, "completed", "failed", "error")
}
func (ns *store) UpdateStatus(nzoID string, status string) error {
nzb := ns.Get(nzoID)
if nzb == nil {
return fmt.Errorf("nzb with nzoID %s not found", nzoID)
}
nzb.Status = status
nzb.LastActivity = time.Now()
if status == "completed" {
nzb.CompletedOn = time.Now()
nzb.Progress = 100
nzb.Percentage = 100
}
if status == "failed" {
// Remove from cache if failed
err := ns.Delete(nzb.ID)
if err != nil {
return err
}
}
return ns.Update(nzb)
}
func (ns *store) Close() error {
// Clear cache
ns.queue = xsync.NewMap[string, *NZB]()
// Clear listings
ns.listing = atomic.Value{}
ns.badListing = atomic.Value{}
// Clear titles
ns.titles = xsync.NewMap[string, string]()
return nil
}
func (ns *store) UpdateFile(nzoID string, file *NZBFile) error {
if nzoID == "" || file == nil {
return fmt.Errorf("nzoID and file cannot be empty")
}
nzb := ns.Get(nzoID)
if nzb == nil {
return fmt.Errorf("nzb with nzoID %s not found", nzoID)
}
// Update file in NZB
for i, f := range nzb.Files {
if f.Name == file.Name {
nzb.Files[i] = *file
break
}
}
if err := ns.Update(nzb); err != nil {
return fmt.Errorf("failed to update nzb after updating file %s: %w", file.Name, err)
}
// Refresh listing after file update
return ns.refreshListing()
}
func (ns *store) GetListing(folder string) []os.FileInfo {
switch folder {
case "__bad__":
if badListing, ok := ns.badListing.Load().([]os.FileInfo); ok {
return badListing
}
return []os.FileInfo{}
default:
if listing, ok := ns.listing.Load().([]os.FileInfo); ok {
return listing
}
return []os.FileInfo{}
}
}
func (ns *store) MarkAsCompleted(nzoID string, storage string) error {
if nzoID == "" {
return fmt.Errorf("nzoID cannot be empty")
}
// Get NZB from queue
queueNZB := ns.GetQueueItem(nzoID)
if queueNZB == nil {
return fmt.Errorf("NZB %s not found in queue", nzoID)
}
// Update NZB status
queueNZB.Status = "completed"
queueNZB.Storage = storage
queueNZB.CompletedOn = time.Now()
queueNZB.LastActivity = time.Now()
queueNZB.Progress = 100
queueNZB.Percentage = 100
// Atomically: remove from queue and add to storage
ns.queue.Delete(nzoID)
if err := ns.Add(queueNZB); err != nil {
// Rollback: add back to queue if storage fails
ns.queue.Store(nzoID, queueNZB)
return fmt.Errorf("failed to store completed NZB: %w", err)
}
return nil
}
func (ns *store) refreshRclone() error {
if ns.config.RcUrl == "" {
return nil
}
client := http.DefaultClient
// Create form data
data := ns.buildRcloneRequestData()
if err := ns.sendRcloneRequest(client, "vfs/forget", data); err != nil {
ns.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
}
if err := ns.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
ns.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
}
return nil
}
func (ns *store) buildRcloneRequestData() string {
return "dir=__all__"
}
func (ns *store) sendRcloneRequest(client *http.Client, endpoint, data string) error {
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", ns.config.RcUrl, endpoint), strings.NewReader(data))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if ns.config.RcUser != "" && ns.config.RcPass != "" {
req.SetBasicAuth(ns.config.RcUser, ns.config.RcPass)
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
ns.logger.Error().Err(err).Msg("Failed to close response body")
}
}(resp.Body)
if resp.StatusCode != 200 {
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
}
_, _ = io.Copy(io.Discard, resp.Body)
return nil
}
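
The nzo_id naming and the queue/history split appear to mirror SABnzbd's API, which this store backs elsewhere in the project. As a rough illustration of how the query helpers compose, here is a hypothetical history handler (a sketch only, not part of this commit; the endpoint shape is an assumption):

	func historyHandler(s Store) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			category := r.URL.Query().Get("category")
			items := s.GetHistory(category, 100) // completed/failed/error only
			w.Header().Set("Content-Type", "application/json")
			_ = json.NewEncoder(w).Encode(items)
		}
	}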

383
pkg/usenet/stream.go Normal file
View File

@@ -0,0 +1,383 @@
package usenet
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/chrisfarms/yenc"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/nntp"
"io"
"net/http"
"sync"
"time"
)
// groupCache remembers NNTP groups that have already been selected so
// repeat GROUP commands are skipped (shared across all pooled connections)
var groupCache = sync.Map{}
type Streamer struct {
logger zerolog.Logger
client *nntp.Client
store Store
cache *SegmentCache
chunkSize int
maxRetries int
retryDelayMs int
}
type segmentResult struct {
index int
data []byte
err error
}
type FlushingWriter struct {
writer io.Writer
}
func (fw *FlushingWriter) Write(data []byte) (int, error) {
if len(data) == 0 {
return 0, nil
}
written, err := fw.writer.Write(data)
if err != nil {
return written, err
}
if written != len(data) {
return written, io.ErrShortWrite
}
// Auto-flush if possible
if flusher, ok := fw.writer.(http.Flusher); ok {
flusher.Flush()
}
return written, nil
}
func (fw *FlushingWriter) WriteAndFlush(data []byte) (int64, error) {
if len(data) == 0 {
return 0, nil
}
written, err := fw.Write(data)
return int64(written), err
}
func (fw *FlushingWriter) WriteString(s string) (int, error) {
return fw.Write([]byte(s))
}
func (fw *FlushingWriter) WriteBytes(data []byte) (int, error) {
return fw.Write(data)
}
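// Usage sketch (assumed caller, not part of this commit): wrapping an
// http.ResponseWriter means each chunk reaches the client as soon as it is
// written, which keeps playback start latency low:
//
//	fw := &FlushingWriter{writer: w} // w is an http.ResponseWriter
//	_, err := fw.Write(chunk)        // flushed when w implements http.Flusher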
func NewStreamer(client *nntp.Client, cache *SegmentCache, store Store, chunkSize int, logger zerolog.Logger) *Streamer {
return &Streamer{
logger: logger.With().Str("component", "streamer").Logger(),
cache: cache,
store: store,
client: client,
chunkSize: chunkSize,
maxRetries: 3,
retryDelayMs: 2000,
}
}
func (s *Streamer) Stream(ctx context.Context, file *NZBFile, start, end int64, writer io.Writer) error {
if file == nil {
return fmt.Errorf("file cannot be nil")
}
if start < 0 {
start = 0
}
if err := s.getSegmentSize(ctx, file); err != nil {
return fmt.Errorf("failed to get segment size: %w", err)
}
if file.IsRarArchive {
return s.streamRarExtracted(ctx, file, start, end, writer)
}
if end >= file.Size {
end = file.Size - 1
}
if start > end {
return fmt.Errorf("invalid range: start=%d > end=%d", start, end)
}
ranges := file.GetSegmentsInRange(file.SegmentSize, start, end)
if len(ranges) == 0 {
return fmt.Errorf("no segments found for range [%d, %d]", start, end)
}
writer = &FlushingWriter{writer: writer}
return s.stream(ctx, ranges, writer)
}
func (s *Streamer) streamRarExtracted(ctx context.Context, file *NZBFile, start, end int64, writer io.Writer) error {
parser := NewRarParser(s)
return parser.ExtractFileRange(ctx, file, file.Password, start, end, writer)
}
func (s *Streamer) stream(ctx context.Context, ranges []SegmentRange, writer io.Writer) error {
chunkSize := max(s.chunkSize, 1) // guard: a zero or negative chunk size would loop forever below
for i := 0; i < len(ranges); i += chunkSize {
end := min(i+chunkSize, len(ranges))
chunk := ranges[i:end]
// Download chunk concurrently
results := make([]segmentResult, len(chunk))
var wg sync.WaitGroup
for j, segRange := range chunk {
wg.Add(1)
go func(idx int, sr SegmentRange) {
defer wg.Done()
data, err := s.processSegment(ctx, sr)
results[idx] = segmentResult{index: idx, data: data, err: err}
}(j, segRange)
}
wg.Wait()
// Write chunk sequentially
for j, result := range results {
if result.err != nil {
return fmt.Errorf("segment %d failed: %w", i+j, result.err)
}
if len(result.data) > 0 {
_, err := writer.Write(result.data)
if err != nil {
return err
}
}
}
}
return nil
}
func (s *Streamer) processSegment(ctx context.Context, segRange SegmentRange) ([]byte, error) {
segment := segRange.Segment
// Try cache first
if s.cache != nil {
if cached, found := s.cache.Get(segment.MessageID); found {
return s.extractRangeFromSegment(cached.Data, segRange)
}
}
// Download with retries
decodedData, err := s.downloadSegmentWithRetry(ctx, segment)
if err != nil {
return nil, fmt.Errorf("download failed: %w", err)
}
// Cache full segment for future seeks
if s.cache != nil {
s.cache.Put(segment.MessageID, decodedData, segment.Bytes)
}
// Extract the specific range from this segment
return s.extractRangeFromSegment(decodedData.Body, segRange)
}
func (s *Streamer) extractRangeFromSegment(data []byte, segRange SegmentRange) ([]byte, error) {
// Use the segment range's pre-calculated offsets
startOffset := segRange.ByteStart
endOffset := segRange.ByteEnd + 1 // ByteEnd is inclusive, we need exclusive for slicing
// Bounds check
if startOffset < 0 || startOffset >= int64(len(data)) {
return []byte{}, nil
}
if endOffset > int64(len(data)) {
endOffset = int64(len(data))
}
if startOffset >= endOffset {
return []byte{}, nil
}
// Extract the range
result := make([]byte, endOffset-startOffset)
copy(result, data[startOffset:endOffset])
return result, nil
}
func (s *Streamer) downloadSegmentWithRetry(ctx context.Context, segment NZBSegment) (*yenc.Part, error) {
var lastErr error
for attempt := 0; attempt < s.maxRetries; attempt++ {
// Check cancellation before each retry
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
if attempt > 0 {
// Exponential backoff: 2s, then 4s, capped at 5s below
delay := time.Duration(s.retryDelayMs*(1<<(attempt-1))) * time.Millisecond
if delay > 5*time.Second {
delay = 5 * time.Second
}
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(delay):
}
}
data, err := s.downloadSegment(ctx, segment)
if err == nil {
return data, nil
}
lastErr = err
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return nil, err
}
}
return nil, fmt.Errorf("segment download failed after %d attempts: %w", s.maxRetries, lastErr)
}
// Updated to work with NZBSegment from SegmentRange
func (s *Streamer) downloadSegment(ctx context.Context, segment NZBSegment) (*yenc.Part, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
downloadCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
conn, cleanup, err := s.client.GetConnection(downloadCtx)
if err != nil {
return nil, err
}
defer cleanup()
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
if segment.Group != "" {
if _, exists := groupCache.Load(segment.Group); !exists {
if _, err := conn.SelectGroup(segment.Group); err != nil {
return nil, fmt.Errorf("failed to select group %s: %w", segment.Group, err)
}
groupCache.Store(segment.Group, true)
}
}
body, err := conn.GetBody(segment.MessageID)
if err != nil {
return nil, fmt.Errorf("failed to get body for message %s: %w", segment.MessageID, err)
}
if len(body) == 0 {
return nil, fmt.Errorf("no body found for message %s", segment.MessageID)
}
data, err := nntp.DecodeYenc(bytes.NewReader(body))
if err != nil {
return nil, fmt.Errorf("failed to decode yEnc: %w", err)
}
// yEnc part begin offsets are 1-based; convert to a 0-based file offset
data.Begin -= 1
return data, nil
}
func (s *Streamer) copySegmentData(writer io.Writer, data []byte) (int64, error) {
if len(data) == 0 {
return 0, nil
}
reader := bytes.NewReader(data)
written, err := io.CopyN(writer, reader, int64(len(data)))
if err != nil {
return 0, fmt.Errorf("copyN failed %w", err)
}
if written != int64(len(data)) {
return 0, fmt.Errorf("expected to copy %d bytes, only copied %d", len(data), written)
}
if fl, ok := writer.(http.Flusher); ok {
fl.Flush()
}
return written, nil
}
func (s *Streamer) extractRangeWithGapHandling(data []byte, segStart, segEnd int64, globalStart, globalEnd int64) ([]byte, error) {
// Calculate intersection using actual bounds
intersectionStart := max(segStart, globalStart)
intersectionEnd := min(segEnd, globalEnd+1) // +1 because globalEnd is inclusive
// No overlap
if intersectionStart >= intersectionEnd {
return []byte{}, nil
}
// Calculate offsets within the actual data
offsetInData := intersectionStart - segStart
dataLength := intersectionEnd - intersectionStart
// Bounds check
if offsetInData < 0 || offsetInData >= int64(len(data)) {
return []byte{}, nil
}
endOffset := offsetInData + dataLength
if endOffset > int64(len(data)) {
endOffset = int64(len(data))
dataLength = endOffset - offsetInData
}
if dataLength <= 0 {
return []byte{}, nil
}
// Extract the range
result := make([]byte, dataLength)
copy(result, data[offsetInData:endOffset])
return result, nil
}
func (s *Streamer) getSegmentSize(ctx context.Context, file *NZBFile) error {
if file.SegmentSize > 0 {
return nil
}
if len(file.Segments) == 0 {
return fmt.Errorf("no segments available for file %s", file.Name)
}
// Fetch the segment size and then store it in the file
firstSegment := file.Segments[0]
firstInfo, err := s.client.DownloadHeader(ctx, firstSegment.MessageID)
if err != nil {
return err
}
chunkSize := firstInfo.End - (firstInfo.Begin - 1)
if chunkSize <= 0 {
return fmt.Errorf("invalid segment size for file %s: %d", file.Name, chunkSize)
}
file.SegmentSize = chunkSize
return s.store.UpdateFile(file.NzbID, file)
}
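
Streamer.Stream takes an inclusive byte range, which lines up directly with HTTP Range semantics. A minimal range-serving handler might look like this (a sketch under stated assumptions: serveRange is hypothetical, and only the single bytes=start-end form is parsed):

	func serveRange(s *Streamer, file *NZBFile, w http.ResponseWriter, r *http.Request) {
		start, end := int64(0), file.Size-1
		if h := r.Header.Get("Range"); h != "" {
			_, _ = fmt.Sscanf(h, "bytes=%d-%d", &start, &end) // open-ended ranges not handled
		}
		w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, file.Size))
		w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
		w.WriteHeader(http.StatusPartialContent)
		_ = s.Stream(r.Context(), file, start, end, w) // Stream wraps w in a FlushingWriter
	}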

239
pkg/usenet/types.go Normal file
View File

@@ -0,0 +1,239 @@
package usenet
import "time"
// NZB represents a torrent-like structure for NZB files
type NZB struct {
ID string `json:"id"`
Name string `json:"name"`
Title string `json:"title,omitempty"`
TotalSize int64 `json:"total_size"`
DatePosted time.Time `json:"date_posted"`
Category string `json:"category"`
Groups []string `json:"groups"`
Files []NZBFile `json:"files"`
Downloaded bool `json:"downloaded"` // Whether the NZB has been downloaded
StreamingInfo StreamingInfo `json:"streaming_info"`
AddedOn time.Time `json:"added_on"` // When the NZB was added to the system
LastActivity time.Time `json:"last_activity"` // Last activity timestamp
Status string `json:"status"` // "queued", "downloading", "completed", "failed"
Progress float64 `json:"progress"` // Percentage of download completion
Percentage float64 `json:"percentage"` // Percentage of download completion
SizeDownloaded int64 `json:"size_downloaded"` // Total size downloaded so far
ETA int64 `json:"eta"` // Estimated time of arrival in seconds
Speed int64 `json:"speed"` // Download speed in bytes per second
CompletedOn time.Time `json:"completed_on"` // When the NZB was completed
IsBad bool `json:"is_bad"`
Storage string `json:"storage"`
FailMessage string `json:"fail_message,omitempty"` // Error message if the download failed
Password string `json:"-"` // Password for encrypted RAR files; never serialized ("-,omitempty" would instead emit a field named "-")
}
// StreamingInfo contains metadata for streaming capabilities
type StreamingInfo struct {
IsStreamable bool `json:"is_streamable"`
MainFileIndex int `json:"main_file_index"` // Index of the main media file
HasParFiles bool `json:"has_par_files"`
HasRarFiles bool `json:"has_rar_files"`
TotalSegments int `json:"total_segments"`
EstimatedTime int64 `json:"estimated_time"` // Estimated download time in seconds
}
type SegmentValidationInfo struct {
ExpectedSize int64
ActualSize int64
Validated bool
}
// NZBFile represents a grouped file with its segments
type NZBFile struct {
NzbID string `json:"nzo_id"`
Name string `json:"name"`
Size int64 `json:"size"`
StartOffset int64 `json:"start_offset"` // This is useful for removing rar headers
Segments []NZBSegment `json:"segments"`
Groups []string `json:"groups"`
SegmentValidation map[string]*SegmentValidationInfo `json:"-"`
IsRarArchive bool `json:"is_rar_archive"` // Whether this file is a RAR archive that needs extraction
Password string `json:"password,omitempty"` // Password for encrypted RAR files
IsDeleted bool `json:"is_deleted"`
SegmentSize int64 `json:"segment_size,omitempty"` // Size of each segment in bytes, if applicable
}
// NZBSegment represents a segment with all necessary download info
type NZBSegment struct {
Number int `json:"number"`
MessageID string `json:"message_id"`
Bytes int64 `json:"bytes"`
StartOffset int64 `json:"start_offset"` // Byte offset within the file
EndOffset int64 `json:"end_offset"` // End byte offset within the file
Group string `json:"group"`
}
// CompactNZB is a space-optimized version of NZB for storage
type CompactNZB struct {
ID string `json:"i"`
Name string `json:"n"`
Status string `json:"s"`
Category string `json:"c"`
Size int64 `json:"sz"`
Progress float64 `json:"p"`
Speed int64 `json:"sp,omitempty"`
ETA int64 `json:"e,omitempty"`
Added int64 `json:"a"` // Unix timestamp
Modified int64 `json:"m"` // Unix timestamp
Complete int64 `json:"co,omitempty"` // Unix timestamp
Groups []string `json:"g,omitempty"`
Files []CompactFile `json:"f,omitempty"`
Storage string `json:"st,omitempty"` // Storage path
FailMessage string `json:"fm,omitempty"` // Error message if the download failed
Downloaded bool `json:"d,omitempty"`
}
// CompactFile represents a file in compact format
type CompactFile struct {
Name string `json:"n"`
Size int64 `json:"s"`
Type string `json:"t"`
Main bool `json:"m,omitempty"`
Offset int64 `json:"o"`
Segments []CompactSegment `json:"seg,omitempty"`
IsRar bool `json:"r,omitempty"`
Password string `json:"p,omitempty"`
IsDeleted bool `json:"del,omitempty"` // Whether the file is marked as deleted
ExtractedFileInfo *ExtractedFileInfo `json:"efi,omitempty"` // Pre-extracted RAR file info
SegmentSize int64 `json:"ss,omitempty"` // Size of each segment in bytes, if applicable
}
// CompactSegment represents a segment in compact format
type CompactSegment struct {
Number int `json:"n"` // Segment number
MessageID string `json:"mid"` // Message-ID of the segment
Bytes int64 `json:"b"` // Size in bytes
StartOffset int64 `json:"so"` // Start byte offset within the file
EndOffset int64 `json:"eo"` // End byte offset within the file
Group string `json:"g,omitempty"` // Group associated with this segment
}
type ExtractedFileInfo struct {
FileName string `json:"fn,omitempty"`
FileSize int64 `json:"fs,omitempty"`
ArchiveSize int64 `json:"as,omitempty"` // Total size of the RAR archive
EstimatedStartOffset int64 `json:"eso,omitempty"` // Estimated start offset in the archive
SegmentSize int64 `json:"ss,omitempty"` // Size of each segment in the archive
}
// toCompact converts NZB to compact format
func (nzb *NZB) toCompact() *CompactNZB {
compact := &CompactNZB{
ID: nzb.ID,
Name: nzb.Name,
Status: nzb.Status,
Category: nzb.Category,
Size: nzb.TotalSize,
Progress: nzb.Progress,
Speed: nzb.Speed,
ETA: nzb.ETA,
Added: nzb.AddedOn.Unix(),
Modified: nzb.LastActivity.Unix(),
Storage: nzb.Storage,
Downloaded: nzb.Downloaded,
FailMessage: nzb.FailMessage,
}
if !nzb.CompletedOn.IsZero() {
compact.Complete = nzb.CompletedOn.Unix()
}
// Only store essential groups (first 3)
if len(nzb.Groups) > 0 {
compact.Groups = nzb.Groups[:min(len(nzb.Groups), 3)]
}
// Store only essential file info
if len(nzb.Files) > 0 {
compact.Files = make([]CompactFile, len(nzb.Files))
for i, file := range nzb.Files {
compact.Files[i] = file.toCompact()
}
}
return compact
}
// toNZB converts the compact format back to a full NZB
func (compact *CompactNZB) toNZB() *NZB {
nzb := &NZB{
ID: compact.ID,
Name: compact.Name,
Status: compact.Status,
Category: compact.Category,
TotalSize: compact.Size,
Progress: compact.Progress,
Percentage: compact.Progress,
Speed: compact.Speed,
ETA: compact.ETA,
Groups: compact.Groups,
AddedOn: time.Unix(compact.Added, 0),
LastActivity: time.Unix(compact.Modified, 0),
Storage: compact.Storage,
Downloaded: compact.Downloaded,
FailMessage: compact.FailMessage,
StreamingInfo: StreamingInfo{
MainFileIndex: -1,
},
}
if compact.Complete > 0 {
nzb.CompletedOn = time.Unix(compact.Complete, 0)
}
// Reconstruct files
if len(compact.Files) > 0 {
nzb.Files = make([]NZBFile, len(compact.Files))
for i, file := range compact.Files {
nzb.Files[i] = file.toNZB()
}
// Set streaming info; TotalSegments counts segments, not files
totalSegments := 0
for _, f := range nzb.Files {
totalSegments += len(f.Segments)
}
nzb.StreamingInfo.TotalSegments = totalSegments
nzb.StreamingInfo.IsStreamable = nzb.StreamingInfo.MainFileIndex >= 0
}
return nzb
}
func (nf *NZBFile) toCompact() CompactFile {
compact := CompactFile{
Name: nf.Name,
Size: nf.Size,
Offset: nf.StartOffset,
IsRar: nf.IsRarArchive,
IsDeleted: nf.IsDeleted,
Password: nf.Password,
SegmentSize: nf.SegmentSize,
}
for _, seg := range nf.Segments {
compact.Segments = append(compact.Segments, CompactSegment(seg))
}
return compact
}
func (compact *CompactFile) toNZB() NZBFile {
f := NZBFile{
Name: compact.Name,
Size: compact.Size,
StartOffset: compact.Offset,
IsRarArchive: compact.IsRar,
Password: compact.Password,
IsDeleted: compact.IsDeleted,
SegmentSize: compact.SegmentSize,
}
for _, seg := range compact.Segments {
f.Segments = append(f.Segments, NZBSegment(seg))
}
return f
}
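
The compact types exist purely to shrink the on-disk JSON: single-letter keys and omitempty cut most of the per-segment overhead. A round-trip sketch (values are illustrative only, and encoding/json is assumed to be imported here):

	nzb := &NZB{ID: "nzo_1", Name: "example", TotalSize: 1 << 30, AddedOn: time.Now()}
	data, _ := json.Marshal(nzb.toCompact()) // e.g. {"i":"nzo_1","n":"example","sz":1073741824,...}
	var c CompactNZB
	_ = json.Unmarshal(data, &c)
	restored := c.toNZB() // Percentage is rehydrated from Progress; MainFileIndex resets to -1
	_ = restored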

180
pkg/usenet/usenet.go Normal file
View File

@@ -0,0 +1,180 @@
package usenet
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/nntp"
"io"
"os"
)
// Usenet interface for usenet operations
type Usenet interface {
Start(ctx context.Context) error
IsReady() chan struct{}
ProcessNZB(ctx context.Context, req *ProcessRequest) (*NZB, error)
GetDownloadByteRange(nzoID string, filename string) (int64, int64, error)
Close()
Logger() zerolog.Logger
Stream(ctx context.Context, nzbID string, filename string, start, end int64, writer io.Writer) error
Store() Store
Client() *nntp.Client
}
// usenet implements the Usenet interface
type usenet struct {
client *nntp.Client
store Store
processor *Processor
parser *NZBParser
streamer *Streamer
cache *SegmentCache
logger zerolog.Logger
ready chan struct{}
}
// New creates a new usenet client
func New() Usenet {
cfg := config.Get()
usenetConfig := cfg.Usenet
if usenetConfig == nil || len(usenetConfig.Providers) == 0 {
// No usenet providers configured, return nil
return nil
}
_logger := logger.New("usenet")
client, err := nntp.NewClient(usenetConfig.Providers)
if err != nil {
_logger.Error().Err(err).Msg("Failed to create usenet client")
return nil
}
store := NewStore(cfg, _logger)
processor, err := NewProcessor(usenetConfig, _logger, store, client)
if err != nil {
_logger.Error().Err(err).Msg("Failed to create usenet processor")
return nil
}
// Create cache and components
cache := NewSegmentCache(_logger)
parser := NewNZBParser(client, cache, _logger)
streamer := NewStreamer(client, cache, store, usenetConfig.Chunks, _logger)
return &usenet{
store: store,
client: client,
processor: processor,
parser: parser,
streamer: streamer,
cache: cache,
logger: _logger,
ready: make(chan struct{}),
}
}
func (c *usenet) Start(ctx context.Context) error {
// Init the client
if err := c.client.InitPools(); err != nil {
c.logger.Error().Err(err).Msg("Failed to initialize usenet client pools")
return fmt.Errorf("failed to initialize usenet client pools: %w", err)
}
// Initialize the store
if err := c.store.Load(); err != nil {
c.logger.Error().Err(err).Msg("Failed to initialize usenet store")
return fmt.Errorf("failed to initialize usenet store: %w", err)
}
close(c.ready)
c.logger.Info().Msg("Usenet client initialized")
return nil
}
func (c *usenet) IsReady() chan struct{} {
return c.ready
}
func (c *usenet) Store() Store {
return c.store
}
func (c *usenet) Client() *nntp.Client {
return c.client
}
func (c *usenet) Logger() zerolog.Logger {
return c.logger
}
func (c *usenet) ProcessNZB(ctx context.Context, req *ProcessRequest) (*NZB, error) {
return c.processor.Process(ctx, req)
}
// GetNZB retrieves an NZB by ID
func (c *usenet) GetNZB(nzoID string) *NZB {
return c.store.Get(nzoID)
}
// DeleteNZB deletes an NZB
func (c *usenet) DeleteNZB(nzoID string) error {
return c.store.Delete(nzoID)
}
// PauseNZB pauses an NZB download
func (c *usenet) PauseNZB(nzoID string) error {
return c.store.UpdateStatus(nzoID, "paused")
}
// ResumeNZB resumes an NZB download
func (c *usenet) ResumeNZB(nzoID string) error {
return c.store.UpdateStatus(nzoID, "downloading")
}
func (c *usenet) Close() {
if c.store != nil {
if err := c.store.Close(); err != nil {
c.logger.Error().Err(err).Msg("Failed to close store")
}
}
c.logger.Info().Msg("Usenet client closed")
}
// GetListing returns the file listing of the NZB directory
func (c *usenet) GetListing(folder string) []os.FileInfo {
return c.store.GetListing(folder)
}
// GetDownloadByteRange is currently a stub; byte-range accounting is not implemented yet
func (c *usenet) GetDownloadByteRange(nzoID string, filename string) (int64, int64, error) {
return 0, 0, nil
}
func (c *usenet) RemoveNZB(nzoID string) error {
if err := c.store.Delete(nzoID); err != nil {
return fmt.Errorf("failed to delete NZB %s: %w", nzoID, err)
}
c.logger.Info().Msgf("NZB %s deleted successfully", nzoID)
return nil
}
// Stream streams a file using the new simplified streaming system
func (c *usenet) Stream(ctx context.Context, nzbID string, filename string, start, end int64, writer io.Writer) error {
// Get NZB from store
nzb := c.GetNZB(nzbID)
if nzb == nil {
return fmt.Errorf("NZB %s not found", nzbID)
}
// Get file
file := nzb.GetFileByName(filename)
if file == nil {
return fmt.Errorf("file %s not found in NZB %s", filename, nzbID)
}
if file.NzbID == "" {
file.NzbID = nzbID // Ensure NZB ID is set for the file
}
// Stream using the new streamer
return c.streamer.Stream(ctx, file, start, end, writer)
}
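
End to end, the pieces above wire together roughly like this (a sketch; the NZO ID and filename are hypothetical and error handling is elided):

	u := New() // returns nil when no usenet providers are configured
	if u == nil {
		return
	}
	ctx := context.Background()
	if err := u.Start(ctx); err != nil {
		return
	}
	<-u.IsReady()
	// stream the first MiB of a file to stdout
	_ = u.Stream(ctx, "nzo_1", "movie.mkv", 0, (1<<20)-1, os.Stdout)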