From fbd6cd503829bc23665db1dde006df059fc11b1e Mon Sep 17 00:00:00 2001 From: Elias Benbourenane Date: Tue, 27 May 2025 19:10:23 -0400 Subject: [PATCH 01/26] Random access for RARed RealDebrid torrents (#61) * feat: AI translated port of RARAR.py in Go * feat: Extract and cache byte ranges of RARed RD torrents * feat: Stream and download files with byte ranges if specified * refactor: Use a more structured data format for byte ranges * fix: Rework streaming to fix error handling * perf: More efficient RAR file pre-processing * feat: Made the RAR unpacker an optional config option * refactor: Remove unnecessary Rar prefix for more idiomatic code * refactor: More appropriate private method declaration * feat: Error handling for parsing RARed torrents with retry requests and EOF validation * fix: Correctly parse unicode file names * fix: Handle special character conversion for RAR torrent file names * refactor: Removed debug logs * feat: Only allow two concurrent RAR unpacking tasks * fix: Include "<" and ">" as unsafe chars for RAR unpacking * refactor: Seperate types into their own file * refactor: Don't read RAR files on reader initialization --- internal/config/config.go | 1 + pkg/debrid/debrid/download_link.go | 15 +- pkg/debrid/realdebrid/realdebrid.go | 160 ++++++- pkg/debrid/types/torrent.go | 9 +- pkg/qbit/downloader.go | 29 +- pkg/rar/rarar.go | 701 ++++++++++++++++++++++++++++ pkg/rar/types.go | 37 ++ pkg/web/templates/config.html | 10 +- pkg/webdav/file.go | 154 +++--- 9 files changed, 1018 insertions(+), 98 deletions(-) create mode 100644 pkg/rar/rarar.go create mode 100644 pkg/rar/types.go diff --git a/internal/config/config.go b/internal/config/config.go index 1a0103c..d80093e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -27,6 +27,7 @@ type Debrid struct { CheckCached bool `json:"check_cached,omitempty"` RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second Proxy string `json:"proxy,omitempty"` + UnpackRar 
bool `json:"unpack_rar,omitempty"` AddSamples bool `json:"add_samples,omitempty"` UseWebDav bool `json:"use_webdav,omitempty"` diff --git a/pkg/debrid/debrid/download_link.go b/pkg/debrid/debrid/download_link.go index b7df841..9a6b152 100644 --- a/pkg/debrid/debrid/download_link.go +++ b/pkg/debrid/debrid/download_link.go @@ -3,10 +3,12 @@ package debrid import ( "errors" "fmt" - "github.com/sirrobot01/decypharr/internal/request" - "github.com/sirrobot01/decypharr/pkg/debrid/types" + "sync" "time" + + "github.com/sirrobot01/decypharr/internal/request" + "github.com/sirrobot01/decypharr/pkg/debrid/types" ) type linkCache struct { @@ -234,3 +236,12 @@ func (c *Cache) IsDownloadLinkInvalid(downloadLink string) bool { } return false } + +func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, error) { + ct := c.GetTorrentByName(torrentName) + if ct == nil { + return nil, fmt.Errorf("torrent not found") + } + file := ct.Files[filename] + return file.ByteRange, nil +} diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index c38d1e8..da1825e 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -5,12 +5,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/rs/zerolog" - "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/internal/request" - "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/types" "io" "net/http" gourl "net/url" @@ -20,6 +14,15 @@ import ( "strings" "sync" "time" + + "github.com/rs/zerolog" + "github.com/sirrobot01/decypharr/internal/config" + "github.com/sirrobot01/decypharr/internal/logger" + "github.com/sirrobot01/decypharr/internal/request" + "github.com/sirrobot01/decypharr/internal/utils" + "github.com/sirrobot01/decypharr/pkg/debrid/types" + + "github.com/sirrobot01/decypharr/pkg/rar" ) type RealDebrid struct { @@ 
-35,10 +38,13 @@ type RealDebrid struct { client *request.Client downloadClient *request.Client - MountPath string - logger zerolog.Logger - checkCached bool - addSamples bool + MountPath string + logger zerolog.Logger + UnpackRar bool + + rarSemaphore chan struct{} + checkCached bool + addSamples bool } func New(dc config.Debrid) *RealDebrid { @@ -70,6 +76,7 @@ func New(dc config.Debrid) *RealDebrid { APIKey: dc.APIKey, accounts: accounts, DownloadUncached: dc.DownloadUncached, + UnpackRar: dc.UnpackRar, client: request.New( request.WithHeaders(headers), request.WithRateLimiter(rl), @@ -88,6 +95,7 @@ func New(dc config.Debrid) *RealDebrid { currentDownloadKey: currentDownloadKey, MountPath: dc.Folder, logger: logger.New(dc.Name), + rarSemaphore: make(chan struct{}, 2), checkCached: dc.CheckCached, addSamples: dc.AddSamples, } @@ -101,30 +109,127 @@ func (r *RealDebrid) GetLogger() zerolog.Logger { return r.logger } -func getSelectedFiles(t *types.Torrent, data torrentInfo) map[string]types.File { +func (r *RealDebrid) getSelectedFiles(t *types.Torrent, data torrentInfo) (map[string]types.File, error) { + files := make(map[string]types.File) selectedFiles := make([]types.File, 0) + for _, f := range data.Files { if f.Selected == 1 { - name := filepath.Base(f.Path) - file := types.File{ + selectedFiles = append(selectedFiles, types.File{ TorrentId: t.Id, - Name: name, - Path: name, + Name: filepath.Base(f.Path), + Path: filepath.Base(f.Path), Size: f.Bytes, Id: strconv.Itoa(f.ID), - } - selectedFiles = append(selectedFiles, file) + }) } } + + if len(selectedFiles) == 0 { + return files, nil + } + + // Handle RARed torrents (single link, multiple files) + if len(data.Links) == 1 && len(selectedFiles) > 1 { + return r.handleRarArchive(t, data, selectedFiles) + } + + // Standard case - map files to links + if len(selectedFiles) > len(data.Links) { + r.logger.Warn().Msgf("More files than links available: %d files, %d links for %s", len(selectedFiles), len(data.Links), 
t.Name) + } + + for i, f := range selectedFiles { + if i < len(data.Links) { + f.Link = data.Links[i] + files[f.Name] = f + } else { + r.logger.Warn().Str("file", f.Name).Msg("No link available for file") + } + } + + return files, nil +} + +// handleRarArchive processes RAR archives with multiple files +func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) { + // This will block if 2 RAR operations are already in progress + r.rarSemaphore <- struct{}{} + defer func() { + <-r.rarSemaphore + }() + files := make(map[string]types.File) - for index, f := range selectedFiles { - if index >= len(data.Links) { - break + + if !r.UnpackRar { + r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s", t.Name) + // Create a single file representing the RAR archive + file := types.File{ + TorrentId: t.Id, + Id: "0", + Name: t.Name + ".rar", + Size: 0, + IsRar: true, + ByteRange: nil, + Path: t.Name + ".rar", + Link: data.Links[0], + AccountId: selectedFiles[0].AccountId, + Generated: time.Now(), } - f.Link = data.Links[index] - files[f.Name] = f + files[file.Name] = file + return files, nil } - return files + + r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name) + linkFile := &types.File{TorrentId: t.Id, Link: data.Links[0]} + downloadLinkObj, err := r.GetDownloadLink(t, linkFile) + + if err != nil { + return nil, fmt.Errorf("failed to get download link for RAR file: %w", err) + } + + dlLink := downloadLinkObj.DownloadLink + reader, err := rar.NewReader(dlLink) + + if err != nil { + return nil, fmt.Errorf("failed to create RAR reader: %w", err) + } + + rarFiles, err := reader.GetFiles() + + if err != nil { + return nil, fmt.Errorf("failed to read RAR files: %w", err) + } + + // Create lookup map for faster matching + fileMap := make(map[string]*types.File) + for i := range selectedFiles { + // RD converts special chars to '_' for RAR file paths + // TOOD: there might be 
more special chars to replace + safeName := strings.NewReplacer("|", "_", "\"", "_", "\\", "_", "?", "_", "*", "_", ":", "_", "<", "_", ">", "_").Replace(selectedFiles[i].Name) + fileMap[safeName] = &selectedFiles[i] + } + + for _, rarFile := range rarFiles { + if file, exists := fileMap[rarFile.Name()]; exists { + file.IsRar = true + file.ByteRange = rarFile.ByteRange() + file.Link = data.Links[0] + file.DownloadLink = &types.DownloadLink{ + Link: data.Links[0], + DownloadLink: dlLink, + Filename: file.Name, + Size: file.Size, + Generated: time.Now(), + } + + files[file.Name] = *file + } else if !rarFile.IsDirectory { + r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name()) + } + } + + return files, nil } // getTorrentFiles returns a list of torrent files from the torrent info @@ -338,7 +443,8 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { t.MountPath = r.MountPath t.Debrid = r.Name t.Added = data.Added - t.Files = getSelectedFiles(t, data) // Get selected files + t.Files, _ = r.getSelectedFiles(t, data) // Get selected files + return nil } @@ -390,7 +496,11 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode) } } else if status == "downloaded" { - t.Files = getSelectedFiles(t, data) // Get selected files + t.Files, err = r.getSelectedFiles(t, data) // Get selected files + if err != nil { + return t, err + } + r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name) if !isSymlink { err = r.GenerateDownloadLinks(t) diff --git a/pkg/debrid/types/torrent.go b/pkg/debrid/types/torrent.go index b072efe..ac69ac4 100644 --- a/pkg/debrid/types/torrent.go +++ b/pkg/debrid/types/torrent.go @@ -2,13 +2,14 @@ package types import ( "fmt" - "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/arr" "os" "path/filepath" "sync" "time" + + 
"github.com/sirrobot01/decypharr/internal/logger" + "github.com/sirrobot01/decypharr/internal/utils" + "github.com/sirrobot01/decypharr/pkg/arr" ) type Torrent struct { @@ -99,6 +100,8 @@ type File struct { Id string `json:"id"` Name string `json:"name"` Size int64 `json:"size"` + IsRar bool `json:"is_rar"` + ByteRange *[2]int64 `json:"byte_range,omitempty"` Path string `json:"path"` Link string `json:"link"` DownloadLink *DownloadLink `json:"-"` diff --git a/pkg/qbit/downloader.go b/pkg/qbit/downloader.go index 3881f7b..fef91bd 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/qbit/downloader.go @@ -2,22 +2,30 @@ package qbit import ( "fmt" - "github.com/cavaliergopher/grab/v3" - "github.com/sirrobot01/decypharr/internal/utils" - debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" "io" "net/http" "os" "path/filepath" "sync" "time" + + "github.com/cavaliergopher/grab/v3" + "github.com/sirrobot01/decypharr/internal/utils" + debrid "github.com/sirrobot01/decypharr/pkg/debrid/types" ) -func Download(client *grab.Client, url, filename string, progressCallback func(int64, int64)) error { +func Download(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error { req, err := grab.NewRequest(filename, url) if err != nil { return err } + + // Set byte range if specified + if byterange != nil { + byterangeStr := fmt.Sprintf("%d-%d", byterange[0], byterange[1]) + req.HTTPRequest.Header.Set("Range", "bytes="+byterangeStr) + } + resp := client.Do(req) t := time.NewTicker(time.Second * 2) @@ -107,7 +115,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { } wg.Add(1) q.downloadSemaphore <- struct{}{} - go func(file debridTypes.File) { + go func(file debrid.File) { defer wg.Done() defer func() { <-q.downloadSemaphore }() filename := file.Name @@ -116,6 +124,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { client, file.DownloadLink.DownloadLink, filepath.Join(parent, filename), + 
file.ByteRange, progressCallback, ) @@ -235,7 +244,7 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { return torrentSymlinkPath, nil } -func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePath, torrentFolder string) (string, error) { +func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) { files := debridTorrent.Files symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ err := os.MkdirAll(symlinkPath, os.ModePerm) @@ -243,7 +252,7 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePa return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err) } - remainingFiles := make(map[string]debridTypes.File) + remainingFiles := make(map[string]debrid.File) for _, file := range files { remainingFiles[file.Name] = file } @@ -300,7 +309,7 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePa return symlinkPath, nil } -func (q *QBit) createSymlinks(debridTorrent *debridTypes.Torrent, rclonePath, torrentFolder string) (string, error) { +func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) { files := debridTorrent.Files symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ err := os.MkdirAll(symlinkPath, os.ModePerm) @@ -308,7 +317,7 @@ func (q *QBit) createSymlinks(debridTorrent *debridTypes.Torrent, rclonePath, to return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err) } - remainingFiles := make(map[string]debridTypes.File) + remainingFiles := make(map[string]debrid.File) for _, file := range files { remainingFiles[file.Path] = file } @@ -364,7 +373,7 @@ func (q *QBit) createSymlinks(debridTorrent *debridTypes.Torrent, rclonePath, to return symlinkPath, nil } -func (q *QBit) 
getTorrentPath(rclonePath string, debridTorrent *debridTypes.Torrent) (string, error) { +func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) { for { torrentPath, err := debridTorrent.GetMountFolder(rclonePath) if err == nil { diff --git a/pkg/rar/rarar.go b/pkg/rar/rarar.go new file mode 100644 index 0000000..ae9f4b5 --- /dev/null +++ b/pkg/rar/rarar.go @@ -0,0 +1,701 @@ +// Source: https://github.com/eliasbenb/RARAR.py +// Note that this code only translates the original Python for RAR3 (not RAR5) support. + +package rar + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/rand" + "net/http" + "strings" + "time" + "unicode/utf8" +) + +// Constants from the Python code +var ( + // Chunk sizes + DefaultChunkSize = 4096 + HttpChunkSize = 32768 + MaxSearchSize = 1 << 20 // 1MB + + // RAR marker and block types + Rar3Marker = []byte{0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x00} + BlockFile = byte(0x74) + BlockHeader = byte(0x73) + BlockMarker = byte(0x72) + BlockEnd = byte(0x7B) + + // Header flags + FlagDirectory = 0xE0 + FlagHasHighSize = 0x100 + FlagHasUnicodeName = 0x200 + FlagHasData = 0x8000 +) + +// Compression methods +var CompressionMethods = map[byte]string{ + 0x30: "Store", + 0x31: "Fastest", + 0x32: "Fast", + 0x33: "Normal", + 0x34: "Good", + 0x35: "Best", +} + +// Error definitions +var ( + ErrMarkerNotFound = errors.New("RAR marker not found within search limit") + ErrInvalidFormat = errors.New("invalid RAR format") + ErrNetworkError = errors.New("network error") + ErrRangeRequestsNotSupported = errors.New("server does not support range requests") + ErrCompressionNotSupported = errors.New("compression method not supported") + ErrDirectoryExtractNotSupported = errors.New("directory extract not supported") +) + +// Name returns the base filename of the file +func (f *File) Name() string { + if i := strings.LastIndexAny(f.Path, "\\/"); i >= 0 { + return f.Path[i+1:] + } + return f.Path +} + +func 
(f *File) ByteRange() *[2]int64 { + return &[2]int64{f.DataOffset, f.DataOffset + f.CompressedSize - 1} +} + +func NewHttpFile(url string) (*HttpFile, error) { + client := &http.Client{} + file := &HttpFile{ + URL: url, + Position: 0, + Client: client, + MaxRetries: 3, + RetryDelay: time.Second, + } + + // Get file size + size, err := file.getFileSize() + if err != nil { + return nil, fmt.Errorf("failed to get file size: %w", err) + } + file.FileSize = size + + return file, nil +} + +func (f *HttpFile) doWithRetry(operation func() (interface{}, error)) (interface{}, error) { + var lastErr error + for attempt := 0; attempt <= f.MaxRetries; attempt++ { + if attempt > 0 { + // Jitter + exponential backoff delay + delay := f.RetryDelay * time.Duration(1< 0 { + remaining := f.FileSize - off + if remaining <= 0 { + return 0, io.EOF + } + if size > remaining { + size = remaining + p = p[:size] + } + } + + result, err := f.doWithRetry(func() (interface{}, error) { + // Create HTTP request with Range header + req, err := http.NewRequest("GET", f.URL, nil) + if err != nil { + return 0, fmt.Errorf("%w: %v", ErrNetworkError, err) + } + + end := off + size - 1 + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", off, end)) + + // Make the request + resp, err := f.Client.Do(req) + if err != nil { + return 0, fmt.Errorf("%w: %v", ErrNetworkError, err) + } + defer resp.Body.Close() + + // Handle response + switch resp.StatusCode { + case http.StatusPartialContent: + // Read the content + bytesRead, err := io.ReadFull(resp.Body, p) + return bytesRead, err + case http.StatusOK: + // Some servers return the full content instead of partial + fullData, err := io.ReadAll(resp.Body) + if err != nil { + return 0, fmt.Errorf("%w: %v", ErrNetworkError, err) + } + + if int64(len(fullData)) <= off { + return 0, io.EOF + } + + end = off + size + if int64(len(fullData)) < end { + end = int64(len(fullData)) + } + + copy(p, fullData[off:end]) + return int(end - off), nil + case 
http.StatusRequestedRangeNotSatisfiable: + // We're at EOF + return 0, io.EOF + default: + return 0, fmt.Errorf("%w: unexpected status code: %d", ErrNetworkError, resp.StatusCode) + } + }) + + if err != nil { + return 0, err + } + + return result.(int), nil +} + +// NewReader creates a new RAR3 reader +func NewReader(url string) (*Reader, error) { + file, err := NewHttpFile(url) + if err != nil { + return nil, err + } + + reader := &Reader{ + File: file, + ChunkSize: HttpChunkSize, + Files: make([]*File, 0), + } + + // Find RAR marker + marker, err := reader.findMarker() + if err != nil { + return nil, err + } + reader.Marker = marker + pos := reader.Marker + int64(len(Rar3Marker)) // Skip marker block + + headerData, err := reader.readBytes(pos, 7) + if err != nil { + return nil, err + } + + if len(headerData) < 7 { + return nil, ErrInvalidFormat + } + + headType := headerData[2] + headSize := int(binary.LittleEndian.Uint16(headerData[5:7])) + + if headType != BlockHeader { + return nil, ErrInvalidFormat + } + + // Store the position after the archive header + reader.HeaderEndPos = pos + int64(headSize) + + return reader, nil +} + +// readBytes reads a range of bytes from the file +func (r *Reader) readBytes(start int64, length int) ([]byte, error) { + if length <= 0 { + return []byte{}, nil + } + + data := make([]byte, length) + n, err := r.File.ReadAt(data, start) + if err != nil && err != io.EOF { + return nil, err + } + + if n < length { + // Partial read, return what we got + return data[:n], nil + } + + return data, nil +} + +// findMarker finds the RAR marker in the file +func (r *Reader) findMarker() (int64, error) { + // First try to find marker in the first chunk + firstChunkSize := 8192 // 8KB + chunk, err := r.readBytes(0, firstChunkSize) + if err != nil { + return 0, err + } + + markerPos := bytes.Index(chunk, Rar3Marker) + if markerPos != -1 { + return int64(markerPos), nil + } + + // If not found, continue searching + position := 
int64(firstChunkSize - len(Rar3Marker) + 1) + maxSearch := int64(MaxSearchSize) + + for position < maxSearch { + chunkSize := min(r.ChunkSize, int(maxSearch-position)) + chunk, err := r.readBytes(position, chunkSize) + if err != nil || len(chunk) == 0 { + break + } + + markerPos := bytes.Index(chunk, Rar3Marker) + if markerPos != -1 { + return position + int64(markerPos), nil + } + + // Move forward by chunk size minus the marker length + position += int64(max(1, len(chunk)-len(Rar3Marker)+1)) + } + + return 0, ErrMarkerNotFound +} + +// decodeUnicode decodes RAR3 Unicode encoding +func decodeUnicode(asciiStr string, unicodeData []byte) string { + if len(unicodeData) == 0 { + return asciiStr + } + + result := []rune{} + asciiPos := 0 + dataPos := 0 + highByte := byte(0) + + for dataPos < len(unicodeData) { + flags := unicodeData[dataPos] + dataPos++ + + // Determine the number of character positions this flag byte controls + var flagBits uint + var flagCount int + var bitCount int + + if flags&0x80 != 0 { + // Extended flag - controls up to 32 characters (16 bit pairs) + flagBits = uint(flags) + bitCount = 1 + for (flagBits&(0x80>>bitCount) != 0) && dataPos < len(unicodeData) { + flagBits = ((flagBits & ((0x80 >> bitCount) - 1)) << 8) | uint(unicodeData[dataPos]) + dataPos++ + bitCount++ + } + flagCount = bitCount * 4 + } else { + // Simple flag - controls 4 characters (4 bit pairs) + flagBits = uint(flags) + flagCount = 4 + } + + // Process each 2-bit flag + for i := 0; i < flagCount; i++ { + if asciiPos >= len(asciiStr) && dataPos >= len(unicodeData) { + break + } + + flagValue := (flagBits >> (i * 2)) & 0x03 + + switch flagValue { + case 0: + // Use ASCII character + if asciiPos < len(asciiStr) { + result = append(result, rune(asciiStr[asciiPos])) + asciiPos++ + } + case 1: + // Unicode character with high byte 0 + if dataPos < len(unicodeData) { + result = append(result, rune(unicodeData[dataPos])) + dataPos++ + } + case 2: + // Unicode character with current 
high byte + if dataPos < len(unicodeData) { + lowByte := uint(unicodeData[dataPos]) + dataPos++ + result = append(result, rune(lowByte|(uint(highByte)<<8))) + } + case 3: + // Set new high byte + if dataPos < len(unicodeData) { + highByte = unicodeData[dataPos] + dataPos++ + } + } + } + } + + // Append any remaining ASCII characters + for asciiPos < len(asciiStr) { + result = append(result, rune(asciiStr[asciiPos])) + asciiPos++ + } + + return string(result) +} + +// readFiles reads all file entries in the archive +func (r *Reader) readFiles() error { + pos := r.Marker + pos += int64(len(Rar3Marker)) // Skip marker block + + // Read archive header + headerData, err := r.readBytes(pos, 7) + if err != nil { + return err + } + + if len(headerData) < 7 { + return ErrInvalidFormat + } + + headType := headerData[2] + headSize := int(binary.LittleEndian.Uint16(headerData[5:7])) + + if headType != BlockHeader { + return ErrInvalidFormat + } + + pos += int64(headSize) // Skip archive header + + // Track whether we've found the end marker + foundEndMarker := false + + // Process file entries + for !foundEndMarker { + headerData, err := r.readBytes(pos, 7) + if err != nil { + // Don't stop on EOF, might be temporary network error + // For definitive errors, return the error + if !errors.Is(err, io.EOF) && !errors.Is(err, ErrNetworkError) { + return fmt.Errorf("error reading block header: %w", err) + } + + // If we get EOF or network error, retry a few times + retryCount := 0 + maxRetries := 3 + retryDelay := time.Second + + for retryCount < maxRetries { + time.Sleep(retryDelay * time.Duration(1<= 7 { + break // Successfully got data + } + } + + if len(headerData) < 7 { + return fmt.Errorf("failed to read block header after retries: %w", err) + } + } + + if len(headerData) < 7 { + return fmt.Errorf("incomplete block header at position %d", pos) + } + + headType := headerData[2] + headFlags := int(binary.LittleEndian.Uint16(headerData[3:5])) + headSize := 
int(binary.LittleEndian.Uint16(headerData[5:7])) + + if headType == BlockEnd { + // End of archive + foundEndMarker = true + break + } + + if headType == BlockFile { + // Get complete header data + completeHeader, err := r.readBytes(pos, headSize) + if err != nil || len(completeHeader) < headSize { + // Retry logic for incomplete headers + retryCount := 0 + maxRetries := 3 + retryDelay := time.Second + + for retryCount < maxRetries && (err != nil || len(completeHeader) < headSize) { + time.Sleep(retryDelay * time.Duration(1<= headSize { + break // Successfully got data + } + } + + if len(completeHeader) < headSize { + return fmt.Errorf("failed to read complete file header after retries: %w", err) + } + } + + fileInfo, err := r.parseFileHeader(completeHeader, pos) + if err == nil && fileInfo != nil { + r.Files = append(r.Files, fileInfo) + pos = fileInfo.NextOffset + } else { + pos += int64(headSize) + } + } else { + // Skip non-file block + pos += int64(headSize) + + // Skip data if present + if headFlags&FlagHasData != 0 { + // Read data size + sizeData, err := r.readBytes(pos-4, 4) + if err != nil || len(sizeData) < 4 { + // Retry logic for data size read errors + retryCount := 0 + maxRetries := 3 + retryDelay := time.Second + + for retryCount < maxRetries && (err != nil || len(sizeData) < 4) { + time.Sleep(retryDelay * time.Duration(1<= 4 { + break // Successfully got data + } + } + + if len(sizeData) < 4 { + return fmt.Errorf("failed to read data size after retries: %w", err) + } + } + + dataSize := int64(binary.LittleEndian.Uint32(sizeData)) + pos += dataSize + } + } + } + + if !foundEndMarker { + return fmt.Errorf("end marker not found in archive") + } + + return nil +} + +// parseFileHeader parses a file header and returns file info +func (r *Reader) parseFileHeader(headerData []byte, position int64) (*File, error) { + if len(headerData) < 7 { + return nil, fmt.Errorf("header data too short") + } + + headType := headerData[2] + headFlags := 
int(binary.LittleEndian.Uint16(headerData[3:5])) + headSize := int(binary.LittleEndian.Uint16(headerData[5:7])) + + if headType != BlockFile { + return nil, fmt.Errorf("not a file block") + } + + // Check if we have enough data + if len(headerData) < 32 { + return nil, fmt.Errorf("file header too short") + } + + // Parse basic file header fields + packSize := binary.LittleEndian.Uint32(headerData[7:11]) + unpackSize := binary.LittleEndian.Uint32(headerData[11:15]) + // fileOS := headerData[15] + fileCRC := binary.LittleEndian.Uint32(headerData[16:20]) + // fileTime := binary.LittleEndian.Uint32(headerData[20:24]) + // unpVer := headerData[24] + method := headerData[25] + nameSize := binary.LittleEndian.Uint16(headerData[26:28]) + // fileAttr := binary.LittleEndian.Uint32(headerData[28:32]) + + // Handle high pack/unp sizes + highPackSize := uint32(0) + highUnpSize := uint32(0) + + offset := 32 // Start after basic header fields + + if headFlags&FlagHasHighSize != 0 { + if offset+8 <= len(headerData) { + highPackSize = binary.LittleEndian.Uint32(headerData[offset : offset+4]) + highUnpSize = binary.LittleEndian.Uint32(headerData[offset+4 : offset+8]) + } + offset += 8 + } + + // Calculate actual sizes + fullPackSize := int64(packSize) + (int64(highPackSize) << 32) + fullUnpSize := int64(unpackSize) + (int64(highUnpSize) << 32) + + // Read filename + var fileName string + if offset+int(nameSize) <= len(headerData) { + fileNameBytes := headerData[offset : offset+int(nameSize)] + + if headFlags&FlagHasUnicodeName != 0 { + zeroPos := bytes.IndexByte(fileNameBytes, 0) + if zeroPos != -1 { + // Try UTF-8 first + asciiPart := fileNameBytes[:zeroPos] + if utf8.Valid(asciiPart) { + fileName = string(asciiPart) + } else { + // Fall back to custom decoder + asciiStr := string(asciiPart) + unicodePart := fileNameBytes[zeroPos+1:] + fileName = decodeUnicode(asciiStr, unicodePart) + } + } else { + // No null byte + if utf8.Valid(fileNameBytes) { + fileName = string(fileNameBytes) 
+ } else { + fileName = string(fileNameBytes) // Last resort + } + } + } else { + // Non-Unicode filename + if utf8.Valid(fileNameBytes) { + fileName = string(fileNameBytes) + } else { + fileName = string(fileNameBytes) // Fallback + } + } + } else { + fileName = fmt.Sprintf("UnknownFile%d", len(r.Files)) + } + + isDirectory := (headFlags & FlagDirectory) == FlagDirectory + + // Calculate data offsets + dataOffset := position + int64(headSize) + nextOffset := dataOffset + + // Only add data size if it's not a directory and has data + if !isDirectory && headFlags&FlagHasData != 0 { + nextOffset += fullPackSize + } + + return &File{ + Path: fileName, + Size: fullUnpSize, + CompressedSize: fullPackSize, + Method: method, + CRC: fileCRC, + IsDirectory: isDirectory, + DataOffset: dataOffset, + NextOffset: nextOffset, + }, nil +} + +// GetFiles returns all files in the archive +func (r *Reader) GetFiles() ([]*File, error) { + if len(r.Files) == 0 { + err := r.readFiles() + if err != nil { + return nil, err + } + } + + return r.Files, nil +} + +// ExtractFile extracts a file from the archive +func (r *Reader) ExtractFile(file *File) ([]byte, error) { + if file.IsDirectory { + return nil, ErrDirectoryExtractNotSupported + } + + // Only support "Store" method + if file.Method != 0x30 { // 0x30 = "Store" + return nil, ErrCompressionNotSupported + } + + return r.readBytes(file.DataOffset, int(file.CompressedSize)) +} + +// Helper functions +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/pkg/rar/types.go b/pkg/rar/types.go new file mode 100644 index 0000000..8156d6a --- /dev/null +++ b/pkg/rar/types.go @@ -0,0 +1,37 @@ +package rar + +import ( + "net/http" + "time" +) + +// File represents a file entry in a RAR archive +type File struct { + Path string + Size int64 + CompressedSize int64 + Method byte + CRC uint32 + IsDirectory bool + DataOffset int64 + NextOffset int64 +} + 
+// Access point for a RAR archive served through HTTP +type HttpFile struct { + URL string + Position int64 + Client *http.Client + FileSize int64 + MaxRetries int + RetryDelay time.Duration +} + +// Reader reads RAR3 format archives +type Reader struct { + File *HttpFile + ChunkSize int + Marker int64 + HeaderEndPos int64 // Position after the archive header + Files []*File +} diff --git a/pkg/web/templates/config.html b/pkg/web/templates/config.html index b044acb..5b009b4 100644 --- a/pkg/web/templates/config.html +++ b/pkg/web/templates/config.html @@ -363,11 +363,18 @@ Add samples, extras etc when adding torrent to debrid(disabled by default)
+
+ + +
+ Preprocess RARed torrents to allow reading the files inside +
+
- Create an internal webdav for this debrid + Create an internal webdav for this debrid
@@ -1092,6 +1099,7 @@ rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value, download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked, check_cached: document.querySelector(`[name="debrid[${i}].check_cached"]`).checked, + unpack_rar: document.querySelector(`[name="debrid[${i}].unpack_rar"]`).checked, add_samples: document.querySelector(`[name="debrid[${i}].add_samples"]`).checked, use_webdav: document.querySelector(`[name="debrid[${i}].use_webdav"]`).checked }; diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 528ae7f..babe36a 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -3,12 +3,13 @@ package webdav import ( "crypto/tls" "fmt" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" "io" "net/http" "os" "strings" "time" + + "github.com/sirrobot01/decypharr/pkg/debrid/debrid" ) var sharedClient = &http.Client{ @@ -76,104 +77,143 @@ func (f *File) getDownloadLink() (string, error) { return "", os.ErrNotExist } -func (f *File) stream() (*http.Response, error) { - client := sharedClient // Might be replaced with the custom client - _log := f.cache.GetLogger() - var ( - err error - downloadLink string - ) - - downloadLink, err = f.getDownloadLink() +func (f *File) getDownloadByteRange() (*[2]int64, error) { + byteRange, err := f.cache.GetDownloadByteRange(f.torrentName, f.name) if err != nil { - - _log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err) - return nil, io.EOF + return nil, err } + return byteRange, nil +} + +func (f *File) stream() (*http.Response, error) { + client := sharedClient + _log := f.cache.GetLogger() + + downloadLink, err := f.getDownloadLink() + if err != nil { + _log.Trace().Msgf("Failed to get download link for %s: %v", f.name, err) + return nil, err + } + if downloadLink == "" { _log.Trace().Msgf("Failed to get download link for %s. 
Empty download link", f.name) - return nil, io.EOF + return nil, fmt.Errorf("empty download link") + } + + byteRange, err := f.getDownloadByteRange() + if err != nil { + _log.Trace().Msgf("Failed to get download byte range for %s: %v", f.name, err) + return nil, err } req, err := http.NewRequest("GET", downloadLink, nil) if err != nil { - _log.Trace().Msgf("Failed to create HTTP request: %s", err) - return nil, io.EOF + _log.Trace().Msgf("Failed to create HTTP request: %v", err) + return nil, err } - if f.offset > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset)) + if byteRange == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset))) + } else { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset))) } + // Make the request resp, err := client.Do(req) if err != nil { - return resp, io.EOF + _log.Trace().Msgf("HTTP request failed: %v", err) + return nil, err } if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { f.downloadLink = "" - closeResp := func() { - _, _ = io.Copy(io.Discard, resp.Body) - resp.Body.Close() + + cleanupResp := func() { + if resp.Body != nil { + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } } - if resp.StatusCode == http.StatusServiceUnavailable { - b, _ := io.ReadAll(resp.Body) - err := resp.Body.Close() - if err != nil { - _log.Trace().Msgf("Failed to close response body: %s", err) - return nil, io.EOF + switch resp.StatusCode { + case http.StatusServiceUnavailable: + // Read the body to check for specific error messages + body, readErr := io.ReadAll(resp.Body) + resp.Body.Close() + + if readErr != nil { + _log.Trace().Msgf("Failed to read response body: %v", readErr) + return nil, fmt.Errorf("failed to read error response: %w", readErr) } - if strings.Contains(string(b), "You can not download this file because you have exceeded your traffic on this hoster") { + + bodyStr := string(body) + if strings.Contains(bodyStr, "You can not 
download this file because you have exceeded your traffic on this hoster") { _log.Trace().Msgf("Bandwidth exceeded for %s. Download token will be disabled if you have more than one", f.name) f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "bandwidth_exceeded") // Retry with a different API key if it's available return f.stream() - } else { - _log.Trace().Msgf("Failed to get download link for %s. %s", f.name, string(b)) - return resp, io.EOF } - } else if resp.StatusCode == http.StatusNotFound { - closeResp() + return nil, fmt.Errorf("service unavailable: %s", bodyStr) + + case http.StatusNotFound: + cleanupResp() // Mark download link as not found // Regenerate a new download link + _log.Trace().Msgf("File not found (404) for %s. Marking link as invalid and regenerating", f.name) f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found") // Generate a new download link - downloadLink, err = f.getDownloadLink() + downloadLink, err := f.getDownloadLink() if err != nil { _log.Trace().Msgf("Failed to get download link for %s. 
%s", f.name, err) - return nil, io.EOF + return nil, err } + if downloadLink == "" { _log.Trace().Msgf("Failed to get download link for %s", f.name) - return nil, io.EOF - } - req, err = http.NewRequest("GET", downloadLink, nil) - if err != nil { - return nil, io.EOF - } - if f.offset > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset)) + return nil, fmt.Errorf("failed to regenerate download link") } - resp, err = client.Do(req) + req, err := http.NewRequest("GET", downloadLink, nil) if err != nil { - return resp, fmt.Errorf("HTTP request error: %w", err) + return nil, err } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - closeResp() - // Read the body to consume the response + + // Set the range header again + if byteRange == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset))) + } else { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset))) + } + + newResp, err := client.Do(req) + if err != nil { + return nil, err + } + + if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent { + cleanupBody := func() { + if newResp.Body != nil { + io.Copy(io.Discard, newResp.Body) + newResp.Body.Close() + } + } + + cleanupBody() + _log.Trace().Msgf("Regenerated link also failed with status %d", newResp.StatusCode) f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found") - return resp, io.EOF + return nil, fmt.Errorf("failed with status code %d even after link regeneration", newResp.StatusCode) } - return resp, nil - } else { - closeResp() - return resp, io.EOF + return newResp, nil + + default: + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + + _log.Trace().Msgf("Unexpected status code %d for %s: %s", resp.StatusCode, f.name, string(body)) + return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body)) } - } return resp, nil } @@ -196,7 +236,7 @@ func (f *File) Read(p []byte) (n int, err 
error) { // If we haven't started streaming the file yet or need to reposition if f.reader == nil || f.seekPending { - if f.reader != nil && f.seekPending { + if f.reader != nil { f.reader.Close() f.reader = nil } @@ -207,7 +247,7 @@ func (f *File) Read(p []byte) (n int, err error) { return 0, err } if resp == nil { - return 0, io.EOF + return 0, fmt.Errorf("stream returned nil response") } f.reader = resp.Body From 60b8d87f1c4e3f24a631478f6eec0191b9b3412c Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Wed, 28 May 2025 00:14:43 +0100 Subject: [PATCH 02/26] hotfix rar PR --- pkg/qbit/downloader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/qbit/downloader.go b/pkg/qbit/downloader.go index fef91bd..a1fb00e 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/qbit/downloader.go @@ -195,7 +195,7 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { q.logger.Warn().Msgf("Error while scanning rclone path: %v", err) } - pending := make(map[string]debridTypes.File) + pending := make(map[string]debrid.File) for _, file := range files { if realRelPath, ok := realPaths[file.Name]; ok { file.Path = realRelPath From f9c49cbbef2640425161a1a10368b8c6c895e4ff Mon Sep 17 00:00:00 2001 From: Elias Benbourenane Date: Wed, 28 May 2025 10:29:18 -0400 Subject: [PATCH 03/26] Torrent list context menu (#40) * feat: Torrent list context menu * style: Leave more padding on the context menu for smaller screens --- pkg/web/templates/index.html | 81 +++++++++++++++++++++++++++++++++++- 1 file changed, 80 insertions(+), 1 deletion(-) diff --git a/pkg/web/templates/index.html b/pkg/web/templates/index.html index 54e8cd0..a2b232e 100644 --- a/pkg/web/templates/index.html +++ b/pkg/web/templates/index.html @@ -64,6 +64,23 @@
+ + + + {{ end }} \ No newline at end of file From 1cd09239f985c998ced4dab2b94655f105435e21 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Thu, 29 May 2025 04:05:44 +0100 Subject: [PATCH 04/26] - Add more indepth stats like number of torrents, profile details etc - Add torrent ingest endpoints - Add issue template --- .github/ISSUE_TEMPLATE/bug_report.yml | 76 ++++++++++ .github/ISSUE_TEMPLATE/feature_request.yml | 38 +++++ README.md | 5 - docs/docs/installation.md | 20 ++- internal/request/request.go | 28 +--- pkg/debrid/alldebrid/alldebrid.go | 8 +- pkg/debrid/debrid/debrid.go | 2 +- pkg/debrid/debrid/download_link.go | 10 ++ pkg/debrid/debrid/engine.go | 21 ++- pkg/debrid/debrid/misc.go | 15 ++ pkg/debrid/debrid_link/debrid_link.go | 80 ++++++----- pkg/debrid/realdebrid/realdebrid.go | 38 ++++- pkg/debrid/realdebrid/types.go | 12 ++ pkg/debrid/torbox/torbox.go | 8 +- pkg/debrid/types/client.go | 1 + pkg/debrid/types/torrent.go | 22 +++ pkg/qbit/downloader.go | 64 --------- pkg/repair/clean.go | 159 --------------------- pkg/server/debug.go | 116 +++++++++++++++ pkg/server/server.go | 36 +---- pkg/service/service.go | 6 - pkg/web/templates/layout.html | 2 +- pkg/webdav/file.go | 2 - pkg/webdav/handler.go | 1 - pkg/webdav/misc.go | 10 -- 25 files changed, 411 insertions(+), 369 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml delete mode 100644 pkg/repair/clean.go create mode 100644 pkg/server/debug.go diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000..5838303 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,76 @@ +name: Bug Report +description: 'Report a new bug' +labels: ['Type: Bug', 'Status: Needs Triage'] +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? 
+ description: Please search to see if an open or closed issue already exists for the bug you encountered. If a bug exists and is closed note that it may only be fixed in an unstable branch. + options: + - label: I have searched the existing open and closed issues + required: true + - type: textarea + attributes: + label: Current Behavior + description: A concise description of what you're experiencing. + validations: + required: true + - type: textarea + attributes: + label: Expected Behavior + description: A concise description of what you expected to happen. + validations: + required: true + - type: textarea + attributes: + label: Steps To Reproduce + description: Steps to reproduce the behavior. + placeholder: | + 1. In this environment... + 2. With this config... + 3. Run '...' + 4. See error... + validations: + required: false + - type: textarea + attributes: + label: Environment + description: | + examples: + - **OS**: Ubuntu 20.04 + - **Version**: v1.0.0 + - **Docker Install**: Yes + - **Browser**: Firefox 90 (If UI related) + value: | + - OS: + - Version: + - Docker Install: + - Browser: + render: markdown + validations: + required: true + - type: dropdown + attributes: + label: What branch are you running? + options: + - Main/Latest + - Beta + - Experimental + validations: + required: true + - type: textarea + attributes: + label: Trace Logs? **Not Optional** + description: | + Trace Logs + - are **required** for bug reports + - are not optional + validations: + required: true + - type: checkboxes + attributes: + label: Trace Logs have been provided as applicable + description: Trace logs are **generally required** and are not optional for all bug reports and contain `trace`. Info logs are invalid for bug reports and do not contain `debug` nor `trace` + options: + - label: I have read and followed the steps in the wiki link above and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue. 
+ required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000..bd8af5f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,38 @@ +name: Feature Request +description: 'Suggest an idea for Decypharr' +labels: ['Type: Feature Request', 'Status: Needs Triage'] +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an open or closed issue already exists for the feature you are requesting. If a request exists and is closed note that it may only be fixed in an unstable branch. + options: + - label: I have searched the existing open and closed issues + required: true + - type: textarea + attributes: + label: Is your feature request related to a problem? Please describe + description: A clear and concise description of what the problem is. + validations: + required: true + - type: textarea + attributes: + label: Describe the solution you'd like + description: A clear and concise description of what you want to happen. + validations: + required: true + - type: textarea + attributes: + label: Describe alternatives you've considered + description: A clear and concise description of any alternative solutions or features you've considered. + validations: + required: true + - type: textarea + attributes: + label: Anything else? + description: | + Links? References? Mockups? Anything that will give us more context about the feature you are encountering! + + Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 
+ validations: + required: true \ No newline at end of file diff --git a/README.md b/README.md index 15fdb3d..320b45e 100644 --- a/README.md +++ b/README.md @@ -36,14 +36,9 @@ services: container_name: decypharr ports: - "8282:8282" # qBittorrent - user: "1000:1000" volumes: - /mnt/:/mnt - ./configs/:/app # config.json must be in this directory - environment: - - PUID=1000 - - PGID=1000 - - UMASK=002 restart: unless-stopped ``` diff --git a/docs/docs/installation.md b/docs/docs/installation.md index a9618a2..1f731dc 100644 --- a/docs/docs/installation.md +++ b/docs/docs/installation.md @@ -52,14 +52,9 @@ services: container_name: decypharr ports: - "8282:8282" - user: "1000:1000" volumes: - /mnt/:/mnt # Mount your media directory - ./config/:/app # config.json must be in this directory - environment: - - PUID=1000 - - PGID=1000 - - UMASK=002 - QBIT_PORT=8282 # qBittorrent Port (optional) restart: unless-stopped ``` @@ -69,6 +64,13 @@ Run the Docker Compose setup: docker-compose up -d ``` +#### Notes for Docker Users + +- Ensure that the `/mnt/` directory is mounted correctly to access your media files. +- The `./config/` directory should contain your `config.json` file. +- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions. +- The `UMASK` environment variable can be set to control file permissions created by Decypharr. + ## Binary Installation If you prefer not to use Docker, you can download and run the binary directly. @@ -107,10 +109,4 @@ You can also configure Decypharr through the web interface, but it's recommended "log_level": "info", "port": "8282" } -``` - -### Few Notes - -- Make sure decypharr has access to the directories specified in the configuration file. -- Ensure decypharr have write permissions to the qbittorrent download folder. -- Make sure decypharr can write to the `./config/` directory. 
\ No newline at end of file +``` \ No newline at end of file diff --git a/internal/request/request.go b/internal/request/request.go index a6acd40..aacfe1e 100644 --- a/internal/request/request.go +++ b/internal/request/request.go @@ -2,7 +2,6 @@ package request import ( "bytes" - "compress/gzip" "context" "crypto/tls" "encoding/json" @@ -383,31 +382,6 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) { } } -func Gzip(body []byte) []byte { - if len(body) == 0 { - return nil - } - - // Check if the pool is nil - buf := bytes.NewBuffer(make([]byte, 0, len(body))) - - gz, err := gzip.NewWriterLevel(buf, gzip.BestSpeed) - if err != nil { - return nil - } - - if _, err := gz.Write(body); err != nil { - return nil - } - if err := gz.Close(); err != nil { - return nil - } - result := make([]byte, buf.Len()) - copy(result, buf.Bytes()) - - return result -} - func Default() *Client { once.Do(func() { instance = New() @@ -435,7 +409,7 @@ func isRetryableError(err error) bool { var netErr net.Error if errors.As(err, &netErr) { // Retry on timeout errors and temporary errors - return netErr.Timeout() || netErr.Temporary() + return netErr.Timeout() } // Not a retryable error diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/alldebrid/alldebrid.go index 2824a83..7dec3d2 100644 --- a/pkg/debrid/alldebrid/alldebrid.go +++ b/pkg/debrid/alldebrid/alldebrid.go @@ -31,7 +31,11 @@ type AllDebrid struct { addSamples bool } -func New(dc config.Debrid) *AllDebrid { +func (ad *AllDebrid) GetProfile() (*types.Profile, error) { + return nil, nil +} + +func New(dc config.Debrid) (*AllDebrid, error) { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ @@ -65,7 +69,7 @@ func New(dc config.Debrid) *AllDebrid { logger: logger.New(dc.Name), checkCached: dc.CheckCached, addSamples: dc.AddSamples, - } + }, nil } func (ad *AllDebrid) GetName() string { diff --git a/pkg/debrid/debrid/debrid.go b/pkg/debrid/debrid/debrid.go index 2a4a0a2..8169ae3 
100644 --- a/pkg/debrid/debrid/debrid.go +++ b/pkg/debrid/debrid/debrid.go @@ -13,7 +13,7 @@ import ( "strings" ) -func createDebridClient(dc config.Debrid) types.Client { +func createDebridClient(dc config.Debrid) (types.Client, error) { switch dc.Name { case "realdebrid": return realdebrid.New(dc) diff --git a/pkg/debrid/debrid/download_link.go b/pkg/debrid/debrid/download_link.go index 9a6b152..53ed4ce 100644 --- a/pkg/debrid/debrid/download_link.go +++ b/pkg/debrid/debrid/download_link.go @@ -52,6 +52,12 @@ func (c *downloadLinkCache) Delete(key string) { delete(c.data, key) } +func (c *downloadLinkCache) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return len(c.data) +} + type downloadLinkRequest struct { result string err error @@ -245,3 +251,7 @@ func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, e file := ct.Files[filename] return file.ByteRange, nil } + +func (c *Cache) GetTotalActiveDownloadLinks() int { + return c.downloadLinks.Len() +} diff --git a/pkg/debrid/debrid/engine.go b/pkg/debrid/debrid/engine.go index b2412c5..520ecdb 100644 --- a/pkg/debrid/debrid/engine.go +++ b/pkg/debrid/debrid/engine.go @@ -2,6 +2,7 @@ package debrid import ( "github.com/sirrobot01/decypharr/internal/config" + "github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/pkg/debrid/types" "sync" ) @@ -10,7 +11,7 @@ type Engine struct { Clients map[string]types.Client clientsMu sync.Mutex Caches map[string]*Cache - CacheMu sync.Mutex + cacheMu sync.Mutex LastUsed string } @@ -18,16 +19,22 @@ func NewEngine() *Engine { cfg := config.Get() clients := make(map[string]types.Client) + _logger := logger.Default() + caches := make(map[string]*Cache) for _, dc := range cfg.Debrids { - client := createDebridClient(dc) - logger := client.GetLogger() + client, err := createDebridClient(dc) + if err != nil { + _logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client") + continue + } + _log := 
client.GetLogger() if dc.UseWebDav { caches[dc.Name] = New(dc, client) - logger.Info().Msg("Debrid Service started with WebDAV") + _log.Info().Msg("Debrid Service started with WebDAV") } else { - logger.Info().Msg("Debrid Service started") + _log.Info().Msg("Debrid Service started") } clients[dc.Name] = client } @@ -51,9 +58,9 @@ func (d *Engine) Reset() { d.Clients = make(map[string]types.Client) d.clientsMu.Unlock() - d.CacheMu.Lock() + d.cacheMu.Lock() d.Caches = make(map[string]*Cache) - d.CacheMu.Unlock() + d.cacheMu.Unlock() } func (d *Engine) GetDebrids() map[string]types.Client { diff --git a/pkg/debrid/debrid/misc.go b/pkg/debrid/debrid/misc.go index b004078..501ea08 100644 --- a/pkg/debrid/debrid/misc.go +++ b/pkg/debrid/debrid/misc.go @@ -25,3 +25,18 @@ func mergeFiles(torrents ...CachedTorrent) map[string]types.File { } return merged } + +func (c *Cache) GetIngests() ([]types.IngestData, error) { + torrents := c.GetTorrents() + debridName := c.client.GetName() + var ingests []types.IngestData + for _, torrent := range torrents { + ingests = append(ingests, types.IngestData{ + Debrid: debridName, + Name: torrent.Filename, + Hash: torrent.InfoHash, + Size: torrent.Bytes, + }) + } + return ingests, nil +} diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/debrid_link/debrid_link.go index d8ece9a..6dd3edc 100644 --- a/pkg/debrid/debrid_link/debrid_link.go +++ b/pkg/debrid/debrid_link/debrid_link.go @@ -31,6 +31,48 @@ type DebridLink struct { addSamples bool } +func New(dc config.Debrid) (*DebridLink, error) { + rl := request.ParseRateLimit(dc.RateLimit) + + headers := map[string]string{ + "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), + "Content-Type": "application/json", + } + _log := logger.New(dc.Name) + client := request.New( + request.WithHeaders(headers), + request.WithLogger(_log), + request.WithRateLimiter(rl), + request.WithProxy(dc.Proxy), + ) + + accounts := make(map[string]types.Account) + for idx, key := range 
dc.DownloadAPIKeys { + id := strconv.Itoa(idx) + accounts[id] = types.Account{ + Name: key, + ID: id, + Token: key, + } + } + return &DebridLink{ + Name: "debridlink", + Host: "https://debrid-link.com/api/v2", + APIKey: dc.APIKey, + accounts: accounts, + DownloadUncached: dc.DownloadUncached, + client: client, + MountPath: dc.Folder, + logger: logger.New(dc.Name), + checkCached: dc.CheckCached, + addSamples: dc.AddSamples, + }, nil +} + +func (dl *DebridLink) GetProfile() (*types.Profile, error) { + return nil, nil +} + func (dl *DebridLink) GetName() string { return dl.Name } @@ -335,44 +377,6 @@ func (dl *DebridLink) GetDownloadUncached() bool { return dl.DownloadUncached } -func New(dc config.Debrid) *DebridLink { - rl := request.ParseRateLimit(dc.RateLimit) - - headers := map[string]string{ - "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), - "Content-Type": "application/json", - } - _log := logger.New(dc.Name) - client := request.New( - request.WithHeaders(headers), - request.WithLogger(_log), - request.WithRateLimiter(rl), - request.WithProxy(dc.Proxy), - ) - - accounts := make(map[string]types.Account) - for idx, key := range dc.DownloadAPIKeys { - id := strconv.Itoa(idx) - accounts[id] = types.Account{ - Name: key, - ID: id, - Token: key, - } - } - return &DebridLink{ - Name: "debridlink", - Host: "https://debrid-link.com/api/v2", - APIKey: dc.APIKey, - accounts: accounts, - DownloadUncached: dc.DownloadUncached, - client: client, - MountPath: dc.Folder, - logger: logger.New(dc.Name), - checkCached: dc.CheckCached, - addSamples: dc.AddSamples, - } -} - func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) { page := 0 perPage := 100 diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/realdebrid/realdebrid.go index da1825e..aa873db 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/realdebrid/realdebrid.go @@ -45,9 +45,10 @@ type RealDebrid struct { rarSemaphore chan struct{} checkCached bool addSamples bool + Profile 
*types.Profile } -func New(dc config.Debrid) *RealDebrid { +func New(dc config.Debrid) (*RealDebrid, error) { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ @@ -70,7 +71,7 @@ func New(dc config.Debrid) *RealDebrid { "Authorization": fmt.Sprintf("Bearer %s", currentDownloadKey), } - return &RealDebrid{ + r := &RealDebrid{ Name: "realdebrid", Host: "https://api.real-debrid.com/rest/1.0", APIKey: dc.APIKey, @@ -99,6 +100,12 @@ func New(dc config.Debrid) *RealDebrid { checkCached: dc.CheckCached, addSamples: dc.AddSamples, } + + if _, err := r.GetProfile(); err != nil { + return nil, err + } else { + return r, nil + } } func (r *RealDebrid) GetName() string { @@ -908,3 +915,30 @@ func (r *RealDebrid) DeleteDownloadLink(linkId string) error { } return nil } + +func (r *RealDebrid) GetProfile() (*types.Profile, error) { + if r.Profile != nil { + return r.Profile, nil + } + url := fmt.Sprintf("%s/user", r.Host) + req, _ := http.NewRequest(http.MethodGet, url, nil) + + resp, err := r.client.MakeRequest(req) + if err != nil { + return nil, err + } + var data profileResponse + if json.Unmarshal(resp, &data) != nil { + return nil, err + } + profile := &types.Profile{ + Id: data.Id, + Username: data.Username, + Email: data.Email, + Points: data.Points, + Premium: data.Premium, + Expiration: data.Expiration, + Type: data.Type, + } + return profile, nil +} diff --git a/pkg/debrid/realdebrid/types.go b/pkg/debrid/realdebrid/types.go index ab6879a..5195e60 100644 --- a/pkg/debrid/realdebrid/types.go +++ b/pkg/debrid/realdebrid/types.go @@ -139,3 +139,15 @@ type ErrorResponse struct { Error string `json:"error"` ErrorCode int `json:"error_code"` } + +type profileResponse struct { + Id int64 `json:"id"` + Username string `json:"username"` + Email string `json:"email"` + Points int64 `json:"points"` + Locale string `json:"locale"` + Avatar string `json:"avatar"` + Type string `json:"type"` + Premium int `json:"premium"` + Expiration time.Time 
`json:"expiration"` +} diff --git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/torbox/torbox.go index 9ce0397..30ed9c6 100644 --- a/pkg/debrid/torbox/torbox.go +++ b/pkg/debrid/torbox/torbox.go @@ -37,7 +37,11 @@ type Torbox struct { addSamples bool } -func New(dc config.Debrid) *Torbox { +func (tb *Torbox) GetProfile() (*types.Profile, error) { + return nil, nil +} + +func New(dc config.Debrid) (*Torbox, error) { rl := request.ParseRateLimit(dc.RateLimit) headers := map[string]string{ @@ -73,7 +77,7 @@ func New(dc config.Debrid) *Torbox { logger: _log, checkCached: dc.CheckCached, addSamples: dc.AddSamples, - } + }, nil } func (tb *Torbox) GetName() string { diff --git a/pkg/debrid/types/client.go b/pkg/debrid/types/client.go index 857e1d0..f9d967b 100644 --- a/pkg/debrid/types/client.go +++ b/pkg/debrid/types/client.go @@ -25,4 +25,5 @@ type Client interface { DisableAccount(string) ResetActiveDownloadKeys() DeleteDownloadLink(linkId string) error + GetProfile() (*Profile, error) } diff --git a/pkg/debrid/types/torrent.go b/pkg/debrid/types/torrent.go index ac69ac4..9332943 100644 --- a/pkg/debrid/types/torrent.go +++ b/pkg/debrid/types/torrent.go @@ -125,3 +125,25 @@ type Account struct { Name string `json:"name"` Token string `json:"token"` } + +type IngestData struct { + Debrid string `json:"debrid"` + Name string `json:"name"` + Hash string `json:"hash"` + Size int64 `json:"size"` +} + +type Profile struct { + Name string `json:"name"` + Id int64 `json:"id"` + Username string `json:"username"` + Email string `json:"email"` + Points int64 `json:"points"` + Type string `json:"type"` + Premium int `json:"premium"` + Expiration time.Time `json:"expiration"` + + LibrarySize int `json:"library_size"` + BadTorrents int `json:"bad_torrents"` + ActiveLinks int `json:"active_links"` +} diff --git a/pkg/qbit/downloader.go b/pkg/qbit/downloader.go index a1fb00e..3321031 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/qbit/downloader.go @@ -309,70 +309,6 @@ func (q *QBit) 
createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, t return symlinkPath, nil } -func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) { - files := debridTorrent.Files - symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ - err := os.MkdirAll(symlinkPath, os.ModePerm) - if err != nil { - return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err) - } - - remainingFiles := make(map[string]debrid.File) - for _, file := range files { - remainingFiles[file.Path] = file - } - - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - timeout := time.After(30 * time.Minute) - filePaths := make([]string, 0, len(files)) - - for len(remainingFiles) > 0 { - select { - case <-ticker.C: - entries, err := os.ReadDir(rclonePath) - if err != nil { - continue - } - - // Check which files exist in this batch - for _, entry := range entries { - filename := entry.Name() - if file, exists := remainingFiles[filename]; exists { - fullFilePath := filepath.Join(rclonePath, filename) - fileSymlinkPath := filepath.Join(symlinkPath, file.Name) - - if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) { - q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) - } else { - filePaths = append(filePaths, fileSymlinkPath) - delete(remainingFiles, filename) - q.logger.Info().Msgf("File is ready: %s", file.Name) - } - } - } - - case <-timeout: - q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles)) - return symlinkPath, fmt.Errorf("timeout waiting for files") - } - } - - if q.SkipPreCache { - return symlinkPath, nil - } - - go func() { - - if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil { - q.logger.Error().Msgf("Failed to pre-cache file: %s", err) - } else { - q.logger.Trace().Msgf("Pre-cached %d files", 
len(filePaths)) - } - }() // Pre-cache the files in the background - return symlinkPath, nil -} - func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) { for { torrentPath, err := debridTorrent.GetMountFolder(rclonePath) diff --git a/pkg/repair/clean.go b/pkg/repair/clean.go deleted file mode 100644 index d992322..0000000 --- a/pkg/repair/clean.go +++ /dev/null @@ -1,159 +0,0 @@ -package repair - -//func (r *Repair) clean(job *Job) error { -// // Create a new error group -// g, ctx := errgroup.WithContext(context.Background()) -// -// uniqueItems := make(map[string]string) -// mu := sync.Mutex{} -// -// // Limit concurrent goroutines -// g.SetLimit(10) -// -// for _, a := range job.Arrs { -// a := a // Capture range variable -// g.Go(func() error { -// // Check if context was canceled -// select { -// case <-ctx.Done(): -// return ctx.Err() -// default: -// } -// -// items, err := r.cleanArr(job, a, "") -// if err != nil { -// r.logger.Error().Err(err).Msgf("Error cleaning %s", a) -// return err -// } -// -// // Safely append the found items to the shared slice -// if len(items) > 0 { -// mu.Lock() -// for k, v := range items { -// uniqueItems[k] = v -// } -// mu.Unlock() -// } -// -// return nil -// }) -// } -// -// if err := g.Wait(); err != nil { -// return err -// } -// -// if len(uniqueItems) == 0 { -// job.CompletedAt = time.Now() -// job.Status = JobCompleted -// -// go func() { -// if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil { -// r.logger.Error().Msgf("Error sending discord message: %v", err) -// } -// }() -// -// return nil -// } -// -// cache := r.deb.Caches["realdebrid"] -// if cache == nil { -// return fmt.Errorf("cache not found") -// } -// torrents := cache.GetTorrents() -// -// dangling := make([]string, 0) -// for _, t := range torrents { -// if _, ok := uniqueItems[t.Name]; !ok { -// dangling = append(dangling, t.Id) -// } -// } -// -// 
r.logger.Info().Msgf("Found %d delapitated items", len(dangling)) -// -// if len(dangling) == 0 { -// job.CompletedAt = time.Now() -// job.Status = JobCompleted -// return nil -// } -// -// client := r.deb.Clients["realdebrid"] -// if client == nil { -// return fmt.Errorf("client not found") -// } -// for _, id := range dangling { -// err := client.DeleteTorrent(id) -// if err != nil { -// return err -// } -// } -// -// return nil -//} -// -//func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) { -// uniqueItems := make(map[string]string) -// a := r.arrs.Get(_arr) -// -// r.logger.Info().Msgf("Starting repair for %s", a.Name) -// media, err := a.GetMedia(tmdbId) -// if err != nil { -// r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err) -// return uniqueItems, err -// } -// -// // Create a new error group -// g, ctx := errgroup.WithContext(context.Background()) -// -// mu := sync.Mutex{} -// -// // Limit concurrent goroutines -// g.SetLimit(runtime.NumCPU() * 4) -// -// for _, m := range media { -// m := m // Create a new variable scoped to the loop iteration -// g.Go(func() error { -// // Check if context was canceled -// select { -// case <-ctx.Done(): -// return ctx.Err() -// default: -// } -// -// u := r.getUniquePaths(m) -// for k, v := range u { -// mu.Lock() -// uniqueItems[k] = v -// mu.Unlock() -// } -// return nil -// }) -// } -// -// if err := g.Wait(); err != nil { -// return uniqueItems, err -// } -// -// r.logger.Info().Msgf("Repair completed for %s. 
%d unique items", a.Name, len(uniqueItems)) -// return uniqueItems, nil -//} - -//func (r *Repair) getUniquePaths(media arr.Content) map[string]string { -// // Use zurg setup to check file availability with zurg -// // This reduces bandwidth usage significantly -// -// uniqueParents := make(map[string]string) -// files := media.Files -// for _, file := range files { -// target := getSymlinkTarget(file.Path) -// if target != "" { -// file.IsSymlink = true -// dir, f := filepath.Split(target) -// parent := filepath.Base(filepath.Clean(dir)) -// // Set target path folder/file.mkv -// file.TargetPath = f -// uniqueParents[parent] = target -// } -// } -// return uniqueParents -//} diff --git a/pkg/server/debug.go b/pkg/server/debug.go new file mode 100644 index 0000000..e7d1246 --- /dev/null +++ b/pkg/server/debug.go @@ -0,0 +1,116 @@ +package server + +import ( + "fmt" + "github.com/go-chi/chi/v5" + "github.com/sirrobot01/decypharr/internal/request" + debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" + "github.com/sirrobot01/decypharr/pkg/service" + "net/http" + "runtime" +) + +func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) { + ingests := make([]debridTypes.IngestData, 0) + svc := service.GetService() + if svc.Debrid == nil { + http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError) + return + } + for _, cache := range svc.Debrid.Caches { + if cache == nil { + s.logger.Error().Msg("Debrid cache is nil, skipping") + continue + } + data, err := cache.GetIngests() + if err != nil { + s.logger.Error().Err(err).Msg("Failed to get ingests from debrid cache") + http.Error(w, "Failed to get ingests: "+err.Error(), http.StatusInternalServerError) + return + } + ingests = append(ingests, data...) 
+ } + + request.JSONResponse(w, ingests, 200) +} + +func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) { + debridName := chi.URLParam(r, "debrid") + if debridName == "" { + http.Error(w, "Debrid name is required", http.StatusBadRequest) + return + } + + svc := service.GetService() + if svc.Debrid == nil { + http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError) + return + } + + cache, exists := svc.Debrid.Caches[debridName] + if !exists { + http.Error(w, "Debrid cache not found: "+debridName, http.StatusNotFound) + return + } + + data, err := cache.GetIngests() + if err != nil { + s.logger.Error().Err(err).Msg("Failed to get ingests from debrid cache") + http.Error(w, "Failed to get ingests: "+err.Error(), http.StatusInternalServerError) + return + } + + request.JSONResponse(w, data, 200) +} + +func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { + var memStats runtime.MemStats + runtime.ReadMemStats(&memStats) + + stats := map[string]any{ + // Memory stats + "heap_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.HeapAlloc)/1024/1024), + "total_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.TotalAlloc)/1024/1024), + "memory_used": fmt.Sprintf("%.2fMB", float64(memStats.Sys)/1024/1024), + + // GC stats + "gc_cycles": memStats.NumGC, + // Goroutine stats + "goroutines": runtime.NumGoroutine(), + + // System info + "num_cpu": runtime.NumCPU(), + + // OS info + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "go_version": runtime.Version(), + } + + svc := service.GetService() + if svc.Debrid == nil { + request.JSONResponse(w, stats, http.StatusOK) + return + } + clients := svc.Debrid.GetDebrids() + profiles := make([]*debridTypes.Profile, 0) + for debridName, client := range clients { + profile, err := client.GetProfile() + profile.Name = debridName + if err != nil { + s.logger.Error().Err(err).Msg("Failed to get debrid profile") + continue + } + cache, ok := svc.Debrid.Caches[debridName] + if ok 
{ + // Get torrent data + profile.LibrarySize = len(cache.GetTorrents()) + profile.BadTorrents = len(cache.GetListing("__bad__")) + profile.ActiveLinks = cache.GetTotalActiveDownloadLinks() + + } + profiles = append(profiles, profile) + } + stats["debrids"] = profiles + request.JSONResponse(w, stats, http.StatusOK) +} diff --git a/pkg/server/server.go b/pkg/server/server.go index 4640ef0..8dd793c 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -9,12 +9,10 @@ import ( "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/internal/request" "io" "net/http" "net/url" "os" - "runtime" ) type Server struct { @@ -45,8 +43,12 @@ func New(handlers map[string]http.Handler) *Server { //logs r.Get("/logs", s.getLogs) - //stats - r.Get("/stats", s.getStats) + //debugs + r.Route("/debug", func(r chi.Router) { + r.Get("/stats", s.handleStats) + r.Get("/ingests", s.handleIngests) + r.Get("/ingests/{debrid}", s.handleIngestsByDebrid) + }) //webhooks r.Post("/webhooks/tautulli", s.handleTautulli) @@ -108,29 +110,3 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) { return } } - -func (s *Server) getStats(w http.ResponseWriter, r *http.Request) { - var memStats runtime.MemStats - runtime.ReadMemStats(&memStats) - - stats := map[string]interface{}{ - // Memory stats - "heap_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.HeapAlloc)/1024/1024), - "total_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.TotalAlloc)/1024/1024), - "memory_used": fmt.Sprintf("%.2fMB", float64(memStats.Sys)/1024/1024), - - // GC stats - "gc_cycles": memStats.NumGC, - // Goroutine stats - "goroutines": runtime.NumGoroutine(), - - // System info - "num_cpu": runtime.NumCPU(), - - // OS info - "os": runtime.GOOS, - "arch": runtime.GOARCH, - "go_version": runtime.Version(), - } - request.JSONResponse(w, stats, http.StatusOK) -} diff --git a/pkg/service/service.go 
b/pkg/service/service.go index eb7378b..d41a3c2 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -37,12 +37,6 @@ func Reset() { if instance.Debrid != nil { instance.Debrid.Reset() } - if instance.Arr != nil { - //instance.Arr.Reset() - } - if instance.Repair != nil { - //instance.Repair.Reset() - } } once = sync.Once{} instance = nil diff --git a/pkg/web/templates/layout.html b/pkg/web/templates/layout.html index a30fd1c..b4f269b 100644 --- a/pkg/web/templates/layout.html +++ b/pkg/web/templates/layout.html @@ -169,7 +169,7 @@ - + Stats Loading... diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index babe36a..3fd6c0b 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -31,7 +31,6 @@ type File struct { cache *debrid.Cache fileId string torrentName string - torrentId string modTime time.Time @@ -47,7 +46,6 @@ type File struct { downloadLink string link string - canDelete bool } // File interface implementations for File diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 95b6df0..8df178b 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -326,7 +326,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { }, } handler.ServeHTTP(w, r) - return } func getContentType(fileName string) string { diff --git a/pkg/webdav/misc.go b/pkg/webdav/misc.go index 3521c74..25deb85 100644 --- a/pkg/webdav/misc.go +++ b/pkg/webdav/misc.go @@ -11,16 +11,6 @@ import ( "time" ) -// getName: Returns the torrent name and filename from the path -func getName(rootDir, path string) (string, string) { - path = strings.TrimPrefix(path, rootDir) - parts := strings.Split(strings.TrimPrefix(path, string(os.PathSeparator)), string(os.PathSeparator)) - if len(parts) < 2 { - return "", "" - } - return parts[1], strings.Join(parts[2:], string(os.PathSeparator)) // Note the change from [0] to [1] -} - func isValidURL(str string) bool { u, err := url.Parse(str) // A valid URL should parse without error, and have a non-empty 
scheme and host. From 9c6c44d785b1da319e3a59a833ace0dba41daa06 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 2 Jun 2025 12:57:36 +0100 Subject: [PATCH 05/26] - Revamp decypharr arch \n - Add callback_ur, download_folder to addContent API \n - Fix few bugs \n - More declarative UI keywords - Speed up repairs - Few other improvements/bug fixes --- Dockerfile | 2 - cmd/decypharr/main.go | 21 +- cmd/healthcheck/main.go | 5 +- docs/docs/guides/rclone.md | 24 +- docs/docs/installation.md | 39 ++- internal/config/config.go | 4 + internal/utils/file.go | 65 +++++ internal/utils/magnet.go | 11 +- pkg/arr/arr.go | 65 ++--- pkg/arr/import.go | 1 - pkg/debrid/debrid.go | 218 ++++++++++++++++ pkg/debrid/debrid/debrid.go | 103 -------- pkg/debrid/debrid/engine.go | 68 ----- pkg/debrid/debrid/xml.go | 1 - .../{ => providers}/alldebrid/alldebrid.go | 0 pkg/debrid/{ => providers}/alldebrid/types.go | 0 .../debrid_link/debrid_link.go | 0 .../{ => providers}/debrid_link/types.go | 0 .../{ => providers}/realdebrid/realdebrid.go | 3 +- .../{ => providers}/realdebrid/types.go | 0 pkg/debrid/{ => providers}/torbox/torbox.go | 0 pkg/debrid/{ => providers}/torbox/types.go | 0 pkg/debrid/{debrid => store}/cache.go | 8 +- pkg/debrid/{debrid => store}/download_link.go | 4 +- pkg/debrid/{debrid => store}/misc.go | 2 +- pkg/debrid/{debrid => store}/refresh.go | 2 +- pkg/debrid/{debrid => store}/repair.go | 2 +- pkg/debrid/{debrid => store}/torrent.go | 2 +- pkg/debrid/{debrid => store}/worker.go | 2 +- pkg/debrid/store/xml.go | 1 + pkg/debrid/types/torrent.go | 10 +- pkg/qbit/context.go | 127 +++++++++ pkg/qbit/http.go | 165 +++--------- pkg/qbit/import.go | 80 ------ pkg/qbit/qbit.go | 46 ++-- pkg/qbit/routes.go | 6 +- pkg/qbit/torrent.go | 247 ++---------------- pkg/qbit/types.go | 78 +----- pkg/repair/misc.go | 95 +++++++ pkg/repair/repair.go | 218 ++++++++++------ pkg/server/debug.go | 28 +- pkg/server/webhook.go | 5 +- pkg/service/service.go | 47 ---- pkg/{qbit => 
store}/downloader.go | 164 ++++-------- pkg/{qbit => store}/misc.go | 14 +- pkg/store/request.go | 103 ++++++++ pkg/store/store.go | 75 ++++++ pkg/store/torrent.go | 210 +++++++++++++++ .../storage.go => store/torrent_storage.go} | 94 ++++++- pkg/web/api.go | 119 +++++---- pkg/web/auth.go | 6 +- pkg/web/middlewares.go | 6 +- pkg/web/routes.go | 49 ++-- pkg/web/templates/config.html | 70 ++--- pkg/web/templates/download.html | 44 +++- pkg/web/templates/index.html | 6 +- pkg/web/templates/layout.html | 30 +++ pkg/web/templates/repair.html | 130 +++++---- pkg/web/ui.go | 49 ++-- pkg/web/{server.go => web.go} | 38 ++- pkg/webdav/file.go | 8 +- pkg/webdav/handler.go | 14 +- pkg/webdav/misc.go | 4 +- pkg/webdav/propfind.go | 18 +- pkg/webdav/webdav.go | 5 +- pkg/worker/worker.go | 72 ----- scripts/deploy.sh | 57 ---- 67 files changed, 1726 insertions(+), 1464 deletions(-) create mode 100644 pkg/debrid/debrid.go delete mode 100644 pkg/debrid/debrid/debrid.go delete mode 100644 pkg/debrid/debrid/engine.go delete mode 100644 pkg/debrid/debrid/xml.go rename pkg/debrid/{ => providers}/alldebrid/alldebrid.go (100%) rename pkg/debrid/{ => providers}/alldebrid/types.go (100%) rename pkg/debrid/{ => providers}/debrid_link/debrid_link.go (100%) rename pkg/debrid/{ => providers}/debrid_link/types.go (100%) rename pkg/debrid/{ => providers}/realdebrid/realdebrid.go (99%) rename pkg/debrid/{ => providers}/realdebrid/types.go (100%) rename pkg/debrid/{ => providers}/torbox/torbox.go (100%) rename pkg/debrid/{ => providers}/torbox/types.go (100%) rename pkg/debrid/{debrid => store}/cache.go (99%) rename pkg/debrid/{debrid => store}/download_link.go (99%) rename pkg/debrid/{debrid => store}/misc.go (98%) rename pkg/debrid/{debrid => store}/refresh.go (99%) rename pkg/debrid/{debrid => store}/repair.go (99%) rename pkg/debrid/{debrid => store}/torrent.go (99%) rename pkg/debrid/{debrid => store}/worker.go (99%) create mode 100644 pkg/debrid/store/xml.go create mode 100644 
pkg/qbit/context.go delete mode 100644 pkg/qbit/import.go delete mode 100644 pkg/service/service.go rename pkg/{qbit => store}/downloader.go (61%) rename pkg/{qbit => store}/misc.go (57%) create mode 100644 pkg/store/request.go create mode 100644 pkg/store/store.go create mode 100644 pkg/store/torrent.go rename pkg/{qbit/storage.go => store/torrent_storage.go} (63%) rename pkg/web/{server.go => web.go} (70%) delete mode 100644 pkg/worker/worker.go delete mode 100755 scripts/deploy.sh diff --git a/Dockerfile b/Dockerfile index afe80a3..b12fe3b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -61,6 +61,4 @@ EXPOSE 8282 VOLUME ["/app"] USER nonroot:nonroot -HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app"] - CMD ["/usr/bin/decypharr", "--config", "/app"] \ No newline at end of file diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index 3e243e2..4cbc57a 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -7,11 +7,10 @@ import ( "github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/pkg/qbit" "github.com/sirrobot01/decypharr/pkg/server" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/version" "github.com/sirrobot01/decypharr/pkg/web" "github.com/sirrobot01/decypharr/pkg/webdav" - "github.com/sirrobot01/decypharr/pkg/worker" "net/http" "os" "runtime" @@ -62,7 +61,7 @@ func Start(ctx context.Context) error { qb := qbit.New() wd := webdav.New() - ui := web.New(qb).Routes() + ui := web.New().Routes() webdavRoutes := wd.Routes() qbitRoutes := qb.Routes() @@ -95,14 +94,14 @@ func Start(ctx context.Context) error { _log.Info().Msg("Restarting Decypharr...") <-done // wait for them to finish qb.Reset() - service.Reset() + store.Reset() // rebuild svcCtx off the original parent svcCtx, cancelSvc = context.WithCancel(ctx) runtime.GC() config.Reload() - service.Reset() + store.Reset() // loop will restart services 
automatically } } @@ -146,11 +145,7 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e }) safeGo(func() error { - return worker.Start(ctx) - }) - - safeGo(func() error { - arr := service.GetService().Arr + arr := store.GetStore().GetArr() if arr == nil { return nil } @@ -159,9 +154,9 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e if cfg := config.Get(); cfg.Repair.Enabled { safeGo(func() error { - r := service.GetService().Repair - if r != nil { - if err := r.Start(ctx); err != nil { + repair := store.GetStore().GetRepair() + if repair != nil { + if err := repair.Start(ctx); err != nil { _log.Error().Err(err).Msg("repair failed") } } diff --git a/cmd/healthcheck/main.go b/cmd/healthcheck/main.go index d5d740b..3c7253a 100644 --- a/cmd/healthcheck/main.go +++ b/cmd/healthcheck/main.go @@ -145,5 +145,8 @@ func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool { } defer resp.Body.Close() - return resp.StatusCode == 207 || resp.StatusCode == http.StatusOK + return resp.StatusCode == http.StatusMultiStatus || + resp.StatusCode == http.StatusOK || + resp.StatusCode == http.StatusServiceUnavailable // It's still indexing + } diff --git a/docs/docs/guides/rclone.md b/docs/docs/guides/rclone.md index 1fce513..bb0d40e 100644 --- a/docs/docs/guides/rclone.md +++ b/docs/docs/guides/rclone.md @@ -5,7 +5,7 @@ This guide will help you set up Decypharr with Rclone, allowing you to use your #### Rclone Make sure you have Rclone installed and configured on your system. You can follow the [Rclone installation guide](https://rclone.org/install/) for instructions. -It's recommended to use docker version of Rclone, as it provides a consistent environment across different platforms. +It's recommended to use a docker version of Rclone, as it provides a consistent environment across different platforms. 
### Steps @@ -35,7 +35,7 @@ Create a `rclone.conf` file in `/opt/rclone/` with your Rclone configuration. ```conf [decypharr] type = webdav -url = https://your-ip-or-domain:8282/webdav/realdebrid +url = http://your-ip-or-domain:8282/webdav/realdebrid vendor = other pacer_min_sleep = 0 ``` @@ -69,13 +69,10 @@ services: decypharr: image: cy01/blackhole:latest container_name: decypharr - user: "1000:1000" volumes: - - /mnt/:/mnt + - /mnt/:/mnt:rslave - /opt/decypharr/:/app environment: - - PUID=1000 - - PGID=1000 - UMASK=002 ports: - "8282:8282/tcp" @@ -87,14 +84,11 @@ services: restart: unless-stopped environment: TZ: UTC - PUID: 1000 - PGID: 1000 ports: - 5572:5572 volumes: - /mnt/remote/realdebrid:/data:rshared - /opt/rclone/rclone.conf:/config/rclone/rclone.conf - - /mnt:/mnt cap_add: - SYS_ADMIN security_opt: @@ -105,9 +99,17 @@ services: decypharr: condition: service_healthy restart: true - command: "mount decypharr: /data --allow-non-empty --allow-other --uid=1000 --gid=1000 --umask=002 --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth " + command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth " ``` +#### Docker Notes + +- Ensure that the `/mnt/` directory is mounted correctly to access your media files. +- You can check your current user and group IDs and UMASK by running `id -a` and `umask` commands in your terminal. +- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions. +- Also adding `--uid=$YOUR_PUID --gid=$YOUR_PGID` to the `rclone mount` command can help with permissions. +- The `UMASK` environment variable can be set to control file permissions created by Decypharr. + Start the containers: ```bash docker-compose up -d @@ -132,7 +134,7 @@ For each provider, you'll need a different rclone. 
OR you can change your `rclon ```apache [decypharr] type = webdav -url = https://your-ip-or-domain:8282/webdav/ +url = http://your-ip-or-domain:8282/webdav/ vendor = other pacer_min_sleep = 0 ``` diff --git a/docs/docs/installation.md b/docs/docs/installation.md index 1f731dc..9e703bc 100644 --- a/docs/docs/installation.md +++ b/docs/docs/installation.md @@ -45,7 +45,6 @@ docker run -d \ Create a `docker-compose.yml` file with the following content: ```yaml -version: '3.7' services: decypharr: image: cy01/blackhole:latest @@ -64,20 +63,14 @@ Run the Docker Compose setup: docker-compose up -d ``` -#### Notes for Docker Users - -- Ensure that the `/mnt/` directory is mounted correctly to access your media files. -- The `./config/` directory should contain your `config.json` file. -- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions. -- The `UMASK` environment variable can be set to control file permissions created by Decypharr. - ## Binary Installation If you prefer not to use Docker, you can download and run the binary directly. -Download the binary from the releases page +Download your OS-specific release from the [releases page](https://github.com/sirrobot01/decypharr/releases). Create a configuration file (see Configuration) Run the binary: + ```bash chmod +x decypharr ./decypharr --config /path/to/config/folder @@ -109,4 +102,30 @@ You can also configure Decypharr through the web interface, but it's recommended "log_level": "info", "port": "8282" } -``` \ No newline at end of file +``` + +### Notes for Docker Users + +- Ensure that the `/mnt/` directory is mounted correctly to access your media files. +- The `./config/` directory should contain your `config.json` file. +- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions. +- The `UMASK` environment variable can be set to control file permissions created by Decypharr. 
+ +##### Health Checks +- Health checks are disabled by default. You can enable them by adding a `healthcheck` section in your `docker-compose.yml` file. +- Health checks checks for availability of several parts of the application; + - The main web interface + - The qBittorrent API + - The WebDAV server (if enabled). You should disable health checks for the initial indexes as they can take a long time to complete. + +```yaml +services: + decypharr: + ... + ... + healthcheck: + test: ["CMD", "/usr/bin/healthcheck", "--config", "/app/"] + interval: 5s + timeout: 10s + retries: 3 +``` diff --git a/internal/config/config.go b/internal/config/config.go index d80093e..526d519 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -98,6 +98,10 @@ func (c *Config) AuthFile() string { return filepath.Join(c.Path, "auth.json") } +func (c *Config) TorrentsFile() string { + return filepath.Join(c.Path, "torrents.json") +} + func (c *Config) loadConfig() error { // Load the config file if configPath == "" { diff --git a/internal/utils/file.go b/internal/utils/file.go index f46423a..adc8a2d 100644 --- a/internal/utils/file.go +++ b/internal/utils/file.go @@ -1,7 +1,10 @@ package utils import ( + "fmt" + "io" "net/url" + "os" "strings" ) @@ -19,3 +22,65 @@ func PathUnescape(path string) string { return unescapedPath } + +func PreCacheFile(filePaths []string) error { + if len(filePaths) == 0 { + return fmt.Errorf("no file paths provided") + } + + for _, filePath := range filePaths { + err := func(f string) error { + + file, err := os.Open(f) + if err != nil { + if os.IsNotExist(err) { + // File has probably been moved by arr, return silently + return nil + } + return fmt.Errorf("failed to open file: %s: %v", f, err) + } + defer file.Close() + + // Pre-cache the file header (first 256KB) using 16KB chunks. 
+ if err := readSmallChunks(file, 0, 256*1024, 16*1024); err != nil { + return err + } + if err := readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil { + return err + } + return nil + }(filePath) + if err != nil { + return err + } + } + return nil +} + +func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error { + _, err := file.Seek(startPos, 0) + if err != nil { + return err + } + + buf := make([]byte, chunkSize) + bytesRemaining := totalToRead + + for bytesRemaining > 0 { + toRead := chunkSize + if bytesRemaining < chunkSize { + toRead = bytesRemaining + } + + n, err := file.Read(buf[:toRead]) + if err != nil { + if err == io.EOF { + break + } + return err + } + + bytesRemaining -= n + } + return nil +} diff --git a/internal/utils/magnet.go b/internal/utils/magnet.go index 02db58f..f9cc5ca 100644 --- a/internal/utils/magnet.go +++ b/internal/utils/magnet.go @@ -25,11 +25,11 @@ var ( ) type Magnet struct { - Name string - InfoHash string - Size int64 - Link string - File []byte + Name string `json:"name"` + InfoHash string `json:"infoHash"` + Size int64 `json:"size"` + Link string `json:"link"` + File []byte `json:"-"` } func (m *Magnet) IsTorrent() bool { @@ -83,7 +83,6 @@ func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) { if err != nil { return nil, err } - log.Println("InfoHash: ", infoHash) magnet := &Magnet{ InfoHash: infoHash, Name: info.Name, diff --git a/pkg/arr/arr.go b/pkg/arr/arr.go index c592194..4b90efd 100644 --- a/pkg/arr/arr.go +++ b/pkg/arr/arr.go @@ -11,7 +11,6 @@ import ( "github.com/sirrobot01/decypharr/internal/request" "io" "net/http" - "strconv" "strings" "sync" "time" @@ -121,10 +120,10 @@ type Storage struct { logger zerolog.Logger } -func (as *Storage) Cleanup() { - as.mu.Lock() - defer as.mu.Unlock() - as.Arrs = make(map[string]*Arr) +func (s *Storage) Cleanup() { + s.mu.Lock() + defer s.mu.Unlock() + s.Arrs = make(map[string]*Arr) } func InferType(host, name string) Type { @@ 
-154,26 +153,26 @@ func NewStorage() *Storage { } } -func (as *Storage) AddOrUpdate(arr *Arr) { - as.mu.Lock() - defer as.mu.Unlock() +func (s *Storage) AddOrUpdate(arr *Arr) { + s.mu.Lock() + defer s.mu.Unlock() if arr.Name == "" { return } - as.Arrs[arr.Name] = arr + s.Arrs[arr.Name] = arr } -func (as *Storage) Get(name string) *Arr { - as.mu.Lock() - defer as.mu.Unlock() - return as.Arrs[name] +func (s *Storage) Get(name string) *Arr { + s.mu.Lock() + defer s.mu.Unlock() + return s.Arrs[name] } -func (as *Storage) GetAll() []*Arr { - as.mu.Lock() - defer as.mu.Unlock() - arrs := make([]*Arr, 0, len(as.Arrs)) - for _, arr := range as.Arrs { +func (s *Storage) GetAll() []*Arr { + s.mu.Lock() + defer s.mu.Unlock() + arrs := make([]*Arr, 0, len(s.Arrs)) + for _, arr := range s.Arrs { if arr.Host != "" && arr.Token != "" { arrs = append(arrs, arr) } @@ -181,19 +180,19 @@ func (as *Storage) GetAll() []*Arr { return arrs } -func (as *Storage) Clear() { - as.mu.Lock() - defer as.mu.Unlock() - as.Arrs = make(map[string]*Arr) +func (s *Storage) Clear() { + s.mu.Lock() + defer s.mu.Unlock() + s.Arrs = make(map[string]*Arr) } -func (as *Storage) StartSchedule(ctx context.Context) error { +func (s *Storage) StartSchedule(ctx context.Context) error { ticker := time.NewTicker(10 * time.Second) select { case <-ticker.C: - as.cleanupArrsQueue() + s.cleanupArrsQueue() case <-ctx.Done(): ticker.Stop() return nil @@ -201,9 +200,9 @@ func (as *Storage) StartSchedule(ctx context.Context) error { return nil } -func (as *Storage) cleanupArrsQueue() { +func (s *Storage) cleanupArrsQueue() { arrs := make([]*Arr, 0) - for _, arr := range as.Arrs { + for _, arr := range s.Arrs { if !arr.Cleanup { continue } @@ -212,26 +211,18 @@ func (as *Storage) cleanupArrsQueue() { if len(arrs) > 0 { for _, arr := range arrs { if err := arr.CleanupQueue(); err != nil { - as.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name) + s.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", 
arr.Name) } } } } -func (a *Arr) Refresh() error { +func (a *Arr) Refresh() { payload := struct { Name string `json:"name"` }{ Name: "RefreshMonitoredDownloads", } - resp, err := a.Request(http.MethodPost, "api/v3/command", payload) - if err == nil && resp != nil { - statusOk := strconv.Itoa(resp.StatusCode)[0] == '2' - if statusOk { - return nil - } - } - - return fmt.Errorf("failed to refresh: %v", err) + _, _ = a.Request(http.MethodPost, "api/v3/command", payload) } diff --git a/pkg/arr/import.go b/pkg/arr/import.go index 9ef651b..b9709b8 100644 --- a/pkg/arr/import.go +++ b/pkg/arr/import.go @@ -205,5 +205,4 @@ func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, e } defer resp.Body.Close() return resp.Body, nil - } diff --git a/pkg/debrid/debrid.go b/pkg/debrid/debrid.go new file mode 100644 index 0000000..0506181 --- /dev/null +++ b/pkg/debrid/debrid.go @@ -0,0 +1,218 @@ +package debrid + +import ( + "context" + "fmt" + "github.com/sirrobot01/decypharr/internal/config" + "github.com/sirrobot01/decypharr/internal/logger" + "github.com/sirrobot01/decypharr/internal/utils" + "github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid" + "github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox" + "github.com/sirrobot01/decypharr/pkg/debrid/store" + "github.com/sirrobot01/decypharr/pkg/debrid/types" + "strings" + "sync" +) + +type Storage struct { + clients map[string]types.Client + clientsLock sync.Mutex + caches map[string]*store.Cache + cachesLock sync.Mutex + LastUsed string +} + +func NewStorage() *Storage { + cfg := config.Get() + clients := make(map[string]types.Client) + + _logger := logger.Default() + + caches := make(map[string]*store.Cache) + + for _, dc := range cfg.Debrids { + client, err := createDebridClient(dc) + if err != nil { + 
_logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client") + continue + } + _log := client.GetLogger() + if dc.UseWebDav { + caches[dc.Name] = store.NewDebridCache(dc, client) + _log.Info().Msg("Debrid Service started with WebDAV") + } else { + _log.Info().Msg("Debrid Service started") + } + clients[dc.Name] = client + } + + d := &Storage{ + clients: clients, + LastUsed: "", + caches: caches, + } + return d +} + +func (d *Storage) GetClient(name string) types.Client { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + client, exists := d.clients[name] + if !exists { + return nil + } + return client +} + +func (d *Storage) Reset() { + d.clientsLock.Lock() + d.clients = make(map[string]types.Client) + d.clientsLock.Unlock() + + d.cachesLock.Lock() + d.caches = make(map[string]*store.Cache) + d.cachesLock.Unlock() + d.LastUsed = "" +} + +func (d *Storage) GetClients() map[string]types.Client { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + clientsCopy := make(map[string]types.Client) + for name, client := range d.clients { + clientsCopy[name] = client + } + return clientsCopy +} + +func (d *Storage) GetCaches() map[string]*store.Cache { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + cachesCopy := make(map[string]*store.Cache) + for name, cache := range d.caches { + cachesCopy[name] = cache + } + return cachesCopy +} + +func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client { + d.clientsLock.Lock() + defer d.clientsLock.Unlock() + filteredClients := make(map[string]types.Client) + for name, client := range d.clients { + if filter(client) { + filteredClients[name] = client + } + } + return filteredClients +} + +func (d *Storage) FilterCaches(filter func(*store.Cache) bool) map[string]*store.Cache { + d.cachesLock.Lock() + defer d.cachesLock.Unlock() + filteredCaches := make(map[string]*store.Cache) + for name, cache := range d.caches { + if filter(cache) { + filteredCaches[name] = 
cache + } + } + return filteredCaches +} + +func createDebridClient(dc config.Debrid) (types.Client, error) { + switch dc.Name { + case "realdebrid": + return realdebrid.New(dc) + case "torbox": + return torbox.New(dc) + case "debridlink": + return debrid_link.New(dc) + case "alldebrid": + return alldebrid.New(dc) + default: + return realdebrid.New(dc) + } +} + +func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { + + debridTorrent := &types.Torrent{ + InfoHash: magnet.InfoHash, + Magnet: magnet, + Name: magnet.Name, + Arr: a, + Size: magnet.Size, + Files: make(map[string]types.File), + } + + clients := store.FilterClients(func(c types.Client) bool { + if selectedDebrid != "" && c.GetName() != selectedDebrid { + return false + } + return true + }) + + if len(clients) == 0 { + return nil, fmt.Errorf("no debrid clients available") + } + + errs := make([]error, 0, len(clients)) + + // Override first, arr second, debrid third + + if overrideDownloadUncached { + debridTorrent.DownloadUncached = true + } else if a.DownloadUncached != nil { + // Arr cached is set + debridTorrent.DownloadUncached = *a.DownloadUncached + } else { + debridTorrent.DownloadUncached = false + } + + for index, db := range clients { + _logger := db.GetLogger() + _logger.Info(). + Str("Debrid", db.GetName()). + Str("Arr", a.Name). + Str("Hash", debridTorrent.InfoHash). + Str("Name", debridTorrent.Name). 
+ Msg("Processing torrent") + + if !overrideDownloadUncached && a.DownloadUncached == nil { + debridTorrent.DownloadUncached = db.GetDownloadUncached() + } + + dbt, err := db.SubmitMagnet(debridTorrent) + if err != nil || dbt == nil || dbt.Id == "" { + errs = append(errs, err) + continue + } + dbt.Arr = a + _logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName()) + store.LastUsed = index + + torrent, err := db.CheckStatus(dbt, isSymlink) + if err != nil && torrent != nil && torrent.Id != "" { + // Delete the torrent if it was not downloaded + go func(id string) { + _ = db.DeleteTorrent(id) + }(torrent.Id) + } + return torrent, err + } + if len(errs) == 0 { + return nil, fmt.Errorf("failed to process torrent: no clients available") + } + if len(errs) == 1 { + return nil, fmt.Errorf("failed to process torrent: %w", errs[0]) + } else { + errStrings := make([]string, 0, len(errs)) + for _, err := range errs { + errStrings = append(errStrings, err.Error()) + } + return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", ")) + } +} diff --git a/pkg/debrid/debrid/debrid.go b/pkg/debrid/debrid/debrid.go deleted file mode 100644 index 8169ae3..0000000 --- a/pkg/debrid/debrid/debrid.go +++ /dev/null @@ -1,103 +0,0 @@ -package debrid - -import ( - "fmt" - "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/alldebrid" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid_link" - "github.com/sirrobot01/decypharr/pkg/debrid/realdebrid" - "github.com/sirrobot01/decypharr/pkg/debrid/torbox" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "strings" -) - -func createDebridClient(dc config.Debrid) (types.Client, error) { - switch dc.Name { - case "realdebrid": - return realdebrid.New(dc) - case "torbox": - return torbox.New(dc) - case "debridlink": - return debrid_link.New(dc) - 
case "alldebrid": - return alldebrid.New(dc) - default: - return realdebrid.New(dc) - } -} - -func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { - - debridTorrent := &types.Torrent{ - InfoHash: magnet.InfoHash, - Magnet: magnet, - Name: magnet.Name, - Arr: a, - Size: magnet.Size, - Files: make(map[string]types.File), - } - - errs := make([]error, 0, len(d.Clients)) - - // Override first, arr second, debrid third - - if overrideDownloadUncached { - debridTorrent.DownloadUncached = true - } else if a.DownloadUncached != nil { - // Arr cached is set - debridTorrent.DownloadUncached = *a.DownloadUncached - } else { - debridTorrent.DownloadUncached = false - } - - for index, db := range d.Clients { - logger := db.GetLogger() - logger.Info().Str("Debrid", db.GetName()).Str("Hash", debridTorrent.InfoHash).Msg("Processing torrent") - - if !overrideDownloadUncached && a.DownloadUncached == nil { - debridTorrent.DownloadUncached = db.GetDownloadUncached() - } - - //if db.GetCheckCached() { - // hash, exists := db.IsAvailable([]string{debridTorrent.InfoHash})[debridTorrent.InfoHash] - // if !exists || !hash { - // logger.Info().Msgf("Torrent: %s is not cached", debridTorrent.Name) - // continue - // } else { - // logger.Info().Msgf("Torrent: %s is cached(or downloading)", debridTorrent.Name) - // } - //} - - dbt, err := db.SubmitMagnet(debridTorrent) - if err != nil || dbt == nil || dbt.Id == "" { - errs = append(errs, err) - continue - } - dbt.Arr = a - logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName()) - d.LastUsed = index - - torrent, err := db.CheckStatus(dbt, isSymlink) - if err != nil && torrent != nil && torrent.Id != "" { - // Delete the torrent if it was not downloaded - go func(id string) { - _ = db.DeleteTorrent(id) - }(torrent.Id) - } - return torrent, err - } - if len(errs) == 0 { - return nil, fmt.Errorf("failed to process torrent: no 
clients available") - } - if len(errs) == 1 { - return nil, fmt.Errorf("failed to process torrent: %w", errs[0]) - } else { - errStrings := make([]string, 0, len(errs)) - for _, err := range errs { - errStrings = append(errStrings, err.Error()) - } - return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", ")) - } -} diff --git a/pkg/debrid/debrid/engine.go b/pkg/debrid/debrid/engine.go deleted file mode 100644 index 520ecdb..0000000 --- a/pkg/debrid/debrid/engine.go +++ /dev/null @@ -1,68 +0,0 @@ -package debrid - -import ( - "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "sync" -) - -type Engine struct { - Clients map[string]types.Client - clientsMu sync.Mutex - Caches map[string]*Cache - cacheMu sync.Mutex - LastUsed string -} - -func NewEngine() *Engine { - cfg := config.Get() - clients := make(map[string]types.Client) - - _logger := logger.Default() - - caches := make(map[string]*Cache) - - for _, dc := range cfg.Debrids { - client, err := createDebridClient(dc) - if err != nil { - _logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client") - continue - } - _log := client.GetLogger() - if dc.UseWebDav { - caches[dc.Name] = New(dc, client) - _log.Info().Msg("Debrid Service started with WebDAV") - } else { - _log.Info().Msg("Debrid Service started") - } - clients[dc.Name] = client - } - - d := &Engine{ - Clients: clients, - LastUsed: "", - Caches: caches, - } - return d -} - -func (d *Engine) GetClient(name string) types.Client { - d.clientsMu.Lock() - defer d.clientsMu.Unlock() - return d.Clients[name] -} - -func (d *Engine) Reset() { - d.clientsMu.Lock() - d.Clients = make(map[string]types.Client) - d.clientsMu.Unlock() - - d.cacheMu.Lock() - d.Caches = make(map[string]*Cache) - d.cacheMu.Unlock() -} - -func (d *Engine) GetDebrids() map[string]types.Client { - return d.Clients -} diff --git 
a/pkg/debrid/debrid/xml.go b/pkg/debrid/debrid/xml.go deleted file mode 100644 index 57845ff..0000000 --- a/pkg/debrid/debrid/xml.go +++ /dev/null @@ -1 +0,0 @@ -package debrid diff --git a/pkg/debrid/alldebrid/alldebrid.go b/pkg/debrid/providers/alldebrid/alldebrid.go similarity index 100% rename from pkg/debrid/alldebrid/alldebrid.go rename to pkg/debrid/providers/alldebrid/alldebrid.go diff --git a/pkg/debrid/alldebrid/types.go b/pkg/debrid/providers/alldebrid/types.go similarity index 100% rename from pkg/debrid/alldebrid/types.go rename to pkg/debrid/providers/alldebrid/types.go diff --git a/pkg/debrid/debrid_link/debrid_link.go b/pkg/debrid/providers/debrid_link/debrid_link.go similarity index 100% rename from pkg/debrid/debrid_link/debrid_link.go rename to pkg/debrid/providers/debrid_link/debrid_link.go diff --git a/pkg/debrid/debrid_link/types.go b/pkg/debrid/providers/debrid_link/types.go similarity index 100% rename from pkg/debrid/debrid_link/types.go rename to pkg/debrid/providers/debrid_link/types.go diff --git a/pkg/debrid/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go similarity index 99% rename from pkg/debrid/realdebrid/realdebrid.go rename to pkg/debrid/providers/realdebrid/realdebrid.go index aa873db..5dfa438 100644 --- a/pkg/debrid/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "io" "net/http" gourl "net/url" @@ -20,8 +21,6 @@ import ( "github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "github.com/sirrobot01/decypharr/pkg/rar" ) diff --git a/pkg/debrid/realdebrid/types.go b/pkg/debrid/providers/realdebrid/types.go similarity index 100% rename from pkg/debrid/realdebrid/types.go rename to pkg/debrid/providers/realdebrid/types.go diff 
--git a/pkg/debrid/torbox/torbox.go b/pkg/debrid/providers/torbox/torbox.go similarity index 100% rename from pkg/debrid/torbox/torbox.go rename to pkg/debrid/providers/torbox/torbox.go diff --git a/pkg/debrid/torbox/types.go b/pkg/debrid/providers/torbox/types.go similarity index 100% rename from pkg/debrid/torbox/types.go rename to pkg/debrid/providers/torbox/types.go diff --git a/pkg/debrid/debrid/cache.go b/pkg/debrid/store/cache.go similarity index 99% rename from pkg/debrid/debrid/cache.go rename to pkg/debrid/store/cache.go index 0c2d1ae..9d08d7d 100644 --- a/pkg/debrid/debrid/cache.go +++ b/pkg/debrid/store/cache.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "bufio" @@ -6,6 +6,7 @@ import ( "context" "errors" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "os" "path" "path/filepath" @@ -22,7 +23,6 @@ import ( "github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/types" _ "time/tzdata" ) @@ -108,7 +108,7 @@ type Cache struct { customFolders []string } -func New(dc config.Debrid, client types.Client) *Cache { +func NewDebridCache(dc config.Debrid, client types.Client) *Cache { cfg := config.Get() cetSc, err := gocron.NewScheduler(gocron.WithLocation(time.UTC)) if err != nil { @@ -691,7 +691,7 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error { return nil } -func (c *Cache) AddTorrent(t *types.Torrent) error { +func (c *Cache) Add(t *types.Torrent) error { if len(t.Files) == 0 { if err := c.client.UpdateTorrent(t); err != nil { return fmt.Errorf("failed to update torrent: %w", err) diff --git a/pkg/debrid/debrid/download_link.go b/pkg/debrid/store/download_link.go similarity index 99% rename from pkg/debrid/debrid/download_link.go rename to pkg/debrid/store/download_link.go index 53ed4ce..a404d27 100644 --- a/pkg/debrid/debrid/download_link.go +++ b/pkg/debrid/store/download_link.go @@ 
-1,14 +1,14 @@ -package debrid +package store import ( "errors" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "sync" "time" "github.com/sirrobot01/decypharr/internal/request" - "github.com/sirrobot01/decypharr/pkg/debrid/types" ) type linkCache struct { diff --git a/pkg/debrid/debrid/misc.go b/pkg/debrid/store/misc.go similarity index 98% rename from pkg/debrid/debrid/misc.go rename to pkg/debrid/store/misc.go index 501ea08..d0c089b 100644 --- a/pkg/debrid/debrid/misc.go +++ b/pkg/debrid/store/misc.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "github.com/sirrobot01/decypharr/pkg/debrid/types" diff --git a/pkg/debrid/debrid/refresh.go b/pkg/debrid/store/refresh.go similarity index 99% rename from pkg/debrid/debrid/refresh.go rename to pkg/debrid/store/refresh.go index cf9c728..882eb4e 100644 --- a/pkg/debrid/debrid/refresh.go +++ b/pkg/debrid/store/refresh.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "context" diff --git a/pkg/debrid/debrid/repair.go b/pkg/debrid/store/repair.go similarity index 99% rename from pkg/debrid/debrid/repair.go rename to pkg/debrid/store/repair.go index fae4ecd..9234995 100644 --- a/pkg/debrid/debrid/repair.go +++ b/pkg/debrid/store/repair.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "context" diff --git a/pkg/debrid/debrid/torrent.go b/pkg/debrid/store/torrent.go similarity index 99% rename from pkg/debrid/debrid/torrent.go rename to pkg/debrid/store/torrent.go index c782765..332a660 100644 --- a/pkg/debrid/debrid/torrent.go +++ b/pkg/debrid/store/torrent.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "fmt" diff --git a/pkg/debrid/debrid/worker.go b/pkg/debrid/store/worker.go similarity index 99% rename from pkg/debrid/debrid/worker.go rename to pkg/debrid/store/worker.go index b0a3705..8fba929 100644 --- a/pkg/debrid/debrid/worker.go +++ b/pkg/debrid/store/worker.go @@ -1,4 +1,4 @@ -package debrid +package store import ( "context" diff --git a/pkg/debrid/store/xml.go 
b/pkg/debrid/store/xml.go new file mode 100644 index 0000000..72440ea --- /dev/null +++ b/pkg/debrid/store/xml.go @@ -0,0 +1 @@ +package store diff --git a/pkg/debrid/types/torrent.go b/pkg/debrid/types/torrent.go index 9332943..0975e0d 100644 --- a/pkg/debrid/types/torrent.go +++ b/pkg/debrid/types/torrent.go @@ -34,10 +34,12 @@ type Torrent struct { Debrid string `json:"debrid"` - Arr *arr.Arr `json:"arr"` - Mu sync.Mutex `json:"-"` - SizeDownloaded int64 `json:"-"` // This is used for local download - DownloadUncached bool `json:"-"` + Arr *arr.Arr `json:"arr"` + + SizeDownloaded int64 `json:"-"` // This is used for local download + DownloadUncached bool `json:"-"` + + sync.Mutex } type DownloadLink struct { diff --git a/pkg/qbit/context.go b/pkg/qbit/context.go new file mode 100644 index 0000000..893cc76 --- /dev/null +++ b/pkg/qbit/context.go @@ -0,0 +1,127 @@ +package qbit + +import ( + "context" + "encoding/base64" + "github.com/go-chi/chi/v5" + "github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/store" + "net/http" + "strings" +) + +type contextKey string + +const ( + categoryKey contextKey = "category" + hashesKey contextKey = "hashes" + arrKey contextKey = "arr" +) + +func getCategory(ctx context.Context) string { + if category, ok := ctx.Value(categoryKey).(string); ok { + return category + } + return "" +} + +func getHashes(ctx context.Context) []string { + if hashes, ok := ctx.Value(hashesKey).([]string); ok { + return hashes + } + return nil +} + +func getArr(ctx context.Context) *arr.Arr { + if a, ok := ctx.Value(arrKey).(*arr.Arr); ok { + return a + } + return nil +} + +func decodeAuthHeader(header string) (string, string, error) { + encodedTokens := strings.Split(header, " ") + if len(encodedTokens) != 2 { + return "", "", nil + } + encodedToken := encodedTokens[1] + + bytes, err := base64.StdEncoding.DecodeString(encodedToken) + if err != nil { + return "", "", err + } + + bearer := string(bytes) + + colonIndex := 
strings.LastIndex(bearer, ":") + host := bearer[:colonIndex] + token := bearer[colonIndex+1:] + + return host, token, nil +} + +func (q *QBit) categoryContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + category := strings.Trim(r.URL.Query().Get("category"), "") + if category == "" { + // Get from form + _ = r.ParseForm() + category = r.Form.Get("category") + if category == "" { + // Get from multipart form + _ = r.ParseMultipartForm(32 << 20) + category = r.FormValue("category") + } + } + ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category)) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func (q *QBit) authContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + host, token, err := decodeAuthHeader(r.Header.Get("Authorization")) + category := getCategory(r.Context()) + arrs := store.GetStore().GetArr() + // Check if arr exists + a := arrs.Get(category) + if a == nil { + downloadUncached := false + a = arr.New(category, "", "", false, false, &downloadUncached) + } + if err == nil { + host = strings.TrimSpace(host) + if host != "" { + a.Host = host + } + token = strings.TrimSpace(token) + if token != "" { + a.Token = token + } + } + + arrs.AddOrUpdate(a) + ctx := context.WithValue(r.Context(), arrKey, a) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func hashesContext(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _hashes := chi.URLParam(r, "hashes") + var hashes []string + if _hashes != "" { + hashes = strings.Split(_hashes, "|") + } + if hashes == nil { + // Get hashes from form + _ = r.ParseForm() + hashes = r.Form["hashes"] + } + for i, hash := range hashes { + hashes[i] = strings.TrimSpace(hash) + } + ctx := context.WithValue(r.Context(), hashesKey, hashes) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} diff --git a/pkg/qbit/http.go 
b/pkg/qbit/http.go index b6efde0..1f81e7f 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -1,107 +1,16 @@ package qbit import ( - "context" - "encoding/base64" - "github.com/go-chi/chi/v5" "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/service" "net/http" "path/filepath" "strings" ) -func decodeAuthHeader(header string) (string, string, error) { - encodedTokens := strings.Split(header, " ") - if len(encodedTokens) != 2 { - return "", "", nil - } - encodedToken := encodedTokens[1] - - bytes, err := base64.StdEncoding.DecodeString(encodedToken) - if err != nil { - return "", "", err - } - - bearer := string(bytes) - - colonIndex := strings.LastIndex(bearer, ":") - host := bearer[:colonIndex] - token := bearer[colonIndex+1:] - - return host, token, nil -} - -func (q *QBit) CategoryContext(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - category := strings.Trim(r.URL.Query().Get("category"), "") - if category == "" { - // Get from form - _ = r.ParseForm() - category = r.Form.Get("category") - if category == "" { - // Get from multipart form - _ = r.ParseMultipartForm(32 << 20) - category = r.FormValue("category") - } - } - ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category)) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -func (q *QBit) authContext(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - host, token, err := decodeAuthHeader(r.Header.Get("Authorization")) - category := r.Context().Value("category").(string) - svc := service.GetService() - // Check if arr exists - a := svc.Arr.Get(category) - if a == nil { - downloadUncached := false - a = arr.New(category, "", "", false, false, &downloadUncached) - } - if err == nil { - host = strings.TrimSpace(host) - if host != "" { - a.Host = host - } - token = strings.TrimSpace(token) 
- if token != "" { - a.Token = token - } - } - - svc.Arr.AddOrUpdate(a) - ctx := context.WithValue(r.Context(), "arr", a) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -func HashesCtx(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _hashes := chi.URLParam(r, "hashes") - var hashes []string - if _hashes != "" { - hashes = strings.Split(_hashes, "|") - } - if hashes == nil { - // Get hashes from form - _ = r.ParseForm() - hashes = r.Form["hashes"] - } - for i, hash := range hashes { - hashes[i] = strings.TrimSpace(hash) - } - ctx := context.WithValue(r.Context(), "hashes", hashes) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - _arr := ctx.Value("arr").(*arr.Arr) + _arr := getArr(ctx) if _arr == nil { // No arr _, _ = w.Write([]byte("Ok.")) @@ -122,7 +31,7 @@ func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) { } func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) { - preferences := NewAppPreferences() + preferences := getAppPreferences() preferences.WebUiUsername = q.Username preferences.SavePath = q.DownloadFolder @@ -150,10 +59,10 @@ func (q *QBit) handleShutdown(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) { //log all url params ctx := r.Context() - category := ctx.Value("category").(string) + category := getCategory(ctx) filter := strings.Trim(r.URL.Query().Get("filter"), "") - hashes, _ := ctx.Value("hashes").([]string) - torrents := q.Storage.GetAllSorted(category, filter, hashes, "added_on", false) + hashes := getHashes(ctx) + torrents := q.storage.GetAllSorted(category, filter, hashes, "added_on", false) request.JSONResponse(w, torrents, http.StatusOK) } @@ -180,9 +89,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { } isSymlink := 
strings.ToLower(r.FormValue("sequentialDownload")) != "true" + debridName := r.FormValue("debrid") category := r.FormValue("category") + _arr := getArr(ctx) + if _arr == nil { + _arr = arr.New(category, "", "", false, false, nil) + } atleastOne := false - ctx = context.WithValue(ctx, "isSymlink", isSymlink) // Handle magnet URLs if urls := r.FormValue("urls"); urls != "" { @@ -191,7 +104,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { urlList = append(urlList, strings.TrimSpace(u)) } for _, url := range urlList { - if err := q.AddMagnet(ctx, url, category); err != nil { + if err := q.addMagnet(ctx, url, _arr, debridName, isSymlink); err != nil { q.logger.Info().Msgf("Error adding magnet: %v", err) http.Error(w, err.Error(), http.StatusBadRequest) return @@ -204,7 +117,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { if r.MultipartForm != nil && r.MultipartForm.File != nil { if files := r.MultipartForm.File["torrents"]; len(files) > 0 { for _, fileHeader := range files { - if err := q.AddTorrent(ctx, fileHeader, category); err != nil { + if err := q.addTorrent(ctx, fileHeader, _arr, debridName, isSymlink); err != nil { q.logger.Info().Msgf("Error adding torrent: %v", err) http.Error(w, err.Error(), http.StatusBadRequest) return @@ -224,14 +137,14 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) + hashes := getHashes(ctx) if len(hashes) == 0 { http.Error(w, "No hashes provided", http.StatusBadRequest) return } - category := ctx.Value("category").(string) + category := getCategory(ctx) for _, hash := range hashes { - q.Storage.Delete(hash, category, false) + q.storage.Delete(hash, category, false) } w.WriteHeader(http.StatusOK) @@ -239,10 +152,10 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) { func (q 
*QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) - category := ctx.Value("category").(string) + hashes := getHashes(ctx) + category := getCategory(ctx) for _, hash := range hashes { - torrent := q.Storage.Get(hash, category) + torrent := q.storage.Get(hash, category) if torrent == nil { continue } @@ -254,10 +167,10 @@ func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) - category := ctx.Value("category").(string) + hashes := getHashes(ctx) + category := getCategory(ctx) for _, hash := range hashes { - torrent := q.Storage.Get(hash, category) + torrent := q.storage.Get(hash, category) if torrent == nil { continue } @@ -269,10 +182,10 @@ func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) - category := ctx.Value("category").(string) + hashes := getHashes(ctx) + category := getCategory(ctx) for _, hash := range hashes { - torrent := q.Storage.Get(hash, category) + torrent := q.storage.Get(hash, category) if torrent == nil { continue } @@ -315,7 +228,7 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) { ctx := r.Context() hash := r.URL.Query().Get("hash") - torrent := q.Storage.Get(hash, ctx.Value("category").(string)) + torrent := q.storage.Get(hash, getCategory(ctx)) properties := q.GetTorrentProperties(torrent) request.JSONResponse(w, properties, http.StatusOK) @@ -324,22 +237,22 @@ func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) { ctx := 
r.Context() hash := r.URL.Query().Get("hash") - torrent := q.Storage.Get(hash, ctx.Value("category").(string)) + torrent := q.storage.Get(hash, getCategory(ctx)) if torrent == nil { return } - files := q.GetTorrentFiles(torrent) + files := q.getTorrentFiles(torrent) request.JSONResponse(w, files, http.StatusOK) } func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - category := ctx.Value("category").(string) - hashes, _ := ctx.Value("hashes").([]string) - torrents := q.Storage.GetAll("", "", hashes) + category := getCategory(ctx) + hashes := getHashes(ctx) + torrents := q.storage.GetAll("", "", hashes) for _, torrent := range torrents { torrent.Category = category - q.Storage.AddOrUpdate(torrent) + q.storage.AddOrUpdate(torrent) } request.JSONResponse(w, nil, http.StatusOK) } @@ -351,33 +264,33 @@ func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) { return } ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) + hashes := getHashes(ctx) tags := strings.Split(r.FormValue("tags"), ",") for i, tag := range tags { tags[i] = strings.TrimSpace(tag) } - torrents := q.Storage.GetAll("", "", hashes) + torrents := q.storage.GetAll("", "", hashes) for _, t := range torrents { - q.SetTorrentTags(t, tags) + q.setTorrentTags(t, tags) } request.JSONResponse(w, nil, http.StatusOK) } -func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) { +func (q *QBit) handleremoveTorrentTags(w http.ResponseWriter, r *http.Request) { err := r.ParseForm() if err != nil { http.Error(w, "Failed to parse form data", http.StatusBadRequest) return } ctx := r.Context() - hashes, _ := ctx.Value("hashes").([]string) + hashes := getHashes(ctx) tags := strings.Split(r.FormValue("tags"), ",") for i, tag := range tags { tags[i] = strings.TrimSpace(tag) } - torrents := q.Storage.GetAll("", "", hashes) + torrents := q.storage.GetAll("", "", hashes) for _, torrent := range torrents { - 
q.RemoveTorrentTags(torrent, tags) + q.removeTorrentTags(torrent, tags) } request.JSONResponse(w, nil, http.StatusOK) @@ -397,6 +310,6 @@ func (q *QBit) handleCreateTags(w http.ResponseWriter, r *http.Request) { for i, tag := range tags { tags[i] = strings.TrimSpace(tag) } - q.AddTags(tags) + q.addTags(tags) request.JSONResponse(w, nil, http.StatusOK) } diff --git a/pkg/qbit/import.go b/pkg/qbit/import.go deleted file mode 100644 index 438b141..0000000 --- a/pkg/qbit/import.go +++ /dev/null @@ -1,80 +0,0 @@ -package qbit - -import ( - "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - "github.com/sirrobot01/decypharr/pkg/service" - "time" - - "github.com/google/uuid" - "github.com/sirrobot01/decypharr/pkg/arr" -) - -type ImportRequest struct { - ID string `json:"id"` - Path string `json:"path"` - Magnet *utils.Magnet `json:"magnet"` - Arr *arr.Arr `json:"arr"` - IsSymlink bool `json:"isSymlink"` - SeriesId int `json:"series"` - Seasons []int `json:"seasons"` - Episodes []string `json:"episodes"` - DownloadUncached bool `json:"downloadUncached"` - - Failed bool `json:"failed"` - FailedAt time.Time `json:"failedAt"` - Reason string `json:"reason"` - Completed bool `json:"completed"` - CompletedAt time.Time `json:"completedAt"` - Async bool `json:"async"` -} - -type ManualImportResponseSchema struct { - Priority string `json:"priority"` - Status string `json:"status"` - Result string `json:"result"` - Queued time.Time `json:"queued"` - Trigger string `json:"trigger"` - SendUpdatesToClient bool `json:"sendUpdatesToClient"` - UpdateScheduledTask bool `json:"updateScheduledTask"` - Id int `json:"id"` -} - -func NewImportRequest(magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest { - return &ImportRequest{ - ID: uuid.NewString(), - Magnet: magnet, - Arr: arr, - Failed: false, - Completed: false, - Async: false, - IsSymlink: isSymlink, - DownloadUncached: downloadUncached, - } -} - -func 
(i *ImportRequest) Fail(reason string) { - i.Failed = true - i.FailedAt = time.Now() - i.Reason = reason -} - -func (i *ImportRequest) Complete() { - i.Completed = true - i.CompletedAt = time.Now() -} - -func (i *ImportRequest) Process(q *QBit) (err error) { - // Use this for now. - // This sends the torrent to the arr - svc := service.GetService() - torrent := createTorrentFromMagnet(i.Magnet, i.Arr.Name, "manual") - debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, i.Magnet, i.Arr, i.IsSymlink, i.DownloadUncached) - if err != nil { - return err - } - torrent = q.UpdateTorrentMin(torrent, debridTorrent) - q.Storage.AddOrUpdate(torrent) - go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink) - return nil -} diff --git a/pkg/qbit/qbit.go b/pkg/qbit/qbit.go index 732d411..04c92a4 100644 --- a/pkg/qbit/qbit.go +++ b/pkg/qbit/qbit.go @@ -1,52 +1,38 @@ package qbit import ( - "cmp" "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/logger" - "os" - "path/filepath" + "github.com/sirrobot01/decypharr/pkg/store" ) type QBit struct { - Username string `json:"username"` - Password string `json:"password"` - Port string `json:"port"` - DownloadFolder string `json:"download_folder"` - Categories []string `json:"categories"` - Storage *TorrentStorage - logger zerolog.Logger - Tags []string - RefreshInterval int - SkipPreCache bool - - downloadSemaphore chan struct{} + Username string + Password string + DownloadFolder string + Categories []string + storage *store.TorrentStorage + logger zerolog.Logger + Tags []string } func New() *QBit { _cfg := config.Get() cfg := _cfg.QBitTorrent - port := cmp.Or(_cfg.Port, os.Getenv("QBIT_PORT"), "8282") - refreshInterval := cmp.Or(cfg.RefreshInterval, 10) return &QBit{ - Username: cfg.Username, - Password: cfg.Password, - Port: port, - DownloadFolder: cfg.DownloadFolder, - Categories: cfg.Categories, - Storage: NewTorrentStorage(filepath.Join(_cfg.Path, 
"torrents.json")), - logger: logger.New("qbit"), - RefreshInterval: refreshInterval, - SkipPreCache: cfg.SkipPreCache, - downloadSemaphore: make(chan struct{}, cmp.Or(cfg.MaxDownloads, 5)), + Username: cfg.Username, + Password: cfg.Password, + DownloadFolder: cfg.DownloadFolder, + Categories: cfg.Categories, + storage: store.GetStore().GetTorrentStorage(), + logger: logger.New("qbit"), } } func (q *QBit) Reset() { - if q.Storage != nil { - q.Storage.Reset() + if q.storage != nil { + q.storage.Reset() } q.Tags = nil - close(q.downloadSemaphore) } diff --git a/pkg/qbit/routes.go b/pkg/qbit/routes.go index ca6f29a..9881247 100644 --- a/pkg/qbit/routes.go +++ b/pkg/qbit/routes.go @@ -7,12 +7,12 @@ import ( func (q *QBit) Routes() http.Handler { r := chi.NewRouter() - r.Use(q.CategoryContext) + r.Use(q.categoryContext) r.Group(func(r chi.Router) { r.Use(q.authContext) r.Post("/auth/login", q.handleLogin) r.Route("/torrents", func(r chi.Router) { - r.Use(HashesCtx) + r.Use(hashesContext) r.Get("/info", q.handleTorrentsInfo) r.Post("/add", q.handleTorrentsAdd) r.Post("/delete", q.handleTorrentsDelete) @@ -20,7 +20,7 @@ func (q *QBit) Routes() http.Handler { r.Post("/createCategory", q.handleCreateCategory) r.Post("/setCategory", q.handleSetCategory) r.Post("/addTags", q.handleAddTorrentTags) - r.Post("/removeTags", q.handleRemoveTorrentTags) + r.Post("/removeTags", q.handleremoveTorrentTags) r.Post("/createTags", q.handleCreateTags) r.Get("/tags", q.handleGetTags) r.Get("/pause", q.handleTorrentsPause) diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 5b61e39..5e9ae77 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -1,38 +1,35 @@ package qbit import ( - "cmp" "context" "fmt" - "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" - 
"github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "io" "mime/multipart" - "os" - "path/filepath" "strings" "time" ) -// All torrent related helpers goes here - -func (q *QBit) AddMagnet(ctx context.Context, url, category string) error { +// All torrent-related helpers goes here +func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, isSymlink bool) error { magnet, err := utils.GetMagnetFromUrl(url) if err != nil { return fmt.Errorf("error parsing magnet link: %w", err) } - err = q.Process(ctx, magnet, category) + _store := store.GetStore() + + importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) + + err = _store.AddTorrent(ctx, importReq) if err != nil { return fmt.Errorf("failed to process torrent: %w", err) } return nil } -func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error { +func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, isSymlink bool) error { file, _ := fileHeader.Open() defer file.Close() var reader io.Reader = file @@ -40,226 +37,28 @@ func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, if err != nil { return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err) } - err = q.Process(ctx, magnet, category) + _store := store.GetStore() + importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) + err = _store.AddTorrent(ctx, importReq) if err != nil { return fmt.Errorf("failed to process torrent: %w", err) } return nil } -func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error { - svc := service.GetService() - torrent := createTorrentFromMagnet(magnet, category, "auto") - a, ok := ctx.Value("arr").(*arr.Arr) - if !ok { - return fmt.Errorf("arr not found in 
context") - } - isSymlink := ctx.Value("isSymlink").(bool) - debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false) - if err != nil || debridTorrent == nil { - if err == nil { - err = fmt.Errorf("failed to process torrent") - } - return err - } - torrent = q.UpdateTorrentMin(torrent, debridTorrent) - q.Storage.AddOrUpdate(torrent) - go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response - return nil -} - -func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debridTypes.Torrent, arr *arr.Arr, isSymlink bool) { - svc := service.GetService() - client := svc.Debrid.GetClient(debridTorrent.Debrid) - downloadingStatuses := client.GetDownloadingStatus() - for debridTorrent.Status != "downloaded" { - q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress) - dbT, err := client.CheckStatus(debridTorrent, isSymlink) - if err != nil { - if dbT != nil && dbT.Id != "" { - // Delete the torrent if it was not downloaded - go func() { - _ = client.DeleteTorrent(dbT.Id) - }() - } - q.logger.Error().Msgf("Error checking status: %v", err) - q.MarkAsFailed(torrent) - go func() { - if err := arr.Refresh(); err != nil { - q.logger.Error().Msgf("Error refreshing arr: %v", err) - } - }() - return - } - - debridTorrent = dbT - torrent = q.UpdateTorrentMin(torrent, debridTorrent) - - // Exit the loop for downloading statuses to prevent memory buildup - if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) { - break - } - if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) { - break - } - time.Sleep(time.Duration(q.RefreshInterval) * time.Second) - } - var torrentSymlinkPath string - var err error - debridTorrent.Arr = arr - - // Check if debrid supports webdav by checking cache - timer := time.Now() - if isSymlink { - cache, useWebdav := 
svc.Debrid.Caches[debridTorrent.Debrid] - if useWebdav { - q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) - - // Use webdav to download the file - - if err := cache.AddTorrent(debridTorrent); err != nil { - q.logger.Error().Msgf("Error adding torrent to cache: %v", err) - q.MarkAsFailed(torrent) - return - } - - rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow - torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name) - torrentSymlinkPath, err = q.createSymlinksWebdav(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/ - - } else { - // User is using either zurg or debrid webdav - torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ - } - } else { - torrentSymlinkPath, err = q.ProcessManualFile(torrent) - } - if err != nil { - q.MarkAsFailed(torrent) - go func() { - _ = client.DeleteTorrent(debridTorrent.Id) - }() - q.logger.Info().Msgf("Error: %v", err) - return - } - torrent.TorrentPath = torrentSymlinkPath - q.UpdateTorrent(torrent, debridTorrent) - q.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer)) - go func() { - if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil { - q.logger.Error().Msgf("Error sending discord message: %v", err) - } - }() - if err := arr.Refresh(); err != nil { - q.logger.Error().Msgf("Error refreshing arr: %v", err) - } -} - -func (q *QBit) MarkAsFailed(t *Torrent) *Torrent { - t.State = "error" - q.Storage.AddOrUpdate(t) - go func() { - if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil { - q.logger.Error().Msgf("Error sending discord message: %v", err) - } - }() - return t -} - -func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent { - if debridTorrent == nil { - return t - } - - addedOn, err := 
time.Parse(time.RFC3339, debridTorrent.Added) - if err != nil { - addedOn = time.Now() - } - totalSize := debridTorrent.Bytes - progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0 - sizeCompleted := int64(float64(totalSize) * progress) - - var speed int64 - if debridTorrent.Speed != 0 { - speed = debridTorrent.Speed - } - var eta int - if speed != 0 { - eta = int((totalSize - sizeCompleted) / speed) - } - t.ID = debridTorrent.Id - t.Name = debridTorrent.Name - t.AddedOn = addedOn.Unix() - t.DebridTorrent = debridTorrent - t.Debrid = debridTorrent.Debrid - t.Size = totalSize - t.Completed = sizeCompleted - t.Downloaded = sizeCompleted - t.DownloadedSession = sizeCompleted - t.Uploaded = sizeCompleted - t.UploadedSession = sizeCompleted - t.AmountLeft = totalSize - sizeCompleted - t.Progress = progress - t.Eta = eta - t.Dlspeed = speed - t.Upspeed = speed - t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator) - t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator) - return t -} - -func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent { - if debridTorrent == nil { - return t - } - - if debridClient := service.GetDebrid().GetClient(debridTorrent.Debrid); debridClient != nil { - if debridTorrent.Status != "downloaded" { - _ = debridClient.UpdateTorrent(debridTorrent) - } - } - t = q.UpdateTorrentMin(t, debridTorrent) - t.ContentPath = t.TorrentPath + string(os.PathSeparator) - - if t.IsReady() { - t.State = "pausedUP" - q.Storage.Update(t) - return t - } - - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if t.IsReady() { - t.State = "pausedUP" - q.Storage.Update(t) - return t - } - updatedT := q.UpdateTorrent(t, debridTorrent) - t = updatedT - - case <-time.After(10 * time.Minute): // Add a timeout - return t - } - } -} - -func (q *QBit) ResumeTorrent(t *Torrent) bool { +func (q *QBit) ResumeTorrent(t 
*store.Torrent) bool { return true } -func (q *QBit) PauseTorrent(t *Torrent) bool { +func (q *QBit) PauseTorrent(t *store.Torrent) bool { return true } -func (q *QBit) RefreshTorrent(t *Torrent) bool { +func (q *QBit) RefreshTorrent(t *store.Torrent) bool { return true } -func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties { +func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties { return &TorrentProperties{ AdditionDate: t.AddedOn, Comment: "Debrid Blackhole ", @@ -284,7 +83,7 @@ func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties { } } -func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile { +func (q *QBit) getTorrentFiles(t *store.Torrent) []*TorrentFile { files := make([]*TorrentFile, 0) if t.DebridTorrent == nil { return files @@ -298,7 +97,7 @@ func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile { return files } -func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool { +func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool { torrentTags := strings.Split(t.Tags, ",") for _, tag := range tags { if tag == "" { @@ -312,20 +111,20 @@ func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool { } } t.Tags = strings.Join(torrentTags, ",") - q.Storage.Update(t) + q.storage.Update(t) return true } -func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool { +func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool { torrentTags := strings.Split(t.Tags, ",") newTorrentTags := utils.RemoveItem(torrentTags, tags...) q.Tags = utils.RemoveItem(q.Tags, tags...) 
t.Tags = strings.Join(newTorrentTags, ",") - q.Storage.Update(t) + q.storage.Update(t) return true } -func (q *QBit) AddTags(tags []string) bool { +func (q *QBit) addTags(tags []string) bool { for _, tag := range tags { if tag == "" { continue @@ -337,7 +136,7 @@ func (q *QBit) AddTags(tags []string) bool { return true } -func (q *QBit) RemoveTags(tags []string) bool { +func (q *QBit) removeTags(tags []string) bool { q.Tags = utils.RemoveItem(q.Tags, tags...) return true } diff --git a/pkg/qbit/types.go b/pkg/qbit/types.go index 10179ee..719e75a 100644 --- a/pkg/qbit/types.go +++ b/pkg/qbit/types.go @@ -1,11 +1,5 @@ package qbit -import ( - "fmt" - "github.com/sirrobot01/decypharr/pkg/debrid/types" - "sync" -) - type BuildInfo struct { Libtorrent string `json:"libtorrent"` Bitness int `json:"bitness"` @@ -172,76 +166,6 @@ type TorrentCategory struct { SavePath string `json:"savePath"` } -type Torrent struct { - ID string `json:"id"` - DebridTorrent *types.Torrent `json:"-"` - Debrid string `json:"debrid"` - TorrentPath string `json:"-"` - - AddedOn int64 `json:"added_on,omitempty"` - AmountLeft int64 `json:"amount_left"` - AutoTmm bool `json:"auto_tmm"` - Availability float64 `json:"availability,omitempty"` - Category string `json:"category,omitempty"` - Completed int64 `json:"completed"` - CompletionOn int `json:"completion_on,omitempty"` - ContentPath string `json:"content_path"` - DlLimit int `json:"dl_limit"` - Dlspeed int64 `json:"dlspeed"` - Downloaded int64 `json:"downloaded"` - DownloadedSession int64 `json:"downloaded_session"` - Eta int `json:"eta"` - FlPiecePrio bool `json:"f_l_piece_prio,omitempty"` - ForceStart bool `json:"force_start,omitempty"` - Hash string `json:"hash"` - LastActivity int64 `json:"last_activity,omitempty"` - MagnetUri string `json:"magnet_uri,omitempty"` - MaxRatio int `json:"max_ratio,omitempty"` - MaxSeedingTime int `json:"max_seeding_time,omitempty"` - Name string `json:"name,omitempty"` - NumComplete int 
`json:"num_complete,omitempty"` - NumIncomplete int `json:"num_incomplete,omitempty"` - NumLeechs int `json:"num_leechs,omitempty"` - NumSeeds int `json:"num_seeds,omitempty"` - Priority int `json:"priority,omitempty"` - Progress float64 `json:"progress"` - Ratio int `json:"ratio,omitempty"` - RatioLimit int `json:"ratio_limit,omitempty"` - SavePath string `json:"save_path"` - SeedingTimeLimit int `json:"seeding_time_limit,omitempty"` - SeenComplete int64 `json:"seen_complete,omitempty"` - SeqDl bool `json:"seq_dl"` - Size int64 `json:"size,omitempty"` - State string `json:"state,omitempty"` - SuperSeeding bool `json:"super_seeding"` - Tags string `json:"tags,omitempty"` - TimeActive int `json:"time_active,omitempty"` - TotalSize int64 `json:"total_size,omitempty"` - Tracker string `json:"tracker,omitempty"` - UpLimit int64 `json:"up_limit,omitempty"` - Uploaded int64 `json:"uploaded,omitempty"` - UploadedSession int64 `json:"uploaded_session,omitempty"` - Upspeed int64 `json:"upspeed,omitempty"` - Source string `json:"source,omitempty"` - - Mu sync.Mutex `json:"-"` -} - -func (t *Torrent) IsReady() bool { - return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != "" -} - -func (t *Torrent) discordContext() string { - format := ` - **Name:** %s - **Arr:** %s - **Hash:** %s - **MagnetURI:** %s - **Debrid:** %s - ` - return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid) -} - type TorrentProperties struct { AdditionDate int64 `json:"addition_date,omitempty"` Comment string `json:"comment,omitempty"` @@ -289,7 +213,7 @@ type TorrentFile struct { Availability float64 `json:"availability,omitempty"` } -func NewAppPreferences() *AppPreferences { +func getAppPreferences() *AppPreferences { preferences := &AppPreferences{ AddTrackers: "", AddTrackersEnabled: false, diff --git a/pkg/repair/misc.go b/pkg/repair/misc.go index 0cce790..f2d2b64 100644 --- a/pkg/repair/misc.go +++ b/pkg/repair/misc.go @@ -3,6 +3,8 @@ package repair import ( "fmt" 
"github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/debrid/store" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "os" "path/filepath" ) @@ -82,3 +84,96 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile { } return uniqueParents } + +func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile { + brokenFiles := make([]arr.ContentFile, 0) + + r.logger.Debug().Msgf("Checking %s", torrentPath) + + // Get the debrid client + dir := filepath.Dir(torrentPath) + debridName := r.findDebridForPath(dir, clients) + if debridName == "" { + r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath) + return files // Return all files as broken if no debrid found + } + + cache, ok := caches[debridName] + if !ok { + r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName) + return files // Return all files as broken if no cache found + } + + // Check if torrent exists + torrentName := filepath.Clean(filepath.Base(torrentPath)) + torrent := cache.GetTorrentByName(torrentName) + if torrent == nil { + r.logger.Debug().Msgf("No torrent found for %s. 
Skipping", torrentName) + return files // Return all files as broken if torrent not found + } + + // Batch check files + filePaths := make([]string, len(files)) + for i, file := range files { + filePaths[i] = file.TargetPath + } + + brokenFilePaths := cache.GetBrokenFiles(torrent, filePaths) + if len(brokenFilePaths) > 0 { + r.logger.Debug().Msgf("%d broken files found in %s", len(brokenFilePaths), torrentName) + + // Create a set for O(1) lookup + brokenSet := make(map[string]bool, len(brokenFilePaths)) + for _, brokenPath := range brokenFilePaths { + brokenSet[brokenPath] = true + } + + // Filter broken files + for _, contentFile := range files { + if brokenSet[contentFile.TargetPath] { + brokenFiles = append(brokenFiles, contentFile) + } + } + } + + return brokenFiles +} + +func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string { + // Check cache first + r.cacheMutex.RLock() + if r.debridPathCache == nil { + r.debridPathCache = make(map[string]string) + } + if debridName, exists := r.debridPathCache[dir]; exists { + r.cacheMutex.RUnlock() + return debridName + } + r.cacheMutex.RUnlock() + + // Find debrid client + for _, client := range clients { + mountPath := client.GetMountPath() + if mountPath == "" { + continue + } + + if filepath.Clean(mountPath) == filepath.Clean(dir) { + debridName := client.GetName() + + // Cache the result + r.cacheMutex.Lock() + r.debridPathCache[dir] = debridName + r.cacheMutex.Unlock() + + return debridName + } + } + + // Cache empty result to avoid repeated lookups + r.cacheMutex.Lock() + r.debridPathCache[dir] = "" + r.cacheMutex.Unlock() + + return "" +} diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 4cc96ec..7a50a54 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -3,6 +3,7 @@ package repair import ( "context" "encoding/json" + "errors" "fmt" "github.com/go-co-op/gocron/v2" "github.com/google/uuid" @@ -12,7 +13,7 @@ import ( 
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" + "github.com/sirrobot01/decypharr/pkg/debrid" "golang.org/x/sync/errgroup" "net" "net/http" @@ -29,7 +30,7 @@ import ( type Repair struct { Jobs map[string]*Job arrs *arr.Storage - deb *debrid.Engine + deb *debrid.Storage interval string runOnStart bool ZurgURL string @@ -40,7 +41,10 @@ type Repair struct { filename string workers int scheduler gocron.Scheduler - ctx context.Context + + debridPathCache map[string]string // Cache for path -> debrid name mapping + cacheMutex sync.RWMutex + ctx context.Context } type JobStatus string @@ -51,6 +55,7 @@ const ( JobFailed JobStatus = "failed" JobCompleted JobStatus = "completed" JobProcessing JobStatus = "processing" + JobCancelled JobStatus = "cancelled" ) type Job struct { @@ -66,9 +71,12 @@ type Job struct { Recurrent bool `json:"recurrent"` Error string `json:"error"` + + cancelFunc context.CancelFunc + ctx context.Context } -func New(arrs *arr.Storage, engine *debrid.Engine) *Repair { +func New(arrs *arr.Storage, engine *debrid.Storage) *Repair { cfg := config.Get() workers := runtime.NumCPU() * 20 if cfg.Repair.Workers > 0 { @@ -220,7 +228,8 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job { func (r *Repair) preRunChecks() error { if r.useWebdav { - if len(r.deb.Caches) == 0 { + caches := r.deb.GetCaches() + if len(caches) == 0 { return fmt.Errorf("no caches found") } return nil @@ -254,21 +263,59 @@ func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recu job.AutoProcess = autoProcess job.Recurrent = recurrent r.reset(job) + + job.ctx, job.cancelFunc = context.WithCancel(r.ctx) r.Jobs[key] = job go r.saveToFile() go func() { if err := r.repair(job); err != nil { r.logger.Error().Err(err).Msg("Error running repair") - r.logger.Error().Err(err).Msg("Error running repair") - 
job.FailedAt = time.Now() - job.Error = err.Error() - job.Status = JobFailed - job.CompletedAt = time.Now() + if !errors.Is(job.ctx.Err(), context.Canceled) { + job.FailedAt = time.Now() + job.Error = err.Error() + job.Status = JobFailed + job.CompletedAt = time.Now() + } else { + job.FailedAt = time.Now() + job.Error = err.Error() + job.Status = JobFailed + job.CompletedAt = time.Now() + } } }() return nil } +func (r *Repair) StopJob(id string) error { + job := r.GetJob(id) + if job == nil { + return fmt.Errorf("job %s not found", id) + } + + // Check if job can be stopped + if job.Status != JobStarted && job.Status != JobProcessing { + return fmt.Errorf("job %s cannot be stopped (status: %s)", id, job.Status) + } + + // Cancel the job + if job.cancelFunc != nil { + job.cancelFunc() + r.logger.Info().Msgf("Job %s cancellation requested", id) + go func() { + if job.Status == JobStarted || job.Status == JobProcessing { + job.Status = JobCancelled + job.CompletedAt = time.Now() + job.Error = "Job was cancelled by user" + r.saveToFile() + } + }() + + return nil + } + + return fmt.Errorf("job %s cannot be cancelled", id) +} + func (r *Repair) repair(job *Job) error { defer r.saveToFile() if err := r.preRunChecks(); err != nil { @@ -278,7 +325,7 @@ func (r *Repair) repair(job *Job) error { // Use a mutex to protect concurrent access to brokenItems var mu sync.Mutex brokenItems := map[string][]arr.ContentFile{} - g, ctx := errgroup.WithContext(r.ctx) + g, ctx := errgroup.WithContext(job.ctx) for _, a := range job.Arrs { a := a // Capture range variable @@ -321,6 +368,14 @@ func (r *Repair) repair(job *Job) error { // Wait for all goroutines to complete and check for errors if err := g.Wait(); err != nil { + // Check if job was canceled + if errors.Is(ctx.Err(), context.Canceled) { + job.Status = JobCancelled + job.CompletedAt = time.Now() + job.Error = "Job was cancelled" + return fmt.Errorf("job cancelled") + } + job.FailedAt = time.Now() job.Error = err.Error()
job.Status = JobFailed @@ -367,7 +422,7 @@ func (r *Repair) repair(job *Job) error { return nil } -func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) { +func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) { brokenItems := make([]arr.ContentFile, 0) a := r.arrs.Get(_arr) @@ -384,7 +439,7 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil return brokenItems, nil } // Check first media to confirm mounts are accessible - if !r.isMediaAccessible(media[0]) { + if !r.isMediaAccessible(media) { r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts") return brokenItems, nil } @@ -400,14 +455,14 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil defer wg.Done() for m := range workerChan { select { - case <-r.ctx.Done(): + case <-job.ctx.Done(): return default: } - items := r.getBrokenFiles(m) + items := r.getBrokenFiles(job, m) if items != nil { r.logger.Debug().Msgf("Found %d broken files for %s", len(items), m.Title) - if j.AutoProcess { + if job.AutoProcess { r.logger.Info().Msgf("Auto processing %d broken items for %s", len(items), m.Title) // Delete broken items @@ -431,7 +486,7 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil for _, m := range media { select { - case <-r.ctx.Done(): + case <-job.ctx.Done(): break default: workerChan <- m @@ -449,43 +504,49 @@ func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFil return brokenItems, nil } -func (r *Repair) isMediaAccessible(m arr.Content) bool { - files := m.Files +// isMediaAccessible checks if the mounts are accessible +func (r *Repair) isMediaAccessible(media []arr.Content) bool { + firstMedia := media[0] + for _, m := range media { + if len(m.Files) > 0 { + firstMedia = m + break + } + } + files := firstMedia.Files if len(files) == 0 { return false } firstFile := files[0] - 
r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path) - //if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) { - // r.logger.Debug().Msgf("Parent directory not accessible for %s", firstFile.Path) - // return false - //} - // Check symlink parent directory symlinkPath := getSymlinkTarget(firstFile.Path) r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath) + parentSymlink := "" if symlinkPath != "" { - parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents + parentSymlink = filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents + } + if parentSymlink != "" { if _, err := os.Stat(parentSymlink); os.IsNotExist(err) { return false } + return true } - return true + return false } -func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { if r.useWebdav { - return r.getWebdavBrokenFiles(media) + return r.getWebdavBrokenFiles(job, media) } else if r.IsZurg { - return r.getZurgBrokenFiles(media) + return r.getZurgBrokenFiles(job, media) } else { - return r.getFileBrokenFiles(media) + return r.getFileBrokenFiles(job, media) } } -func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getFileBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { // This checks symlink target, try to get read a tiny bit of the file brokenFiles := make([]arr.ContentFile, 0) @@ -510,7 +571,7 @@ func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile { return brokenFiles } -func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getZurgBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { // Use zurg setup to check file availability with zurg // This reduces bandwidth usage significantly @@ -550,12 +611,17 @@ func (r *Repair) 
getZurgBrokenFiles(media arr.Content) []arr.ContentFile { } if resp.StatusCode < 200 || resp.StatusCode >= 300 { r.logger.Debug().Msgf("Failed to get download url for %s", fullURL) - resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return nil + } brokenFiles = append(brokenFiles, file) continue } downloadUrl := resp.Request.URL.String() - resp.Body.Close() + + if err := resp.Body.Close(); err != nil { + return nil + } if downloadUrl != "" { r.logger.Trace().Msgf("Found download url: %s", downloadUrl) } else { @@ -573,16 +639,16 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile { return brokenFiles } -func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile { +func (r *Repair) getWebdavBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { // Use internal webdav setup to check file availability - caches := r.deb.Caches + caches := r.deb.GetCaches() if len(caches) == 0 { r.logger.Info().Msg("No caches found. Can't use webdav") return nil } - clients := r.deb.Clients + clients := r.deb.GetClients() if len(clients) == 0 { r.logger.Info().Msg("No clients found. 
Can't use webdav") return nil @@ -590,58 +656,36 @@ func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile { brokenFiles := make([]arr.ContentFile, 0) uniqueParents := collectFiles(media) - for torrentPath, f := range uniqueParents { - r.logger.Debug().Msgf("Checking %s", torrentPath) - // Get the debrid first - dir := filepath.Dir(torrentPath) - debridName := "" - for _, client := range clients { - mountPath := client.GetMountPath() - if mountPath == "" { - continue + var brokenFilesMutex sync.Mutex + var wg sync.WaitGroup + + // Limit concurrent torrent checks + semaphore := make(chan struct{}, min(len(uniqueParents), 30)) // Limit to 30 concurrent checks + for torrentPath, files := range uniqueParents { + wg.Add(1) + go func(torrentPath string, files []arr.ContentFile) { + defer wg.Done() + semaphore <- struct{}{} // Acquire + defer func() { <-semaphore }() // Release + + select { + case <-job.ctx.Done(): + return + default: } - if filepath.Clean(mountPath) == filepath.Clean(dir) { - debridName = client.GetName() - break - } - } - if debridName == "" { - r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath) - continue - } - cache, ok := caches[debridName] - if !ok { - r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName) - continue - } - // Check if torrent exists - torrentName := filepath.Clean(filepath.Base(torrentPath)) - torrent := cache.GetTorrentByName(torrentName) - if torrent == nil { - r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName) - brokenFiles = append(brokenFiles, f...)
- continue - } - files := make([]string, 0) - for _, file := range f { - files = append(files, file.TargetPath) - } + brokenFilesForTorrent := r.checkTorrentFiles(torrentPath, files, clients, caches) - _brokenFiles := cache.GetBrokenFiles(torrent, files) - totalBrokenFiles := len(_brokenFiles) - if totalBrokenFiles > 0 { - r.logger.Debug().Msgf("%d broken files found in %s", totalBrokenFiles, torrentName) - for _, contentFile := range f { - if utils.Contains(_brokenFiles, contentFile.TargetPath) { - brokenFiles = append(brokenFiles, contentFile) - } + if len(brokenFilesForTorrent) > 0 { + brokenFilesMutex.Lock() + brokenFiles = append(brokenFiles, brokenFilesForTorrent...) + brokenFilesMutex.Unlock() } - } - + }(torrentPath, files) } + + wg.Wait() if len(brokenFiles) == 0 { - r.logger.Debug().Msgf("No broken files found for %s", media.Title) return nil } r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title) @@ -696,7 +740,11 @@ func (r *Repair) ProcessJob(id string) error { return nil } - g, ctx := errgroup.WithContext(r.ctx) + if job.ctx == nil || job.ctx.Err() != nil { + job.ctx, job.cancelFunc = context.WithCancel(r.ctx) + } + + g, ctx := errgroup.WithContext(job.ctx) g.SetLimit(r.workers) for arrName, items := range brokenItems { diff --git a/pkg/server/debug.go b/pkg/server/debug.go index e7d1246..2b20b9a 100644 --- a/pkg/server/debug.go +++ b/pkg/server/debug.go @@ -5,19 +5,20 @@ import ( "github.com/go-chi/chi/v5" "github.com/sirrobot01/decypharr/internal/request" debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "net/http" "runtime" ) func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) { ingests := make([]debridTypes.IngestData, 0) - svc := service.GetService() - if svc.Debrid == nil { + _store := store.GetStore() + debrids := _store.GetDebrid() + if debrids == nil { http.Error(w, "Debrid service is not 
enabled", http.StatusInternalServerError) return } - for _, cache := range svc.Debrid.Caches { + for _, cache := range debrids.GetCaches() { if cache == nil { s.logger.Error().Msg("Debrid cache is nil, skipping") continue @@ -41,13 +42,17 @@ func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) { return } - svc := service.GetService() - if svc.Debrid == nil { + _store := store.GetStore() + debrids := _store.GetDebrid() + + if debrids == nil { http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError) return } - cache, exists := svc.Debrid.Caches[debridName] + caches := debrids.GetCaches() + + cache, exists := caches[debridName] if !exists { http.Error(w, "Debrid cache not found: "+debridName, http.StatusNotFound) return @@ -87,12 +92,13 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { "go_version": runtime.Version(), } - svc := service.GetService() - if svc.Debrid == nil { + debrids := store.GetStore().GetDebrid() + if debrids == nil { request.JSONResponse(w, stats, http.StatusOK) return } - clients := svc.Debrid.GetDebrids() + clients := debrids.GetClients() + caches := debrids.GetCaches() profiles := make([]*debridTypes.Profile, 0) for debridName, client := range clients { profile, err := client.GetProfile() @@ -101,7 +107,7 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { s.logger.Error().Err(err).Msg("Failed to get debrid profile") continue } - cache, ok := svc.Debrid.Caches[debridName] + cache, ok := caches[debridName] if ok { // Get torrent data profile.LibrarySize = len(cache.GetTorrents()) diff --git a/pkg/server/webhook.go b/pkg/server/webhook.go index 0977a56..bc81ccb 100644 --- a/pkg/server/webhook.go +++ b/pkg/server/webhook.go @@ -3,7 +3,7 @@ package server import ( "cmp" "encoding/json" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "net/http" ) @@ -38,8 +38,7 @@ func (s *Server) handleTautulli(w 
http.ResponseWriter, r *http.Request) { http.Error(w, "Invalid ID", http.StatusBadRequest) return } - svc := service.GetService() - repair := svc.Repair + repair := store.GetStore().GetRepair() mediaId := cmp.Or(payload.TmdbID, payload.TvdbID) diff --git a/pkg/service/service.go b/pkg/service/service.go deleted file mode 100644 index d41a3c2..0000000 --- a/pkg/service/service.go +++ /dev/null @@ -1,47 +0,0 @@ -package service - -import ( - "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - "github.com/sirrobot01/decypharr/pkg/repair" - "sync" -) - -type Service struct { - Repair *repair.Repair - Arr *arr.Storage - Debrid *debrid.Engine -} - -var ( - instance *Service - once sync.Once -) - -// GetService returns the singleton instance -func GetService() *Service { - once.Do(func() { - arrs := arr.NewStorage() - deb := debrid.NewEngine() - instance = &Service{ - Repair: repair.New(arrs, deb), - Arr: arrs, - Debrid: deb, - } - }) - return instance -} - -func Reset() { - if instance != nil { - if instance.Debrid != nil { - instance.Debrid.Reset() - } - } - once = sync.Once{} - instance = nil -} - -func GetDebrid() *debrid.Engine { - return GetService().Debrid -} diff --git a/pkg/qbit/downloader.go b/pkg/store/downloader.go similarity index 61% rename from pkg/qbit/downloader.go rename to pkg/store/downloader.go index 3321031..29692f3 100644 --- a/pkg/qbit/downloader.go +++ b/pkg/store/downloader.go @@ -1,8 +1,8 @@ -package qbit +package store import ( "fmt" - "io" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "net/http" "os" "path/filepath" @@ -11,7 +11,6 @@ import ( "github.com/cavaliergopher/grab/v3" "github.com/sirrobot01/decypharr/internal/utils" - debrid "github.com/sirrobot01/decypharr/pkg/debrid/types" ) func Download(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error { @@ -57,21 +56,21 @@ Loop: return resp.Err() } -func (q *QBit) 
ProcessManualFile(torrent *Torrent) (string, error) { +func (s *Store) ProcessManualFile(torrent *Torrent) (string, error) { debridTorrent := torrent.DebridTorrent - q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files)) - torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename)) + s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files)) + torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename)) torrentPath = utils.RemoveInvalidChars(torrentPath) err := os.MkdirAll(torrentPath, os.ModePerm) if err != nil { - // add previous error to the error and return + // add the previous error to the error and return return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err) } - q.downloadFiles(torrent, torrentPath) + s.downloadFiles(torrent, torrentPath) return torrentPath, nil } -func (q *QBit) downloadFiles(torrent *Torrent, parent string) { +func (s *Store) downloadFiles(torrent *Torrent, parent string) { debridTorrent := torrent.DebridTorrent var wg sync.WaitGroup @@ -79,15 +78,15 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { for _, file := range debridTorrent.GetFiles() { totalSize += file.Size } - debridTorrent.Mu.Lock() + debridTorrent.Lock() debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes debridTorrent.Progress = 0 // Reset progress - debridTorrent.Mu.Unlock() + debridTorrent.Unlock() progressCallback := func(downloaded int64, speed int64) { - debridTorrent.Mu.Lock() - defer debridTorrent.Mu.Unlock() - torrent.Mu.Lock() - defer torrent.Mu.Unlock() + debridTorrent.Lock() + defer debridTorrent.Unlock() + torrent.Lock() + defer torrent.Unlock() // Update total downloaded bytes debridTorrent.SizeDownloaded += downloaded @@ -97,7 +96,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { if totalSize > 0 { debridTorrent.Progress = 
float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100 } - q.UpdateTorrentMin(torrent, debridTorrent) + s.UpdateTorrentMin(torrent, debridTorrent) } client := &grab.Client{ UserAgent: "Decypharr[QBitTorrent]", @@ -110,14 +109,14 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { errChan := make(chan error, len(debridTorrent.Files)) for _, file := range debridTorrent.GetFiles() { if file.DownloadLink == nil { - q.logger.Info().Msgf("No download link found for %s", file.Name) + s.logger.Info().Msgf("No download link found for %s", file.Name) continue } wg.Add(1) - q.downloadSemaphore <- struct{}{} - go func(file debrid.File) { + s.downloadSemaphore <- struct{}{} + go func(file types.File) { defer wg.Done() - defer func() { <-q.downloadSemaphore }() + defer func() { <-s.downloadSemaphore }() filename := file.Name err := Download( @@ -129,10 +128,10 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { ) if err != nil { - q.logger.Error().Msgf("Failed to download %s: %v", filename, err) + s.logger.Error().Msgf("Failed to download %s: %v", filename, err) errChan <- err } else { - q.logger.Info().Msgf("Downloaded %s", filename) + s.logger.Info().Msgf("Downloaded %s", filename) } }(file) } @@ -146,21 +145,21 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) { } } if len(errors) > 0 { - q.logger.Error().Msgf("Errors occurred during download: %v", errors) + s.logger.Error().Msgf("Errors occurred during download: %v", errors) return } - q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name) + s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name) } -func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { +func (s *Store) ProcessSymlink(torrent *Torrent) (string, error) { debridTorrent := torrent.DebridTorrent files := debridTorrent.Files if len(files) == 0 { return "", fmt.Errorf("no video files found") } - q.logger.Info().Msgf("Checking symlinks for %d files...", len(files)) + 
s.logger.Info().Msgf("Checking symlinks for %d files...", len(files)) rCloneBase := debridTorrent.MountPath - torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/ + torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/ // This returns filename.ext for alldebrid instead of the parent folder filename/ torrentFolder := torrentPath if err != nil { @@ -173,7 +172,7 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { torrentFolder = utils.RemoveExtension(torrentFolder) torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder } - torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ + torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ err = os.MkdirAll(torrentSymlinkPath, os.ModePerm) if err != nil { return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err) @@ -192,10 +191,10 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { return nil }) if err != nil { - q.logger.Warn().Msgf("Error while scanning rclone path: %v", err) + s.logger.Warn().Msgf("Error while scanning rclone path: %v", err) } - pending := make(map[string]debrid.File) + pending := make(map[string]types.File) for _, file := range files { if realRelPath, ok := realPaths[file.Name]; ok { file.Path = realRelPath @@ -216,43 +215,43 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) { if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) { fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name) if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) { - q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) + s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) } else { filePaths = append(filePaths, fileSymlinkPath) 
delete(pending, path) - q.logger.Info().Msgf("File is ready: %s", file.Name) + s.logger.Info().Msgf("File is ready: %s", file.Name) } } } case <-timeout: - q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending)) + s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending)) return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending)) } } - if q.SkipPreCache { + if s.skipPreCache { return torrentSymlinkPath, nil } go func() { - - if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil { - q.logger.Error().Msgf("Failed to pre-cache file: %s", err) + s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name) + if err := utils.PreCacheFile(filePaths); err != nil { + s.logger.Error().Msgf("Failed to pre-cache file: %s", err) } else { - q.logger.Trace().Msgf("Pre-cached %d files", len(filePaths)) + s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths)) } }() return torrentSymlinkPath, nil } -func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) { +func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) { files := debridTorrent.Files - symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ + symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/ err := os.MkdirAll(symlinkPath, os.ModePerm) if err != nil { return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err) } - remainingFiles := make(map[string]debrid.File) + remainingFiles := make(map[string]types.File) for _, file := range files { remainingFiles[file.Name] = file } @@ -278,107 +277,44 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, t fileSymlinkPath := filepath.Join(symlinkPath, file.Name) 
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) { - q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) + s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err) } else { filePaths = append(filePaths, fileSymlinkPath) delete(remainingFiles, filename) - q.logger.Info().Msgf("File is ready: %s", file.Name) + s.logger.Info().Msgf("File is ready: %s", file.Name) } } } case <-timeout: - q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles)) + s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles)) return symlinkPath, fmt.Errorf("timeout waiting for files") } } - if q.SkipPreCache { + if s.skipPreCache { return symlinkPath, nil } go func() { - - if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil { - q.logger.Error().Msgf("Failed to pre-cache file: %s", err) + s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name) + if err := utils.PreCacheFile(filePaths); err != nil { + s.logger.Error().Msgf("Failed to pre-cache file: %s", err) } else { - q.logger.Debug().Msgf("Pre-cached %d files", len(filePaths)) + s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths)) } }() // Pre-cache the files in the background // Pre-cache the first 256KB and 1MB of the file return symlinkPath, nil } -func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) { +func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) { for { torrentPath, err := debridTorrent.GetMountFolder(rclonePath) if err == nil { - q.logger.Debug().Msgf("Found torrent path: %s", torrentPath) + s.logger.Debug().Msgf("Found torrent path: %s", torrentPath) return torrentPath, err } time.Sleep(100 * time.Millisecond) } } - -func (q *QBit) preCacheFile(name string, filePaths []string) error { - q.logger.Trace().Msgf("Pre-caching torrent: %s", name) - if 
len(filePaths) == 0 { - return fmt.Errorf("no file paths provided") - } - - for _, filePath := range filePaths { - err := func(f string) error { - - file, err := os.Open(f) - if err != nil { - if os.IsNotExist(err) { - // File has probably been moved by arr, return silently - return nil - } - return fmt.Errorf("failed to open file: %s: %v", f, err) - } - defer file.Close() - - // Pre-cache the file header (first 256KB) using 16KB chunks. - if err := q.readSmallChunks(file, 0, 256*1024, 16*1024); err != nil { - return err - } - if err := q.readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil { - return err - } - return nil - }(filePath) - if err != nil { - return err - } - } - return nil -} - -func (q *QBit) readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error { - _, err := file.Seek(startPos, 0) - if err != nil { - return err - } - - buf := make([]byte, chunkSize) - bytesRemaining := totalToRead - - for bytesRemaining > 0 { - toRead := chunkSize - if bytesRemaining < chunkSize { - toRead = bytesRemaining - } - - n, err := file.Read(buf[:toRead]) - if err != nil { - if err == io.EOF { - break - } - return err - } - - bytesRemaining -= n - } - return nil -} diff --git a/pkg/qbit/misc.go b/pkg/store/misc.go similarity index 57% rename from pkg/qbit/misc.go rename to pkg/store/misc.go index bcc4d3d..630e269 100644 --- a/pkg/qbit/misc.go +++ b/pkg/store/misc.go @@ -1,18 +1,21 @@ -package qbit +package store import ( - "github.com/sirrobot01/decypharr/internal/utils" + "os" + "path/filepath" "strings" ) -func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent { +func createTorrentFromMagnet(req *ImportRequest) *Torrent { + magnet := req.Magnet + arrName := req.Arr.Name torrent := &Torrent{ ID: "", Hash: strings.ToLower(magnet.InfoHash), Name: magnet.Name, Size: magnet.Size, - Category: category, - Source: source, + Category: arrName, + Source: string(req.Type), State: "downloading", MagnetUri: 
magnet.Link, @@ -22,6 +25,7 @@ func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Tor AutoTmm: false, Ratio: 1, RatioLimit: 1, + SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator), } return torrent } diff --git a/pkg/store/request.go b/pkg/store/request.go new file mode 100644 index 0000000..80439e0 --- /dev/null +++ b/pkg/store/request.go @@ -0,0 +1,103 @@ +package store + +import ( + "bytes" + "encoding/json" + "github.com/sirrobot01/decypharr/internal/request" + "github.com/sirrobot01/decypharr/internal/utils" + "github.com/sirrobot01/decypharr/pkg/arr" + debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" + "net/http" + "net/url" + "time" +) + +type ImportType string + +const ( + ImportTypeQBitTorrent ImportType = "qbit" + ImportTypeAPI ImportType = "api" +) + +func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest { + return &ImportRequest{ + Status: "started", + DownloadFolder: downloadFolder, + Debrid: debrid, + Magnet: magnet, + Arr: arr, + IsSymlink: isSymlink, + DownloadUncached: downloadUncached, + CallBackUrl: callBackUrl, + Type: importType, + } +} + +type ImportRequest struct { + DownloadFolder string `json:"downloadFolder"` + Debrid string `json:"debrid"` + Magnet *utils.Magnet `json:"magnet"` + Arr *arr.Arr `json:"arr"` + IsSymlink bool `json:"isSymlink"` + DownloadUncached bool `json:"downloadUncached"` + CallBackUrl string `json:"callBackUrl"` + + Status string `json:"status"` + CompletedAt time.Time `json:"completedAt,omitempty"` + Error error `json:"error,omitempty"` + + Type ImportType `json:"type"` + Async bool `json:"async"` +} + +type importResponse struct { + Status string `json:"status"` + CompletedAt time.Time `json:"completedAt"` + Error error `json:"error"` + Torrent *Torrent `json:"torrent"` + Debrid *debridTypes.Torrent `json:"debrid"` +} 
+ +func (i *ImportRequest) sendCallback(torrent *Torrent, debridTorrent *debridTypes.Torrent) { + if i.CallBackUrl == "" { + return + } + + // Check if the callback URL is valid + if _, err := url.ParseRequestURI(i.CallBackUrl); err != nil { + return + } + + client := request.New() + payload, err := json.Marshal(&importResponse{ + Status: i.Status, + Error: i.Error, + CompletedAt: i.CompletedAt, + Torrent: torrent, + Debrid: debridTorrent, + }) + if err != nil { + return + } + req, err := http.NewRequest("POST", i.CallBackUrl, bytes.NewReader(payload)) + if err != nil { + return + } + req.Header.Set("Content-Type", "application/json") + _, _ = client.Do(req) + +} + +func (i *ImportRequest) markAsFailed(err error, torrent *Torrent, debridTorrent *debridTypes.Torrent) { + i.Status = "failed" + i.Error = err + i.CompletedAt = time.Now() + i.sendCallback(torrent, debridTorrent) +} + +func (i *ImportRequest) markAsCompleted(torrent *Torrent, debridTorrent *debridTypes.Torrent) { + i.Status = "completed" + i.Error = nil + i.CompletedAt = time.Now() + i.sendCallback(torrent, debridTorrent) +} diff --git a/pkg/store/store.go b/pkg/store/store.go new file mode 100644 index 0000000..00f25bb --- /dev/null +++ b/pkg/store/store.go @@ -0,0 +1,75 @@ +package store + +import ( + "cmp" + "github.com/rs/zerolog" + "github.com/sirrobot01/decypharr/internal/config" + "github.com/sirrobot01/decypharr/internal/logger" + "github.com/sirrobot01/decypharr/pkg/arr" + "github.com/sirrobot01/decypharr/pkg/debrid" + "github.com/sirrobot01/decypharr/pkg/repair" + "sync" + "time" +) + +type Store struct { + repair *repair.Repair + arr *arr.Storage + debrid *debrid.Storage + torrents *TorrentStorage + logger zerolog.Logger + refreshInterval time.Duration + skipPreCache bool + downloadSemaphore chan struct{} +} + +var ( + instance *Store + once sync.Once +) + +// GetStore returns the singleton instance +func GetStore() *Store { + once.Do(func() { + arrs := arr.NewStorage() + deb := 
debrid.NewStorage() + cfg := config.Get() + qbitCfg := cfg.QBitTorrent + + instance = &Store{ + repair: repair.New(arrs, deb), + arr: arrs, + debrid: deb, + torrents: newTorrentStorage(cfg.TorrentsFile()), + logger: logger.New("store"), + refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, + skipPreCache: qbitCfg.SkipPreCache, + downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), + } + }) + return instance +} + +func Reset() { + if instance != nil { + if instance.debrid != nil { + instance.debrid.Reset() + } + close(instance.downloadSemaphore) + } + once = sync.Once{} + instance = nil +} + +func (s *Store) GetArr() *arr.Storage { + return s.arr +} +func (s *Store) GetDebrid() *debrid.Storage { + return s.debrid +} +func (s *Store) GetRepair() *repair.Repair { + return s.repair +} +func (s *Store) GetTorrentStorage() *TorrentStorage { + return s.torrents +} diff --git a/pkg/store/torrent.go b/pkg/store/torrent.go new file mode 100644 index 0000000..a5c0388 --- /dev/null +++ b/pkg/store/torrent.go @@ -0,0 +1,210 @@ +package store + +import ( + "cmp" + "context" + "fmt" + "github.com/sirrobot01/decypharr/internal/request" + "github.com/sirrobot01/decypharr/internal/utils" + debridTypes "github.com/sirrobot01/decypharr/pkg/debrid" + "github.com/sirrobot01/decypharr/pkg/debrid/types" + "os" + "path/filepath" + "time" +) + +func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error { + torrent := createTorrentFromMagnet(importReq) + debridTorrent, err := debridTypes.ProcessTorrent(ctx, s.debrid, importReq.Debrid, importReq.Magnet, importReq.Arr, importReq.IsSymlink, importReq.DownloadUncached) + if err != nil || debridTorrent == nil { + if err == nil { + err = fmt.Errorf("failed to process torrent") + } + // This error is returned immediately to the user(no need for callback) + return err + } + torrent = s.UpdateTorrentMin(torrent, debridTorrent) + s.torrents.AddOrUpdate(torrent) + go 
s.processFiles(torrent, debridTorrent, importReq) // We can send async for file processing not to delay the response + return nil +} + +func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, importReq *ImportRequest) { + client := s.debrid.GetClient(debridTorrent.Debrid) + downloadingStatuses := client.GetDownloadingStatus() + _arr := importReq.Arr + for debridTorrent.Status != "downloaded" { + s.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress) + dbT, err := client.CheckStatus(debridTorrent, importReq.IsSymlink) + if err != nil { + if dbT != nil && dbT.Id != "" { + // Delete the torrent if it was not downloaded + go func() { + _ = client.DeleteTorrent(dbT.Id) + }() + } + s.logger.Error().Msgf("Error checking status: %v", err) + s.markTorrentAsFailed(torrent) + go func() { + _arr.Refresh() + }() + importReq.markAsFailed(err, torrent, debridTorrent) + return + } + + debridTorrent = dbT + torrent = s.UpdateTorrentMin(torrent, debridTorrent) + + // Exit the loop for downloading statuses to prevent memory buildup + if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) { + break + } + if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) { + break + } + time.Sleep(s.refreshInterval) + } + var torrentSymlinkPath string + var err error + debridTorrent.Arr = _arr + + // Check if debrid supports webdav by checking cache + timer := time.Now() + if importReq.IsSymlink { + caches := s.debrid.GetCaches() + cache, useWebdav := caches[debridTorrent.Debrid] + if useWebdav { + s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) + + // Use webdav to download the file + + if err := cache.Add(debridTorrent); err != nil { + s.logger.Error().Msgf("Error adding torrent to cache: %v", err) + s.markTorrentAsFailed(torrent) + importReq.markAsFailed(err, torrent, debridTorrent) + return + } + + rclonePath := 
filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow + torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name) + torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/ + + } else { + // User is using either zurg or debrid webdav + torrentSymlinkPath, err = s.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ + } + } else { + torrentSymlinkPath, err = s.ProcessManualFile(torrent) + } + if err != nil { + s.markTorrentAsFailed(torrent) + go func() { + _ = client.DeleteTorrent(debridTorrent.Id) + }() + s.logger.Info().Msgf("Error: %v", err) + importReq.markAsFailed(err, torrent, debridTorrent) + return + } + torrent.TorrentPath = torrentSymlinkPath + s.UpdateTorrent(torrent, debridTorrent) + s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer)) + + go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed + go func() { + if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil { + s.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + _arr.Refresh() +} + +func (s *Store) markTorrentAsFailed(t *Torrent) *Torrent { + t.State = "error" + s.torrents.AddOrUpdate(t) + go func() { + if err := request.SendDiscordMessage("download_failed", "error", t.discordContext()); err != nil { + s.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + return t +} + +func (s *Store) UpdateTorrentMin(t *Torrent, debridTorrent *types.Torrent) *Torrent { + if debridTorrent == nil { + return t + } + + addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added) + if err != nil { + addedOn = time.Now() + } + totalSize := debridTorrent.Bytes + progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0 + sizeCompleted := int64(float64(totalSize) * progress) + 
+ var speed int64 + if debridTorrent.Speed != 0 { + speed = debridTorrent.Speed + } + var eta int + if speed != 0 { + eta = int((totalSize - sizeCompleted) / speed) + } + t.ID = debridTorrent.Id + t.Name = debridTorrent.Name + t.AddedOn = addedOn.Unix() + t.DebridTorrent = debridTorrent + t.Debrid = debridTorrent.Debrid + t.Size = totalSize + t.Completed = sizeCompleted + t.Downloaded = sizeCompleted + t.DownloadedSession = sizeCompleted + t.Uploaded = sizeCompleted + t.UploadedSession = sizeCompleted + t.AmountLeft = totalSize - sizeCompleted + t.Progress = progress + t.Eta = eta + t.Dlspeed = speed + t.Upspeed = speed + t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator) + return t +} + +func (s *Store) UpdateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent { + if debridTorrent == nil { + return t + } + + if debridClient := s.debrid.GetClients()[debridTorrent.Debrid]; debridClient != nil { + if debridTorrent.Status != "downloaded" { + _ = debridClient.UpdateTorrent(debridTorrent) + } + } + t = s.UpdateTorrentMin(t, debridTorrent) + t.ContentPath = t.TorrentPath + string(os.PathSeparator) + + if t.IsReady() { + t.State = "pausedUP" + s.torrents.Update(t) + return t + } + + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if t.IsReady() { + t.State = "pausedUP" + s.torrents.Update(t) + return t + } + updatedT := s.UpdateTorrent(t, debridTorrent) + t = updatedT + + case <-time.After(10 * time.Minute): // Add a timeout + return t + } + } +} diff --git a/pkg/qbit/storage.go b/pkg/store/torrent_storage.go similarity index 63% rename from pkg/qbit/storage.go rename to pkg/store/torrent_storage.go index e2671bb..c6f9b8f 100644 --- a/pkg/qbit/storage.go +++ b/pkg/store/torrent_storage.go @@ -1,18 +1,15 @@ -package qbit +package store import ( "encoding/json" "fmt" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/debrid/types" "os" "sort" 
"sync" ) func keyPair(hash, category string) string { - if category == "" { - category = "uncategorized" - } return fmt.Sprintf("%s|%s", hash, category) } @@ -36,13 +33,13 @@ func loadTorrentsFromJSON(filename string) (Torrents, error) { return torrents, nil } -func NewTorrentStorage(filename string) *TorrentStorage { +func newTorrentStorage(filename string) *TorrentStorage { // Open the JSON file and read the data torrents, err := loadTorrentsFromJSON(filename) if err != nil { torrents = make(Torrents) } - // Create a new TorrentStorage + // Create a new Storage return &TorrentStorage{ torrents: torrents, filename: filename, @@ -187,12 +184,9 @@ func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) { return } if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" { - dbClient := service.GetDebrid().GetClient(torrent.Debrid) + dbClient := GetStore().debrid.GetClient(torrent.Debrid) if dbClient != nil { - err := dbClient.DeleteTorrent(torrent.ID) - if err != nil { - fmt.Println(err) - } + _ = dbClient.DeleteTorrent(torrent.ID) } } @@ -244,10 +238,12 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool) } }() + clients := GetStore().debrid.GetClients() + go func() { for id, debrid := range toDelete { - dbClient := service.GetDebrid().GetClient(debrid) - if dbClient == nil { + dbClient, ok := clients[debrid] + if !ok { continue } err := dbClient.DeleteTorrent(id) @@ -278,3 +274,73 @@ func (ts *TorrentStorage) Reset() { defer ts.mu.Unlock() ts.torrents = make(Torrents) } + +type Torrent struct { + ID string `json:"id"` + Debrid string `json:"debrid"` + TorrentPath string `json:"-"` + DebridTorrent *types.Torrent `json:"-"` + + AddedOn int64 `json:"added_on,omitempty"` + AmountLeft int64 `json:"amount_left"` + AutoTmm bool `json:"auto_tmm"` + Availability float64 `json:"availability,omitempty"` + Category string `json:"category,omitempty"` + Completed int64 `json:"completed"` + CompletionOn int 
`json:"completion_on,omitempty"` + ContentPath string `json:"content_path"` + DlLimit int `json:"dl_limit"` + Dlspeed int64 `json:"dlspeed"` + Downloaded int64 `json:"downloaded"` + DownloadedSession int64 `json:"downloaded_session"` + Eta int `json:"eta"` + FlPiecePrio bool `json:"f_l_piece_prio,omitempty"` + ForceStart bool `json:"force_start,omitempty"` + Hash string `json:"hash"` + LastActivity int64 `json:"last_activity,omitempty"` + MagnetUri string `json:"magnet_uri,omitempty"` + MaxRatio int `json:"max_ratio,omitempty"` + MaxSeedingTime int `json:"max_seeding_time,omitempty"` + Name string `json:"name,omitempty"` + NumComplete int `json:"num_complete,omitempty"` + NumIncomplete int `json:"num_incomplete,omitempty"` + NumLeechs int `json:"num_leechs,omitempty"` + NumSeeds int `json:"num_seeds,omitempty"` + Priority int `json:"priority,omitempty"` + Progress float64 `json:"progress"` + Ratio int `json:"ratio,omitempty"` + RatioLimit int `json:"ratio_limit,omitempty"` + SavePath string `json:"save_path"` + SeedingTimeLimit int `json:"seeding_time_limit,omitempty"` + SeenComplete int64 `json:"seen_complete,omitempty"` + SeqDl bool `json:"seq_dl"` + Size int64 `json:"size,omitempty"` + State string `json:"state,omitempty"` + SuperSeeding bool `json:"super_seeding"` + Tags string `json:"tags,omitempty"` + TimeActive int `json:"time_active,omitempty"` + TotalSize int64 `json:"total_size,omitempty"` + Tracker string `json:"tracker,omitempty"` + UpLimit int64 `json:"up_limit,omitempty"` + Uploaded int64 `json:"uploaded,omitempty"` + UploadedSession int64 `json:"uploaded_session,omitempty"` + Upspeed int64 `json:"upspeed,omitempty"` + Source string `json:"source,omitempty"` + + sync.Mutex +} + +func (t *Torrent) IsReady() bool { + return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != "" +} + +func (t *Torrent) discordContext() string { + format := ` + **Name:** %s + **Arr:** %s + **Hash:** %s + **MagnetURI:** %s + **Debrid:** %s + ` + return 
fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid) +} diff --git a/pkg/web/api.go b/pkg/web/api.go index b3ce47b..a9f6aba 100644 --- a/pkg/web/api.go +++ b/pkg/web/api.go @@ -2,6 +2,7 @@ package web import ( "fmt" + "github.com/sirrobot01/decypharr/pkg/store" "net/http" "strings" "time" @@ -12,34 +13,37 @@ import ( "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" - "github.com/sirrobot01/decypharr/pkg/qbit" - "github.com/sirrobot01/decypharr/pkg/service" "github.com/sirrobot01/decypharr/pkg/version" ) -func (ui *Handler) handleGetArrs(w http.ResponseWriter, r *http.Request) { - svc := service.GetService() - request.JSONResponse(w, svc.Arr.GetAll(), http.StatusOK) +func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) { + _store := store.GetStore() + request.JSONResponse(w, _store.GetArr().GetAll(), http.StatusOK) } -func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() if err := r.ParseMultipartForm(32 << 20); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } - svc := service.GetService() + _store := store.GetStore() - results := make([]*qbit.ImportRequest, 0) + results := make([]*store.ImportRequest, 0) errs := make([]string, 0) arrName := r.FormValue("arr") notSymlink := r.FormValue("notSymlink") == "true" - downloadUncached := r.FormValue("downloadUncached") == "true" - if arrName == "" { - arrName = "uncategorized" + debridName := r.FormValue("debrid") + callbackUrl := r.FormValue("callbackUrl") + downloadFolder := r.FormValue("downloadFolder") + if downloadFolder == "" { + downloadFolder = config.Get().QBitTorrent.DownloadFolder } - _arr := svc.Arr.Get(arrName) + downloadUncached := r.FormValue("downloadUncached") == "true" + + _arr := _store.GetArr().Get(arrName) if _arr == nil { _arr = 
arr.New(arrName, "", "", false, false, &downloadUncached) } @@ -59,8 +63,9 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { errs = append(errs, fmt.Sprintf("Failed to parse URL %s: %v", url, err)) continue } - importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached) - if err := importReq.Process(ui.qbit); err != nil { + + importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) + if err := _store.AddTorrent(ctx, importReq); err != nil { errs = append(errs, fmt.Sprintf("URL %s: %v", url, err)) continue } @@ -83,8 +88,8 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { continue } - importReq := qbit.NewImportRequest(magnet, _arr, !notSymlink, downloadUncached) - err = importReq.Process(ui.qbit) + importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) + err = _store.AddTorrent(ctx, importReq) if err != nil { errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err)) continue @@ -94,27 +99,27 @@ func (ui *Handler) handleAddContent(w http.ResponseWriter, r *http.Request) { } request.JSONResponse(w, struct { - Results []*qbit.ImportRequest `json:"results"` - Errors []string `json:"errors,omitempty"` + Results []*store.ImportRequest `json:"results"` + Errors []string `json:"errors,omitempty"` }{ Results: results, Errors: errs, }, http.StatusOK) } -func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) { var req RepairRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } - svc := service.GetService() + _store := store.GetStore() var arrs []string if req.ArrName != "" { - _arr := svc.Arr.Get(req.ArrName) + _arr := 
_store.GetArr().Get(req.ArrName) if _arr == nil { http.Error(w, "No Arrs found to repair", http.StatusNotFound) return @@ -124,15 +129,15 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { if req.Async { go func() { - if err := svc.Repair.AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil { - ui.logger.Error().Err(err).Msg("Failed to repair media") + if err := _store.GetRepair().AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil { + wb.logger.Error().Err(err).Msg("Failed to repair media") } }() request.JSONResponse(w, "Repair process started", http.StatusOK) return } - if err := svc.Repair.AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil { + if err := _store.GetRepair().AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil { http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError) return } @@ -140,16 +145,16 @@ func (ui *Handler) handleRepairMedia(w http.ResponseWriter, r *http.Request) { request.JSONResponse(w, "Repair completed", http.StatusOK) } -func (ui *Handler) handleGetVersion(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleGetVersion(w http.ResponseWriter, r *http.Request) { v := version.GetInfo() request.JSONResponse(w, v, http.StatusOK) } -func (ui *Handler) handleGetTorrents(w http.ResponseWriter, r *http.Request) { - request.JSONResponse(w, ui.qbit.Storage.GetAllSorted("", "", nil, "added_on", false), http.StatusOK) +func (wb *Web) handleGetTorrents(w http.ResponseWriter, r *http.Request) { + request.JSONResponse(w, wb.torrents.GetAllSorted("", "", nil, "added_on", false), http.StatusOK) } -func (ui *Handler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) { hash := chi.URLParam(r, "hash") category := chi.URLParam(r, "category") removeFromDebrid := r.URL.Query().Get("removeFromDebrid") == "true" @@ -157,11 
+162,11 @@ func (ui *Handler) handleDeleteTorrent(w http.ResponseWriter, r *http.Request) { http.Error(w, "No hash provided", http.StatusBadRequest) return } - ui.qbit.Storage.Delete(hash, category, removeFromDebrid) + wb.torrents.Delete(hash, category, removeFromDebrid) w.WriteHeader(http.StatusOK) } -func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) { hashesStr := r.URL.Query().Get("hashes") removeFromDebrid := r.URL.Query().Get("removeFromDebrid") == "true" if hashesStr == "" { @@ -169,15 +174,15 @@ func (ui *Handler) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) return } hashes := strings.Split(hashesStr, ",") - ui.qbit.Storage.DeleteMultiple(hashes, removeFromDebrid) + wb.torrents.DeleteMultiple(hashes, removeFromDebrid) w.WriteHeader(http.StatusOK) } -func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) { cfg := config.Get() arrCfgs := make([]config.Arr, 0) - svc := service.GetService() - for _, a := range svc.Arr.GetAll() { + _store := store.GetStore() + for _, a := range _store.GetArr().GetAll() { arrCfgs = append(arrCfgs, config.Arr{ Host: a.Host, Name: a.Name, @@ -191,11 +196,11 @@ func (ui *Handler) handleGetConfig(w http.ResponseWriter, r *http.Request) { request.JSONResponse(w, cfg, http.StatusOK) } -func (ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { // Decode the JSON body var updatedConfig config.Config if err := json.NewDecoder(r.Body).Decode(&updatedConfig); err != nil { - ui.logger.Error().Err(err).Msg("Failed to decode config update request") + wb.logger.Error().Err(err).Msg("Failed to decode config update request") http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest) return } @@ -232,11 +237,12 @@ func 
(ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { } // Update Arrs through the service - svc := service.GetService() - svc.Arr.Clear() // Clear existing arrs + _store := store.GetStore() + _arr := _store.GetArr() + _arr.Clear() // Clear existing arrs for _, a := range updatedConfig.Arrs { - svc.Arr.AddOrUpdate(&arr.Arr{ + _arr.AddOrUpdate(&arr.Arr{ Name: a.Name, Host: a.Host, Token: a.Token, @@ -263,25 +269,25 @@ func (ui *Handler) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { request.JSONResponse(w, map[string]string{"status": "success"}, http.StatusOK) } -func (ui *Handler) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) { - svc := service.GetService() - request.JSONResponse(w, svc.Repair.GetJobs(), http.StatusOK) +func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) { + _store := store.GetStore() + request.JSONResponse(w, _store.GetRepair().GetJobs(), http.StatusOK) } -func (ui *Handler) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) { id := chi.URLParam(r, "id") if id == "" { http.Error(w, "No job ID provided", http.StatusBadRequest) return } - svc := service.GetService() - if err := svc.Repair.ProcessJob(id); err != nil { - ui.logger.Error().Err(err).Msg("Failed to process repair job") + _store := store.GetStore() + if err := _store.GetRepair().ProcessJob(id); err != nil { + wb.logger.Error().Err(err).Msg("Failed to process repair job") } w.WriteHeader(http.StatusOK) } -func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) { +func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) { // Read ids from body var req struct { IDs []string `json:"ids"` @@ -295,7 +301,22 @@ func (ui *Handler) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) return } - svc := service.GetService() - svc.Repair.DeleteJobs(req.IDs) + _store := store.GetStore() 
+ _store.GetRepair().DeleteJobs(req.IDs) + w.WriteHeader(http.StatusOK) +} + +func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) { + id := chi.URLParam(r, "id") + if id == "" { + http.Error(w, "No job ID provided", http.StatusBadRequest) + return + } + _store := store.GetStore() + if err := _store.GetRepair().StopJob(id); err != nil { + wb.logger.Error().Err(err).Msg("Failed to stop repair job") + http.Error(w, "Failed to stop job: "+err.Error(), http.StatusInternalServerError) + return + } w.WriteHeader(http.StatusOK) } diff --git a/pkg/web/auth.go b/pkg/web/auth.go index d67d25e..95c45b8 100644 --- a/pkg/web/auth.go +++ b/pkg/web/auth.go @@ -6,7 +6,7 @@ import ( "net/http" ) -func (ui *Handler) verifyAuth(username, password string) bool { +func (wb *Web) verifyAuth(username, password string) bool { // If you're storing hashed password, use bcrypt to compare if username == "" { return false @@ -22,11 +22,11 @@ func (ui *Handler) verifyAuth(username, password string) bool { return err == nil } -func (ui *Handler) skipAuthHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) skipAuthHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() cfg.UseAuth = false if err := cfg.Save(); err != nil { - ui.logger.Error().Err(err).Msg("failed to save config") + wb.logger.Error().Err(err).Msg("failed to save config") http.Error(w, "failed to save config", http.StatusInternalServerError) return } diff --git a/pkg/web/middlewares.go b/pkg/web/middlewares.go index b029d66..7e334b7 100644 --- a/pkg/web/middlewares.go +++ b/pkg/web/middlewares.go @@ -6,7 +6,7 @@ import ( "net/http" ) -func (ui *Handler) setupMiddleware(next http.Handler) http.Handler { +func (wb *Web) setupMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cfg := config.Get() needsAuth := cfg.NeedsSetup() @@ -24,7 +24,7 @@ func (ui *Handler) setupMiddleware(next http.Handler) http.Handler { }) } -func 
(ui *Handler) authMiddleware(next http.Handler) http.Handler { +func (wb *Web) authMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Check if setup is needed cfg := config.Get() @@ -38,7 +38,7 @@ func (ui *Handler) authMiddleware(next http.Handler) http.Handler { return } - session, _ := store.Get(r, "auth-session") + session, _ := wb.cookie.Get(r, "auth-session") auth, ok := session.Values["authenticated"].(bool) if !ok || !auth { diff --git a/pkg/web/routes.go b/pkg/web/routes.go index 005a4ee..2a96f2d 100644 --- a/pkg/web/routes.go +++ b/pkg/web/routes.go @@ -5,35 +5,36 @@ import ( "net/http" ) -func (ui *Handler) Routes() http.Handler { +func (wb *Web) Routes() http.Handler { r := chi.NewRouter() - r.Get("/login", ui.LoginHandler) - r.Post("/login", ui.LoginHandler) - r.Get("/register", ui.RegisterHandler) - r.Post("/register", ui.RegisterHandler) - r.Get("/skip-auth", ui.skipAuthHandler) - r.Get("/version", ui.handleGetVersion) + r.Get("/login", wb.LoginHandler) + r.Post("/login", wb.LoginHandler) + r.Get("/register", wb.RegisterHandler) + r.Post("/register", wb.RegisterHandler) + r.Get("/skip-auth", wb.skipAuthHandler) + r.Get("/version", wb.handleGetVersion) r.Group(func(r chi.Router) { - r.Use(ui.authMiddleware) - r.Use(ui.setupMiddleware) - r.Get("/", ui.IndexHandler) - r.Get("/download", ui.DownloadHandler) - r.Get("/repair", ui.RepairHandler) - r.Get("/config", ui.ConfigHandler) + r.Use(wb.authMiddleware) + r.Use(wb.setupMiddleware) + r.Get("/", wb.IndexHandler) + r.Get("/download", wb.DownloadHandler) + r.Get("/repair", wb.RepairHandler) + r.Get("/config", wb.ConfigHandler) r.Route("/api", func(r chi.Router) { - r.Get("/arrs", ui.handleGetArrs) - r.Post("/add", ui.handleAddContent) - r.Post("/repair", ui.handleRepairMedia) - r.Get("/repair/jobs", ui.handleGetRepairJobs) - r.Post("/repair/jobs/{id}/process", ui.handleProcessRepairJob) - r.Delete("/repair/jobs", ui.handleDeleteRepairJob) 
- r.Get("/torrents", ui.handleGetTorrents) - r.Delete("/torrents/{category}/{hash}", ui.handleDeleteTorrent) - r.Delete("/torrents/", ui.handleDeleteTorrents) - r.Get("/config", ui.handleGetConfig) - r.Post("/config", ui.handleUpdateConfig) + r.Get("/arrs", wb.handleGetArrs) + r.Post("/add", wb.handleAddContent) + r.Post("/repair", wb.handleRepairMedia) + r.Get("/repair/jobs", wb.handleGetRepairJobs) + r.Post("/repair/jobs/{id}/process", wb.handleProcessRepairJob) + r.Post("/repair/jobs/{id}/stop", wb.handleStopRepairJob) + r.Delete("/repair/jobs", wb.handleDeleteRepairJob) + r.Get("/torrents", wb.handleGetTorrents) + r.Delete("/torrents/{category}/{hash}", wb.handleDeleteTorrent) + r.Delete("/torrents/", wb.handleDeleteTorrents) + r.Get("/config", wb.handleGetConfig) + r.Post("/config", wb.handleUpdateConfig) }) }) diff --git a/pkg/web/templates/config.html b/pkg/web/templates/config.html index 5b009b4..9760612 100644 --- a/pkg/web/templates/config.html +++ b/pkg/web/templates/config.html @@ -245,43 +245,48 @@
-
+
- +
-
+
- + Interval for the repair process(e.g., 24h, 1d, 03:00, or a crontab)
+
+ + + Number of workers to use for the repair process +
- Speeds up the repair process by using Zurg + If you have Zurg running, you can use it to speed up the repair process
-
+
Use Internal Webdav for repair(make sure webdav is enabled in the debrid section
-
+
Run repair on startup
-
+
@@ -340,7 +345,14 @@ Rate limit for the debrid service. Confirm your debrid service rate limit
-
+
+
+
+ + +
+ Create an internal webdav for this debrid +
@@ -348,13 +360,6 @@
Download uncached files from the debrid service
-
-
- - -
- Check if the file is cached before downloading(Disabled) -
@@ -369,16 +374,10 @@
Preprocess RARed torrents to allow reading the files inside
-
-
- - -
- Create an internal webdav for this debrid -
-
-
Webdav
+
+
+
Webdav Settings
@@ -441,12 +440,12 @@
-
-
Custom Folders
+
+
Virtual Folders

Create virtual directories with filters to organize your content

- +
+
@@ -218,6 +221,27 @@ } } + // Return status text and class based on job status + function getStatus(status) { + switch (status) { + case 'started': + return {text: 'In Progress', class: 'text-primary'}; + case 'failed': + return {text: 'Failed', class: 'text-danger'}; + case 'completed': + return {text: 'Completed', class: 'text-success'}; + case 'pending': + return {text: 'Pending', class: 'text-warning'}; + case 'cancelled': + return {text: 'Cancelled', class: 'text-secondary'}; + case 'processing': + return {text: 'Processing', class: 'text-info'}; + default: + // Return status in title case if unknown + return {text: status.charAt(0).toUpperCase() + status.slice(1), class: 'text-secondary'}; + } + } + // Render jobs table with pagination function renderJobsTable(page) { const tableBody = document.getElementById('jobsTableBody'); @@ -254,24 +278,10 @@ const formattedDate = startedDate.toLocaleString(); // Determine status - let status = 'In Progress'; - let statusClass = 'text-primary'; + let status = getStatus(job.status); let canDelete = job.status !== "started"; let totalItems = job.broken_items ? Object.values(job.broken_items).reduce((sum, arr) => sum + arr.length, 0) : 0; - if (job.status === 'failed') { - status = 'Failed'; - statusClass = 'text-danger'; - } else if (job.status === 'completed') { - status = 'Completed'; - statusClass = 'text-success'; - } else if (job.status === 'pending') { - status = 'Pending'; - statusClass = 'text-warning'; - } else if (job.status === "processing") { - status = 'Processing'; - statusClass = 'text-info'; - } row.innerHTML = ` @@ -283,25 +293,31 @@ ${job.id.substring(0, 8)} ${job.arrs.join(', ')} ${formattedDate} - ${status} + ${status.text} ${totalItems} ${job.status === "pending" ? - `` : - `` - } + } + ${(job.status === "started" || job.status === "processing") ? + `` : + '' + } ${canDelete ? 
- `` : - `` - } + `` : + `` + } `; @@ -370,6 +386,13 @@ viewJobDetails(jobId); }); }); + + document.querySelectorAll('.stop-job').forEach(button => { + button.addEventListener('click', (e) => { + const jobId = e.currentTarget.dataset.id; + stopJob(jobId); + }); + }); } document.getElementById('selectAllJobs').addEventListener('change', function() { @@ -456,6 +479,25 @@ } } + async function stopJob(jobId) { + if (confirm('Are you sure you want to stop this job?')) { + try { + const response = await fetcher(`/api/repair/jobs/${jobId}/stop`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + }); + + if (!response.ok) throw new Error(await response.text()); + createToast('Job stop requested successfully'); + await loadJobs(currentPage); // Refresh the jobs list + } catch (error) { + createToast(`Error stopping job: ${error.message}`, 'error'); + } + } + } + // View job details function function viewJobDetails(jobId) { // Find the job @@ -477,24 +519,9 @@ } // Set status with color - let status = 'In Progress'; - let statusClass = 'text-primary'; + let status = getStatus(job.status); - if (job.status === 'failed') { - status = 'Failed'; - statusClass = 'text-danger'; - } else if (job.status === 'completed') { - status = 'Completed'; - statusClass = 'text-success'; - } else if (job.status === 'pending') { - status = 'Pending'; - statusClass = 'text-warning'; - } else if (job.status === "processing") { - status = 'Processing'; - statusClass = 'text-info'; - } - - document.getElementById('modalJobStatus').innerHTML = `${status}`; + document.getElementById('modalJobStatus').innerHTML = `${status.text}`; // Set other job details document.getElementById('modalJobArrs').textContent = job.arrs.join(', '); @@ -524,6 +551,19 @@ processBtn.classList.add('d-none'); } + // Stop button visibility + const stopBtn = document.getElementById('stopJobBtn'); // You'll need to add this button to the HTML + if (job.status === 'started' || job.status === 
'processing') { + stopBtn.classList.remove('d-none'); + stopBtn.onclick = () => { + stopJob(job.id); + const modal = bootstrap.Modal.getInstance(document.getElementById('jobDetailsModal')); + modal.hide(); + }; + } else { + stopBtn.classList.add('d-none'); + } + // Populate broken items table const brokenItemsTableBody = document.getElementById('brokenItemsTableBody'); const noBrokenItemsMessage = document.getElementById('noBrokenItemsMessage'); diff --git a/pkg/web/ui.go b/pkg/web/ui.go index b7659a5..9ce2009 100644 --- a/pkg/web/ui.go +++ b/pkg/web/ui.go @@ -7,7 +7,7 @@ import ( "net/http" ) -func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) LoginHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() if cfg.NeedsAuth() { http.Redirect(w, r, "/register", http.StatusSeeOther) @@ -19,7 +19,7 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { "Page": "login", "Title": "Login", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) return } @@ -33,8 +33,8 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { return } - if ui.verifyAuth(credentials.Username, credentials.Password) { - session, _ := store.Get(r, "auth-session") + if wb.verifyAuth(credentials.Username, credentials.Password) { + session, _ := wb.cookie.Get(r, "auth-session") session.Values["authenticated"] = true session.Values["username"] = credentials.Username if err := session.Save(r, w); err != nil { @@ -48,8 +48,8 @@ func (ui *Handler) LoginHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "Invalid credentials", http.StatusUnauthorized) } -func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) { - session, _ := store.Get(r, "auth-session") +func (wb *Web) LogoutHandler(w http.ResponseWriter, r *http.Request) { + session, _ := wb.cookie.Get(r, "auth-session") session.Values["authenticated"] = false 
session.Options.MaxAge = -1 err := session.Save(r, w) @@ -59,7 +59,7 @@ func (ui *Handler) LogoutHandler(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/login", http.StatusSeeOther) } -func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) RegisterHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() authCfg := cfg.GetAuth() @@ -69,7 +69,7 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { "Page": "register", "Title": "Register", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) return } @@ -99,7 +99,7 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { } // Create a session - session, _ := store.Get(r, "auth-session") + session, _ := wb.cookie.Get(r, "auth-session") session.Values["authenticated"] = true session.Values["username"] = username if err := session.Save(r, w); err != nil { @@ -110,42 +110,49 @@ func (ui *Handler) RegisterHandler(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/", http.StatusSeeOther) } -func (ui *Handler) IndexHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) IndexHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() data := map[string]interface{}{ "URLBase": cfg.URLBase, "Page": "index", "Title": "Torrents", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) } -func (ui *Handler) DownloadHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) DownloadHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() - data := map[string]interface{}{ - "URLBase": cfg.URLBase, - "Page": "download", - "Title": "Download", + debrids := make([]string, 0) + for _, d := range cfg.Debrids { + debrids = append(debrids, d.Name) } - _ = templates.ExecuteTemplate(w, "layout", data) + data := map[string]interface{}{ + "URLBase": cfg.URLBase, + "Page": 
"download", + "Title": "Download", + "Debrids": debrids, + "HasMultiDebrid": len(debrids) > 1, + "DownloadFolder": cfg.QBitTorrent.DownloadFolder, + } + _ = wb.templates.ExecuteTemplate(w, "layout", data) } -func (ui *Handler) RepairHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) RepairHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() data := map[string]interface{}{ "URLBase": cfg.URLBase, "Page": "repair", "Title": "Repair", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) } -func (ui *Handler) ConfigHandler(w http.ResponseWriter, r *http.Request) { +func (wb *Web) ConfigHandler(w http.ResponseWriter, r *http.Request) { cfg := config.Get() data := map[string]interface{}{ "URLBase": cfg.URLBase, "Page": "config", "Title": "Config", } - _ = templates.ExecuteTemplate(w, "layout", data) + _ = wb.templates.ExecuteTemplate(w, "layout", data) } diff --git a/pkg/web/server.go b/pkg/web/web.go similarity index 70% rename from pkg/web/server.go rename to pkg/web/web.go index 2f2703f..aa20c58 100644 --- a/pkg/web/server.go +++ b/pkg/web/web.go @@ -6,7 +6,7 @@ import ( "github.com/gorilla/sessions" "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/pkg/qbit" + "github.com/sirrobot01/decypharr/pkg/store" "html/template" "os" ) @@ -50,26 +50,15 @@ type RepairRequest struct { //go:embed templates/* var content embed.FS -type Handler struct { - qbit *qbit.QBit - logger zerolog.Logger -} - -func New(qbit *qbit.QBit) *Handler { - return &Handler{ - qbit: qbit, - logger: logger.New("ui"), - } -} - -var ( - secretKey = cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"") - store = sessions.NewCookieStore([]byte(secretKey)) +type Web struct { + logger zerolog.Logger + cookie *sessions.CookieStore templates *template.Template -) + torrents *store.TorrentStorage +} -func init() { - 
templates = template.Must(template.ParseFS( +func New() *Web { + templates := template.Must(template.ParseFS( content, "templates/layout.html", "templates/index.html", @@ -79,10 +68,17 @@ func init() { "templates/login.html", "templates/register.html", )) - - store.Options = &sessions.Options{ + secretKey := cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"") + cookieStore := sessions.NewCookieStore([]byte(secretKey)) + cookieStore.Options = &sessions.Options{ Path: "/", MaxAge: 86400 * 7, HttpOnly: false, } + return &Web{ + logger: logger.New("ui"), + templates: templates, + cookie: cookieStore, + torrents: store.GetStore().GetTorrentStorage(), + } } diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 3fd6c0b..20678ec 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" + "github.com/sirrobot01/decypharr/pkg/debrid/store" ) var sharedClient = &http.Client{ @@ -28,7 +28,7 @@ var sharedClient = &http.Client{ } type File struct { - cache *debrid.Cache + cache *store.Cache fileId string torrentName string @@ -128,7 +128,7 @@ func (f *File) stream() (*http.Response, error) { cleanupResp := func() { if resp.Body != nil { - io.Copy(io.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) resp.Body.Close() } } @@ -192,7 +192,7 @@ func (f *File) stream() (*http.Response, error) { if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent { cleanupBody := func() { if newResp.Body != nil { - io.Copy(io.Discard, newResp.Body) + _, _ = io.Copy(io.Discard, newResp.Body) newResp.Body.Close() } } diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 8df178b..923cba4 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -3,6 +3,8 @@ package webdav import ( "context" "fmt" + "github.com/sirrobot01/decypharr/pkg/debrid/types" + "golang.org/x/net/webdav" "io" "mime" 
"net/http" @@ -15,21 +17,19 @@ import ( "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/utils" - "github.com/sirrobot01/decypharr/pkg/debrid/debrid" - "github.com/sirrobot01/decypharr/pkg/debrid/types" + "github.com/sirrobot01/decypharr/pkg/debrid/store" "github.com/sirrobot01/decypharr/pkg/version" - "golang.org/x/net/webdav" ) type Handler struct { Name string logger zerolog.Logger - cache *debrid.Cache + cache *store.Cache URLBase string RootPath string } -func NewHandler(name, urlBase string, cache *debrid.Cache, logger zerolog.Logger) *Handler { +func NewHandler(name, urlBase string, cache *store.Cache, logger zerolog.Logger) *Handler { h := &Handler{ Name: name, cache: cache, @@ -191,7 +191,7 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F } name = utils.PathUnescape(path.Clean(name)) rootDir := path.Clean(h.RootPath) - metadataOnly := ctx.Value("metadataOnly") != nil + metadataOnly := ctx.Value(metadataOnlyKey) != nil now := time.Now() // 1) special case version.txt @@ -490,7 +490,7 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) { done := make(chan struct{}) go func() { defer close(done) - io.Copy(w, fRaw) + _, _ = io.Copy(w, fRaw) }() select { case <-ctx.Done(): diff --git a/pkg/webdav/misc.go b/pkg/webdav/misc.go index 25deb85..2e3ad64 100644 --- a/pkg/webdav/misc.go +++ b/pkg/webdav/misc.go @@ -84,9 +84,7 @@ func filesToXML(urlPath string, fi os.FileInfo, children []os.FileInfo) stringbu }) } - sb := builderPool.Get().(stringbuf.StringBuf) - sb.Reset() - defer builderPool.Put(sb) + sb := stringbuf.New("") // XML header and main element _, _ = sb.WriteString(``) diff --git a/pkg/webdav/propfind.go b/pkg/webdav/propfind.go index 27c0a59..62ccd51 100644 --- a/pkg/webdav/propfind.go +++ b/pkg/webdav/propfind.go @@ -8,21 +8,19 @@ import ( "path" "strconv" "strings" - "sync" "time" ) -var builderPool = sync.Pool{ +type contextKey string - New: func() interface{} { - buf := 
stringbuf.New("") - return buf - }, -} +const ( + // metadataOnlyKey is used to indicate that the request is for metadata only + metadataOnlyKey contextKey = "metadataOnly" +) func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) { // Setup context for metadata only - ctx := context.WithValue(r.Context(), "metadataOnly", true) + ctx := context.WithValue(r.Context(), metadataOnlyKey, true) r = r.WithContext(ctx) cleanPath := path.Clean(r.URL.Path) @@ -85,9 +83,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) { }) } - sb := builderPool.Get().(stringbuf.StringBuf) - sb.Reset() - defer builderPool.Put(sb) + sb := stringbuf.New("") // XML header and main element _, _ = sb.WriteString(``) diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index 6ead760..7259234 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -7,7 +7,7 @@ import ( "github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5/middleware" "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/pkg/service" + "github.com/sirrobot01/decypharr/pkg/store" "html/template" "net/http" "net/url" @@ -90,13 +90,12 @@ type WebDav struct { } func New() *WebDav { - svc := service.GetService() urlBase := config.Get().URLBase w := &WebDav{ Handlers: make([]*Handler, 0), URLBase: urlBase, } - for name, c := range svc.Debrid.Caches { + for name, c := range store.GetStore().GetDebrid().GetCaches() { h := NewHandler(name, urlBase, c, c.GetLogger()) w.Handlers = append(w.Handlers, h) } diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go deleted file mode 100644 index 3c58e0f..0000000 --- a/pkg/worker/worker.go +++ /dev/null @@ -1,72 +0,0 @@ -package worker - -import ( - "context" - "github.com/rs/zerolog" - "github.com/sirrobot01/decypharr/internal/config" - "github.com/sirrobot01/decypharr/internal/logger" - "github.com/sirrobot01/decypharr/pkg/service" - "sync" - "time" -) - -var ( - _logInstance zerolog.Logger -) - -func 
getLogger() zerolog.Logger { - return _logInstance -} - -func Start(ctx context.Context) error { - cfg := config.Get() - // Start Arr Refresh Worker - _logInstance = logger.New("worker") - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - cleanUpQueuesWorker(ctx, cfg) - }() - wg.Wait() - return nil -} - -func cleanUpQueuesWorker(ctx context.Context, cfg *config.Config) { - // Start Clean up Queues Worker - _logger := getLogger() - _logger.Debug().Msg("Clean up Queues Worker started") - cleanupCtx := context.WithValue(ctx, "worker", "cleanup") - cleanupTicker := time.NewTicker(time.Duration(10) * time.Second) - - var cleanupMutex sync.Mutex - - for { - select { - case <-cleanupCtx.Done(): - _logger.Debug().Msg("Clean up Queues Worker stopped") - return - case <-cleanupTicker.C: - if cleanupMutex.TryLock() { - go func() { - defer cleanupMutex.Unlock() - cleanUpQueues() - }() - } - } - } -} - -func cleanUpQueues() { - // Clean up queues - _logger := getLogger() - for _, a := range service.GetService().Arr.GetAll() { - if !a.Cleanup { - continue - } - if err := a.CleanupQueue(); err != nil { - _logger.Error().Err(err).Msg("Error cleaning up queue") - } - } -} diff --git a/scripts/deploy.sh b/scripts/deploy.sh deleted file mode 100755 index 662ecfa..0000000 --- a/scripts/deploy.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -# deploy.sh - -# Function to display usage -usage() { - echo "Usage: $0 [-b|--beta] " - echo "Example for main: $0 v1.0.0" - echo "Example for beta: $0 -b v1.0.0" - exit 1 -} - -# Parse arguments -BETA=false - -while [[ "$#" -gt 0 ]]; do - case $1 in - -b|--beta) BETA=true; shift ;; - -*) echo "Unknown parameter: $1"; usage ;; - *) VERSION="$1"; shift ;; - esac -done - -# Check if version is provided -if [ -z "$VERSION" ]; then - echo "Error: Version is required" - usage -fi - -# Validate version format -if ! 
[[ $VERSION =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then - echo "Error: Version must be in format v1.0.0" - exit 1 -fi - -# Set tag based on branch -if [ "$BETA" = true ]; then - TAG="$VERSION-beta" - BRANCH="beta" -else - TAG="$VERSION" - BRANCH="main" -fi - -echo "Deploying version $VERSION to $BRANCH branch..." - -# Ensure we're on the right branch -git checkout $BRANCH || exit 1 - -# Create and push tag -echo "Creating tag $TAG..." -git tag "$TAG" || exit 1 -git push origin "$TAG" || exit 1 - -echo "Deployment initiated successfully!" -echo "GitHub Actions will handle the release process." -echo "Check the progress at: https://github.com/sirrobot01/decypharr/actions" \ No newline at end of file From 349a13468bd95195093956e3642ae748bd832e63 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 2 Jun 2025 15:44:03 +0100 Subject: [PATCH 06/26] fix cloudflare, maybe? --- pkg/webdav/handler.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 923cba4..789102e 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -8,6 +8,7 @@ import ( "io" "mime" "net/http" + "net/url" "os" "path" "path/filepath" @@ -459,8 +460,21 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) { } file.downloadLink = link if h.cache.StreamWithRclone() { - // Redirect to the download link - http.Redirect(w, r, file.downloadLink, http.StatusTemporaryRedirect) + redirectURL := file.downloadLink + + rangeHeader := r.Header.Get("Range") + cacheBuster := fmt.Sprintf("t=%d&r=%s", time.Now().UnixNano(), url.QueryEscape(rangeHeader)) + + if strings.Contains(redirectURL, "?") { + redirectURL += "&" + cacheBuster + } else { + redirectURL += "?" 
+ cacheBuster + } + + w.Header().Set("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0") + w.Header().Set("Pragma", "no-cache") + + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) return } } From f041ef47a75ead608dabb70331caaaa4c55f9fc2 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 2 Jun 2025 20:04:41 +0100 Subject: [PATCH 07/26] fix cloudflare, probably? --- pkg/webdav/handler.go | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 789102e..dcb0d8d 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -8,7 +8,6 @@ import ( "io" "mime" "net/http" - "net/url" "os" "path" "path/filepath" @@ -460,21 +459,16 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) { } file.downloadLink = link if h.cache.StreamWithRclone() { - redirectURL := file.downloadLink - - rangeHeader := r.Header.Get("Range") - cacheBuster := fmt.Sprintf("t=%d&r=%s", time.Now().UnixNano(), url.QueryEscape(rangeHeader)) - - if strings.Contains(redirectURL, "?") { - redirectURL += "&" + cacheBuster - } else { - redirectURL += "?" 
+ cacheBuster - } - - w.Header().Set("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0") + w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Set("Pragma", "no-cache") - - http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + w.Header().Set("Expires", "0") + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name())) + w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size())) + w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat)) + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Set("X-Accel-Redirect", file.downloadLink) + w.Header().Set("X-Accel-Buffering", "no") + http.Redirect(w, r, file.downloadLink, http.StatusFound) return } } From 30a1dd74a7659d8a1129aa8d0d68ef166df1d15e Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 2 Jun 2025 20:45:39 +0100 Subject: [PATCH 08/26] Add Basic healtcheck --- Dockerfile | 4 ++++ cmd/healthcheck/main.go | 8 ++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index b12fe3b..ad5b10c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -61,4 +61,8 @@ EXPOSE 8282 VOLUME ["/app"] USER nonroot:nonroot + +# Base healthcheck +HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app", "--basic"] + CMD ["/usr/bin/decypharr", "--config", "/app"] \ No newline at end of file diff --git a/cmd/healthcheck/main.go b/cmd/healthcheck/main.go index 3c7253a..f2f459c 100644 --- a/cmd/healthcheck/main.go +++ b/cmd/healthcheck/main.go @@ -22,8 +22,12 @@ type HealthStatus struct { } func main() { - var configPath string + var ( + configPath string + isBasicCheck bool + ) flag.StringVar(&configPath, "config", "/data", "path to the data folder") + flag.BoolVar(&isBasicCheck, "basic", false, "perform basic health check without WebDAV") flag.Parse() config.SetConfigPath(configPath) cfg := config.Get() @@ -64,7 +68,7 @@ func main() { } // Check WebDAV if enabled - 
if webdavPath != "" { + if !isBasicCheck && webdavPath != "" { if checkWebDAV(ctx, baseUrl, port, webdavPath) { status.WebDAVService = true } From dfcf8708f18202b786fb07512be82538791a1147 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Tue, 3 Jun 2025 10:45:23 +0100 Subject: [PATCH 09/26] final prep for 1.0.3 --- README.md | 20 ++++----- cmd/healthcheck/main.go | 41 ++++++++++++++----- pkg/debrid/providers/alldebrid/alldebrid.go | 6 +-- .../providers/debrid_link/debrid_link.go | 6 +-- pkg/debrid/providers/realdebrid/realdebrid.go | 6 +-- pkg/debrid/providers/torbox/torbox.go | 4 +- pkg/debrid/store/cache.go | 4 +- pkg/debrid/store/refresh.go | 3 -- pkg/qbit/http.go | 10 ++--- pkg/repair/repair.go | 21 +++++----- pkg/server/server.go | 2 +- pkg/store/store.go | 2 +- pkg/store/torrent.go | 2 +- 13 files changed, 71 insertions(+), 56 deletions(-) diff --git a/README.md b/README.md index 320b45e..2983b49 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,21 @@ -# DecyphArr +# Decypharr ![ui](docs/docs/images/main.png) -**DecyphArr** is an implementation of QbitTorrent with **Multiple Debrid service support**, written in Go. +**Decypharr** is an implementation of QbitTorrent with **Multiple Debrid service support**, written in Go. -## What is DecyphArr? +## What is Decypharr? -DecyphArr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications while leveraging the capabilities of Debrid providers. +Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications. 
## Features -- 🔄 Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc) -- 🖥️ Full-fledged UI for managing torrents -- 🛡️ Proxy support for filtering out un-cached Debrid torrents -- 🔌 Multiple Debrid providers support -- 📁 WebDAV server support for each debrid provider -- 🔧 Repair Worker for missing files +- Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc) +- Full-fledged UI for managing torrents +- Proxy support for filtering out un-cached Debrid torrents +- Multiple Debrid providers support +- WebDAV server support for each debrid provider +- Repair Worker for missing files ## Supported Debrid Providers diff --git a/cmd/healthcheck/main.go b/cmd/healthcheck/main.go index f2f459c..fe17f87 100644 --- a/cmd/healthcheck/main.go +++ b/cmd/healthcheck/main.go @@ -25,9 +25,11 @@ func main() { var ( configPath string isBasicCheck bool + debug bool ) flag.StringVar(&configPath, "config", "/data", "path to the data folder") flag.BoolVar(&isBasicCheck, "basic", false, "perform basic health check without WebDAV") + flag.BoolVar(&debug, "debug", false, "enable debug mode for detailed output") flag.Parse() config.SetConfigPath(configPath) cfg := config.Get() @@ -67,16 +69,17 @@ func main() { status.WebUI = true } - // Check WebDAV if enabled - if !isBasicCheck && webdavPath != "" { - if checkWebDAV(ctx, baseUrl, port, webdavPath) { + if isBasicCheck { + status.WebDAVService = checkBaseWebdav(ctx, baseUrl, port) + } else { + // If not a basic check, check WebDAV with debrid path + if webdavPath != "" { + status.WebDAVService = checkDebridWebDAV(ctx, baseUrl, port, webdavPath) + } else { + // If no WebDAV path is set, consider it healthy status.WebDAVService = true } - } else { - // If WebDAV is not enabled, consider it healthy - status.WebDAVService = true } - // Determine overall status // Consider the application healthy if core services are running status.OverallStatus = status.QbitAPI && status.WebUI @@ -85,7 +88,7 @@ func main() { } 
// Optional: output health status as JSON for logging - if os.Getenv("DEBUG") == "true" { + if debug { statusJSON, _ := json.MarshalIndent(status, "", " ") fmt.Println(string(statusJSON)) } @@ -136,7 +139,24 @@ func checkWebUI(ctx context.Context, baseUrl, port string) bool { return resp.StatusCode == http.StatusOK } -func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool { +func checkBaseWebdav(ctx context.Context, baseUrl, port string) bool { + url := fmt.Sprintf("http://localhost:%s%swebdav/", port, baseUrl) + req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil) + if err != nil { + return false + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return false + } + defer resp.Body.Close() + + return resp.StatusCode == http.StatusMultiStatus || + resp.StatusCode == http.StatusOK +} + +func checkDebridWebDAV(ctx context.Context, baseUrl, port, path string) bool { url := fmt.Sprintf("http://localhost:%s%swebdav/%s", port, baseUrl, path) req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil) if err != nil { @@ -150,7 +170,6 @@ func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool { defer resp.Body.Close() return resp.StatusCode == http.StatusMultiStatus || - resp.StatusCode == http.StatusOK || - resp.StatusCode == http.StatusServiceUnavailable // It's still indexing + resp.StatusCode == http.StatusOK } diff --git a/pkg/debrid/providers/alldebrid/alldebrid.go b/pkg/debrid/providers/alldebrid/alldebrid.go index 7dec3d2..e870d68 100644 --- a/pkg/debrid/providers/alldebrid/alldebrid.go +++ b/pkg/debrid/providers/alldebrid/alldebrid.go @@ -190,7 +190,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) { var res TorrentInfoResponse err = json.Unmarshal(resp, &res) if err != nil { - ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err) + ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info") return nil, err } data := res.Data.Magnets @@ -232,7 +232,7 
@@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error { var res TorrentInfoResponse err = json.Unmarshal(resp, &res) if err != nil { - ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err) + ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info") return err } data := res.Data.Magnets @@ -393,7 +393,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) { var res TorrentsListResponse err = json.Unmarshal(resp, &res) if err != nil { - ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err) + ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info") return torrents, err } for _, magnet := range res.Data.Magnets { diff --git a/pkg/debrid/providers/debrid_link/debrid_link.go b/pkg/debrid/providers/debrid_link/debrid_link.go index 6dd3edc..e15a35c 100644 --- a/pkg/debrid/providers/debrid_link/debrid_link.go +++ b/pkg/debrid/providers/debrid_link/debrid_link.go @@ -110,13 +110,13 @@ func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool { req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := dl.client.MakeRequest(req) if err != nil { - dl.logger.Info().Msgf("Error checking availability: %v", err) + dl.logger.Error().Err(err).Msgf("Error checking availability") return result } var data AvailableResponse err = json.Unmarshal(resp, &data) if err != nil { - dl.logger.Info().Msgf("Error marshalling availability: %v", err) + dl.logger.Error().Err(err).Msgf("Error marshalling availability") return result } if data.Value == nil { @@ -406,7 +406,7 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) { var res torrentInfo err = json.Unmarshal(resp, &res) if err != nil { - dl.logger.Info().Msgf("Error unmarshalling torrent info: %s", err) + dl.logger.Error().Err(err).Msgf("Error unmarshalling torrent info") return torrents, err } diff --git a/pkg/debrid/providers/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go index 5dfa438..0943e72 100644 --- 
a/pkg/debrid/providers/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -81,7 +81,7 @@ func New(dc config.Debrid) (*RealDebrid, error) { request.WithHeaders(headers), request.WithRateLimiter(rl), request.WithLogger(_log), - request.WithMaxRetries(5), + request.WithMaxRetries(10), request.WithRetryableStatus(429, 502), request.WithProxy(dc.Proxy), ), @@ -302,13 +302,13 @@ func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool { req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := r.client.MakeRequest(req) if err != nil { - r.logger.Info().Msgf("Error checking availability: %v", err) + r.logger.Error().Err(err).Msgf("Error checking availability") return result } var data AvailabilityResponse err = json.Unmarshal(resp, &data) if err != nil { - r.logger.Info().Msgf("Error marshalling availability: %v", err) + r.logger.Error().Err(err).Msgf("Error marshalling availability") return result } for _, h := range hashes[i:end] { diff --git a/pkg/debrid/providers/torbox/torbox.go b/pkg/debrid/providers/torbox/torbox.go index 30ed9c6..7f22280 100644 --- a/pkg/debrid/providers/torbox/torbox.go +++ b/pkg/debrid/providers/torbox/torbox.go @@ -117,13 +117,13 @@ func (tb *Torbox) IsAvailable(hashes []string) map[string]bool { req, _ := http.NewRequest(http.MethodGet, url, nil) resp, err := tb.client.MakeRequest(req) if err != nil { - tb.logger.Info().Msgf("Error checking availability: %v", err) + tb.logger.Error().Err(err).Msgf("Error checking availability") return result } var res AvailableResponse err = json.Unmarshal(resp, &res) if err != nil { - tb.logger.Info().Msgf("Error marshalling availability: %v", err) + tb.logger.Error().Err(err).Msgf("Error marshalling availability") return result } if res.Data == nil { diff --git a/pkg/debrid/store/cache.go b/pkg/debrid/store/cache.go index 9d08d7d..9ac6412 100644 --- a/pkg/debrid/store/cache.go +++ b/pkg/debrid/store/cache.go @@ -231,6 +231,8 @@ func (c *Cache) Start(ctx 
context.Context) error { if err := c.Sync(ctx); err != nil { return fmt.Errorf("failed to sync cache: %w", err) } + // Fire the ready channel + close(c.ready) // initial download links go c.refreshDownloadLinks(ctx) @@ -242,8 +244,6 @@ func (c *Cache) Start(ctx context.Context) error { c.repairChan = make(chan RepairRequest, 100) go c.repairWorker(ctx) - // Fire the ready channel - close(c.ready) cfg := config.Get() name := c.client.GetName() addr := cfg.BindAddress + ":" + cfg.Port + cfg.URLBase + "webdav/" + name + "/" diff --git a/pkg/debrid/store/refresh.go b/pkg/debrid/store/refresh.go index 882eb4e..f9e5a5d 100644 --- a/pkg/debrid/store/refresh.go +++ b/pkg/debrid/store/refresh.go @@ -261,7 +261,4 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) { c.downloadLinks.Delete(k) } } - - c.logger.Trace().Msgf("Refreshed %d download links", len(downloadLinks)) - } diff --git a/pkg/qbit/http.go b/pkg/qbit/http.go index 1f81e7f..3115189 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -17,7 +17,7 @@ func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) { return } if err := _arr.Validate(); err != nil { - q.logger.Info().Msgf("Error validating arr: %v", err) + q.logger.Error().Err(err).Msgf("Error validating arr") } _, _ = w.Write([]byte("Ok.")) } @@ -73,13 +73,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { contentType := r.Header.Get("Content-Type") if strings.Contains(contentType, "multipart/form-data") { if err := r.ParseMultipartForm(32 << 20); err != nil { - q.logger.Info().Msgf("Error parsing multipart form: %v", err) + q.logger.Error().Err(err).Msgf("Error parsing multipart form") http.Error(w, err.Error(), http.StatusBadRequest) return } } else if strings.Contains(contentType, "application/x-www-form-urlencoded") { if err := r.ParseForm(); err != nil { - q.logger.Info().Msgf("Error parsing form: %v", err) + q.logger.Error().Err(err).Msgf("Error parsing form") http.Error(w, err.Error(), 
http.StatusBadRequest) return } @@ -105,7 +105,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { } for _, url := range urlList { if err := q.addMagnet(ctx, url, _arr, debridName, isSymlink); err != nil { - q.logger.Info().Msgf("Error adding magnet: %v", err) + q.logger.Error().Err(err).Msgf("Error adding magnet") http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -118,7 +118,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { if files := r.MultipartForm.File["torrents"]; len(files) > 0 { for _, fileHeader := range files { if err := q.addTorrent(ctx, fileHeader, _arr, debridName, isSymlink); err != nil { - q.logger.Info().Msgf("Error adding torrent: %v", err) + q.logger.Error().Err(err).Msgf("Error adding torrent") http.Error(w, err.Error(), http.StatusBadRequest) return } diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 7a50a54..f21a69a 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -440,7 +440,7 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF } // Check first media to confirm mounts are accessible if !r.isMediaAccessible(media) { - r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts") + r.logger.Info().Msgf("Skipping repair. Parent directory not accessible. 
Check your mounts") return brokenItems, nil } @@ -520,19 +520,18 @@ func (r *Repair) isMediaAccessible(media []arr.Content) bool { firstFile := files[0] symlinkPath := getSymlinkTarget(firstFile.Path) + if symlinkPath == "" { + r.logger.Debug().Msgf("No symlink target found for %s", firstFile.Path) + return false + } r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath) - parentSymlink := "" - if symlinkPath != "" { - parentSymlink = filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents + parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents + if _, err := os.Stat(parentSymlink); os.IsNotExist(err) { + r.logger.Debug().Msgf("Cannot access parent directory %s for %s", parentSymlink, firstFile.Path) + return false } - if parentSymlink != "" { - if _, err := os.Stat(parentSymlink); os.IsNotExist(err) { - return false - } - return true - } - return false + return true } func (r *Repair) getBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { diff --git a/pkg/server/server.go b/pkg/server/server.go index 8dd793c..66ac869 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -70,7 +70,7 @@ func (s *Server) Start(ctx context.Context) error { go func() { if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { - s.logger.Info().Msgf("Error starting server: %v", err) + s.logger.Error().Err(err).Msgf("Error starting server") } }() diff --git a/pkg/store/store.go b/pkg/store/store.go index 00f25bb..d404152 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -41,7 +41,7 @@ func GetStore() *Store { arr: arrs, debrid: deb, torrents: newTorrentStorage(cfg.TorrentsFile()), - logger: logger.New("store"), + logger: logger.Default(), // Use default logger [decypharr] refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, skipPreCache: qbitCfg.SkipPreCache, 
downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), diff --git a/pkg/store/torrent.go b/pkg/store/torrent.go index a5c0388..419c28b 100644 --- a/pkg/store/torrent.go +++ b/pkg/store/torrent.go @@ -101,7 +101,7 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp go func() { _ = client.DeleteTorrent(debridTorrent.Id) }() - s.logger.Info().Msgf("Error: %v", err) + s.logger.Error().Err(err).Msgf("Error occured while processing torrent %s", debridTorrent.Name) importReq.markAsFailed(err, torrent, debridTorrent) return } From 84603b084b8ce0515e7b99f69abd7915e3dceb42 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 7 Jun 2025 10:03:01 +0100 Subject: [PATCH 10/26] Some improvements to beta --- pkg/debrid/providers/realdebrid/realdebrid.go | 2 +- pkg/debrid/store/cache.go | 27 ++++++++++++++----- pkg/qbit/http.go | 2 +- pkg/qbit/routes.go | 2 +- pkg/qbit/torrent.go | 5 ---- pkg/repair/repair.go | 20 +++++++------- pkg/webdav/file.go | 1 + pkg/webdav/handler.go | 6 ++++- 8 files changed, 38 insertions(+), 27 deletions(-) diff --git a/pkg/debrid/providers/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go index 0943e72..261188d 100644 --- a/pkg/debrid/providers/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -211,7 +211,7 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select fileMap := make(map[string]*types.File) for i := range selectedFiles { // RD converts special chars to '_' for RAR file paths - // TOOD: there might be more special chars to replace + // @TODO: there might be more special chars to replace safeName := strings.NewReplacer("|", "_", "\"", "_", "\\", "_", "?", "_", "*", "_", ":", "_", "<", "_", ">", "_").Replace(selectedFiles[i].Name) fileMap[safeName] = &selectedFiles[i] } diff --git a/pkg/debrid/store/cache.go b/pkg/debrid/store/cache.go index 9ac6412..0412645 100644 --- a/pkg/debrid/store/cache.go +++ 
b/pkg/debrid/store/cache.go @@ -228,11 +228,14 @@ func (c *Cache) Start(ctx context.Context) error { return fmt.Errorf("failed to create cache directory: %w", err) } + c.logger.Info().Msgf("Started indexing...") + if err := c.Sync(ctx); err != nil { return fmt.Errorf("failed to sync cache: %w", err) } // Fire the ready channel close(c.ready) + c.logger.Info().Msgf("Indexing complete, %d torrents loaded", len(c.torrents.getAll())) // initial download links go c.refreshDownloadLinks(ctx) @@ -241,7 +244,7 @@ func (c *Cache) Start(ctx context.Context) error { c.logger.Error().Err(err).Msg("Failed to start cache worker") } - c.repairChan = make(chan RepairRequest, 100) + c.repairChan = make(chan RepairRequest, 100) // Initialize the repair channel, max 100 requests buffered go c.repairWorker(ctx) cfg := config.Get() @@ -398,9 +401,11 @@ func (c *Cache) Sync(ctx context.Context) error { if len(deletedTorrents) > 0 { c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents)) for _, id := range deletedTorrents { - if _, ok := cachedTorrents[id]; ok { - c.deleteTorrent(id, false) // delete from cache - } + // Remove from cache and debrid service + delete(cachedTorrents, id) + // Remove the json file from disk + c.removeFile(id, false) + } } @@ -752,13 +757,13 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool { if torrent, ok := c.torrents.getByID(id); ok { c.torrents.removeId(id) // Delete id from cache defer func() { - c.removeFromDB(id) + c.removeFile(id, false) if removeFromDebrid { _ = c.client.DeleteTorrent(id) // Skip error handling, we don't care if it fails } }() // defer delete from debrid - torrentName := torrent.Name + torrentName := c.GetTorrentFolder(torrent.Torrent) if t, ok := c.torrents.getByName(torrentName); ok { @@ -795,7 +800,7 @@ func (c *Cache) DeleteTorrents(ids []string) { c.listingDebouncer.Call(true) } -func (c *Cache) removeFromDB(torrentId string) { +func (c *Cache) removeFile(torrentId string, moveToTrash bool) 
{ // Moves the torrent file to the trash filePath := filepath.Join(c.dir, torrentId+".json") @@ -804,6 +809,14 @@ func (c *Cache) removeFromDB(torrentId string) { return } + if !moveToTrash { + // If not moving to trash, delete the file directly + if err := os.Remove(filePath); err != nil { + c.logger.Error().Err(err).Msgf("Failed to remove file: %s", filePath) + return + } + return + } // Move the file to the trash trashPath := filepath.Join(c.dir, "trash", torrentId+".json") if err := os.MkdirAll(filepath.Dir(trashPath), 0755); err != nil { diff --git a/pkg/qbit/http.go b/pkg/qbit/http.go index 3115189..f87ea58 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -276,7 +276,7 @@ func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) { request.JSONResponse(w, nil, http.StatusOK) } -func (q *QBit) handleremoveTorrentTags(w http.ResponseWriter, r *http.Request) { +func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) { err := r.ParseForm() if err != nil { http.Error(w, "Failed to parse form data", http.StatusBadRequest) diff --git a/pkg/qbit/routes.go b/pkg/qbit/routes.go index 9881247..c362df4 100644 --- a/pkg/qbit/routes.go +++ b/pkg/qbit/routes.go @@ -20,7 +20,7 @@ func (q *QBit) Routes() http.Handler { r.Post("/createCategory", q.handleCreateCategory) r.Post("/setCategory", q.handleSetCategory) r.Post("/addTags", q.handleAddTorrentTags) - r.Post("/removeTags", q.handleremoveTorrentTags) + r.Post("/removeTags", q.handleRemoveTorrentTags) r.Post("/createTags", q.handleCreateTags) r.Get("/tags", q.handleGetTags) r.Get("/pause", q.handleTorrentsPause) diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 5e9ae77..482a299 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -135,8 +135,3 @@ func (q *QBit) addTags(tags []string) bool { } return true } - -func (q *QBit) removeTags(tags []string) bool { - q.Tags = utils.RemoveItem(q.Tags, tags...) 
- return true -} diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index f21a69a..453fb29 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -439,9 +439,9 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF return brokenItems, nil } // Check first media to confirm mounts are accessible - if !r.isMediaAccessible(media) { - r.logger.Info().Msgf("Skipping repair. Parent directory not accessible. Check your mounts") - return brokenItems, nil + if err := r.checkMountUp(media); err != nil { + r.logger.Error().Err(err).Msgf("Mount check failed for %s", a.Name) + return brokenItems, fmt.Errorf("mount check failed: %w", err) } // Mutex for brokenItems @@ -504,8 +504,8 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF return brokenItems, nil } -// isMediaAccessible checks if the mounts are accessible -func (r *Repair) isMediaAccessible(media []arr.Content) bool { +// checkMountUp checks if the mounts are accessible +func (r *Repair) checkMountUp(media []arr.Content) error { firstMedia := media[0] for _, m := range media { if len(m.Files) > 0 { @@ -515,23 +515,21 @@ func (r *Repair) isMediaAccessible(media []arr.Content) bool { } files := firstMedia.Files if len(files) == 0 { - return false + return fmt.Errorf("no files found in media %s", firstMedia.Title) } firstFile := files[0] symlinkPath := getSymlinkTarget(firstFile.Path) if symlinkPath == "" { - r.logger.Debug().Msgf("No symlink target found for %s", firstFile.Path) - return false + return fmt.Errorf("no symlink target found for %s", firstFile.Path) } r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath) parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents if _, err := os.Stat(parentSymlink); os.IsNotExist(err) { - r.logger.Debug().Msgf("Cannot access parent directory %s for %s", parentSymlink, firstFile.Path) - return false + return 
fmt.Errorf("parent directory %s not accessible for %s", parentSymlink, firstFile.Path) } - return true + return nil } func (r *Repair) getBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 20678ec..3833b5f 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -41,6 +41,7 @@ type File struct { reader io.ReadCloser seekPending bool content []byte + isRar bool name string metadataOnly bool diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index dcb0d8d..22e54a0 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -245,6 +245,7 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F size: file.Size, link: file.Link, metadataOnly: metadataOnly, + isRar: file.IsRar, modTime: cached.AddedOn, }, nil } @@ -409,6 +410,8 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we } } +// Handlers + func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) { fRaw, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0) if err != nil { @@ -458,7 +461,8 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) { return } file.downloadLink = link - if h.cache.StreamWithRclone() { + // If the torrent file is not a RAR file and users enabled proxy streaming + if !file.isRar && h.cache.StreamWithRclone() { w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Set("Pragma", "no-cache") w.Header().Set("Expires", "0") From 5bf1dab5e62ddef3717e92e898f6edfc6fac5912 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere <32229538+sirrobot01@users.noreply.github.com> Date: Sat, 7 Jun 2025 17:23:41 +0100 Subject: [PATCH 11/26] Torrent Queuing for Botched torrent (#83) * Implement a queue for handling failed torrent * Add checks for getting slots * Few other cleanups, change some function names --- cmd/decypharr/main.go | 8 +- internal/config/config.go | 5 + .../{request/errors.go => utils/error.go} | 
14 +- pkg/debrid/debrid.go | 166 ++++++++++-------- pkg/debrid/providers/alldebrid/alldebrid.go | 32 ++-- .../providers/debrid_link/debrid_link.go | 21 ++- pkg/debrid/providers/realdebrid/realdebrid.go | 110 ++++++++---- pkg/debrid/providers/realdebrid/types.go | 5 + pkg/debrid/providers/torbox/torbox.go | 25 +-- pkg/debrid/store/cache.go | 10 +- pkg/debrid/store/download_link.go | 7 +- pkg/debrid/store/misc.go | 2 +- pkg/debrid/store/repair.go | 3 +- pkg/debrid/types/client.go | 5 +- pkg/qbit/context.go | 2 +- pkg/qbit/qbit.go | 2 +- pkg/qbit/torrent.go | 4 +- pkg/repair/misc.go | 2 +- pkg/repair/repair.go | 6 +- pkg/server/debug.go | 18 +- pkg/server/webhook.go | 2 +- pkg/store/downloader.go | 10 +- pkg/store/request.go | 119 +++++++++++-- pkg/store/store.go | 20 ++- pkg/store/torrent.go | 143 ++++++++++++--- pkg/store/torrent_storage.go | 4 +- pkg/web/api.go | 42 ++--- pkg/web/web.go | 2 +- pkg/webdav/file.go | 2 +- pkg/webdav/webdav.go | 4 +- 30 files changed, 556 insertions(+), 239 deletions(-) rename internal/{request/errors.go => utils/error.go} (69%) diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index 4cbc57a..30d5f72 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -145,7 +145,7 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e }) safeGo(func() error { - arr := store.GetStore().GetArr() + arr := store.Get().Arr() if arr == nil { return nil } @@ -154,7 +154,7 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e if cfg := config.Get(); cfg.Repair.Enabled { safeGo(func() error { - repair := store.GetStore().GetRepair() + repair := store.Get().Repair() if repair != nil { if err := repair.Start(ctx); err != nil { _log.Error().Err(err).Msg("repair failed") @@ -164,6 +164,10 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e }) } + safeGo(func() error { + return store.Get().StartQueueSchedule(ctx) + }) + go func() { wg.Wait() 
close(errChan) diff --git a/internal/config/config.go b/internal/config/config.go index 526d519..dbe079e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -29,6 +29,7 @@ type Debrid struct { Proxy string `json:"proxy,omitempty"` UnpackRar bool `json:"unpack_rar,omitempty"` AddSamples bool `json:"add_samples,omitempty"` + MinimumFreeSlot int `json:"minimum_free_slot,omitempty"` // Minimum active pots to use this debrid UseWebDav bool `json:"use_webdav,omitempty"` WebDav @@ -384,3 +385,7 @@ func Reload() { instance = nil once = sync.Once{} } + +func DefaultFreeSlot() int { + return 10 +} diff --git a/internal/request/errors.go b/internal/utils/error.go similarity index 69% rename from internal/request/errors.go rename to internal/utils/error.go index be40eb0..840a13c 100644 --- a/internal/request/errors.go +++ b/internal/utils/error.go @@ -1,4 +1,6 @@ -package request +package utils + +import "errors" type HTTPError struct { StatusCode int @@ -33,3 +35,13 @@ var TorrentNotFoundError = &HTTPError{ Message: "Torrent not found", Code: "torrent_not_found", } + +var TooManyActiveDownloadsError = &HTTPError{ + StatusCode: 509, + Message: "Too many active downloads", + Code: "too_many_active_downloads", +} + +func IsTooManyActiveDownloadsError(err error) bool { + return errors.As(err, &TooManyActiveDownloadsError) +} diff --git a/pkg/debrid/debrid.go b/pkg/debrid/debrid.go index 0506181..98d3ae5 100644 --- a/pkg/debrid/debrid.go +++ b/pkg/debrid/debrid.go @@ -2,6 +2,7 @@ package debrid import ( "context" + "errors" "fmt" "github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/logger" @@ -13,25 +14,34 @@ import ( "github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox" "github.com/sirrobot01/decypharr/pkg/debrid/store" "github.com/sirrobot01/decypharr/pkg/debrid/types" - "strings" "sync" ) +type Debrid struct { + cache *store.Cache // Could be nil if not using WebDAV + client types.Client // HTTP client for 
making requests to the debrid service +} + +func (de *Debrid) Client() types.Client { + return de.client +} + +func (de *Debrid) Cache() *store.Cache { + return de.cache +} + type Storage struct { - clients map[string]types.Client - clientsLock sync.Mutex - caches map[string]*store.Cache - cachesLock sync.Mutex - LastUsed string + debrids map[string]*Debrid + mu sync.RWMutex + lastUsed string } func NewStorage() *Storage { cfg := config.Get() - clients := make(map[string]types.Client) _logger := logger.Default() - caches := make(map[string]*store.Cache) + debrids := make(map[string]*Debrid) for _, dc := range cfg.Debrids { client, err := createDebridClient(dc) @@ -39,89 +49,100 @@ func NewStorage() *Storage { _logger.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client") continue } - _log := client.GetLogger() + var cache *store.Cache + _log := client.Logger() if dc.UseWebDav { - caches[dc.Name] = store.NewDebridCache(dc, client) + cache = store.NewDebridCache(dc, client) _log.Info().Msg("Debrid Service started with WebDAV") } else { _log.Info().Msg("Debrid Service started") } - clients[dc.Name] = client + debrids[dc.Name] = &Debrid{ + cache: cache, + client: client, + } } d := &Storage{ - clients: clients, - LastUsed: "", - caches: caches, + debrids: debrids, + lastUsed: "", } return d } -func (d *Storage) GetClient(name string) types.Client { - d.clientsLock.Lock() - defer d.clientsLock.Unlock() - client, exists := d.clients[name] - if !exists { - return nil +func (d *Storage) Debrid(name string) *Debrid { + d.mu.RLock() + defer d.mu.RUnlock() + if debrid, exists := d.debrids[name]; exists { + return debrid } - return client + return nil +} + +func (d *Storage) Debrids() map[string]*Debrid { + d.mu.RLock() + defer d.mu.RUnlock() + debridsCopy := make(map[string]*Debrid) + for name, debrid := range d.debrids { + if debrid != nil { + debridsCopy[name] = debrid + } + } + return debridsCopy +} + +func (d *Storage) Client(name string) 
types.Client { + d.mu.RLock() + defer d.mu.RUnlock() + if client, exists := d.debrids[name]; exists { + return client.client + } + return nil } func (d *Storage) Reset() { - d.clientsLock.Lock() - d.clients = make(map[string]types.Client) - d.clientsLock.Unlock() - - d.cachesLock.Lock() - d.caches = make(map[string]*store.Cache) - d.cachesLock.Unlock() - d.LastUsed = "" + d.mu.Lock() + d.debrids = make(map[string]*Debrid) + d.mu.Unlock() + d.lastUsed = "" } -func (d *Storage) GetClients() map[string]types.Client { - d.clientsLock.Lock() - defer d.clientsLock.Unlock() +func (d *Storage) Clients() map[string]types.Client { + d.mu.RLock() + defer d.mu.RUnlock() clientsCopy := make(map[string]types.Client) - for name, client := range d.clients { - clientsCopy[name] = client + for name, debrid := range d.debrids { + if debrid != nil && debrid.client != nil { + clientsCopy[name] = debrid.client + } } return clientsCopy } -func (d *Storage) GetCaches() map[string]*store.Cache { - d.clientsLock.Lock() - defer d.clientsLock.Unlock() +func (d *Storage) Caches() map[string]*store.Cache { + d.mu.RLock() + defer d.mu.RUnlock() cachesCopy := make(map[string]*store.Cache) - for name, cache := range d.caches { - cachesCopy[name] = cache + for name, debrid := range d.debrids { + if debrid != nil && debrid.cache != nil { + cachesCopy[name] = debrid.cache + } } return cachesCopy } func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client { - d.clientsLock.Lock() - defer d.clientsLock.Unlock() + d.mu.Lock() + defer d.mu.Unlock() filteredClients := make(map[string]types.Client) - for name, client := range d.clients { - if filter(client) { - filteredClients[name] = client + for name, client := range d.debrids { + if client != nil && filter(client.client) { + filteredClients[name] = client.client } } return filteredClients } -func (d *Storage) FilterCaches(filter func(*store.Cache) bool) map[string]*store.Cache { - d.cachesLock.Lock() - defer 
d.cachesLock.Unlock() - filteredCaches := make(map[string]*store.Cache) - for name, cache := range d.caches { - if filter(cache) { - filteredCaches[name] = cache - } - } - return filteredCaches -} - func createDebridClient(dc config.Debrid) (types.Client, error) { switch dc.Name { case "realdebrid": @@ -137,7 +158,7 @@ func createDebridClient(dc config.Debrid) (types.Client, error) { } } -func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { +func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { debridTorrent := &types.Torrent{ InfoHash: magnet.InfoHash, @@ -149,7 +170,7 @@ func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, } clients := store.FilterClients(func(c types.Client) bool { - if selectedDebrid != "" && c.GetName() != selectedDebrid { + if selectedDebrid != "" && c.Name() != selectedDebrid { return false } return true @@ -173,9 +194,9 @@ func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, } for index, db := range clients { - _logger := db.GetLogger() + _logger := db.Logger() _logger.Info(). - Str("Debrid", db.GetName()). + Str("Debrid", db.Name()). Str("Arr", a.Name). Str("Hash", debridTorrent.InfoHash). Str("Name", debridTorrent.Name). 
@@ -191,8 +212,8 @@ func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, continue } dbt.Arr = a - _logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName()) - store.LastUsed = index + _logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.Name()) + store.lastUsed = index torrent, err := db.CheckStatus(dbt, isSymlink) if err != nil && torrent != nil && torrent.Id != "" { @@ -201,18 +222,19 @@ func ProcessTorrent(ctx context.Context, store *Storage, selectedDebrid string, _ = db.DeleteTorrent(id) }(torrent.Id) } - return torrent, err + if err != nil { + errs = append(errs, err) + continue + } + if torrent == nil { + errs = append(errs, fmt.Errorf("torrent %s returned nil after checking status", dbt.Name)) + continue + } + return torrent, nil } if len(errs) == 0 { return nil, fmt.Errorf("failed to process torrent: no clients available") } - if len(errs) == 1 { - return nil, fmt.Errorf("failed to process torrent: %w", errs[0]) - } else { - errStrings := make([]string, 0, len(errs)) - for _, err := range errs { - errStrings = append(errStrings, err.Error()) - } - return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", ")) - } + joinedErrors := errors.Join(errs...) 
+ return nil, fmt.Errorf("failed to process torrent: %w", joinedErrors) } diff --git a/pkg/debrid/providers/alldebrid/alldebrid.go b/pkg/debrid/providers/alldebrid/alldebrid.go index e870d68..a8abaa6 100644 --- a/pkg/debrid/providers/alldebrid/alldebrid.go +++ b/pkg/debrid/providers/alldebrid/alldebrid.go @@ -18,17 +18,18 @@ import ( ) type AllDebrid struct { - Name string + name string Host string `json:"host"` APIKey string accounts map[string]types.Account DownloadUncached bool client *request.Client - MountPath string - logger zerolog.Logger - checkCached bool - addSamples bool + MountPath string + logger zerolog.Logger + checkCached bool + addSamples bool + minimumFreeSlot int } func (ad *AllDebrid) GetProfile() (*types.Profile, error) { @@ -59,7 +60,7 @@ func New(dc config.Debrid) (*AllDebrid, error) { } } return &AllDebrid{ - Name: "alldebrid", + name: "alldebrid", Host: "http://api.alldebrid.com/v4.1", APIKey: dc.APIKey, accounts: accounts, @@ -69,14 +70,15 @@ func New(dc config.Debrid) (*AllDebrid, error) { logger: logger.New(dc.Name), checkCached: dc.CheckCached, addSamples: dc.AddSamples, + minimumFreeSlot: dc.MinimumFreeSlot, }, nil } -func (ad *AllDebrid) GetName() string { - return ad.Name +func (ad *AllDebrid) Name() string { + return ad.name } -func (ad *AllDebrid) GetLogger() zerolog.Logger { +func (ad *AllDebrid) Logger() zerolog.Logger { return ad.logger } @@ -204,7 +206,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) { OriginalFilename: name, Files: make(map[string]types.File), InfoHash: data.Hash, - Debrid: ad.Name, + Debrid: ad.name, MountPath: ad.MountPath, Added: time.Unix(data.CompletionDate, 0).Format(time.RFC3339), } @@ -244,7 +246,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error { t.OriginalFilename = name t.Folder = name t.MountPath = ad.MountPath - t.Debrid = ad.Name + t.Debrid = ad.name t.Bytes = data.Size t.Seeders = data.Seeders t.Added = time.Unix(data.CompletionDate, 
0).Format(time.RFC3339) @@ -406,7 +408,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) { OriginalFilename: magnet.Filename, Files: make(map[string]types.File), InfoHash: magnet.Hash, - Debrid: ad.Name, + Debrid: ad.name, MountPath: ad.MountPath, Added: time.Unix(magnet.CompletionDate, 0).Format(time.RFC3339), }) @@ -444,3 +446,9 @@ func (ad *AllDebrid) ResetActiveDownloadKeys() { func (ad *AllDebrid) DeleteDownloadLink(linkId string) error { return nil } + +func (ad *AllDebrid) GetAvailableSlots() (int, error) { + // This function is a placeholder for AllDebrid + //TODO: Implement the logic to check available slots for AllDebrid + return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid") +} diff --git a/pkg/debrid/providers/debrid_link/debrid_link.go b/pkg/debrid/providers/debrid_link/debrid_link.go index e15a35c..ffa3311 100644 --- a/pkg/debrid/providers/debrid_link/debrid_link.go +++ b/pkg/debrid/providers/debrid_link/debrid_link.go @@ -18,7 +18,7 @@ import ( ) type DebridLink struct { - Name string + name string Host string `json:"host"` APIKey string accounts map[string]types.Account @@ -56,7 +56,7 @@ func New(dc config.Debrid) (*DebridLink, error) { } } return &DebridLink{ - Name: "debridlink", + name: "debridlink", Host: "https://debrid-link.com/api/v2", APIKey: dc.APIKey, accounts: accounts, @@ -73,11 +73,11 @@ func (dl *DebridLink) GetProfile() (*types.Profile, error) { return nil, nil } -func (dl *DebridLink) GetName() string { - return dl.Name +func (dl *DebridLink) Name() string { + return dl.name } -func (dl *DebridLink) GetLogger() zerolog.Logger { +func (dl *DebridLink) Logger() zerolog.Logger { return dl.logger } @@ -163,7 +163,7 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) { Filename: name, OriginalFilename: name, MountPath: dl.MountPath, - Debrid: dl.Name, + Debrid: dl.name, Added: time.Unix(t.Created, 0).Format(time.RFC3339), } cfg := config.Get() @@ -288,7 +288,7 @@ func (dl 
*DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) { t.Filename = name t.OriginalFilename = name t.MountPath = dl.MountPath - t.Debrid = dl.Name + t.Debrid = dl.name t.Added = time.Unix(data.Created, 0).Format(time.RFC3339) for _, f := range data.Files { file := types.File{ @@ -428,7 +428,7 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) { OriginalFilename: t.Name, InfoHash: t.HashString, Files: make(map[string]types.File), - Debrid: dl.Name, + Debrid: dl.name, MountPath: dl.MountPath, Added: time.Unix(t.Created, 0).Format(time.RFC3339), } @@ -476,3 +476,8 @@ func (dl *DebridLink) ResetActiveDownloadKeys() { func (dl *DebridLink) DeleteDownloadLink(linkId string) error { return nil } + +func (dl *DebridLink) GetAvailableSlots() (int, error) { + //TODO: Implement the logic to check available slots for DebridLink + return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink") +} diff --git a/pkg/debrid/providers/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go index 261188d..297a075 100644 --- a/pkg/debrid/providers/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -25,7 +25,7 @@ import ( ) type RealDebrid struct { - Name string + name string Host string `json:"host"` APIKey string @@ -41,10 +41,12 @@ type RealDebrid struct { logger zerolog.Logger UnpackRar bool - rarSemaphore chan struct{} - checkCached bool - addSamples bool - Profile *types.Profile + rarSemaphore chan struct{} + checkCached bool + addSamples bool + Profile *types.Profile + minimumFreeSlot int // Minimum number of active pots to maintain (used for cached stuffs, etc.) 
+ } func New(dc config.Debrid) (*RealDebrid, error) { @@ -71,7 +73,7 @@ } r := &RealDebrid{ - Name: "realdebrid", + name: "realdebrid", Host: "https://api.real-debrid.com/rest/1.0", APIKey: dc.APIKey, accounts: accounts, @@ -98,6 +100,7 @@ func New(dc config.Debrid) (*RealDebrid, error) { rarSemaphore: make(chan struct{}, 2), checkCached: dc.CheckCached, addSamples: dc.AddSamples, + minimumFreeSlot: dc.MinimumFreeSlot, } if _, err := r.GetProfile(); err != nil { @@ -107,11 +110,11 @@ } } -func (r *RealDebrid) GetName() string { - return r.Name +func (r *RealDebrid) Name() string { + return r.name } -func (r *RealDebrid) GetLogger() zerolog.Logger { +func (r *RealDebrid) Logger() zerolog.Logger { return r.logger } @@ -337,15 +340,30 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) { return nil, err } req.Header.Add("Content-Type", "application/x-bittorrent") - resp, err := r.client.MakeRequest(req) + resp, err := r.client.Do(req) if err != nil { return nil, err } + defer resp.Body.Close() // Close must be registered before the error-status early return, or the body leaks on API errors - if err = json.Unmarshal(resp, &data); err != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + // Handle multiple_downloads + + if resp.StatusCode == 509 { + return nil, utils.TooManyActiveDownloadsError + } + + bodyBytes, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes)) + } + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading response body: %w", err) + } + if err = json.Unmarshal(bodyBytes, &data); err != nil { return nil, err } t.Id = data.Id - t.Debrid = r.Name + t.Debrid = r.name t.MountPath = r.MountPath return t, nil } @@ -357,15 +375,30 @@ func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) { } var data AddMagnetSchema req, _ := http.NewRequest(http.MethodPost, 
url, strings.NewReader(payload.Encode())) - resp, err := r.client.MakeRequest(req) + resp, err := r.client.Do(req) if err != nil { return nil, err } + defer resp.Body.Close() // Close must be registered before the error-status early return, or the body leaks on API errors - if err = json.Unmarshal(resp, &data); err != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + // Handle multiple_downloads + + if resp.StatusCode == 509 { + return nil, utils.TooManyActiveDownloadsError + } + + bodyBytes, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes)) + } + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading response body: %w", err) + } + if err = json.Unmarshal(bodyBytes, &data); err != nil { return nil, err } t.Id = data.Id - t.Debrid = r.Name + t.Debrid = r.name t.MountPath = r.MountPath return t, nil } @@ -384,7 +417,7 @@ func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) { } if resp.StatusCode != http.StatusOK { if resp.StatusCode == http.StatusNotFound { - return nil, request.TorrentNotFoundError + return nil, utils.TorrentNotFoundError } return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes)) } @@ -406,7 +439,7 @@ func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) { Filename: data.Filename, OriginalFilename: data.OriginalFilename, Links: data.Links, - Debrid: r.Name, + Debrid: r.name, MountPath: r.MountPath, } t.Files = r.getTorrentFiles(t, data) // Get selected files @@ -427,7 +460,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { } if resp.StatusCode != http.StatusOK { if resp.StatusCode == http.StatusNotFound { - return request.TorrentNotFoundError + return utils.TorrentNotFoundError } return fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes)) } @@ -447,7 +480,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { t.OriginalFilename = 
data.OriginalFilename t.Links = data.Links t.MountPath = r.MountPath - t.Debrid = r.Name + t.Debrid = r.name t.Added = data.Added t.Files, _ = r.getSelectedFiles(t, data) // Get selected files @@ -478,7 +511,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre t.Seeders = data.Seeders t.Links = data.Links t.Status = status - t.Debrid = r.Name + t.Debrid = r.name t.MountPath = r.MountPath if status == "waiting_files_selection" { t.Files = r.getTorrentFiles(t, data) @@ -499,6 +532,9 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre return t, err } if res.StatusCode != http.StatusNoContent { + if res.StatusCode == 509 { + return nil, utils.TooManyActiveDownloadsError + } return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode) } } else if status == "downloaded" { @@ -593,7 +629,7 @@ func (r *RealDebrid) CheckLink(link string) error { return err } if resp.StatusCode == http.StatusNotFound { - return request.HosterUnavailableError // File has been removed + return utils.HosterUnavailableError // File has been removed } return nil } @@ -622,17 +658,17 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er } switch data.ErrorCode { case 19: - return nil, request.HosterUnavailableError // File has been removed + return nil, utils.HosterUnavailableError // File has been removed case 23: - return nil, request.TrafficExceededError + return nil, utils.TrafficExceededError case 24: - return nil, request.HosterUnavailableError // Link has been nerfed + return nil, utils.HosterUnavailableError // Link has been nerfed case 34: - return nil, request.TrafficExceededError // traffic exceeded + return nil, utils.TrafficExceededError // traffic exceeded case 35: - return nil, request.HosterUnavailableError + return nil, utils.HosterUnavailableError case 36: - return nil, request.TrafficExceededError // traffic exceeded + return nil, utils.TrafficExceededError // traffic 
exceeded default: return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode) } } @@ -674,7 +710,7 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types downloadLink, err := r._getDownloadLink(file) retries := 0 if err != nil { - if errors.Is(err, request.TrafficExceededError) { + if errors.Is(err, utils.TrafficExceededError) { // Retries generating retries = 5 } else { @@ -688,7 +724,7 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types if err == nil { return downloadLink, nil } - if !errors.Is(err, request.TrafficExceededError) { + if !errors.Is(err, utils.TrafficExceededError) { return nil, err } // Add a delay before retrying @@ -750,7 +786,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, Links: t.Links, Files: make(map[string]types.File), InfoHash: t.Hash, - Debrid: r.Name, + Debrid: r.name, MountPath: r.MountPath, Added: t.Added.Format(time.RFC3339), }) @@ -941,3 +977,17 @@ func (r *RealDebrid) GetProfile() (*types.Profile, error) { } return profile, nil } + +func (r *RealDebrid) GetAvailableSlots() (int, error) { + url := fmt.Sprintf("%s/torrents/activeCount", r.Host) + req, _ := http.NewRequest(http.MethodGet, url, nil) + resp, err := r.client.MakeRequest(req) + if err != nil { + return 0, err + } + var data AvailableSlotsResponse + if err := json.Unmarshal(resp, &data); err != nil { + return 0, fmt.Errorf("error unmarshalling available slots response: %w", err) + } + return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain the configured minimum of free slots +} diff --git a/pkg/debrid/providers/realdebrid/types.go b/pkg/debrid/providers/realdebrid/types.go index 5195e60..f276e7d 100644 --- a/pkg/debrid/providers/realdebrid/types.go +++ b/pkg/debrid/providers/realdebrid/types.go @@ -151,3 +151,8 @@ type profileResponse struct { Premium int `json:"premium"` Expiration time.Time `json:"expiration"` } + +type 
AvailableSlotsResponse struct { + ActiveSlots int `json:"nb"` + TotalSlots int `json:"limit"` +} diff --git a/pkg/debrid/providers/torbox/torbox.go b/pkg/debrid/providers/torbox/torbox.go index 7f22280..337d598 100644 --- a/pkg/debrid/providers/torbox/torbox.go +++ b/pkg/debrid/providers/torbox/torbox.go @@ -24,7 +24,7 @@ import ( ) type Torbox struct { - Name string + name string Host string `json:"host"` APIKey string accounts map[string]types.Account @@ -67,7 +67,7 @@ func New(dc config.Debrid) (*Torbox, error) { } return &Torbox{ - Name: "torbox", + name: "torbox", Host: "https://api.torbox.app/v1", APIKey: dc.APIKey, accounts: accounts, @@ -80,11 +80,11 @@ func New(dc config.Debrid) (*Torbox, error) { }, nil } -func (tb *Torbox) GetName() string { - return tb.Name +func (tb *Torbox) Name() string { + return tb.name } -func (tb *Torbox) GetLogger() zerolog.Logger { +func (tb *Torbox) Logger() zerolog.Logger { return tb.logger } @@ -166,7 +166,7 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) { torrentId := strconv.Itoa(dt.Id) torrent.Id = torrentId torrent.MountPath = tb.MountPath - torrent.Debrid = tb.Name + torrent.Debrid = tb.name return torrent, nil } @@ -215,7 +215,7 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) { Filename: data.Name, OriginalFilename: data.Name, MountPath: tb.MountPath, - Debrid: tb.Name, + Debrid: tb.name, Files: make(map[string]types.File), Added: data.CreatedAt.Format(time.RFC3339), } @@ -250,7 +250,7 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) { } t.OriginalFilename = strings.Split(cleanPath, "/")[0] - t.Debrid = tb.Name + t.Debrid = tb.name return t, nil } @@ -279,7 +279,7 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error { t.Filename = name t.OriginalFilename = name t.MountPath = tb.MountPath - t.Debrid = tb.Name + t.Debrid = tb.name cfg := config.Get() for _, f := range data.Files { fileName := filepath.Base(f.Name) @@ -311,7 +311,7 
@@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error { } t.OriginalFilename = strings.Split(cleanPath, "/")[0] - t.Debrid = tb.Name + t.Debrid = tb.name return nil } @@ -470,3 +470,8 @@ func (tb *Torbox) ResetActiveDownloadKeys() { func (tb *Torbox) DeleteDownloadLink(linkId string) error { return nil } + +func (tb *Torbox) GetAvailableSlots() (int, error) { + //TODO: Implement the logic to check available slots for Torbox + return 0, fmt.Errorf("not implemented") +} diff --git a/pkg/debrid/store/cache.go b/pkg/debrid/store/cache.go index 0412645..5edb3f5 100644 --- a/pkg/debrid/store/cache.go +++ b/pkg/debrid/store/cache.go @@ -143,7 +143,7 @@ func NewDebridCache(dc config.Debrid, client types.Client) *Cache { customFolders = append(customFolders, name) } - _log := logger.New(fmt.Sprintf("%s-webdav", client.GetName())) + _log := logger.New(fmt.Sprintf("%s-webdav", client.Name())) c := &Cache{ dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files @@ -248,7 +248,7 @@ func (c *Cache) Start(ctx context.Context) error { go c.repairWorker(ctx) cfg := config.Get() - name := c.client.GetName() + name := c.client.Name() addr := cfg.BindAddress + ":" + cfg.Port + cfg.URLBase + "webdav/" + name + "/" c.logger.Info().Msgf("%s WebDav server running at %s", name, addr) @@ -379,7 +379,7 @@ func (c *Cache) Sync(ctx context.Context) error { totalTorrents := len(torrents) - c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.GetName()) + c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.Name()) newTorrents := make([]*types.Torrent, 0) idStore := make(map[string]struct{}, totalTorrents) @@ -719,7 +719,7 @@ func (c *Cache) Add(t *types.Torrent) error { } -func (c *Cache) GetClient() types.Client { +func (c *Cache) Client() types.Client { return c.client } @@ -866,6 +866,6 @@ func (c *Cache) RemoveFile(torrentId string, filename string) error { return nil } -func (c *Cache) GetLogger() zerolog.Logger { +func (c 
*Cache) Logger() zerolog.Logger { return c.logger } diff --git a/pkg/debrid/store/download_link.go b/pkg/debrid/store/download_link.go index a404d27..951cb1d 100644 --- a/pkg/debrid/store/download_link.go +++ b/pkg/debrid/store/download_link.go @@ -3,12 +3,11 @@ package store import ( "errors" "fmt" + "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/debrid/types" "sync" "time" - - "github.com/sirrobot01/decypharr/internal/request" ) type linkCache struct { @@ -146,7 +145,7 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link) downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file) if err != nil { - if errors.Is(err, request.HosterUnavailableError) { + if errors.Is(err, utils.HosterUnavailableError) { newCt, err := c.reInsertTorrent(ct) if err != nil { return "", fmt.Errorf("failed to reinsert torrent: %w", err) @@ -166,7 +165,7 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin } c.updateDownloadLink(downloadLink) return "", nil - } else if errors.Is(err, request.TrafficExceededError) { + } else if errors.Is(err, utils.TrafficExceededError) { // This is likely a fair usage limit error return "", err } else { diff --git a/pkg/debrid/store/misc.go b/pkg/debrid/store/misc.go index d0c089b..7908187 100644 --- a/pkg/debrid/store/misc.go +++ b/pkg/debrid/store/misc.go @@ -28,7 +28,7 @@ func mergeFiles(torrents ...CachedTorrent) map[string]types.File { func (c *Cache) GetIngests() ([]types.IngestData, error) { torrents := c.GetTorrents() - debridName := c.client.GetName() + debridName := c.client.Name() var ingests []types.IngestData for _, torrent := range torrents { ingests = append(ingests, types.IngestData{ diff --git a/pkg/debrid/store/repair.go b/pkg/debrid/store/repair.go index 9234995..8fbdb04 100644 --- a/pkg/debrid/store/repair.go +++ b/pkg/debrid/store/repair.go @@ -4,7 +4,6 
@@ import ( "context" "errors" "fmt" - "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/debrid/types" "sync" @@ -98,7 +97,7 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string { } else { // Check if file.Link not in the downloadLink Cache if err := c.client.CheckLink(f.Link); err != nil { - if errors.Is(err, request.HosterUnavailableError) { + if errors.Is(err, utils.HosterUnavailableError) { brokenFiles = append(brokenFiles, f.Name) } } diff --git a/pkg/debrid/types/client.go b/pkg/debrid/types/client.go index f9d967b..61b4f2b 100644 --- a/pkg/debrid/types/client.go +++ b/pkg/debrid/types/client.go @@ -16,8 +16,8 @@ type Client interface { UpdateTorrent(torrent *Torrent) error GetTorrent(torrentId string) (*Torrent, error) GetTorrents() ([]*Torrent, error) - GetName() string - GetLogger() zerolog.Logger + Name() string + Logger() zerolog.Logger GetDownloadingStatus() []string GetDownloads() (map[string]DownloadLink, error) CheckLink(link string) error @@ -26,4 +26,5 @@ type Client interface { ResetActiveDownloadKeys() DeleteDownloadLink(linkId string) error GetProfile() (*Profile, error) + GetAvailableSlots() (int, error) } diff --git a/pkg/qbit/context.go b/pkg/qbit/context.go index 893cc76..e6b941a 100644 --- a/pkg/qbit/context.go +++ b/pkg/qbit/context.go @@ -82,7 +82,7 @@ func (q *QBit) authContext(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { host, token, err := decodeAuthHeader(r.Header.Get("Authorization")) category := getCategory(r.Context()) - arrs := store.GetStore().GetArr() + arrs := store.Get().Arr() // Check if arr exists a := arrs.Get(category) if a == nil { diff --git a/pkg/qbit/qbit.go b/pkg/qbit/qbit.go index 04c92a4..77a4334 100644 --- a/pkg/qbit/qbit.go +++ b/pkg/qbit/qbit.go @@ -25,7 +25,7 @@ func New() *QBit { Password: cfg.Password, DownloadFolder: cfg.DownloadFolder, 
Categories: cfg.Categories, - storage: store.GetStore().GetTorrentStorage(), + storage: store.Get().Torrents(), logger: logger.New("qbit"), } } diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index 482a299..c3f37de 100644 --- a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -18,7 +18,7 @@ func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid s if err != nil { return fmt.Errorf("error parsing magnet link: %w", err) } - _store := store.GetStore() + _store := store.Get() importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) @@ -37,7 +37,7 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, if err != nil { return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err) } - _store := store.GetStore() + _store := store.Get() importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) err = _store.AddTorrent(ctx, importReq) if err != nil { diff --git a/pkg/repair/misc.go b/pkg/repair/misc.go index f2d2b64..d946a98 100644 --- a/pkg/repair/misc.go +++ b/pkg/repair/misc.go @@ -159,7 +159,7 @@ func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) } if filepath.Clean(mountPath) == filepath.Clean(dir) { - debridName := client.GetName() + debridName := client.Name() // Cache the result r.cacheMutex.Lock() diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 453fb29..9a6daf8 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -228,7 +228,7 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job { func (r *Repair) preRunChecks() error { if r.useWebdav { - caches := r.deb.GetCaches() + caches := r.deb.Caches() if len(caches) == 0 { return fmt.Errorf("no caches found") } @@ -639,13 +639,13 @@ func (r *Repair) getZurgBrokenFiles(job *Job, media arr.Content) []arr.ContentFi func (r *Repair) 
getWebdavBrokenFiles(job *Job, media arr.Content) []arr.ContentFile { // Use internal webdav setup to check file availability - caches := r.deb.GetCaches() + caches := r.deb.Caches() if len(caches) == 0 { r.logger.Info().Msg("No caches found. Can't use webdav") return nil } - clients := r.deb.GetClients() + clients := r.deb.Clients() if len(clients) == 0 { r.logger.Info().Msg("No clients found. Can't use webdav") return nil diff --git a/pkg/server/debug.go b/pkg/server/debug.go index 2b20b9a..4204e5a 100644 --- a/pkg/server/debug.go +++ b/pkg/server/debug.go @@ -12,13 +12,13 @@ import ( func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) { ingests := make([]debridTypes.IngestData, 0) - _store := store.GetStore() - debrids := _store.GetDebrid() + _store := store.Get() + debrids := _store.Debrid() if debrids == nil { http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError) return } - for _, cache := range debrids.GetCaches() { + for _, cache := range debrids.Caches() { if cache == nil { s.logger.Error().Msg("Debrid cache is nil, skipping") continue @@ -42,15 +42,15 @@ func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) { return } - _store := store.GetStore() - debrids := _store.GetDebrid() + _store := store.Get() + debrids := _store.Debrid() if debrids == nil { http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError) return } - caches := debrids.GetCaches() + caches := debrids.Caches() cache, exists := caches[debridName] if !exists { @@ -92,13 +92,13 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { "go_version": runtime.Version(), } - debrids := store.GetStore().GetDebrid() + debrids := store.Get().Debrid() if debrids == nil { request.JSONResponse(w, stats, http.StatusOK) return } - clients := debrids.GetClients() - caches := debrids.GetCaches() + clients := debrids.Clients() + caches := debrids.Caches() profiles := make([]*debridTypes.Profile, 0) for 
debridName, client := range clients { profile, err := client.GetProfile() diff --git a/pkg/server/webhook.go b/pkg/server/webhook.go index bc81ccb..fde99e9 100644 --- a/pkg/server/webhook.go +++ b/pkg/server/webhook.go @@ -38,7 +38,7 @@ func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) { http.Error(w, "Invalid ID", http.StatusBadRequest) return } - repair := store.GetStore().GetRepair() + repair := store.Get().Repair() mediaId := cmp.Or(payload.TmdbID, payload.TvdbID) diff --git a/pkg/store/downloader.go b/pkg/store/downloader.go index 29692f3..dc2244c 100644 --- a/pkg/store/downloader.go +++ b/pkg/store/downloader.go @@ -13,7 +13,7 @@ import ( "github.com/sirrobot01/decypharr/internal/utils" ) -func Download(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error { +func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error { req, err := grab.NewRequest(filename, url) if err != nil { return err @@ -56,7 +56,7 @@ Loop: return resp.Err() } -func (s *Store) ProcessManualFile(torrent *Torrent) (string, error) { +func (s *Store) processDownload(torrent *Torrent) (string, error) { debridTorrent := torrent.DebridTorrent s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files)) torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename)) @@ -96,7 +96,7 @@ func (s *Store) downloadFiles(torrent *Torrent, parent string) { if totalSize > 0 { debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100 } - s.UpdateTorrentMin(torrent, debridTorrent) + s.partialTorrentUpdate(torrent, debridTorrent) } client := &grab.Client{ UserAgent: "Decypharr[QBitTorrent]", @@ -119,7 +119,7 @@ func (s *Store) downloadFiles(torrent *Torrent, parent string) { defer func() { <-s.downloadSemaphore }() filename := file.Name - err := Download( + err := grabber( client, 
file.DownloadLink.DownloadLink, filepath.Join(parent, filename), @@ -151,7 +151,7 @@ func (s *Store) downloadFiles(torrent *Torrent, parent string) { s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name) } -func (s *Store) ProcessSymlink(torrent *Torrent) (string, error) { +func (s *Store) processSymlink(torrent *Torrent) (string, error) { debridTorrent := torrent.DebridTorrent files := debridTorrent.Files if len(files) == 0 { diff --git a/pkg/store/request.go b/pkg/store/request.go index 80439e0..86f0bca 100644 --- a/pkg/store/request.go +++ b/pkg/store/request.go @@ -2,13 +2,16 @@ package store import ( "bytes" + "context" "encoding/json" + "fmt" "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" "net/http" "net/url" + "sync" "time" ) @@ -19,23 +22,9 @@ const ( ImportTypeAPI ImportType = "api" ) -func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest { - return &ImportRequest{ - Status: "started", - DownloadFolder: downloadFolder, - Debrid: debrid, - Magnet: magnet, - Arr: arr, - IsSymlink: isSymlink, - DownloadUncached: downloadUncached, - CallBackUrl: callBackUrl, - Type: importType, - } -} - type ImportRequest struct { DownloadFolder string `json:"downloadFolder"` - Debrid string `json:"debrid"` + SelectedDebrid string `json:"debrid"` Magnet *utils.Magnet `json:"magnet"` Arr *arr.Arr `json:"arr"` IsSymlink bool `json:"isSymlink"` @@ -50,6 +39,20 @@ type ImportRequest struct { Async bool `json:"async"` } +func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest { + return &ImportRequest{ + Status: "started", + DownloadFolder: 
downloadFolder, + SelectedDebrid: debrid, + Magnet: magnet, + Arr: arr, + IsSymlink: isSymlink, + DownloadUncached: downloadUncached, + CallBackUrl: callBackUrl, + Type: importType, + } +} + type importResponse struct { Status string `json:"status"` CompletedAt time.Time `json:"completedAt"` @@ -101,3 +104,89 @@ func (i *ImportRequest) markAsCompleted(torrent *Torrent, debridTorrent *debridT i.CompletedAt = time.Now() i.sendCallback(torrent, debridTorrent) } + +type ImportQueue struct { + queue map[string]chan *ImportRequest // Map to hold queues for different debrid services + mu sync.RWMutex // Mutex to protect access to the queue map + ctx context.Context + cancel context.CancelFunc + capacity int // Capacity of each channel in the queue +} + +func NewImportQueue(ctx context.Context, capacity int) *ImportQueue { + ctx, cancel := context.WithCancel(ctx) + return &ImportQueue{ + queue: make(map[string]chan *ImportRequest), + ctx: ctx, + cancel: cancel, + capacity: capacity, + } +} + +func (iq *ImportQueue) Push(req *ImportRequest) error { + if req == nil { + return fmt.Errorf("import request cannot be nil") + } + + iq.mu.Lock() + defer iq.mu.Unlock() + + if _, exists := iq.queue[req.SelectedDebrid]; !exists { + iq.queue[req.SelectedDebrid] = make(chan *ImportRequest, iq.capacity) // Create a new channel for the debrid service + } + + select { + case iq.queue[req.SelectedDebrid] <- req: + return nil + case <-iq.ctx.Done(): + return fmt.Errorf("retry queue is shutting down") + } +} + +func (iq *ImportQueue) TryPop(selectedDebrid string) (*ImportRequest, error) { + iq.mu.RLock() + defer iq.mu.RUnlock() + + if ch, exists := iq.queue[selectedDebrid]; exists { + select { + case req := <-ch: + return req, nil + case <-iq.ctx.Done(): + return nil, fmt.Errorf("queue is shutting down") + default: + return nil, fmt.Errorf("no import request available for %s", selectedDebrid) + } + } + return nil, fmt.Errorf("no queue exists for %s", selectedDebrid) +} + +func (iq 
*ImportQueue) Size(selectedDebrid string) int { + iq.mu.RLock() + defer iq.mu.RUnlock() + + if ch, exists := iq.queue[selectedDebrid]; exists { + return len(ch) + } + return 0 +} + +func (iq *ImportQueue) Close() { + iq.cancel() + iq.mu.Lock() + defer iq.mu.Unlock() + + for _, ch := range iq.queue { + // Drain remaining items before closing + for { + select { + case <-ch: + // Discard remaining items + default: + close(ch) + goto nextChannel + } + } + nextChannel: + } + iq.queue = make(map[string]chan *ImportRequest) +} diff --git a/pkg/store/store.go b/pkg/store/store.go index d404152..4492ffc 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -2,6 +2,7 @@ package store import ( "cmp" + "context" "github.com/rs/zerolog" "github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/logger" @@ -16,6 +17,7 @@ type Store struct { repair *repair.Repair arr *arr.Storage debrid *debrid.Storage + importsQueue *ImportQueue // Queued import requests(probably from too_many_active_downloads) torrents *TorrentStorage logger zerolog.Logger refreshInterval time.Duration @@ -28,8 +30,8 @@ var ( once sync.Once ) -// GetStore returns the singleton instance -func GetStore() *Store { +// Get returns the singleton instance +func Get() *Store { once.Do(func() { arrs := arr.NewStorage() deb := debrid.NewStorage() @@ -45,6 +47,7 @@ func GetStore() *Store { refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, skipPreCache: qbitCfg.SkipPreCache, downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), + importsQueue: NewImportQueue(context.Background(), 1000), } }) return instance @@ -55,21 +58,26 @@ func Reset() { if instance.debrid != nil { instance.debrid.Reset() } + + if instance.importsQueue != nil { + instance.importsQueue.Close() + } + close(instance.downloadSemaphore) } once = sync.Once{} instance = nil } -func (s *Store) GetArr() *arr.Storage { +func (s *Store) Arr() *arr.Storage { return s.arr } 
-func (s *Store) GetDebrid() *debrid.Storage { +func (s *Store) Debrid() *debrid.Storage { return s.debrid } -func (s *Store) GetRepair() *repair.Repair { +func (s *Store) Repair() *repair.Repair { return s.repair } -func (s *Store) GetTorrentStorage() *TorrentStorage { +func (s *Store) Torrents() *TorrentStorage { return s.torrents } diff --git a/pkg/store/torrent.go b/pkg/store/torrent.go index 419c28b..22798b6 100644 --- a/pkg/store/torrent.go +++ b/pkg/store/torrent.go @@ -3,6 +3,7 @@ package store import ( "cmp" "context" + "errors" "fmt" "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" @@ -15,22 +16,125 @@ import ( func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error { torrent := createTorrentFromMagnet(importReq) - debridTorrent, err := debridTypes.ProcessTorrent(ctx, s.debrid, importReq.Debrid, importReq.Magnet, importReq.Arr, importReq.IsSymlink, importReq.DownloadUncached) - if err != nil || debridTorrent == nil { - if err == nil { - err = fmt.Errorf("failed to process torrent") + debridTorrent, err := debridTypes.Process(ctx, s.debrid, importReq.SelectedDebrid, importReq.Magnet, importReq.Arr, importReq.IsSymlink, importReq.DownloadUncached) + + if err != nil { + var httpErr *utils.HTTPError + if ok := errors.As(err, &httpErr); ok { + switch httpErr.Code { + case "too_many_active_downloads": + // Handle too much active downloads error + s.logger.Warn().Msgf("Too many active downloads for %s, adding to queue", importReq.Magnet.Name) + err := s.addToQueue(importReq) + if err != nil { + s.logger.Error().Err(err).Msgf("Failed to add %s to queue", importReq.Magnet.Name) + return err + } + torrent.State = "queued" + default: + // Unhandled error, return it, caller logs it + return err + } + } else { + // Unhandled error, return it, caller logs it + return err } - // This error is returned immediately to the user(no need for callback) - return err } - torrent = 
s.UpdateTorrentMin(torrent, debridTorrent) + torrent = s.partialTorrentUpdate(torrent, debridTorrent) s.torrents.AddOrUpdate(torrent) go s.processFiles(torrent, debridTorrent, importReq) // We can send async for file processing not to delay the response return nil } +func (s *Store) addToQueue(importReq *ImportRequest) error { + if importReq.Magnet == nil { + return fmt.Errorf("magnet is required") + } + + if importReq.Arr == nil { + return fmt.Errorf("arr is required") + } + + importReq.Status = "queued" + importReq.CompletedAt = time.Time{} + importReq.Error = nil + err := s.importsQueue.Push(importReq) + if err != nil { + return err + } + return nil +} + +func (s *Store) processFromQueue(ctx context.Context, selectedDebrid string) error { + // Pop the next import request from the queue + importReq, err := s.importsQueue.TryPop(selectedDebrid) + if err != nil { + return err + } + if importReq == nil { + return nil + } + return s.AddTorrent(ctx, importReq) +} + +func (s *Store) StartQueueSchedule(ctx context.Context) error { + + s.trackAvailableSlots(ctx) // Initial tracking of available slots + + ticker := time.NewTicker(time.Minute) + + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + s.trackAvailableSlots(ctx) + } + } +} + +func (s *Store) trackAvailableSlots(ctx context.Context) { + // This function tracks the available slots for each debrid client + availableSlots := make(map[string]int) + + for name, deb := range s.debrid.Debrids() { + slots, err := deb.Client().GetAvailableSlots() + if err != nil { + continue + } + availableSlots[name] = slots + } + + for name, slots := range availableSlots { + if s.importsQueue.Size(name) <= 0 { + continue + } + s.logger.Debug().Msgf("Available slots for %s: %d", name, slots) + // If slots are available, process the next import request from the queue + for slots > 0 { + select { + case <-ctx.Done(): + return // Exit if context is done + default: + if err := s.processFromQueue(ctx, name); err != nil { 
+ s.logger.Error().Err(err).Msg("Error processing from queue") + return // Exit on error + } + slots-- // Decrease the available slots after processing + } + } + } +} + func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, importReq *ImportRequest) { - client := s.debrid.GetClient(debridTorrent.Debrid) + + if debridTorrent == nil { + // Early return if debridTorrent is nil + return + } + + deb := s.debrid.Debrid(debridTorrent.Debrid) + client := deb.Client() downloadingStatuses := client.GetDownloadingStatus() _arr := importReq.Arr for debridTorrent.Status != "downloaded" { @@ -53,7 +157,7 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp } debridTorrent = dbT - torrent = s.UpdateTorrentMin(torrent, debridTorrent) + torrent = s.partialTorrentUpdate(torrent, debridTorrent) // Exit the loop for downloading statuses to prevent memory buildup if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) { @@ -71,9 +175,8 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp // Check if debrid supports webdav by checking cache timer := time.Now() if importReq.IsSymlink { - caches := s.debrid.GetCaches() - cache, useWebdav := caches[debridTorrent.Debrid] - if useWebdav { + cache := deb.Cache() + if cache != nil { s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) // Use webdav to download the file @@ -91,10 +194,10 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp } else { // User is using either zurg or debrid webdav - torrentSymlinkPath, err = s.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ + torrentSymlinkPath, err = s.processSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ } } else { - torrentSymlinkPath, err = s.ProcessManualFile(torrent) + torrentSymlinkPath, err = s.processDownload(torrent) } if err != nil { s.markTorrentAsFailed(torrent) @@ -106,7 +209,7 @@ 
func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp return } torrent.TorrentPath = torrentSymlinkPath - s.UpdateTorrent(torrent, debridTorrent) + s.updateTorrent(torrent, debridTorrent) s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer)) go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed @@ -129,7 +232,7 @@ func (s *Store) markTorrentAsFailed(t *Torrent) *Torrent { return t } -func (s *Store) UpdateTorrentMin(t *Torrent, debridTorrent *types.Torrent) *Torrent { +func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *Torrent { if debridTorrent == nil { return t } @@ -170,17 +273,17 @@ func (s *Store) UpdateTorrentMin(t *Torrent, debridTorrent *types.Torrent) *Torr return t } -func (s *Store) UpdateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent { +func (s *Store) updateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent { if debridTorrent == nil { return t } - if debridClient := s.debrid.GetClients()[debridTorrent.Debrid]; debridClient != nil { + if debridClient := s.debrid.Clients()[debridTorrent.Debrid]; debridClient != nil { if debridTorrent.Status != "downloaded" { _ = debridClient.UpdateTorrent(debridTorrent) } } - t = s.UpdateTorrentMin(t, debridTorrent) + t = s.partialTorrentUpdate(t, debridTorrent) t.ContentPath = t.TorrentPath + string(os.PathSeparator) if t.IsReady() { @@ -200,7 +303,7 @@ func (s *Store) UpdateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent s.torrents.Update(t) return t } - updatedT := s.UpdateTorrent(t, debridTorrent) + updatedT := s.updateTorrent(t, debridTorrent) t = updatedT case <-time.After(10 * time.Minute): // Add a timeout diff --git a/pkg/store/torrent_storage.go b/pkg/store/torrent_storage.go index c6f9b8f..e55dfab 100644 --- a/pkg/store/torrent_storage.go +++ b/pkg/store/torrent_storage.go @@ -184,7 +184,7 @@ func (ts *TorrentStorage) Delete(hash, 
category string, removeFromDebrid bool) { return } if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" { - dbClient := GetStore().debrid.GetClient(torrent.Debrid) + dbClient := Get().debrid.Client(torrent.Debrid) if dbClient != nil { _ = dbClient.DeleteTorrent(torrent.ID) } @@ -238,7 +238,7 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool) } }() - clients := GetStore().debrid.GetClients() + clients := Get().debrid.Clients() go func() { for id, debrid := range toDelete { diff --git a/pkg/web/api.go b/pkg/web/api.go index a9f6aba..9c53e4c 100644 --- a/pkg/web/api.go +++ b/pkg/web/api.go @@ -17,8 +17,8 @@ import ( ) func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) { - _store := store.GetStore() - request.JSONResponse(w, _store.GetArr().GetAll(), http.StatusOK) + _store := store.Get() + request.JSONResponse(w, _store.Arr().GetAll(), http.StatusOK) } func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { @@ -27,7 +27,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - _store := store.GetStore() + _store := store.Get() results := make([]*store.ImportRequest, 0) errs := make([]string, 0) @@ -43,7 +43,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { downloadUncached := r.FormValue("downloadUncached") == "true" - _arr := _store.GetArr().Get(arrName) + _arr := _store.Arr().Get(arrName) if _arr == nil { _arr = arr.New(arrName, "", "", false, false, &downloadUncached) } @@ -66,6 +66,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) if err := _store.AddTorrent(ctx, importReq); err != nil { + wb.logger.Error().Err(err).Str("url", url).Msg("Failed to add torrent") errs = append(errs, fmt.Sprintf("URL %s: %v", 
url, err)) continue } @@ -91,6 +92,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) err = _store.AddTorrent(ctx, importReq) if err != nil { + wb.logger.Error().Err(err).Str("file", fileHeader.Filename).Msg("Failed to add torrent") errs = append(errs, fmt.Sprintf("File %s: %v", fileHeader.Filename, err)) continue } @@ -114,12 +116,12 @@ func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) { return } - _store := store.GetStore() + _store := store.Get() var arrs []string if req.ArrName != "" { - _arr := _store.GetArr().Get(req.ArrName) + _arr := _store.Arr().Get(req.ArrName) if _arr == nil { http.Error(w, "No Arrs found to repair", http.StatusNotFound) return @@ -129,7 +131,7 @@ func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) { if req.Async { go func() { - if err := _store.GetRepair().AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil { + if err := _store.Repair().AddJob(arrs, req.MediaIds, req.AutoProcess, false); err != nil { wb.logger.Error().Err(err).Msg("Failed to repair media") } }() @@ -137,7 +139,7 @@ func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) { return } - if err := _store.GetRepair().AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil { + if err := _store.Repair().AddJob([]string{req.ArrName}, req.MediaIds, req.AutoProcess, false); err != nil { http.Error(w, fmt.Sprintf("Failed to repair: %v", err), http.StatusInternalServerError) return } @@ -181,8 +183,8 @@ func (wb *Web) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) { func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) { cfg := config.Get() arrCfgs := make([]config.Arr, 0) - _store := store.GetStore() - for _, a := range _store.GetArr().GetAll() { + _store := store.Get() + for _, a := range 
_store.Arr().GetAll() { arrCfgs = append(arrCfgs, config.Arr{ Host: a.Host, Name: a.Name, @@ -237,8 +239,8 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { } // Update Arrs through the service - _store := store.GetStore() - _arr := _store.GetArr() + _store := store.Get() + _arr := _store.Arr() _arr.Clear() // Clear existing arrs for _, a := range updatedConfig.Arrs { @@ -270,8 +272,8 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { } func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) { - _store := store.GetStore() - request.JSONResponse(w, _store.GetRepair().GetJobs(), http.StatusOK) + _store := store.Get() + request.JSONResponse(w, _store.Repair().GetJobs(), http.StatusOK) } func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) { @@ -280,8 +282,8 @@ func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) { http.Error(w, "No job ID provided", http.StatusBadRequest) return } - _store := store.GetStore() - if err := _store.GetRepair().ProcessJob(id); err != nil { + _store := store.Get() + if err := _store.Repair().ProcessJob(id); err != nil { wb.logger.Error().Err(err).Msg("Failed to process repair job") } w.WriteHeader(http.StatusOK) @@ -301,8 +303,8 @@ func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) { return } - _store := store.GetStore() - _store.GetRepair().DeleteJobs(req.IDs) + _store := store.Get() + _store.Repair().DeleteJobs(req.IDs) w.WriteHeader(http.StatusOK) } @@ -312,8 +314,8 @@ func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) { http.Error(w, "No job ID provided", http.StatusBadRequest) return } - _store := store.GetStore() - if err := _store.GetRepair().StopJob(id); err != nil { + _store := store.Get() + if err := _store.Repair().StopJob(id); err != nil { wb.logger.Error().Err(err).Msg("Failed to stop repair job") http.Error(w, "Failed to stop job: "+err.Error(), 
http.StatusInternalServerError) return diff --git a/pkg/web/web.go b/pkg/web/web.go index aa20c58..3c24d87 100644 --- a/pkg/web/web.go +++ b/pkg/web/web.go @@ -79,6 +79,6 @@ func New() *Web { logger: logger.New("ui"), templates: templates, cookie: cookieStore, - torrents: store.GetStore().GetTorrentStorage(), + torrents: store.Get().Torrents(), } } diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 3833b5f..2d6bc7a 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -86,7 +86,7 @@ func (f *File) getDownloadByteRange() (*[2]int64, error) { func (f *File) stream() (*http.Response, error) { client := sharedClient - _log := f.cache.GetLogger() + _log := f.cache.Logger() downloadLink, err := f.getDownloadLink() if err != nil { diff --git a/pkg/webdav/webdav.go b/pkg/webdav/webdav.go index 7259234..6a52b6b 100644 --- a/pkg/webdav/webdav.go +++ b/pkg/webdav/webdav.go @@ -95,8 +95,8 @@ func New() *WebDav { Handlers: make([]*Handler, 0), URLBase: urlBase, } - for name, c := range store.GetStore().GetDebrid().GetCaches() { - h := NewHandler(name, urlBase, c, c.GetLogger()) + for name, c := range store.Get().Debrid().Caches() { + h := NewHandler(name, urlBase, c, c.Logger()) w.Handlers = append(w.Handlers, h) } return w From 3efda45304b26371392e72374fd66c22998bf26a Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sun, 8 Jun 2025 19:06:17 +0100 Subject: [PATCH 12/26] - IMplement multi-download api tokens - Move things around a bit --- internal/config/config.go | 10 +- pkg/debrid/providers/alldebrid/alldebrid.go | 93 +++---- .../providers/debrid_link/debrid_link.go | 137 ++++++----- pkg/debrid/providers/realdebrid/realdebrid.go | 220 +++++++---------- pkg/debrid/providers/torbox/torbox.go | 84 +++---- pkg/debrid/store/cache.go | 39 ++- pkg/debrid/store/download_link.go | 162 ++++-------- pkg/debrid/store/refresh.go | 20 +- pkg/debrid/store/repair.go | 2 +- pkg/debrid/types/account.go | 230 ++++++++++++++++++ pkg/debrid/types/client.go | 8 +- 
pkg/debrid/types/error.go | 30 +++ pkg/debrid/types/torrent.go | 37 ++- 13 files changed, 607 insertions(+), 465 deletions(-) create mode 100644 pkg/debrid/types/account.go create mode 100644 pkg/debrid/types/error.go diff --git a/internal/config/config.go b/internal/config/config.go index dbe079e..d84401d 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -277,9 +277,15 @@ func (c *Config) updateDebrid(d Debrid) Debrid { workers := runtime.NumCPU() * 50 perDebrid := workers / len(c.Debrids) - if len(d.DownloadAPIKeys) == 0 { - d.DownloadAPIKeys = append(d.DownloadAPIKeys, d.APIKey) + var downloadKeys []string + + if len(d.DownloadAPIKeys) > 0 { + downloadKeys = d.DownloadAPIKeys + } else { + // If no download API keys are specified, use the main API key + downloadKeys = []string{d.APIKey} } + d.DownloadAPIKeys = downloadKeys if !d.UseWebDav { return d diff --git a/pkg/debrid/providers/alldebrid/alldebrid.go b/pkg/debrid/providers/alldebrid/alldebrid.go index a8abaa6..e635992 100644 --- a/pkg/debrid/providers/alldebrid/alldebrid.go +++ b/pkg/debrid/providers/alldebrid/alldebrid.go @@ -18,12 +18,13 @@ import ( ) type AllDebrid struct { - name string - Host string `json:"host"` - APIKey string - accounts map[string]types.Account - DownloadUncached bool - client *request.Client + name string + Host string `json:"host"` + APIKey string + accounts *types.Accounts + autoExpiresLinksAfter time.Duration + DownloadUncached bool + client *request.Client MountPath string logger zerolog.Logger @@ -50,27 +51,23 @@ func New(dc config.Debrid) (*AllDebrid, error) { request.WithProxy(dc.Proxy), ) - accounts := make(map[string]types.Account) - for idx, key := range dc.DownloadAPIKeys { - id := strconv.Itoa(idx) - accounts[id] = types.Account{ - Name: key, - ID: id, - Token: key, - } + autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter) + if autoExpiresLinksAfter == 0 || err != nil { + autoExpiresLinksAfter = 48 * time.Hour } return 
&AllDebrid{ - name: "alldebrid", - Host: "http://api.alldebrid.com/v4.1", - APIKey: dc.APIKey, - accounts: accounts, - DownloadUncached: dc.DownloadUncached, - client: client, - MountPath: dc.Folder, - logger: logger.New(dc.Name), - checkCached: dc.CheckCached, - addSamples: dc.AddSamples, - minimumFreeSlot: dc.MinimumFreeSlot, + name: "alldebrid", + Host: "http://api.alldebrid.com/v4.1", + APIKey: dc.APIKey, + accounts: types.NewAccounts(dc), + DownloadUncached: dc.DownloadUncached, + autoExpiresLinksAfter: autoExpiresLinksAfter, + client: client, + MountPath: dc.Folder, + logger: logger.New(dc.Name), + checkCached: dc.CheckCached, + addSamples: dc.AddSamples, + minimumFreeSlot: dc.MinimumFreeSlot, }, nil } @@ -273,8 +270,8 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types if status == "downloaded" { ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) if !isSymlink { - err = ad.GenerateDownloadLinks(torrent) - if err != nil { + + if err = ad.GetFileDownloadLinks(torrent); err != nil { return torrent, err } } @@ -304,8 +301,9 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error { return nil } -func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error { +func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error { filesCh := make(chan types.File, len(t.Files)) + linksCh := make(chan *types.DownloadLink, len(t.Files)) errCh := make(chan error, len(t.Files)) var wg sync.WaitGroup @@ -318,17 +316,19 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error { errCh <- err return } - file.DownloadLink = link if link != nil { errCh <- fmt.Errorf("download link is empty") return } + linksCh <- link + file.DownloadLink = link filesCh <- file }(file) } go func() { wg.Wait() close(filesCh) + close(linksCh) close(errCh) }() files := make(map[string]types.File, len(t.Files)) @@ -336,10 +336,22 @@ func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error { files[file.Name] = file } + // 
Collect download links + links := make(map[string]*types.DownloadLink, len(t.Files)) + + for link := range linksCh { + if link == nil { + continue + } + links[link.Link] = link + } + // Update the files with download links + ad.accounts.SetDownloadLinks(links) + // Check for errors for err := range errCh { if err != nil { - return err // Return the first error encountered + return err } } @@ -369,21 +381,18 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types if link == "" { return nil, fmt.Errorf("download link is empty") } + now := time.Now() return &types.DownloadLink{ Link: file.Link, DownloadLink: link, Id: data.Data.Id, Size: file.Size, Filename: file.Name, - Generated: time.Now(), - AccountId: "0", + Generated: now, + ExpiresAt: now.Add(ad.autoExpiresLinksAfter), }, nil } -func (ad *AllDebrid) GetCheckCached() bool { - return ad.checkCached -} - func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) { url := fmt.Sprintf("%s/magnet/status?status=ready", ad.Host) req, _ := http.NewRequest(http.MethodGet, url, nil) @@ -417,7 +426,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) { return torrents, nil } -func (ad *AllDebrid) GetDownloads() (map[string]types.DownloadLink, error) { +func (ad *AllDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) { return nil, nil } @@ -437,12 +446,6 @@ func (ad *AllDebrid) GetMountPath() string { return ad.MountPath } -func (ad *AllDebrid) DisableAccount(accountId string) { -} - -func (ad *AllDebrid) ResetActiveDownloadKeys() { - -} func (ad *AllDebrid) DeleteDownloadLink(linkId string) error { return nil } @@ -452,3 +455,7 @@ func (ad *AllDebrid) GetAvailableSlots() (int, error) { //TODO: Implement the logic to check available slots for AllDebrid return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid") } + +func (ad *AllDebrid) Accounts() *types.Accounts { + return ad.accounts +} diff --git a/pkg/debrid/providers/debrid_link/debrid_link.go 
b/pkg/debrid/providers/debrid_link/debrid_link.go index ffa3311..a109495 100644 --- a/pkg/debrid/providers/debrid_link/debrid_link.go +++ b/pkg/debrid/providers/debrid_link/debrid_link.go @@ -10,7 +10,6 @@ import ( "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/debrid/types" - "strconv" "time" "net/http" @@ -21,10 +20,12 @@ type DebridLink struct { name string Host string `json:"host"` APIKey string - accounts map[string]types.Account + accounts *types.Accounts DownloadUncached bool client *request.Client + autoExpiresLinksAfter time.Duration + MountPath string logger zerolog.Logger checkCached bool @@ -46,26 +47,22 @@ func New(dc config.Debrid) (*DebridLink, error) { request.WithProxy(dc.Proxy), ) - accounts := make(map[string]types.Account) - for idx, key := range dc.DownloadAPIKeys { - id := strconv.Itoa(idx) - accounts[id] = types.Account{ - Name: key, - ID: id, - Token: key, - } + autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter) + if autoExpiresLinksAfter == 0 || err != nil { + autoExpiresLinksAfter = 48 * time.Hour } return &DebridLink{ - name: "debridlink", - Host: "https://debrid-link.com/api/v2", - APIKey: dc.APIKey, - accounts: accounts, - DownloadUncached: dc.DownloadUncached, - client: client, - MountPath: dc.Folder, - logger: logger.New(dc.Name), - checkCached: dc.CheckCached, - addSamples: dc.AddSamples, + name: "debridlink", + Host: "https://debrid-link.com/api/v2", + APIKey: dc.APIKey, + accounts: types.NewAccounts(dc), + DownloadUncached: dc.DownloadUncached, + autoExpiresLinksAfter: autoExpiresLinksAfter, + client: client, + MountPath: dc.Folder, + logger: logger.New(dc.Name), + checkCached: dc.CheckCached, + addSamples: dc.AddSamples, }, nil } @@ -177,14 +174,7 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) { Name: f.Name, Size: f.Size, Path: f.Name, - DownloadLink: &types.DownloadLink{ - Filename: 
f.Name, - Link: f.DownloadURL, - DownloadLink: f.DownloadURL, - Generated: time.Now(), - AccountId: "0", - }, - Link: f.DownloadURL, + Link: f.DownloadURL, } torrent.Files[file.Name] = file } @@ -233,6 +223,8 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error { t.OriginalFilename = name t.Added = time.Unix(data.Created, 0).Format(time.RFC3339) cfg := config.Get() + links := make(map[string]*types.DownloadLink) + now := time.Now() for _, f := range data.Files { if !cfg.IsSizeAllowed(f.Size) { continue @@ -243,17 +235,21 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error { Name: f.Name, Size: f.Size, Path: f.Name, - DownloadLink: &types.DownloadLink{ - Filename: f.Name, - Link: f.DownloadURL, - DownloadLink: f.DownloadURL, - Generated: time.Now(), - AccountId: "0", - }, - Link: f.DownloadURL, + Link: f.DownloadURL, } + link := &types.DownloadLink{ + Filename: f.Name, + Link: f.DownloadURL, + DownloadLink: f.DownloadURL, + Generated: now, + ExpiresAt: now.Add(dl.autoExpiresLinksAfter), + } + links[file.Link] = link + file.DownloadLink = link t.Files[f.Name] = file } + + dl.accounts.SetDownloadLinks(links) return nil } @@ -290,6 +286,9 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) { t.MountPath = dl.MountPath t.Debrid = dl.name t.Added = time.Unix(data.Created, 0).Format(time.RFC3339) + + links := make(map[string]*types.DownloadLink) + now := time.Now() for _, f := range data.Files { file := types.File{ TorrentId: t.Id, @@ -298,18 +297,22 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) { Size: f.Size, Path: f.Name, Link: f.DownloadURL, - DownloadLink: &types.DownloadLink{ - Filename: f.Name, - Link: f.DownloadURL, - DownloadLink: f.DownloadURL, - Generated: time.Now(), - AccountId: "0", - }, - Generated: time.Now(), + Generated: now, } + link := &types.DownloadLink{ + Filename: f.Name, + Link: f.DownloadURL, + DownloadLink: f.DownloadURL, + Generated: now, + ExpiresAt: 
now.Add(dl.autoExpiresLinksAfter), + } + links[file.Link] = link + file.DownloadLink = link t.Files[f.Name] = file } + dl.accounts.SetDownloadLinks(links) + return t, nil } @@ -322,8 +325,8 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type status := torrent.Status if status == "downloaded" { dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) - err = dl.GenerateDownloadLinks(torrent) - if err != nil { + + if err = dl.GetFileDownloadLinks(torrent); err != nil { return torrent, err } break @@ -352,27 +355,23 @@ func (dl *DebridLink) DeleteTorrent(torrentId string) error { return nil } -func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error { +func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error { // Download links are already generated return nil } -func (dl *DebridLink) GetDownloads() (map[string]types.DownloadLink, error) { +func (dl *DebridLink) GetDownloadLinks() (map[string]*types.DownloadLink, error) { return nil, nil } func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) { - return file.DownloadLink, nil + return dl.accounts.GetDownloadLink(file.Link) } func (dl *DebridLink) GetDownloadingStatus() []string { return []string{"downloading"} } -func (dl *DebridLink) GetCheckCached() bool { - return dl.checkCached -} - func (dl *DebridLink) GetDownloadUncached() bool { return dl.DownloadUncached } @@ -411,6 +410,7 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) { } data := *res.Value + links := make(map[string]*types.DownloadLink) if len(data) == 0 { return torrents, nil @@ -433,6 +433,7 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) { Added: time.Unix(t.Created, 0).Format(time.RFC3339), } cfg := config.Get() + now := time.Now() for _, f := range t.Files { if !cfg.IsSizeAllowed(f.Size) { continue @@ -443,19 +444,23 @@ func (dl *DebridLink) getTorrents(page, perPage int) 
([]*types.Torrent, error) { Name: f.Name, Size: f.Size, Path: f.Name, - DownloadLink: &types.DownloadLink{ - Filename: f.Name, - Link: f.DownloadURL, - DownloadLink: f.DownloadURL, - Generated: time.Now(), - AccountId: "0", - }, - Link: f.DownloadURL, + Link: f.DownloadURL, } + link := &types.DownloadLink{ + Filename: f.Name, + Link: f.DownloadURL, + DownloadLink: f.DownloadURL, + Generated: now, + ExpiresAt: now.Add(dl.autoExpiresLinksAfter), + } + links[file.Link] = link + file.DownloadLink = link torrent.Files[f.Name] = file } torrents = append(torrents, torrent) } + dl.accounts.SetDownloadLinks(links) + return torrents, nil } @@ -467,12 +472,6 @@ func (dl *DebridLink) GetMountPath() string { return dl.MountPath } -func (dl *DebridLink) DisableAccount(accountId string) { -} - -func (dl *DebridLink) ResetActiveDownloadKeys() { -} - func (dl *DebridLink) DeleteDownloadLink(linkId string) error { return nil } @@ -481,3 +480,7 @@ func (dl *DebridLink) GetAvailableSlots() (int, error) { //TODO: Implement the logic to check available slots for DebridLink return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink") } + +func (dl *DebridLink) Accounts() *types.Accounts { + return dl.accounts +} diff --git a/pkg/debrid/providers/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go index 297a075..ff102fa 100644 --- a/pkg/debrid/providers/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -10,7 +10,6 @@ import ( "net/http" gourl "net/url" "path/filepath" - "sort" "strconv" "strings" "sync" @@ -28,14 +27,13 @@ type RealDebrid struct { name string Host string `json:"host"` - APIKey string - currentDownloadKey string - accounts map[string]types.Account - accountsMutex sync.RWMutex + APIKey string + accounts *types.Accounts - DownloadUncached bool - client *request.Client - downloadClient *request.Client + DownloadUncached bool + client *request.Client + downloadClient *request.Client + autoExpiresLinksAfter 
time.Duration MountPath string logger zerolog.Logger @@ -57,28 +55,19 @@ func New(dc config.Debrid) (*RealDebrid, error) { } _log := logger.New(dc.Name) - accounts := make(map[string]types.Account) - currentDownloadKey := dc.DownloadAPIKeys[0] - for idx, key := range dc.DownloadAPIKeys { - id := strconv.Itoa(idx) - accounts[id] = types.Account{ - Name: key, - ID: id, - Token: key, - } - } - - downloadHeaders := map[string]string{ - "Authorization": fmt.Sprintf("Bearer %s", currentDownloadKey), + autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter) + if autoExpiresLinksAfter == 0 || err != nil { + autoExpiresLinksAfter = 48 * time.Hour } r := &RealDebrid{ - name: "realdebrid", - Host: "https://api.real-debrid.com/rest/1.0", - APIKey: dc.APIKey, - accounts: accounts, - DownloadUncached: dc.DownloadUncached, - UnpackRar: dc.UnpackRar, + name: "realdebrid", + Host: "https://api.real-debrid.com/rest/1.0", + APIKey: dc.APIKey, + accounts: types.NewAccounts(dc), + DownloadUncached: dc.DownloadUncached, + autoExpiresLinksAfter: autoExpiresLinksAfter, + UnpackRar: dc.UnpackRar, client: request.New( request.WithHeaders(headers), request.WithRateLimiter(rl), @@ -88,19 +77,17 @@ func New(dc config.Debrid) (*RealDebrid, error) { request.WithProxy(dc.Proxy), ), downloadClient: request.New( - request.WithHeaders(downloadHeaders), request.WithLogger(_log), request.WithMaxRetries(10), request.WithRetryableStatus(429, 447, 502), request.WithProxy(dc.Proxy), ), - currentDownloadKey: currentDownloadKey, - MountPath: dc.Folder, - logger: logger.New(dc.Name), - rarSemaphore: make(chan struct{}, 2), - checkCached: dc.CheckCached, - addSamples: dc.AddSamples, - minimumFreeSlot: dc.MinimumFreeSlot, + MountPath: dc.Folder, + logger: logger.New(dc.Name), + rarSemaphore: make(chan struct{}, 2), + checkCached: dc.CheckCached, + addSamples: dc.AddSamples, + minimumFreeSlot: dc.MinimumFreeSlot, } if _, err := r.GetProfile(); err != nil { @@ -182,7 +169,6 @@ func (r 
*RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select ByteRange: nil, Path: t.Name + ".rar", Link: data.Links[0], - AccountId: selectedFiles[0].AccountId, Generated: time.Now(), } files[file.Name] = file @@ -219,19 +205,14 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select fileMap[safeName] = &selectedFiles[i] } + now := time.Now() + for _, rarFile := range rarFiles { if file, exists := fileMap[rarFile.Name()]; exists { file.IsRar = true file.ByteRange = rarFile.ByteRange() file.Link = data.Links[0] - file.DownloadLink = &types.DownloadLink{ - Link: data.Links[0], - DownloadLink: dlLink, - Filename: file.Name, - Size: file.Size, - Generated: time.Now(), - } - + file.Generated = now files[file.Name] = *file } else if !rarFile.IsDirectory { r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name()) @@ -545,8 +526,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name) if !isSymlink { - err = r.GenerateDownloadLinks(t) - if err != nil { + if err = r.GetFileDownloadLinks(t); err != nil { return t, err } } @@ -574,9 +554,10 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) error { return nil } -func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { +func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error { filesCh := make(chan types.File, len(t.Files)) errCh := make(chan error, len(t.Files)) + linksCh := make(chan *types.DownloadLink) var wg sync.WaitGroup wg.Add(len(t.Files)) @@ -589,7 +570,11 @@ func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { errCh <- err return } - + if link == nil { + errCh <- fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name) + return + } + linksCh <- link file.DownloadLink = link filesCh <- file }(f) @@ -598,6 +583,7 @@ func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { go func() { 
wg.Wait() close(filesCh) + close(linksCh) close(errCh) }() @@ -607,6 +593,18 @@ func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { files[file.Name] = file } + // Collect download links + links := make(map[string]*types.DownloadLink) + for link := range linksCh { + if link == nil { + continue + } + links[link.Link] = link + } + + // Add links to cache + r.accounts.SetDownloadLinks(links) + // Check for errors for err := range errCh { if err != nil { @@ -636,8 +634,12 @@ func (r *RealDebrid) CheckLink(link string) error { func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) { url := fmt.Sprintf("%s/unrestrict/link/", r.Host) + _link := file.Link + if strings.HasPrefix(_link, "https://real-debrid.com/d/") { + _link = file.Link[0:39] + } payload := gourl.Values{ - "link": {file.Link}, + "link": {_link}, } req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) resp, err := r.downloadClient.Do(req) @@ -684,32 +686,31 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er if data.Download == "" { return nil, fmt.Errorf("realdebrid API error: download link not found") } + now := time.Now() return &types.DownloadLink{ Filename: data.Filename, Size: data.Filesize, Link: data.Link, DownloadLink: data.Download, - Generated: time.Now(), + Generated: now, + ExpiresAt: now.Add(r.autoExpiresLinksAfter), }, nil } func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) { - if r.currentDownloadKey == "" { - // If no download key is set, use the first one - accounts := r.getActiveAccounts() - if len(accounts) < 1 { - // No active download keys. 
It's likely that the key has reached bandwidth limit - return nil, fmt.Errorf("no active download keys") - } - r.currentDownloadKey = accounts[0].Token - } + accounts := r.accounts.All() - r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.currentDownloadKey)) - downloadLink, err := r._getDownloadLink(file) - retries := 0 - if err != nil { + for _, account := range accounts { + r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token)) + downloadLink, err := r._getDownloadLink(file) + + if err == nil { + return downloadLink, nil + } + + retries := 0 if errors.Is(err, utils.TrafficExceededError) { // Retries generating retries = 5 @@ -717,25 +718,22 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types // If the error is not traffic exceeded, return the error return nil, err } - } - backOff := 1 * time.Second - for retries > 0 { - downloadLink, err = r._getDownloadLink(file) - if err == nil { - return downloadLink, nil + backOff := 1 * time.Second + for retries > 0 { + downloadLink, err = r._getDownloadLink(file) + if err == nil { + return downloadLink, nil + } + if !errors.Is(err, utils.TrafficExceededError) { + return nil, err + } + // Add a delay before retrying + time.Sleep(backOff) + backOff *= 2 // Exponential backoff + retries-- } - if !errors.Is(err, utils.TrafficExceededError) { - return nil, err - } - // Add a delay before retrying - time.Sleep(backOff) - backOff *= 2 // Exponential backoff } - return downloadLink, nil -} - -func (r *RealDebrid) GetCheckCached() bool { - return r.checkCached + return nil, fmt.Errorf("realdebrid API error: download link not found") } func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) { @@ -824,18 +822,19 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) { return allTorrents, nil } -func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLink, error) { - links := 
make(map[string]types.DownloadLink) +func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) { + links := make(map[string]*types.DownloadLink) offset := 0 limit := 1000 - accounts := r.getActiveAccounts() + accounts := r.accounts.All() if len(accounts) < 1 { // No active download keys. It's likely that the key has reached bandwidth limit - return nil, fmt.Errorf("no active download keys") + return links, fmt.Errorf("no active download keys") } - r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", accounts[0].Token)) + activeAccount := accounts[0] + r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", activeAccount.Token)) for { dl, err := r._getDownloads(offset, limit) if err != nil { @@ -850,11 +849,12 @@ func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLink, error) { // This is ordered by date, so we can skip the rest continue } - links[d.Link] = d + links[d.Link] = &d } offset += len(dl) } + return links, nil } @@ -880,6 +880,7 @@ func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink, Link: d.Link, DownloadLink: d.Download, Generated: d.Generated, + ExpiresAt: d.Generated.Add(r.autoExpiresLinksAfter), Id: d.Id, }) @@ -899,49 +900,6 @@ func (r *RealDebrid) GetMountPath() string { return r.MountPath } -func (r *RealDebrid) DisableAccount(accountId string) { - r.accountsMutex.Lock() - defer r.accountsMutex.Unlock() - if len(r.accounts) == 1 { - r.logger.Info().Msgf("Cannot disable last account: %s", accountId) - return - } - r.currentDownloadKey = "" - if value, ok := r.accounts[accountId]; ok { - value.Disabled = true - r.accounts[accountId] = value - r.logger.Info().Msgf("Disabled account Index: %s", value.ID) - } -} - -func (r *RealDebrid) ResetActiveDownloadKeys() { - r.accountsMutex.Lock() - defer r.accountsMutex.Unlock() - for key, value := range r.accounts { - value.Disabled = false - r.accounts[key] = value - } -} - -func (r *RealDebrid) getActiveAccounts() 
[]types.Account { - r.accountsMutex.RLock() - defer r.accountsMutex.RUnlock() - accounts := make([]types.Account, 0) - - for _, value := range r.accounts { - if value.Disabled { - continue - } - accounts = append(accounts, value) - } - - // Sort accounts by ID - sort.Slice(accounts, func(i, j int) bool { - return accounts[i].ID < accounts[j].ID - }) - return accounts -} - func (r *RealDebrid) DeleteDownloadLink(linkId string) error { url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId) req, _ := http.NewRequest(http.MethodDelete, url, nil) @@ -991,3 +949,7 @@ func (r *RealDebrid) GetAvailableSlots() (int, error) { } return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain minimum active pots } + +func (r *RealDebrid) Accounts() *types.Accounts { + return r.accounts +} diff --git a/pkg/debrid/providers/torbox/torbox.go b/pkg/debrid/providers/torbox/torbox.go index 337d598..8d7e2ef 100644 --- a/pkg/debrid/providers/torbox/torbox.go +++ b/pkg/debrid/providers/torbox/torbox.go @@ -24,10 +24,12 @@ import ( ) type Torbox struct { - name string - Host string `json:"host"` - APIKey string - accounts map[string]types.Account + name string + Host string `json:"host"` + APIKey string + accounts *types.Accounts + autoExpiresLinksAfter time.Duration + DownloadUncached bool client *request.Client @@ -55,28 +57,23 @@ func New(dc config.Debrid) (*Torbox, error) { request.WithLogger(_log), request.WithProxy(dc.Proxy), ) - - accounts := make(map[string]types.Account) - for idx, key := range dc.DownloadAPIKeys { - id := strconv.Itoa(idx) - accounts[id] = types.Account{ - Name: key, - ID: id, - Token: key, - } + autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter) + if autoExpiresLinksAfter == 0 || err != nil { + autoExpiresLinksAfter = 48 * time.Hour } return &Torbox{ - name: "torbox", - Host: "https://api.torbox.app/v1", - APIKey: dc.APIKey, - accounts: accounts, - DownloadUncached: dc.DownloadUncached, - client: client, - 
MountPath: dc.Folder, - logger: _log, - checkCached: dc.CheckCached, - addSamples: dc.AddSamples, + name: "torbox", + Host: "https://api.torbox.app/v1", + APIKey: dc.APIKey, + accounts: types.NewAccounts(dc), + DownloadUncached: dc.DownloadUncached, + autoExpiresLinksAfter: autoExpiresLinksAfter, + client: client, + MountPath: dc.Folder, + logger: _log, + checkCached: dc.CheckCached, + addSamples: dc.AddSamples, }, nil } @@ -326,8 +323,7 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To if status == "downloaded" { tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) if !isSymlink { - err = tb.GenerateDownloadLinks(torrent) - if err != nil { + if err = tb.GetFileDownloadLinks(torrent); err != nil { return torrent, err } } @@ -359,8 +355,9 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error { return nil } -func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error { +func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error { filesCh := make(chan types.File, len(t.Files)) + linkCh := make(chan *types.DownloadLink) errCh := make(chan error, len(t.Files)) var wg sync.WaitGroup @@ -373,13 +370,17 @@ func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error { errCh <- err return } - file.DownloadLink = link + if link != nil { + linkCh <- link + file.DownloadLink = link + } filesCh <- file }() } go func() { wg.Wait() close(filesCh) + close(linkCh) close(errCh) }() @@ -389,6 +390,13 @@ func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error { files[file.Name] = file } + // Collect download links + for link := range linkCh { + if link != nil { + tb.accounts.SetDownloadLink(link.Link, link) + } + } + // Check for errors for err := range errCh { if err != nil { @@ -423,12 +431,13 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do if link == "" { return nil, fmt.Errorf("error getting download links") } + now := time.Now() return &types.DownloadLink{ Link: file.Link, 
DownloadLink: link, Id: file.Id, - AccountId: "0", - Generated: time.Now(), + Generated: now, + ExpiresAt: now.Add(tb.autoExpiresLinksAfter), }, nil } @@ -436,10 +445,6 @@ func (tb *Torbox) GetDownloadingStatus() []string { return []string{"downloading"} } -func (tb *Torbox) GetCheckCached() bool { - return tb.checkCached -} - func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) { return nil, nil } @@ -448,7 +453,7 @@ func (tb *Torbox) GetDownloadUncached() bool { return tb.DownloadUncached } -func (tb *Torbox) GetDownloads() (map[string]types.DownloadLink, error) { +func (tb *Torbox) GetDownloadLinks() (map[string]*types.DownloadLink, error) { return nil, nil } @@ -460,13 +465,6 @@ func (tb *Torbox) GetMountPath() string { return tb.MountPath } -func (tb *Torbox) DisableAccount(accountId string) { -} - -func (tb *Torbox) ResetActiveDownloadKeys() { - -} - func (tb *Torbox) DeleteDownloadLink(linkId string) error { return nil } @@ -475,3 +473,7 @@ func (tb *Torbox) GetAvailableSlots() (int, error) { //TODO: Implement the logic to check available slots for Torbox return 0, fmt.Errorf("not implemented") } + +func (tb *Torbox) Accounts() *types.Accounts { + return tb.accounts +} diff --git a/pkg/debrid/store/cache.go b/pkg/debrid/store/cache.go index 5edb3f5..321091a 100644 --- a/pkg/debrid/store/cache.go +++ b/pkg/debrid/store/cache.go @@ -73,7 +73,6 @@ type Cache struct { logger zerolog.Logger torrents *torrentCache - downloadLinks *downloadLinkCache invalidDownloadLinks sync.Map folderNaming WebDavFolderNaming @@ -90,10 +89,9 @@ type Cache struct { ready chan struct{} // config - workers int - torrentRefreshInterval string - downloadLinksRefreshInterval string - autoExpiresLinksAfterDuration time.Duration + workers int + torrentRefreshInterval string + downloadLinksRefreshInterval string // refresh mutex downloadLinksRefreshMu sync.RWMutex // for refreshing download links @@ -121,10 +119,6 @@ func NewDebridCache(dc config.Debrid, client types.Client) *Cache { 
scheduler = cetSc } - autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter) - if autoExpiresLinksAfter == 0 || err != nil { - autoExpiresLinksAfter = 48 * time.Hour - } var customFolders []string dirFilters := map[string][]directoryFilter{} for name, value := range dc.Directories { @@ -147,18 +141,16 @@ func NewDebridCache(dc config.Debrid, client types.Client) *Cache { c := &Cache{ dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files - torrents: newTorrentCache(dirFilters), - client: client, - logger: _log, - workers: dc.Workers, - downloadLinks: newDownloadLinkCache(), - torrentRefreshInterval: dc.TorrentsRefreshInterval, - downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval, - folderNaming: WebDavFolderNaming(dc.FolderNaming), - autoExpiresLinksAfterDuration: autoExpiresLinksAfter, - saveSemaphore: make(chan struct{}, 50), - cetScheduler: cetSc, - scheduler: scheduler, + torrents: newTorrentCache(dirFilters), + client: client, + logger: _log, + workers: dc.Workers, + torrentRefreshInterval: dc.TorrentsRefreshInterval, + downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval, + folderNaming: WebDavFolderNaming(dc.FolderNaming), + saveSemaphore: make(chan struct{}, 50), + cetScheduler: cetSc, + scheduler: scheduler, config: dc, customFolders: customFolders, @@ -202,9 +194,6 @@ func (c *Cache) Reset() { // 1. Reset torrent storage c.torrents.reset() - // 2. Reset download-link cache - c.downloadLinks.reset() - // 3. 
Clear any sync.Maps c.invalidDownloadLinks = sync.Map{} c.repairRequest = sync.Map{} @@ -714,7 +703,7 @@ func (c *Cache) Add(t *types.Torrent) error { c.setTorrent(ct, func(tor CachedTorrent) { c.RefreshListings(true) }) - go c.GenerateDownloadLinks(ct) + go c.GetFileDownloadLinks(ct) return nil } diff --git a/pkg/debrid/store/download_link.go b/pkg/debrid/store/download_link.go index 951cb1d..19c57d6 100644 --- a/pkg/debrid/store/download_link.go +++ b/pkg/debrid/store/download_link.go @@ -5,58 +5,8 @@ import ( "fmt" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/debrid/types" - - "sync" - "time" ) -type linkCache struct { - Id string - link string - accountId string - expiresAt time.Time -} - -type downloadLinkCache struct { - data map[string]linkCache - mu sync.Mutex -} - -func newDownloadLinkCache() *downloadLinkCache { - return &downloadLinkCache{ - data: make(map[string]linkCache), - } -} - -func (c *downloadLinkCache) reset() { - c.mu.Lock() - c.data = make(map[string]linkCache) - c.mu.Unlock() -} - -func (c *downloadLinkCache) Load(key string) (linkCache, bool) { - c.mu.Lock() - defer c.mu.Unlock() - dl, ok := c.data[key] - return dl, ok -} -func (c *downloadLinkCache) Store(key string, value linkCache) { - c.mu.Lock() - defer c.mu.Unlock() - c.data[key] = value -} -func (c *downloadLinkCache) Delete(key string) { - c.mu.Lock() - defer c.mu.Unlock() - delete(c.data, key) -} - -func (c *downloadLinkCache) Len() int { - c.mu.Lock() - defer c.mu.Unlock() - return len(c.data) -} - type downloadLinkRequest struct { result string err error @@ -82,8 +32,10 @@ func (r *downloadLinkRequest) Wait() (string, error) { func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) { // Check link cache - if dl := c.checkDownloadLink(fileLink); dl != "" { + if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil { return dl, nil + } else { + c.logger.Trace().Msgf("Download link check failed: %v", 
err) } if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight { @@ -96,34 +48,36 @@ func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, req := newDownloadLinkRequest() c.downloadLinkRequests.Store(fileLink, req) - downloadLink, err := c.fetchDownloadLink(torrentName, filename, fileLink) - - // Complete the request and remove it from the map - req.Complete(downloadLink, err) + dl, err := c.fetchDownloadLink(torrentName, filename, fileLink) + if err != nil { + req.Complete("", err) + c.downloadLinkRequests.Delete(fileLink) + return "", err + } + req.Complete(dl.DownloadLink, err) c.downloadLinkRequests.Delete(fileLink) - - return downloadLink, err + return dl.DownloadLink, err } -func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (string, error) { +func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*types.DownloadLink, error) { ct := c.GetTorrentByName(torrentName) if ct == nil { - return "", fmt.Errorf("torrent not found") + return nil, fmt.Errorf("torrent not found") } file, ok := ct.GetFile(filename) if !ok { - return "", fmt.Errorf("file %s not found in torrent %s", filename, torrentName) + return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName) } if file.Link == "" { // file link is empty, refresh the torrent to get restricted links ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid if ct == nil { - return "", fmt.Errorf("failed to refresh torrent") + return nil, fmt.Errorf("failed to refresh torrent") } else { file, ok = ct.GetFile(filename) if !ok { - return "", fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName) + return nil, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName) } } } @@ -133,12 +87,12 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin // Try to reinsert the torrent? 
newCt, err := c.reInsertTorrent(ct) if err != nil { - return "", fmt.Errorf("failed to reinsert torrent. %w", err) + return nil, fmt.Errorf("failed to reinsert torrent. %w", err) } ct = newCt file, ok = ct.GetFile(filename) if !ok { - return "", fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName) + return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName) } } @@ -148,93 +102,71 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin if errors.Is(err, utils.HosterUnavailableError) { newCt, err := c.reInsertTorrent(ct) if err != nil { - return "", fmt.Errorf("failed to reinsert torrent: %w", err) + return nil, fmt.Errorf("failed to reinsert torrent: %w", err) } ct = newCt file, ok = ct.GetFile(filename) if !ok { - return "", fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName) + return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName) } // Retry getting the download link downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file) if err != nil { - return "", err + return nil, err } if downloadLink == nil { - return "", fmt.Errorf("download link is empty for") + return nil, fmt.Errorf("download link is empty for") } - c.updateDownloadLink(downloadLink) - return "", nil + return nil, nil } else if errors.Is(err, utils.TrafficExceededError) { // This is likely a fair usage limit error - return "", err + return nil, err } else { - return "", fmt.Errorf("failed to get download link: %w", err) + return nil, fmt.Errorf("failed to get download link: %w", err) } } if downloadLink == nil { - return "", fmt.Errorf("download link is empty") + return nil, fmt.Errorf("download link is empty") } - c.updateDownloadLink(downloadLink) - return downloadLink.DownloadLink, nil + + // Set link to cache + go c.client.Accounts().SetDownloadLink(fileLink, downloadLink) + return downloadLink, nil } -func (c *Cache) GenerateDownloadLinks(t 
CachedTorrent) { - if err := c.client.GenerateDownloadLinks(t.Torrent); err != nil { +func (c *Cache) GetFileDownloadLinks(t CachedTorrent) { + if err := c.client.GetFileDownloadLinks(t.Torrent); err != nil { c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links") return } - for _, file := range t.GetFiles() { - if file.DownloadLink != nil { - c.updateDownloadLink(file.DownloadLink) - } - } - c.setTorrent(t, nil) } -func (c *Cache) updateDownloadLink(dl *types.DownloadLink) { - c.downloadLinks.Store(dl.Link, linkCache{ - Id: dl.Id, - link: dl.DownloadLink, - expiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration), - accountId: dl.AccountId, - }) -} +func (c *Cache) checkDownloadLink(link string) (string, error) { -func (c *Cache) checkDownloadLink(link string) string { - if dl, ok := c.downloadLinks.Load(link); ok { - if dl.expiresAt.After(time.Now()) && !c.IsDownloadLinkInvalid(dl.link) { - return dl.link - } + dl, err := c.client.Accounts().GetDownloadLink(link) + if err != nil { + return "", err } - return "" + if !c.downloadLinkIsInvalid(dl.DownloadLink) { + return dl.DownloadLink, nil + } + return "", fmt.Errorf("download link not found for %s", link) } func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) { c.invalidDownloadLinks.Store(downloadLink, reason) // Remove the download api key from active if reason == "bandwidth_exceeded" { - if dl, ok := c.downloadLinks.Load(link); ok { - if dl.accountId != "" && dl.link == downloadLink { - c.client.DisableAccount(dl.accountId) - } - } - } - c.removeDownloadLink(link) -} - -func (c *Cache) removeDownloadLink(link string) { - if dl, ok := c.downloadLinks.Load(link); ok { - // Delete dl from cache - c.downloadLinks.Delete(link) - // Delete dl from debrid - if dl.Id != "" { - _ = c.client.DeleteDownloadLink(dl.Id) + // Disable the account + _, account, err := c.client.Accounts().GetDownloadLinkWithAccount(link) + if err != nil { + return } + 
c.client.Accounts().Disable(account) } } -func (c *Cache) IsDownloadLinkInvalid(downloadLink string) bool { +func (c *Cache) downloadLinkIsInvalid(downloadLink string) bool { if reason, ok := c.invalidDownloadLinks.Load(downloadLink); ok { c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason) return true @@ -252,5 +184,5 @@ func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, e } func (c *Cache) GetTotalActiveDownloadLinks() int { - return c.downloadLinks.Len() + return c.client.Accounts().GetLinksCount() } diff --git a/pkg/debrid/store/refresh.go b/pkg/debrid/store/refresh.go index f9e5a5d..0dd8c59 100644 --- a/pkg/debrid/store/refresh.go +++ b/pkg/debrid/store/refresh.go @@ -241,24 +241,14 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) { } defer c.downloadLinksRefreshMu.Unlock() - downloadLinks, err := c.client.GetDownloads() + links, err := c.client.GetDownloadLinks() if err != nil { c.logger.Error().Err(err).Msg("Failed to get download links") return } - for k, v := range downloadLinks { - // if link is generated in the last 24 hours, add it to cache - timeSince := time.Since(v.Generated) - if timeSince < c.autoExpiresLinksAfterDuration { - c.downloadLinks.Store(k, linkCache{ - Id: v.Id, - accountId: v.AccountId, - link: v.DownloadLink, - expiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince), - }) - } else { - c.downloadLinks.Delete(k) - } - } + + c.client.Accounts().SetDownloadLinks(links) + + c.logger.Debug().Msgf("Refreshed download %d links", c.client.Accounts().GetLinksCount()) } diff --git a/pkg/debrid/store/repair.go b/pkg/debrid/store/repair.go index 8fbdb04..201109e 100644 --- a/pkg/debrid/store/repair.go +++ b/pkg/debrid/store/repair.go @@ -252,5 +252,5 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) { func (c *Cache) resetInvalidLinks() { c.invalidDownloadLinks = sync.Map{} - c.client.ResetActiveDownloadKeys() // Reset the active download 
keys + c.client.Accounts().Reset() // Reset the active download keys } diff --git a/pkg/debrid/types/account.go b/pkg/debrid/types/account.go new file mode 100644 index 0000000..9187bca --- /dev/null +++ b/pkg/debrid/types/account.go @@ -0,0 +1,230 @@ +package types + +import ( + "github.com/sirrobot01/decypharr/internal/config" + "sync" + "time" +) + +type Accounts struct { + current *Account + accounts []*Account + mu sync.RWMutex +} + +func NewAccounts(debridConf config.Debrid) *Accounts { + accounts := make([]*Account, 0) + for idx, token := range debridConf.DownloadAPIKeys { + if token == "" { + continue + } + account := newAccount(token, idx) + accounts = append(accounts, account) + } + + var current *Account + if len(accounts) > 0 { + current = accounts[0] + } + return &Accounts{ + accounts: accounts, + current: current, + } +} + +type Account struct { + Order int + Disabled bool + Token string + links map[string]*DownloadLink + mu sync.RWMutex +} + +func (a *Accounts) All() []*Account { + a.mu.RLock() + defer a.mu.RUnlock() + activeAccounts := make([]*Account, 0) + for _, acc := range a.accounts { + if !acc.Disabled { + activeAccounts = append(activeAccounts, acc) + } + } + return activeAccounts +} + +func (a *Accounts) Current() *Account { + a.mu.RLock() + if a.current != nil { + current := a.current + a.mu.RUnlock() + return current + } + a.mu.RUnlock() + + a.mu.Lock() + defer a.mu.Unlock() + + // Double-check after acquiring write lock + if a.current != nil { + return a.current + } + + activeAccounts := make([]*Account, 0) + for _, acc := range a.accounts { + if !acc.Disabled { + activeAccounts = append(activeAccounts, acc) + } + } + + if len(activeAccounts) > 0 { + a.current = activeAccounts[0] + } + return a.current +} + +func (a *Accounts) Disable(account *Account) { + a.mu.Lock() + defer a.mu.Unlock() + account.disable() + + if a.current == account { + var newCurrent *Account + for _, acc := range a.accounts { + if !acc.Disabled { + newCurrent = acc 
+ break + } + } + a.current = newCurrent + } +} + +func (a *Accounts) Reset() { + a.mu.Lock() + defer a.mu.Unlock() + for _, acc := range a.accounts { + acc.resetDownloadLinks() + acc.Disabled = false + } + if len(a.accounts) > 0 { + a.current = a.accounts[0] + } else { + a.current = nil + } +} + +func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) { + if a.Current() == nil { + return nil, NoActiveAccountsError + } + dl, ok := a.Current().getLink(fileLink) + if !ok { + return nil, NoDownloadLinkError + } + if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) { + return nil, DownloadLinkExpiredError + } + if dl.DownloadLink == "" { + return nil, EmptyDownloadLinkError + } + return dl, nil +} + +func (a *Accounts) GetDownloadLinkWithAccount(fileLink string) (*DownloadLink, *Account, error) { + currentAccount := a.Current() + if currentAccount == nil { + return nil, nil, NoActiveAccountsError + } + dl, ok := currentAccount.getLink(fileLink) + if !ok { + return nil, nil, NoDownloadLinkError + } + if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) { + return nil, currentAccount, DownloadLinkExpiredError + } + if dl.DownloadLink == "" { + return nil, currentAccount, EmptyDownloadLinkError + } + return dl, currentAccount, nil +} + +func (a *Accounts) SetDownloadLink(fileLink string, dl *DownloadLink) { + if a.Current() == nil { + return + } + a.Current().setLink(fileLink, dl) +} + +func (a *Accounts) DeleteDownloadLink(fileLink string) { + if a.Current() == nil { + return + } + a.Current().deleteLink(fileLink) +} + +func (a *Accounts) GetLinksCount() int { + if a.Current() == nil { + return 0 + } + return a.Current().LinksCount() +} + +func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) { + if a.Current() == nil { + return + } + a.Current().setLinks(links) +} + +func newAccount(token string, index int) *Account { + return &Account{ + Token: token, + Order: index, + links: make(map[string]*DownloadLink), + } +} + +func 
(a *Account) getLink(fileLink string) (*DownloadLink, bool) { + a.mu.RLock() + defer a.mu.RUnlock() + dl, ok := a.links[fileLink[0:39]] + return dl, ok +} +func (a *Account) setLink(fileLink string, dl *DownloadLink) { + a.mu.Lock() + defer a.mu.Unlock() + a.links[fileLink[0:39]] = dl +} +func (a *Account) deleteLink(fileLink string) { + a.mu.Lock() + defer a.mu.Unlock() + + delete(a.links, fileLink[0:39]) +} +func (a *Account) resetDownloadLinks() { + a.mu.Lock() + defer a.mu.Unlock() + a.links = make(map[string]*DownloadLink) +} +func (a *Account) LinksCount() int { + a.mu.RLock() + defer a.mu.RUnlock() + return len(a.links) +} + +func (a *Account) disable() { + a.Disabled = true +} + +func (a *Account) setLinks(links map[string]*DownloadLink) { + a.mu.Lock() + defer a.mu.Unlock() + now := time.Now() + for _, dl := range links { + if !dl.ExpiresAt.IsZero() && dl.ExpiresAt.Before(now) { + // Expired, continue + continue + } + a.links[dl.Link[0:39]] = dl + } +} diff --git a/pkg/debrid/types/client.go b/pkg/debrid/types/client.go index 61b4f2b..8dfef25 100644 --- a/pkg/debrid/types/client.go +++ b/pkg/debrid/types/client.go @@ -7,11 +7,10 @@ import ( type Client interface { SubmitMagnet(tr *Torrent) (*Torrent, error) CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error) - GenerateDownloadLinks(tr *Torrent) error + GetFileDownloadLinks(tr *Torrent) error GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error) DeleteTorrent(torrentId string) error IsAvailable(infohashes []string) map[string]bool - GetCheckCached() bool GetDownloadUncached() bool UpdateTorrent(torrent *Torrent) error GetTorrent(torrentId string) (*Torrent, error) @@ -19,11 +18,10 @@ type Client interface { Name() string Logger() zerolog.Logger GetDownloadingStatus() []string - GetDownloads() (map[string]DownloadLink, error) + GetDownloadLinks() (map[string]*DownloadLink, error) CheckLink(link string) error GetMountPath() string - DisableAccount(string) - ResetActiveDownloadKeys() + 
Accounts() *Accounts // Returns the active download account/token DeleteDownloadLink(linkId string) error GetProfile() (*Profile, error) GetAvailableSlots() (int, error) diff --git a/pkg/debrid/types/error.go b/pkg/debrid/types/error.go new file mode 100644 index 0000000..c8cf016 --- /dev/null +++ b/pkg/debrid/types/error.go @@ -0,0 +1,30 @@ +package types + +type Error struct { + Message string `json:"message"` + Code string `json:"code"` +} + +func (e *Error) Error() string { + return e.Message +} + +var NoActiveAccountsError = &Error{ + Message: "No active accounts", + Code: "no_active_accounts", +} + +var NoDownloadLinkError = &Error{ + Message: "No download link found", + Code: "no_download_link", +} + +var DownloadLinkExpiredError = &Error{ + Message: "Download link expired", + Code: "download_link_expired", +} + +var EmptyDownloadLinkError = &Error{ + Message: "Download link is empty", + Code: "empty_download_link", +} diff --git a/pkg/debrid/types/torrent.go b/pkg/debrid/types/torrent.go index 0975e0d..60f8bc7 100644 --- a/pkg/debrid/types/torrent.go +++ b/pkg/debrid/types/torrent.go @@ -42,20 +42,6 @@ type Torrent struct { sync.Mutex } -type DownloadLink struct { - Filename string `json:"filename"` - Link string `json:"link"` - DownloadLink string `json:"download_link"` - Generated time.Time `json:"generated"` - Size int64 `json:"size"` - Id string `json:"id"` - AccountId string `json:"account_id"` -} - -func (d *DownloadLink) String() string { - return d.DownloadLink -} - func (t *Torrent) GetSymlinkFolder(parent string) string { return filepath.Join(parent, t.Arr.Name, t.Folder) } @@ -106,10 +92,10 @@ type File struct { ByteRange *[2]int64 `json:"byte_range,omitempty"` Path string `json:"path"` Link string `json:"link"` - DownloadLink *DownloadLink `json:"-"` AccountId string `json:"account_id"` Generated time.Time `json:"generated"` Deleted bool `json:"deleted"` + DownloadLink *DownloadLink `json:"-"` } func (t *Torrent) Cleanup(remove bool) { @@ 
-121,13 +107,6 @@ func (t *Torrent) Cleanup(remove bool) { } } -type Account struct { - ID string `json:"id"` - Disabled bool `json:"disabled"` - Name string `json:"name"` - Token string `json:"token"` -} - type IngestData struct { Debrid string `json:"debrid"` Name string `json:"name"` @@ -149,3 +128,17 @@ type Profile struct { BadTorrents int `json:"bad_torrents"` ActiveLinks int `json:"active_links"` } + +type DownloadLink struct { + Filename string `json:"filename"` + Link string `json:"link"` + DownloadLink string `json:"download_link"` + Generated time.Time `json:"generated"` + Size int64 `json:"size"` + Id string `json:"id"` + ExpiresAt time.Time +} + +func (d *DownloadLink) String() string { + return d.DownloadLink +} From a539aa53bdd3c4020698a4d66d65984694a44325 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 14 Jun 2025 16:09:28 +0100 Subject: [PATCH 13/26] - Speed up repairs when checking links \n - Remove run on start for repairs since it causes issues \n - Add support for arr-specific debrid - Support for queuing system - Support for no-op when sending torrents to debrid --- docs/docs/configuration/index.md | 3 +- docs/docs/features/repair-worker.md | 2 - internal/config/config.go | 2 +- pkg/arr/arr.go | 6 +- pkg/debrid/debrid.go | 5 +- pkg/debrid/providers/alldebrid/alldebrid.go | 11 +- .../providers/debrid_link/debrid_link.go | 9 +- pkg/debrid/providers/realdebrid/misc.go | 1 + pkg/debrid/providers/realdebrid/realdebrid.go | 76 ++++----- pkg/debrid/providers/torbox/torbox.go | 10 +- pkg/debrid/store/download_link.go | 9 +- pkg/debrid/store/repair.go | 25 +-- pkg/debrid/types/client.go | 2 +- pkg/qbit/context.go | 2 +- pkg/qbit/http.go | 14 +- pkg/qbit/torrent.go | 22 +-- pkg/qbit/types.go | 11 -- pkg/repair/repair.go | 11 -- pkg/store/downloader.go | 11 +- pkg/store/misc.go | 2 +- pkg/store/request.go | 151 ++++++++++++------ pkg/store/torrent.go | 131 ++++++++++----- pkg/store/torrent_storage.go | 98 +++--------- pkg/store/types.go | 88 
++++++++++ pkg/web/api.go | 9 +- pkg/web/templates/config.html | 20 +-- pkg/web/templates/download.html | 31 ++-- pkg/webdav/file.go | 21 +-- 28 files changed, 428 insertions(+), 355 deletions(-) create mode 100644 pkg/debrid/providers/realdebrid/misc.go create mode 100644 pkg/store/types.go diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 77b156b..3215985 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -23,8 +23,7 @@ Here's a minimal configuration to get started: }, "repair": { "enabled": false, - "interval": "12h", - "run_on_start": false + "interval": "12h" }, "use_auth": false, "log_level": "info" diff --git a/docs/docs/features/repair-worker.md b/docs/docs/features/repair-worker.md index 3d62400..a291822 100644 --- a/docs/docs/features/repair-worker.md +++ b/docs/docs/features/repair-worker.md @@ -19,7 +19,6 @@ To enable and configure the Repair Worker, add the following to your `config.jso "repair": { "enabled": true, "interval": "12h", - "run_on_start": false, "use_webdav": false, "zurg_url": "http://localhost:9999", "auto_process": true @@ -30,7 +29,6 @@ To enable and configure the Repair Worker, add the following to your `config.jso - `enabled`: Set to `true` to enable the Repair Worker. - `interval`: The time interval for the Repair Worker to run (e.g., `12h`, `1d`). -- `run_on_start`: If set to `true`, the Repair Worker will run immediately after Decypharr starts. - `use_webdav`: If set to `true`, the Repair Worker will use WebDAV for file operations. - `zurg_url`: The URL for the Zurg service (if using). - `auto_process`: If set to `true`, the Repair Worker will automatically process files that it finds issues with. 
diff --git a/internal/config/config.go b/internal/config/config.go index d84401d..329531e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -53,12 +53,12 @@ type Arr struct { Cleanup bool `json:"cleanup,omitempty"` SkipRepair bool `json:"skip_repair,omitempty"` DownloadUncached *bool `json:"download_uncached,omitempty"` + SelectedDebrid string `json:"selected_debrid,omitempty"` } type Repair struct { Enabled bool `json:"enabled,omitempty"` Interval string `json:"interval,omitempty"` - RunOnStart bool `json:"run_on_start,omitempty"` ZurgURL string `json:"zurg_url,omitempty"` AutoProcess bool `json:"auto_process,omitempty"` UseWebDav bool `json:"use_webdav,omitempty"` diff --git a/pkg/arr/arr.go b/pkg/arr/arr.go index 4b90efd..c9ee2b2 100644 --- a/pkg/arr/arr.go +++ b/pkg/arr/arr.go @@ -34,10 +34,11 @@ type Arr struct { Cleanup bool `json:"cleanup"` SkipRepair bool `json:"skip_repair"` DownloadUncached *bool `json:"download_uncached"` + SelectedDebrid string `json:"selected_debrid,omitempty"` // The debrid service selected for this arr client *request.Client } -func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool) *Arr { +func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool, selectedDebrid string) *Arr { return &Arr{ Name: name, Host: host, @@ -47,6 +48,7 @@ func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *b SkipRepair: skipRepair, DownloadUncached: downloadUncached, client: request.New(), + SelectedDebrid: selectedDebrid, } } @@ -145,7 +147,7 @@ func NewStorage() *Storage { arrs := make(map[string]*Arr) for _, a := range config.Get().Arrs { name := a.Name - arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached) + arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid) } return &Storage{ Arrs: arrs, diff --git a/pkg/debrid/debrid.go b/pkg/debrid/debrid.go index 98d3ae5..e7762ae 
100644 --- a/pkg/debrid/debrid.go +++ b/pkg/debrid/debrid.go @@ -158,7 +158,7 @@ func createDebridClient(dc config.Debrid) (types.Client, error) { } } -func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) { +func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, action string, overrideDownloadUncached bool) (*types.Torrent, error) { debridTorrent := &types.Torrent{ InfoHash: magnet.InfoHash, @@ -200,6 +200,7 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet Str("Arr", a.Name). Str("Hash", debridTorrent.InfoHash). Str("Name", debridTorrent.Name). + Str("Action", action). Msg("Processing torrent") if !overrideDownloadUncached && a.DownloadUncached == nil { @@ -215,7 +216,7 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet _logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.Name()) store.lastUsed = index - torrent, err := db.CheckStatus(dbt, isSymlink) + torrent, err := db.CheckStatus(dbt) if err != nil && torrent != nil && torrent.Id != "" { // Delete the torrent if it was not downloaded go func(id string) { diff --git a/pkg/debrid/providers/alldebrid/alldebrid.go b/pkg/debrid/providers/alldebrid/alldebrid.go index e635992..c41748f 100644 --- a/pkg/debrid/providers/alldebrid/alldebrid.go +++ b/pkg/debrid/providers/alldebrid/alldebrid.go @@ -259,7 +259,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error { return nil } -func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) { +func (ad *AllDebrid) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) { for { err := ad.UpdateTorrent(torrent) @@ -269,13 +269,7 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types status := torrent.Status if status == "downloaded" { 
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) - if !isSymlink { - - if err = ad.GetFileDownloadLinks(torrent); err != nil { - return torrent, err - } - } - break + return torrent, nil } else if utils.Contains(ad.GetDownloadingStatus(), status) { if !torrent.DownloadUncached { return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name) @@ -288,7 +282,6 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types } } - return torrent, nil } func (ad *AllDebrid) DeleteTorrent(torrentId string) error { diff --git a/pkg/debrid/providers/debrid_link/debrid_link.go b/pkg/debrid/providers/debrid_link/debrid_link.go index a109495..98df6d1 100644 --- a/pkg/debrid/providers/debrid_link/debrid_link.go +++ b/pkg/debrid/providers/debrid_link/debrid_link.go @@ -316,7 +316,7 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) { return t, nil } -func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) { +func (dl *DebridLink) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) { for { err := dl.UpdateTorrent(torrent) if err != nil || torrent == nil { @@ -325,11 +325,7 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type status := torrent.Status if status == "downloaded" { dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) - - if err = dl.GetFileDownloadLinks(torrent); err != nil { - return torrent, err - } - break + return torrent, nil } else if utils.Contains(dl.GetDownloadingStatus(), status) { if !torrent.DownloadUncached { return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name) @@ -342,7 +338,6 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*type } } - return torrent, nil } func (dl *DebridLink) DeleteTorrent(torrentId string) error { diff --git a/pkg/debrid/providers/realdebrid/misc.go b/pkg/debrid/providers/realdebrid/misc.go new file mode 100644 index 0000000..c127ea7 --- 
/dev/null +++ b/pkg/debrid/providers/realdebrid/misc.go @@ -0,0 +1 @@ +package realdebrid diff --git a/pkg/debrid/providers/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go index ff102fa..28354c4 100644 --- a/pkg/debrid/providers/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -468,7 +468,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error { return nil } -func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torrent, error) { +func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) { url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id) req, _ := http.NewRequest(http.MethodGet, url, nil) for { @@ -525,12 +525,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre } r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name) - if !isSymlink { - if err = r.GetFileDownloadLinks(t); err != nil { - return t, err - } - } - break + return t, nil } else if utils.Contains(r.GetDownloadingStatus(), status) { if !t.DownloadUncached { return t, fmt.Errorf("torrent: %s not cached", t.Name) @@ -541,7 +536,6 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre } } - return t, nil } func (r *RealDebrid) DeleteTorrent(torrentId string) error { @@ -555,63 +549,55 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) error { } func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error { - filesCh := make(chan types.File, len(t.Files)) - errCh := make(chan error, len(t.Files)) - linksCh := make(chan *types.DownloadLink) - var wg sync.WaitGroup - wg.Add(len(t.Files)) - for _, f := range t.Files { + var mu sync.Mutex + var firstErr error + + files := make(map[string]types.File) + links := make(map[string]*types.DownloadLink) + + _files := t.GetFiles() + wg.Add(len(_files)) + + for _, f := range _files { go func(file types.File) { defer wg.Done() link, err := r.GetDownloadLink(t, &file) if err != nil { - 
errCh <- err + mu.Lock() + if firstErr == nil { + firstErr = err + } + mu.Unlock() return } if link == nil { - errCh <- fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name) + mu.Lock() + if firstErr == nil { + firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name) + } + mu.Unlock() return } - linksCh <- link + file.DownloadLink = link - filesCh <- file + + mu.Lock() + files[file.Name] = file + links[link.Link] = link + mu.Unlock() }(f) } - go func() { - wg.Wait() - close(filesCh) - close(linksCh) - close(errCh) - }() + wg.Wait() - // Collect results - files := make(map[string]types.File, len(t.Files)) - for file := range filesCh { - files[file.Name] = file - } - - // Collect download links - links := make(map[string]*types.DownloadLink) - for link := range linksCh { - if link == nil { - continue - } - links[link.Link] = link + if firstErr != nil { + return firstErr } // Add links to cache r.accounts.SetDownloadLinks(links) - - // Check for errors - for err := range errCh { - if err != nil { - return err // Return the first error encountered - } - } - t.Files = files return nil } diff --git a/pkg/debrid/providers/torbox/torbox.go b/pkg/debrid/providers/torbox/torbox.go index 8d7e2ef..7d346a1 100644 --- a/pkg/debrid/providers/torbox/torbox.go +++ b/pkg/debrid/providers/torbox/torbox.go @@ -312,7 +312,7 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error { return nil } -func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) { +func (tb *Torbox) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) { for { err := tb.UpdateTorrent(torrent) @@ -322,12 +322,7 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To status := torrent.Status if status == "downloaded" { tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name) - if !isSymlink { - if err = tb.GetFileDownloadLinks(torrent); err != nil { - return torrent, err - 
} - } - break + return torrent, nil } else if utils.Contains(tb.GetDownloadingStatus(), status) { if !torrent.DownloadUncached { return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name) @@ -340,7 +335,6 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.To } } - return torrent, nil } func (tb *Torbox) DeleteTorrent(torrentId string) error { diff --git a/pkg/debrid/store/download_link.go b/pkg/debrid/store/download_link.go index 19c57d6..0f73891 100644 --- a/pkg/debrid/store/download_link.go +++ b/pkg/debrid/store/download_link.go @@ -34,8 +34,6 @@ func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, // Check link cache if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil { return dl, nil - } else { - c.logger.Trace().Msgf("Download link check failed: %v", err) } if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight { @@ -54,6 +52,13 @@ func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, c.downloadLinkRequests.Delete(fileLink) return "", err } + + if dl == nil || dl.DownloadLink == "" { + err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName) + req.Complete("", err) + c.downloadLinkRequests.Delete(fileLink) + return "", err + } req.Complete(dl.DownloadLink, err) c.downloadLinkRequests.Delete(fileLink) return dl.DownloadLink, err diff --git a/pkg/debrid/store/repair.go b/pkg/debrid/store/repair.go index 201109e..53cd0ab 100644 --- a/pkg/debrid/store/repair.go +++ b/pkg/debrid/store/repair.go @@ -90,18 +90,25 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string { files = t.Files + var wg sync.WaitGroup + + wg.Add(len(files)) + for _, f := range files { // Check if file link is still missing - if f.Link == "" { - brokenFiles = append(brokenFiles, f.Name) - } else { - // Check if file.Link not in the downloadLink Cache - if err := c.client.CheckLink(f.Link); err != nil { - if 
errors.Is(err, utils.HosterUnavailableError) { - brokenFiles = append(brokenFiles, f.Name) + go func(f types.File) { + defer wg.Done() + if f.Link == "" { + brokenFiles = append(brokenFiles, f.Name) + } else { + // Check if file.Link not in the downloadLink Cache + if err := c.client.CheckLink(f.Link); err != nil { + if errors.Is(err, utils.HosterUnavailableError) { + brokenFiles = append(brokenFiles, f.Name) + } } } - } + }(f) } // Try to reinsert the torrent if it's broken @@ -202,7 +209,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) { return ct, fmt.Errorf("failed to submit magnet: empty torrent") } newTorrent.DownloadUncached = false // Set to false, avoid re-downloading - newTorrent, err = c.client.CheckStatus(newTorrent, true) + newTorrent, err = c.client.CheckStatus(newTorrent) if err != nil { if newTorrent != nil && newTorrent.Id != "" { // Delete the torrent if it was not downloaded diff --git a/pkg/debrid/types/client.go b/pkg/debrid/types/client.go index 8dfef25..fdcc2cd 100644 --- a/pkg/debrid/types/client.go +++ b/pkg/debrid/types/client.go @@ -6,7 +6,7 @@ import ( type Client interface { SubmitMagnet(tr *Torrent) (*Torrent, error) - CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error) + CheckStatus(tr *Torrent) (*Torrent, error) GetFileDownloadLinks(tr *Torrent) error GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error) DeleteTorrent(torrentId string) error diff --git a/pkg/qbit/context.go b/pkg/qbit/context.go index e6b941a..e68f6a2 100644 --- a/pkg/qbit/context.go +++ b/pkg/qbit/context.go @@ -87,7 +87,7 @@ func (q *QBit) authContext(next http.Handler) http.Handler { a := arrs.Get(category) if a == nil { downloadUncached := false - a = arr.New(category, "", "", false, false, &downloadUncached) + a = arr.New(category, "", "", false, false, &downloadUncached, "") } if err == nil { host = strings.TrimSpace(host) diff --git a/pkg/qbit/http.go b/pkg/qbit/http.go index f87ea58..a73468a 100644 --- 
a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -88,12 +88,15 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { return } - isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true" + action := "symlink" + if strings.ToLower(r.FormValue("sequentialDownload")) != "true" { + action = "download" + } debridName := r.FormValue("debrid") category := r.FormValue("category") _arr := getArr(ctx) if _arr == nil { - _arr = arr.New(category, "", "", false, false, nil) + _arr = arr.New(category, "", "", false, false, nil, "") } atleastOne := false @@ -104,7 +107,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { urlList = append(urlList, strings.TrimSpace(u)) } for _, url := range urlList { - if err := q.addMagnet(ctx, url, _arr, debridName, isSymlink); err != nil { + if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil { q.logger.Error().Err(err).Msgf("Error adding magnet") http.Error(w, err.Error(), http.StatusBadRequest) return @@ -117,7 +120,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { if r.MultipartForm != nil && r.MultipartForm.File != nil { if files := r.MultipartForm.File["torrents"]; len(files) > 0 { for _, fileHeader := range files { - if err := q.addTorrent(ctx, fileHeader, _arr, debridName, isSymlink); err != nil { + if err := q.addTorrent(ctx, fileHeader, _arr, debridName, action); err != nil { q.logger.Error().Err(err).Msgf("Error adding torrent") http.Error(w, err.Error(), http.StatusBadRequest) return @@ -241,8 +244,7 @@ func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) { if torrent == nil { return } - files := q.getTorrentFiles(torrent) - request.JSONResponse(w, files, http.StatusOK) + request.JSONResponse(w, torrent.Files, http.StatusOK) } func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/qbit/torrent.go b/pkg/qbit/torrent.go index c3f37de..fb40586 100644 --- 
a/pkg/qbit/torrent.go +++ b/pkg/qbit/torrent.go @@ -13,14 +13,14 @@ import ( ) // All torrent-related helpers goes here -func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, isSymlink bool) error { +func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, action string) error { magnet, err := utils.GetMagnetFromUrl(url) if err != nil { return fmt.Errorf("error parsing magnet link: %w", err) } _store := store.Get() - importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) + importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent) err = _store.AddTorrent(ctx, importReq) if err != nil { @@ -29,7 +29,7 @@ func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid s return nil } -func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, isSymlink bool) error { +func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, action string) error { file, _ := fileHeader.Open() defer file.Close() var reader io.Reader = file @@ -38,7 +38,7 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err) } _store := store.Get() - importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, isSymlink, false, "", store.ImportTypeQBitTorrent) + importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent) err = _store.AddTorrent(ctx, importReq) if err != nil { return fmt.Errorf("failed to process torrent: %w", err) @@ -83,20 +83,6 @@ func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties { } } -func (q *QBit) getTorrentFiles(t *store.Torrent) []*TorrentFile { - files := 
make([]*TorrentFile, 0) - if t.DebridTorrent == nil { - return files - } - for _, file := range t.DebridTorrent.GetFiles() { - files = append(files, &TorrentFile{ - Name: file.Path, - Size: file.Size, - }) - } - return files -} - func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool { torrentTags := strings.Split(t.Tags, ",") for _, tag := range tags { diff --git a/pkg/qbit/types.go b/pkg/qbit/types.go index 719e75a..8e95508 100644 --- a/pkg/qbit/types.go +++ b/pkg/qbit/types.go @@ -202,17 +202,6 @@ type TorrentProperties struct { UpSpeedAvg int `json:"up_speed_avg,omitempty"` } -type TorrentFile struct { - Index int `json:"index,omitempty"` - Name string `json:"name,omitempty"` - Size int64 `json:"size,omitempty"` - Progress int `json:"progress,omitempty"` - Priority int `json:"priority,omitempty"` - IsSeed bool `json:"is_seed,omitempty"` - PieceRange []int `json:"piece_range,omitempty"` - Availability float64 `json:"availability,omitempty"` -} - func getAppPreferences() *AppPreferences { preferences := &AppPreferences{ AddTrackers: "", diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index 9a6daf8..fa5d98f 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -32,7 +32,6 @@ type Repair struct { arrs *arr.Storage deb *debrid.Storage interval string - runOnStart bool ZurgURL string IsZurg bool useWebdav bool @@ -86,7 +85,6 @@ func New(arrs *arr.Storage, engine *debrid.Storage) *Repair { arrs: arrs, logger: logger.New("repair"), interval: cfg.Repair.Interval, - runOnStart: cfg.Repair.RunOnStart, ZurgURL: cfg.Repair.ZurgURL, useWebdav: cfg.Repair.UseWebDav, autoProcess: cfg.Repair.AutoProcess, @@ -121,15 +119,6 @@ func (r *Repair) Reset() { } func (r *Repair) Start(ctx context.Context) error { - //r.ctx = ctx - if r.runOnStart { - r.logger.Info().Msgf("Running initial repair") - go func() { - if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil { - r.logger.Error().Err(err).Msg("Error running initial repair") - } 
- }() - } r.scheduler, _ = gocron.NewScheduler(gocron.WithLocation(time.Local)) diff --git a/pkg/store/downloader.go b/pkg/store/downloader.go index dc2244c..8510863 100644 --- a/pkg/store/downloader.go +++ b/pkg/store/downloader.go @@ -56,8 +56,7 @@ Loop: return resp.Err() } -func (s *Store) processDownload(torrent *Torrent) (string, error) { - debridTorrent := torrent.DebridTorrent +func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) { s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files)) torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename)) torrentPath = utils.RemoveInvalidChars(torrentPath) @@ -66,12 +65,11 @@ func (s *Store) processDownload(torrent *Torrent) (string, error) { // add the previous error to the error and return return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err) } - s.downloadFiles(torrent, torrentPath) + s.downloadFiles(torrent, debridTorrent, torrentPath) return torrentPath, nil } -func (s *Store) downloadFiles(torrent *Torrent, parent string) { - debridTorrent := torrent.DebridTorrent +func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) { var wg sync.WaitGroup totalSize := int64(0) @@ -151,8 +149,7 @@ func (s *Store) downloadFiles(torrent *Torrent, parent string) { s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name) } -func (s *Store) processSymlink(torrent *Torrent) (string, error) { - debridTorrent := torrent.DebridTorrent +func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (string, error) { files := debridTorrent.Files if len(files) == 0 { return "", fmt.Errorf("no video files found") diff --git a/pkg/store/misc.go b/pkg/store/misc.go index 630e269..892815f 100644 --- a/pkg/store/misc.go +++ b/pkg/store/misc.go @@ -10,7 +10,7 @@ func createTorrentFromMagnet(req *ImportRequest) *Torrent { magnet := req.Magnet arrName 
:= req.Arr.Name torrent := &Torrent{ - ID: "", + ID: req.Id, Hash: strings.ToLower(magnet.InfoHash), Name: magnet.Name, Size: magnet.Size, diff --git a/pkg/store/request.go b/pkg/store/request.go index 86f0bca..dc1b38f 100644 --- a/pkg/store/request.go +++ b/pkg/store/request.go @@ -2,9 +2,11 @@ package store import ( "bytes" + "cmp" "context" "encoding/json" "fmt" + "github.com/google/uuid" "github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/pkg/arr" @@ -23,11 +25,12 @@ const ( ) type ImportRequest struct { + Id string `json:"id"` DownloadFolder string `json:"downloadFolder"` SelectedDebrid string `json:"debrid"` Magnet *utils.Magnet `json:"magnet"` Arr *arr.Arr `json:"arr"` - IsSymlink bool `json:"isSymlink"` + Action string `json:"action"` DownloadUncached bool `json:"downloadUncached"` CallBackUrl string `json:"callBackUrl"` @@ -39,14 +42,15 @@ type ImportRequest struct { Async bool `json:"async"` } -func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest { +func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, action string, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest { return &ImportRequest{ + Id: uuid.New().String(), Status: "started", DownloadFolder: downloadFolder, - SelectedDebrid: debrid, + SelectedDebrid: cmp.Or(arr.SelectedDebrid, debrid), // Use debrid from arr if available Magnet: magnet, Arr: arr, - IsSymlink: isSymlink, + Action: action, DownloadUncached: downloadUncached, CallBackUrl: callBackUrl, Type: importType, @@ -106,21 +110,22 @@ func (i *ImportRequest) markAsCompleted(torrent *Torrent, debridTorrent *debridT } type ImportQueue struct { - queue map[string]chan *ImportRequest // Map to hold queues for different debrid services - mu sync.RWMutex // Mutex 
to protect access to the queue map - ctx context.Context - cancel context.CancelFunc - capacity int // Capacity of each channel in the queue + queue []*ImportRequest + mu sync.RWMutex + ctx context.Context + cancel context.CancelFunc + cond *sync.Cond // For blocking operations } func NewImportQueue(ctx context.Context, capacity int) *ImportQueue { ctx, cancel := context.WithCancel(ctx) - return &ImportQueue{ - queue: make(map[string]chan *ImportRequest), - ctx: ctx, - cancel: cancel, - capacity: capacity, + iq := &ImportQueue{ + queue: make([]*ImportRequest, 0, capacity), + ctx: ctx, + cancel: cancel, } + iq.cond = sync.NewCond(&iq.mu) + return iq } func (iq *ImportQueue) Push(req *ImportRequest) error { @@ -131,62 +136,104 @@ func (iq *ImportQueue) Push(req *ImportRequest) error { iq.mu.Lock() defer iq.mu.Unlock() - if _, exists := iq.queue[req.SelectedDebrid]; !exists { - iq.queue[req.SelectedDebrid] = make(chan *ImportRequest, iq.capacity) // Create a new channel for the debrid service + select { + case <-iq.ctx.Done(): + return fmt.Errorf("queue is shutting down") + default: } + if len(iq.queue) >= cap(iq.queue) { + return fmt.Errorf("queue is full") + } + + iq.queue = append(iq.queue, req) + iq.cond.Signal() // Wake up any waiting Pop() + return nil +} + +func (iq *ImportQueue) Pop() (*ImportRequest, error) { + iq.mu.Lock() + defer iq.mu.Unlock() + select { - case iq.queue[req.SelectedDebrid] <- req: - return nil case <-iq.ctx.Done(): - return fmt.Errorf("retry queue is shutting down") + return nil, fmt.Errorf("queue is shutting down") + default: } + + if len(iq.queue) == 0 { + return nil, fmt.Errorf("no import requests available") + } + + req := iq.queue[0] + iq.queue = iq.queue[1:] + return req, nil } -func (iq *ImportQueue) TryPop(selectedDebrid string) (*ImportRequest, error) { - iq.mu.RLock() - defer iq.mu.RUnlock() +// Delete specific request by ID +func (iq *ImportQueue) Delete(requestID string) bool { + iq.mu.Lock() + defer iq.mu.Unlock() - if ch, 
exists := iq.queue[selectedDebrid]; exists { - select { - case req := <-ch: - return req, nil - case <-iq.ctx.Done(): - return nil, fmt.Errorf("queue is shutting down") - default: - return nil, fmt.Errorf("no import request available for %s", selectedDebrid) + for i, req := range iq.queue { + if req.Id == requestID { + // Remove from slice + iq.queue = append(iq.queue[:i], iq.queue[i+1:]...) + return true } } - return nil, fmt.Errorf("no queue exists for %s", selectedDebrid) + return false } -func (iq *ImportQueue) Size(selectedDebrid string) int { +// DeleteWhere requests matching a condition +func (iq *ImportQueue) DeleteWhere(predicate func(*ImportRequest) bool) int { + iq.mu.Lock() + defer iq.mu.Unlock() + + deleted := 0 + for i := len(iq.queue) - 1; i >= 0; i-- { + if predicate(iq.queue[i]) { + iq.queue = append(iq.queue[:i], iq.queue[i+1:]...) + deleted++ + } + } + return deleted +} + +// Find request without removing it +func (iq *ImportQueue) Find(requestID string) *ImportRequest { iq.mu.RLock() defer iq.mu.RUnlock() - if ch, exists := iq.queue[selectedDebrid]; exists { - return len(ch) + for _, req := range iq.queue { + if req.Id == requestID { + return req + } } - return 0 + return nil +} + +func (iq *ImportQueue) Size() int { + iq.mu.RLock() + defer iq.mu.RUnlock() + return len(iq.queue) +} + +func (iq *ImportQueue) IsEmpty() bool { + return iq.Size() == 0 +} + +// List all requests (copy to avoid race conditions) +func (iq *ImportQueue) List() []*ImportRequest { + iq.mu.RLock() + defer iq.mu.RUnlock() + + result := make([]*ImportRequest, len(iq.queue)) + copy(result, iq.queue) + return result } func (iq *ImportQueue) Close() { iq.cancel() - iq.mu.Lock() - defer iq.mu.Unlock() - - for _, ch := range iq.queue { - // Drain remaining items before closing - for { - select { - case <-ch: - // Discard remaining items - default: - close(ch) - goto nextChannel - } - } - nextChannel: - } - iq.queue = make(map[string]chan *ImportRequest) + iq.cond.Broadcast() } 
diff --git a/pkg/store/torrent.go b/pkg/store/torrent.go index 22798b6..aec09e0 100644 --- a/pkg/store/torrent.go +++ b/pkg/store/torrent.go @@ -16,7 +16,7 @@ import ( func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error { torrent := createTorrentFromMagnet(importReq) - debridTorrent, err := debridTypes.Process(ctx, s.debrid, importReq.SelectedDebrid, importReq.Magnet, importReq.Arr, importReq.IsSymlink, importReq.DownloadUncached) + debridTorrent, err := debridTypes.Process(ctx, s.debrid, importReq.SelectedDebrid, importReq.Magnet, importReq.Arr, importReq.Action, importReq.DownloadUncached) if err != nil { var httpErr *utils.HTTPError @@ -25,8 +25,8 @@ func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error case "too_many_active_downloads": // Handle too much active downloads error s.logger.Warn().Msgf("Too many active downloads for %s, adding to queue", importReq.Magnet.Name) - err := s.addToQueue(importReq) - if err != nil { + + if err := s.addToQueue(importReq); err != nil { s.logger.Error().Err(err).Msgf("Failed to add %s to queue", importReq.Magnet.Name) return err } @@ -65,9 +65,9 @@ func (s *Store) addToQueue(importReq *ImportRequest) error { return nil } -func (s *Store) processFromQueue(ctx context.Context, selectedDebrid string) error { +func (s *Store) processFromQueue(ctx context.Context) error { // Pop the next import request from the queue - importReq, err := s.importsQueue.TryPop(selectedDebrid) + importReq, err := s.importsQueue.Pop() if err != nil { return err } @@ -105,10 +105,13 @@ func (s *Store) trackAvailableSlots(ctx context.Context) { availableSlots[name] = slots } + if s.importsQueue.Size() <= 0 { + // Queue is empty, no need to process + return + } + for name, slots := range availableSlots { - if s.importsQueue.Size(name) <= 0 { - continue - } + s.logger.Debug().Msgf("Available slots for %s: %d", name, slots) // If slots are available, process the next import request from the queue for 
slots > 0 { @@ -116,7 +119,7 @@ func (s *Store) trackAvailableSlots(ctx context.Context) { case <-ctx.Done(): return // Exit if context is done default: - if err := s.processFromQueue(ctx, name); err != nil { + if err := s.processFromQueue(ctx); err != nil { s.logger.Error().Err(err).Msg("Error processing from queue") return // Exit on error } @@ -139,7 +142,7 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp _arr := importReq.Arr for debridTorrent.Status != "downloaded" { s.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress) - dbT, err := client.CheckStatus(debridTorrent, importReq.IsSymlink) + dbT, err := client.CheckStatus(debridTorrent) if err != nil { if dbT != nil && dbT.Id != "" { // Delete the torrent if it was not downloaded @@ -174,17 +177,43 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp // Check if debrid supports webdav by checking cache timer := time.Now() - if importReq.IsSymlink { + + onFailed := func(err error) { + if err != nil { + s.markTorrentAsFailed(torrent) + go func() { + _ = client.DeleteTorrent(debridTorrent.Id) + }() + s.logger.Error().Err(err).Msgf("Error occured while processing torrent %s", debridTorrent.Name) + importReq.markAsFailed(err, torrent, debridTorrent) + return + } + } + + onSuccess := func(torrentSymlinkPath string) { + torrent.TorrentPath = torrentSymlinkPath + s.updateTorrent(torrent, debridTorrent) + s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer)) + + go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed + go func() { + if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil { + s.logger.Error().Msgf("Error sending discord message: %v", err) + } + }() + _arr.Refresh() + } + + switch importReq.Action { + case "symlink": + // Symlink 
action, we will create a symlink to the torrent + s.logger.Debug().Msgf("Post-Download Action: Symlink") cache := deb.Cache() if cache != nil { s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) - // Use webdav to download the file - if err := cache.Add(debridTorrent); err != nil { - s.logger.Error().Msgf("Error adding torrent to cache: %v", err) - s.markTorrentAsFailed(torrent) - importReq.markAsFailed(err, torrent, debridTorrent) + onFailed(err) return } @@ -194,31 +223,45 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp } else { // User is using either zurg or debrid webdav - torrentSymlinkPath, err = s.processSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/ + torrentSymlinkPath, err = s.processSymlink(torrent, debridTorrent) // /mnt/symlinks/{category}/MyTVShow/ } - } else { - torrentSymlinkPath, err = s.processDownload(torrent) - } - if err != nil { - s.markTorrentAsFailed(torrent) - go func() { - _ = client.DeleteTorrent(debridTorrent.Id) - }() - s.logger.Error().Err(err).Msgf("Error occured while processing torrent %s", debridTorrent.Name) - importReq.markAsFailed(err, torrent, debridTorrent) + if err != nil { + onFailed(err) + return + } + if torrentSymlinkPath == "" { + err = fmt.Errorf("symlink path is empty for %s", debridTorrent.Name) + onFailed(err) + } + onSuccess(torrentSymlinkPath) return - } - torrent.TorrentPath = torrentSymlinkPath - s.updateTorrent(torrent, debridTorrent) - s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer)) - - go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed - go func() { - if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil { - s.logger.Error().Msgf("Error sending discord message: %v", err) + case "download": + // Download action, we will download the torrent to the specified folder + // Generate download links 
+ s.logger.Debug().Msgf("Post-Download Action: Download") + if err := client.GetFileDownloadLinks(debridTorrent); err != nil { + onFailed(err) + return } - }() - _arr.Refresh() + s.logger.Debug().Msgf("Download Post-Download Action") + torrentSymlinkPath, err = s.processDownload(torrent, debridTorrent) + if err != nil { + onFailed(err) + return + } + if torrentSymlinkPath == "" { + err = fmt.Errorf("download path is empty for %s", debridTorrent.Name) + onFailed(err) + return + } + onSuccess(torrentSymlinkPath) + case "none": + s.logger.Debug().Msgf("Post-Download Action: None") + // No action, just update the torrent and mark it as completed + onSuccess(torrent.TorrentPath) + default: + // Action is none, do nothing, fallthrough + } } func (s *Store) markTorrentAsFailed(t *Torrent) *Torrent { @@ -253,10 +296,18 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) * if speed != 0 { eta = int((totalSize - sizeCompleted) / speed) } - t.ID = debridTorrent.Id + files := make([]*File, 0, len(debridTorrent.Files)) + for index, file := range debridTorrent.GetFiles() { + files = append(files, &File{ + Index: index, + Name: file.Path, + Size: file.Size, + }) + } + t.DebridID = debridTorrent.Id t.Name = debridTorrent.Name t.AddedOn = addedOn.Unix() - t.DebridTorrent = debridTorrent + t.Files = files t.Debrid = debridTorrent.Debrid t.Size = totalSize t.Completed = sizeCompleted diff --git a/pkg/store/torrent_storage.go b/pkg/store/torrent_storage.go index e55dfab..2b36ada 100644 --- a/pkg/store/torrent_storage.go +++ b/pkg/store/torrent_storage.go @@ -3,7 +3,6 @@ package store import ( "encoding/json" "fmt" - "github.com/sirrobot01/decypharr/pkg/debrid/types" "os" "sort" "sync" @@ -183,10 +182,18 @@ func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) { if torrent == nil { return } - if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" { - dbClient := Get().debrid.Client(torrent.Debrid) + st := Get() + // Check 
if torrent is queued for download + + if torrent.State == "queued" && torrent.ID != "" { + // Remove the torrent from the import queue if it exists + st.importsQueue.Delete(torrent.ID) + } + + if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" { + dbClient := st.debrid.Client(torrent.Debrid) if dbClient != nil { - _ = dbClient.DeleteTorrent(torrent.ID) + _ = dbClient.DeleteTorrent(torrent.DebridID) } } @@ -212,14 +219,21 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool) defer ts.mu.Unlock() toDelete := make(map[string]string) + st := Get() + for _, hash := range hashes { for key, torrent := range ts.torrents { if torrent == nil { continue } + + if torrent.State == "queued" && torrent.ID != "" { + // Remove the torrent from the import queue if it exists + st.importsQueue.Delete(torrent.ID) + } if torrent.Hash == hash { - if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" { - toDelete[torrent.ID] = torrent.Debrid + if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" { + toDelete[torrent.DebridID] = torrent.Debrid } delete(ts.torrents, key) if torrent.ContentPath != "" { @@ -238,7 +252,7 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool) } }() - clients := Get().debrid.Clients() + clients := st.debrid.Clients() go func() { for id, debrid := range toDelete { @@ -274,73 +288,3 @@ func (ts *TorrentStorage) Reset() { defer ts.mu.Unlock() ts.torrents = make(Torrents) } - -type Torrent struct { - ID string `json:"id"` - Debrid string `json:"debrid"` - TorrentPath string `json:"-"` - DebridTorrent *types.Torrent `json:"-"` - - AddedOn int64 `json:"added_on,omitempty"` - AmountLeft int64 `json:"amount_left"` - AutoTmm bool `json:"auto_tmm"` - Availability float64 `json:"availability,omitempty"` - Category string `json:"category,omitempty"` - Completed int64 `json:"completed"` - CompletionOn int `json:"completion_on,omitempty"` - ContentPath string 
`json:"content_path"` - DlLimit int `json:"dl_limit"` - Dlspeed int64 `json:"dlspeed"` - Downloaded int64 `json:"downloaded"` - DownloadedSession int64 `json:"downloaded_session"` - Eta int `json:"eta"` - FlPiecePrio bool `json:"f_l_piece_prio,omitempty"` - ForceStart bool `json:"force_start,omitempty"` - Hash string `json:"hash"` - LastActivity int64 `json:"last_activity,omitempty"` - MagnetUri string `json:"magnet_uri,omitempty"` - MaxRatio int `json:"max_ratio,omitempty"` - MaxSeedingTime int `json:"max_seeding_time,omitempty"` - Name string `json:"name,omitempty"` - NumComplete int `json:"num_complete,omitempty"` - NumIncomplete int `json:"num_incomplete,omitempty"` - NumLeechs int `json:"num_leechs,omitempty"` - NumSeeds int `json:"num_seeds,omitempty"` - Priority int `json:"priority,omitempty"` - Progress float64 `json:"progress"` - Ratio int `json:"ratio,omitempty"` - RatioLimit int `json:"ratio_limit,omitempty"` - SavePath string `json:"save_path"` - SeedingTimeLimit int `json:"seeding_time_limit,omitempty"` - SeenComplete int64 `json:"seen_complete,omitempty"` - SeqDl bool `json:"seq_dl"` - Size int64 `json:"size,omitempty"` - State string `json:"state,omitempty"` - SuperSeeding bool `json:"super_seeding"` - Tags string `json:"tags,omitempty"` - TimeActive int `json:"time_active,omitempty"` - TotalSize int64 `json:"total_size,omitempty"` - Tracker string `json:"tracker,omitempty"` - UpLimit int64 `json:"up_limit,omitempty"` - Uploaded int64 `json:"uploaded,omitempty"` - UploadedSession int64 `json:"uploaded_session,omitempty"` - Upspeed int64 `json:"upspeed,omitempty"` - Source string `json:"source,omitempty"` - - sync.Mutex -} - -func (t *Torrent) IsReady() bool { - return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != "" -} - -func (t *Torrent) discordContext() string { - format := ` - **Name:** %s - **Arr:** %s - **Hash:** %s - **MagnetURI:** %s - **Debrid:** %s - ` - return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, 
t.Debrid) -} diff --git a/pkg/store/types.go b/pkg/store/types.go new file mode 100644 index 0000000..ba9200f --- /dev/null +++ b/pkg/store/types.go @@ -0,0 +1,88 @@ +package store + +import ( + "fmt" + "sync" +) + +type File struct { + Index int `json:"index,omitempty"` + Name string `json:"name,omitempty"` + Size int64 `json:"size,omitempty"` + Progress int `json:"progress,omitempty"` + Priority int `json:"priority,omitempty"` + IsSeed bool `json:"is_seed,omitempty"` + PieceRange []int `json:"piece_range,omitempty"` + Availability float64 `json:"availability,omitempty"` +} + +type Torrent struct { + ID string `json:"id"` + DebridID string `json:"debrid_id"` + Debrid string `json:"debrid"` + TorrentPath string `json:"-"` + Files []*File `json:"files,omitempty"` + + AddedOn int64 `json:"added_on,omitempty"` + AmountLeft int64 `json:"amount_left"` + AutoTmm bool `json:"auto_tmm"` + Availability float64 `json:"availability,omitempty"` + Category string `json:"category,omitempty"` + Completed int64 `json:"completed"` + CompletionOn int `json:"completion_on,omitempty"` + ContentPath string `json:"content_path"` + DlLimit int `json:"dl_limit"` + Dlspeed int64 `json:"dlspeed"` + Downloaded int64 `json:"downloaded"` + DownloadedSession int64 `json:"downloaded_session"` + Eta int `json:"eta"` + FlPiecePrio bool `json:"f_l_piece_prio,omitempty"` + ForceStart bool `json:"force_start,omitempty"` + Hash string `json:"hash"` + LastActivity int64 `json:"last_activity,omitempty"` + MagnetUri string `json:"magnet_uri,omitempty"` + MaxRatio int `json:"max_ratio,omitempty"` + MaxSeedingTime int `json:"max_seeding_time,omitempty"` + Name string `json:"name,omitempty"` + NumComplete int `json:"num_complete,omitempty"` + NumIncomplete int `json:"num_incomplete,omitempty"` + NumLeechs int `json:"num_leechs,omitempty"` + NumSeeds int `json:"num_seeds,omitempty"` + Priority int `json:"priority,omitempty"` + Progress float64 `json:"progress"` + Ratio int `json:"ratio,omitempty"` + 
RatioLimit int `json:"ratio_limit,omitempty"` + SavePath string `json:"save_path"` + SeedingTimeLimit int `json:"seeding_time_limit,omitempty"` + SeenComplete int64 `json:"seen_complete,omitempty"` + SeqDl bool `json:"seq_dl"` + Size int64 `json:"size,omitempty"` + State string `json:"state,omitempty"` + SuperSeeding bool `json:"super_seeding"` + Tags string `json:"tags,omitempty"` + TimeActive int `json:"time_active,omitempty"` + TotalSize int64 `json:"total_size,omitempty"` + Tracker string `json:"tracker,omitempty"` + UpLimit int64 `json:"up_limit,omitempty"` + Uploaded int64 `json:"uploaded,omitempty"` + UploadedSession int64 `json:"uploaded_session,omitempty"` + Upspeed int64 `json:"upspeed,omitempty"` + Source string `json:"source,omitempty"` + + sync.Mutex +} + +func (t *Torrent) IsReady() bool { + return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != "" +} + +func (t *Torrent) discordContext() string { + format := ` + **Name:** %s + **Arr:** %s + **Hash:** %s + **MagnetURI:** %s + **Debrid:** %s + ` + return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid) +} diff --git a/pkg/web/api.go b/pkg/web/api.go index 9c53e4c..c62adf9 100644 --- a/pkg/web/api.go +++ b/pkg/web/api.go @@ -33,7 +33,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { errs := make([]string, 0) arrName := r.FormValue("arr") - notSymlink := r.FormValue("notSymlink") == "true" + action := r.FormValue("action") debridName := r.FormValue("debrid") callbackUrl := r.FormValue("callbackUrl") downloadFolder := r.FormValue("downloadFolder") @@ -45,7 +45,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { _arr := _store.Arr().Get(arrName) if _arr == nil { - _arr = arr.New(arrName, "", "", false, false, &downloadUncached) + _arr = arr.New(arrName, "", "", false, false, &downloadUncached, "") } // Handle URLs @@ -64,7 +64,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { continue } - 
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) + importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI) if err := _store.AddTorrent(ctx, importReq); err != nil { wb.logger.Error().Err(err).Str("url", url).Msg("Failed to add torrent") errs = append(errs, fmt.Sprintf("URL %s: %v", url, err)) @@ -89,7 +89,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { continue } - importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, !notSymlink, downloadUncached, callbackUrl, store.ImportTypeAPI) + importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI) err = _store.AddTorrent(ctx, importReq) if err != nil { wb.logger.Error().Err(err).Str("file", fileHeader.Filename).Msg("Failed to add torrent") @@ -251,6 +251,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { Cleanup: a.Cleanup, SkipRepair: a.SkipRepair, DownloadUncached: a.DownloadUncached, + SelectedDebrid: a.SelectedDebrid, }) } currentConfig.Arrs = updatedConfig.Arrs diff --git a/pkg/web/templates/config.html b/pkg/web/templates/config.html index 9760612..341ea71 100644 --- a/pkg/web/templates/config.html +++ b/pkg/web/templates/config.html @@ -279,13 +279,6 @@
Use Internal Webdav for repair(make sure webdav is enabled in the debrid section
-
-
- - -
- Run repair on startup -
@@ -650,6 +643,15 @@
+
+ +
@@ -1068,7 +1070,6 @@ repair: { enabled: document.querySelector('[name="repair.enabled"]').checked, interval: document.querySelector('[name="repair.interval"]').value, - run_on_start: document.querySelector('[name="repair.run_on_start"]').checked, zurg_url: document.querySelector('[name="repair.zurg_url"]').value, workers: parseInt(document.querySelector('[name="repair.workers"]').value), use_webdav: document.querySelector('[name="repair.use_webdav"]').checked, @@ -1149,7 +1150,8 @@ token: document.querySelector(`[name="arr[${i}].token"]`).value, cleanup: document.querySelector(`[name="arr[${i}].cleanup"]`).checked, skip_repair: document.querySelector(`[name="arr[${i}].skip_repair"]`).checked, - download_uncached: document.querySelector(`[name="arr[${i}].download_uncached"]`).checked + download_uncached: document.querySelector(`[name="arr[${i}].download_uncached"]`).checked, + selectedDebrid: document.querySelector(`[name="arr[${i}].selected_debrid"]`).value }; if (arr.name && arr.host) { diff --git a/pkg/web/templates/download.html b/pkg/web/templates/download.html index bf3ab58..74e2d14 100644 --- a/pkg/web/templates/download.html +++ b/pkg/web/templates/download.html @@ -18,12 +18,21 @@
-
+
+ + + Choose how to handle the added torrent (Default to symlinks) +
+
Default is your qbittorent download_folder
-
+
Optional, leave empty if not using Arr @@ -45,12 +54,6 @@ {{ end }}
-
-
- - -
-
@@ -74,21 +77,21 @@ document.addEventListener('DOMContentLoaded', () => { const loadSavedDownloadOptions = () => { const savedCategory = localStorage.getItem('downloadCategory'); - const savedSymlink = localStorage.getItem('downloadSymlink'); + const savedAction = localStorage.getItem('downloadAction'); const savedDownloadUncached = localStorage.getItem('downloadUncached'); document.getElementById('arr').value = savedCategory || ''; - document.getElementById('isSymlink').checked = savedSymlink === 'true'; + document.getElementById('downloadAction').value = savedAction || 'symlink'; document.getElementById('downloadUncached').checked = savedDownloadUncached === 'true'; document.getElementById('downloadFolder').value = localStorage.getItem('downloadFolder') || downloadFolder || ''; }; const saveCurrentDownloadOptions = () => { const arr = document.getElementById('arr').value; - const isSymlink = document.getElementById('isSymlink').checked; + const downloadAction = document.getElementById('downloadAction').value; const downloadUncached = document.getElementById('downloadUncached').checked; const downloadFolder = document.getElementById('downloadFolder').value; localStorage.setItem('downloadCategory', arr); - localStorage.setItem('downloadSymlink', isSymlink.toString()); + localStorage.setItem('downloadAction', downloadAction); localStorage.setItem('downloadUncached', downloadUncached.toString()); localStorage.setItem('downloadFolder', downloadFolder); }; @@ -136,7 +139,7 @@ formData.append('arr', document.getElementById('arr').value); formData.append('downloadFolder', document.getElementById('downloadFolder').value); - formData.append('notSymlink', document.getElementById('isSymlink').checked); + formData.append('action', document.getElementById('downloadAction').value); formData.append('downloadUncached', document.getElementById('downloadUncached').checked); formData.append('debrid', document.getElementById('debrid') ? 
document.getElementById('debrid').value : ''); @@ -168,7 +171,7 @@ // Save the download options to local storage when they change document.getElementById('arr').addEventListener('change', saveCurrentDownloadOptions); - document.getElementById('isSymlink').addEventListener('change', saveCurrentDownloadOptions); + document.getElementById('downloadAction').addEventListener('change', saveCurrentDownloadOptions); // Read the URL parameters for a magnet link and add it to the download queue if found const urlParams = new URLSearchParams(window.location.search); diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 2d6bc7a..06612b0 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -127,7 +127,7 @@ func (f *File) stream() (*http.Response, error) { if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { f.downloadLink = "" - cleanupResp := func() { + cleanupResp := func(resp *http.Response) { if resp.Body != nil { _, _ = io.Copy(io.Discard, resp.Body) resp.Body.Close() @@ -138,7 +138,7 @@ func (f *File) stream() (*http.Response, error) { case http.StatusServiceUnavailable: // Read the body to check for specific error messages body, readErr := io.ReadAll(resp.Body) - resp.Body.Close() + cleanupResp(resp) if readErr != nil { _log.Trace().Msgf("Failed to read response body: %v", readErr) @@ -156,10 +156,10 @@ func (f *File) stream() (*http.Response, error) { return nil, fmt.Errorf("service unavailable: %s", bodyStr) case http.StatusNotFound: - cleanupResp() + cleanupResp(resp) // Mark download link as not found // Regenerate a new download link - _log.Trace().Msgf("File not found (404) for %s. Marking link as invalid and regenerating", f.name) + _log.Trace().Msgf("Link not found (404) for %s. 
Marking link as invalid and regenerating", f.name) f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found") // Generate a new download link downloadLink, err := f.getDownloadLink() @@ -191,16 +191,9 @@ func (f *File) stream() (*http.Response, error) { } if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent { - cleanupBody := func() { - if newResp.Body != nil { - _, _ = io.Copy(io.Discard, newResp.Body) - newResp.Body.Close() - } - } - - cleanupBody() + cleanupResp(newResp) _log.Trace().Msgf("Regenerated link also failed with status %d", newResp.StatusCode) - f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found") + f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, newResp.Status) return nil, fmt.Errorf("failed with status code %d even after link regeneration", newResp.StatusCode) } @@ -208,7 +201,7 @@ func (f *File) stream() (*http.Response, error) { default: body, _ := io.ReadAll(resp.Body) - resp.Body.Close() + cleanupResp(resp) _log.Trace().Msgf("Unexpected status code %d for %s: %s", resp.StatusCode, f.name, string(body)) return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body)) From 22280f15cfadde160fdfd28a33b23c65a438a6b6 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 14 Jun 2025 16:55:45 +0100 Subject: [PATCH 14/26] cleanup torrent cache --- pkg/debrid/store/cache.go | 4 ++ pkg/debrid/store/refresh.go | 4 +- pkg/debrid/store/torrent.go | 128 +++++++++++++++++++++--------------- pkg/repair/repair.go | 2 + pkg/server/debug.go | 2 +- 5 files changed, 83 insertions(+), 57 deletions(-) diff --git a/pkg/debrid/store/cache.go b/pkg/debrid/store/cache.go index 321091a..5170600 100644 --- a/pkg/debrid/store/cache.go +++ b/pkg/debrid/store/cache.go @@ -552,6 +552,10 @@ func (c *Cache) GetTorrents() map[string]CachedTorrent { return c.torrents.getAll() } +func (c *Cache) TotalTorrents() int { + return c.torrents.getAllCount() +} + func (c *Cache) 
GetTorrentByName(name string) *CachedTorrent { if torrent, ok := c.torrents.getByName(name); ok { return &torrent diff --git a/pkg/debrid/store/refresh.go b/pkg/debrid/store/refresh.go index 0dd8c59..6e53755 100644 --- a/pkg/debrid/store/refresh.go +++ b/pkg/debrid/store/refresh.go @@ -137,10 +137,10 @@ func (c *Cache) refreshRclone() error { } client := &http.Client{ - Timeout: 10 * time.Second, + Timeout: 60 * time.Second, Transport: &http.Transport{ MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, + IdleConnTimeout: 60 * time.Second, DisableCompression: false, MaxIdleConnsPerHost: 5, }, diff --git a/pkg/debrid/store/torrent.go b/pkg/debrid/store/torrent.go index 332a660..54bfaae 100644 --- a/pkg/debrid/store/torrent.go +++ b/pkg/debrid/store/torrent.go @@ -40,13 +40,22 @@ type directoryFilter struct { ageThreshold time.Duration // only for last_added } +type torrents struct { + sync.RWMutex + byID map[string]CachedTorrent + byName map[string]CachedTorrent +} + +type folders struct { + sync.RWMutex + listing map[string][]os.FileInfo // folder name to file listing +} + type torrentCache struct { - mu sync.Mutex - byID map[string]CachedTorrent - byName map[string]CachedTorrent + torrents torrents + listing atomic.Value - folderListing map[string][]os.FileInfo - folderListingMu sync.RWMutex + folders folders directoriesFilters map[string][]directoryFilter sortNeeded atomic.Bool } @@ -62,9 +71,13 @@ type sortableFile struct { func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache { tc := &torrentCache{ - byID: make(map[string]CachedTorrent), - byName: make(map[string]CachedTorrent), - folderListing: make(map[string][]os.FileInfo), + torrents: torrents{ + byID: make(map[string]CachedTorrent), + byName: make(map[string]CachedTorrent), + }, + folders: folders{ + listing: make(map[string][]os.FileInfo), + }, directoriesFilters: dirFilters, } @@ -74,41 +87,42 @@ func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache { } func 
(tc *torrentCache) reset() { - tc.mu.Lock() - tc.byID = make(map[string]CachedTorrent) - tc.byName = make(map[string]CachedTorrent) - tc.mu.Unlock() + tc.torrents.Lock() + tc.torrents.byID = make(map[string]CachedTorrent) + tc.torrents.byName = make(map[string]CachedTorrent) + tc.torrents.Unlock() // reset the sorted listing tc.sortNeeded.Store(false) tc.listing.Store(make([]os.FileInfo, 0)) // reset any per-folder views - tc.folderListingMu.Lock() - tc.folderListing = make(map[string][]os.FileInfo) - tc.folderListingMu.Unlock() + tc.folders.Lock() + tc.folders.listing = make(map[string][]os.FileInfo) + tc.folders.Unlock() } func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) { - tc.mu.Lock() - defer tc.mu.Unlock() - torrent, exists := tc.byID[id] + tc.torrents.RLock() + defer tc.torrents.RUnlock() + torrent, exists := tc.torrents.byID[id] return torrent, exists } func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) { - tc.mu.Lock() - defer tc.mu.Unlock() - torrent, exists := tc.byName[name] + tc.torrents.RLock() + defer tc.torrents.RUnlock() + torrent, exists := tc.torrents.byName[name] return torrent, exists } func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) { - tc.mu.Lock() + tc.torrents.Lock() // Set the id first - tc.byID[newTorrent.Id] = torrent // This is the unadulterated torrent - tc.byName[name] = newTorrent // This is likely the modified torrent - tc.mu.Unlock() + + tc.torrents.byName[name] = torrent + tc.torrents.byID[torrent.Id] = torrent // This is the unadulterated torrent + tc.torrents.Unlock() tc.sortNeeded.Store(true) } @@ -124,12 +138,12 @@ func (tc *torrentCache) getListing() []os.FileInfo { } func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo { - tc.folderListingMu.RLock() - defer tc.folderListingMu.RUnlock() + tc.folders.RLock() + defer tc.folders.RUnlock() if folderName == "" { return tc.getListing() } - if folder, ok := tc.folderListing[folderName]; ok { + if 
folder, ok := tc.folders.listing[folderName]; ok { return folder } // If folder not found, return empty slice @@ -138,13 +152,13 @@ func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo { func (tc *torrentCache) refreshListing() { - tc.mu.Lock() - all := make([]sortableFile, 0, len(tc.byName)) - for name, t := range tc.byName { + tc.torrents.RLock() + all := make([]sortableFile, 0, len(tc.torrents.byName)) + for name, t := range tc.torrents.byName { all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad}) } tc.sortNeeded.Store(false) - tc.mu.Unlock() + tc.torrents.RUnlock() sort.Slice(all, func(i, j int) bool { if all[i].name != all[j].name { @@ -181,13 +195,13 @@ func (tc *torrentCache) refreshListing() { }) } } - tc.folderListingMu.Lock() + tc.folders.Lock() if len(listing) > 0 { - tc.folderListing["__bad__"] = listing + tc.folders.listing["__bad__"] = listing } else { - delete(tc.folderListing, "__bad__") + delete(tc.folders.listing, "__bad__") } - tc.folderListingMu.Unlock() + tc.folders.Unlock() }() wg.Done() @@ -207,13 +221,13 @@ func (tc *torrentCache) refreshListing() { } } - tc.folderListingMu.Lock() + tc.folders.Lock() if len(matched) > 0 { - tc.folderListing[dir] = matched + tc.folders.listing[dir] = matched } else { - delete(tc.folderListing, dir) + delete(tc.folders.listing, dir) } - tc.folderListingMu.Unlock() + tc.folders.Unlock() }(dir, filters) } @@ -264,35 +278,41 @@ func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file so } func (tc *torrentCache) getAll() map[string]CachedTorrent { - tc.mu.Lock() - defer tc.mu.Unlock() - result := make(map[string]CachedTorrent) - for name, torrent := range tc.byID { + tc.torrents.RLock() + defer tc.torrents.RUnlock() + result := make(map[string]CachedTorrent, len(tc.torrents.byID)) + for name, torrent := range tc.torrents.byID { result[name] = torrent } return result } +func (tc *torrentCache) getAllCount() int { + tc.torrents.RLock() + defer 
tc.torrents.RUnlock() + return len(tc.torrents.byID) +} + func (tc *torrentCache) getIdMaps() map[string]struct{} { - tc.mu.Lock() - defer tc.mu.Unlock() - res := make(map[string]struct{}, len(tc.byID)) - for id := range tc.byID { + tc.torrents.RLock() + defer tc.torrents.RUnlock() + res := make(map[string]struct{}, len(tc.torrents.byID)) + for id := range tc.torrents.byID { res[id] = struct{}{} } return res } func (tc *torrentCache) removeId(id string) { - tc.mu.Lock() - defer tc.mu.Unlock() - delete(tc.byID, id) + tc.torrents.Lock() + defer tc.torrents.Unlock() + delete(tc.torrents.byID, id) tc.sortNeeded.Store(true) } func (tc *torrentCache) remove(name string) { - tc.mu.Lock() - defer tc.mu.Unlock() - delete(tc.byName, name) + tc.torrents.Lock() + defer tc.torrents.Unlock() + delete(tc.torrents.byName, name) tc.sortNeeded.Store(true) } diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index fa5d98f..ee10024 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -293,6 +293,8 @@ func (r *Repair) StopJob(id string) error { go func() { if job.Status == JobStarted || job.Status == JobProcessing { job.Status = JobCancelled + job.BrokenItems = nil + job.ctx = nil // Clear context to prevent further processing job.CompletedAt = time.Now() job.Error = "Job was cancelled by user" r.saveToFile() diff --git a/pkg/server/debug.go b/pkg/server/debug.go index 4204e5a..cbfd705 100644 --- a/pkg/server/debug.go +++ b/pkg/server/debug.go @@ -110,7 +110,7 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) { cache, ok := caches[debridName] if ok { // Get torrent data - profile.LibrarySize = len(cache.GetTorrents()) + profile.LibrarySize = cache.TotalTorrents() profile.BadTorrents = len(cache.GetListing("__bad__")) profile.ActiveLinks = cache.GetTotalActiveDownloadLinks() From 7cf25f53e76c0dacf62f88cbb5e52163f8351897 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sat, 14 Jun 2025 19:32:50 +0100 Subject: [PATCH 15/26] hotfix --- 
pkg/qbit/http.go | 2 +- pkg/store/torrent.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/qbit/http.go b/pkg/qbit/http.go index a73468a..409677c 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -89,7 +89,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { } action := "symlink" - if strings.ToLower(r.FormValue("sequentialDownload")) != "true" { + if strings.ToLower(r.FormValue("sequentialDownload")) == "true" { action = "download" } debridName := r.FormValue("debrid") diff --git a/pkg/store/torrent.go b/pkg/store/torrent.go index aec09e0..db73285 100644 --- a/pkg/store/torrent.go +++ b/pkg/store/torrent.go @@ -243,7 +243,6 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp onFailed(err) return } - s.logger.Debug().Msgf("Download Post-Download Action") torrentSymlinkPath, err = s.processDownload(torrent, debridTorrent) if err != nil { onFailed(err) From 8d87c602b95b5726c18e75c3e3a6fec280bf22c9 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Sun, 15 Jun 2025 22:46:07 +0100 Subject: [PATCH 16/26] - Add remove stalled torrent - Few cleanup --- internal/config/config.go | 28 +++---- pkg/qbit/http.go | 4 +- pkg/store/queue.go | 143 ++++++++++++++++++++++++++++++++++ pkg/store/store.go | 38 ++++----- pkg/store/torrent.go | 84 +------------------- pkg/store/torrent_storage.go | 20 +++++ pkg/web/api.go | 1 + pkg/web/templates/config.html | 16 +++- 8 files changed, 216 insertions(+), 118 deletions(-) create mode 100644 pkg/store/queue.go diff --git a/internal/config/config.go b/internal/config/config.go index 329531e..f0c10db 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -10,6 +10,7 @@ import ( "runtime" "strings" "sync" + "time" ) var ( @@ -77,19 +78,20 @@ type Config struct { URLBase string `json:"url_base,omitempty"` Port string `json:"port,omitempty"` - LogLevel string `json:"log_level,omitempty"` - Debrids []Debrid `json:"debrids,omitempty"` - 
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"` - Arrs []Arr `json:"arrs,omitempty"` - Repair Repair `json:"repair,omitempty"` - WebDav WebDav `json:"webdav,omitempty"` - AllowedExt []string `json:"allowed_file_types,omitempty"` - MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc - MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit) - Path string `json:"-"` // Path to save the config file - UseAuth bool `json:"use_auth,omitempty"` - Auth *Auth `json:"-"` - DiscordWebhook string `json:"discord_webhook_url,omitempty"` + LogLevel string `json:"log_level,omitempty"` + Debrids []Debrid `json:"debrids,omitempty"` + QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"` + Arrs []Arr `json:"arrs,omitempty"` + Repair Repair `json:"repair,omitempty"` + WebDav WebDav `json:"webdav,omitempty"` + AllowedExt []string `json:"allowed_file_types,omitempty"` + MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc + MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit) + Path string `json:"-"` // Path to save the config file + UseAuth bool `json:"use_auth,omitempty"` + Auth *Auth `json:"-"` + DiscordWebhook string `json:"discord_webhook_url,omitempty"` + RemoveStalledAfter time.Duration `json:"remove_stalled_after,omitempty"` } func (c *Config) JsonFile() string { diff --git a/pkg/qbit/http.go b/pkg/qbit/http.go index 409677c..210c523 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -108,7 +108,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { } for _, url := range urlList { if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil { - q.logger.Error().Err(err).Msgf("Error adding magnet") + q.logger.Debug().Err(err).Msgf("Error adding magnet") http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -121,7 +121,7 @@ func (q 
*QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { if files := r.MultipartForm.File["torrents"]; len(files) > 0 { for _, fileHeader := range files { if err := q.addTorrent(ctx, fileHeader, _arr, debridName, action); err != nil { - q.logger.Error().Err(err).Msgf("Error adding torrent") + q.logger.Debug().Err(err).Msgf("Error adding torrent") http.Error(w, err.Error(), http.StatusBadRequest) return } diff --git a/pkg/store/queue.go b/pkg/store/queue.go new file mode 100644 index 0000000..f4d573e --- /dev/null +++ b/pkg/store/queue.go @@ -0,0 +1,143 @@ +package store + +import ( + "context" + "fmt" + "time" +) + +func (s *Store) addToQueue(importReq *ImportRequest) error { + if importReq.Magnet == nil { + return fmt.Errorf("magnet is required") + } + + if importReq.Arr == nil { + return fmt.Errorf("arr is required") + } + + importReq.Status = "queued" + importReq.CompletedAt = time.Time{} + importReq.Error = nil + err := s.importsQueue.Push(importReq) + if err != nil { + return err + } + return nil +} + +func (s *Store) StartQueueSchedule(ctx context.Context) error { + // Start the slots processing in a separate goroutine + go func() { + if err := s.processSlotsQueue(ctx); err != nil { + s.logger.Error().Err(err).Msg("Error processing slots queue") + } + }() + + // Start the remove stalled torrents processing in a separate goroutine + go func() { + if err := s.processRemoveStalledTorrents(ctx); err != nil { + s.logger.Error().Err(err).Msg("Error processing remove stalled torrents") + } + }() + + return nil +} + +func (s *Store) processSlotsQueue(ctx context.Context) error { + s.trackAvailableSlots(ctx) // Initial tracking of available slots + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + s.trackAvailableSlots(ctx) + } + } +} + +func (s *Store) processRemoveStalledTorrents(ctx context.Context) error { + if s.removeStalledAfter <= 0 { + return nil // No need 
to remove stalled torrents if the duration is not set + } + + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + if err := s.removeStalledTorrents(ctx); err != nil { + s.logger.Error().Err(err).Msg("Error removing stalled torrents") + } + } + } +} + +func (s *Store) trackAvailableSlots(ctx context.Context) { + // This function tracks the available slots for each debrid client + availableSlots := make(map[string]int) + + for name, deb := range s.debrid.Debrids() { + slots, err := deb.Client().GetAvailableSlots() + if err != nil { + continue + } + availableSlots[name] = slots + } + + if s.importsQueue.Size() <= 0 { + // Queue is empty, no need to process + return + } + + for name, slots := range availableSlots { + + s.logger.Debug().Msgf("Available slots for %s: %d", name, slots) + // If slots are available, process the next import request from the queue + for slots > 0 { + select { + case <-ctx.Done(): + return // Exit if context is done + default: + if err := s.processFromQueue(ctx); err != nil { + s.logger.Error().Err(err).Msg("Error processing from queue") + return // Exit on error + } + slots-- // Decrease the available slots after processing + } + } + } +} + +func (s *Store) processFromQueue(ctx context.Context) error { + // Pop the next import request from the queue + importReq, err := s.importsQueue.Pop() + if err != nil { + return err + } + if importReq == nil { + return nil + } + return s.AddTorrent(ctx, importReq) +} + +func (s *Store) removeStalledTorrents(ctx context.Context) error { + // This function checks for stalled torrents and removes them + stalledTorrents := s.torrents.GetStalledTorrents(s.removeStalledAfter) + if len(stalledTorrents) == 0 { + return nil // No stalled torrents to remove + } + + for _, torrent := range stalledTorrents { + s.logger.Warn().Msgf("Removing stalled torrent: %s", torrent.Name) + s.torrents.Delete(torrent.Hash, torrent.Category, true) 
// Remove from store and delete from debrid + } + + return nil +} diff --git a/pkg/store/store.go b/pkg/store/store.go index 4492ffc..99226c3 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -14,15 +14,16 @@ import ( ) type Store struct { - repair *repair.Repair - arr *arr.Storage - debrid *debrid.Storage - importsQueue *ImportQueue // Queued import requests(probably from too_many_active_downloads) - torrents *TorrentStorage - logger zerolog.Logger - refreshInterval time.Duration - skipPreCache bool - downloadSemaphore chan struct{} + repair *repair.Repair + arr *arr.Storage + debrid *debrid.Storage + importsQueue *ImportQueue // Queued import requests(probably from too_many_active_downloads) + torrents *TorrentStorage + logger zerolog.Logger + refreshInterval time.Duration + skipPreCache bool + downloadSemaphore chan struct{} + removeStalledAfter time.Duration // Duration after which stalled torrents are removed } var ( @@ -39,15 +40,16 @@ func Get() *Store { qbitCfg := cfg.QBitTorrent instance = &Store{ - repair: repair.New(arrs, deb), - arr: arrs, - debrid: deb, - torrents: newTorrentStorage(cfg.TorrentsFile()), - logger: logger.Default(), // Use default logger [decypharr] - refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, - skipPreCache: qbitCfg.SkipPreCache, - downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), - importsQueue: NewImportQueue(context.Background(), 1000), + repair: repair.New(arrs, deb), + arr: arrs, + debrid: deb, + torrents: newTorrentStorage(cfg.TorrentsFile()), + logger: logger.Default(), // Use default logger [decypharr] + refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, + skipPreCache: qbitCfg.SkipPreCache, + downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), + importsQueue: NewImportQueue(context.Background(), 1000), + removeStalledAfter: cfg.RemoveStalledAfter, } }) return instance diff --git a/pkg/store/torrent.go 
b/pkg/store/torrent.go index db73285..17612d7 100644 --- a/pkg/store/torrent.go +++ b/pkg/store/torrent.go @@ -46,89 +46,6 @@ func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error return nil } -func (s *Store) addToQueue(importReq *ImportRequest) error { - if importReq.Magnet == nil { - return fmt.Errorf("magnet is required") - } - - if importReq.Arr == nil { - return fmt.Errorf("arr is required") - } - - importReq.Status = "queued" - importReq.CompletedAt = time.Time{} - importReq.Error = nil - err := s.importsQueue.Push(importReq) - if err != nil { - return err - } - return nil -} - -func (s *Store) processFromQueue(ctx context.Context) error { - // Pop the next import request from the queue - importReq, err := s.importsQueue.Pop() - if err != nil { - return err - } - if importReq == nil { - return nil - } - return s.AddTorrent(ctx, importReq) -} - -func (s *Store) StartQueueSchedule(ctx context.Context) error { - - s.trackAvailableSlots(ctx) // Initial tracking of available slots - - ticker := time.NewTicker(time.Minute) - - for { - select { - case <-ctx.Done(): - return nil - case <-ticker.C: - s.trackAvailableSlots(ctx) - } - } -} - -func (s *Store) trackAvailableSlots(ctx context.Context) { - // This function tracks the available slots for each debrid client - availableSlots := make(map[string]int) - - for name, deb := range s.debrid.Debrids() { - slots, err := deb.Client().GetAvailableSlots() - if err != nil { - continue - } - availableSlots[name] = slots - } - - if s.importsQueue.Size() <= 0 { - // Queue is empty, no need to process - return - } - - for name, slots := range availableSlots { - - s.logger.Debug().Msgf("Available slots for %s: %d", name, slots) - // If slots are available, process the next import request from the queue - for slots > 0 { - select { - case <-ctx.Done(): - return // Exit if context is done - default: - if err := s.processFromQueue(ctx); err != nil { - s.logger.Error().Err(err).Msg("Error processing from 
queue") - return // Exit on error - } - slots-- // Decrease the available slots after processing - } - } - } -} - func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, importReq *ImportRequest) { if debridTorrent == nil { @@ -310,6 +227,7 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) * t.Debrid = debridTorrent.Debrid t.Size = totalSize t.Completed = sizeCompleted + t.NumSeeds = debridTorrent.Seeders t.Downloaded = sizeCompleted t.DownloadedSession = sizeCompleted t.Uploaded = sizeCompleted diff --git a/pkg/store/torrent_storage.go b/pkg/store/torrent_storage.go index 2b36ada..e228c48 100644 --- a/pkg/store/torrent_storage.go +++ b/pkg/store/torrent_storage.go @@ -6,6 +6,7 @@ import ( "os" "sort" "sync" + "time" ) func keyPair(hash, category string) string { @@ -288,3 +289,22 @@ func (ts *TorrentStorage) Reset() { defer ts.mu.Unlock() ts.torrents = make(Torrents) } + +// GetStalledTorrents returns a list of torrents that are stalled +// A torrent is considered stalled if it has no seeds, no progress, and has been downloading for longer than removeStalledAfter +// The torrent must have a DebridID and be in the "downloading" state +func (ts *TorrentStorage) GetStalledTorrents(removeAfter time.Duration) []*Torrent { + ts.mu.RLock() + defer ts.mu.RUnlock() + stalled := make([]*Torrent, 0) + currentTime := time.Now() + for _, torrent := range ts.torrents { + if torrent.DebridID != "" && torrent.State == "downloading" && torrent.NumSeeds == 0 && torrent.Progress == 0 { + addedOn := time.Unix(torrent.AddedOn, 0) + if currentTime.Sub(addedOn) > removeAfter { + stalled = append(stalled, torrent) + } + } + } + return stalled +} diff --git a/pkg/web/api.go b/pkg/web/api.go index c62adf9..6fc3a29 100644 --- a/pkg/web/api.go +++ b/pkg/web/api.go @@ -214,6 +214,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { currentConfig.LogLevel = updatedConfig.LogLevel currentConfig.MinFileSize = 
updatedConfig.MinFileSize currentConfig.MaxFileSize = updatedConfig.MaxFileSize + currentConfig.RemoveStalledAfter = updatedConfig.RemoveStalledAfter currentConfig.AllowedExt = updatedConfig.AllowedExt currentConfig.DiscordWebhook = updatedConfig.DiscordWebhook diff --git a/pkg/web/templates/config.html b/pkg/web/templates/config.html index 341ea71..47eb6d0 100644 --- a/pkg/web/templates/config.html +++ b/pkg/web/templates/config.html @@ -142,7 +142,7 @@
-
+
Minimum file size to download (Empty for no limit)
-
+
Maximum file size to download (Empty for no limit)
+
+
+ + + Remove torrents that have been stalled for this duration +
+
@@ -1056,6 +1067,7 @@ allowed_file_types: document.getElementById('allowedExtensions').value.split(',').map(ext => ext.trim()).filter(Boolean), min_file_size: document.getElementById('minFileSize').value, max_file_size: document.getElementById('maxFileSize').value, + remove_stalled_after: document.getElementById('removeStalledAfter').value, url_base: document.getElementById('urlBase').value, bind_address: document.getElementById('bindAddress').value, port: document.getElementById('port').value, From 605d5b81c2f3223814f0db34787ebe6ab1712534 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 16 Jun 2025 13:55:02 +0100 Subject: [PATCH 17/26] Fix duration bug in config --- cmd/decypharr/main.go | 10 +++++++--- internal/config/config.go | 29 ++++++++++++++--------------- pkg/debrid/store/repair.go | 1 + pkg/store/store.go | 25 +++++++++++++++---------- 4 files changed, 37 insertions(+), 28 deletions(-) diff --git a/cmd/decypharr/main.go b/cmd/decypharr/main.go index 30d5f72..2bfa3e9 100644 --- a/cmd/decypharr/main.go +++ b/cmd/decypharr/main.go @@ -75,7 +75,7 @@ func Start(ctx context.Context) error { done := make(chan struct{}) go func(ctx context.Context) { - if err := startServices(ctx, wd, srv); err != nil { + if err := startServices(ctx, cancelSvc, wd, srv); err != nil { _log.Error().Err(err).Msg("Error starting services") cancelSvc() } @@ -107,7 +107,7 @@ func Start(ctx context.Context) error { } } -func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) error { +func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav.WebDav, srv *server.Server) error { var wg sync.WaitGroup errChan := make(chan error) @@ -177,7 +177,11 @@ func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) e for err := range errChan { if err != nil { _log.Error().Err(err).Msg("Service error detected") - // Don't shut down the whole app + // If the error is critical, return it to stop the main loop + if ctx.Err() == 
nil { + _log.Error().Msg("Stopping services due to error") + cancelSvc() // Cancel the service context to stop all services + } } } }() diff --git a/internal/config/config.go b/internal/config/config.go index f0c10db..a28c62e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -10,7 +10,6 @@ import ( "runtime" "strings" "sync" - "time" ) var ( @@ -78,20 +77,20 @@ type Config struct { URLBase string `json:"url_base,omitempty"` Port string `json:"port,omitempty"` - LogLevel string `json:"log_level,omitempty"` - Debrids []Debrid `json:"debrids,omitempty"` - QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"` - Arrs []Arr `json:"arrs,omitempty"` - Repair Repair `json:"repair,omitempty"` - WebDav WebDav `json:"webdav,omitempty"` - AllowedExt []string `json:"allowed_file_types,omitempty"` - MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc - MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit) - Path string `json:"-"` // Path to save the config file - UseAuth bool `json:"use_auth,omitempty"` - Auth *Auth `json:"-"` - DiscordWebhook string `json:"discord_webhook_url,omitempty"` - RemoveStalledAfter time.Duration `json:"remove_stalled_after,omitempty"` + LogLevel string `json:"log_level,omitempty"` + Debrids []Debrid `json:"debrids,omitempty"` + QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"` + Arrs []Arr `json:"arrs,omitempty"` + Repair Repair `json:"repair,omitempty"` + WebDav WebDav `json:"webdav,omitempty"` + AllowedExt []string `json:"allowed_file_types,omitempty"` + MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc + MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit) + Path string `json:"-"` // Path to save the config file + UseAuth bool `json:"use_auth,omitempty"` + Auth *Auth `json:"-"` + DiscordWebhook string 
`json:"discord_webhook_url,omitempty"` + RemoveStalledAfter string `json:"remove_stalled_after,omitzero"` } func (c *Config) JsonFile() string { diff --git a/pkg/debrid/store/repair.go b/pkg/debrid/store/repair.go index 53cd0ab..6d7875e 100644 --- a/pkg/debrid/store/repair.go +++ b/pkg/debrid/store/repair.go @@ -258,6 +258,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) { } func (c *Cache) resetInvalidLinks() { + c.logger.Debug().Msgf("Resetting accounts") c.invalidDownloadLinks = sync.Map{} c.client.Accounts().Reset() // Reset the active download keys } diff --git a/pkg/store/store.go b/pkg/store/store.go index 99226c3..28dd752 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -40,16 +40,21 @@ func Get() *Store { qbitCfg := cfg.QBitTorrent instance = &Store{ - repair: repair.New(arrs, deb), - arr: arrs, - debrid: deb, - torrents: newTorrentStorage(cfg.TorrentsFile()), - logger: logger.Default(), // Use default logger [decypharr] - refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, - skipPreCache: qbitCfg.SkipPreCache, - downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), - importsQueue: NewImportQueue(context.Background(), 1000), - removeStalledAfter: cfg.RemoveStalledAfter, + repair: repair.New(arrs, deb), + arr: arrs, + debrid: deb, + torrents: newTorrentStorage(cfg.TorrentsFile()), + logger: logger.Default(), // Use default logger [decypharr] + refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, + skipPreCache: qbitCfg.SkipPreCache, + downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), + importsQueue: NewImportQueue(context.Background(), 1000), + } + if cfg.RemoveStalledAfter != "" { + removeStalledAfter, err := time.ParseDuration(cfg.RemoveStalledAfter) + if err == nil { + instance.removeStalledAfter = removeStalledAfter + } } }) return instance From b7226b21ec8cf807e4319ec6bcd85d81cd7ebbc9 Mon Sep 17 00:00:00 2001 
From: Mukhtar Akere Date: Mon, 16 Jun 2025 22:41:46 +0100 Subject: [PATCH 18/26] added CET timezone --- pkg/debrid/store/cache.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/debrid/store/cache.go b/pkg/debrid/store/cache.go index 5170600..571da9f 100644 --- a/pkg/debrid/store/cache.go +++ b/pkg/debrid/store/cache.go @@ -108,7 +108,14 @@ type Cache struct { func NewDebridCache(dc config.Debrid, client types.Client) *Cache { cfg := config.Get() - cetSc, err := gocron.NewScheduler(gocron.WithLocation(time.UTC)) + cet, err := time.LoadLocation("CET") + if err != nil { + cet, err = time.LoadLocation("Europe/Berlin") // Fallback to Berlin if CET fails + if err != nil { + cet = time.FixedZone("CET", 1*60*60) // Fallback to a fixed CET zone + } + } + cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet)) if err != nil { // If we can't create a CET scheduler, fallback to local time cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local)) From 5661b05ec1f283c77778e1c8f4e0eb879028691b Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Mon, 16 Jun 2025 22:54:11 +0100 Subject: [PATCH 19/26] added CET timezone --- pkg/debrid/store/repair.go | 5 ++++- pkg/debrid/store/worker.go | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/debrid/store/repair.go b/pkg/debrid/store/repair.go index 6d7875e..bebae24 100644 --- a/pkg/debrid/store/repair.go +++ b/pkg/debrid/store/repair.go @@ -257,8 +257,11 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) { return ct, nil } -func (c *Cache) resetInvalidLinks() { +func (c *Cache) resetInvalidLinks(ctx context.Context) { c.logger.Debug().Msgf("Resetting accounts") c.invalidDownloadLinks = sync.Map{} c.client.Accounts().Reset() // Reset the active download keys + + // Refresh the download links + c.refreshDownloadLinks(ctx) } diff --git a/pkg/debrid/store/worker.go b/pkg/debrid/store/worker.go index 8fba929..e572727 100644 --- 
a/pkg/debrid/store/worker.go +++ b/pkg/debrid/store/worker.go @@ -45,7 +45,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error { } else { // Schedule the job if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() { - c.resetInvalidLinks() + c.resetInvalidLinks(ctx) }), gocron.WithContext(ctx)); err != nil { c.logger.Error().Err(err).Msg("Failed to create link reset job") } else { From b2e99585f74d6ffe6563ca938410e13a2d220aed Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Wed, 18 Jun 2025 10:42:44 +0100 Subject: [PATCH 20/26] Fix issues with repair, move to a different streaming option --- docs/docs/guides/rclone.md | 4 +- pkg/debrid/store/repair.go | 2 + pkg/qbit/http.go | 2 +- pkg/webdav/file.go | 523 +++++++++++++++++++++++-------------- pkg/webdav/handler.go | 131 +++++----- pkg/webdav/misc.go | 120 +++++++++ 6 files changed, 507 insertions(+), 275 deletions(-) diff --git a/docs/docs/guides/rclone.md b/docs/docs/guides/rclone.md index bb0d40e..df2d693 100644 --- a/docs/docs/guides/rclone.md +++ b/docs/docs/guides/rclone.md @@ -51,7 +51,7 @@ Create a `config.json` file in `/opt/decypharr/` with your Decypharr configurati "folder": "/mnt/remote/realdebrid/__all__/", "rate_limit": "250/minute", "use_webdav": true, - "rc_url": "http://your-ip-address:5572" // Rclone RC URL + "rc_url": "http://your-ip-address:5572" } ], "qbittorrent": { @@ -99,7 +99,7 @@ services: decypharr: condition: service_healthy restart: true - command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth " + command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth" ``` #### Docker Notes diff --git a/pkg/debrid/store/repair.go b/pkg/debrid/store/repair.go index bebae24..d30d450 100644 --- a/pkg/debrid/store/repair.go +++ b/pkg/debrid/store/repair.go @@ -111,6 +111,8 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string { }(f) } + 
wg.Wait() + // Try to reinsert the torrent if it's broken if len(brokenFiles) > 0 && t.Torrent != nil { // Check if the torrent is already in progress diff --git a/pkg/qbit/http.go b/pkg/qbit/http.go index 210c523..7ecb8a6 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -108,7 +108,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { } for _, url := range urlList { if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil { - q.logger.Debug().Err(err).Msgf("Error adding magnet") + q.logger.Debug().Msgf("Error adding magnet: %s", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go index 06612b0..b0e9c18 100644 --- a/pkg/webdav/file.go +++ b/pkg/webdav/file.go @@ -27,35 +27,52 @@ var sharedClient = &http.Client{ Timeout: 0, } +type streamError struct { + Err error + StatusCode int + IsClientDisconnection bool +} + +func (e *streamError) Error() string { + return e.Err.Error() +} + +func (e *streamError) Unwrap() error { + return e.Err +} + type File struct { - cache *store.Cache - fileId string - torrentName string - - modTime time.Time - - size int64 - offset int64 - isDir bool - children []os.FileInfo - reader io.ReadCloser - seekPending bool - content []byte - isRar bool name string - metadataOnly bool - - downloadLink string + torrentName string link string + downloadLink string + size int64 + isDir bool + fileId string + isRar bool + metadataOnly bool + content []byte + children []os.FileInfo // For directories + cache *store.Cache + modTime time.Time + + // Minimal state for interface compliance only + readOffset int64 // Only used for Read() method compliance } // File interface implementations for File func (f *File) Close() error { - if f.reader != nil { - f.reader.Close() - f.reader = nil + if f.isDir { + return nil // No resources to close for directories } + + // For files, we don't have any resources to close either + // This is just to 
satisfy the os.File interface + f.content = nil + f.children = nil + f.downloadLink = "" + f.readOffset = 0 return nil } @@ -84,211 +101,274 @@ func (f *File) getDownloadByteRange() (*[2]int64, error) { return byteRange, nil } -func (f *File) stream() (*http.Response, error) { - client := sharedClient +func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error { + content := f.content + size := int64(len(content)) + + // Handle range requests for preloaded content + if rangeHeader := r.Header.Get("Range"); rangeHeader != "" { + ranges, err := parseRange(rangeHeader, size) + if err != nil || len(ranges) != 1 { + w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size)) + return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable} + } + + start, end := ranges[0].start, ranges[0].end + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, size)) + w.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1)) + w.Header().Set("Accept-Ranges", "bytes") + w.WriteHeader(http.StatusPartialContent) + + _, err = w.Write(content[start : end+1]) + return err + } + + // Full content + w.Header().Set("Content-Length", fmt.Sprintf("%d", size)) + w.Header().Set("Accept-Ranges", "bytes") + w.WriteHeader(http.StatusOK) + + _, err := w.Write(content) + return err +} + +func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error { + // Handle preloaded content files + if f.content != nil { + return f.servePreloadedContent(w, r) + } + + // Try streaming with retry logic + return f.streamWithRetry(w, r, 0) +} + +func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error { + const maxRetries = 0 _log := f.cache.Logger() + // Get download link (with caching optimization) downloadLink, err := f.getDownloadLink() if err != nil { - _log.Trace().Msgf("Failed to get download link for %s: %v", f.name, err) - return nil, err + return 
&streamError{Err: err, StatusCode: http.StatusPreconditionFailed} } if downloadLink == "" { - _log.Trace().Msgf("Failed to get download link for %s. Empty download link", f.name) - return nil, fmt.Errorf("empty download link") + return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound} } - byteRange, err := f.getDownloadByteRange() + // Create upstream request with streaming optimizations + upstreamReq, err := http.NewRequest("GET", downloadLink, nil) if err != nil { - _log.Trace().Msgf("Failed to get download byte range for %s: %v", f.name, err) - return nil, err + return &streamError{Err: err, StatusCode: http.StatusInternalServerError} } - req, err := http.NewRequest("GET", downloadLink, nil) + setVideoStreamingHeaders(upstreamReq) + + // Handle range requests (critical for video seeking) + isRangeRequest := f.handleRangeRequest(upstreamReq, r, w) + if isRangeRequest == -1 { + return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable} + } + + resp, err := sharedClient.Do(upstreamReq) if err != nil { - _log.Trace().Msgf("Failed to create HTTP request: %v", err) - return nil, err + return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable} + } + defer resp.Body.Close() + + // Handle upstream errors with retry logic + shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries) + if shouldRetry && retryCount < maxRetries { + // Retry with new download link + _log.Debug(). + Int("retry_count", retryCount+1). + Str("file", f.name). 
+ Msg("Retrying stream request") + return f.streamWithRetry(w, r, retryCount+1) + } + if retryErr != nil { + return retryErr } - if byteRange == nil { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset))) - } else { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset))) - } + setVideoResponseHeaders(w, resp, isRangeRequest == 1) - // Make the request - resp, err := client.Do(req) - if err != nil { - _log.Trace().Msgf("HTTP request failed: %v", err) - return nil, err - } - - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - f.downloadLink = "" - - cleanupResp := func(resp *http.Response) { - if resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - resp.Body.Close() - } - } - - switch resp.StatusCode { - case http.StatusServiceUnavailable: - // Read the body to check for specific error messages - body, readErr := io.ReadAll(resp.Body) - cleanupResp(resp) - - if readErr != nil { - _log.Trace().Msgf("Failed to read response body: %v", readErr) - return nil, fmt.Errorf("failed to read error response: %w", readErr) - } - - bodyStr := string(body) - if strings.Contains(bodyStr, "You can not download this file because you have exceeded your traffic on this hoster") { - _log.Trace().Msgf("Bandwidth exceeded for %s. Download token will be disabled if you have more than one", f.name) - f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "bandwidth_exceeded") - // Retry with a different API key if it's available - return f.stream() - } - - return nil, fmt.Errorf("service unavailable: %s", bodyStr) - - case http.StatusNotFound: - cleanupResp(resp) - // Mark download link as not found - // Regenerate a new download link - _log.Trace().Msgf("Link not found (404) for %s. 
Marking link as invalid and regenerating", f.name) - f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found") - // Generate a new download link - downloadLink, err := f.getDownloadLink() - if err != nil { - _log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err) - return nil, err - } - - if downloadLink == "" { - _log.Trace().Msgf("Failed to get download link for %s", f.name) - return nil, fmt.Errorf("failed to regenerate download link") - } - - req, err := http.NewRequest("GET", downloadLink, nil) - if err != nil { - return nil, err - } - - // Set the range header again - if byteRange == nil { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset))) - } else { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset))) - } - - newResp, err := client.Do(req) - if err != nil { - return nil, err - } - - if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent { - cleanupResp(newResp) - _log.Trace().Msgf("Regenerated link also failed with status %d", newResp.StatusCode) - f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, newResp.Status) - return nil, fmt.Errorf("failed with status code %d even after link regeneration", newResp.StatusCode) - } - - return newResp, nil - - default: - body, _ := io.ReadAll(resp.Body) - cleanupResp(resp) - - _log.Trace().Msgf("Unexpected status code %d for %s: %s", resp.StatusCode, f.name, string(body)) - return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body)) - } - } - return resp, nil + // Stream with optimized buffering for video + return f.streamVideoOptimized(w, resp.Body) } -func (f *File) Read(p []byte) (n int, err error) { - if f.isDir { - return 0, os.ErrInvalid - } - if f.metadataOnly { - return 0, io.EOF - } - if f.content != nil { - if f.offset >= int64(len(f.content)) { - return 0, io.EOF - } - n = copy(p, f.content[f.offset:]) - f.offset += int64(n) - return n, nil +func (f *File) 
handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) { + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent { + return false, nil } - // If we haven't started streaming the file yet or need to reposition - if f.reader == nil || f.seekPending { - if f.reader != nil { - f.reader.Close() - f.reader = nil + _log := f.cache.Logger() + + // Clean up response body properly + cleanupResp := func(resp *http.Response) { + if resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } + } + + switch resp.StatusCode { + case http.StatusServiceUnavailable: + // Read the body to check for specific error messages + body, readErr := io.ReadAll(resp.Body) + cleanupResp(resp) + + if readErr != nil { + _log.Error().Err(readErr).Msg("Failed to read response body") + return false, &streamError{ + Err: fmt.Errorf("failed to read error response: %w", readErr), + StatusCode: http.StatusServiceUnavailable, + } } - // Make the request to get the file - resp, err := f.stream() - if err != nil { - return 0, err - } - if resp == nil { - return 0, fmt.Errorf("stream returned nil response") + bodyStr := string(body) + if strings.Contains(bodyStr, "you have exceeded your traffic") { + _log.Debug(). + Str("file", f.name). + Int("retry_count", retryCount). + Msg("Bandwidth exceeded. 
Marking link as invalid") + + f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded") + + // Retry with a different API key if available and we haven't exceeded retries + if retryCount < maxRetries { + return true, nil + } + + return false, &streamError{ + Err: fmt.Errorf("bandwidth exceeded after %d retries", retryCount), + StatusCode: http.StatusServiceUnavailable, + } } - f.reader = resp.Body - f.seekPending = false - } + return false, &streamError{ + Err: fmt.Errorf("service unavailable: %s", bodyStr), + StatusCode: http.StatusServiceUnavailable, + } - n, err = f.reader.Read(p) - f.offset += int64(n) + case http.StatusNotFound: + cleanupResp(resp) - if err != nil { - f.reader.Close() - f.reader = nil - } + _log.Debug(). + Str("file", f.name). + Int("retry_count", retryCount). + Msg("Link not found (404). Marking link as invalid and regenerating") - return n, err -} + f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found") -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.isDir { - return 0, os.ErrInvalid - } + // Try to regenerate download link if we haven't exceeded retries + if retryCount < maxRetries { + // Clear cached link to force regeneration + f.downloadLink = "" + return true, nil + } + + return false, &streamError{ + Err: fmt.Errorf("file not found after %d retries", retryCount), + StatusCode: http.StatusNotFound, + } - newOffset := f.offset - switch whence { - case io.SeekStart: - newOffset = offset - case io.SeekCurrent: - newOffset += offset - case io.SeekEnd: - newOffset = f.size + offset default: - return 0, os.ErrInvalid - } + body, _ := io.ReadAll(resp.Body) + cleanupResp(resp) - if newOffset < 0 { - newOffset = 0 - } - if newOffset > f.size { - newOffset = f.size - } + _log.Error(). + Int("status_code", resp.StatusCode). + Str("file", f.name). + Str("response_body", string(body)). 
+ Msg("Unexpected upstream error") - // Only mark seek as pending if position actually changed - if newOffset != f.offset { - f.offset = newOffset - f.seekPending = true + return false, &streamError{ + Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)), + StatusCode: http.StatusBadGateway, + } } - return f.offset, nil } +func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int { + rangeHeader := r.Header.Get("Range") + if rangeHeader == "" { + // For video files, apply byte range if exists + if byteRange, _ := f.getDownloadByteRange(); byteRange != nil { + upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byteRange[0], byteRange[1])) + } + return 0 // No range request + } + + // Parse range request + ranges, err := parseRange(rangeHeader, f.size) + if err != nil || len(ranges) != 1 { + w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size)) + return -1 // Invalid range + } + + // Apply byte range offset if exists + byteRange, _ := f.getDownloadByteRange() + start, end := ranges[0].start, ranges[0].end + + if byteRange != nil { + start += byteRange[0] + end += byteRange[0] + } + + upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + return 1 // Valid range request +} + +func (f *File) streamVideoOptimized(w http.ResponseWriter, src io.Reader) error { + // Use larger buffer for video streaming (better throughput) + buf := make([]byte, 64*1024) // 64KB buffer + + // First chunk optimization - send immediately for faster start + n, err := src.Read(buf) + if err != nil && err != io.EOF { + if isClientDisconnection(err) { + return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true} + } + return &streamError{Err: err, StatusCode: 0} + } + + if n > 0 { + // Write first chunk immediately + _, writeErr := w.Write(buf[:n]) + if writeErr != nil { + if isClientDisconnection(writeErr) { + return &streamError{Err: writeErr, StatusCode: 0, 
IsClientDisconnection: true} + } + return &streamError{Err: writeErr, StatusCode: 0} + } + + // Flush immediately for faster video start + if flusher, ok := w.(http.Flusher); ok { + flusher.Flush() + } + } + + if err == io.EOF { + return nil + } + + // Continue with optimized copy for remaining data + _, err = io.CopyBuffer(w, src, buf) + if err != nil { + if isClientDisconnection(err) { + return &streamError{Err: err, StatusCode: 0, IsClientDisconnection: true} + } + return &streamError{Err: err, StatusCode: 0} + } + + return nil +} + +/* +These are the methods that implement the os.File interface for the File type. +Only Stat and ReadDir are used +*/ + func (f *File) Stat() (os.FileInfo, error) { if f.isDir { return &FileInfo{ @@ -309,18 +389,61 @@ func (f *File) Stat() (os.FileInfo, error) { }, nil } -func (f *File) ReadAt(p []byte, off int64) (n int, err error) { - // Save current position - - // Seek to requested position - _, err = f.Seek(off, io.SeekStart) - if err != nil { - return 0, err +func (f *File) Read(p []byte) (n int, err error) { + if f.isDir { + return 0, os.ErrInvalid } - // Read the data - n, err = f.Read(p) - return n, err + if f.metadataOnly { + return 0, io.EOF + } + + // For preloaded content files (like version.txt) + if f.content != nil { + if f.readOffset >= int64(len(f.content)) { + return 0, io.EOF + } + n = copy(p, f.content[f.readOffset:]) + f.readOffset += int64(n) + return n, nil + } + + // For streaming files, return an error to force use of StreamResponse + return 0, fmt.Errorf("use StreamResponse method for streaming files") +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.isDir { + return 0, os.ErrInvalid + } + + // Only handle seeking for preloaded content + if f.content != nil { + newOffset := f.readOffset + switch whence { + case io.SeekStart: + newOffset = offset + case io.SeekCurrent: + newOffset += offset + case io.SeekEnd: + newOffset = int64(len(f.content)) + offset + default: + return 0, 
os.ErrInvalid + } + + if newOffset < 0 { + newOffset = 0 + } + if newOffset > int64(len(f.content)) { + newOffset = int64(len(f.content)) + } + + f.readOffset = newOffset + return f.readOffset, nil + } + + // For streaming files, return error to force use of StreamResponse + return 0, fmt.Errorf("use StreamResponse method for streaming files") } func (f *File) Write(p []byte) (n int, err error) { diff --git a/pkg/webdav/handler.go b/pkg/webdav/handler.go index 22e54a0..1bd0a8e 100644 --- a/pkg/webdav/handler.go +++ b/pkg/webdav/handler.go @@ -2,6 +2,7 @@ package webdav import ( "context" + "errors" "fmt" "github.com/sirrobot01/decypharr/pkg/debrid/types" "golang.org/x/net/webdav" @@ -415,104 +416,90 @@ func (h *Handler) serveDirectory(w http.ResponseWriter, r *http.Request, file we func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) { fRaw, err := h.OpenFile(r.Context(), r.URL.Path, os.O_RDONLY, 0) if err != nil { - h.logger.Error().Err(err). - Str("path", r.URL.Path). - Msg("Failed to open file") http.NotFound(w, r) return } - defer func(fRaw webdav.File) { - err := fRaw.Close() - if err != nil { - h.logger.Error().Err(err).Msg("Failed to close file") - return - } - }(fRaw) + defer fRaw.Close() fi, err := fRaw.Stat() if err != nil { - h.logger.Error().Err(err).Msg("Failed to stat file") http.Error(w, "Server Error", http.StatusInternalServerError) return } - // If the target is a directory, use your directory listing logic. if fi.IsDir() { h.serveDirectory(w, r, fRaw) return } - // Checks if the file is a torrent file - // .content is nil if the file is a torrent file - // .content means file is preloaded, e.g version.txt - if file, ok := fRaw.(*File); ok && file.content == nil { - link, err := file.getDownloadLink() - if err != nil { - h.logger.Debug(). - Err(err). - Str("link", file.link). - Str("path", r.URL.Path). 
- Msg("Could not fetch download link") - http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed) - return - } - if link == "" { - http.NotFound(w, r) - return - } - file.downloadLink = link - // If the torrent file is not a RAR file and users enabled proxy streaming - if !file.isRar && h.cache.StreamWithRclone() { - w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") - w.Header().Set("Pragma", "no-cache") - w.Header().Set("Expires", "0") - w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name())) - w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size())) - w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat)) - w.Header().Set("Accept-Ranges", "bytes") - w.Header().Set("X-Accel-Redirect", file.downloadLink) - w.Header().Set("X-Accel-Buffering", "no") - http.Redirect(w, r, file.downloadLink, http.StatusFound) - return - } - } - - // ETags + // Set common headers etag := fmt.Sprintf("\"%x-%x\"", fi.ModTime().Unix(), fi.Size()) w.Header().Set("ETag", etag) + w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat)) - // 7. 
Content-Type by extension ext := filepath.Ext(fi.Name()) - contentType := mime.TypeByExtension(ext) - if contentType == "" { - contentType = "application/octet-stream" + if contentType := mime.TypeByExtension(ext); contentType != "" { + w.Header().Set("Content-Type", contentType) + } else { + w.Header().Set("Content-Type", "application/octet-stream") } - w.Header().Set("Content-Type", contentType) - rs, ok := fRaw.(io.ReadSeeker) - if !ok { - if r.Header.Get("Range") != "" { - http.Error(w, "Range not supported", http.StatusRequestedRangeNotSatisfiable) + // Handle File struct with direct streaming + if file, ok := fRaw.(*File); ok { + // Handle nginx proxy (X-Accel-Redirect) + if file.content == nil && !file.isRar && h.cache.StreamWithRclone() { + link, err := file.getDownloadLink() + if err != nil || link == "" { + http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed) + return + } + + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name())) + w.Header().Set("X-Accel-Redirect", link) + w.Header().Set("X-Accel-Buffering", "no") + http.Redirect(w, r, link, http.StatusFound) return } - w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size())) - w.Header().Set("Last-Modified", fi.ModTime().UTC().Format(http.TimeFormat)) - w.Header().Set("Accept-Ranges", "bytes") - ctx := r.Context() - done := make(chan struct{}) - go func() { - defer close(done) - _, _ = io.Copy(w, fRaw) - }() - select { - case <-ctx.Done(): - h.logger.Debug().Msg("Client cancelled download") - return - case <-done: + + if err := file.StreamResponse(w, r); err != nil { + var streamErr *streamError + if errors.As(err, &streamErr) { + // Handle client disconnections silently (just debug log) + if errors.Is(streamErr.Err, context.Canceled) || errors.Is(streamErr.Err, context.DeadlineExceeded) || streamErr.IsClientDisconnection { + return // Don't log as error or try to write response + } + + if streamErr.StatusCode > 0 && 
!hasHeadersWritten(w) { + http.Error(w, streamErr.Error(), streamErr.StatusCode) + } else { + h.logger.Error(). + Err(streamErr.Err). + Str("path", r.URL.Path). + Msg("Stream error") + } + } else { + // Generic error + if !hasHeadersWritten(w) { + http.Error(w, "Stream error", http.StatusInternalServerError) + } else { + h.logger.Error(). + Err(err). + Str("path", r.URL.Path). + Msg("Stream error after headers written") + } + } } return } - http.ServeContent(w, r, fi.Name(), fi.ModTime(), rs) + + // Fallback to ServeContent for other webdav.File implementations + if rs, ok := fRaw.(io.ReadSeeker); ok { + http.ServeContent(w, r, fi.Name(), fi.ModTime(), rs) + } else { + w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size())) + w.WriteHeader(http.StatusOK) + _, _ = io.Copy(w, fRaw) + } } func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/webdav/misc.go b/pkg/webdav/misc.go index 2e3ad64..c37e76a 100644 --- a/pkg/webdav/misc.go +++ b/pkg/webdav/misc.go @@ -1,6 +1,7 @@ package webdav import ( + "fmt" "github.com/stanNthe5/stringbuf" "net/http" "net/url" @@ -132,3 +133,122 @@ func writeXml(w http.ResponseWriter, status int, buf stringbuf.StringBuf) { w.WriteHeader(status) _, _ = w.Write(buf.Bytes()) } + +func hasHeadersWritten(w http.ResponseWriter) bool { + // Most ResponseWriter implementations support this + if hw, ok := w.(interface{ Written() bool }); ok { + return hw.Written() + } + return false +} + +func isClientDisconnection(err error) bool { + if err == nil { + return false + } + + errStr := err.Error() + // Common client disconnection error patterns + return strings.Contains(errStr, "broken pipe") || + strings.Contains(errStr, "connection reset by peer") || + strings.Contains(errStr, "write: connection reset") || + strings.Contains(errStr, "read: connection reset") || + strings.Contains(errStr, "context canceled") || + strings.Contains(errStr, "context deadline exceeded") || + strings.Contains(errStr, "client 
disconnected") || + strings.Contains(errStr, "EOF") +} + +type httpRange struct{ start, end int64 } + +func parseRange(s string, size int64) ([]httpRange, error) { + if s == "" { + return nil, nil + } + const b = "bytes=" + if !strings.HasPrefix(s, b) { + return nil, fmt.Errorf("invalid range") + } + + var ranges []httpRange + for _, ra := range strings.Split(s[len(b):], ",") { + ra = strings.TrimSpace(ra) + if ra == "" { + continue + } + i := strings.Index(ra, "-") + if i < 0 { + return nil, fmt.Errorf("invalid range") + } + start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:]) + var r httpRange + if start == "" { + i, err := strconv.ParseInt(end, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid range") + } + if i > size { + i = size + } + r.start = size - i + r.end = size - 1 + } else { + i, err := strconv.ParseInt(start, 10, 64) + if err != nil || i < 0 { + return nil, fmt.Errorf("invalid range") + } + r.start = i + if end == "" { + r.end = size - 1 + } else { + i, err := strconv.ParseInt(end, 10, 64) + if err != nil || r.start > i { + return nil, fmt.Errorf("invalid range") + } + if i >= size { + i = size - 1 + } + r.end = i + } + } + if r.start > size-1 { + continue + } + ranges = append(ranges, r) + } + return ranges, nil +} + +func setVideoStreamingHeaders(req *http.Request) { + // Request optimizations for faster response + req.Header.Set("Accept", "*/*") + req.Header.Set("Accept-Encoding", "identity") + req.Header.Set("Connection", "keep-alive") + req.Header.Set("User-Agent", "VideoStream/1.0") + req.Header.Set("Priority", "u=1") +} + +func setVideoResponseHeaders(w http.ResponseWriter, resp *http.Response, isRange bool) { + // Copy essential headers from upstream + if contentLength := resp.Header.Get("Content-Length"); contentLength != "" { + w.Header().Set("Content-Length", contentLength) + } + + if contentRange := resp.Header.Get("Content-Range"); contentRange != "" && isRange { + w.Header().Set("Content-Range", contentRange) + 
} + + // Video streaming optimizations + w.Header().Set("Accept-Ranges", "bytes") // Enable seeking + w.Header().Set("Connection", "keep-alive") // Keep connection open + + // Prevent buffering in proxies/CDNs + w.Header().Set("X-Accel-Buffering", "no") // Nginx + w.Header().Set("Proxy-Buffering", "off") // General proxy + + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Headers", "Range") + w.Header().Set("Access-Control-Expose-Headers", "Content-Length, Content-Range") + + w.WriteHeader(resp.StatusCode) +} From c15e9d8f707c3d85b8edb8b2d287e42e92c3285d Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Wed, 18 Jun 2025 12:44:05 +0100 Subject: [PATCH 21/26] Updste repair --- pkg/debrid/providers/realdebrid/realdebrid.go | 1 + pkg/debrid/store/cache.go | 4 ++ pkg/debrid/store/torrent.go | 10 ++++ pkg/repair/misc.go | 30 ++++++------ pkg/repair/repair.go | 46 +++++++++++++++---- 5 files changed, 64 insertions(+), 27 deletions(-) diff --git a/pkg/debrid/providers/realdebrid/realdebrid.go b/pkg/debrid/providers/realdebrid/realdebrid.go index 28354c4..dedf3ef 100644 --- a/pkg/debrid/providers/realdebrid/realdebrid.go +++ b/pkg/debrid/providers/realdebrid/realdebrid.go @@ -919,6 +919,7 @@ func (r *RealDebrid) GetProfile() (*types.Profile, error) { Expiration: data.Expiration, Type: data.Type, } + r.Profile = profile return profile, nil } diff --git a/pkg/debrid/store/cache.go b/pkg/debrid/store/cache.go index 571da9f..6f50472 100644 --- a/pkg/debrid/store/cache.go +++ b/pkg/debrid/store/cache.go @@ -570,6 +570,10 @@ func (c *Cache) GetTorrentByName(name string) *CachedTorrent { return nil } +func (c *Cache) GetTorrentsName() map[string]CachedTorrent { + return c.torrents.getAllByName() +} + func (c *Cache) GetTorrent(torrentId string) *CachedTorrent { if torrent, ok := c.torrents.getByID(torrentId); ok { return &torrent diff --git a/pkg/debrid/store/torrent.go b/pkg/debrid/store/torrent.go index 54bfaae..bb15ca3 100644 --- 
a/pkg/debrid/store/torrent.go +++ b/pkg/debrid/store/torrent.go @@ -293,6 +293,16 @@ func (tc *torrentCache) getAllCount() int { return len(tc.torrents.byID) } +func (tc *torrentCache) getAllByName() map[string]CachedTorrent { + tc.torrents.RLock() + defer tc.torrents.RUnlock() + results := make(map[string]CachedTorrent, len(tc.torrents.byName)) + for name, torrent := range tc.torrents.byName { + results[name] = torrent + } + return results +} + func (tc *torrentCache) getIdMaps() map[string]struct{} { tc.torrents.RLock() defer tc.torrents.RUnlock() diff --git a/pkg/repair/misc.go b/pkg/repair/misc.go index d946a98..bc36088 100644 --- a/pkg/repair/misc.go +++ b/pkg/repair/misc.go @@ -103,11 +103,17 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName) return files // Return all files as broken if no cache found } + tor, ok := r.torrentsMap.Load(debridName) + if !ok { + r.logger.Debug().Msgf("Could not find torrents for %s. Skipping", debridName) + } + + torrentsMap := tor.(map[string]store.CachedTorrent) // Check if torrent exists torrentName := filepath.Clean(filepath.Base(torrentPath)) - torrent := cache.GetTorrentByName(torrentName) - if torrent == nil { + torrent, ok := torrentsMap[torrentName] + if !ok { r.logger.Debug().Msgf("No torrent found for %s. 
Skipping", torrentName) return files // Return all files as broken if torrent not found } @@ -118,7 +124,7 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, filePaths[i] = file.TargetPath } - brokenFilePaths := cache.GetBrokenFiles(torrent, filePaths) + brokenFilePaths := cache.GetBrokenFiles(&torrent, filePaths) if len(brokenFilePaths) > 0 { r.logger.Debug().Msgf("%d broken files found in %s", len(brokenFilePaths), torrentName) @@ -141,15 +147,9 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string { // Check cache first - r.cacheMutex.RLock() - if r.debridPathCache == nil { - r.debridPathCache = make(map[string]string) + if debridName, exists := r.debridPathCache.Load(dir); exists { + return debridName.(string) } - if debridName, exists := r.debridPathCache[dir]; exists { - r.cacheMutex.RUnlock() - return debridName - } - r.cacheMutex.RUnlock() // Find debrid client for _, client := range clients { @@ -162,18 +162,14 @@ func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) debridName := client.Name() // Cache the result - r.cacheMutex.Lock() - r.debridPathCache[dir] = debridName - r.cacheMutex.Unlock() + r.debridPathCache.Store(dir, debridName) return debridName } } // Cache empty result to avoid repeated lookups - r.cacheMutex.Lock() - r.debridPathCache[dir] = "" - r.cacheMutex.Unlock() + r.debridPathCache.Store(dir, "") return "" } diff --git a/pkg/repair/repair.go b/pkg/repair/repair.go index ee10024..c708481 100644 --- a/pkg/repair/repair.go +++ b/pkg/repair/repair.go @@ -41,8 +41,8 @@ type Repair struct { workers int scheduler gocron.Scheduler - debridPathCache map[string]string // Cache for path -> debrid name mapping - cacheMutex sync.RWMutex + debridPathCache sync.Map // debridPath:debridName cache.Emptied after each run + torrentsMap sync.Map //debridName: 
map[string]*store.CacheTorrent. Emptied after each run ctx context.Context } @@ -214,6 +214,27 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job { } } +// initRun initializes the repair run, setting up necessary configurations, checks and caches +func (r *Repair) initRun(ctx context.Context) { + if r.useWebdav { + // Webdav use is enabled, initialize debrid torrent caches + caches := r.deb.Caches() + if len(caches) == 0 { + return + } + for name, cache := range caches { + r.torrentsMap.Store(name, cache.GetTorrentsName()) + } + } +} + +// // onComplete is called when the repair job is completed +func (r *Repair) onComplete() { + // Set the cache maps to nil + r.torrentsMap = sync.Map{} // Clear the torrent map + r.debridPathCache = sync.Map{} +} + func (r *Repair) preRunChecks() error { if r.useWebdav { @@ -271,6 +292,7 @@ func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recu job.CompletedAt = time.Now() } } + r.onComplete() // Clear caches and maps after job completion }() return nil } @@ -313,6 +335,9 @@ func (r *Repair) repair(job *Job) error { return err } + // Initialize the run + r.initRun(job.ctx) + // Use a mutex to protect concurrent access to brokenItems var mu sync.Mutex brokenItems := map[string][]arr.ContentFile{} @@ -475,16 +500,17 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF }() } - for _, m := range media { - select { - case <-job.ctx.Done(): - break - default: - workerChan <- m + go func() { + defer close(workerChan) + for _, m := range media { + select { + case <-job.ctx.Done(): + return + case workerChan <- m: + } } - } + }() - close(workerChan) wg.Wait() if len(brokenItems) == 0 { r.logger.Info().Msgf("No broken items found for %s", a.Name) From 086aa3b1ff94b6a802e94f3688f4734608f77b56 Mon Sep 17 00:00:00 2001 From: Mukhtar Akere Date: Thu, 19 Jun 2025 14:40:12 +0100 Subject: [PATCH 22/26] Improve Arr integerations --- internal/config/config.go | 1 + 
pkg/arr/arr.go | 38 ++-- pkg/qbit/context.go | 56 +++++- pkg/qbit/http.go | 10 +- pkg/web/api.go | 95 ++++++---- pkg/web/templates/config.html | 327 +++++++++++++++++++++++++--------- pkg/web/templates/layout.html | 90 ++++++++++ 7 files changed, 472 insertions(+), 145 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index a28c62e..7c538b6 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -54,6 +54,7 @@ type Arr struct { SkipRepair bool `json:"skip_repair,omitempty"` DownloadUncached *bool `json:"download_uncached,omitempty"` SelectedDebrid string `json:"selected_debrid,omitempty"` + Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "config", "". Auto means it was automatically detected from the arr } type Repair struct { diff --git a/pkg/arr/arr.go b/pkg/arr/arr.go index c9ee2b2..9b77cec 100644 --- a/pkg/arr/arr.go +++ b/pkg/arr/arr.go @@ -3,6 +3,7 @@ package arr import ( "bytes" "context" + "crypto/tls" "encoding/json" "fmt" "github.com/rs/zerolog" @@ -19,6 +20,13 @@ import ( // Type is a type of arr type Type string +var sharedClient = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + Timeout: 60 * time.Second, +} + const ( Sonarr Type = "sonarr" Radarr Type = "radarr" @@ -35,10 +43,10 @@ type Arr struct { SkipRepair bool `json:"skip_repair"` DownloadUncached *bool `json:"download_uncached"` SelectedDebrid string `json:"selected_debrid,omitempty"` // The debrid service selected for this arr - client *request.Client + Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "manual". 
Auto means it was automatically detected from the arr } -func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool, selectedDebrid string) *Arr { +func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool, selectedDebrid, source string) *Arr { return &Arr{ Name: name, Host: host, @@ -47,8 +55,8 @@ func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *b Cleanup: cleanup, SkipRepair: skipRepair, DownloadUncached: downloadUncached, - client: request.New(), SelectedDebrid: selectedDebrid, + Source: source, } } @@ -75,14 +83,11 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo } req.Header.Set("Content-Type", "application/json") req.Header.Set("X-Api-Key", a.Token) - if a.client == nil { - a.client = request.New() - } var resp *http.Response for attempts := 0; attempts < 5; attempts++ { - resp, err = a.client.Do(req) + resp, err = sharedClient.Do(req) if err != nil { return nil, err } @@ -104,7 +109,7 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo func (a *Arr) Validate() error { if a.Token == "" || a.Host == "" { - return nil + return fmt.Errorf("arr not configured: %s", a.Name) } resp, err := a.Request("GET", "/api/v3/health", nil) if err != nil { @@ -146,8 +151,11 @@ func InferType(host, name string) Type { func NewStorage() *Storage { arrs := make(map[string]*Arr) for _, a := range config.Get().Arrs { + if a.Host == "" || a.Token == "" || a.Name == "" { + continue // Skip if host or token is not set + } name := a.Name - arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid) + arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source) } return &Storage{ Arrs: arrs, @@ -158,7 +166,7 @@ func NewStorage() *Storage { func (s *Storage) AddOrUpdate(arr *Arr) { s.mu.Lock() defer s.mu.Unlock() - if arr.Name == "" { + if arr.Host 
== "" || arr.Token == "" || arr.Name == "" { return } s.Arrs[arr.Name] = arr @@ -175,19 +183,11 @@ func (s *Storage) GetAll() []*Arr { defer s.mu.Unlock() arrs := make([]*Arr, 0, len(s.Arrs)) for _, arr := range s.Arrs { - if arr.Host != "" && arr.Token != "" { - arrs = append(arrs, arr) - } + arrs = append(arrs, arr) } return arrs } -func (s *Storage) Clear() { - s.mu.Lock() - defer s.mu.Unlock() - s.Arrs = make(map[string]*Arr) -} - func (s *Storage) StartSchedule(ctx context.Context) error { ticker := time.NewTicker(10 * time.Second) diff --git a/pkg/qbit/context.go b/pkg/qbit/context.go index e68f6a2..cdbbbd7 100644 --- a/pkg/qbit/context.go +++ b/pkg/qbit/context.go @@ -3,10 +3,12 @@ package qbit import ( "context" "encoding/base64" + "fmt" "github.com/go-chi/chi/v5" "github.com/sirrobot01/decypharr/pkg/arr" "github.com/sirrobot01/decypharr/pkg/store" "net/http" + "net/url" "strings" ) @@ -18,6 +20,45 @@ const ( arrKey contextKey = "arr" ) +func validateServiceURL(urlStr string) error { + if urlStr == "" { + return fmt.Errorf("URL cannot be empty") + } + + // Try parsing as full URL first + u, err := url.Parse(urlStr) + if err == nil && u.Scheme != "" && u.Host != "" { + // It's a full URL, validate scheme + if u.Scheme != "http" && u.Scheme != "https" { + return fmt.Errorf("URL scheme must be http or https") + } + return nil + } + + // Check if it's a host:port format (no scheme) + if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") { + // Try parsing with http:// prefix + testURL := "http://" + urlStr + u, err := url.Parse(testURL) + if err != nil { + return fmt.Errorf("invalid host:port format: %w", err) + } + + if u.Host == "" { + return fmt.Errorf("host is required in host:port format") + } + + // Validate port number + if u.Port() == "" { + return fmt.Errorf("port is required in host:port format") + } + + return nil + } + + return fmt.Errorf("invalid URL format: %s", urlStr) +} + func getCategory(ctx context.Context) string { if 
category, ok := ctx.Value(categoryKey).(string); ok { return category @@ -32,7 +73,7 @@ func getHashes(ctx context.Context) []string { return nil } -func getArr(ctx context.Context) *arr.Arr { +func getArrFromContext(ctx context.Context) *arr.Arr { if a, ok := ctx.Value(arrKey).(*arr.Arr); ok { return a } @@ -78,6 +119,10 @@ func (q *QBit) categoryContext(next http.Handler) http.Handler { }) } +// authContext creates a middleware that extracts the Arr host and token from the Authorization header +// and adds it to the request context. +// This is used to identify the Arr instance for the request. +// Only a valid host and token will be added to the context/config. The rest are manual func (q *QBit) authContext(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { host, token, err := decodeAuthHeader(r.Header.Get("Authorization")) @@ -86,8 +131,9 @@ func (q *QBit) authContext(next http.Handler) http.Handler { // Check if arr exists a := arrs.Get(category) if a == nil { + // Arr is not configured, create a new one downloadUncached := false - a = arr.New(category, "", "", false, false, &downloadUncached, "") + a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto") } if err == nil { host = strings.TrimSpace(host) @@ -99,7 +145,11 @@ func (q *QBit) authContext(next http.Handler) http.Handler { a.Token = token } } - + a.Source = "auto" + if err := validateServiceURL(a.Host); err != nil { + // Return silently, no need to raise a problem. 
Just do not add the Arr to the context/config.json + return + } arrs.AddOrUpdate(a) ctx := context.WithValue(r.Context(), arrKey, a) next.ServeHTTP(w, r.WithContext(ctx)) diff --git a/pkg/qbit/http.go b/pkg/qbit/http.go index 7ecb8a6..d548985 100644 --- a/pkg/qbit/http.go +++ b/pkg/qbit/http.go @@ -10,14 +10,15 @@ import ( func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - _arr := getArr(ctx) + _arr := getArrFromContext(ctx) if _arr == nil { - // No arr + // Arr not in context, return OK _, _ = w.Write([]byte("Ok.")) return } if err := _arr.Validate(); err != nil { q.logger.Error().Err(err).Msgf("Error validating arr") + http.Error(w, "Invalid arr configuration", http.StatusBadRequest) } _, _ = w.Write([]byte("Ok.")) } @@ -94,9 +95,10 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) { } debridName := r.FormValue("debrid") category := r.FormValue("category") - _arr := getArr(ctx) + _arr := getArrFromContext(ctx) if _arr == nil { - _arr = arr.New(category, "", "", false, false, nil, "") + // Arr is not in context + _arr = arr.New(category, "", "", false, false, nil, "", "") } atleastOne := false diff --git a/pkg/web/api.go b/pkg/web/api.go index 6fc3a29..99f69af 100644 --- a/pkg/web/api.go +++ b/pkg/web/api.go @@ -45,7 +45,8 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) { _arr := _store.Arr().Get(arrName) if _arr == nil { - _arr = arr.New(arrName, "", "", false, false, &downloadUncached, "") + // These are not found in the config. They are throwaway arrs. 
+ _arr = arr.New(arrName, "", "", false, false, &downloadUncached, "", "") } // Handle URLs @@ -181,20 +182,38 @@ func (wb *Web) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) { } func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) { + // Merge config arrs, with arr Storage + unique := map[string]config.Arr{} cfg := config.Get() - arrCfgs := make([]config.Arr, 0) - _store := store.Get() - for _, a := range _store.Arr().GetAll() { - arrCfgs = append(arrCfgs, config.Arr{ - Host: a.Host, - Name: a.Name, - Token: a.Token, - Cleanup: a.Cleanup, - SkipRepair: a.SkipRepair, - DownloadUncached: a.DownloadUncached, - }) + arrStorage := store.Get().Arr() + + // Add existing Arrs from storage + for _, a := range arrStorage.GetAll() { + if _, ok := unique[a.Name]; !ok { + // Only add if not already in the unique map + unique[a.Name] = config.Arr{ + Name: a.Name, + Host: a.Host, + Token: a.Token, + Cleanup: a.Cleanup, + SkipRepair: a.SkipRepair, + DownloadUncached: a.DownloadUncached, + SelectedDebrid: a.SelectedDebrid, + Source: a.Source, + } + } + } + + for _, a := range cfg.Arrs { + if a.Host == "" || a.Token == "" { + continue // Skip empty arrs + } + unique[a.Name] = a + } + cfg.Arrs = make([]config.Arr, 0, len(unique)) + for _, a := range unique { + cfg.Arrs = append(cfg.Arrs, a) } - cfg.Arrs = arrCfgs request.JSONResponse(w, cfg, http.StatusOK) } @@ -235,27 +254,43 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) { // Clear legacy single debrid if using array } - if len(updatedConfig.Arrs) > 0 { - currentConfig.Arrs = updatedConfig.Arrs - } - // Update Arrs through the service - _store := store.Get() - _arr := _store.Arr() - _arr.Clear() // Clear existing arrs + storage := store.Get() + arrStorage := storage.Arr() + newConfigArrs := make([]config.Arr, 0) for _, a := range updatedConfig.Arrs { - _arr.AddOrUpdate(&arr.Arr{ - Name: a.Name, - Host: a.Host, - Token: a.Token, - Cleanup: a.Cleanup, - SkipRepair: 
a.SkipRepair, - DownloadUncached: a.DownloadUncached, - SelectedDebrid: a.SelectedDebrid, - }) + if a.Name == "" || a.Host == "" || a.Token == "" { + // Skip empty or auto-generated arrs + continue + } + newConfigArrs = append(newConfigArrs, a) } - currentConfig.Arrs = updatedConfig.Arrs + currentConfig.Arrs = newConfigArrs + + // Add config arr into the config + for _, a := range currentConfig.Arrs { + if a.Host == "" || a.Token == "" { + continue // Skip empty arrs + } + existingArr := arrStorage.Get(a.Name) + if existingArr != nil { + // Update existing Arr + existingArr.Host = a.Host + existingArr.Token = a.Token + existingArr.Cleanup = a.Cleanup + existingArr.SkipRepair = a.SkipRepair + existingArr.DownloadUncached = a.DownloadUncached + existingArr.SelectedDebrid = a.SelectedDebrid + existingArr.Source = a.Source + arrStorage.AddOrUpdate(existingArr) + } else { + // Create new Arr if it doesn't exist + newArr := arr.New(a.Name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source) + arrStorage.AddOrUpdate(newArr) + } + } + if err := currentConfig.Save(); err != nil { http.Error(w, "Error saving config: "+err.Error(), http.StatusInternalServerError) return diff --git a/pkg/web/templates/config.html b/pkg/web/templates/config.html index 47eb6d0..15dd472 100644 --- a/pkg/web/templates/config.html +++ b/pkg/web/templates/config.html @@ -17,6 +17,37 @@ [data-bs-theme="dark"] .nav-pills .nav-link.active { color: white !important; } + + .config-item.bg-light { + background-color: var(--bs-gray-100) !important; + border-left: 4px solid var(--bs-info) !important; + } + + .config-item input[readonly] { + background-color: var(--bs-gray-200); + opacity: 1; + } + + .config-item select[readonly] { + background-color: var(--bs-gray-200); + pointer-events: none; + } + + /* Dark mode specific overrides */ + [data-bs-theme="dark"] .config-item.bg-light { + background-color: var(--bs-gray-800) !important; + border-left: 4px solid 
var(--bs-info) !important; + } + + [data-bs-theme="dark"] .config-item input[readonly] { + background-color: var(--bs-gray-700); + color: var(--bs-gray-300); + } + + [data-bs-theme="dark"] .config-item select[readonly] { + background-color: var(--bs-gray-700); + color: var(--bs-gray-300); + }
@@ -80,7 +111,8 @@ - @@ -103,7 +135,8 @@ id="bindAddress" name="bind_address" placeholder=""> - Bind address for the application(default is all interface) + Bind address for the application(default is all + interface)
@@ -150,7 +183,8 @@ id="minFileSize" name="min_file_size" placeholder="e.g., 10MB, 1GB"> - Minimum file size to download (Empty for no limit) + Minimum file size to download (Empty for no + limit)
@@ -161,7 +195,8 @@ id="maxFileSize" name="max_file_size" placeholder="e.g., 50GB, 100MB"> - Maximum file size to download (Empty for no limit) + Maximum file size to download (Empty for no + limit)
@@ -172,13 +207,15 @@ id="removeStalledAfter" name="remove_stalled_after" placeholder="e.g., 1m, 30s, 1h"> - Remove torrents that have been stalled for this duration + Remove torrents that have been stalled for this + duration
- +
@@ -196,7 +233,8 @@ - +
@@ -206,23 +244,31 @@
- - Folder where the downloaded files will be stored + + Folder where the downloaded files will be + stored
- +
- - Maximum number of simultaneous local downloads across all torrents + + Maximum number of simultaneous local downloads + across all torrents
- - - Unchecking this caches a tiny part of your file to speed up import + + + Unchecking this caches a tiny part of your file + to speed up import
@@ -231,7 +277,8 @@ - +
@@ -249,7 +296,8 @@ - +
@@ -259,7 +307,8 @@
- +
@@ -268,34 +317,43 @@
- - Interval for the repair process(e.g., 24h, 1d, 03:00, or a crontab) + + Interval for the repair process(e.g., 24h, 1d, + 03:00, or a crontab)
- Number of workers to use for the repair process + Number of workers to use for the repair + process
- - If you have Zurg running, you can use it to speed up the repair process + + If you have Zurg running, you can use it to + speed up the repair process
- +
- Use Internal Webdav for repair(make sure webdav is enabled in the debrid section + Use Internal Webdav for repair(make sure webdav + is enabled in the debrid section
- +
- Automatically process the repair job(delete broken symlinks and searches the arr again) + Automatically process the repair job(delete + broken symlinks and searches the arr again)
@@ -335,15 +393,44 @@
- +
+ + +
API Key for the debrid service
+ +
+ + +
+ Multiple API keys for download (one per line). If empty, main API key will be used. +
+
Path to where you've mounted the debrid files. Usually your rclone path
-
+
Rate limit for the debrid service. Confirm your debrid service rate limit @@ -432,7 +519,17 @@
- +
+ + +
Rclone RC Password for the webdav server
@@ -538,7 +635,7 @@ const filterTemplate = (debridIndex, dirIndex, filterIndex, filterType) => { let placeholder, label; - switch(filterType) { + switch (filterType) { case 'include': placeholder = "Text that should be included in filename"; label = "Include"; @@ -632,58 +729,87 @@ `; }; - const arrTemplate = (index) => ` -
- -
-
- - + const arrTemplate = (index, data = {}) => ` +
+ ${data.source !== 'auto' ? ` + + ` : ` +
+ Auto-detected
-
- - -
-
- - -
-
-
-
- -
-
-
- - + `} +
+ +
+ + + ${data.source === 'auto' ? '' : ''} +
+
+ + +
+
+ +
+ + +
-
-
- - +
+
+ +
-
-
-
- - +
+
+ + +
+
+
+
+ + +
+
+
+
+ + +
-
- `; + `; const debridDirectoryCounts = {}; const directoryFilterCounts = {}; @@ -739,6 +865,7 @@ debridDirectoryCounts[debridIndex]++; return dirIndex; } + function addFilter(debridIndex, dirIndex, filterType, filterValue = "") { const dirKey = `${debridIndex}-${dirIndex}`; if (!directoryFilterCounts[dirKey]) { @@ -771,7 +898,7 @@ } // Main functionality - document.addEventListener('DOMContentLoaded', function() { + document.addEventListener('DOMContentLoaded', function () { let debridCount = 0; let arrCount = 0; let currentStep = 1; @@ -785,21 +912,21 @@ // Step navigation document.querySelectorAll('.nav-link').forEach(navLink => { - navLink.addEventListener('click', function() { + navLink.addEventListener('click', function () { const stepNumber = parseInt(this.getAttribute('data-step')); goToStep(stepNumber); }); }); document.querySelectorAll('.next-step').forEach(button => { - button.addEventListener('click', function() { + button.addEventListener('click', function () { const nextStep = parseInt(this.getAttribute('data-next')); goToStep(nextStep); }); }); document.querySelectorAll('.prev-step').forEach(button => { - button.addEventListener('click', function() { + button.addEventListener('click', function () { const prevStep = parseInt(this.getAttribute('data-prev')); goToStep(prevStep); }); @@ -910,7 +1037,7 @@ addArrConfig(); }); - $(document).on('change', '.useWebdav', function() { + $(document).on('change', '.useWebdav', function () { const webdavConfig = $(this).closest('.config-item').find(`.webdav`); if (webdavConfig.length === 0) return; @@ -953,7 +1080,7 @@ // Save config logic const response = await fetcher('/api/config', { method: 'POST', - headers: { 'Content-Type': 'application/json' }, + headers: {'Content-Type': 'application/json'}, body: JSON.stringify(config) }); @@ -1005,7 +1132,7 @@ if (data.use_webdav && data.directories) { Object.entries(data.directories).forEach(([dirName, dirData]) => { - const dirIndex = addDirectory(debridCount, { name: 
dirName }); + const dirIndex = addDirectory(debridCount, {name: dirName}); // Add filters if available if (dirData.filters) { @@ -1015,6 +1142,20 @@ } }); } + + if (data.download_api_keys && Array.isArray(data.download_api_keys)) { + const downloadKeysTextarea = container.querySelector(`[name="debrid[${debridCount}].download_api_keys"]`); + if (downloadKeysTextarea) { + downloadKeysTextarea.value = data.download_api_keys.join('\n'); + } + } + } + + const downloadKeysTextarea = newDebrid.querySelector(`[name="debrid[${debridCount}].download_api_keys"]`); + if (downloadKeysTextarea) { + downloadKeysTextarea.style.webkitTextSecurity = 'disc'; + downloadKeysTextarea.style.textSecurity = 'disc'; + downloadKeysTextarea.setAttribute('data-password-visible', 'false'); } debridCount++; @@ -1022,11 +1163,10 @@ function addArrConfig(data = {}) { const container = document.getElementById('arrConfigs'); - container.insertAdjacentHTML('beforeend', arrTemplate(arrCount)); + container.insertAdjacentHTML('beforeend', arrTemplate(arrCount, data)); - // Add a delete button to the new arr + // Don't add delete button for auto-detected arrs since it's already handled in template const newArr = container.lastElementChild; - addDeleteButton(newArr, `Delete this arr`); if (data) { Object.entries(data).forEach(([key, value]) => { @@ -1051,7 +1191,7 @@ deleteBtn.innerHTML = ''; deleteBtn.title = tooltip; - deleteBtn.addEventListener('click', function() { + deleteBtn.addEventListener('click', function () { if (confirm('Are you sure you want to delete this item?')) { element.remove(); } @@ -1126,7 +1266,7 @@ const nameInput = document.querySelector(`[name="debrid[${i}].directory[${j}].name"]`); if (nameInput && nameInput.value) { const dirName = nameInput.value; - debrid.directories[dirName] = { filters: {} }; + debrid.directories[dirName] = {filters: {}}; // Get directory key for filter counting const dirKey = `${i}-${j}`; @@ -1146,6 +1286,14 @@ } } + let downloadApiKeysTextarea = 
document.querySelector(`[name="debrid[${i}].download_api_keys"]`); + if (downloadApiKeysTextarea && downloadApiKeysTextarea.value.trim()) { + debrid.download_api_keys = downloadApiKeysTextarea.value + .split('\n') + .map(key => key.trim()) + .filter(key => key.length > 0); + } + if (debrid.name && debrid.api_key) { config.debrids.push(debrid); } @@ -1163,7 +1311,8 @@ cleanup: document.querySelector(`[name="arr[${i}].cleanup"]`).checked, skip_repair: document.querySelector(`[name="arr[${i}].skip_repair"]`).checked, download_uncached: document.querySelector(`[name="arr[${i}].download_uncached"]`).checked, - selectedDebrid: document.querySelector(`[name="arr[${i}].selected_debrid"]`).value + selected_debrid: document.querySelector(`[name="arr[${i}].selected_debrid"]`).value, + source: document.querySelector(`[name="arr[${i}].source"]`).value }; if (arr.name && arr.host) { diff --git a/pkg/web/templates/layout.html b/pkg/web/templates/layout.html index c941403..1b16b59 100644 --- a/pkg/web/templates/layout.html +++ b/pkg/web/templates/layout.html @@ -121,6 +121,45 @@ .theme-toggle:hover { background-color: rgba(128, 128, 128, 0.2); } + .password-toggle-container { + position: relative; + } + + .password-toggle-btn { + position: absolute; + right: 10px; + top: 50%; + transform: translateY(-50%); + background: none; + border: none; + color: #6c757d; + cursor: pointer; + padding: 0; + z-index: 10; + } + + .password-toggle-btn:hover { + color: #495057; + } + + .form-control.has-toggle { + padding-right: 35px; + } + textarea.has-toggle { + -webkit-text-security: disc; + text-security: disc; + font-family: monospace !important; + } + + textarea.has-toggle[data-password-visible="true"] { + -webkit-text-security: none; + text-security: none; + } + + /* Adjust toggle button position for textareas */ + .password-toggle-container textarea.has-toggle ~ .password-toggle-btn { + top: 20px; + }