Random access for RARed RealDebrid torrents (#61)
* feat: AI translated port of RARAR.py in Go * feat: Extract and cache byte ranges of RARed RD torrents * feat: Stream and download files with byte ranges if specified * refactor: Use a more structured data format for byte ranges * fix: Rework streaming to fix error handling * perf: More efficient RAR file pre-processing * feat: Made the RAR unpacker an optional config option * refactor: Remove unnecessary Rar prefix for more idiomatic code * refactor: More appropriate private method declaration * feat: Error handling for parsing RARed torrents with retry requests and EOF validation * fix: Correctly parse unicode file names * fix: Handle special character conversion for RAR torrent file names * refactor: Removed debug logs * feat: Only allow two concurrent RAR unpacking tasks * fix: Include "<" and ">" as unsafe chars for RAR unpacking * refactor: Seperate types into their own file * refactor: Don't read RAR files on reader initialization
This commit is contained in:
committed by
GitHub
parent
7f25599b60
commit
fbd6cd5038
@@ -27,6 +27,7 @@ type Debrid struct {
|
|||||||
CheckCached bool `json:"check_cached,omitempty"`
|
CheckCached bool `json:"check_cached,omitempty"`
|
||||||
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
|
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
|
||||||
Proxy string `json:"proxy,omitempty"`
|
Proxy string `json:"proxy,omitempty"`
|
||||||
|
UnpackRar bool `json:"unpack_rar,omitempty"`
|
||||||
AddSamples bool `json:"add_samples,omitempty"`
|
AddSamples bool `json:"add_samples,omitempty"`
|
||||||
|
|
||||||
UseWebDav bool `json:"use_webdav,omitempty"`
|
UseWebDav bool `json:"use_webdav,omitempty"`
|
||||||
|
|||||||
@@ -3,10 +3,12 @@ package debrid
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
type linkCache struct {
|
type linkCache struct {
|
||||||
@@ -234,3 +236,12 @@ func (c *Cache) IsDownloadLinkInvalid(downloadLink string) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, error) {
|
||||||
|
ct := c.GetTorrentByName(torrentName)
|
||||||
|
if ct == nil {
|
||||||
|
return nil, fmt.Errorf("torrent not found")
|
||||||
|
}
|
||||||
|
file := ct.Files[filename]
|
||||||
|
return file.ByteRange, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,12 +5,6 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/rs/zerolog"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/config"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/request"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
gourl "net/url"
|
gourl "net/url"
|
||||||
@@ -20,6 +14,15 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/config"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/request"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
|
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/rar"
|
||||||
)
|
)
|
||||||
|
|
||||||
type RealDebrid struct {
|
type RealDebrid struct {
|
||||||
@@ -35,10 +38,13 @@ type RealDebrid struct {
|
|||||||
client *request.Client
|
client *request.Client
|
||||||
downloadClient *request.Client
|
downloadClient *request.Client
|
||||||
|
|
||||||
MountPath string
|
MountPath string
|
||||||
logger zerolog.Logger
|
logger zerolog.Logger
|
||||||
checkCached bool
|
UnpackRar bool
|
||||||
addSamples bool
|
|
||||||
|
rarSemaphore chan struct{}
|
||||||
|
checkCached bool
|
||||||
|
addSamples bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(dc config.Debrid) *RealDebrid {
|
func New(dc config.Debrid) *RealDebrid {
|
||||||
@@ -70,6 +76,7 @@ func New(dc config.Debrid) *RealDebrid {
|
|||||||
APIKey: dc.APIKey,
|
APIKey: dc.APIKey,
|
||||||
accounts: accounts,
|
accounts: accounts,
|
||||||
DownloadUncached: dc.DownloadUncached,
|
DownloadUncached: dc.DownloadUncached,
|
||||||
|
UnpackRar: dc.UnpackRar,
|
||||||
client: request.New(
|
client: request.New(
|
||||||
request.WithHeaders(headers),
|
request.WithHeaders(headers),
|
||||||
request.WithRateLimiter(rl),
|
request.WithRateLimiter(rl),
|
||||||
@@ -88,6 +95,7 @@ func New(dc config.Debrid) *RealDebrid {
|
|||||||
currentDownloadKey: currentDownloadKey,
|
currentDownloadKey: currentDownloadKey,
|
||||||
MountPath: dc.Folder,
|
MountPath: dc.Folder,
|
||||||
logger: logger.New(dc.Name),
|
logger: logger.New(dc.Name),
|
||||||
|
rarSemaphore: make(chan struct{}, 2),
|
||||||
checkCached: dc.CheckCached,
|
checkCached: dc.CheckCached,
|
||||||
addSamples: dc.AddSamples,
|
addSamples: dc.AddSamples,
|
||||||
}
|
}
|
||||||
@@ -101,30 +109,127 @@ func (r *RealDebrid) GetLogger() zerolog.Logger {
|
|||||||
return r.logger
|
return r.logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func getSelectedFiles(t *types.Torrent, data torrentInfo) map[string]types.File {
|
func (r *RealDebrid) getSelectedFiles(t *types.Torrent, data torrentInfo) (map[string]types.File, error) {
|
||||||
|
files := make(map[string]types.File)
|
||||||
selectedFiles := make([]types.File, 0)
|
selectedFiles := make([]types.File, 0)
|
||||||
|
|
||||||
for _, f := range data.Files {
|
for _, f := range data.Files {
|
||||||
if f.Selected == 1 {
|
if f.Selected == 1 {
|
||||||
name := filepath.Base(f.Path)
|
selectedFiles = append(selectedFiles, types.File{
|
||||||
file := types.File{
|
|
||||||
TorrentId: t.Id,
|
TorrentId: t.Id,
|
||||||
Name: name,
|
Name: filepath.Base(f.Path),
|
||||||
Path: name,
|
Path: filepath.Base(f.Path),
|
||||||
Size: f.Bytes,
|
Size: f.Bytes,
|
||||||
Id: strconv.Itoa(f.ID),
|
Id: strconv.Itoa(f.ID),
|
||||||
}
|
})
|
||||||
selectedFiles = append(selectedFiles, file)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(selectedFiles) == 0 {
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle RARed torrents (single link, multiple files)
|
||||||
|
if len(data.Links) == 1 && len(selectedFiles) > 1 {
|
||||||
|
return r.handleRarArchive(t, data, selectedFiles)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Standard case - map files to links
|
||||||
|
if len(selectedFiles) > len(data.Links) {
|
||||||
|
r.logger.Warn().Msgf("More files than links available: %d files, %d links for %s", len(selectedFiles), len(data.Links), t.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, f := range selectedFiles {
|
||||||
|
if i < len(data.Links) {
|
||||||
|
f.Link = data.Links[i]
|
||||||
|
files[f.Name] = f
|
||||||
|
} else {
|
||||||
|
r.logger.Warn().Str("file", f.Name).Msg("No link available for file")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleRarArchive processes RAR archives with multiple files
|
||||||
|
func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) {
|
||||||
|
// This will block if 2 RAR operations are already in progress
|
||||||
|
r.rarSemaphore <- struct{}{}
|
||||||
|
defer func() {
|
||||||
|
<-r.rarSemaphore
|
||||||
|
}()
|
||||||
|
|
||||||
files := make(map[string]types.File)
|
files := make(map[string]types.File)
|
||||||
for index, f := range selectedFiles {
|
|
||||||
if index >= len(data.Links) {
|
if !r.UnpackRar {
|
||||||
break
|
r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s", t.Name)
|
||||||
|
// Create a single file representing the RAR archive
|
||||||
|
file := types.File{
|
||||||
|
TorrentId: t.Id,
|
||||||
|
Id: "0",
|
||||||
|
Name: t.Name + ".rar",
|
||||||
|
Size: 0,
|
||||||
|
IsRar: true,
|
||||||
|
ByteRange: nil,
|
||||||
|
Path: t.Name + ".rar",
|
||||||
|
Link: data.Links[0],
|
||||||
|
AccountId: selectedFiles[0].AccountId,
|
||||||
|
Generated: time.Now(),
|
||||||
}
|
}
|
||||||
f.Link = data.Links[index]
|
files[file.Name] = file
|
||||||
files[f.Name] = f
|
return files, nil
|
||||||
}
|
}
|
||||||
return files
|
|
||||||
|
r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name)
|
||||||
|
linkFile := &types.File{TorrentId: t.Id, Link: data.Links[0]}
|
||||||
|
downloadLinkObj, err := r.GetDownloadLink(t, linkFile)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get download link for RAR file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dlLink := downloadLinkObj.DownloadLink
|
||||||
|
reader, err := rar.NewReader(dlLink)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create RAR reader: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rarFiles, err := reader.GetFiles()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read RAR files: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create lookup map for faster matching
|
||||||
|
fileMap := make(map[string]*types.File)
|
||||||
|
for i := range selectedFiles {
|
||||||
|
// RD converts special chars to '_' for RAR file paths
|
||||||
|
// TOOD: there might be more special chars to replace
|
||||||
|
safeName := strings.NewReplacer("|", "_", "\"", "_", "\\", "_", "?", "_", "*", "_", ":", "_", "<", "_", ">", "_").Replace(selectedFiles[i].Name)
|
||||||
|
fileMap[safeName] = &selectedFiles[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, rarFile := range rarFiles {
|
||||||
|
if file, exists := fileMap[rarFile.Name()]; exists {
|
||||||
|
file.IsRar = true
|
||||||
|
file.ByteRange = rarFile.ByteRange()
|
||||||
|
file.Link = data.Links[0]
|
||||||
|
file.DownloadLink = &types.DownloadLink{
|
||||||
|
Link: data.Links[0],
|
||||||
|
DownloadLink: dlLink,
|
||||||
|
Filename: file.Name,
|
||||||
|
Size: file.Size,
|
||||||
|
Generated: time.Now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
files[file.Name] = *file
|
||||||
|
} else if !rarFile.IsDirectory {
|
||||||
|
r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getTorrentFiles returns a list of torrent files from the torrent info
|
// getTorrentFiles returns a list of torrent files from the torrent info
|
||||||
@@ -338,7 +443,8 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
|
|||||||
t.MountPath = r.MountPath
|
t.MountPath = r.MountPath
|
||||||
t.Debrid = r.Name
|
t.Debrid = r.Name
|
||||||
t.Added = data.Added
|
t.Added = data.Added
|
||||||
t.Files = getSelectedFiles(t, data) // Get selected files
|
t.Files, _ = r.getSelectedFiles(t, data) // Get selected files
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -390,7 +496,11 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torre
|
|||||||
return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode)
|
return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode)
|
||||||
}
|
}
|
||||||
} else if status == "downloaded" {
|
} else if status == "downloaded" {
|
||||||
t.Files = getSelectedFiles(t, data) // Get selected files
|
t.Files, err = r.getSelectedFiles(t, data) // Get selected files
|
||||||
|
if err != nil {
|
||||||
|
return t, err
|
||||||
|
}
|
||||||
|
|
||||||
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
|
||||||
if !isSymlink {
|
if !isSymlink {
|
||||||
err = r.GenerateDownloadLinks(t)
|
err = r.GenerateDownloadLinks(t)
|
||||||
|
|||||||
@@ -2,13 +2,14 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/decypharr/internal/logger"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirrobot01/decypharr/internal/logger"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Torrent struct {
|
type Torrent struct {
|
||||||
@@ -99,6 +100,8 @@ type File struct {
|
|||||||
Id string `json:"id"`
|
Id string `json:"id"`
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
|
IsRar bool `json:"is_rar"`
|
||||||
|
ByteRange *[2]int64 `json:"byte_range,omitempty"`
|
||||||
Path string `json:"path"`
|
Path string `json:"path"`
|
||||||
Link string `json:"link"`
|
Link string `json:"link"`
|
||||||
DownloadLink *DownloadLink `json:"-"`
|
DownloadLink *DownloadLink `json:"-"`
|
||||||
|
|||||||
@@ -2,22 +2,30 @@ package qbit
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/cavaliergopher/grab/v3"
|
|
||||||
"github.com/sirrobot01/decypharr/internal/utils"
|
|
||||||
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/cavaliergopher/grab/v3"
|
||||||
|
"github.com/sirrobot01/decypharr/internal/utils"
|
||||||
|
debrid "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Download(client *grab.Client, url, filename string, progressCallback func(int64, int64)) error {
|
func Download(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
|
||||||
req, err := grab.NewRequest(filename, url)
|
req, err := grab.NewRequest(filename, url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set byte range if specified
|
||||||
|
if byterange != nil {
|
||||||
|
byterangeStr := fmt.Sprintf("%d-%d", byterange[0], byterange[1])
|
||||||
|
req.HTTPRequest.Header.Set("Range", "bytes="+byterangeStr)
|
||||||
|
}
|
||||||
|
|
||||||
resp := client.Do(req)
|
resp := client.Do(req)
|
||||||
|
|
||||||
t := time.NewTicker(time.Second * 2)
|
t := time.NewTicker(time.Second * 2)
|
||||||
@@ -107,7 +115,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
|
|||||||
}
|
}
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
q.downloadSemaphore <- struct{}{}
|
q.downloadSemaphore <- struct{}{}
|
||||||
go func(file debridTypes.File) {
|
go func(file debrid.File) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
defer func() { <-q.downloadSemaphore }()
|
defer func() { <-q.downloadSemaphore }()
|
||||||
filename := file.Name
|
filename := file.Name
|
||||||
@@ -116,6 +124,7 @@ func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
|
|||||||
client,
|
client,
|
||||||
file.DownloadLink.DownloadLink,
|
file.DownloadLink.DownloadLink,
|
||||||
filepath.Join(parent, filename),
|
filepath.Join(parent, filename),
|
||||||
|
file.ByteRange,
|
||||||
progressCallback,
|
progressCallback,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -235,7 +244,7 @@ func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
|
|||||||
return torrentSymlinkPath, nil
|
return torrentSymlinkPath, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePath, torrentFolder string) (string, error) {
|
func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) {
|
||||||
files := debridTorrent.Files
|
files := debridTorrent.Files
|
||||||
symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
||||||
err := os.MkdirAll(symlinkPath, os.ModePerm)
|
err := os.MkdirAll(symlinkPath, os.ModePerm)
|
||||||
@@ -243,7 +252,7 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePa
|
|||||||
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
|
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
remainingFiles := make(map[string]debridTypes.File)
|
remainingFiles := make(map[string]debrid.File)
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
remainingFiles[file.Name] = file
|
remainingFiles[file.Name] = file
|
||||||
}
|
}
|
||||||
@@ -300,7 +309,7 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePa
|
|||||||
return symlinkPath, nil
|
return symlinkPath, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) createSymlinks(debridTorrent *debridTypes.Torrent, rclonePath, torrentFolder string) (string, error) {
|
func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrentFolder string) (string, error) {
|
||||||
files := debridTorrent.Files
|
files := debridTorrent.Files
|
||||||
symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
||||||
err := os.MkdirAll(symlinkPath, os.ModePerm)
|
err := os.MkdirAll(symlinkPath, os.ModePerm)
|
||||||
@@ -308,7 +317,7 @@ func (q *QBit) createSymlinks(debridTorrent *debridTypes.Torrent, rclonePath, to
|
|||||||
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
|
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
remainingFiles := make(map[string]debridTypes.File)
|
remainingFiles := make(map[string]debrid.File)
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
remainingFiles[file.Path] = file
|
remainingFiles[file.Path] = file
|
||||||
}
|
}
|
||||||
@@ -364,7 +373,7 @@ func (q *QBit) createSymlinks(debridTorrent *debridTypes.Torrent, rclonePath, to
|
|||||||
return symlinkPath, nil
|
return symlinkPath, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debridTypes.Torrent) (string, error) {
|
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debrid.Torrent) (string, error) {
|
||||||
for {
|
for {
|
||||||
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
|
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
|||||||
701
pkg/rar/rarar.go
Normal file
701
pkg/rar/rarar.go
Normal file
@@ -0,0 +1,701 @@
|
|||||||
|
// Source: https://github.com/eliasbenb/RARAR.py
|
||||||
|
// Note that this code only translates the original Python for RAR3 (not RAR5) support.
|
||||||
|
|
||||||
|
package rar
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Constants from the Python code
|
||||||
|
var (
|
||||||
|
// Chunk sizes
|
||||||
|
DefaultChunkSize = 4096
|
||||||
|
HttpChunkSize = 32768
|
||||||
|
MaxSearchSize = 1 << 20 // 1MB
|
||||||
|
|
||||||
|
// RAR marker and block types
|
||||||
|
Rar3Marker = []byte{0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x00}
|
||||||
|
BlockFile = byte(0x74)
|
||||||
|
BlockHeader = byte(0x73)
|
||||||
|
BlockMarker = byte(0x72)
|
||||||
|
BlockEnd = byte(0x7B)
|
||||||
|
|
||||||
|
// Header flags
|
||||||
|
FlagDirectory = 0xE0
|
||||||
|
FlagHasHighSize = 0x100
|
||||||
|
FlagHasUnicodeName = 0x200
|
||||||
|
FlagHasData = 0x8000
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compression methods
|
||||||
|
var CompressionMethods = map[byte]string{
|
||||||
|
0x30: "Store",
|
||||||
|
0x31: "Fastest",
|
||||||
|
0x32: "Fast",
|
||||||
|
0x33: "Normal",
|
||||||
|
0x34: "Good",
|
||||||
|
0x35: "Best",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error definitions
|
||||||
|
var (
|
||||||
|
ErrMarkerNotFound = errors.New("RAR marker not found within search limit")
|
||||||
|
ErrInvalidFormat = errors.New("invalid RAR format")
|
||||||
|
ErrNetworkError = errors.New("network error")
|
||||||
|
ErrRangeRequestsNotSupported = errors.New("server does not support range requests")
|
||||||
|
ErrCompressionNotSupported = errors.New("compression method not supported")
|
||||||
|
ErrDirectoryExtractNotSupported = errors.New("directory extract not supported")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Name returns the base filename of the file
|
||||||
|
func (f *File) Name() string {
|
||||||
|
if i := strings.LastIndexAny(f.Path, "\\/"); i >= 0 {
|
||||||
|
return f.Path[i+1:]
|
||||||
|
}
|
||||||
|
return f.Path
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) ByteRange() *[2]int64 {
|
||||||
|
return &[2]int64{f.DataOffset, f.DataOffset + f.CompressedSize - 1}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHttpFile(url string) (*HttpFile, error) {
|
||||||
|
client := &http.Client{}
|
||||||
|
file := &HttpFile{
|
||||||
|
URL: url,
|
||||||
|
Position: 0,
|
||||||
|
Client: client,
|
||||||
|
MaxRetries: 3,
|
||||||
|
RetryDelay: time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get file size
|
||||||
|
size, err := file.getFileSize()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get file size: %w", err)
|
||||||
|
}
|
||||||
|
file.FileSize = size
|
||||||
|
|
||||||
|
return file, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *HttpFile) doWithRetry(operation func() (interface{}, error)) (interface{}, error) {
|
||||||
|
var lastErr error
|
||||||
|
for attempt := 0; attempt <= f.MaxRetries; attempt++ {
|
||||||
|
if attempt > 0 {
|
||||||
|
// Jitter + exponential backoff delay
|
||||||
|
delay := f.RetryDelay * time.Duration(1<<uint(attempt-1))
|
||||||
|
jitter := time.Duration(rand.Int63n(int64(delay / 4)))
|
||||||
|
time.Sleep(delay + jitter)
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := operation()
|
||||||
|
if err == nil {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
lastErr = err
|
||||||
|
// Only retry on network errors
|
||||||
|
if !errors.Is(err, ErrNetworkError) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("after %d retries: %w", f.MaxRetries, lastErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFileSize gets the total file size from the server
|
||||||
|
func (f *HttpFile) getFileSize() (int64, error) {
|
||||||
|
result, err := f.doWithRetry(func() (interface{}, error) {
|
||||||
|
resp, err := f.Client.Head(f.URL)
|
||||||
|
if err != nil {
|
||||||
|
return int64(0), fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return int64(0), fmt.Errorf("%w: unexpected status code: %d", ErrNetworkError, resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
contentLength := resp.Header.Get("Content-Length")
|
||||||
|
if contentLength == "" {
|
||||||
|
return int64(0), fmt.Errorf("%w: content length not provided", ErrNetworkError)
|
||||||
|
}
|
||||||
|
|
||||||
|
var size int64
|
||||||
|
_, err = fmt.Sscanf(contentLength, "%d", &size)
|
||||||
|
if err != nil {
|
||||||
|
return int64(0), fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return size, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.(int64), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadAt implements the io.ReaderAt interface
|
||||||
|
func (f *HttpFile) ReadAt(p []byte, off int64) (n int, err error) {
|
||||||
|
if len(p) == 0 {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure we don't read past the end of the file
|
||||||
|
size := int64(len(p))
|
||||||
|
if f.FileSize > 0 {
|
||||||
|
remaining := f.FileSize - off
|
||||||
|
if remaining <= 0 {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
if size > remaining {
|
||||||
|
size = remaining
|
||||||
|
p = p[:size]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := f.doWithRetry(func() (interface{}, error) {
|
||||||
|
// Create HTTP request with Range header
|
||||||
|
req, err := http.NewRequest("GET", f.URL, nil)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
end := off + size - 1
|
||||||
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", off, end))
|
||||||
|
|
||||||
|
// Make the request
|
||||||
|
resp, err := f.Client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// Handle response
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case http.StatusPartialContent:
|
||||||
|
// Read the content
|
||||||
|
bytesRead, err := io.ReadFull(resp.Body, p)
|
||||||
|
return bytesRead, err
|
||||||
|
case http.StatusOK:
|
||||||
|
// Some servers return the full content instead of partial
|
||||||
|
fullData, err := io.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if int64(len(fullData)) <= off {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
end = off + size
|
||||||
|
if int64(len(fullData)) < end {
|
||||||
|
end = int64(len(fullData))
|
||||||
|
}
|
||||||
|
|
||||||
|
copy(p, fullData[off:end])
|
||||||
|
return int(end - off), nil
|
||||||
|
case http.StatusRequestedRangeNotSatisfiable:
|
||||||
|
// We're at EOF
|
||||||
|
return 0, io.EOF
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("%w: unexpected status code: %d", ErrNetworkError, resp.StatusCode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result.(int), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader creates a new RAR3 reader
|
||||||
|
func NewReader(url string) (*Reader, error) {
|
||||||
|
file, err := NewHttpFile(url)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
reader := &Reader{
|
||||||
|
File: file,
|
||||||
|
ChunkSize: HttpChunkSize,
|
||||||
|
Files: make([]*File, 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find RAR marker
|
||||||
|
marker, err := reader.findMarker()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
reader.Marker = marker
|
||||||
|
pos := reader.Marker + int64(len(Rar3Marker)) // Skip marker block
|
||||||
|
|
||||||
|
headerData, err := reader.readBytes(pos, 7)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return nil, ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType != BlockHeader {
|
||||||
|
return nil, ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store the position after the archive header
|
||||||
|
reader.HeaderEndPos = pos + int64(headSize)
|
||||||
|
|
||||||
|
return reader, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readBytes reads a range of bytes from the file
|
||||||
|
func (r *Reader) readBytes(start int64, length int) ([]byte, error) {
|
||||||
|
if length <= 0 {
|
||||||
|
return []byte{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
data := make([]byte, length)
|
||||||
|
n, err := r.File.ReadAt(data, start)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if n < length {
|
||||||
|
// Partial read, return what we got
|
||||||
|
return data[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// findMarker finds the RAR marker in the file
|
||||||
|
func (r *Reader) findMarker() (int64, error) {
|
||||||
|
// First try to find marker in the first chunk
|
||||||
|
firstChunkSize := 8192 // 8KB
|
||||||
|
chunk, err := r.readBytes(0, firstChunkSize)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
markerPos := bytes.Index(chunk, Rar3Marker)
|
||||||
|
if markerPos != -1 {
|
||||||
|
return int64(markerPos), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If not found, continue searching
|
||||||
|
position := int64(firstChunkSize - len(Rar3Marker) + 1)
|
||||||
|
maxSearch := int64(MaxSearchSize)
|
||||||
|
|
||||||
|
for position < maxSearch {
|
||||||
|
chunkSize := min(r.ChunkSize, int(maxSearch-position))
|
||||||
|
chunk, err := r.readBytes(position, chunkSize)
|
||||||
|
if err != nil || len(chunk) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
markerPos := bytes.Index(chunk, Rar3Marker)
|
||||||
|
if markerPos != -1 {
|
||||||
|
return position + int64(markerPos), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move forward by chunk size minus the marker length
|
||||||
|
position += int64(max(1, len(chunk)-len(Rar3Marker)+1))
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, ErrMarkerNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeUnicode decodes RAR3 Unicode encoding.
//
// RAR3 stores long names as an ASCII base string plus a compact stream of
// 2-bit flags and extra bytes (unicodeData). Each 2-bit flag selects how to
// produce the next output character:
//
//	0 - copy the next character from asciiStr
//	1 - emit one byte from unicodeData (high byte 0)
//	2 - emit one byte from unicodeData combined with the current high byte
//	3 - set a new high byte from unicodeData (emits nothing)
//
// If unicodeData is empty, asciiStr is returned unchanged. Any ASCII
// characters left over once the flag stream is exhausted are appended
// verbatim.
//
// NOTE(review): this is a ported decoder; the extended-flag widening
// (top bit set) and the LSB-first flag ordering in the inner loop should
// be verified against a reference RAR3 implementation.
func decodeUnicode(asciiStr string, unicodeData []byte) string {
	if len(unicodeData) == 0 {
		return asciiStr
	}

	result := []rune{}
	asciiPos := 0       // next character to consume from asciiStr
	dataPos := 0        // next byte to consume from unicodeData
	highByte := byte(0) // current high byte applied to flag value 2

	for dataPos < len(unicodeData) {
		flags := unicodeData[dataPos]
		dataPos++

		// Determine the number of character positions this flag byte controls
		var flagBits uint
		var flagCount int
		var bitCount int

		if flags&0x80 != 0 {
			// Extended flag - controls up to 32 characters (16 bit pairs)
			flagBits = uint(flags)
			bitCount = 1
			// Pull in additional flag bytes while the next continuation bit
			// is set, widening flagBits by 8 bits per extra byte.
			for (flagBits&(0x80>>bitCount) != 0) && dataPos < len(unicodeData) {
				flagBits = ((flagBits & ((0x80 >> bitCount) - 1)) << 8) | uint(unicodeData[dataPos])
				dataPos++
				bitCount++
			}
			flagCount = bitCount * 4
		} else {
			// Simple flag - controls 4 characters (4 bit pairs)
			flagBits = uint(flags)
			flagCount = 4
		}

		// Process each 2-bit flag
		for i := 0; i < flagCount; i++ {
			// Stop once both the ASCII string and the data stream are exhausted.
			if asciiPos >= len(asciiStr) && dataPos >= len(unicodeData) {
				break
			}

			flagValue := (flagBits >> (i * 2)) & 0x03

			switch flagValue {
			case 0:
				// Use ASCII character
				if asciiPos < len(asciiStr) {
					result = append(result, rune(asciiStr[asciiPos]))
					asciiPos++
				}
			case 1:
				// Unicode character with high byte 0
				if dataPos < len(unicodeData) {
					result = append(result, rune(unicodeData[dataPos]))
					dataPos++
				}
			case 2:
				// Unicode character with current high byte
				if dataPos < len(unicodeData) {
					lowByte := uint(unicodeData[dataPos])
					dataPos++
					result = append(result, rune(lowByte|(uint(highByte)<<8)))
				}
			case 3:
				// Set new high byte
				if dataPos < len(unicodeData) {
					highByte = unicodeData[dataPos]
					dataPos++
				}
			}
		}
	}

	// Append any remaining ASCII characters
	for asciiPos < len(asciiStr) {
		result = append(result, rune(asciiStr[asciiPos]))
		asciiPos++
	}

	return string(result)
}
|
||||||
|
|
||||||
|
// readFiles reads all file entries in the archive
|
||||||
|
func (r *Reader) readFiles() error {
|
||||||
|
pos := r.Marker
|
||||||
|
pos += int64(len(Rar3Marker)) // Skip marker block
|
||||||
|
|
||||||
|
// Read archive header
|
||||||
|
headerData, err := r.readBytes(pos, 7)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType != BlockHeader {
|
||||||
|
return ErrInvalidFormat
|
||||||
|
}
|
||||||
|
|
||||||
|
pos += int64(headSize) // Skip archive header
|
||||||
|
|
||||||
|
// Track whether we've found the end marker
|
||||||
|
foundEndMarker := false
|
||||||
|
|
||||||
|
// Process file entries
|
||||||
|
for !foundEndMarker {
|
||||||
|
headerData, err := r.readBytes(pos, 7)
|
||||||
|
if err != nil {
|
||||||
|
// Don't stop on EOF, might be temporary network error
|
||||||
|
// For definitive errors, return the error
|
||||||
|
if !errors.Is(err, io.EOF) && !errors.Is(err, ErrNetworkError) {
|
||||||
|
return fmt.Errorf("error reading block header: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we get EOF or network error, retry a few times
|
||||||
|
retryCount := 0
|
||||||
|
maxRetries := 3
|
||||||
|
retryDelay := time.Second
|
||||||
|
|
||||||
|
for retryCount < maxRetries {
|
||||||
|
time.Sleep(retryDelay * time.Duration(1<<uint(retryCount)))
|
||||||
|
retryCount++
|
||||||
|
|
||||||
|
headerData, err = r.readBytes(pos, 7)
|
||||||
|
if err == nil && len(headerData) >= 7 {
|
||||||
|
break // Successfully got data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return fmt.Errorf("failed to read block header after retries: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return fmt.Errorf("incomplete block header at position %d", pos)
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headFlags := int(binary.LittleEndian.Uint16(headerData[3:5]))
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType == BlockEnd {
|
||||||
|
// End of archive
|
||||||
|
foundEndMarker = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if headType == BlockFile {
|
||||||
|
// Get complete header data
|
||||||
|
completeHeader, err := r.readBytes(pos, headSize)
|
||||||
|
if err != nil || len(completeHeader) < headSize {
|
||||||
|
// Retry logic for incomplete headers
|
||||||
|
retryCount := 0
|
||||||
|
maxRetries := 3
|
||||||
|
retryDelay := time.Second
|
||||||
|
|
||||||
|
for retryCount < maxRetries && (err != nil || len(completeHeader) < headSize) {
|
||||||
|
time.Sleep(retryDelay * time.Duration(1<<uint(retryCount)))
|
||||||
|
retryCount++
|
||||||
|
|
||||||
|
completeHeader, err = r.readBytes(pos, headSize)
|
||||||
|
if err == nil && len(completeHeader) >= headSize {
|
||||||
|
break // Successfully got data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(completeHeader) < headSize {
|
||||||
|
return fmt.Errorf("failed to read complete file header after retries: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fileInfo, err := r.parseFileHeader(completeHeader, pos)
|
||||||
|
if err == nil && fileInfo != nil {
|
||||||
|
r.Files = append(r.Files, fileInfo)
|
||||||
|
pos = fileInfo.NextOffset
|
||||||
|
} else {
|
||||||
|
pos += int64(headSize)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Skip non-file block
|
||||||
|
pos += int64(headSize)
|
||||||
|
|
||||||
|
// Skip data if present
|
||||||
|
if headFlags&FlagHasData != 0 {
|
||||||
|
// Read data size
|
||||||
|
sizeData, err := r.readBytes(pos-4, 4)
|
||||||
|
if err != nil || len(sizeData) < 4 {
|
||||||
|
// Retry logic for data size read errors
|
||||||
|
retryCount := 0
|
||||||
|
maxRetries := 3
|
||||||
|
retryDelay := time.Second
|
||||||
|
|
||||||
|
for retryCount < maxRetries && (err != nil || len(sizeData) < 4) {
|
||||||
|
time.Sleep(retryDelay * time.Duration(1<<uint(retryCount)))
|
||||||
|
retryCount++
|
||||||
|
|
||||||
|
sizeData, err = r.readBytes(pos-4, 4)
|
||||||
|
if err == nil && len(sizeData) >= 4 {
|
||||||
|
break // Successfully got data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sizeData) < 4 {
|
||||||
|
return fmt.Errorf("failed to read data size after retries: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dataSize := int64(binary.LittleEndian.Uint32(sizeData))
|
||||||
|
pos += dataSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !foundEndMarker {
|
||||||
|
return fmt.Errorf("end marker not found in archive")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseFileHeader parses a file header and returns file info
|
||||||
|
func (r *Reader) parseFileHeader(headerData []byte, position int64) (*File, error) {
|
||||||
|
if len(headerData) < 7 {
|
||||||
|
return nil, fmt.Errorf("header data too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
headType := headerData[2]
|
||||||
|
headFlags := int(binary.LittleEndian.Uint16(headerData[3:5]))
|
||||||
|
headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
|
||||||
|
|
||||||
|
if headType != BlockFile {
|
||||||
|
return nil, fmt.Errorf("not a file block")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we have enough data
|
||||||
|
if len(headerData) < 32 {
|
||||||
|
return nil, fmt.Errorf("file header too short")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse basic file header fields
|
||||||
|
packSize := binary.LittleEndian.Uint32(headerData[7:11])
|
||||||
|
unpackSize := binary.LittleEndian.Uint32(headerData[11:15])
|
||||||
|
// fileOS := headerData[15]
|
||||||
|
fileCRC := binary.LittleEndian.Uint32(headerData[16:20])
|
||||||
|
// fileTime := binary.LittleEndian.Uint32(headerData[20:24])
|
||||||
|
// unpVer := headerData[24]
|
||||||
|
method := headerData[25]
|
||||||
|
nameSize := binary.LittleEndian.Uint16(headerData[26:28])
|
||||||
|
// fileAttr := binary.LittleEndian.Uint32(headerData[28:32])
|
||||||
|
|
||||||
|
// Handle high pack/unp sizes
|
||||||
|
highPackSize := uint32(0)
|
||||||
|
highUnpSize := uint32(0)
|
||||||
|
|
||||||
|
offset := 32 // Start after basic header fields
|
||||||
|
|
||||||
|
if headFlags&FlagHasHighSize != 0 {
|
||||||
|
if offset+8 <= len(headerData) {
|
||||||
|
highPackSize = binary.LittleEndian.Uint32(headerData[offset : offset+4])
|
||||||
|
highUnpSize = binary.LittleEndian.Uint32(headerData[offset+4 : offset+8])
|
||||||
|
}
|
||||||
|
offset += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate actual sizes
|
||||||
|
fullPackSize := int64(packSize) + (int64(highPackSize) << 32)
|
||||||
|
fullUnpSize := int64(unpackSize) + (int64(highUnpSize) << 32)
|
||||||
|
|
||||||
|
// Read filename
|
||||||
|
var fileName string
|
||||||
|
if offset+int(nameSize) <= len(headerData) {
|
||||||
|
fileNameBytes := headerData[offset : offset+int(nameSize)]
|
||||||
|
|
||||||
|
if headFlags&FlagHasUnicodeName != 0 {
|
||||||
|
zeroPos := bytes.IndexByte(fileNameBytes, 0)
|
||||||
|
if zeroPos != -1 {
|
||||||
|
// Try UTF-8 first
|
||||||
|
asciiPart := fileNameBytes[:zeroPos]
|
||||||
|
if utf8.Valid(asciiPart) {
|
||||||
|
fileName = string(asciiPart)
|
||||||
|
} else {
|
||||||
|
// Fall back to custom decoder
|
||||||
|
asciiStr := string(asciiPart)
|
||||||
|
unicodePart := fileNameBytes[zeroPos+1:]
|
||||||
|
fileName = decodeUnicode(asciiStr, unicodePart)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// No null byte
|
||||||
|
if utf8.Valid(fileNameBytes) {
|
||||||
|
fileName = string(fileNameBytes)
|
||||||
|
} else {
|
||||||
|
fileName = string(fileNameBytes) // Last resort
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Non-Unicode filename
|
||||||
|
if utf8.Valid(fileNameBytes) {
|
||||||
|
fileName = string(fileNameBytes)
|
||||||
|
} else {
|
||||||
|
fileName = string(fileNameBytes) // Fallback
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fileName = fmt.Sprintf("UnknownFile%d", len(r.Files))
|
||||||
|
}
|
||||||
|
|
||||||
|
isDirectory := (headFlags & FlagDirectory) == FlagDirectory
|
||||||
|
|
||||||
|
// Calculate data offsets
|
||||||
|
dataOffset := position + int64(headSize)
|
||||||
|
nextOffset := dataOffset
|
||||||
|
|
||||||
|
// Only add data size if it's not a directory and has data
|
||||||
|
if !isDirectory && headFlags&FlagHasData != 0 {
|
||||||
|
nextOffset += fullPackSize
|
||||||
|
}
|
||||||
|
|
||||||
|
return &File{
|
||||||
|
Path: fileName,
|
||||||
|
Size: fullUnpSize,
|
||||||
|
CompressedSize: fullPackSize,
|
||||||
|
Method: method,
|
||||||
|
CRC: fileCRC,
|
||||||
|
IsDirectory: isDirectory,
|
||||||
|
DataOffset: dataOffset,
|
||||||
|
NextOffset: nextOffset,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFiles returns all files in the archive
|
||||||
|
func (r *Reader) GetFiles() ([]*File, error) {
|
||||||
|
if len(r.Files) == 0 {
|
||||||
|
err := r.readFiles()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.Files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtractFile extracts a file from the archive
|
||||||
|
func (r *Reader) ExtractFile(file *File) ([]byte, error) {
|
||||||
|
if file.IsDirectory {
|
||||||
|
return nil, ErrDirectoryExtractNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only support "Store" method
|
||||||
|
if file.Method != 0x30 { // 0x30 = "Store"
|
||||||
|
return nil, ErrCompressionNotSupported
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.readBytes(file.DataOffset, int(file.CompressedSize))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper functions

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||||
|
|
||||||
|
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
||||||
37
pkg/rar/types.go
Normal file
37
pkg/rar/types.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package rar

import (
	"net/http"
	"time"
)

// File represents a file entry in a RAR archive.
type File struct {
	Path           string // file path as stored in the archive header
	Size           int64  // uncompressed size in bytes
	CompressedSize int64  // packed size in bytes
	Method         byte   // compression method byte (0x30 = "Store")
	CRC            uint32 // CRC value recorded in the file header
	IsDirectory    bool   // true if the entry is a directory
	DataOffset     int64  // absolute offset of this entry's packed data
	NextOffset     int64  // absolute offset of the block following this entry
}

// HttpFile is an access point for a RAR archive served through HTTP.
// NOTE(review): Go naming convention would prefer HTTPFile, but renaming
// would break existing callers.
type HttpFile struct {
	URL        string        // remote location of the archive
	Position   int64         // current read offset within the remote file
	Client     *http.Client  // HTTP client used to fetch archive bytes
	FileSize   int64         // total size of the remote file in bytes
	MaxRetries int           // maximum retry attempts for a failed read
	RetryDelay time.Duration // base delay between retry attempts
}

// Reader reads RAR3 format archives.
type Reader struct {
	File         *HttpFile // underlying HTTP-backed archive
	ChunkSize    int       // size of individual reads, in bytes
	Marker       int64     // offset of the RAR3 marker block
	HeaderEndPos int64     // Position after the archive header
	Files        []*File   // parsed file entries (populated by readFiles)
}
|
||||||
@@ -363,11 +363,18 @@
|
|||||||
<small class="form-text text-muted">Add samples, extras etc when adding torrent to debrid(disabled by default)</small>
|
<small class="form-text text-muted">Add samples, extras etc when adding torrent to debrid(disabled by default)</small>
|
||||||
</div>
|
</div>
|
||||||
<div class="col-md-3">
|
<div class="col-md-3">
|
||||||
|
<div class="form-check me-3">
|
||||||
|
<input type="checkbox" class="form-check-input" name="debrid[${index}].unpack_rar" id="debrid[${index}].unpack_rar">
|
||||||
|
<label class="form-check-label" for="debrid[${index}].unpack_rar">Unpack RAR</label>
|
||||||
|
</div>
|
||||||
|
<small class="form-text text-muted">Preprocess RARed torrents to allow reading the files inside</small>
|
||||||
|
</div>
|
||||||
|
<div class="col-md-4">
|
||||||
<div class="form-check me-3">
|
<div class="form-check me-3">
|
||||||
<input type="checkbox" class="form-check-input useWebdav" name="debrid[${index}].use_webdav" id="debrid[${index}].use_webdav">
|
<input type="checkbox" class="form-check-input useWebdav" name="debrid[${index}].use_webdav" id="debrid[${index}].use_webdav">
|
||||||
<label class="form-check-label" for="debrid[${index}].use_webdav">Enable WebDav Server</label>
|
<label class="form-check-label" for="debrid[${index}].use_webdav">Enable WebDav Server</label>
|
||||||
</div>
|
</div>
|
||||||
<small class="form-text text-muted">Create an internal webdav for this debrid</small>
|
<small class="form-text text-muted">Create an internal webdav for this debrid</small>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="webdav d-none">
|
<div class="webdav d-none">
|
||||||
@@ -1092,6 +1099,7 @@
|
|||||||
rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value,
|
rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value,
|
||||||
download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked,
|
download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked,
|
||||||
check_cached: document.querySelector(`[name="debrid[${i}].check_cached"]`).checked,
|
check_cached: document.querySelector(`[name="debrid[${i}].check_cached"]`).checked,
|
||||||
|
unpack_rar: document.querySelector(`[name="debrid[${i}].unpack_rar"]`).checked,
|
||||||
add_samples: document.querySelector(`[name="debrid[${i}].add_samples"]`).checked,
|
add_samples: document.querySelector(`[name="debrid[${i}].add_samples"]`).checked,
|
||||||
use_webdav: document.querySelector(`[name="debrid[${i}].use_webdav"]`).checked
|
use_webdav: document.querySelector(`[name="debrid[${i}].use_webdav"]`).checked
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -3,12 +3,13 @@ package webdav
|
|||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
|
||||||
)
|
)
|
||||||
|
|
||||||
var sharedClient = &http.Client{
|
var sharedClient = &http.Client{
|
||||||
@@ -76,104 +77,143 @@ func (f *File) getDownloadLink() (string, error) {
|
|||||||
return "", os.ErrNotExist
|
return "", os.ErrNotExist
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *File) stream() (*http.Response, error) {
|
func (f *File) getDownloadByteRange() (*[2]int64, error) {
|
||||||
client := sharedClient // Might be replaced with the custom client
|
byteRange, err := f.cache.GetDownloadByteRange(f.torrentName, f.name)
|
||||||
_log := f.cache.GetLogger()
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
downloadLink string
|
|
||||||
)
|
|
||||||
|
|
||||||
downloadLink, err = f.getDownloadLink()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err)
|
|
||||||
return nil, io.EOF
|
|
||||||
}
|
}
|
||||||
|
return byteRange, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *File) stream() (*http.Response, error) {
|
||||||
|
client := sharedClient
|
||||||
|
_log := f.cache.GetLogger()
|
||||||
|
|
||||||
|
downloadLink, err := f.getDownloadLink()
|
||||||
|
if err != nil {
|
||||||
|
_log.Trace().Msgf("Failed to get download link for %s: %v", f.name, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if downloadLink == "" {
|
if downloadLink == "" {
|
||||||
_log.Trace().Msgf("Failed to get download link for %s. Empty download link", f.name)
|
_log.Trace().Msgf("Failed to get download link for %s. Empty download link", f.name)
|
||||||
return nil, io.EOF
|
return nil, fmt.Errorf("empty download link")
|
||||||
|
}
|
||||||
|
|
||||||
|
byteRange, err := f.getDownloadByteRange()
|
||||||
|
if err != nil {
|
||||||
|
_log.Trace().Msgf("Failed to get download byte range for %s: %v", f.name, err)
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", downloadLink, nil)
|
req, err := http.NewRequest("GET", downloadLink, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_log.Trace().Msgf("Failed to create HTTP request: %s", err)
|
_log.Trace().Msgf("Failed to create HTTP request: %v", err)
|
||||||
return nil, io.EOF
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.offset > 0 {
|
if byteRange == nil {
|
||||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset))
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset)))
|
||||||
|
} else {
|
||||||
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Make the request
|
||||||
resp, err := client.Do(req)
|
resp, err := client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, io.EOF
|
_log.Trace().Msgf("HTTP request failed: %v", err)
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||||
f.downloadLink = ""
|
f.downloadLink = ""
|
||||||
closeResp := func() {
|
|
||||||
_, _ = io.Copy(io.Discard, resp.Body)
|
cleanupResp := func() {
|
||||||
resp.Body.Close()
|
if resp.Body != nil {
|
||||||
|
io.Copy(io.Discard, resp.Body)
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode == http.StatusServiceUnavailable {
|
switch resp.StatusCode {
|
||||||
b, _ := io.ReadAll(resp.Body)
|
case http.StatusServiceUnavailable:
|
||||||
err := resp.Body.Close()
|
// Read the body to check for specific error messages
|
||||||
if err != nil {
|
body, readErr := io.ReadAll(resp.Body)
|
||||||
_log.Trace().Msgf("Failed to close response body: %s", err)
|
resp.Body.Close()
|
||||||
return nil, io.EOF
|
|
||||||
|
if readErr != nil {
|
||||||
|
_log.Trace().Msgf("Failed to read response body: %v", readErr)
|
||||||
|
return nil, fmt.Errorf("failed to read error response: %w", readErr)
|
||||||
}
|
}
|
||||||
if strings.Contains(string(b), "You can not download this file because you have exceeded your traffic on this hoster") {
|
|
||||||
|
bodyStr := string(body)
|
||||||
|
if strings.Contains(bodyStr, "You can not download this file because you have exceeded your traffic on this hoster") {
|
||||||
_log.Trace().Msgf("Bandwidth exceeded for %s. Download token will be disabled if you have more than one", f.name)
|
_log.Trace().Msgf("Bandwidth exceeded for %s. Download token will be disabled if you have more than one", f.name)
|
||||||
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "bandwidth_exceeded")
|
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "bandwidth_exceeded")
|
||||||
// Retry with a different API key if it's available
|
// Retry with a different API key if it's available
|
||||||
return f.stream()
|
return f.stream()
|
||||||
} else {
|
|
||||||
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, string(b))
|
|
||||||
return resp, io.EOF
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} else if resp.StatusCode == http.StatusNotFound {
|
return nil, fmt.Errorf("service unavailable: %s", bodyStr)
|
||||||
closeResp()
|
|
||||||
|
case http.StatusNotFound:
|
||||||
|
cleanupResp()
|
||||||
// Mark download link as not found
|
// Mark download link as not found
|
||||||
// Regenerate a new download link
|
// Regenerate a new download link
|
||||||
|
_log.Trace().Msgf("File not found (404) for %s. Marking link as invalid and regenerating", f.name)
|
||||||
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found")
|
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found")
|
||||||
// Generate a new download link
|
// Generate a new download link
|
||||||
downloadLink, err = f.getDownloadLink()
|
downloadLink, err := f.getDownloadLink()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err)
|
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err)
|
||||||
return nil, io.EOF
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if downloadLink == "" {
|
if downloadLink == "" {
|
||||||
_log.Trace().Msgf("Failed to get download link for %s", f.name)
|
_log.Trace().Msgf("Failed to get download link for %s", f.name)
|
||||||
return nil, io.EOF
|
return nil, fmt.Errorf("failed to regenerate download link")
|
||||||
}
|
|
||||||
req, err = http.NewRequest("GET", downloadLink, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, io.EOF
|
|
||||||
}
|
|
||||||
if f.offset > 0 {
|
|
||||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", f.offset))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err = client.Do(req)
|
req, err := http.NewRequest("GET", downloadLink, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, fmt.Errorf("HTTP request error: %w", err)
|
return nil, err
|
||||||
}
|
}
|
||||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
|
||||||
closeResp()
|
// Set the range header again
|
||||||
// Read the body to consume the response
|
if byteRange == nil {
|
||||||
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", max(0, f.offset)))
|
||||||
|
} else {
|
||||||
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", byteRange[0]+max(0, f.offset)))
|
||||||
|
}
|
||||||
|
|
||||||
|
newResp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if newResp.StatusCode != http.StatusOK && newResp.StatusCode != http.StatusPartialContent {
|
||||||
|
cleanupBody := func() {
|
||||||
|
if newResp.Body != nil {
|
||||||
|
io.Copy(io.Discard, newResp.Body)
|
||||||
|
newResp.Body.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanupBody()
|
||||||
|
_log.Trace().Msgf("Regenerated link also failed with status %d", newResp.StatusCode)
|
||||||
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found")
|
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found")
|
||||||
return resp, io.EOF
|
return nil, fmt.Errorf("failed with status code %d even after link regeneration", newResp.StatusCode)
|
||||||
}
|
}
|
||||||
return resp, nil
|
|
||||||
|
|
||||||
} else {
|
return newResp, nil
|
||||||
closeResp()
|
|
||||||
return resp, io.EOF
|
default:
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
_log.Trace().Msgf("Unexpected status code %d for %s: %s", resp.StatusCode, f.name, string(body))
|
||||||
|
return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body))
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
@@ -196,7 +236,7 @@ func (f *File) Read(p []byte) (n int, err error) {
|
|||||||
|
|
||||||
// If we haven't started streaming the file yet or need to reposition
|
// If we haven't started streaming the file yet or need to reposition
|
||||||
if f.reader == nil || f.seekPending {
|
if f.reader == nil || f.seekPending {
|
||||||
if f.reader != nil && f.seekPending {
|
if f.reader != nil {
|
||||||
f.reader.Close()
|
f.reader.Close()
|
||||||
f.reader = nil
|
f.reader = nil
|
||||||
}
|
}
|
||||||
@@ -207,7 +247,7 @@ func (f *File) Read(p []byte) (n int, err error) {
|
|||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
if resp == nil {
|
if resp == nil {
|
||||||
return 0, io.EOF
|
return 0, fmt.Errorf("stream returned nil response")
|
||||||
}
|
}
|
||||||
|
|
||||||
f.reader = resp.Body
|
f.reader = resp.Body
|
||||||
|
|||||||
Reference in New Issue
Block a user