- Retry RD 502 errors

- Fix issues with re-inserted torrents bugging out
- Fix version.txt
- Massive improvements in importing times
- Fix issues with config.json resetting
- Fix other minor issues
This commit is contained in:
Mukhtar Akere
2025-05-10 01:04:51 +01:00
parent 57de04b164
commit e05c6d5028
27 changed files with 437 additions and 338 deletions

View File

@@ -20,7 +20,7 @@ jobs:
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v4 uses: actions/setup-go@v4
with: with:
go-version: '1.22' go-version: '1.24'
- name: Run GoReleaser - name: Run GoReleaser
uses: goreleaser/goreleaser-action@v5 uses: goreleaser/goreleaser-action@v5

View File

@@ -1,5 +1,5 @@
# Stage 1: Build binaries # Stage 1: Build binaries
FROM --platform=$BUILDPLATFORM golang:1.23-alpine as builder FROM --platform=$BUILDPLATFORM golang:1.24-alpine as builder
ARG TARGETOS ARG TARGETOS
ARG TARGETARCH ARG TARGETARCH
@@ -57,7 +57,7 @@ COPY --from=dirsetup --chown=nonroot:nonroot /app /app
# Metadata # Metadata
ENV LOG_PATH=/app/logs ENV LOG_PATH=/app/logs
EXPOSE 8181 8282 EXPOSE 8282
VOLUME ["/app"] VOLUME ["/app"]
USER nonroot:nonroot USER nonroot:nonroot

View File

@@ -182,7 +182,7 @@ func startServices(ctx context.Context) error {
safeGo(func() error { safeGo(func() error {
err := svc.Repair.Start(ctx) err := svc.Repair.Start(ctx)
if err != nil { if err != nil {
_log.Error().Err(err).Msg("Error during repair") _log.Error().Err(err).Msg("Error starting repair")
} }
return nil // Not propagating repair errors to terminate the app return nil // Not propagating repair errors to terminate the app
}) })

View File

View File

View File

@@ -69,6 +69,9 @@ nav:
- Overview: features/index.md - Overview: features/index.md
- Repair Worker: features/repair-worker.md - Repair Worker: features/repair-worker.md
- WebDAV: features/webdav.md - WebDAV: features/webdav.md
- Guides:
- Overview: guides/index.md
- Setting Up with Rclone: guides/rclone.md
- Changelog: changelog.md - Changelog: changelog.md

9
go.mod
View File

@@ -1,19 +1,19 @@
module github.com/sirrobot01/decypharr module github.com/sirrobot01/decypharr
go 1.23.0 go 1.24
toolchain go1.23.2 toolchain go1.24.3
require ( require (
github.com/anacrolix/torrent v1.55.0 github.com/anacrolix/torrent v1.55.0
github.com/beevik/etree v1.5.0
github.com/cavaliergopher/grab/v3 v3.0.1 github.com/cavaliergopher/grab/v3 v3.0.1
github.com/go-chi/chi/v5 v5.1.0 github.com/go-chi/chi/v5 v5.1.0
github.com/go-co-op/gocron/v2 v2.16.1 github.com/go-co-op/gocron/v2 v2.16.1
github.com/goccy/go-json v0.10.5 github.com/goccy/go-json v0.10.5
github.com/google/uuid v1.6.0 github.com/google/uuid v1.6.0
github.com/gorilla/sessions v1.4.0 github.com/gorilla/sessions v1.4.0
github.com/puzpuzpuz/xsync/v3 v3.5.1 github.com/puzpuzpuz/xsync/v4 v4.1.0
github.com/robfig/cron/v3 v3.0.1
github.com/rs/zerolog v1.33.0 github.com/rs/zerolog v1.33.0
golang.org/x/crypto v0.33.0 golang.org/x/crypto v0.33.0
golang.org/x/net v0.35.0 golang.org/x/net v0.35.0
@@ -34,7 +34,6 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect
golang.org/x/sys v0.30.0 // indirect golang.org/x/sys v0.30.0 // indirect
) )

6
go.sum
View File

@@ -36,8 +36,6 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8= github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE= github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/beevik/etree v1.5.0 h1:iaQZFSDS+3kYZiGoc9uKeOkUY3nYMXOKLl6KIJxiJWs=
github.com/beevik/etree v1.5.0/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs=
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -187,8 +185,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=

View File

@@ -119,19 +119,21 @@ func (c *Config) loadConfig() error {
file, err := os.ReadFile(c.JsonFile()) file, err := os.ReadFile(c.JsonFile())
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
fmt.Printf("Config file not found, creating a new one at %s\n", c.JsonFile())
// Create a default config file if it doesn't exist // Create a default config file if it doesn't exist
if err := c.createConfig(c.Path); err != nil { if err := c.createConfig(c.Path); err != nil {
return fmt.Errorf("failed to create config file: %w", err) return fmt.Errorf("failed to create config file: %w", err)
} }
} else { return c.Save()
}
return fmt.Errorf("error reading config file: %w", err) return fmt.Errorf("error reading config file: %w", err)
} }
} else {
if err := json.Unmarshal(file, &c); err != nil { if err := json.Unmarshal(file, &c); err != nil {
return fmt.Errorf("error unmarshaling config: %w", err) return fmt.Errorf("error unmarshaling config: %w", err)
} }
} c.setDefaults()
return c.Save() return nil
} }
func validateDebrids(debrids []Debrid) error { func validateDebrids(debrids []Debrid) error {
@@ -312,7 +314,7 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
return d return d
} }
func (c *Config) Save() error { func (c *Config) setDefaults() {
for i, debrid := range c.Debrids { for i, debrid := range c.Debrids {
c.Debrids[i] = c.updateDebrid(debrid) c.Debrids[i] = c.updateDebrid(debrid)
} }
@@ -333,6 +335,12 @@ func (c *Config) Save() error {
// Load the auth file // Load the auth file
c.Auth = c.GetAuth() c.Auth = c.GetAuth()
}
func (c *Config) Save() error {
c.setDefaults()
data, err := json.MarshalIndent(c, "", " ") data, err := json.MarshalIndent(c, "", " ")
if err != nil { if err != nil {
return err return err

View File

@@ -382,22 +382,45 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
} }
} }
func Gzip(body []byte) []byte { func Gzip(body []byte, pool *sync.Pool) []byte {
var b bytes.Buffer
if len(body) == 0 { if len(body) == 0 {
return nil return nil
} }
gz := gzip.NewWriter(&b)
_, err := gz.Write(body) var (
buf *bytes.Buffer
ok bool
)
// Check if the pool is nil
if pool == nil {
buf = bytes.NewBuffer(make([]byte, 0, len(body)))
} else {
buf, ok = pool.Get().(*bytes.Buffer)
if !ok || buf == nil {
buf = bytes.NewBuffer(make([]byte, 0, len(body)))
} else {
buf.Reset()
}
defer pool.Put(buf)
}
gz, err := gzip.NewWriterLevel(buf, gzip.BestSpeed)
if err != nil { if err != nil {
return nil return nil
} }
err = gz.Close()
if err != nil { if _, err := gz.Write(body); err != nil {
return nil return nil
} }
return b.Bytes() if err := gz.Close(); err != nil {
return nil
}
result := make([]byte, buf.Len())
copy(result, buf.Bytes())
return result
} }
func Default() *Client { func Default() *Client {

View File

@@ -0,0 +1,34 @@
package utils
import (
"sync"
"time"
)
type Debouncer[T any] struct {
mu sync.Mutex
timer *time.Timer
interval time.Duration
caller func(arg T)
arg T
}
func NewDebouncer[T any](interval time.Duration, caller func(arg T)) *Debouncer[T] {
return &Debouncer[T]{
interval: interval,
caller: caller,
}
}
func (d *Debouncer[T]) Call(arg T) {
d.mu.Lock()
defer d.mu.Unlock()
if d.timer != nil {
d.timer.Stop()
}
d.timer = time.AfterFunc(d.interval, func() {
d.caller(arg)
})
}

View File

@@ -11,7 +11,7 @@ var (
MUSICMATCH = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$" MUSICMATCH = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$"
) )
var SAMPLEMATCH = `(?i)(^|[\\/])(sample|trailer|thumb|special|extras?)s?([\s._-]|$|/)|(\(sample\))|(-\s*sample)` var SAMPLEMATCH = `(?i)(^|[\s/\\])(sample|trailer|thumb|special|extras?)s?[-/]|(\((sample|trailer|thumb|special|extras?)s?\))|(-\s*(sample|trailer|thumb|special|extras?)s?)`
func RegexMatch(regex string, value string) bool { func RegexMatch(regex string, value string) bool {
re := regexp.MustCompile(regex) re := regexp.MustCompile(regex)

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"github.com/go-co-op/gocron/v2" "github.com/go-co-op/gocron/v2"
"github.com/robfig/cron/v3"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@@ -40,14 +41,17 @@ func convertToJD(interval string) (gocron.JobDefinition, error) {
return gocron.DailyJob(1, gocron.NewAtTimes( return gocron.DailyJob(1, gocron.NewAtTimes(
gocron.NewAtTime(uint(t.Hour()), uint(t.Minute()), uint(t.Second())), gocron.NewAtTime(uint(t.Hour()), uint(t.Minute()), uint(t.Second())),
)), nil )), nil
} else {
dur, err := time.ParseDuration(interval)
if err != nil {
return jd, fmt.Errorf("failed to parse duration: %w", err)
} }
jd = gocron.DurationJob(dur)
if _, err := cron.ParseStandard(interval); err == nil {
return gocron.CronJob(interval, false), nil
} }
return jd, nil
if dur, err := time.ParseDuration(interval); err == nil {
return gocron.DurationJob(dur), nil
}
return jd, fmt.Errorf("invalid interval format: %s", interval)
} }
func parseClockTime(s string) (time.Time, bool) { func parseClockTime(s string) (time.Time, bool) {

View File

@@ -2,10 +2,12 @@ package debrid
import ( import (
"bufio" "bufio"
"bytes"
"cmp" "cmp"
"context" "context"
"errors" "errors"
"fmt" "fmt"
"github.com/puzpuzpuz/xsync/v4"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -17,7 +19,6 @@ import (
"github.com/go-co-op/gocron/v2" "github.com/go-co-op/gocron/v2"
"github.com/goccy/go-json" "github.com/goccy/go-json"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
@@ -43,13 +44,6 @@ type CachedTorrent struct {
DuplicateIds []string `json:"duplicate_ids"` DuplicateIds []string `json:"duplicate_ids"`
} }
type downloadLinkCache struct {
Id string
Link string
AccountId string
ExpiresAt time.Time
}
type RepairType string type RepairType string
const ( const (
@@ -64,19 +58,27 @@ type RepairRequest struct {
FileName string FileName string
} }
type PropfindResponse struct {
Data []byte
GzippedData []byte
Ts time.Time
}
type Cache struct { type Cache struct {
dir string dir string
client types.Client client types.Client
logger zerolog.Logger logger zerolog.Logger
torrents *xsync.MapOf[string, string] // key: torrent.Id, value: {torrent_folder_name} torrents *torrentCache
torrentsNames *xsync.MapOf[string, *CachedTorrent] // key: torrent.Name, value: torrent downloadLinks *xsync.Map[string, linkCache]
listings atomic.Value invalidDownloadLinks sync.Map
downloadLinks *xsync.MapOf[string, downloadLinkCache] PropfindResp *xsync.Map[string, PropfindResponse]
invalidDownloadLinks *xsync.MapOf[string, string]
PropfindResp *PropfindCache
folderNaming WebDavFolderNaming folderNaming WebDavFolderNaming
// optimizers
xmlPool sync.Pool
gzipPool sync.Pool
listingDebouncer *utils.Debouncer[bool]
// monitors // monitors
repairRequest sync.Map repairRequest sync.Map
failedToReinsert sync.Map failedToReinsert sync.Map
@@ -91,7 +93,6 @@ type Cache struct {
autoExpiresLinksAfterDuration time.Duration autoExpiresLinksAfterDuration time.Duration
// refresh mutex // refresh mutex
listingRefreshMu sync.RWMutex // for refreshing torrents
downloadLinksRefreshMu sync.RWMutex // for refreshing download links downloadLinksRefreshMu sync.RWMutex // for refreshing download links
torrentsRefreshMu sync.RWMutex // for refreshing torrents torrentsRefreshMu sync.RWMutex // for refreshing torrents
@@ -99,6 +100,8 @@ type Cache struct {
saveSemaphore chan struct{} saveSemaphore chan struct{}
ctx context.Context ctx context.Context
config config.Debrid
} }
func New(dc config.Debrid, client types.Client) *Cache { func New(dc config.Debrid, client types.Client) *Cache {
@@ -110,16 +113,15 @@ func New(dc config.Debrid, client types.Client) *Cache {
if autoExpiresLinksAfter == 0 || err != nil { if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour autoExpiresLinksAfter = 48 * time.Hour
} }
return &Cache{ c := &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: xsync.NewMapOf[string, string](),
torrentsNames: xsync.NewMapOf[string, *CachedTorrent](), torrents: newTorrentCache(),
invalidDownloadLinks: xsync.NewMapOf[string, string](), PropfindResp: xsync.NewMap[string, PropfindResponse](),
PropfindResp: NewPropfindCache(),
client: client, client: client,
logger: logger.New(fmt.Sprintf("%s-webdav", client.GetName())), logger: logger.New(fmt.Sprintf("%s-webdav", client.GetName())),
workers: dc.Workers, workers: dc.Workers,
downloadLinks: xsync.NewMapOf[string, downloadLinkCache](), downloadLinks: xsync.NewMap[string, linkCache](),
torrentRefreshInterval: dc.TorrentsRefreshInterval, torrentRefreshInterval: dc.TorrentsRefreshInterval,
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval, downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
folderNaming: WebDavFolderNaming(dc.FolderNaming), folderNaming: WebDavFolderNaming(dc.FolderNaming),
@@ -127,7 +129,18 @@ func New(dc config.Debrid, client types.Client) *Cache {
saveSemaphore: make(chan struct{}, 50), saveSemaphore: make(chan struct{}, 50),
ctx: context.Background(), ctx: context.Background(),
scheduler: s, scheduler: s,
config: dc,
xmlPool: sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
} }
c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) {
c.RefreshListings(refreshRclone)
})
return c
} }
func (c *Cache) Start(ctx context.Context) error { func (c *Cache) Start(ctx context.Context) error {
@@ -311,7 +324,7 @@ func (c *Cache) Sync() error {
// Write these torrents to the cache // Write these torrents to the cache
c.setTorrents(cachedTorrents, func() { c.setTorrents(cachedTorrents, func() {
go c.RefreshListings(false) c.listingDebouncer.Call(false)
}) // This is set to false, cos it's likely rclone hs not started yet. }) // This is set to false, cos it's likely rclone hs not started yet.
c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents)) c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents))
@@ -382,7 +395,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error {
// Wait for all workers to complete // Wait for all workers to complete
wg.Wait() wg.Wait()
c.RefreshListings(true) // final refresh c.listingDebouncer.Call(false) // final refresh
c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount) c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount)
return nil return nil
} }
@@ -407,21 +420,23 @@ func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string {
} }
func (c *Cache) setTorrent(t *CachedTorrent, callback func(torrent *CachedTorrent)) { func (c *Cache) setTorrent(t *CachedTorrent, callback func(torrent *CachedTorrent)) {
torrentKey := c.GetTorrentFolder(t.Torrent) torrentName := c.GetTorrentFolder(t.Torrent)
c.torrents.Store(t.Id, torrentKey) // Store the torrent id with the folder name(we might change the id after, hence why it's stored here) torrentId := t.Id
if o, ok := c.torrentsNames.Load(torrentKey); ok && o.Id != t.Id { if o, ok := c.torrents.getByName(torrentName); ok && o.Id != t.Id {
// If another torrent with the same name exists, merge the files, if the same file exists, // If another torrent with the same name exists, merge the files, if the same file exists,
// keep the one with the most recent added date // keep the one with the most recent added date
// Save the most recent torrent // Save the most recent torrent
mergedFiles := mergeFiles(t, o) // Useful for merging files across multiple torrents, while keeping the most recent mergedFiles := mergeFiles(t, o) // Useful for merging files across multiple torrents, while keeping the most recent
if o.AddedOn.After(t.AddedOn) { if o.AddedOn.After(t.AddedOn) {
// Swap the new torrent to "become" the old one
t = o t = o
} }
t.Files = mergedFiles t.Files = mergedFiles
} }
c.torrentsNames.Store(torrentKey, t) c.torrents.set(torrentId, torrentName, t)
c.SaveTorrent(t) c.SaveTorrent(t)
if callback != nil { if callback != nil {
callback(t) callback(t)
@@ -430,9 +445,9 @@ func (c *Cache) setTorrent(t *CachedTorrent, callback func(torrent *CachedTorren
func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func()) { func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func()) {
for _, t := range torrents { for _, t := range torrents {
torrentKey := c.GetTorrentFolder(t.Torrent) torrentName := c.GetTorrentFolder(t.Torrent)
c.torrents.Store(t.Id, torrentKey) torrentId := t.Id
if o, ok := c.torrentsNames.Load(torrentKey); ok && o.Id != t.Id { if o, ok := c.torrents.getByName(torrentName); ok && o.Id != t.Id {
// Save the most recent torrent // Save the most recent torrent
mergedFiles := mergeFiles(t, o) // Useful for merging files across multiple torrents, while keeping the most recent mergedFiles := mergeFiles(t, o) // Useful for merging files across multiple torrents, while keeping the most recent
if o.AddedOn.After(t.AddedOn) { if o.AddedOn.After(t.AddedOn) {
@@ -440,7 +455,7 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func())
} }
t.Files = mergedFiles t.Files = mergedFiles
} }
c.torrentsNames.Store(torrentKey, t) c.torrents.set(torrentId, torrentName, t)
} }
c.SaveTorrents() c.SaveTorrents()
if callback != nil { if callback != nil {
@@ -448,11 +463,9 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func())
} }
} }
// GetListing returns a sorted list of torrents(READ-ONLY)
func (c *Cache) GetListing() []os.FileInfo { func (c *Cache) GetListing() []os.FileInfo {
if v, ok := c.listings.Load().([]os.FileInfo); ok { return c.torrents.getListing()
return v
}
return nil
} }
func (c *Cache) Close() error { func (c *Cache) Close() error {
@@ -460,36 +473,28 @@ func (c *Cache) Close() error {
} }
func (c *Cache) GetTorrents() map[string]*CachedTorrent { func (c *Cache) GetTorrents() map[string]*CachedTorrent {
torrents := make(map[string]*CachedTorrent) return c.torrents.getAll()
c.torrentsNames.Range(func(key string, value *CachedTorrent) bool {
torrents[key] = value
return true
})
return torrents
} }
func (c *Cache) GetTorrentByName(name string) *CachedTorrent { func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
if t, ok := c.torrentsNames.Load(name); ok { if torrent, ok := c.torrents.getByName(name); ok {
return t return torrent
} }
return nil return nil
} }
func (c *Cache) GetTorrent(torrentId string) *CachedTorrent { func (c *Cache) GetTorrent(torrentId string) *CachedTorrent {
if name, ok := c.torrents.Load(torrentId); ok { if torrent, ok := c.torrents.getByID(torrentId); ok {
if t, ok := c.torrentsNames.Load(name); ok { return torrent
return t
}
return nil
} }
return nil return nil
} }
func (c *Cache) SaveTorrents() { func (c *Cache) SaveTorrents() {
c.torrentsNames.Range(func(key string, value *CachedTorrent) bool { torrents := c.torrents.getAll()
c.SaveTorrent(value) for _, torrent := range torrents {
return true c.SaveTorrent(torrent)
}) }
} }
func (c *Cache) SaveTorrent(ct *CachedTorrent) { func (c *Cache) SaveTorrent(ct *CachedTorrent) {
@@ -618,7 +623,7 @@ func (c *Cache) AddTorrent(t *types.Torrent) error {
AddedOn: addedOn, AddedOn: addedOn,
} }
c.setTorrent(ct, func(tor *CachedTorrent) { c.setTorrent(ct, func(tor *CachedTorrent) {
go c.RefreshListings(true) c.listingDebouncer.Call(true)
}) })
go c.GenerateDownloadLinks(ct) go c.GenerateDownloadLinks(ct)
return nil return nil
@@ -634,7 +639,7 @@ func (c *Cache) DeleteTorrent(id string) error {
defer c.torrentsRefreshMu.Unlock() defer c.torrentsRefreshMu.Unlock()
if c.deleteTorrent(id, true) { if c.deleteTorrent(id, true) {
c.RefreshListings(true) c.listingDebouncer.Call(true)
c.logger.Trace().Msgf("Torrent %s deleted successfully", id) c.logger.Trace().Msgf("Torrent %s deleted successfully", id)
return nil return nil
} }
@@ -654,15 +659,15 @@ func (c *Cache) validateAndDeleteTorrents(torrents []string) {
}(torrent) }(torrent)
} }
wg.Wait() wg.Wait()
c.RefreshListings(true) c.listingDebouncer.Call(true)
} }
// deleteTorrent deletes the torrent from the cache and debrid service // deleteTorrent deletes the torrent from the cache and debrid service
// It also handles torrents with the same name but different IDs // It also handles torrents with the same name but different IDs
func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool { func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
if torrentName, ok := c.torrents.Load(id); ok { if torrentName, ok := c.torrents.getByIDName(id); ok {
c.torrents.Delete(id) // Delete id from cache c.torrents.removeId(id) // Delete id from cache
defer func() { defer func() {
c.removeFromDB(id) c.removeFromDB(id)
if removeFromDebrid { if removeFromDebrid {
@@ -670,7 +675,7 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
} }
}() // defer delete from debrid }() // defer delete from debrid
if t, ok := c.torrentsNames.Load(torrentName); ok { if t, ok := c.torrents.getByName(torrentName); ok {
newFiles := map[string]types.File{} newFiles := map[string]types.File{}
newId := "" newId := ""
@@ -684,7 +689,7 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
} }
if len(newFiles) == 0 { if len(newFiles) == 0 {
// Delete the torrent since no files are left // Delete the torrent since no files are left
c.torrentsNames.Delete(torrentName) c.torrents.remove(torrentName)
} else { } else {
t.Files = newFiles t.Files = newFiles
newId = cmp.Or(newId, t.Id) newId = cmp.Or(newId, t.Id)
@@ -702,7 +707,7 @@ func (c *Cache) DeleteTorrents(ids []string) {
for _, id := range ids { for _, id := range ids {
_ = c.deleteTorrent(id, true) _ = c.deleteTorrent(id, true)
} }
c.RefreshListings(true) c.listingDebouncer.Call(true)
} }
func (c *Cache) removeFromDB(torrentId string) { func (c *Cache) removeFromDB(torrentId string) {

View File

@@ -93,15 +93,13 @@ func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, over
if len(errs) == 0 { if len(errs) == 0 {
return nil, fmt.Errorf("failed to process torrent: no clients available") return nil, fmt.Errorf("failed to process torrent: no clients available")
} }
var errBuilder strings.Builder if len(errs) == 1 {
errBuilder.WriteString("failed to process torrent:") return nil, fmt.Errorf("failed to process torrent: %w", errs[0])
} else {
for _, e := range errs { errStrings := make([]string, 0, len(errs))
if e != nil { for _, err := range errs {
errBuilder.WriteString("\n") errStrings = append(errStrings, err.Error())
errBuilder.WriteString(e.Error()) }
return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", "))
} }
} }
return nil, fmt.Errorf(errBuilder.String())
}

View File

@@ -8,6 +8,13 @@ import (
"time" "time"
) )
type linkCache struct {
Id string
link string
accountId string
expiresAt time.Time
}
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) { func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
// Check link cache // Check link cache
if dl := c.checkDownloadLink(fileLink); dl != "" { if dl := c.checkDownloadLink(fileLink); dl != "" {
@@ -45,7 +52,6 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin
} }
ct = newCt ct = newCt
file = ct.Files[filename] file = ct.Files[filename]
c.logger.Debug().Str("name", ct.Name).Str("id", ct.Id).Msgf("Reinserted torrent")
} }
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link) c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
@@ -58,7 +64,6 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin
} }
ct = newCt ct = newCt
file = ct.Files[filename] file = ct.Files[filename]
c.logger.Debug().Str("name", ct.Name).Str("id", ct.Id).Msgf("Reinserted torrent")
// Retry getting the download link // Retry getting the download link
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file) downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil { if err != nil {
@@ -98,18 +103,18 @@ func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) {
} }
func (c *Cache) updateDownloadLink(dl *types.DownloadLink) { func (c *Cache) updateDownloadLink(dl *types.DownloadLink) {
c.downloadLinks.Store(dl.Link, downloadLinkCache{ c.downloadLinks.Store(dl.Link, linkCache{
Id: dl.Id, Id: dl.Id,
Link: dl.DownloadLink, link: dl.DownloadLink,
ExpiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration), expiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration),
AccountId: dl.AccountId, accountId: dl.AccountId,
}) })
} }
func (c *Cache) checkDownloadLink(link string) string { func (c *Cache) checkDownloadLink(link string) string {
if dl, ok := c.downloadLinks.Load(link); ok { if dl, ok := c.downloadLinks.Load(link); ok {
if dl.ExpiresAt.After(time.Now()) && !c.IsDownloadLinkInvalid(dl.Link) { if dl.expiresAt.After(time.Now()) && !c.IsDownloadLinkInvalid(dl.link) {
return dl.Link return dl.link
} }
} }
return "" return ""
@@ -120,8 +125,8 @@ func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
// Remove the download api key from active // Remove the download api key from active
if reason == "bandwidth_exceeded" { if reason == "bandwidth_exceeded" {
if dl, ok := c.downloadLinks.Load(link); ok { if dl, ok := c.downloadLinks.Load(link); ok {
if dl.AccountId != "" && dl.Link == downloadLink { if dl.accountId != "" && dl.link == downloadLink {
c.client.DisableAccount(dl.AccountId) c.client.DisableAccount(dl.accountId)
} }
} }
} }

View File

@@ -1,59 +0,0 @@
package debrid
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"path"
"sync"
"time"
)
type PropfindResponse struct {
Data []byte
GzippedData []byte
Ts time.Time
}
type PropfindCache struct {
sync.RWMutex
data map[string]PropfindResponse
}
func NewPropfindCache() *PropfindCache {
return &PropfindCache{
data: make(map[string]PropfindResponse),
}
}
func generateCacheKey(urlPath string) string {
cleanPath := path.Clean(urlPath)
// Create a more collision-resistant key by hashing
h := sha256.New()
h.Write([]byte(fmt.Sprintf("propfind:%s", cleanPath)))
return hex.EncodeToString(h.Sum(nil))
}
func (c *PropfindCache) Get(url string) (PropfindResponse, bool) {
key := generateCacheKey(url)
c.RLock()
defer c.RUnlock()
val, exists := c.data[key]
return val, exists
}
// Set stores an item in the cache
func (c *PropfindCache) Set(url string, value PropfindResponse) {
key := generateCacheKey(url)
c.Lock()
defer c.Unlock()
c.data[key] = value
}
func (c *PropfindCache) Remove(urlPath string) {
key := generateCacheKey(urlPath)
c.Lock()
defer c.Unlock()
delete(c.data, key)
}

View File

@@ -1,15 +1,12 @@
package debrid package debrid
import ( import (
"context"
"fmt" "fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types" "github.com/sirrobot01/decypharr/pkg/debrid/types"
"io" "io"
"net/http" "net/http"
"os" "os"
"sort"
"strings" "strings"
"sync" "sync"
"time" "time"
@@ -31,35 +28,9 @@ func (fi *fileInfo) IsDir() bool { return fi.isDir }
func (fi *fileInfo) Sys() interface{} { return nil } func (fi *fileInfo) Sys() interface{} { return nil }
func (c *Cache) RefreshListings(refreshRclone bool) { func (c *Cache) RefreshListings(refreshRclone bool) {
if c.listingRefreshMu.TryLock() {
defer c.listingRefreshMu.Unlock()
} else {
return
}
// Copy the torrents to a string|time map // Copy the torrents to a string|time map
torrentsTime := make(map[string]time.Time, c.torrentsNames.Size()) c.torrents.refreshListing() // refresh torrent listings
torrents := make([]string, 0, c.torrentsNames.Size())
c.torrentsNames.Range(func(name string, value *CachedTorrent) bool {
torrentsTime[name] = value.AddedOn
torrents = append(torrents, name)
return true
})
// Sort the torrents by name
sort.Strings(torrents)
files := make([]os.FileInfo, 0, len(torrents))
for _, t := range torrents {
files = append(files, &fileInfo{
name: t,
size: 0,
mode: 0755 | os.ModeDir,
modTime: torrentsTime[t],
isDir: true,
})
}
// Atomic store of the complete ready-to-use slice
c.listings.Store(files)
if err := c.refreshParentXml(); err != nil { if err := c.refreshParentXml(); err != nil {
c.logger.Debug().Err(err).Msg("Failed to refresh XML") c.logger.Debug().Err(err).Msg("Failed to refresh XML")
} }
@@ -98,19 +69,19 @@ func (c *Cache) refreshTorrents() {
// Let's implement deleting torrents removed from debrid // Let's implement deleting torrents removed from debrid
deletedTorrents := make([]string, 0) deletedTorrents := make([]string, 0)
c.torrents.Range(func(key string, _ string) bool { for _, id := range c.torrents.getAllIDs() {
if _, exists := currentTorrentIds[key]; !exists { if _, exists := currentTorrentIds[id]; !exists {
deletedTorrents = append(deletedTorrents, key) deletedTorrents = append(deletedTorrents, id)
}
} }
return true
})
// Validate the torrents are truly deleted, then remove them from the cache too // Validate the torrents are truly deleted, then remove them from the cache too
go c.validateAndDeleteTorrents(deletedTorrents) go c.validateAndDeleteTorrents(deletedTorrents)
newTorrents := make([]*types.Torrent, 0) newTorrents := make([]*types.Torrent, 0)
cachedIdsMaps := c.torrents.getIdMaps()
for _, t := range debTorrents { for _, t := range debTorrents {
if _, exists := c.torrents.Load(t.Id); !exists { if _, exists := cachedIdsMaps[t.Id]; !exists {
newTorrents = append(newTorrents, t) newTorrents = append(newTorrents, t)
} }
} }
@@ -144,13 +115,13 @@ func (c *Cache) refreshTorrents() {
close(workChan) close(workChan)
wg.Wait() wg.Wait()
c.RefreshListings(true) c.listingDebouncer.Call(true)
c.logger.Debug().Msgf("Processed %d new torrents", counter) c.logger.Debug().Msgf("Processed %d new torrents", counter)
} }
func (c *Cache) refreshRclone() error { func (c *Cache) refreshRclone() error {
cfg := config.Get().WebDav cfg := c.config
if cfg.RcUrl == "" { if cfg.RcUrl == "" {
return nil return nil
@@ -160,9 +131,8 @@ func (c *Cache) refreshRclone() error {
return nil return nil
} }
// Create an optimized HTTP client
client := &http.Client{ client := &http.Client{
Timeout: 5 * time.Second, Timeout: 10 * time.Second,
Transport: &http.Transport{ Transport: &http.Transport{
MaxIdleConns: 10, MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second, IdleConnTimeout: 30 * time.Second,
@@ -184,11 +154,6 @@ func (c *Cache) refreshRclone() error {
if cfg.RcUser != "" && cfg.RcPass != "" { if cfg.RcUser != "" && cfg.RcPass != "" {
req.SetBasicAuth(cfg.RcUser, cfg.RcPass) req.SetBasicAuth(cfg.RcUser, cfg.RcPass)
} }
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
req = req.WithContext(ctx)
resp, err := client.Do(req) resp, err := client.Do(req)
if err != nil { if err != nil {
return err return err
@@ -196,12 +161,10 @@ func (c *Cache) refreshRclone() error {
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != 200 { if resp.StatusCode != 200 {
// Only read a limited amount of the body on error
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024)) body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body)) return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
} }
// Discard response body to reuse connection
_, _ = io.Copy(io.Discard, resp.Body) _, _ = io.Copy(io.Discard, resp.Body)
return nil return nil
} }
@@ -232,7 +195,7 @@ func (c *Cache) refreshTorrent(torrentId string) *CachedTorrent {
IsComplete: len(torrent.Files) > 0, IsComplete: len(torrent.Files) > 0,
} }
c.setTorrent(ct, func(torrent *CachedTorrent) { c.setTorrent(ct, func(torrent *CachedTorrent) {
go c.RefreshListings(false) c.listingDebouncer.Call(true)
}) })
return ct return ct
@@ -253,11 +216,11 @@ func (c *Cache) refreshDownloadLinks() {
// if link is generated in the last 24 hours, add it to cache // if link is generated in the last 24 hours, add it to cache
timeSince := time.Since(v.Generated) timeSince := time.Since(v.Generated)
if timeSince < c.autoExpiresLinksAfterDuration { if timeSince < c.autoExpiresLinksAfterDuration {
c.downloadLinks.Store(k, downloadLinkCache{ c.downloadLinks.Store(k, linkCache{
Id: v.Id, Id: v.Id,
AccountId: v.AccountId, accountId: v.AccountId,
Link: v.DownloadLink, link: v.DownloadLink,
ExpiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince), expiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince),
}) })
} else { } else {
c.downloadLinks.Delete(k) c.downloadLinks.Delete(k)

View File

@@ -3,7 +3,6 @@ package debrid
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/puzpuzpuz/xsync/v3"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
@@ -184,7 +183,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
} }
// Update the torrent in the cache // Update the torrent in the cache
addedOn, err := time.Parse(time.RFC3339, torrent.Added) addedOn, err := time.Parse(time.RFC3339, newTorrent.Added)
if err != nil { if err != nil {
addedOn = time.Now() addedOn = time.Now()
} }
@@ -201,7 +200,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
IsComplete: len(newTorrent.Files) > 0, IsComplete: len(newTorrent.Files) > 0,
} }
c.setTorrent(ct, func(torrent *CachedTorrent) { c.setTorrent(ct, func(torrent *CachedTorrent) {
go c.RefreshListings(true) c.listingDebouncer.Call(true)
}) })
// We can safely delete the old torrent here // We can safely delete the old torrent here
@@ -214,11 +213,13 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
req.Complete(ct, err) req.Complete(ct, err)
c.failedToReinsert.Delete(oldID) // Delete the old torrent from the failed list c.failedToReinsert.Delete(oldID) // Delete the old torrent from the failed list
c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted")
return ct, nil return ct, nil
} }
func (c *Cache) resetInvalidLinks() { func (c *Cache) resetInvalidLinks() {
c.invalidDownloadLinks = xsync.NewMapOf[string, string]() c.invalidDownloadLinks = sync.Map{}
c.client.ResetActiveDownloadKeys() // Reset the active download keys c.client.ResetActiveDownloadKeys() // Reset the active download keys
c.failedToReinsert = sync.Map{} // Reset the failed to reinsert map c.failedToReinsert = sync.Map{} // Reset the failed to reinsert map
} }

View File

@@ -0,0 +1,164 @@
package debrid
import (
"os"
"sort"
"sync"
"sync/atomic"
"time"
)
// torrentCache is a concurrency-safe index of cached torrents, keyed both by
// debrid torrent ID and by torrent name, plus a lazily rebuilt sorted
// directory listing served to the WebDAV layer.
type torrentCache struct {
	mu sync.RWMutex // guards byID, byName and sortNeeded
	// byID maps a torrent ID to its name (the key into byName).
	byID map[string]string
	// byName maps a torrent name to its cached entry.
	byName map[string]*CachedTorrent
	// listing holds the last built []os.FileInfo snapshot; read lock-free via atomic.Value.
	listing atomic.Value
	// sortNeeded marks the listing snapshot stale. NOTE(review): it is read
	// without the lock in getListing and cleared outside the lock in
	// refreshListing — confirm whether that race is acceptable.
	sortNeeded bool
}
// newTorrentCache returns an empty torrentCache whose listing snapshot is
// pre-initialized to an empty (non-nil) slice, so getListing never observes
// an unset atomic.Value.
func newTorrentCache() *torrentCache {
	cache := &torrentCache{
		byID:   make(map[string]string),
		byName: make(map[string]*CachedTorrent),
	}
	cache.listing.Store(make([]os.FileInfo, 0))
	return cache
}
// getByID resolves a torrent ID to its cached entry by going through the
// ID→name index and then the name→torrent map.
func (tc *torrentCache) getByID(id string) (*CachedTorrent, bool) {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	if name, ok := tc.byID[id]; ok {
		t, found := tc.byName[name]
		return t, found
	}
	return nil, false
}
// getByIDName returns the torrent name registered for the given ID, and
// whether such a mapping exists.
func (tc *torrentCache) getByIDName(id string) (string, bool) {
	tc.mu.RLock()
	name, ok := tc.byID[id]
	tc.mu.RUnlock()
	return name, ok
}
// getByName returns the cached torrent stored under the given name, and
// whether it exists.
func (tc *torrentCache) getByName(name string) (*CachedTorrent, bool) {
	tc.mu.RLock()
	t, ok := tc.byName[name]
	tc.mu.RUnlock()
	return t, ok
}
// set registers a torrent under both its ID and its name, and marks the
// directory listing stale so the next getListing rebuilds it.
func (tc *torrentCache) set(id, name string, torrent *CachedTorrent) {
	tc.mu.Lock()
	tc.byID[id] = name
	tc.byName[name] = torrent
	tc.sortNeeded = true
	tc.mu.Unlock()
}
// getListing returns the current sorted directory listing, rebuilding it only
// when the cache has changed since the last build.
//
// Fix: the original read tc.sortNeeded without holding tc.mu, racing with the
// writers (set/removeId/remove) that mutate it under the lock. The flag is now
// sampled under a read lock; the snapshot itself stays lock-free via
// atomic.Value.
func (tc *torrentCache) getListing() []os.FileInfo {
	tc.mu.RLock()
	stale := tc.sortNeeded
	tc.mu.RUnlock()

	if !stale {
		// Fast path: serve the cached, already-sorted snapshot.
		return tc.listing.Load().([]os.FileInfo)
	}
	// Slow path: rebuild and re-sort the listing.
	return tc.refreshListing()
}
// refreshListing rebuilds the sorted []os.FileInfo snapshot from byName and
// stores it in tc.listing, returning the new slice.
//
// Fixes over the original:
//   - tc.sortNeeded = false was assigned after tc.mu.Unlock() (in both the
//     empty-cache branch and the normal path), i.e. written without the lock
//     while set() writes it under the lock — a data race.
//   - the size check and the snapshot used two separate critical sections,
//     so the size could change in between (TOCTOU).
//
// Now a single critical section snapshots the entries AND clears the dirty
// flag atomically, so a concurrent set() arriving afterwards correctly
// re-marks the listing stale.
func (tc *torrentCache) refreshListing() []os.FileInfo {
	type entry struct {
		name    string
		modTime time.Time
	}

	tc.mu.Lock()
	entries := make([]entry, 0, len(tc.byName))
	for name, torrent := range tc.byName {
		entries = append(entries, entry{name: name, modTime: torrent.AddedOn})
	}
	tc.sortNeeded = false
	tc.mu.Unlock()

	// Sort by name outside the lock; entries is a private snapshot.
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].name < entries[j].name
	})

	// Materialize directory fileInfo objects (directories report size 0).
	files := make([]os.FileInfo, 0, len(entries))
	for _, e := range entries {
		files = append(files, &fileInfo{
			name:    e.name,
			size:    0,
			mode:    0755 | os.ModeDir,
			modTime: e.modTime,
			isDir:   true,
		})
	}

	tc.listing.Store(files)
	return files
}
// getAll returns a shallow copy of the name→torrent map so callers can
// iterate without holding the lock. The *CachedTorrent values are shared,
// not cloned.
//
// Fix: pre-size the destination map to avoid repeated growth while copying.
func (tc *torrentCache) getAll() map[string]*CachedTorrent {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	result := make(map[string]*CachedTorrent, len(tc.byName))
	for name, torrent := range tc.byName {
		result[name] = torrent
	}
	return result
}
// getAllIDs returns a snapshot of every torrent ID currently in the cache.
// Order is unspecified (map iteration order).
func (tc *torrentCache) getAllIDs() []string {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	out := make([]string, 0, len(tc.byID))
	for id := range tc.byID {
		out = append(out, id)
	}
	return out
}
// getIdMaps returns a copy of the ID→name index so callers (e.g. the
// refresh loop checking for new torrents) can consult it without holding
// the lock.
//
// Fix: pre-size the destination map to avoid repeated growth while copying.
func (tc *torrentCache) getIdMaps() map[string]string {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	res := make(map[string]string, len(tc.byID))
	for id, name := range tc.byID {
		res[id] = name
	}
	return res
}
// removeId drops only the ID→name mapping and flags the listing stale.
// NOTE(review): the corresponding byName entry is intentionally left to a
// separate remove() call — confirm callers always pair the two.
func (tc *torrentCache) removeId(id string) {
	tc.mu.Lock()
	delete(tc.byID, id)
	tc.sortNeeded = true
	tc.mu.Unlock()
}
// remove drops the name→torrent entry and flags the listing stale.
// NOTE(review): the ID→name mapping is removed separately via removeId —
// confirm callers always pair the two.
func (tc *torrentCache) remove(name string) {
	tc.mu.Lock()
	delete(tc.byName, name)
	tc.sortNeeded = true
	tc.mu.Unlock()
}

View File

@@ -1,8 +1,8 @@
package debrid package debrid
import ( import (
"bytes"
"fmt" "fmt"
"github.com/beevik/etree"
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/request"
"net/http" "net/http"
"os" "os"
@@ -23,98 +23,42 @@ func (c *Cache) refreshParentXml() error {
} }
func (c *Cache) refreshFolderXml(torrents []os.FileInfo, clientName, parent string) error { func (c *Cache) refreshFolderXml(torrents []os.FileInfo, clientName, parent string) error {
// Define the WebDAV namespace buf := c.xmlPool.Get().(*bytes.Buffer)
davNS := "DAV:" buf.Reset()
defer c.xmlPool.Put(buf)
// Create the root multistatus element // static prefix
doc := etree.NewDocument() buf.WriteString(`<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">`)
doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`) now := time.Now().UTC().Format(http.TimeFormat)
base := fmt.Sprintf("/webdav/%s/%s", clientName, parent)
multistatus := doc.CreateElement("D:multistatus") writeResponse(buf, base+"/", parent, now)
multistatus.CreateAttr("xmlns:D", davNS) for _, t := range torrents {
writeResponse(buf, base+"/"+t.Name()+"/", t.Name(), now)
// Get the current timestamp in RFC1123 format (WebDAV format)
currentTime := time.Now().UTC().Format(http.TimeFormat)
// Add the parent directory
baseUrl := path.Clean(fmt.Sprintf("/webdav/%s/%s", clientName, parent))
parentPath := fmt.Sprintf("%s/", baseUrl)
addDirectoryResponse(multistatus, parentPath, parent, currentTime)
// Add torrents to the XML
for _, torrent := range torrents {
name := torrent.Name()
// Note the path structure change - parent first, then torrent name
torrentPath := fmt.Sprintf("/webdav/%s/%s/%s/",
clientName,
parent,
name,
)
addDirectoryResponse(multistatus, torrentPath, name, currentTime)
} }
buf.WriteString("</D:multistatus>")
// Convert to XML string data := buf.Bytes()
xmlData, err := doc.WriteToBytes() gz := request.Gzip(data, &c.gzipPool)
if err != nil { c.PropfindResp.Store(path.Clean(base), PropfindResponse{Data: data, GzippedData: gz, Ts: time.Now()})
return fmt.Errorf("failed to generate XML: %v", err)
}
res := PropfindResponse{
Data: xmlData,
GzippedData: request.Gzip(xmlData),
Ts: time.Now(),
}
c.PropfindResp.Set(baseUrl, res)
return nil return nil
} }
func addDirectoryResponse(multistatus *etree.Element, href, displayName, modTime string) *etree.Element { func writeResponse(buf *bytes.Buffer, href, name, modTime string) {
responseElem := multistatus.CreateElement("D:response") fmt.Fprintf(buf, `
<D:response>
// Add href - ensure it's properly formatted <D:href>%s</D:href>
hrefElem := responseElem.CreateElement("D:href") <D:propstat>
hrefElem.SetText(href) <D:prop>
<D:resourcetype><D:collection/></D:resourcetype>
// Add propstat <D:displayname>%s</D:displayname>
propstatElem := responseElem.CreateElement("D:propstat") <D:getlastmodified>%s</D:getlastmodified>
<D:getcontenttype>httpd/unix-directory</D:getcontenttype>
// Add prop <D:getcontentlength>0</D:getcontentlength>
propElem := propstatElem.CreateElement("D:prop") <D:supportedlock>
<D:lockentry><D:lockscope><D:exclusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:lockentry>
// Add resource type (collection = directory) </D:supportedlock>
resourceTypeElem := propElem.CreateElement("D:resourcetype") </D:prop>
resourceTypeElem.CreateElement("D:collection") <D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
// Add display name </D:response>`, href, name, modTime)
displayNameElem := propElem.CreateElement("D:displayname")
displayNameElem.SetText(displayName)
// Add last modified time
lastModElem := propElem.CreateElement("D:getlastmodified")
lastModElem.SetText(modTime)
// Add content type for directories
contentTypeElem := propElem.CreateElement("D:getcontenttype")
contentTypeElem.SetText("httpd/unix-directory")
// Add length (size) - directories typically have zero size
contentLengthElem := propElem.CreateElement("D:getcontentlength")
contentLengthElem.SetText("0")
// Add supported lock
lockElem := propElem.CreateElement("D:supportedlock")
lockEntryElem := lockElem.CreateElement("D:lockentry")
lockScopeElem := lockEntryElem.CreateElement("D:lockscope")
lockScopeElem.CreateElement("D:exclusive")
lockTypeElem := lockEntryElem.CreateElement("D:locktype")
lockTypeElem.CreateElement("D:write")
// Add status
statusElem := propstatElem.CreateElement("D:status")
statusElem.SetText("HTTP/1.1 200 OK")
return responseElem
} }

View File

@@ -74,14 +74,14 @@ func New(dc config.Debrid) *RealDebrid {
request.WithRateLimiter(rl), request.WithRateLimiter(rl),
request.WithLogger(_log), request.WithLogger(_log),
request.WithMaxRetries(5), request.WithMaxRetries(5),
request.WithRetryableStatus(429), request.WithRetryableStatus(429, 502),
request.WithProxy(dc.Proxy), request.WithProxy(dc.Proxy),
), ),
downloadClient: request.New( downloadClient: request.New(
request.WithHeaders(downloadHeaders), request.WithHeaders(downloadHeaders),
request.WithLogger(_log), request.WithLogger(_log),
request.WithMaxRetries(10), request.WithMaxRetries(10),
request.WithRetryableStatus(429, 447), request.WithRetryableStatus(429, 447, 502),
request.WithProxy(dc.Proxy), request.WithProxy(dc.Proxy),
), ),
currentDownloadKey: currentDownloadKey, currentDownloadKey: currentDownloadKey,

View File

@@ -290,7 +290,7 @@ func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debridTypes.Torr
} }
func (q *QBit) preCacheFile(name string, filePaths []string) error { func (q *QBit) preCacheFile(name string, filePaths []string) error {
q.logger.Trace().Msgf("Pre-caching file: %s", name) q.logger.Trace().Msgf("Pre-caching torrent: %s", name)
if len(filePaths) == 0 { if len(filePaths) == 0 {
return fmt.Errorf("no file paths provided") return fmt.Errorf("no file paths provided")
} }
@@ -300,7 +300,11 @@ func (q *QBit) preCacheFile(name string, filePaths []string) error {
file, err := os.Open(f) file, err := os.Open(f)
if err != nil { if err != nil {
return err if os.IsNotExist(err) {
// File has probably been moved by arr, return silently
return nil
}
return fmt.Errorf("failed to open file: %s: %v", f, err)
} }
defer file.Close() defer file.Close()

View File

@@ -258,7 +258,7 @@
<div class="col-md-4 mb-3"> <div class="col-md-4 mb-3">
<label class="form-label" for="repair.interval">Interval</label> <label class="form-label" for="repair.interval">Interval</label>
<input type="text" class="form-control" name="repair.interval" id="repair.interval" placeholder="e.g., 24h"> <input type="text" class="form-control" name="repair.interval" id="repair.interval" placeholder="e.g., 24h">
<small class="form-text text-muted">Interval for the repair process(e.g., 24h, 1d, 03:00)</small> <small class="form-text text-muted">Interval for the repair process(e.g., 24h, 1d, 03:00, or a crontab)</small>
</div> </div>
<div class="col-md-5 mb-3"> <div class="col-md-5 mb-3">
<label class="form-label" for="repair.zurg_url">Zurg URL</label> <label class="form-label" for="repair.zurg_url">Zurg URL</label>
@@ -846,10 +846,10 @@
navigator.registerProtocolHandler( navigator.registerProtocolHandler(
'magnet', 'magnet',
`${window.location.origin}/download?magnet=%s`, `${window.location.origin}/download?magnet=%s`,
'DecyphArr' 'Decypharr'
); );
localStorage.setItem('magnetHandler', 'true'); localStorage.setItem('magnetHandler', 'true');
document.getElementById('registerMagnetLink').innerText = '✅ DecyphArr Can Open Magnet Links'; document.getElementById('registerMagnetLink').innerText = '✅ Decypharr Can Open Magnet Links';
document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black'); document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black');
console.log('Registered magnet link handler successfully.'); console.log('Registered magnet link handler successfully.');
} catch (error) { } catch (error) {
@@ -860,7 +860,7 @@
var magnetHandler = localStorage.getItem('magnetHandler'); var magnetHandler = localStorage.getItem('magnetHandler');
if (magnetHandler === 'true') { if (magnetHandler === 'true') {
document.getElementById('registerMagnetLink').innerText = '✅ DecyphArr Can Open Magnet Links'; document.getElementById('registerMagnetLink').innerText = '✅ Decypharr Can Open Magnet Links';
document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black'); document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black');
} }
</script> </script>

View File

@@ -4,7 +4,7 @@
<head> <head>
<meta charset="UTF-8"> <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>DecyphArr - {{.Title}}</title> <title>Decypharr - {{.Title}}</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet"> <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet"> <link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/> <link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/>
@@ -137,7 +137,7 @@
<nav class="navbar navbar-expand-lg navbar-light mb-4"> <nav class="navbar navbar-expand-lg navbar-light mb-4">
<div class="container"> <div class="container">
<a class="navbar-brand" href="/"> <a class="navbar-brand" href="/">
<i class="bi bi-cloud-download me-2"></i>DecyphArr <i class="bi bi-cloud-download me-2"></i>Decypharr
</a> </a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav"> <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav">
<span class="navbar-toggler-icon"></span> <span class="navbar-toggler-icon"></span>

View File

@@ -183,7 +183,6 @@ func (f *File) Read(p []byte) (n int, err error) {
if f.metadataOnly { if f.metadataOnly {
return 0, io.EOF return 0, io.EOF
} }
// If file content is preloaded, read from memory.
if f.content != nil { if f.content != nil {
if f.offset >= int64(len(f.content)) { if f.offset >= int64(len(f.content)) {
return 0, io.EOF return 0, io.EOF

View File

@@ -14,6 +14,7 @@ import (
"path/filepath" "path/filepath"
"slices" "slices"
"strings" "strings"
"sync"
"time" "time"
"github.com/rs/zerolog" "github.com/rs/zerolog"
@@ -30,6 +31,8 @@ type Handler struct {
logger zerolog.Logger logger zerolog.Logger
cache *debrid.Cache cache *debrid.Cache
RootPath string RootPath string
gzipPool sync.Pool
} }
func NewHandler(name string, cache *debrid.Cache, logger zerolog.Logger) *Handler { func NewHandler(name string, cache *debrid.Cache, logger zerolog.Logger) *Handler {
@@ -284,11 +287,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
} }
handler.ServeHTTP(responseRecorder, r) handler.ServeHTTP(responseRecorder, r)
responseData := responseRecorder.Body.Bytes() responseData := responseRecorder.Body.Bytes()
gzippedData := request.Gzip(responseData) gzippedData := request.Gzip(responseData, &h.gzipPool)
// Create compressed version // Create compressed version
h.cache.PropfindResp.Set(cleanPath, debrid.PropfindResponse{ h.cache.PropfindResp.Store(cleanPath, debrid.PropfindResponse{
Data: responseData, Data: responseData,
GzippedData: gzippedData, GzippedData: gzippedData,
Ts: time.Now(), Ts: time.Now(),
@@ -338,7 +341,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return return
} }
if file, ok := fRaw.(*File); ok { // Checks if the file is a torrent file
// .content is nil if the file is a torrent file
// .content means file is preloaded, e.g version.txt
if file, ok := fRaw.(*File); ok && file.content == nil {
link, err := file.getDownloadLink() link, err := file.getDownloadLink()
if err != nil { if err != nil {
h.logger.Trace(). h.logger.Trace().
@@ -450,7 +456,7 @@ func (h *Handler) isParentPath(urlPath string) bool {
} }
func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request, urlPath string) bool { func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request, urlPath string) bool {
respCache, ok := h.cache.PropfindResp.Get(urlPath) respCache, ok := h.cache.PropfindResp.Load(urlPath)
if !ok { if !ok {
return false return false
} }
@@ -458,7 +464,7 @@ func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request,
ttl := h.getCacheTTL(urlPath) ttl := h.getCacheTTL(urlPath)
if time.Since(respCache.Ts) >= ttl { if time.Since(respCache.Ts) >= ttl {
h.cache.PropfindResp.Remove(urlPath) h.cache.PropfindResp.Delete(urlPath)
return false return false
} }
w.Header().Set("Content-Type", "application/xml; charset=utf-8") w.Header().Set("Content-Type", "application/xml; charset=utf-8")