- Retry RD 502 errors

- Fix issues with re-inserted torrents bugging out
- Fix version.txt
- Massive improvements in importing times
- Fix issues with config.json resetting
- Fix other minor issues
This commit is contained in:
Mukhtar Akere
2025-05-10 01:04:51 +01:00
parent 57de04b164
commit e05c6d5028
27 changed files with 437 additions and 338 deletions

View File

@@ -20,7 +20,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.22'
go-version: '1.24'
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v5

View File

@@ -1,5 +1,5 @@
# Stage 1: Build binaries
FROM --platform=$BUILDPLATFORM golang:1.23-alpine as builder
FROM --platform=$BUILDPLATFORM golang:1.24-alpine as builder
ARG TARGETOS
ARG TARGETARCH
@@ -57,7 +57,7 @@ COPY --from=dirsetup --chown=nonroot:nonroot /app /app
# Metadata
ENV LOG_PATH=/app/logs
EXPOSE 8181 8282
EXPOSE 8282
VOLUME ["/app"]
USER nonroot:nonroot

View File

@@ -182,7 +182,7 @@ func startServices(ctx context.Context) error {
safeGo(func() error {
err := svc.Repair.Start(ctx)
if err != nil {
_log.Error().Err(err).Msg("Error during repair")
_log.Error().Err(err).Msg("Error starting repair")
}
return nil // Not propagating repair errors to terminate the app
})

View File

View File

View File

@@ -69,6 +69,9 @@ nav:
- Overview: features/index.md
- Repair Worker: features/repair-worker.md
- WebDAV: features/webdav.md
- Guides:
- Overview: guides/index.md
- Setting Up with Rclone: guides/rclone.md
- Changelog: changelog.md

9
go.mod
View File

@@ -1,19 +1,19 @@
module github.com/sirrobot01/decypharr
go 1.23.0
go 1.24
toolchain go1.23.2
toolchain go1.24.3
require (
github.com/anacrolix/torrent v1.55.0
github.com/beevik/etree v1.5.0
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/go-chi/chi/v5 v5.1.0
github.com/go-co-op/gocron/v2 v2.16.1
github.com/goccy/go-json v0.10.5
github.com/google/uuid v1.6.0
github.com/gorilla/sessions v1.4.0
github.com/puzpuzpuz/xsync/v3 v3.5.1
github.com/puzpuzpuz/xsync/v4 v4.1.0
github.com/robfig/cron/v3 v3.0.1
github.com/rs/zerolog v1.33.0
golang.org/x/crypto v0.33.0
golang.org/x/net v0.35.0
@@ -34,7 +34,6 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
golang.org/x/sys v0.30.0 // indirect
)

6
go.sum
View File

@@ -36,8 +36,6 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/beevik/etree v1.5.0 h1:iaQZFSDS+3kYZiGoc9uKeOkUY3nYMXOKLl6KIJxiJWs=
github.com/beevik/etree v1.5.0/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs=
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -187,8 +185,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=

View File

@@ -119,19 +119,21 @@ func (c *Config) loadConfig() error {
file, err := os.ReadFile(c.JsonFile())
if err != nil {
if os.IsNotExist(err) {
fmt.Printf("Config file not found, creating a new one at %s\n", c.JsonFile())
// Create a default config file if it doesn't exist
if err := c.createConfig(c.Path); err != nil {
return fmt.Errorf("failed to create config file: %w", err)
}
} else {
return c.Save()
}
return fmt.Errorf("error reading config file: %w", err)
}
} else {
if err := json.Unmarshal(file, &c); err != nil {
return fmt.Errorf("error unmarshaling config: %w", err)
}
}
return c.Save()
c.setDefaults()
return nil
}
func validateDebrids(debrids []Debrid) error {
@@ -312,7 +314,7 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
return d
}
func (c *Config) Save() error {
func (c *Config) setDefaults() {
for i, debrid := range c.Debrids {
c.Debrids[i] = c.updateDebrid(debrid)
}
@@ -333,6 +335,12 @@ func (c *Config) Save() error {
// Load the auth file
c.Auth = c.GetAuth()
}
func (c *Config) Save() error {
c.setDefaults()
data, err := json.MarshalIndent(c, "", " ")
if err != nil {
return err

View File

@@ -382,22 +382,45 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
}
}
func Gzip(body []byte) []byte {
var b bytes.Buffer
func Gzip(body []byte, pool *sync.Pool) []byte {
if len(body) == 0 {
return nil
}
gz := gzip.NewWriter(&b)
_, err := gz.Write(body)
var (
buf *bytes.Buffer
ok bool
)
// Check if the pool is nil
if pool == nil {
buf = bytes.NewBuffer(make([]byte, 0, len(body)))
} else {
buf, ok = pool.Get().(*bytes.Buffer)
if !ok || buf == nil {
buf = bytes.NewBuffer(make([]byte, 0, len(body)))
} else {
buf.Reset()
}
defer pool.Put(buf)
}
gz, err := gzip.NewWriterLevel(buf, gzip.BestSpeed)
if err != nil {
return nil
}
err = gz.Close()
if err != nil {
if _, err := gz.Write(body); err != nil {
return nil
}
return b.Bytes()
if err := gz.Close(); err != nil {
return nil
}
result := make([]byte, buf.Len())
copy(result, buf.Bytes())
return result
}
func Default() *Client {

View File

@@ -0,0 +1,34 @@
package utils
import (
"sync"
"time"
)
type Debouncer[T any] struct {
mu sync.Mutex
timer *time.Timer
interval time.Duration
caller func(arg T)
arg T
}
func NewDebouncer[T any](interval time.Duration, caller func(arg T)) *Debouncer[T] {
return &Debouncer[T]{
interval: interval,
caller: caller,
}
}
func (d *Debouncer[T]) Call(arg T) {
d.mu.Lock()
defer d.mu.Unlock()
if d.timer != nil {
d.timer.Stop()
}
d.timer = time.AfterFunc(d.interval, func() {
d.caller(arg)
})
}

View File

@@ -11,7 +11,7 @@ var (
MUSICMATCH = "(?i)(\\.)(mp2|mp3|m4a|m4b|m4p|ogg|oga|opus|wma|wav|wv|flac|ape|aif|aiff|aifc)$"
)
var SAMPLEMATCH = `(?i)(^|[\\/])(sample|trailer|thumb|special|extras?)s?([\s._-]|$|/)|(\(sample\))|(-\s*sample)`
var SAMPLEMATCH = `(?i)(^|[\s/\\])(sample|trailer|thumb|special|extras?)s?[-/]|(\((sample|trailer|thumb|special|extras?)s?\))|(-\s*(sample|trailer|thumb|special|extras?)s?)`
func RegexMatch(regex string, value string) bool {
re := regexp.MustCompile(regex)

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/go-co-op/gocron/v2"
"github.com/robfig/cron/v3"
"strconv"
"strings"
"time"
@@ -40,14 +41,17 @@ func convertToJD(interval string) (gocron.JobDefinition, error) {
return gocron.DailyJob(1, gocron.NewAtTimes(
gocron.NewAtTime(uint(t.Hour()), uint(t.Minute()), uint(t.Second())),
)), nil
} else {
dur, err := time.ParseDuration(interval)
if err != nil {
return jd, fmt.Errorf("failed to parse duration: %w", err)
}
jd = gocron.DurationJob(dur)
if _, err := cron.ParseStandard(interval); err == nil {
return gocron.CronJob(interval, false), nil
}
return jd, nil
if dur, err := time.ParseDuration(interval); err == nil {
return gocron.DurationJob(dur), nil
}
return jd, fmt.Errorf("invalid interval format: %s", interval)
}
func parseClockTime(s string) (time.Time, bool) {

View File

@@ -2,10 +2,12 @@ package debrid
import (
"bufio"
"bytes"
"cmp"
"context"
"errors"
"fmt"
"github.com/puzpuzpuz/xsync/v4"
"os"
"path"
"path/filepath"
@@ -17,7 +19,6 @@ import (
"github.com/go-co-op/gocron/v2"
"github.com/goccy/go-json"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
@@ -43,13 +44,6 @@ type CachedTorrent struct {
DuplicateIds []string `json:"duplicate_ids"`
}
type downloadLinkCache struct {
Id string
Link string
AccountId string
ExpiresAt time.Time
}
type RepairType string
const (
@@ -64,19 +58,27 @@ type RepairRequest struct {
FileName string
}
// PropfindResponse is a cached WebDAV PROPFIND reply: the raw XML body,
// a gzip-compressed copy of it, and the time the entry was built.
type PropfindResponse struct {
	// Data is the raw XML payload.
	Data []byte
	// GzippedData is a gzip-compressed copy of Data.
	GzippedData []byte
	// Ts records when this response was generated.
	Ts time.Time
}
type Cache struct {
dir string
client types.Client
logger zerolog.Logger
torrents *xsync.MapOf[string, string] // key: torrent.Id, value: {torrent_folder_name}
torrentsNames *xsync.MapOf[string, *CachedTorrent] // key: torrent.Name, value: torrent
listings atomic.Value
downloadLinks *xsync.MapOf[string, downloadLinkCache]
invalidDownloadLinks *xsync.MapOf[string, string]
PropfindResp *PropfindCache
torrents *torrentCache
downloadLinks *xsync.Map[string, linkCache]
invalidDownloadLinks sync.Map
PropfindResp *xsync.Map[string, PropfindResponse]
folderNaming WebDavFolderNaming
// optimizers
xmlPool sync.Pool
gzipPool sync.Pool
listingDebouncer *utils.Debouncer[bool]
// monitors
repairRequest sync.Map
failedToReinsert sync.Map
@@ -91,7 +93,6 @@ type Cache struct {
autoExpiresLinksAfterDuration time.Duration
// refresh mutex
listingRefreshMu sync.RWMutex // for refreshing torrents
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
torrentsRefreshMu sync.RWMutex // for refreshing torrents
@@ -99,6 +100,8 @@ type Cache struct {
saveSemaphore chan struct{}
ctx context.Context
config config.Debrid
}
func New(dc config.Debrid, client types.Client) *Cache {
@@ -110,16 +113,15 @@ func New(dc config.Debrid, client types.Client) *Cache {
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
}
return &Cache{
c := &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: xsync.NewMapOf[string, string](),
torrentsNames: xsync.NewMapOf[string, *CachedTorrent](),
invalidDownloadLinks: xsync.NewMapOf[string, string](),
PropfindResp: NewPropfindCache(),
torrents: newTorrentCache(),
PropfindResp: xsync.NewMap[string, PropfindResponse](),
client: client,
logger: logger.New(fmt.Sprintf("%s-webdav", client.GetName())),
workers: dc.Workers,
downloadLinks: xsync.NewMapOf[string, downloadLinkCache](),
downloadLinks: xsync.NewMap[string, linkCache](),
torrentRefreshInterval: dc.TorrentsRefreshInterval,
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
folderNaming: WebDavFolderNaming(dc.FolderNaming),
@@ -127,7 +129,18 @@ func New(dc config.Debrid, client types.Client) *Cache {
saveSemaphore: make(chan struct{}, 50),
ctx: context.Background(),
scheduler: s,
config: dc,
xmlPool: sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
}
c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) {
c.RefreshListings(refreshRclone)
})
return c
}
func (c *Cache) Start(ctx context.Context) error {
@@ -311,7 +324,7 @@ func (c *Cache) Sync() error {
// Write these torrents to the cache
c.setTorrents(cachedTorrents, func() {
go c.RefreshListings(false)
c.listingDebouncer.Call(false)
}) // This is set to false, because it's likely rclone has not started yet.
c.logger.Info().Msgf("Loaded %d torrents from cache", len(cachedTorrents))
@@ -382,7 +395,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error {
// Wait for all workers to complete
wg.Wait()
c.RefreshListings(true) // final refresh
c.listingDebouncer.Call(false) // final refresh
c.logger.Info().Msgf("Sync complete: %d torrents processed, %d errors", len(torrents), errorCount)
return nil
}
@@ -407,21 +420,23 @@ func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string {
}
func (c *Cache) setTorrent(t *CachedTorrent, callback func(torrent *CachedTorrent)) {
torrentKey := c.GetTorrentFolder(t.Torrent)
c.torrents.Store(t.Id, torrentKey) // Store the torrent id with the folder name(we might change the id after, hence why it's stored here)
if o, ok := c.torrentsNames.Load(torrentKey); ok && o.Id != t.Id {
torrentName := c.GetTorrentFolder(t.Torrent)
torrentId := t.Id
if o, ok := c.torrents.getByName(torrentName); ok && o.Id != t.Id {
// If another torrent with the same name exists, merge the files, if the same file exists,
// keep the one with the most recent added date
// Save the most recent torrent
mergedFiles := mergeFiles(t, o) // Useful for merging files across multiple torrents, while keeping the most recent
if o.AddedOn.After(t.AddedOn) {
// Swap the new torrent to "become" the old one
t = o
}
t.Files = mergedFiles
}
c.torrentsNames.Store(torrentKey, t)
c.torrents.set(torrentId, torrentName, t)
c.SaveTorrent(t)
if callback != nil {
callback(t)
@@ -430,9 +445,9 @@ func (c *Cache) setTorrent(t *CachedTorrent, callback func(torrent *CachedTorren
func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func()) {
for _, t := range torrents {
torrentKey := c.GetTorrentFolder(t.Torrent)
c.torrents.Store(t.Id, torrentKey)
if o, ok := c.torrentsNames.Load(torrentKey); ok && o.Id != t.Id {
torrentName := c.GetTorrentFolder(t.Torrent)
torrentId := t.Id
if o, ok := c.torrents.getByName(torrentName); ok && o.Id != t.Id {
// Save the most recent torrent
mergedFiles := mergeFiles(t, o) // Useful for merging files across multiple torrents, while keeping the most recent
if o.AddedOn.After(t.AddedOn) {
@@ -440,7 +455,7 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func())
}
t.Files = mergedFiles
}
c.torrentsNames.Store(torrentKey, t)
c.torrents.set(torrentId, torrentName, t)
}
c.SaveTorrents()
if callback != nil {
@@ -448,11 +463,9 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func())
}
}
// GetListing returns a sorted list of torrents(READ-ONLY)
func (c *Cache) GetListing() []os.FileInfo {
if v, ok := c.listings.Load().([]os.FileInfo); ok {
return v
}
return nil
return c.torrents.getListing()
}
func (c *Cache) Close() error {
@@ -460,36 +473,28 @@ func (c *Cache) Close() error {
}
func (c *Cache) GetTorrents() map[string]*CachedTorrent {
torrents := make(map[string]*CachedTorrent)
c.torrentsNames.Range(func(key string, value *CachedTorrent) bool {
torrents[key] = value
return true
})
return torrents
return c.torrents.getAll()
}
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
if t, ok := c.torrentsNames.Load(name); ok {
return t
if torrent, ok := c.torrents.getByName(name); ok {
return torrent
}
return nil
}
func (c *Cache) GetTorrent(torrentId string) *CachedTorrent {
if name, ok := c.torrents.Load(torrentId); ok {
if t, ok := c.torrentsNames.Load(name); ok {
return t
}
return nil
if torrent, ok := c.torrents.getByID(torrentId); ok {
return torrent
}
return nil
}
func (c *Cache) SaveTorrents() {
c.torrentsNames.Range(func(key string, value *CachedTorrent) bool {
c.SaveTorrent(value)
return true
})
torrents := c.torrents.getAll()
for _, torrent := range torrents {
c.SaveTorrent(torrent)
}
}
func (c *Cache) SaveTorrent(ct *CachedTorrent) {
@@ -618,7 +623,7 @@ func (c *Cache) AddTorrent(t *types.Torrent) error {
AddedOn: addedOn,
}
c.setTorrent(ct, func(tor *CachedTorrent) {
go c.RefreshListings(true)
c.listingDebouncer.Call(true)
})
go c.GenerateDownloadLinks(ct)
return nil
@@ -634,7 +639,7 @@ func (c *Cache) DeleteTorrent(id string) error {
defer c.torrentsRefreshMu.Unlock()
if c.deleteTorrent(id, true) {
c.RefreshListings(true)
c.listingDebouncer.Call(true)
c.logger.Trace().Msgf("Torrent %s deleted successfully", id)
return nil
}
@@ -654,15 +659,15 @@ func (c *Cache) validateAndDeleteTorrents(torrents []string) {
}(torrent)
}
wg.Wait()
c.RefreshListings(true)
c.listingDebouncer.Call(true)
}
// deleteTorrent deletes the torrent from the cache and debrid service
// It also handles torrents with the same name but different IDs
func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
if torrentName, ok := c.torrents.Load(id); ok {
c.torrents.Delete(id) // Delete id from cache
if torrentName, ok := c.torrents.getByIDName(id); ok {
c.torrents.removeId(id) // Delete id from cache
defer func() {
c.removeFromDB(id)
if removeFromDebrid {
@@ -670,7 +675,7 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
}
}() // defer delete from debrid
if t, ok := c.torrentsNames.Load(torrentName); ok {
if t, ok := c.torrents.getByName(torrentName); ok {
newFiles := map[string]types.File{}
newId := ""
@@ -684,7 +689,7 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
}
if len(newFiles) == 0 {
// Delete the torrent since no files are left
c.torrentsNames.Delete(torrentName)
c.torrents.remove(torrentName)
} else {
t.Files = newFiles
newId = cmp.Or(newId, t.Id)
@@ -702,7 +707,7 @@ func (c *Cache) DeleteTorrents(ids []string) {
for _, id := range ids {
_ = c.deleteTorrent(id, true)
}
c.RefreshListings(true)
c.listingDebouncer.Call(true)
}
func (c *Cache) removeFromDB(torrentId string) {

View File

@@ -93,15 +93,13 @@ func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, over
if len(errs) == 0 {
return nil, fmt.Errorf("failed to process torrent: no clients available")
}
var errBuilder strings.Builder
errBuilder.WriteString("failed to process torrent:")
for _, e := range errs {
if e != nil {
errBuilder.WriteString("\n")
errBuilder.WriteString(e.Error())
if len(errs) == 1 {
return nil, fmt.Errorf("failed to process torrent: %w", errs[0])
} else {
errStrings := make([]string, 0, len(errs))
for _, err := range errs {
errStrings = append(errStrings, err.Error())
}
return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", "))
}
}
return nil, fmt.Errorf(errBuilder.String())
}

View File

@@ -8,6 +8,13 @@ import (
"time"
)
// linkCache is an in-memory record of a generated download link, kept so
// repeated requests can reuse the same URL until it expires or is marked
// invalid.
type linkCache struct {
	// Id is the debrid-side identifier of the download link.
	Id string
	// link is the resolved download URL handed back to callers.
	link string
	// accountId identifies the account that generated the link; it is
	// used to disable that account when bandwidth is exceeded.
	accountId string
	// expiresAt is the moment after which the cached link is stale.
	expiresAt time.Time
}
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
// Check link cache
if dl := c.checkDownloadLink(fileLink); dl != "" {
@@ -45,7 +52,6 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin
}
ct = newCt
file = ct.Files[filename]
c.logger.Debug().Str("name", ct.Name).Str("id", ct.Id).Msgf("Reinserted torrent")
}
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
@@ -58,7 +64,6 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (strin
}
ct = newCt
file = ct.Files[filename]
c.logger.Debug().Str("name", ct.Name).Str("id", ct.Id).Msgf("Reinserted torrent")
// Retry getting the download link
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
@@ -98,18 +103,18 @@ func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) {
}
func (c *Cache) updateDownloadLink(dl *types.DownloadLink) {
c.downloadLinks.Store(dl.Link, downloadLinkCache{
c.downloadLinks.Store(dl.Link, linkCache{
Id: dl.Id,
Link: dl.DownloadLink,
ExpiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration),
AccountId: dl.AccountId,
link: dl.DownloadLink,
expiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration),
accountId: dl.AccountId,
})
}
func (c *Cache) checkDownloadLink(link string) string {
if dl, ok := c.downloadLinks.Load(link); ok {
if dl.ExpiresAt.After(time.Now()) && !c.IsDownloadLinkInvalid(dl.Link) {
return dl.Link
if dl.expiresAt.After(time.Now()) && !c.IsDownloadLinkInvalid(dl.link) {
return dl.link
}
}
return ""
@@ -120,8 +125,8 @@ func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
// Remove the download api key from active
if reason == "bandwidth_exceeded" {
if dl, ok := c.downloadLinks.Load(link); ok {
if dl.AccountId != "" && dl.Link == downloadLink {
c.client.DisableAccount(dl.AccountId)
if dl.accountId != "" && dl.link == downloadLink {
c.client.DisableAccount(dl.accountId)
}
}
}

View File

@@ -1,59 +0,0 @@
package debrid
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"path"
"sync"
"time"
)
// PropfindResponse holds a cached PROPFIND payload: the raw XML body, a
// gzip-compressed copy, and the time the entry was generated.
type PropfindResponse struct {
	Data        []byte
	GzippedData []byte
	Ts          time.Time
}

// PropfindCache is a mutex-guarded map of PROPFIND responses keyed by a
// hash of the cleaned request path.
type PropfindCache struct {
	sync.RWMutex
	data map[string]PropfindResponse
}

// NewPropfindCache returns an empty, ready-to-use cache.
func NewPropfindCache() *PropfindCache {
	c := &PropfindCache{}
	c.data = map[string]PropfindResponse{}
	return c
}

// generateCacheKey derives a collision-resistant key from a URL path by
// hashing its cleaned form; paths that clean to the same value (for
// example "/a/b" and "/a/b/") share a key.
func generateCacheKey(urlPath string) string {
	sum := sha256.Sum256([]byte("propfind:" + path.Clean(urlPath)))
	return hex.EncodeToString(sum[:])
}

// Get returns the cached response for url and whether it was present.
func (c *PropfindCache) Get(url string) (PropfindResponse, bool) {
	c.RLock()
	val, exists := c.data[generateCacheKey(url)]
	c.RUnlock()
	return val, exists
}

// Set stores an item in the cache.
func (c *PropfindCache) Set(url string, value PropfindResponse) {
	c.Lock()
	c.data[generateCacheKey(url)] = value
	c.Unlock()
}

// Remove deletes the cached entry for urlPath, if any.
func (c *PropfindCache) Remove(urlPath string) {
	c.Lock()
	delete(c.data, generateCacheKey(urlPath))
	c.Unlock()
}

View File

@@ -1,15 +1,12 @@
package debrid
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
"os"
"sort"
"strings"
"sync"
"time"
// IsDir reports whether the entry describes a directory.
func (fi *fileInfo) IsDir() bool { return fi.isDir }

// Sys returns the underlying data source; always nil for this type.
func (fi *fileInfo) Sys() interface{} { return nil }
func (c *Cache) RefreshListings(refreshRclone bool) {
if c.listingRefreshMu.TryLock() {
defer c.listingRefreshMu.Unlock()
} else {
return
}
// Copy the torrents to a string|time map
torrentsTime := make(map[string]time.Time, c.torrentsNames.Size())
torrents := make([]string, 0, c.torrentsNames.Size())
c.torrentsNames.Range(func(name string, value *CachedTorrent) bool {
torrentsTime[name] = value.AddedOn
torrents = append(torrents, name)
return true
})
c.torrents.refreshListing() // refresh torrent listings
// Sort the torrents by name
sort.Strings(torrents)
files := make([]os.FileInfo, 0, len(torrents))
for _, t := range torrents {
files = append(files, &fileInfo{
name: t,
size: 0,
mode: 0755 | os.ModeDir,
modTime: torrentsTime[t],
isDir: true,
})
}
// Atomic store of the complete ready-to-use slice
c.listings.Store(files)
if err := c.refreshParentXml(); err != nil {
c.logger.Debug().Err(err).Msg("Failed to refresh XML")
}
@@ -98,19 +69,19 @@ func (c *Cache) refreshTorrents() {
// Let's implement deleting torrents removed from debrid
deletedTorrents := make([]string, 0)
c.torrents.Range(func(key string, _ string) bool {
if _, exists := currentTorrentIds[key]; !exists {
deletedTorrents = append(deletedTorrents, key)
for _, id := range c.torrents.getAllIDs() {
if _, exists := currentTorrentIds[id]; !exists {
deletedTorrents = append(deletedTorrents, id)
}
}
return true
})
// Validate the torrents are truly deleted, then remove them from the cache too
go c.validateAndDeleteTorrents(deletedTorrents)
newTorrents := make([]*types.Torrent, 0)
cachedIdsMaps := c.torrents.getIdMaps()
for _, t := range debTorrents {
if _, exists := c.torrents.Load(t.Id); !exists {
if _, exists := cachedIdsMaps[t.Id]; !exists {
newTorrents = append(newTorrents, t)
}
}
@@ -144,13 +115,13 @@ func (c *Cache) refreshTorrents() {
close(workChan)
wg.Wait()
c.RefreshListings(true)
c.listingDebouncer.Call(true)
c.logger.Debug().Msgf("Processed %d new torrents", counter)
}
func (c *Cache) refreshRclone() error {
cfg := config.Get().WebDav
cfg := c.config
if cfg.RcUrl == "" {
return nil
@@ -160,9 +131,8 @@ func (c *Cache) refreshRclone() error {
return nil
}
// Create an optimized HTTP client
client := &http.Client{
Timeout: 5 * time.Second,
Timeout: 10 * time.Second,
Transport: &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
@@ -184,11 +154,6 @@ func (c *Cache) refreshRclone() error {
if cfg.RcUser != "" && cfg.RcPass != "" {
req.SetBasicAuth(cfg.RcUser, cfg.RcPass)
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
req = req.WithContext(ctx)
resp, err := client.Do(req)
if err != nil {
return err
@@ -196,12 +161,10 @@ func (c *Cache) refreshRclone() error {
defer resp.Body.Close()
if resp.StatusCode != 200 {
// Only read a limited amount of the body on error
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
}
// Discard response body to reuse connection
_, _ = io.Copy(io.Discard, resp.Body)
return nil
}
@@ -232,7 +195,7 @@ func (c *Cache) refreshTorrent(torrentId string) *CachedTorrent {
IsComplete: len(torrent.Files) > 0,
}
c.setTorrent(ct, func(torrent *CachedTorrent) {
go c.RefreshListings(false)
c.listingDebouncer.Call(true)
})
return ct
@@ -253,11 +216,11 @@ func (c *Cache) refreshDownloadLinks() {
// if link is generated in the last 24 hours, add it to cache
timeSince := time.Since(v.Generated)
if timeSince < c.autoExpiresLinksAfterDuration {
c.downloadLinks.Store(k, downloadLinkCache{
c.downloadLinks.Store(k, linkCache{
Id: v.Id,
AccountId: v.AccountId,
Link: v.DownloadLink,
ExpiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince),
accountId: v.AccountId,
link: v.DownloadLink,
expiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince),
})
} else {
c.downloadLinks.Delete(k)

View File

@@ -3,7 +3,6 @@ package debrid
import (
"errors"
"fmt"
"github.com/puzpuzpuz/xsync/v3"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
@@ -184,7 +183,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
}
// Update the torrent in the cache
addedOn, err := time.Parse(time.RFC3339, torrent.Added)
addedOn, err := time.Parse(time.RFC3339, newTorrent.Added)
if err != nil {
addedOn = time.Now()
}
@@ -201,7 +200,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
IsComplete: len(newTorrent.Files) > 0,
}
c.setTorrent(ct, func(torrent *CachedTorrent) {
go c.RefreshListings(true)
c.listingDebouncer.Call(true)
})
// We can safely delete the old torrent here
@@ -214,11 +213,13 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
req.Complete(ct, err)
c.failedToReinsert.Delete(oldID) // Delete the old torrent from the failed list
c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted")
return ct, nil
}
func (c *Cache) resetInvalidLinks() {
c.invalidDownloadLinks = xsync.NewMapOf[string, string]()
c.invalidDownloadLinks = sync.Map{}
c.client.ResetActiveDownloadKeys() // Reset the active download keys
c.failedToReinsert = sync.Map{} // Reset the failed to reinsert map
}

View File

@@ -0,0 +1,164 @@
package debrid
import (
"os"
"sort"
"sync"
"sync/atomic"
"time"
)
// torrentCache indexes cached torrents two ways: byID maps a torrent ID
// to its folder name, and byName maps a folder name to the torrent
// itself. listing holds an atomically swapped, name-sorted os.FileInfo
// snapshot built from byName; sortNeeded flags that the snapshot is
// stale. mu guards byID and byName.
// NOTE(review): sortNeeded is read in getListing and cleared in
// refreshListing without holding mu while writers set it under mu —
// looks like a data race; confirm with `go test -race`.
type torrentCache struct {
	mu         sync.RWMutex
	byID       map[string]string
	byName     map[string]*CachedTorrent
	listing    atomic.Value
	sortNeeded bool
}
// newTorrentCache builds an empty torrentCache with a pre-initialized,
// zero-length listing snapshot so getListing never observes an empty
// atomic.Value.
func newTorrentCache() *torrentCache {
	tc := &torrentCache{
		byID:   map[string]string{},
		byName: map[string]*CachedTorrent{},
	}
	tc.listing.Store([]os.FileInfo{})
	return tc
}
// getByID resolves a torrent ID to its cached torrent by first mapping
// the ID to a folder name and then looking that name up. Returns false
// when either mapping is missing.
func (tc *torrentCache) getByID(id string) (*CachedTorrent, bool) {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	name, ok := tc.byID[id]
	if !ok {
		return nil, false
	}
	torrent, ok := tc.byName[name]
	return torrent, ok
}
// getByIDName returns the folder name registered for a torrent ID.
func (tc *torrentCache) getByIDName(id string) (string, bool) {
	tc.mu.RLock()
	name, ok := tc.byID[id]
	tc.mu.RUnlock()
	return name, ok
}
// getByName returns the cached torrent stored under the given folder name.
func (tc *torrentCache) getByName(name string) (*CachedTorrent, bool) {
	tc.mu.RLock()
	t, ok := tc.byName[name]
	tc.mu.RUnlock()
	return t, ok
}
// set registers a torrent under both its ID and its folder name and marks
// the sorted listing as stale.
func (tc *torrentCache) set(id, name string, torrent *CachedTorrent) {
	tc.mu.Lock()
	tc.byName[name] = torrent
	tc.byID[id] = name
	tc.sortNeeded = true
	tc.mu.Unlock()
}
// getListing returns the cached, name-sorted directory listing,
// rebuilding it only when the underlying maps have changed since the last
// sort.
func (tc *torrentCache) getListing() []os.FileInfo {
	// sortNeeded is written under tc.mu by set/remove/removeId, so it
	// must also be read under the lock to avoid a data race. (The listing
	// snapshot itself is an atomic.Value and needs no lock.)
	tc.mu.RLock()
	dirty := tc.sortNeeded
	tc.mu.RUnlock()

	if !dirty {
		// Fast path: serve the last sorted snapshot.
		return tc.listing.Load().([]os.FileInfo)
	}
	// Slow path: rebuild and re-sort the listing.
	return tc.refreshListing()
}
// refreshListing rebuilds the sorted os.FileInfo snapshot from byName,
// stores it atomically, and returns it.
//
// The original implementation cleared tc.sortNeeded outside the lock
// (a data race with set/remove) and only after storing the snapshot,
// which could erase a concurrent writer's dirty flag and leave a stale
// listing permanently cached. Here the flag is cleared under tc.mu while
// the snapshot is taken: if a writer dirties the cache afterwards, the
// flag stays set and the next getListing rebuilds again.
func (tc *torrentCache) refreshListing() []os.FileInfo {
	type entry struct {
		name    string
		modTime time.Time
	}

	// Snapshot names and timestamps, and clear the dirty flag, in one
	// critical section.
	tc.mu.Lock()
	entries := make([]entry, 0, len(tc.byName))
	for name, torrent := range tc.byName {
		entries = append(entries, entry{name: name, modTime: torrent.AddedOn})
	}
	tc.sortNeeded = false
	tc.mu.Unlock()

	// Sort by name outside the lock.
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].name < entries[j].name
	})

	// Materialize directory fileInfo objects (always non-nil, possibly
	// empty, so the atomic.Value holds a consistent concrete type).
	files := make([]os.FileInfo, 0, len(entries))
	for _, e := range entries {
		files = append(files, &fileInfo{
			name:    e.name,
			size:    0,
			mode:    0755 | os.ModeDir,
			modTime: e.modTime,
			isDir:   true,
		})
	}
	tc.listing.Store(files)
	return files
}
// getAll returns a copy of the name→torrent map so callers can iterate
// without holding the cache lock. The pointed-to torrents are shared, not
// deep-copied.
func (tc *torrentCache) getAll() map[string]*CachedTorrent {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	// Pre-size the copy to avoid incremental rehashing while filling it.
	result := make(map[string]*CachedTorrent, len(tc.byName))
	for name, torrent := range tc.byName {
		result[name] = torrent
	}
	return result
}
// getAllIDs returns a snapshot slice of every torrent ID currently known
// to the cache.
func (tc *torrentCache) getAllIDs() []string {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	out := make([]string, 0, len(tc.byID))
	for id := range tc.byID {
		out = append(out, id)
	}
	return out
}
// getIdMaps returns a copy of the ID→folder-name index so callers can
// consult it without holding the cache lock.
func (tc *torrentCache) getIdMaps() map[string]string {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	// Pre-size the copy to avoid incremental rehashing while filling it.
	res := make(map[string]string, len(tc.byID))
	for id, name := range tc.byID {
		res[id] = name
	}
	return res
}
// removeId drops only the ID→name mapping for a torrent; the byName entry
// (and any other IDs pointing at the same name) are left untouched.
func (tc *torrentCache) removeId(id string) {
	tc.mu.Lock()
	delete(tc.byID, id)
	tc.sortNeeded = true
	tc.mu.Unlock()
}
// remove drops the torrent stored under name and marks the listing stale.
// byID entries that still point at this name are not cleaned up here.
func (tc *torrentCache) remove(name string) {
	tc.mu.Lock()
	delete(tc.byName, name)
	tc.sortNeeded = true
	tc.mu.Unlock()
}

View File

@@ -1,8 +1,8 @@
package debrid
import (
"bytes"
"fmt"
"github.com/beevik/etree"
"github.com/sirrobot01/decypharr/internal/request"
"net/http"
"os"
@@ -23,98 +23,42 @@ func (c *Cache) refreshParentXml() error {
}
func (c *Cache) refreshFolderXml(torrents []os.FileInfo, clientName, parent string) error {
// Define the WebDAV namespace
davNS := "DAV:"
buf := c.xmlPool.Get().(*bytes.Buffer)
buf.Reset()
defer c.xmlPool.Put(buf)
// Create the root multistatus element
doc := etree.NewDocument()
doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
multistatus := doc.CreateElement("D:multistatus")
multistatus.CreateAttr("xmlns:D", davNS)
// Get the current timestamp in RFC1123 format (WebDAV format)
currentTime := time.Now().UTC().Format(http.TimeFormat)
// Add the parent directory
baseUrl := path.Clean(fmt.Sprintf("/webdav/%s/%s", clientName, parent))
parentPath := fmt.Sprintf("%s/", baseUrl)
addDirectoryResponse(multistatus, parentPath, parent, currentTime)
// Add torrents to the XML
for _, torrent := range torrents {
name := torrent.Name()
// Note the path structure change - parent first, then torrent name
torrentPath := fmt.Sprintf("/webdav/%s/%s/%s/",
clientName,
parent,
name,
)
addDirectoryResponse(multistatus, torrentPath, name, currentTime)
// static prefix
buf.WriteString(`<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">`)
now := time.Now().UTC().Format(http.TimeFormat)
base := fmt.Sprintf("/webdav/%s/%s", clientName, parent)
writeResponse(buf, base+"/", parent, now)
for _, t := range torrents {
writeResponse(buf, base+"/"+t.Name()+"/", t.Name(), now)
}
buf.WriteString("</D:multistatus>")
// Convert to XML string
xmlData, err := doc.WriteToBytes()
if err != nil {
return fmt.Errorf("failed to generate XML: %v", err)
}
res := PropfindResponse{
Data: xmlData,
GzippedData: request.Gzip(xmlData),
Ts: time.Now(),
}
c.PropfindResp.Set(baseUrl, res)
data := buf.Bytes()
gz := request.Gzip(data, &c.gzipPool)
c.PropfindResp.Store(path.Clean(base), PropfindResponse{Data: data, GzippedData: gz, Ts: time.Now()})
return nil
}
func addDirectoryResponse(multistatus *etree.Element, href, displayName, modTime string) *etree.Element {
responseElem := multistatus.CreateElement("D:response")
// Add href - ensure it's properly formatted
hrefElem := responseElem.CreateElement("D:href")
hrefElem.SetText(href)
// Add propstat
propstatElem := responseElem.CreateElement("D:propstat")
// Add prop
propElem := propstatElem.CreateElement("D:prop")
// Add resource type (collection = directory)
resourceTypeElem := propElem.CreateElement("D:resourcetype")
resourceTypeElem.CreateElement("D:collection")
// Add display name
displayNameElem := propElem.CreateElement("D:displayname")
displayNameElem.SetText(displayName)
// Add last modified time
lastModElem := propElem.CreateElement("D:getlastmodified")
lastModElem.SetText(modTime)
// Add content type for directories
contentTypeElem := propElem.CreateElement("D:getcontenttype")
contentTypeElem.SetText("httpd/unix-directory")
// Add length (size) - directories typically have zero size
contentLengthElem := propElem.CreateElement("D:getcontentlength")
contentLengthElem.SetText("0")
// Add supported lock
lockElem := propElem.CreateElement("D:supportedlock")
lockEntryElem := lockElem.CreateElement("D:lockentry")
lockScopeElem := lockEntryElem.CreateElement("D:lockscope")
lockScopeElem.CreateElement("D:exclusive")
lockTypeElem := lockEntryElem.CreateElement("D:locktype")
lockTypeElem.CreateElement("D:write")
// Add status
statusElem := propstatElem.CreateElement("D:status")
statusElem.SetText("HTTP/1.1 200 OK")
return responseElem
// writeResponse appends one WebDAV <D:response> collection entry to buf.
// href is the URL path of the directory, name its display name, and modTime
// the last-modified timestamp (already formatted with http.TimeFormat).
// href and name are XML-escaped so torrent names containing '&', '<', '>'
// or quotes cannot produce a malformed multistatus document; modTime is an
// RFC1123 date and never contains XML-special characters.
func writeResponse(buf *bytes.Buffer, href, name, modTime string) {
	fmt.Fprintf(buf, `
<D:response>
<D:href>%s</D:href>
<D:propstat>
<D:prop>
<D:resourcetype><D:collection/></D:resourcetype>
<D:displayname>%s</D:displayname>
<D:getlastmodified>%s</D:getlastmodified>
<D:getcontenttype>httpd/unix-directory</D:getcontenttype>
<D:getcontentlength>0</D:getcontentlength>
<D:supportedlock>
<D:lockentry><D:lockscope><D:exclusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:lockentry>
</D:supportedlock>
</D:prop>
<D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
</D:response>`, escapeXML(href), escapeXML(name), modTime)
}

// escapeXML returns s with the five XML special characters replaced by their
// predefined entity references. Implemented with bytes.Buffer (already in
// scope in this file) to avoid pulling in encoding/xml or strings.
func escapeXML(s string) string {
	var b bytes.Buffer
	for _, r := range s {
		switch r {
		case '&':
			b.WriteString("&amp;")
		case '<':
			b.WriteString("&lt;")
		case '>':
			b.WriteString("&gt;")
		case '"':
			b.WriteString("&quot;")
		case '\'':
			b.WriteString("&apos;")
		default:
			b.WriteRune(r)
		}
	}
	return b.String()
}

View File

@@ -74,14 +74,14 @@ func New(dc config.Debrid) *RealDebrid {
request.WithRateLimiter(rl),
request.WithLogger(_log),
request.WithMaxRetries(5),
request.WithRetryableStatus(429),
request.WithRetryableStatus(429, 502),
request.WithProxy(dc.Proxy),
),
downloadClient: request.New(
request.WithHeaders(downloadHeaders),
request.WithLogger(_log),
request.WithMaxRetries(10),
request.WithRetryableStatus(429, 447),
request.WithRetryableStatus(429, 447, 502),
request.WithProxy(dc.Proxy),
),
currentDownloadKey: currentDownloadKey,

View File

@@ -290,7 +290,7 @@ func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debridTypes.Torr
}
func (q *QBit) preCacheFile(name string, filePaths []string) error {
q.logger.Trace().Msgf("Pre-caching file: %s", name)
q.logger.Trace().Msgf("Pre-caching torrent: %s", name)
if len(filePaths) == 0 {
return fmt.Errorf("no file paths provided")
}
@@ -300,7 +300,11 @@ func (q *QBit) preCacheFile(name string, filePaths []string) error {
file, err := os.Open(f)
if err != nil {
return err
if os.IsNotExist(err) {
// File has probably been moved by arr, return silently
return nil
}
return fmt.Errorf("failed to open file: %s: %v", f, err)
}
defer file.Close()

View File

@@ -258,7 +258,7 @@
<div class="col-md-4 mb-3">
<label class="form-label" for="repair.interval">Interval</label>
<input type="text" class="form-control" name="repair.interval" id="repair.interval" placeholder="e.g., 24h">
<small class="form-text text-muted">Interval for the repair process(e.g., 24h, 1d, 03:00)</small>
<small class="form-text text-muted">Interval for the repair process (e.g., 24h, 1d, 03:00, or a cron expression)</small>
</div>
<div class="col-md-5 mb-3">
<label class="form-label" for="repair.zurg_url">Zurg URL</label>
@@ -846,10 +846,10 @@
navigator.registerProtocolHandler(
'magnet',
`${window.location.origin}/download?magnet=%s`,
'DecyphArr'
'Decypharr'
);
localStorage.setItem('magnetHandler', 'true');
document.getElementById('registerMagnetLink').innerText = '✅ DecyphArr Can Open Magnet Links';
document.getElementById('registerMagnetLink').innerText = '✅ Decypharr Can Open Magnet Links';
document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black');
console.log('Registered magnet link handler successfully.');
} catch (error) {
@@ -860,7 +860,7 @@
var magnetHandler = localStorage.getItem('magnetHandler');
if (magnetHandler === 'true') {
document.getElementById('registerMagnetLink').innerText = '✅ DecyphArr Can Open Magnet Links';
document.getElementById('registerMagnetLink').innerText = '✅ Decypharr Can Open Magnet Links';
document.getElementById('registerMagnetLink').classList.add('bg-white', 'text-black');
}
</script>

View File

@@ -4,7 +4,7 @@
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>DecyphArr - {{.Title}}</title>
<title>Decypharr - {{.Title}}</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.7.2/font/bootstrap-icons.css" rel="stylesheet">
<link href="https://cdn.jsdelivr.net/npm/select2@4.1.0-rc.0/dist/css/select2.min.css" rel="stylesheet"/>
@@ -137,7 +137,7 @@
<nav class="navbar navbar-expand-lg navbar-light mb-4">
<div class="container">
<a class="navbar-brand" href="/">
<i class="bi bi-cloud-download me-2"></i>DecyphArr
<i class="bi bi-cloud-download me-2"></i>Decypharr
</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarNav">
<span class="navbar-toggler-icon"></span>

View File

@@ -183,7 +183,6 @@ func (f *File) Read(p []byte) (n int, err error) {
if f.metadataOnly {
return 0, io.EOF
}
// If file content is preloaded, read from memory.
if f.content != nil {
if f.offset >= int64(len(f.content)) {
return 0, io.EOF

View File

@@ -14,6 +14,7 @@ import (
"path/filepath"
"slices"
"strings"
"sync"
"time"
"github.com/rs/zerolog"
@@ -30,6 +31,8 @@ type Handler struct {
logger zerolog.Logger
cache *debrid.Cache
RootPath string
gzipPool sync.Pool
}
func NewHandler(name string, cache *debrid.Cache, logger zerolog.Logger) *Handler {
@@ -284,11 +287,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
handler.ServeHTTP(responseRecorder, r)
responseData := responseRecorder.Body.Bytes()
gzippedData := request.Gzip(responseData)
gzippedData := request.Gzip(responseData, &h.gzipPool)
// Create compressed version
h.cache.PropfindResp.Set(cleanPath, debrid.PropfindResponse{
h.cache.PropfindResp.Store(cleanPath, debrid.PropfindResponse{
Data: responseData,
GzippedData: gzippedData,
Ts: time.Now(),
@@ -338,7 +341,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if file, ok := fRaw.(*File); ok {
// Checks if the file is a torrent file
// .content is nil when the file's data must be fetched remotely (a torrent file);
// a non-nil .content means the file is preloaded in memory, e.g. version.txt
if file, ok := fRaw.(*File); ok && file.content == nil {
link, err := file.getDownloadLink()
if err != nil {
h.logger.Trace().
@@ -450,7 +456,7 @@ func (h *Handler) isParentPath(urlPath string) bool {
}
func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request, urlPath string) bool {
respCache, ok := h.cache.PropfindResp.Get(urlPath)
respCache, ok := h.cache.PropfindResp.Load(urlPath)
if !ok {
return false
}
@@ -458,7 +464,7 @@ func (h *Handler) serveFromCacheIfValid(w http.ResponseWriter, r *http.Request,
ttl := h.getCacheTTL(urlPath)
if time.Since(respCache.Ts) >= ttl {
h.cache.PropfindResp.Remove(urlPath)
h.cache.PropfindResp.Delete(urlPath)
return false
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")