- Add support for virtual folders

- Fix minor bugs
This commit is contained in:
Mukhtar Akere
2025-05-10 19:52:53 +01:00
parent 4cdfd051f3
commit 8e464cdcea
17 changed files with 871 additions and 174 deletions
+44 -16
View File
@@ -2,7 +2,6 @@ package debrid
import (
"bufio"
"bytes"
"cmp"
"context"
"errors"
@@ -11,6 +10,7 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
@@ -75,13 +75,11 @@ type Cache struct {
PropfindResp *xsync.Map[string, PropfindResponse]
folderNaming WebDavFolderNaming
// optimizers
xmlPool sync.Pool
gzipPool sync.Pool
listingDebouncer *utils.Debouncer[bool]
// monitors
repairRequest sync.Map
failedToReinsert sync.Map
repairRequest sync.Map
failedToReinsert sync.Map
downloadLinkRequests sync.Map
// repair
repairChan chan RepairRequest
@@ -101,7 +99,8 @@ type Cache struct {
saveSemaphore chan struct{}
ctx context.Context
config config.Debrid
config config.Debrid
customFolders []string
}
func New(dc config.Debrid, client types.Client) *Cache {
@@ -113,10 +112,28 @@ func New(dc config.Debrid, client types.Client) *Cache {
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
}
var customFolders []string
dirFilters := map[string][]directoryFilter{}
for name, value := range dc.Directories {
for filterType, v := range value.Filters {
df := directoryFilter{filterType: filterType, value: v}
switch filterType {
case filterByRegex, filterByNotRegex:
df.regex = regexp.MustCompile(v)
case filterBySizeGT, filterBySizeLT:
df.sizeThreshold, _ = config.ParseSize(v)
case filterBLastAdded:
df.ageThreshold, _ = time.ParseDuration(v)
}
dirFilters[name] = append(dirFilters[name], df)
}
customFolders = append(customFolders, name)
}
c := &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: newTorrentCache(),
torrents: newTorrentCache(dirFilters),
PropfindResp: xsync.NewMap[string, PropfindResponse](),
client: client,
logger: logger.New(fmt.Sprintf("%s-webdav", client.GetName())),
@@ -130,12 +147,8 @@ func New(dc config.Debrid, client types.Client) *Cache {
ctx: context.Background(),
scheduler: s,
config: dc,
xmlPool: sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
config: dc,
customFolders: customFolders,
}
c.listingDebouncer = utils.NewDebouncer[bool](250*time.Millisecond, func(refreshRclone bool) {
c.RefreshListings(refreshRclone)
@@ -464,8 +477,23 @@ func (c *Cache) setTorrents(torrents map[string]*CachedTorrent, callback func())
}
// GetListing returns a sorted list of torrents(READ-ONLY)
func (c *Cache) GetListing() []os.FileInfo {
return c.torrents.getListing()
func (c *Cache) GetListing(folder string) []os.FileInfo {
switch folder {
case "__all__", "torrents":
return c.torrents.getListing()
default:
return c.torrents.getFolderListing(folder)
}
}
// GetCustomFolders returns the names of the configured custom folders.
// A copy is returned so callers cannot mutate the cache's internal slice.
func (c *Cache) GetCustomFolders() []string {
	folders := make([]string, len(c.customFolders))
	copy(folders, c.customFolders)
	return folders
}
// GetDirectories returns every top-level WebDAV directory: the two
// built-ins ("__all__", "torrents") followed by the custom folders.
// A fresh slice is built on each call.
func (c *Cache) GetDirectories() []string {
	dirs := make([]string, 0, len(c.customFolders)+2)
	dirs = append(dirs, "__all__", "torrents")
	return append(dirs, c.customFolders...)
}
func (c *Cache) Close() error {
+38
View File
@@ -15,14 +15,52 @@ type linkCache struct {
expiresAt time.Time
}
// downloadLinkRequest is a single-flight handle for one download-link
// fetch: the first caller performs the fetch, later callers Wait on the
// same result instead of fetching again.
type downloadLinkRequest struct {
	result string
	err    error
	done   chan struct{} // closed exactly once by Complete
}

// newDownloadLinkRequest returns a request whose done channel is still open.
func newDownloadLinkRequest() *downloadLinkRequest {
	r := &downloadLinkRequest{done: make(chan struct{})}
	return r
}

// Complete records the outcome and releases every goroutine blocked in
// Wait. It must be called exactly once per request.
func (r *downloadLinkRequest) Complete(result string, err error) {
	r.result, r.err = result, err
	close(r.done)
}

// Wait blocks until Complete has been called, then returns its outcome.
func (r *downloadLinkRequest) Wait() (string, error) {
	<-r.done
	return r.result, r.err
}
// GetDownloadLink resolves fileLink to a usable download URL, serving
// from the link cache when possible and collapsing concurrent fetches
// for the same fileLink into a single upstream request.
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
	// Fast path: already cached.
	if dl := c.checkDownloadLink(fileLink); dl != "" {
		return dl, nil
	}
	// BUG FIX: the previous Load-then-Store sequence raced — two goroutines
	// could both miss Load and both fetch (one request object being silently
	// overwritten). LoadOrStore registers our request atomically, or hands
	// back the one already in flight.
	req := newDownloadLinkRequest()
	if existing, inFlight := c.downloadLinkRequests.LoadOrStore(fileLink, req); inFlight {
		// Another goroutine owns the fetch; wait for its result.
		return existing.(*downloadLinkRequest).Wait()
	}
	downloadLink, err := c.fetchDownloadLink(torrentName, filename, fileLink)
	// Publish the outcome to waiters, then drop the in-flight marker so a
	// later miss can retry.
	req.Complete(downloadLink, err)
	c.downloadLinkRequests.Delete(fileLink)
	return downloadLink, err
}
+10 -2
View File
@@ -140,8 +140,16 @@ func (c *Cache) refreshRclone() error {
MaxIdleConnsPerHost: 5,
},
}
// Create form data
data := "dir=__all__&dir2=torrents"
data := ""
for index, dir := range c.GetDirectories() {
if dir != "" {
if index == 0 {
data += "dir=" + dir
} else {
data += "&dir" + fmt.Sprint(index) + "=" + dir
}
}
}
sendRequest := func(endpoint string) error {
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", cfg.RcUrl, endpoint), strings.NewReader(data))
+156 -48
View File
@@ -2,26 +2,70 @@ package debrid
import (
"os"
"regexp"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
type torrentCache struct {
mu sync.RWMutex
byID map[string]string
byName map[string]*CachedTorrent
listing atomic.Value
sortNeeded bool
// Filter type identifiers for per-directory torrent filters. Each value
// corresponds to a key accepted in the config's Directories[...].Filters map.
const (
filterByInclude string = "include"
filterByExclude string = "exclude"
filterByStartsWith string = "starts_with"
filterByEndsWith string = "ends_with"
filterByNotStartsWith string = "not_starts_with"
filterByNotEndsWith string = "not_ends_with"
filterByRegex string = "regex"
filterByNotRegex string = "not_regex"
filterByExactMatch string = "exact_match"
filterByNotExactMatch string = "not_exact_match"
filterBySizeGT string = "size_gt"
filterBySizeLT string = "size_lt"
// NOTE(review): identifier is missing a "y" ("filterBLastAdded"); renaming
// would touch other call sites, so it is left as-is here.
filterBLastAdded string = "last_added"
)
// directoryFilter is one pre-parsed filter rule for a virtual directory.
// Only the field matching filterType is populated beyond filterType/value.
type directoryFilter struct {
filterType string // one of the filterBy* constants above
value string // raw filter value from config
regex *regexp.Regexp // only for regex/not_regex
sizeThreshold int64 // only for size_gt/size_lt
ageThreshold time.Duration // only for last_added
}
func newTorrentCache() *torrentCache {
// torrentCache holds the in-memory torrent index plus precomputed,
// filter-based per-folder listings for the WebDAV layer.
type torrentCache struct {
mu sync.RWMutex // guards byID, byName, sortNeeded
byID map[string]string
byName map[string]*CachedTorrent
listing atomic.Value // holds []os.FileInfo; read lock-free by getListing
folderListing map[string][]os.FileInfo // custom folder name -> matched entries
folderListingMu sync.RWMutex // guards folderListing
directoriesFilters map[string][]directoryFilter // folder name -> filter rules (set once at construction)
sortNeeded bool // true when listing must be rebuilt
}
// sortableFile is a lightweight projection of a cached torrent used
// while building sorted listings.
type sortableFile struct {
name string
modTime time.Time
size int64
}
// newTorrentCache builds an empty torrentCache wired with the
// per-directory filter rules parsed from config.
// (The previous span interleaved old and new initializer lines, leaving
// duplicate struct keys that could not compile.)
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
	tc := &torrentCache{
		byID:               make(map[string]string),
		byName:             make(map[string]*CachedTorrent),
		folderListing:      make(map[string][]os.FileInfo),
		sortNeeded:         false,
		directoriesFilters: dirFilters,
	}
	// Seed an empty listing so getListing never observes a nil atomic.Value.
	tc.listing.Store(make([]os.FileInfo, 0))
	return tc
}
@@ -66,57 +110,121 @@ func (tc *torrentCache) getListing() []os.FileInfo {
}
// Slow path: need to sort
return tc.refreshListing()
tc.refreshListing()
return tc.listing.Load().([]os.FileInfo)
}
func (tc *torrentCache) refreshListing() []os.FileInfo {
tc.mu.Lock()
size := len(tc.byName)
tc.mu.Unlock()
if size == 0 {
var empty []os.FileInfo
tc.listing.Store(empty)
tc.sortNeeded = false
return empty
// getFolderListing returns the cached listing for a custom folder, the
// full listing when folderName is empty, or an empty slice when the
// folder has no matches.
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
	// BUG FIX: the empty-name fallback must run BEFORE taking the read
	// lock. getListing can trigger refreshListing, whose workers acquire
	// folderListingMu.Lock while it waits for them — holding RLock here
	// would deadlock.
	if folderName == "" {
		return tc.getListing()
	}
	tc.folderListingMu.RLock()
	defer tc.folderListingMu.RUnlock()
	if folder, ok := tc.folderListing[folderName]; ok {
		return folder
	}
	// Folder unknown or currently empty.
	return []os.FileInfo{}
}
// Create sortable entries
type sortableFile struct {
name string
modTime time.Time
}
func (tc *torrentCache) refreshListing() {
tc.mu.Lock()
sortables := make([]sortableFile, 0, len(tc.byName))
for name, torrent := range tc.byName {
sortables = append(sortables, sortableFile{
name: name,
modTime: torrent.AddedOn,
})
all := make([]sortableFile, 0, len(tc.byName))
for name, t := range tc.byName {
all = append(all, sortableFile{name, t.AddedOn, t.Size})
}
tc.sortNeeded = false
tc.mu.Unlock()
// Sort by name
sort.Slice(sortables, func(i, j int) bool {
return sortables[i].name < sortables[j].name
sort.Slice(all, func(i, j int) bool {
if all[i].name != all[j].name {
return all[i].name < all[j].name
}
return all[i].modTime.Before(all[j].modTime)
})
// Create fileInfo objects
files := make([]os.FileInfo, 0, len(sortables))
for _, sf := range sortables {
files = append(files, &fileInfo{
name: sf.name,
size: 0,
mode: 0755 | os.ModeDir,
modTime: sf.modTime,
isDir: true,
})
wg := sync.WaitGroup{}
wg.Add(1) // for all listing
go func() {
listing := make([]os.FileInfo, len(all))
for i, sf := range all {
listing[i] = &fileInfo{sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
}
tc.listing.Store(listing)
}()
wg.Done()
now := time.Now()
wg.Add(len(tc.directoriesFilters)) // for each directory filter
for dir, filters := range tc.directoriesFilters {
go func(dir string, filters []directoryFilter) {
defer wg.Done()
var matched []os.FileInfo
for _, sf := range all {
if tc.torrentMatchDirectory(filters, sf, now) {
matched = append(matched, &fileInfo{
name: sf.name, size: sf.size,
mode: 0755 | os.ModeDir, modTime: sf.modTime, isDir: true,
})
}
}
tc.folderListingMu.Lock()
if len(matched) > 0 {
tc.folderListing[dir] = matched
} else {
delete(tc.folderListing, dir)
}
tc.folderListingMu.Unlock()
}(dir, filters)
}
tc.listing.Store(files)
tc.sortNeeded = false
return files
wg.Wait()
}
// torrentMatchDirectory reports whether file satisfies ALL of the given
// filters (AND semantics). String comparisons are case-insensitive.
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
	torrentName := strings.ToLower(file.name)
	for _, filter := range filters {
		// BUG FIX: the name is lowercased but filter.value previously was
		// not, so configured values containing uppercase could never match
		// positive string filters. Lowercase the value for consistency.
		value := strings.ToLower(filter.value)
		matched := false
		switch filter.filterType {
		case filterByInclude:
			matched = strings.Contains(torrentName, value)
		case filterByStartsWith:
			matched = strings.HasPrefix(torrentName, value)
		case filterByEndsWith:
			matched = strings.HasSuffix(torrentName, value)
		case filterByExactMatch:
			matched = torrentName == value
		case filterByExclude:
			matched = !strings.Contains(torrentName, value)
		case filterByNotStartsWith:
			matched = !strings.HasPrefix(torrentName, value)
		case filterByNotEndsWith:
			matched = !strings.HasSuffix(torrentName, value)
		case filterByRegex:
			// Regexes are matched against the lowercased name as before.
			matched = filter.regex.MatchString(torrentName)
		case filterByNotRegex:
			matched = !filter.regex.MatchString(torrentName)
		case filterByNotExactMatch:
			matched = torrentName != value
		case filterBySizeGT:
			matched = file.size > filter.sizeThreshold
		case filterBySizeLT:
			matched = file.size < filter.sizeThreshold
		case filterBLastAdded:
			// "Added within the last ageThreshold" window.
			matched = file.modTime.After(now.Add(-filter.ageThreshold))
		}
		if !matched {
			return false // all filters must match
		}
	}
	return true
}
func (tc *torrentCache) getAll() map[string]*CachedTorrent {
+203 -37
View File
@@ -1,64 +1,230 @@
package debrid
import (
"bytes"
"encoding/xml"
"fmt"
"github.com/beevik/etree"
"github.com/sirrobot01/decypharr/internal/request"
"net/http"
"os"
"path"
"sync"
"time"
)
const (
// DavNS is the WebDAV XML namespace used for the D: prefix.
DavNS = "DAV:"
)
// Multistatus XML types for WebDAV response (RFC 4918 PROPFIND result).
// Multistatus is the root <D:multistatus> element.
type Multistatus struct {
XMLName xml.Name `xml:"D:multistatus"`
Namespace string `xml:"xmlns:D,attr"`
Responses []Response `xml:"D:response"`
}
// Response describes one resource (href + its property status).
type Response struct {
Href string `xml:"D:href"`
Propstat Propstat `xml:"D:propstat"`
}
// Propstat pairs a property set with its HTTP status line.
type Propstat struct {
Prop Prop `xml:"D:prop"`
Status string `xml:"D:status"`
}
// Prop carries the DAV properties reported for a resource.
type Prop struct {
ResourceType ResourceType `xml:"D:resourcetype"`
DisplayName string `xml:"D:displayname"`
LastModified string `xml:"D:getlastmodified"`
ContentType string `xml:"D:getcontenttype"`
ContentLength string `xml:"D:getcontentlength"`
SupportedLock SupportedLock `xml:"D:supportedlock"`
}
// ResourceType marks a resource as a collection (directory) when the
// Collection pointer is non-nil.
type ResourceType struct {
Collection *struct{} `xml:"D:collection,omitempty"`
}
// SupportedLock advertises the lock capabilities of a resource.
type SupportedLock struct {
LockEntry LockEntry `xml:"D:lockentry"`
}
// LockEntry is one supported scope/type lock combination.
type LockEntry struct {
LockScope LockScope `xml:"D:lockscope"`
LockType LockType `xml:"D:locktype"`
}
// LockScope holds a non-nil Exclusive marker for exclusive locks.
type LockScope struct {
Exclusive *struct{} `xml:"D:exclusive"`
}
// LockType holds a non-nil Write marker for write locks.
type LockType struct {
Write *struct{} `xml:"D:write"`
}
// refreshParentXml rebuilds the cached PROPFIND XML for the built-in
// parents ("__all__", "torrents") and every custom folder, concurrently.
// It returns the first refresh error, if any.
func (c *Cache) refreshParentXml() error {
	parents := []string{"__all__", "torrents"}
	torrents := c.GetListing("__all__")
	clientName := c.client.GetName()
	customFolders := c.GetCustomFolders()
	wg := sync.WaitGroup{}
	totalFolders := len(parents) + len(customFolders)
	wg.Add(totalFolders)
	// Buffered so workers never block on send even if all of them fail.
	errCh := make(chan error, totalFolders)
	for _, parent := range parents {
		parent := parent // pre-Go1.22 loop-variable capture
		go func() {
			defer wg.Done()
			if err := c.refreshFolderXml(torrents, clientName, parent); err != nil {
				errCh <- fmt.Errorf("failed to refresh folder %s: %v", parent, err)
			}
		}()
	}
	// Refresh custom folders with their own filtered listings.
	for _, folder := range customFolders {
		// BUG FIX: the rebinding previously sat INSIDE the goroutine, after
		// the closure had already captured the shared loop variable — all
		// goroutines could see the last folder name. Rebind before `go`.
		folder := folder
		go func() {
			defer wg.Done()
			listing := c.GetListing(folder)
			if err := c.refreshFolderXml(listing, clientName, folder); err != nil {
				errCh <- fmt.Errorf("failed to refresh folder %s: %v", folder, err)
			}
		}()
	}
	wg.Wait()
	close(errCh)
	// Receiving from the closed channel yields nil when no error was sent.
	if err := <-errCh; err != nil {
		return err
	}
	return nil
}
func (c *Cache) refreshFolderXml(torrents []os.FileInfo, clientName, parent string) error {
buf := c.xmlPool.Get().(*bytes.Buffer)
buf.Reset()
defer c.xmlPool.Put(buf)
// Get the current timestamp in RFC1123 format
currentTime := time.Now().UTC().Format(http.TimeFormat)
// static prefix
buf.WriteString(`<?xml version="1.0" encoding="UTF-8"?><D:multistatus xmlns:D="DAV:">`)
now := time.Now().UTC().Format(http.TimeFormat)
base := fmt.Sprintf("/webdav/%s/%s", clientName, parent)
writeResponse(buf, base+"/", parent, now)
for _, t := range torrents {
writeResponse(buf, base+"/"+t.Name()+"/", t.Name(), now)
// Create the multistatus response structure
ms := Multistatus{
Namespace: DavNS,
Responses: make([]Response, 0, len(torrents)+1), // Pre-allocate for parent + torrents
}
buf.WriteString("</D:multistatus>")
data := buf.Bytes()
gz := request.Gzip(data, &c.gzipPool)
c.PropfindResp.Store(path.Clean(base), PropfindResponse{Data: data, GzippedData: gz, Ts: time.Now()})
// Add the parent directory
baseUrl := path.Join("webdav", clientName, parent)
// Add parent response
ms.Responses = append(ms.Responses, createDirectoryResponse(baseUrl, parent, currentTime))
// Add torrents to the response
for _, torrent := range torrents {
name := torrent.Name()
torrentPath := path.Join("/webdav", clientName, parent, name) + "/"
ms.Responses = append(ms.Responses, createDirectoryResponse(torrentPath, name, currentTime))
}
// Create a buffer and encode the XML
xmlData, err := xml.MarshalIndent(ms, "", " ")
if err != nil {
return fmt.Errorf("failed to generate XML: %v", err)
}
// Add XML declaration
xmlHeader := []byte(xml.Header)
xmlOutput := append(xmlHeader, xmlData...)
// Cache the result
cacheKey := fmt.Sprintf("%s:1", baseUrl)
// Assume Gzip function exists elsewhere
gzippedData := request.Gzip(xmlOutput) // Replace with your actual gzip function
c.PropfindResp.Store(cacheKey, PropfindResponse{
Data: xmlOutput,
GzippedData: gzippedData,
Ts: time.Now(),
})
return nil
}
func writeResponse(buf *bytes.Buffer, href, name, modTime string) {
fmt.Fprintf(buf, `
<D:response>
<D:href>%s</D:href>
<D:propstat>
<D:prop>
<D:resourcetype><D:collection/></D:resourcetype>
<D:displayname>%s</D:displayname>
<D:getlastmodified>%s</D:getlastmodified>
<D:getcontenttype>httpd/unix-directory</D:getcontenttype>
<D:getcontentlength>0</D:getcontentlength>
<D:supportedlock>
<D:lockentry><D:lockscope><D:exclusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:lockentry>
</D:supportedlock>
</D:prop>
<D:status>HTTP/1.1 200 OK</D:status>
</D:propstat>
</D:response>`, href, name, modTime)
// createDirectoryResponse builds one WebDAV <D:response> entry describing
// a directory (collection) resource with an exclusive write lock entry
// and a 200 OK propstat status.
func createDirectoryResponse(href, displayName, modTime string) Response {
	props := Prop{
		ResourceType:  ResourceType{Collection: &struct{}{}},
		DisplayName:   displayName,
		LastModified:  modTime,
		ContentType:   "httpd/unix-directory",
		ContentLength: "0",
		SupportedLock: SupportedLock{
			LockEntry: LockEntry{
				LockScope: LockScope{Exclusive: &struct{}{}},
				LockType:  LockType{Write: &struct{}{}},
			},
		},
	}
	return Response{
		Href: href,
		Propstat: Propstat{
			Prop:   props,
			Status: "HTTP/1.1 200 OK",
		},
	}
}
// addDirectoryResponse appends an etree-built <D:response> element for a
// directory resource to the given multistatus element and returns it.
// NOTE(review): this mirrors createDirectoryResponse via the etree API;
// whether both paths are still used is not visible here — confirm and
// consider removing one.
func addDirectoryResponse(multistatus *etree.Element, href, displayName, modTime string) *etree.Element {
responseElem := multistatus.CreateElement("D:response")
// Add href - ensure it's properly formatted
hrefElem := responseElem.CreateElement("D:href")
hrefElem.SetText(href)
// Add propstat
propstatElem := responseElem.CreateElement("D:propstat")
// Add prop
propElem := propstatElem.CreateElement("D:prop")
// Add resource type (collection = directory)
resourceTypeElem := propElem.CreateElement("D:resourcetype")
resourceTypeElem.CreateElement("D:collection")
// Add display name
displayNameElem := propElem.CreateElement("D:displayname")
displayNameElem.SetText(displayName)
// Add last modified time
lastModElem := propElem.CreateElement("D:getlastmodified")
lastModElem.SetText(modTime)
// Add content type for directories
contentTypeElem := propElem.CreateElement("D:getcontenttype")
contentTypeElem.SetText("httpd/unix-directory")
// Add length (size) - directories typically have zero size
contentLengthElem := propElem.CreateElement("D:getcontentlength")
contentLengthElem.SetText("0")
// Add supported lock (exclusive write lock entry)
lockElem := propElem.CreateElement("D:supportedlock")
lockEntryElem := lockElem.CreateElement("D:lockentry")
lockScopeElem := lockEntryElem.CreateElement("D:lockscope")
lockScopeElem.CreateElement("D:exclusive")
lockTypeElem := lockEntryElem.CreateElement("D:locktype")
lockTypeElem.CreateElement("D:write")
// Add status
statusElem := propstatElem.CreateElement("D:status")
statusElem.SetText("HTTP/1.1 200 OK")
return responseElem
}