Fix for file naming

This commit is contained in:
Mukhtar Akere
2025-03-26 21:12:01 +01:00
parent 56bca562f4
commit 7bd38736b1
18 changed files with 731 additions and 288 deletions

View File

@@ -95,6 +95,8 @@
"webdav": { "webdav": {
"torrents_refresh_interval": "15s", "torrents_refresh_interval": "15s",
"download_links_refresh_interval": "1h", "download_links_refresh_interval": "1h",
"folder_naming": "original",
"auto_expire_links_after": "24h",
"rc_url": "http://192.168.0.219:9990", "rc_url": "http://192.168.0.219:9990",
"rc_user": "your_rclone_rc_user", "rc_user": "your_rclone_rc_user",
"rc_pass": "your_rclone_rc_pass" "rc_pass": "your_rclone_rc_pass"

View File

@@ -31,6 +31,7 @@ type Debrid struct {
DownloadLinksRefreshInterval string `json:"downloads_refresh_interval"` DownloadLinksRefreshInterval string `json:"downloads_refresh_interval"`
TorrentRefreshWorkers int `json:"torrent_refresh_workers"` TorrentRefreshWorkers int `json:"torrent_refresh_workers"`
WebDavFolderNaming string `json:"webdav_folder_naming"` WebDavFolderNaming string `json:"webdav_folder_naming"`
AutoExpireLinksAfter string `json:"auto_expire_links_after"`
} }
type Proxy struct { type Proxy struct {
@@ -67,6 +68,7 @@ type Repair struct {
RunOnStart bool `json:"run_on_start"` RunOnStart bool `json:"run_on_start"`
ZurgURL string `json:"zurg_url"` ZurgURL string `json:"zurg_url"`
AutoProcess bool `json:"auto_process"` AutoProcess bool `json:"auto_process"`
UseWebDav bool `json:"use_webdav"`
} }
type Auth struct { type Auth struct {
@@ -78,6 +80,7 @@ type WebDav struct {
TorrentsRefreshInterval string `json:"torrents_refresh_interval"` TorrentsRefreshInterval string `json:"torrents_refresh_interval"`
DownloadLinksRefreshInterval string `json:"download_links_refresh_interval"` DownloadLinksRefreshInterval string `json:"download_links_refresh_interval"`
Workers int `json:"workers"` Workers int `json:"workers"`
AutoExpireLinksAfter string `json:"auto_expire_links_after"`
// Folder // Folder
FolderNaming string `json:"folder_naming"` FolderNaming string `json:"folder_naming"`
@@ -322,5 +325,8 @@ func (c *Config) GetDebridWebDav(d Debrid) Debrid {
if d.WebDavFolderNaming == "" { if d.WebDavFolderNaming == "" {
d.WebDavFolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext") d.WebDavFolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext")
} }
if d.AutoExpireLinksAfter == "" {
d.AutoExpireLinksAfter = cmp.Or(c.WebDav.AutoExpireLinksAfter, "24h")
}
return d return d
} }

View File

@@ -0,0 +1,23 @@
package request
// HTTPError is an error that carries an HTTP status code and a short
// machine-readable code alongside the human-readable message.
type HTTPError struct {
	StatusCode int
	Message    string
	Code       string
}

// Error implements the error interface by returning the human-readable message.
func (e *HTTPError) Error() string {
	return e.Message
}

// HosterUnavailableError is the sentinel returned when the hoster backing a
// link responds with HTTP 503; callers compare against it with errors.Is.
// NOTE(review): Go convention (staticcheck ST1012) prefers the ErrXxx form
// (ErrHosterUnavailable) — renaming would require updating callers.
var HosterUnavailableError = &HTTPError{
	StatusCode: 503,
	Message:    "Hoster is unavailable",
	Code:       "hoster_unavailable",
}

// ErrLinkBroken is the sentinel returned when the file behind a link no
// longer exists on the hoster (HTTP 404).
var ErrLinkBroken = &HTTPError{
	StatusCode: 404,
	Message:    "File is unavailable",
	Code:       "file_unavailable",
}

View File

@@ -233,3 +233,15 @@ func GetInfohashFromURL(url string) (string, error) {
infoHash := hash.HexString() infoHash := hash.HexString()
return infoHash, nil return infoHash, nil
} }
// ConstructMagnet builds a Magnet from an infohash and a display name.
//
// The name is trimmed and kept human-readable on the returned struct; it is
// percent-encoded only inside the magnet URI itself. (The previous version
// stored the escaped form in Name as well, which leaked URL escapes into
// anything that later displays or folder-names by Name.)
func ConstructMagnet(infoHash, name string) *Magnet {
	name = strings.TrimSpace(name)
	magnetUri := fmt.Sprintf("magnet:?xt=urn:btih:%s&dn=%s", infoHash, url.QueryEscape(name))
	return &Magnet{
		InfoHash: infoHash,
		Name:     name, // unescaped, human-readable
		Size:     0,    // size is unknown when reconstructing from hash+name
		Link:     magnetUri,
	}
}

View File

@@ -309,6 +309,14 @@ func (ad *AllDebrid) GetDownloadUncached() bool {
return ad.DownloadUncached return ad.DownloadUncached
} }
// CheckLink reports whether the given hoster link is still valid.
// This implementation is a no-op that always succeeds — link validity is not
// verified for AllDebrid here; broken links surface later when a download is
// attempted.
func (ad *AllDebrid) CheckLink(link string) error {
	return nil
}

// GetMountPath returns the configured mount path for this debrid client.
func (ad *AllDebrid) GetMountPath() string {
	return ad.MountPath
}
func New(dc config.Debrid) *AllDebrid { func New(dc config.Debrid) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit) rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{ headers := map[string]string{

View File

@@ -3,12 +3,14 @@ package debrid
import ( import (
"bufio" "bufio"
"context" "context"
"errors"
"fmt" "fmt"
"github.com/goccy/go-json" "github.com/goccy/go-json"
"github.com/puzpuzpuz/xsync/v3" "github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/debrid-blackhole/internal/config" "github.com/sirrobot01/debrid-blackhole/internal/config"
"github.com/sirrobot01/debrid-blackhole/internal/logger" "github.com/sirrobot01/debrid-blackhole/internal/logger"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils" "github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/types" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/types"
"os" "os"
@@ -39,31 +41,43 @@ type CachedTorrent struct {
IsComplete bool `json:"is_complete"` IsComplete bool `json:"is_complete"`
} }
// downloadLinkCache holds a cached unrestricted download link together with
// the time after which it must no longer be served (see checkDownloadLink).
type downloadLinkCache struct {
	Link      string
	ExpiresAt time.Time
}

// RepairRequest asks the repair worker to re-check — and, if broken,
// re-submit — the torrent identified by TorrentID. FileName optionally
// narrows the check to a single file.
// NOTE(review): Priority is not set by any visible caller — confirm whether
// it is still needed.
type RepairRequest struct {
	TorrentID string
	Priority  int
	FileName  string
}
type Cache struct { type Cache struct {
dir string dir string
client types.Client client types.Client
logger zerolog.Logger logger zerolog.Logger
torrents map[string]*CachedTorrent // key: torrent.Id, value: *CachedTorrent torrents *xsync.MapOf[string, *CachedTorrent] // key: torrent.Id, value: *CachedTorrent
torrentsNames map[string]*CachedTorrent // key: torrent.Name, value: torrent torrentsNames *xsync.MapOf[string, *CachedTorrent] // key: torrent.Name, value: torrent
listings atomic.Value listings atomic.Value
downloadLinks map[string]string // key: file.Link, value: download link downloadLinks *xsync.MapOf[string, downloadLinkCache]
PropfindResp *xsync.MapOf[string, PropfindResponse] PropfindResp *xsync.MapOf[string, PropfindResponse]
folderNaming WebDavFolderNaming folderNaming WebDavFolderNaming
// repair
repairChan chan RepairRequest
repairsInProgress *xsync.MapOf[string, bool]
// config // config
workers int workers int
torrentRefreshInterval time.Duration torrentRefreshInterval time.Duration
downloadLinksRefreshInterval time.Duration downloadLinksRefreshInterval time.Duration
autoExpiresLinksAfter time.Duration
// refresh mutex // refresh mutex
listingRefreshMu sync.RWMutex // for refreshing torrents listingRefreshMu sync.RWMutex // for refreshing torrents
downloadLinksRefreshMu sync.RWMutex // for refreshing download links downloadLinksRefreshMu sync.RWMutex // for refreshing download links
torrentsRefreshMu sync.RWMutex // for refreshing torrents torrentsRefreshMu sync.RWMutex // for refreshing torrents
// Data Mutexes
torrentsMutex sync.RWMutex // for torrents and torrentsNames
downloadLinksMutex sync.Mutex // for downloadLinks
} }
func NewCache(dc config.Debrid, client types.Client) *Cache { func NewCache(dc config.Debrid, client types.Client) *Cache {
@@ -76,37 +90,41 @@ func NewCache(dc config.Debrid, client types.Client) *Cache {
if err != nil { if err != nil {
downloadLinksRefreshInterval = time.Minute * 40 downloadLinksRefreshInterval = time.Minute * 40
} }
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if err != nil {
autoExpiresLinksAfter = time.Hour * 24
}
return &Cache{ return &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: make(map[string]*CachedTorrent), torrents: xsync.NewMapOf[string, *CachedTorrent](),
torrentsNames: make(map[string]*CachedTorrent), torrentsNames: xsync.NewMapOf[string, *CachedTorrent](),
client: client, client: client,
logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())), logger: logger.NewLogger(fmt.Sprintf("%s-cache", client.GetName())),
workers: 200, workers: 200,
downloadLinks: make(map[string]string), downloadLinks: xsync.NewMapOf[string, downloadLinkCache](),
torrentRefreshInterval: torrentRefreshInterval, torrentRefreshInterval: torrentRefreshInterval,
downloadLinksRefreshInterval: downloadLinksRefreshInterval, downloadLinksRefreshInterval: downloadLinksRefreshInterval,
PropfindResp: xsync.NewMapOf[string, PropfindResponse](), PropfindResp: xsync.NewMapOf[string, PropfindResponse](),
folderNaming: WebDavFolderNaming(dc.WebDavFolderNaming), folderNaming: WebDavFolderNaming(dc.WebDavFolderNaming),
autoExpiresLinksAfter: autoExpiresLinksAfter,
repairsInProgress: xsync.NewMapOf[string, bool](),
} }
} }
func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string { func (c *Cache) GetTorrentFolder(torrent *types.Torrent) string {
folderName := torrent.Name folderName := torrent.Filename
if c.folderNaming == WebDavUseID { if c.folderNaming == WebDavUseID {
folderName = torrent.Id folderName = torrent.Id
} else if c.folderNaming == WebDavUseOriginalNameNoExt { } else if c.folderNaming == WebDavUseOriginalNameNoExt {
folderName = utils.RemoveExtension(torrent.Name) folderName = utils.RemoveExtension(folderName)
} }
return folderName return folderName
} }
func (c *Cache) setTorrent(t *CachedTorrent) { func (c *Cache) setTorrent(t *CachedTorrent) {
c.torrentsMutex.Lock() c.torrents.Store(t.Id, t)
c.torrents[t.Id] = t
c.torrentsNames[c.GetTorrentFolder(t.Torrent)] = t c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t)
c.torrentsMutex.Unlock()
go func() { go func() {
if err := c.SaveTorrent(t); err != nil { if err := c.SaveTorrent(t); err != nil {
@@ -116,14 +134,11 @@ func (c *Cache) setTorrent(t *CachedTorrent) {
} }
func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) { func (c *Cache) setTorrents(torrents map[string]*CachedTorrent) {
c.torrentsMutex.Lock()
for _, t := range torrents { for _, t := range torrents {
c.torrents[t.Id] = t c.torrents.Store(t.Id, t)
c.torrentsNames[c.GetTorrentFolder(t.Torrent)] = t c.torrentsNames.Store(c.GetTorrentFolder(t.Torrent), t)
} }
c.torrentsMutex.Unlock()
c.refreshListings() c.refreshListings()
go func() { go func() {
@@ -140,22 +155,6 @@ func (c *Cache) GetListing() []os.FileInfo {
return nil return nil
} }
func (c *Cache) GetTorrents() map[string]*CachedTorrent {
c.torrentsMutex.RLock()
defer c.torrentsMutex.RUnlock()
result := make(map[string]*CachedTorrent, len(c.torrents))
for k, v := range c.torrents {
result[k] = v
}
return result
}
func (c *Cache) GetTorrentNames() map[string]*CachedTorrent {
c.torrentsMutex.RLock()
defer c.torrentsMutex.RUnlock()
return c.torrentsNames
}
func (c *Cache) Start() error { func (c *Cache) Start() error {
if err := os.MkdirAll(c.dir, 0755); err != nil { if err := os.MkdirAll(c.dir, 0755); err != nil {
return fmt.Errorf("failed to create cache directory: %w", err) return fmt.Errorf("failed to create cache directory: %w", err)
@@ -167,10 +166,6 @@ func (c *Cache) Start() error {
// initial download links // initial download links
go func() { go func() {
// lock download refresh mutex
c.downloadLinksRefreshMu.Lock()
defer c.downloadLinksRefreshMu.Unlock()
// This prevents the download links from being refreshed twice
c.refreshDownloadLinks() c.refreshDownloadLinks()
}() }()
@@ -181,6 +176,9 @@ func (c *Cache) Start() error {
} }
}() }()
c.repairChan = make(chan RepairRequest, 100)
go c.repairWorker()
return nil return nil
} }
@@ -239,28 +237,36 @@ func (c *Cache) load() (map[string]*CachedTorrent, error) {
return torrents, nil return torrents, nil
} }
func (c *Cache) GetTorrents() map[string]*CachedTorrent {
torrents := make(map[string]*CachedTorrent)
c.torrents.Range(func(key string, value *CachedTorrent) bool {
torrents[key] = value
return true
})
return torrents
}
func (c *Cache) GetTorrent(id string) *CachedTorrent { func (c *Cache) GetTorrent(id string) *CachedTorrent {
c.torrentsMutex.RLock() if t, ok := c.torrents.Load(id); ok {
defer c.torrentsMutex.RUnlock()
if t, ok := c.torrents[id]; ok {
return t return t
} }
return nil return nil
} }
func (c *Cache) GetTorrentByName(name string) *CachedTorrent { func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
if t, ok := c.GetTorrentNames()[name]; ok { if t, ok := c.torrentsNames.Load(name); ok {
return t return t
} }
return nil return nil
} }
func (c *Cache) SaveTorrents() error { func (c *Cache) SaveTorrents() error {
for _, ct := range c.GetTorrents() { c.torrents.Range(func(key string, value *CachedTorrent) bool {
if err := c.SaveTorrent(ct); err != nil { if err := c.SaveTorrent(value); err != nil {
return err c.logger.Debug().Err(err).Msgf("Failed to save torrent %s", key)
} }
} return true
})
return nil return nil
} }
@@ -383,6 +389,7 @@ func (c *Cache) sync(torrents []*types.Torrent) error {
count := atomic.AddInt64(&processed, 1) count := atomic.AddInt64(&processed, 1)
if count%1000 == 0 { if count%1000 == 0 {
c.refreshListings()
c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents)) c.logger.Info().Msgf("Progress: %d/%d torrents processed", count, len(torrents))
} }
@@ -448,9 +455,6 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
if file.Link == "" { if file.Link == "" {
// file link is empty, refresh the torrent to get restricted links // file link is empty, refresh the torrent to get restricted links
if ct.IsComplete {
return ""
}
ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid
if ct == nil { if ct == nil {
return "" return ""
@@ -458,17 +462,40 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
file = ct.Files[filename] file = ct.Files[filename]
} }
} }
c.logger.Trace().Msgf("Getting download link for %s", ct.Name)
link, err := c.client.GetDownloadLink(ct.Torrent, &file) c.logger.Trace().Msgf("Getting download link for %s", filename)
downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil { if err != nil {
c.logger.Error().Err(err).Msg("Failed to get download link") if errors.Is(err, request.HosterUnavailableError) {
// Check link here??
c.logger.Debug().Err(err).Msgf("Hoster is unavailable. Triggering repair for %s", ct.Name)
if err := c.repairTorrent(ct); err != nil {
c.logger.Error().Err(err).Msgf("Failed to trigger repair for %s", ct.Name)
return ""
}
// Generate download link for the file then
f := ct.Files[filename]
downloadLink, _ = c.client.GetDownloadLink(ct.Torrent, &f)
f.DownloadLink = downloadLink
file.Generated = time.Now()
ct.Files[filename] = f
c.updateDownloadLink(file.Link, downloadLink)
go func() {
go c.setTorrent(ct)
}()
return downloadLink // Gets download link in the next pass
}
c.logger.Debug().Err(err).Msgf("Failed to get download link for :%s", file.Link)
return "" return ""
} }
file.DownloadLink = link file.DownloadLink = downloadLink
file.Generated = time.Now() file.Generated = time.Now()
ct.Files[filename] = file ct.Files[filename] = file
go c.updateDownloadLink(file) go c.updateDownloadLink(file.Link, downloadLink)
go c.setTorrent(ct) go c.setTorrent(ct)
return file.DownloadLink return file.DownloadLink
} }
@@ -478,7 +505,7 @@ func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) {
c.logger.Error().Err(err).Msg("Failed to generate download links") c.logger.Error().Err(err).Msg("Failed to generate download links")
} }
for _, file := range t.Files { for _, file := range t.Files {
c.updateDownloadLink(file) c.updateDownloadLink(file.Link, file.DownloadLink)
} }
go func() { go func() {
@@ -506,15 +533,18 @@ func (c *Cache) AddTorrent(t *types.Torrent) error {
} }
func (c *Cache) updateDownloadLink(file types.File) { func (c *Cache) updateDownloadLink(link, downloadLink string) {
c.downloadLinksMutex.Lock() c.downloadLinks.Store(link, downloadLinkCache{
defer c.downloadLinksMutex.Unlock() Link: downloadLink,
c.downloadLinks[file.Link] = file.DownloadLink ExpiresAt: time.Now().Add(c.autoExpiresLinksAfter), // Expires in 24 hours
})
} }
func (c *Cache) checkDownloadLink(link string) string { func (c *Cache) checkDownloadLink(link string) string {
if dl, ok := c.downloadLinks[link]; ok { if dl, ok := c.downloadLinks.Load(link); ok {
return dl if dl.ExpiresAt.After(time.Now()) {
return dl.Link
}
} }
return "" return ""
} }
@@ -525,26 +555,21 @@ func (c *Cache) GetClient() types.Client {
func (c *Cache) DeleteTorrent(id string) { func (c *Cache) DeleteTorrent(id string) {
c.logger.Info().Msgf("Deleting torrent %s", id) c.logger.Info().Msgf("Deleting torrent %s", id)
c.torrentsMutex.Lock()
defer c.torrentsMutex.Unlock()
if t, ok := c.torrents[id]; ok {
delete(c.torrents, id)
delete(c.torrentsNames, t.Name)
c.removeFromDB(id)
if t, ok := c.torrents.Load(id); ok {
c.torrents.Delete(id)
c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent))
go c.removeFromDB(id)
c.refreshListings() c.refreshListings()
} }
} }
func (c *Cache) DeleteTorrents(ids []string) { func (c *Cache) DeleteTorrents(ids []string) {
c.logger.Info().Msgf("Deleting %d torrents", len(ids)) c.logger.Info().Msgf("Deleting %d torrents", len(ids))
c.torrentsMutex.Lock()
defer c.torrentsMutex.Unlock()
for _, id := range ids { for _, id := range ids {
if t, ok := c.torrents[id]; ok { if t, ok := c.torrents.Load(id); ok {
delete(c.torrents, id) c.torrents.Delete(id)
delete(c.torrentsNames, c.GetTorrentFolder(t.Torrent)) c.torrentsNames.Delete(c.GetTorrentFolder(t.Torrent))
go c.removeFromDB(id) go c.removeFromDB(id)
} }
} }

View File

@@ -8,6 +8,7 @@ import (
"io" "io"
"net/http" "net/http"
"os" "os"
"path/filepath"
"slices" "slices"
"sort" "sort"
"strings" "strings"
@@ -37,12 +38,11 @@ func (c *Cache) refreshListings() {
return return
} }
// Copy the current torrents to avoid concurrent issues // Copy the current torrents to avoid concurrent issues
c.torrentsMutex.RLock() torrents := make([]string, 0, c.torrentsNames.Size())
torrents := make([]string, 0, len(c.torrentsNames)) c.torrentsNames.Range(func(key string, value *CachedTorrent) bool {
for k, _ := range c.torrentsNames { torrents = append(torrents, key)
torrents = append(torrents, k) return true
} })
c.torrentsMutex.RUnlock()
sort.Slice(torrents, func(i, j int) bool { sort.Slice(torrents, func(i, j int) bool {
return torrents[i] < torrents[j] return torrents[i] < torrents[j]
@@ -61,26 +61,47 @@ func (c *Cache) refreshListings() {
} }
// Atomic store of the complete ready-to-use slice // Atomic store of the complete ready-to-use slice
c.listings.Store(files) c.listings.Store(files)
_ = c.RefreshXml() c.resetPropfindResponse()
if err := c.RefreshRclone(); err != nil { if err := c.RefreshRclone(); err != nil {
c.logger.Debug().Err(err).Msg("Failed to refresh rclone") c.logger.Debug().Err(err).Msg("Failed to refresh rclone")
} }
} }
func (c *Cache) resetPropfindResponse() {
// Right now, parents are hardcoded
parents := []string{"__all__", "torrents"}
// Reset only the parent directories
// Convert the parents to a keys
// This is a bit hacky, but it works
// Instead of deleting all the keys, we only delete the parent keys, e.g __all__/ or torrents/
keys := make([]string, 0, len(parents))
for _, p := range parents {
// Construct the key
// construct url
url := filepath.Clean(filepath.Join("/webdav", c.client.GetName(), p))
key0 := fmt.Sprintf("propfind:%s:0", url)
key1 := fmt.Sprintf("propfind:%s:1", url)
keys = append(keys, key0, key1)
}
// Delete the keys
for _, k := range keys {
c.PropfindResp.Delete(k)
}
}
func (c *Cache) refreshTorrents() { func (c *Cache) refreshTorrents() {
if c.torrentsRefreshMu.TryLock() { if c.torrentsRefreshMu.TryLock() {
defer c.torrentsRefreshMu.Unlock() defer c.torrentsRefreshMu.Unlock()
} else { } else {
return return
} }
c.torrentsMutex.RLock()
currentTorrents := c.torrents //
// Create a copy of the current torrents to avoid concurrent issues // Create a copy of the current torrents to avoid concurrent issues
torrents := make(map[string]string, len(currentTorrents)) // a map of id and name	torrents := make(map[string]string, c.torrents.Size()) // a map of id and name
for _, v := range currentTorrents { c.torrents.Range(func(key string, t *CachedTorrent) bool {
torrents[v.Id] = v.Name torrents[t.Id] = t.Name
} return true
c.torrentsMutex.RUnlock() })
// Get new torrents from the debrid service // Get new torrents from the debrid service
debTorrents, err := c.client.GetTorrents() debTorrents, err := c.client.GetTorrents()
@@ -206,14 +227,25 @@ func (c *Cache) refreshDownloadLinks() {
} else { } else {
return return
} }
c.downloadLinksMutex.Lock()
defer c.downloadLinksMutex.Unlock()
downloadLinks, err := c.client.GetDownloads() downloadLinks, err := c.client.GetDownloads()
if err != nil { if err != nil {
c.logger.Debug().Err(err).Msg("Failed to get download links") c.logger.Debug().Err(err).Msg("Failed to get download links")
} }
for k, v := range downloadLinks { for k, v := range downloadLinks {
c.downloadLinks[k] = v.DownloadLink // if link is generated in the last 24 hours, add it to cache
timeSince := time.Since(v.Generated)
if timeSince < c.autoExpiresLinksAfter {
c.downloadLinks.Store(k, downloadLinkCache{
Link: v.DownloadLink,
ExpiresAt: v.Generated.Add(c.autoExpiresLinksAfter - timeSince),
})
} else {
//c.downloadLinks.Delete(k) don't delete, just log
c.logger.Trace().Msgf("Download link for %s expired", k)
}
} }
c.logger.Debug().Msgf("Refreshed %d download links", len(downloadLinks))
} }

166
pkg/debrid/debrid/repair.go Normal file
View File

@@ -0,0 +1,166 @@
package debrid
import (
"errors"
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"github.com/sirrobot01/debrid-blackhole/internal/utils"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/types"
"slices"
)
// IsTorrentBroken reports whether a cached torrent (or, when filenames is
// non-empty, just that subset of its files) is broken: a file has no
// restricted link even after a refresh, or the hoster reports the link as
// gone (request.ErrLinkBroken).
//
// Files whose link is already present in the download-link cache are assumed
// healthy and skipped. Transient CheckLink errors do not mark the torrent
// broken.
func (c *Cache) IsTorrentBroken(t *CachedTorrent, filenames []string) bool {
	// collect picks the files under inspection from the (possibly refreshed)
	// torrent: all of them, or only the requested subset.
	collect := func(ct *CachedTorrent) map[string]types.File {
		if len(filenames) == 0 {
			return ct.Files
		}
		files := make(map[string]types.File, len(filenames))
		for name, f := range ct.Files {
			if slices.Contains(filenames, name) {
				files[name] = f
			}
		}
		return files
	}

	files := collect(t)

	// If any selected file is missing its link, refresh the torrent once from
	// the debrid service and re-collect. (BUG fix: the original refreshed but
	// then kept checking the stale pre-refresh snapshot, so the refresh never
	// affected the verdict; it also ignored a nil refresh result.)
	for _, f := range files {
		if f.Link == "" {
			if refreshed := c.refreshTorrent(t); refreshed != nil {
				t = refreshed
				files = collect(t)
			}
			break
		}
	}

	for _, f := range files {
		if f.Link == "" {
			// Still missing after refresh: broken.
			return true
		}
		if _, ok := c.downloadLinks.Load(f.Link); ok {
			// Link has a cached download link; assume it is healthy for now.
			continue
		}
		if err := c.client.CheckLink(f.Link); err != nil {
			if errors.Is(err, request.ErrLinkBroken) {
				return true
			}
			// Any other error may be temporary; don't declare the torrent broken.
		}
	}
	return false
}
// repairWorker consumes RepairRequests from c.repairChan, checks whether the
// referenced torrent is broken, and repairs it if so. The loop runs until
// c.repairChan is closed.
//
// NOTE(review): there is no context/shutdown hook — confirm the channel is
// closed (or the goroutine abandoned) when the cache stops.
// NOTE(review): repairTorrent itself returns early when repairsInProgress is
// set for the ID, and this worker sets that flag before calling it — verify
// the intended interaction, as the call below may be a no-op on this path.
func (c *Cache) repairWorker() {
	c.logger.Info().Msg("Starting repair worker")
	for req := range c.repairChan {
		torrentId := req.TorrentID

		// Deduplicate: skip if a repair for this torrent is already running.
		if _, inProgress := c.repairsInProgress.Load(torrentId); inProgress {
			c.logger.Debug().Str("torrentId", torrentId).Msg("Skipping duplicate repair request")
			continue
		}
		// Mark as in progress.
		c.repairsInProgress.Store(torrentId, true)
		c.logger.Debug().Str("torrentId", req.TorrentID).Msg("Received repair request")

		// Get the torrent from the cache.
		cachedTorrent, ok := c.torrents.Load(torrentId)
		if !ok || cachedTorrent == nil {
			c.logger.Warn().Str("torrentId", torrentId).Msg("Torrent not found in cache")
			// BUG fix: the original continued here without clearing the
			// in-progress flag, permanently blocking future repairs of this ID.
			c.repairsInProgress.Delete(torrentId)
			continue
		}

		// Check, then repair if broken.
		if c.IsTorrentBroken(cachedTorrent, nil) {
			c.logger.Info().Str("torrentId", torrentId).Msg("Repairing broken torrent")
			if err := c.repairTorrent(cachedTorrent); err != nil {
				c.logger.Error().Err(err).Str("torrentId", torrentId).Msg("Failed to repair torrent")
			} else {
				c.logger.Info().Str("torrentId", torrentId).Msg("Torrent repaired")
			}
		} else {
			c.logger.Debug().Str("torrentId", torrentId).Msg("Torrent is not broken")
		}
		c.repairsInProgress.Delete(torrentId)
	}
}
// SubmitForRepair queues a non-blocking repair request for the given torrent.
// Requests are dropped when a repair for the torrent is already in progress,
// or when the repair channel is full. (Not used yet.)
func (c *Cache) SubmitForRepair(torrentId, fileName string) {
	// Drop early if this torrent is already being repaired.
	if _, busy := c.repairsInProgress.Load(torrentId); busy {
		c.logger.Debug().Str("torrentID", torrentId).Msg("Repair already in progress")
		return
	}

	req := RepairRequest{TorrentID: torrentId, FileName: fileName}
	select {
	case c.repairChan <- req:
		c.logger.Debug().Str("torrentID", torrentId).Msg("Submitted for repair")
	default:
		// Never block the caller; a full channel means we simply skip.
		c.logger.Warn().Str("torrentID", torrentId).Msg("Repair channel full, skipping repair request")
	}
}
// repairTorrent re-submits a torrent's magnet to the debrid service and
// replaces the old torrent (old ID) with the freshly added one (new ID) in
// both the debrid account and the local cache/listings.
//
// NOTE(review): this returns nil without doing anything when a repair for
// t.Id is already marked in progress — but repairWorker sets that very flag
// *before* calling this method, which would make the worker's call a no-op.
// Confirm the intended interaction between the two.
func (c *Cache) repairTorrent(t *CachedTorrent) error {
	// Skip if another repair for this torrent is already marked in progress.
	if _, inProgress := c.repairsInProgress.Load(t.Id); inProgress {
		c.logger.Debug().Str("torrentID", t.Id).Msg("Repair already in progress")
		return nil
	}
	torrent := t.Torrent
	// Rebuild the magnet from infohash+name if it was not kept around.
	if torrent.Magnet == nil {
		torrent.Magnet = utils.ConstructMagnet(t.InfoHash, t.Name)
	}
	oldID := torrent.Id
	// Clear the ID so the client treats this as a fresh submission.
	torrent.Id = ""
	var err error
	torrent, err = c.client.SubmitMagnet(torrent)
	if err != nil {
		return fmt.Errorf("failed to submit magnet: %w", err)
	}
	// Check if the torrent was actually submitted (non-empty new ID).
	if torrent == nil || torrent.Id == "" {
		return fmt.Errorf("failed to submit magnet: empty torrent")
	}
	torrent, err = c.client.CheckStatus(torrent, true)
	if err != nil {
		return fmt.Errorf("failed to check status: %w", err)
	}
	c.client.DeleteTorrent(oldID) // delete the old torrent from the debrid account
	c.DeleteTorrent(oldID)        // remove the old entry from cache/listings
	// Update the torrent in the cache under its new identity.
	t.Torrent = torrent
	c.setTorrent(t)
	c.refreshListings()
	c.repairsInProgress.Delete(oldID)
	return nil
}

View File

@@ -353,3 +353,11 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
} }
return torrents, nil return torrents, nil
} }
// CheckLink reports whether the given hoster link is still valid.
// This implementation is a no-op that always succeeds — link validity is not
// verified for DebridLink here.
func (dl *DebridLink) CheckLink(link string) error {
	return nil
}

// GetMountPath returns the configured mount path for this debrid client.
func (dl *DebridLink) GetMountPath() string {
	return dl.MountPath
}

View File

@@ -17,7 +17,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time"
) )
type RealDebrid struct { type RealDebrid struct {
@@ -167,7 +166,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
if err != nil { if err != nil {
return err return err
} }
t.Name = data.OriginalFilename t.Name = data.Filename
t.Bytes = data.Bytes t.Bytes = data.Bytes
t.Folder = data.OriginalFilename t.Folder = data.OriginalFilename
t.Progress = data.Progress t.Progress = data.Progress
@@ -262,41 +261,105 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) {
func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error { func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host) url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
files := make(map[string]types.File) filesCh := make(chan types.File, len(t.Files))
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
for _, f := range t.Files { for _, f := range t.Files {
payload := gourl.Values{ wg.Add(1)
"link": {f.Link}, go func(file types.File) {
} defer wg.Done()
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.client.MakeRequest(req) payload := gourl.Values{"link": {file.Link}}
if err != nil { req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
fmt.Println(err) if err != nil {
return err errCh <- err
} return
var data UnrestrictResponse }
if err = json.Unmarshal(resp, &data); err != nil {
return err resp, err := r.client.Do(req)
} if err != nil {
f.DownloadLink = data.Download errCh <- err
f.Generated = time.Now() return
files[f.Name] = f }
if resp.StatusCode == http.StatusServiceUnavailable {
errCh <- request.HosterUnavailableError
return
}
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
var data UnrestrictResponse
if err = json.Unmarshal(b, &data); err != nil {
errCh <- err
return
}
file.DownloadLink = data.Download
filesCh <- file
}(f)
} }
go func() {
wg.Wait()
close(filesCh)
close(errCh)
}()
// Collect results
files := make(map[string]types.File, len(t.Files))
for file := range filesCh {
files[file.Name] = file
}
// Check for errors
for err := range errCh {
if err != nil {
return err // Return the first error encountered
}
}
t.Files = files t.Files = files
return nil return nil
} }
// CheckLink asks Real-Debrid's /unrestrict/check endpoint whether the given
// hoster link is still valid. Returns request.ErrLinkBroken when the service
// answers 404 (file removed), and nil otherwise.
func (r *RealDebrid) CheckLink(link string) error {
	url := fmt.Sprintf("%s/unrestrict/check", r.Host)
	payload := gourl.Values{
		"link": {link},
	}
	// BUG fix: the original ignored the NewRequest error and never closed the
	// response body, leaking the connection.
	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
	if err != nil {
		return err
	}
	resp, err := r.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Drain the body so the transport can reuse the connection.
	_, _ = io.Copy(io.Discard, resp.Body)
	if resp.StatusCode == http.StatusNotFound {
		return request.ErrLinkBroken // File has been removed
	}
	return nil
}
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) { func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (string, error) {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host) url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
payload := gourl.Values{ payload := gourl.Values{
"link": {file.Link}, "link": {file.Link},
} }
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.client.MakeRequest(req) resp, err := r.client.Do(req)
if err != nil {
return "", err
}
if resp.StatusCode == http.StatusServiceUnavailable {
return "", request.HosterUnavailableError
}
defer resp.Body.Close()
b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return "", err return "", err
} }
var data UnrestrictResponse var data UnrestrictResponse
if err = json.Unmarshal(resp, &data); err != nil { if err = json.Unmarshal(b, &data); err != nil {
return "", err return "", err
} }
return data.Download, nil return data.Download, nil
@@ -348,7 +411,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent,
} }
torrents = append(torrents, &types.Torrent{ torrents = append(torrents, &types.Torrent{
Id: t.Id, Id: t.Id,
Name: utils.RemoveInvalidChars(t.Filename), // This changes when we get the files Name: t.Filename,
Bytes: t.Bytes, Bytes: t.Bytes,
Progress: t.Progress, Progress: t.Progress,
Status: t.Status, Status: t.Status,
@@ -481,6 +544,10 @@ func (r *RealDebrid) GetDownloadUncached() bool {
return r.DownloadUncached return r.DownloadUncached
} }
// GetMountPath returns the configured mount path for this debrid client.
func (r *RealDebrid) GetMountPath() string {
	return r.MountPath
}
func New(dc config.Debrid) *RealDebrid { func New(dc config.Debrid) *RealDebrid {
rl := request.ParseRateLimit(dc.RateLimit) rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{ headers := map[string]string{
@@ -489,7 +556,9 @@ func New(dc config.Debrid) *RealDebrid {
_log := logger.NewLogger(dc.Name) _log := logger.NewLogger(dc.Name)
client := request.New(). client := request.New().
WithHeaders(headers). WithHeaders(headers).
WithRateLimiter(rl).WithLogger(_log) WithRateLimiter(rl).WithLogger(_log).
WithMaxRetries(5).
WithRetryableStatus(429)
return &RealDebrid{ return &RealDebrid{
Name: "realdebrid", Name: "realdebrid",
Host: dc.Host, Host: dc.Host,

View File

@@ -337,3 +337,11 @@ func New(dc config.Debrid) *Torbox {
func (tb *Torbox) GetDownloads() (map[string]types.DownloadLinks, error) { func (tb *Torbox) GetDownloads() (map[string]types.DownloadLinks, error) {
return nil, nil return nil, nil
} }
// CheckLink reports whether the given hoster link is still valid.
// This implementation is a no-op that always succeeds — link validity is not
// verified for Torbox here.
func (tb *Torbox) CheckLink(link string) error {
	return nil
}

// GetMountPath returns the configured mount path for this debrid client.
func (tb *Torbox) GetMountPath() string {
	return tb.MountPath
}

View File

@@ -19,4 +19,6 @@ type Client interface {
GetLogger() zerolog.Logger GetLogger() zerolog.Logger
GetDownloadingStatus() []string GetDownloadingStatus() []string
GetDownloads() (map[string]DownloadLinks, error) GetDownloads() (map[string]DownloadLinks, error)
CheckLink(link string) error
GetMountPath() string
} }

146
pkg/repair/clean.go Normal file
View File

@@ -0,0 +1,146 @@
package repair
import (
"context"
"fmt"
"github.com/sirrobot01/debrid-blackhole/internal/request"
"golang.org/x/sync/errgroup"
"runtime"
"sync"
"time"
)
// clean removes dangling torrents from the debrid account: torrents whose
// names are not referenced by any item known to the arrs attached to the job.
//
// It first gathers the set of known item names from every arr concurrently,
// then compares the cached torrents against that set and deletes the ones
// with no match. The job is marked completed on every successful path.
func (r *Repair) clean(job *Job) error {
	g, ctx := errgroup.WithContext(context.Background())
	uniqueItems := make(map[string]string)
	mu := sync.Mutex{}
	// Limit concurrent goroutines.
	g.SetLimit(runtime.NumCPU() * 4)
	for _, a := range job.Arrs {
		a := a // capture range variable (pre-Go-1.22 semantics)
		g.Go(func() error {
			// Bail out quickly if a sibling goroutine already failed.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			items, err := r.cleanArr(job, a, "")
			if err != nil {
				r.logger.Error().Err(err).Msgf("Error cleaning %s", a)
				return err
			}
			// Safely merge the found items into the shared map.
			if len(items) > 0 {
				mu.Lock()
				for k, v := range items {
					uniqueItems[k] = v
				}
				mu.Unlock()
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return err
	}

	if len(uniqueItems) == 0 {
		// Nothing known to the arrs: don't delete anything, just finish.
		job.CompletedAt = time.Now()
		job.Status = JobCompleted
		go func() {
			if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil {
				r.logger.Error().Msgf("Error sending discord message: %v", err)
			}
		}()
		return nil
	}

	// NOTE(review): "realdebrid" is hardcoded; confirm whether other debrid
	// backends should also be cleaned.
	cache := r.deb.Caches["realdebrid"]
	if cache == nil {
		return fmt.Errorf("cache not found")
	}
	torrents := cache.GetTorrents()
	dangling := make([]string, 0)
	for _, t := range torrents {
		if _, ok := uniqueItems[t.Name]; !ok {
			dangling = append(dangling, t.Id)
		}
	}
	r.logger.Info().Msgf("Found %d dangling torrents", len(dangling)) // typo fix: was "delapitated items"
	if len(dangling) == 0 {
		job.CompletedAt = time.Now()
		job.Status = JobCompleted
		return nil
	}
	client := r.deb.Clients["realdebrid"]
	if client == nil {
		return fmt.Errorf("client not found")
	}
	for _, id := range dangling {
		client.DeleteTorrent(id)
	}
	// BUG fix: the original returned here without marking the job completed,
	// unlike both early-exit paths above, leaving the job in its running state.
	job.CompletedAt = time.Now()
	job.Status = JobCompleted
	return nil
}
// cleanArr collects, for a single arr, the unique torrent paths still
// referenced by its media (optionally filtered by tmdbId). The returned map
// is consumed by clean to decide which cached torrents are dangling.
//
// The j parameter is currently unused but kept to mirror the repairArr
// signature.
func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) {
	uniqueItems := make(map[string]string)
	a := r.arrs.Get(_arr)
	// Fixed log messages: this is the clean pass, not a repair
	// (they previously said "repair", copy-pasted from repairArr).
	r.logger.Info().Msgf("Starting clean for %s", a.Name)
	media, err := a.GetMedia(tmdbId)
	if err != nil {
		r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err)
		return uniqueItems, err
	}
	// Scan each media item concurrently, bounded to avoid thrashing.
	g, ctx := errgroup.WithContext(context.Background())
	mu := sync.Mutex{}
	g.SetLimit(runtime.NumCPU() * 4)
	for _, m := range media {
		m := m // Capture range variable (pre-Go 1.22 loop semantics)
		g.Go(func() error {
			// Bail out early if a sibling goroutine already failed.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			u := r.getUniquePaths(m)
			if len(u) > 0 {
				// Lock once per media item rather than once per key.
				mu.Lock()
				for k, v := range u {
					uniqueItems[k] = v
				}
				mu.Unlock()
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return uniqueItems, err
	}
	r.logger.Info().Msgf("Clean completed for %s. %d unique items", a.Name, len(uniqueItems))
	return uniqueItems, nil
}

View File

@@ -2,6 +2,7 @@ package repair
import ( import (
"fmt" "fmt"
"github.com/sirrobot01/debrid-blackhole/pkg/arr"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@@ -129,3 +130,20 @@ func checkFileStart(filePath string) error {
} }
return nil return nil
} }
// collectFiles groups a media item's files by the torrent directory their
// symlinks resolve to. Each key is the cleaned symlink-target directory;
// each value holds the files inside it, with IsSymlink set and TargetPath
// filled with the target's base filename. Files that are not symlinks are
// skipped.
func collectFiles(media arr.Content) map[string][]arr.ContentFile {
	byTorrentDir := make(map[string][]arr.ContentFile)
	for _, item := range media.Files {
		target := getSymlinkTarget(item.Path)
		if target == "" {
			continue
		}
		item.IsSymlink = true
		dir, base := filepath.Split(target)
		key := filepath.Clean(dir)
		// Record the target's filename, e.g. folder/file.mkv -> file.mkv
		item.TargetPath = base
		byTorrentDir[key] = append(byTorrentDir[key], item)
	}
	return byTorrentDir
}

View File

@@ -34,6 +34,7 @@ type Repair struct {
runOnStart bool runOnStart bool
ZurgURL string ZurgURL string
IsZurg bool IsZurg bool
useWebdav bool
autoProcess bool autoProcess bool
logger zerolog.Logger logger zerolog.Logger
filename string filename string
@@ -51,6 +52,7 @@ func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
duration: duration, duration: duration,
runOnStart: cfg.Repair.RunOnStart, runOnStart: cfg.Repair.RunOnStart,
ZurgURL: cfg.Repair.ZurgURL, ZurgURL: cfg.Repair.ZurgURL,
useWebdav: cfg.Repair.UseWebDav,
autoProcess: cfg.Repair.AutoProcess, autoProcess: cfg.Repair.AutoProcess,
filename: filepath.Join(cfg.Path, "repair.json"), filename: filepath.Join(cfg.Path, "repair.json"),
deb: engine, deb: engine,
@@ -157,6 +159,13 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job {
} }
func (r *Repair) preRunChecks() error { func (r *Repair) preRunChecks() error {
if r.useWebdav {
if len(r.deb.Caches) == 0 {
return fmt.Errorf("no caches found")
}
}
// Check if zurg url is reachable // Check if zurg url is reachable
if !r.IsZurg { if !r.IsZurg {
return nil return nil
@@ -362,141 +371,6 @@ func (r *Repair) getUniquePaths(media arr.Content) map[string]string {
return uniqueParents return uniqueParents
} }
func (r *Repair) clean(job *Job) error {
// Create a new error group
g, ctx := errgroup.WithContext(context.Background())
uniqueItems := make(map[string]string)
mu := sync.Mutex{}
// Limit concurrent goroutines
g.SetLimit(runtime.NumCPU() * 4)
for _, a := range job.Arrs {
a := a // Capture range variable
g.Go(func() error {
// Check if context was canceled
select {
case <-ctx.Done():
return ctx.Err()
default:
}
items, err := r.cleanArr(job, a, "")
if err != nil {
r.logger.Error().Err(err).Msgf("Error cleaning %s", a)
return err
}
// Safely append the found items to the shared slice
if len(items) > 0 {
mu.Lock()
for k, v := range items {
uniqueItems[k] = v
}
mu.Unlock()
}
return nil
})
}
if err := g.Wait(); err != nil {
return err
}
if len(uniqueItems) == 0 {
job.CompletedAt = time.Now()
job.Status = JobCompleted
go func() {
if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil {
r.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
return nil
}
cache := r.deb.Caches["realdebrid"]
if cache == nil {
return fmt.Errorf("cache not found")
}
torrents := cache.GetTorrents()
dangling := make([]string, 0)
for _, t := range torrents {
if _, ok := uniqueItems[t.Name]; !ok {
dangling = append(dangling, t.Id)
}
}
r.logger.Info().Msgf("Found %d delapitated items", len(dangling))
if len(dangling) == 0 {
job.CompletedAt = time.Now()
job.Status = JobCompleted
return nil
}
client := r.deb.Clients["realdebrid"]
if client == nil {
return fmt.Errorf("client not found")
}
for _, id := range dangling {
client.DeleteTorrent(id)
}
return nil
}
func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) {
uniqueItems := make(map[string]string)
a := r.arrs.Get(_arr)
r.logger.Info().Msgf("Starting repair for %s", a.Name)
media, err := a.GetMedia(tmdbId)
if err != nil {
r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err)
return uniqueItems, err
}
// Create a new error group
g, ctx := errgroup.WithContext(context.Background())
mu := sync.Mutex{}
// Limit concurrent goroutines
g.SetLimit(runtime.NumCPU() * 4)
for _, m := range media {
m := m // Create a new variable scoped to the loop iteration
g.Go(func() error {
// Check if context was canceled
select {
case <-ctx.Done():
return ctx.Err()
default:
}
u := r.getUniquePaths(m)
for k, v := range u {
mu.Lock()
uniqueItems[k] = v
mu.Unlock()
}
return nil
})
}
if err := g.Wait(); err != nil {
return uniqueItems, err
}
r.logger.Info().Msgf("Repair completed for %s. %d unique items", a.Name, len(uniqueItems))
return uniqueItems, nil
}
func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) { func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) {
brokenItems := make([]arr.ContentFile, 0) brokenItems := make([]arr.ContentFile, 0)
a := r.arrs.Get(_arr) a := r.arrs.Get(_arr)
@@ -598,7 +472,9 @@ func (r *Repair) isMediaAccessible(m arr.Content) bool {
func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile { func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
if r.IsZurg { if r.useWebdav {
return r.getWebdavBrokenFiles(media)
} else if r.IsZurg {
return r.getZurgBrokenFiles(media) return r.getZurgBrokenFiles(media)
} else { } else {
return r.getFileBrokenFiles(media) return r.getFileBrokenFiles(media)
@@ -610,17 +486,7 @@ func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
brokenFiles := make([]arr.ContentFile, 0) brokenFiles := make([]arr.ContentFile, 0)
uniqueParents := make(map[string][]arr.ContentFile) uniqueParents := collectFiles(media)
files := media.Files
for _, file := range files {
target := getSymlinkTarget(file.Path)
if target != "" {
file.IsSymlink = true
dir, _ := filepath.Split(target)
parent := filepath.Base(filepath.Clean(dir))
uniqueParents[parent] = append(uniqueParents[parent], file)
}
}
for parent, f := range uniqueParents { for parent, f := range uniqueParents {
// Check stat // Check stat
@@ -646,19 +512,7 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
// This reduces bandwidth usage significantly // This reduces bandwidth usage significantly
brokenFiles := make([]arr.ContentFile, 0) brokenFiles := make([]arr.ContentFile, 0)
uniqueParents := make(map[string][]arr.ContentFile) uniqueParents := collectFiles(media)
files := media.Files
for _, file := range files {
target := getSymlinkTarget(file.Path)
if target != "" {
file.IsSymlink = true
dir, f := filepath.Split(target)
parent := filepath.Base(filepath.Clean(dir))
// Set target path folder/file.mkv
file.TargetPath = f
uniqueParents[parent] = append(uniqueParents[parent], file)
}
}
client := &http.Client{ client := &http.Client{
Timeout: 0, Timeout: 0,
Transport: &http.Transport{ Transport: &http.Transport{
@@ -672,9 +526,9 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
// Access zurg url + symlink folder + first file(encoded) // Access zurg url + symlink folder + first file(encoded)
for parent, f := range uniqueParents { for parent, f := range uniqueParents {
r.logger.Debug().Msgf("Checking %s", parent) r.logger.Debug().Msgf("Checking %s", parent)
encodedParent := url.PathEscape(parent) torrentName := url.PathEscape(filepath.Base(parent))
encodedFile := url.PathEscape(f[0].TargetPath) encodedFile := url.PathEscape(f[0].TargetPath)
fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, encodedParent, encodedFile) fullURL := fmt.Sprintf("%s/http/__all__/%s/%s", r.ZurgURL, torrentName, encodedFile)
// Check file stat first // Check file stat first
if _, err := os.Stat(f[0].Path); os.IsNotExist(err) { if _, err := os.Stat(f[0].Path); os.IsNotExist(err) {
r.logger.Debug().Msgf("Broken symlink found: %s", fullURL) r.logger.Debug().Msgf("Broken symlink found: %s", fullURL)
@@ -715,6 +569,76 @@ func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
return brokenFiles return brokenFiles
} }
// getWebdavBrokenFiles checks a media item's files against the internal
// webdav caches instead of probing zurg over HTTP. A file group is
// considered broken when its torrent is missing from the matching debrid
// cache or the cache reports the torrent's files as broken.
//
// Returns nil when webdav is unusable (no caches/clients) or when no broken
// files are found.
func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile {
	caches := r.deb.Caches
	if len(caches) == 0 {
		r.logger.Info().Msg("No caches found. Can't use webdav")
		return nil
	}
	clients := r.deb.Clients
	if len(clients) == 0 {
		r.logger.Info().Msg("No clients found. Can't use webdav")
		return nil
	}
	brokenFiles := make([]arr.ContentFile, 0)
	uniqueParents := collectFiles(media)
	// For each torrent folder, resolve which debrid client mounts its parent
	// directory, then ask that client's cache about the torrent's health.
	// (Previous comment incorrectly described the zurg URL path.)
	for torrentPath, f := range uniqueParents {
		r.logger.Debug().Msgf("Checking %s", torrentPath)
		// The parent of the torrent folder is expected to be a debrid
		// mount path — TODO confirm for all providers.
		dir := filepath.Dir(torrentPath)
		debridName := ""
		for _, client := range clients {
			mountPath := client.GetMountPath()
			if mountPath == "" {
				continue
			}
			if filepath.Clean(mountPath) == filepath.Clean(dir) {
				debridName = client.GetName()
				break
			}
		}
		if debridName == "" {
			r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath)
			continue
		}
		cache, ok := caches[debridName]
		if !ok {
			r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName)
			continue
		}
		// A torrent absent from the cache cannot be checked; skip it.
		torrentName := filepath.Clean(filepath.Base(torrentPath))
		torrent := cache.GetTorrentByName(torrentName)
		if torrent == nil {
			r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName)
			continue
		}
		files := make([]string, 0, len(f))
		for _, file := range f {
			files = append(files, file.TargetPath)
		}
		if cache.IsTorrentBroken(torrent, files) {
			r.logger.Debug().Msgf("[webdav] Broken symlink found: %s", torrentPath)
			// NOTE(review): the broken torrent is reported but not deleted
			// here — confirm whether deletion belongs in this path.
			brokenFiles = append(brokenFiles, f...)
		}
	}
	if len(brokenFiles) == 0 {
		r.logger.Debug().Msgf("No broken files found for %s", media.Title)
		return nil
	}
	r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
	return brokenFiles
}
func (r *Repair) GetJob(id string) *Job { func (r *Repair) GetJob(id string) *Job {
for _, job := range r.Jobs { for _, job := range r.Jobs {
if job.ID == id { if job.ID == id {

View File

@@ -1,6 +1,7 @@
package webdav package webdav
import ( import (
"crypto/tls"
"fmt" "fmt"
"github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid" "github.com/sirrobot01/debrid-blackhole/pkg/debrid/debrid"
"io" "io"
@@ -11,13 +12,8 @@ import (
var sharedClient = &http.Client{ var sharedClient = &http.Client{
Transport: &http.Transport{ Transport: &http.Transport{
MaxIdleConns: 100, TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
IdleConnTimeout: 90 * time.Second, Proxy: http.ProxyFromEnvironment,
ResponseHeaderTimeout: 30 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableCompression: false, // Enable compression for faster transfers
DisableKeepAlives: false,
Proxy: http.ProxyFromEnvironment,
}, },
Timeout: 0, Timeout: 0,
} }
@@ -92,7 +88,7 @@ func (f *File) Read(p []byte) (n int, err error) {
downloadLink := f.GetDownloadLink() downloadLink := f.GetDownloadLink()
if downloadLink == "" { if downloadLink == "" {
return 0, fmt.Errorf("failed to get download link for file") return 0, io.EOF
} }
req, err := http.NewRequest("GET", downloadLink, nil) req, err := http.NewRequest("GET", downloadLink, nil)

View File

@@ -191,7 +191,6 @@ func (h *Handler) OpenFile(ctx context.Context, name string, flag int, perm os.F
name: file.Name, name: file.Name,
size: file.Size, size: file.Size,
link: file.Link, link: file.Link,
downloadLink: file.DownloadLink,
metadataOnly: metadataOnly, metadataOnly: metadataOnly,
} }
return fi, nil return fi, nil

View File

@@ -115,7 +115,6 @@ func (wd *WebDav) setupRootHandler(r chi.Router) {
func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler { func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("DAV", "1, 2") w.Header().Set("DAV", "1, 2")
w.Header().Set("Cache-Control", "max-age=3600")
w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK") w.Header().Set("Allow", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")
w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK") w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, PROPFIND, GET, HEAD, POST, PUT, DELETE, MKCOL, PROPPATCH, COPY, MOVE, LOCK, UNLOCK")