fix bugs; move to gocron for scheduled jobs

This commit is contained in:
Mukhtar Akere
2025-04-21 23:23:35 +01:00
parent a27c5dd491
commit 32935ce3aa
15 changed files with 308 additions and 248 deletions

View File

@@ -174,6 +174,10 @@ func startServices(ctx context.Context) error {
return worker.Start(ctx)
})
safeGo(func() error {
return svc.Arr.StartSchedule(ctx)
})
if cfg.Repair.Enabled {
safeGo(func() error {
err := svc.Repair.Start(ctx)

3
go.mod
View File

@@ -26,12 +26,15 @@ require (
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/go-co-op/gocron/v2 v2.16.1 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/gorilla/securecookie v1.1.2 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/stretchr/testify v1.10.0 // indirect
golang.org/x/sys v0.30.0 // indirect

6
go.sum
View File

@@ -72,6 +72,8 @@ github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1T
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-co-op/gocron/v2 v2.16.1 h1:ux/5zxVRveCaCuTtNI3DiOk581KC1KpJbpJFYUEVYwo=
github.com/go-co-op/gocron/v2 v2.16.1/go.mod h1:opexeOFy5BplhsKdA7bzY9zeYih8I8/WNJ4arTIFPVc=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
@@ -126,6 +128,8 @@ github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -186,6 +190,8 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=

View File

@@ -0,0 +1,74 @@
package utils
import (
"context"
"fmt"
"github.com/go-co-op/gocron/v2"
"strconv"
"strings"
"time"
)
// ScheduleJob creates a dedicated gocron scheduler in loc (time.Local when
// loc is nil), registers jobFunc to run on the given interval and starts the
// scheduler. interval accepts either a Go duration string ("10s", "1h30m")
// or a daily wall-clock time ("HH:MM"); see convertToJD.
//
// The scheduler's lifetime is tied to ctx: when ctx is cancelled the
// scheduler is shut down and the job stops firing. The returned gocron.Job
// can be used to inspect the schedule (e.g. NextRun).
func ScheduleJob(ctx context.Context, interval string, loc *time.Location, jobFunc func()) (gocron.Job, error) {
	if loc == nil {
		loc = time.Local
	}
	var job gocron.Job
	s, err := gocron.NewScheduler(gocron.WithLocation(loc))
	if err != nil {
		return job, fmt.Errorf("failed to create scheduler: %w", err)
	}
	jd, err := convertToJD(interval)
	if err != nil {
		return job, fmt.Errorf("failed to convert interval to job definition: %w", err)
	}
	// Schedule the job; WithContext lets gocron observe ctx for the task itself.
	if job, err = s.NewJob(jd, gocron.NewTask(jobFunc), gocron.WithContext(ctx)); err != nil {
		return job, fmt.Errorf("failed to create job: %w", err)
	}
	s.Start()
	// Fix: the original started the scheduler and then leaked it — nothing
	// ever called Shutdown, so its goroutines ran for the process lifetime
	// even after the caller's ctx was cancelled. Stop it on cancellation.
	go func() {
		<-ctx.Done()
		_ = s.Shutdown()
	}()
	return job, nil
}
// ConvertToJobDef converts a string interval to a gocron.JobDefinition.
// convertToJD translates a human-friendly interval string into a
// gocron.JobDefinition. A "HH:MM" value becomes a daily job at that clock
// time; anything else must parse as a Go duration (e.g. "1h", "30m", "15s",
// "1h30m") and becomes a fixed-duration job.
func convertToJD(interval string) (gocron.JobDefinition, error) {
	var jd gocron.JobDefinition
	// Wall-clock form takes precedence over duration form.
	if t, ok := parseClockTime(interval); ok {
		at := gocron.NewAtTime(uint(t.Hour()), uint(t.Minute()), uint(t.Second()))
		return gocron.DailyJob(1, gocron.NewAtTimes(at)), nil
	}
	dur, err := time.ParseDuration(interval)
	if err != nil {
		return jd, fmt.Errorf("failed to parse duration: %w", err)
	}
	return gocron.DurationJob(dur), nil
}
// parseClockTime interprets s as a 24-hour "HH:MM" wall-clock time.
// On success it returns today's date at that time (seconds zeroed) in the
// local zone together with true; on any malformed or out-of-range input it
// returns the zero time and false.
func parseClockTime(s string) (time.Time, bool) {
	hourStr, minuteStr, found := strings.Cut(s, ":")
	if !found {
		return time.Time{}, false
	}
	hour, err := strconv.Atoi(hourStr)
	if err != nil || hour < 0 || hour > 23 {
		return time.Time{}, false
	}
	// A second ":" leaves it embedded in minuteStr, so Atoi rejects it —
	// matching the original "exactly two parts" requirement.
	minute, err := strconv.Atoi(minuteStr)
	if err != nil || minute < 0 || minute > 59 {
		return time.Time{}, false
	}
	now := time.Now()
	return time.Date(now.Year(), now.Month(), now.Day(), hour, minute, 0, 0, time.Local), true
}

View File

@@ -2,10 +2,14 @@ package arr
import (
"bytes"
"context"
"fmt"
"github.com/goccy/go-json"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"io"
"net/http"
"strconv"
@@ -113,8 +117,9 @@ func (a *Arr) Validate() error {
}
type Storage struct {
Arrs map[string]*Arr // name -> arr
mu sync.RWMutex
Arrs map[string]*Arr // name -> arr
mu sync.RWMutex
logger zerolog.Logger
}
func InferType(host, name string) Type {
@@ -139,7 +144,8 @@ func NewStorage() *Storage {
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached)
}
return &Storage{
Arrs: arrs,
Arrs: arrs,
logger: logger.New("arr"),
}
}
@@ -176,6 +182,33 @@ func (as *Storage) Clear() {
as.Arrs = make(map[string]*Arr)
}
// StartSchedule registers the recurring arr maintenance jobs.
// Currently this is a single queue-cleanup pass every 10 seconds.
func (as *Storage) StartSchedule(ctx context.Context) error {
	// The job handle is intentionally discarded; only scheduling errors matter.
	if _, err := utils.ScheduleJob(ctx, "10s", nil, as.cleanupArrsQueue); err != nil {
		return err
	}
	return nil
}
// cleanupArrsQueue runs one cleanup pass: it collects every arr with
// Cleanup enabled and calls CleanupQueue on each, logging (not aborting on)
// individual failures. It is invoked periodically by StartSchedule.
func (as *Storage) cleanupArrsQueue() {
	// Fix: snapshot candidates under the read lock. as.Arrs is guarded by
	// as.mu (see the Storage struct), and this runs on a scheduler goroutine
	// concurrently with map mutations elsewhere — iterating unguarded is a
	// data race.
	as.mu.RLock()
	arrs := make([]*Arr, 0, len(as.Arrs))
	for _, a := range as.Arrs {
		if a.Cleanup {
			arrs = append(arrs, a)
		}
	}
	as.mu.RUnlock()
	if len(arrs) == 0 {
		return
	}
	as.logger.Trace().Msgf("Cleaning up %d arrs", len(arrs))
	for _, a := range arrs {
		if err := a.CleanupQueue(); err != nil {
			as.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", a.Name)
		}
	}
}
func (a *Arr) Refresh() error {
payload := struct {
Name string `json:"name"`

View File

@@ -5,6 +5,7 @@ import (
"context"
"errors"
"fmt"
"github.com/go-co-op/gocron/v2"
"github.com/goccy/go-json"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog"
@@ -82,51 +83,52 @@ type Cache struct {
repairsInProgress *xsync.MapOf[string, struct{}]
// config
workers int
torrentRefreshInterval time.Duration
downloadLinksRefreshInterval time.Duration
autoExpiresLinksAfter time.Duration
workers int
torrentRefreshInterval string
downloadLinksRefreshInterval string
autoExpiresLinksAfter string
autoExpiresLinksAfterDuration time.Duration
// refresh mutex
listingRefreshMu sync.RWMutex // for refreshing torrents
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
torrentsRefreshMu sync.RWMutex // for refreshing torrents
scheduler gocron.Scheduler
saveSemaphore chan struct{}
ctx context.Context
}
func New(dc config.Debrid, client types.Client) *Cache {
cfg := config.Get()
torrentRefreshInterval, err := time.ParseDuration(dc.TorrentsRefreshInterval)
if err != nil {
torrentRefreshInterval = time.Second * 15
}
downloadLinksRefreshInterval, err := time.ParseDuration(dc.DownloadLinksRefreshInterval)
if err != nil {
downloadLinksRefreshInterval = time.Minute * 40
}
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if err != nil {
autoExpiresLinksAfter = time.Hour * 24
cet, _ := time.LoadLocation("CET")
s, _ := gocron.NewScheduler(gocron.WithLocation(cet))
autoExpiresLinksAfter, _ := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 {
autoExpiresLinksAfter = 24 * time.Hour
}
return &Cache{
dir: path.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: xsync.NewMapOf[string, *CachedTorrent](),
torrentsNames: xsync.NewMapOf[string, *CachedTorrent](),
invalidDownloadLinks: xsync.NewMapOf[string, string](),
client: client,
logger: logger.New(fmt.Sprintf("%s-webdav", client.GetName())),
workers: dc.Workers,
downloadLinks: xsync.NewMapOf[string, downloadLinkCache](),
torrentRefreshInterval: torrentRefreshInterval,
downloadLinksRefreshInterval: downloadLinksRefreshInterval,
PropfindResp: xsync.NewMapOf[string, PropfindResponse](),
folderNaming: WebDavFolderNaming(dc.FolderNaming),
autoExpiresLinksAfter: autoExpiresLinksAfter,
repairsInProgress: xsync.NewMapOf[string, struct{}](),
saveSemaphore: make(chan struct{}, 50),
ctx: context.Background(),
dir: path.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: xsync.NewMapOf[string, *CachedTorrent](),
torrentsNames: xsync.NewMapOf[string, *CachedTorrent](),
invalidDownloadLinks: xsync.NewMapOf[string, string](),
client: client,
logger: logger.New(fmt.Sprintf("%s-webdav", client.GetName())),
workers: dc.Workers,
downloadLinks: xsync.NewMapOf[string, downloadLinkCache](),
torrentRefreshInterval: dc.TorrentsRefreshInterval,
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
PropfindResp: xsync.NewMapOf[string, PropfindResponse](),
folderNaming: WebDavFolderNaming(dc.FolderNaming),
autoExpiresLinksAfter: dc.AutoExpireLinksAfter,
autoExpiresLinksAfterDuration: autoExpiresLinksAfter,
repairsInProgress: xsync.NewMapOf[string, struct{}](),
saveSemaphore: make(chan struct{}, 50),
ctx: context.Background(),
scheduler: s,
}
}
@@ -146,9 +148,9 @@ func (c *Cache) Start(ctx context.Context) error {
}()
go func() {
err := c.Refresh()
err := c.StartSchedule()
if err != nil {
c.logger.Error().Err(err).Msg("Failed to start cache refresh worker")
c.logger.Error().Err(err).Msg("Failed to start cache worker")
}
}()
@@ -584,16 +586,16 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
return nil
}
func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) (string, error) {
// Check link cache
if dl := c.checkDownloadLink(fileLink); dl != "" {
return dl
return dl, nil
}
ct := c.GetTorrent(torrentId)
if ct == nil {
return ""
return "", fmt.Errorf("torrent not found: %s", torrentId)
}
file := ct.Files[filename]
@@ -601,7 +603,7 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
// file link is empty, refresh the torrent to get restricted links
ct = c.refreshTorrent(ct) // Refresh the torrent from the debrid
if ct == nil {
return ""
return "", fmt.Errorf("failed to refresh torrent: %s", torrentId)
} else {
file = ct.Files[filename]
}
@@ -613,23 +615,21 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
// Try to reinsert the torrent?
newCt, err := c.reInsertTorrent(ct)
if err != nil {
c.logger.Error().Err(err).Msgf("Failed to reinsert torrent %s", ct.Name)
return ""
return "", fmt.Errorf("failed to reinsert torrent: %s. %w", ct.Name, err)
}
ct = newCt
file = ct.Files[filename]
c.logger.Debug().Msgf("Reinserted torrent %s", ct.Name)
}
c.logger.Trace().Msgf("Getting download link for %s", filename)
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
if errors.Is(err, request.HosterUnavailableError) {
c.logger.Error().Err(err).Msgf("Hoster is unavailable. Triggering repair for %s", ct.Name)
newCt, err := c.reInsertTorrent(ct)
if err != nil {
c.logger.Error().Err(err).Msgf("Failed to reinsert torrent %s", ct.Name)
return ""
return "", fmt.Errorf("failed to reinsert torrent: %w", err)
}
ct = newCt
c.logger.Debug().Msgf("Reinserted torrent %s", ct.Name)
@@ -637,30 +637,26 @@ func (c *Cache) GetDownloadLink(torrentId, filename, fileLink string) string {
// Retry getting the download link
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
c.logger.Error().Err(err).Msgf("Failed to get download link for %s", file.Link)
return ""
return "", err
}
if downloadLink == nil {
c.logger.Debug().Msgf("Download link is empty for %s", file.Link)
return ""
return "", fmt.Errorf("download link is empty for %s", file.Link)
}
c.updateDownloadLink(downloadLink)
return downloadLink.DownloadLink
return "", nil
} else if errors.Is(err, request.TrafficExceededError) {
// This is likely a fair usage limit error
c.logger.Error().Err(err).Msgf("Traffic exceeded for %s", ct.Name)
} else {
c.logger.Error().Err(err).Msgf("Failed to get download link for %s", file.Link)
return ""
return "", fmt.Errorf("failed to get download link: %w", err)
}
}
if downloadLink == nil {
c.logger.Debug().Msgf("Download link is empty for %s", file.Link)
return ""
return "", fmt.Errorf("download link is empty for %s", file.Link)
}
c.updateDownloadLink(downloadLink)
return downloadLink.DownloadLink
return downloadLink.DownloadLink, nil
}
func (c *Cache) GenerateDownloadLinks(t *CachedTorrent) {
@@ -700,10 +696,11 @@ func (c *Cache) AddTorrent(t *types.Torrent) error {
}
func (c *Cache) updateDownloadLink(dl *types.DownloadLink) {
expiresAt, _ := time.ParseDuration(c.autoExpiresLinksAfter)
c.downloadLinks.Store(dl.Link, downloadLinkCache{
Id: dl.Id,
Link: dl.DownloadLink,
ExpiresAt: time.Now().Add(c.autoExpiresLinksAfter),
ExpiresAt: time.Now().Add(expiresAt),
AccountId: dl.AccountId,
})
}

View File

@@ -241,12 +241,12 @@ func (c *Cache) refreshDownloadLinks() {
for k, v := range downloadLinks {
// if link is generated in the last 24 hours, add it to cache
timeSince := time.Since(v.Generated)
if timeSince < c.autoExpiresLinksAfter {
if timeSince < c.autoExpiresLinksAfterDuration {
c.downloadLinks.Store(k, downloadLinkCache{
Id: v.Id,
AccountId: v.AccountId,
Link: v.DownloadLink,
ExpiresAt: v.Generated.Add(c.autoExpiresLinksAfter - timeSince),
ExpiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince),
})
} else {
c.downloadLinks.Delete(k)

View File

@@ -133,8 +133,8 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
torrent.DownloadUncached = false // Set to false, avoid re-downloading
torrent, err = c.client.CheckStatus(torrent, true)
if err != nil && torrent != nil {
// Torrent is likely in progress
_ = c.DeleteTorrent(torrent.Id)
// Torrent is likely uncached, delete it
_ = c.client.DeleteTorrent(torrent.Id) // Delete the newly added un-cached torrent
return ct, fmt.Errorf("failed to check status: %w", err)
}
if torrent == nil {

View File

@@ -1,109 +1,78 @@
package debrid
import "time"
import (
"context"
"github.com/sirrobot01/decypharr/internal/utils"
"time"
)
func (c *Cache) Refresh() error {
func (c *Cache) StartSchedule() error {
// For now, we just want to refresh the listing and download links
go c.refreshDownloadLinksWorker()
go c.refreshTorrentsWorker()
go c.resetInvalidLinksWorker()
go c.cleanupWorker()
return nil
}
func (c *Cache) refreshDownloadLinksWorker() {
refreshTicker := time.NewTicker(c.downloadLinksRefreshInterval)
defer refreshTicker.Stop()
for range refreshTicker.C {
c.refreshDownloadLinks()
}
}
func (c *Cache) refreshTorrentsWorker() {
refreshTicker := time.NewTicker(c.torrentRefreshInterval)
defer refreshTicker.Stop()
for range refreshTicker.C {
c.refreshTorrents()
}
}
func (c *Cache) resetInvalidLinksWorker() {
// Calculate time until next 00:00 CET
now := time.Now()
loc, err := time.LoadLocation("CET")
ctx := context.Background()
downloadLinkJob, err := utils.ScheduleJob(ctx, c.downloadLinksRefreshInterval, nil, c.refreshDownloadLinks)
if err != nil {
// Fallback if CET timezone can't be loaded
c.logger.Error().Err(err).Msg("Failed to load CET timezone, using local time")
loc = time.Local
c.logger.Error().Err(err).Msg("Failed to add download link refresh job")
}
if t, err := downloadLinkJob.NextRun(); err == nil {
c.logger.Trace().Msgf("Next download link refresh job: %s", t.Format("2006-01-02 15:04:05"))
}
nowInCET := now.In(loc)
next := time.Date(
nowInCET.Year(),
nowInCET.Month(),
nowInCET.Day(),
0, 0, 0, 0,
loc,
)
// If it's already past 12:00 CET today, schedule for tomorrow
if nowInCET.After(next) {
next = next.Add(24 * time.Hour)
torrentJob, err := utils.ScheduleJob(ctx, c.torrentRefreshInterval, nil, c.refreshTorrents)
if err != nil {
c.logger.Error().Err(err).Msg("Failed to add torrent refresh job")
}
if t, err := torrentJob.NextRun(); err == nil {
c.logger.Trace().Msgf("Next torrent refresh job: %s", t.Format("2006-01-02 15:04:05"))
}
// Duration until next 12:00 CET
initialWait := next.Sub(nowInCET)
// Set up initial timer
timer := time.NewTimer(initialWait)
defer timer.Stop()
c.logger.Debug().Msgf("Scheduled Links Reset at %s (in %s)", next.Format("2006-01-02 15:04:05 MST"), initialWait)
// Wait for the first execution
<-timer.C
c.resetInvalidLinks()
// Now set up the daily ticker
refreshTicker := time.NewTicker(24 * time.Hour)
defer refreshTicker.Stop()
for range refreshTicker.C {
c.resetInvalidLinks()
// Schedule the reset invalid links job
// This job will run every 24 hours
// and reset the invalid links in the cache
cet, _ := time.LoadLocation("CET")
resetLinksJob, err := utils.ScheduleJob(ctx, "00:00", cet, c.resetInvalidLinks)
if err != nil {
c.logger.Error().Err(err).Msg("Failed to add reset invalid links job")
}
if t, err := resetLinksJob.NextRun(); err == nil {
c.logger.Trace().Msgf("Next reset invalid download links job at: %s", t.Format("2006-01-02 15:04:05"))
}
// Schedule the cleanup job
cleanupJob, err := utils.ScheduleJob(ctx, "1h", nil, c.cleanupWorker)
if err != nil {
c.logger.Error().Err(err).Msg("Failed to add cleanup job")
}
if t, err := cleanupJob.NextRun(); err == nil {
c.logger.Trace().Msgf("Next cleanup job at: %s", t.Format("2006-01-02 15:04:05"))
}
return nil
}
func (c *Cache) cleanupWorker() {
// Cleanup every hour
// Removes deleted torrents from the cache
torrents, err := c.client.GetTorrents()
if err != nil {
c.logger.Error().Err(err).Msg("Failed to get torrents")
return
}
ticker := time.NewTicker(1 * time.Hour)
idStore := make(map[string]struct{})
for _, t := range torrents {
idStore[t.Id] = struct{}{}
}
for range ticker.C {
torrents, err := c.client.GetTorrents()
if err != nil {
c.logger.Error().Err(err).Msg("Failed to get torrents")
continue
deletedTorrents := make([]string, 0)
c.torrents.Range(func(key string, _ *CachedTorrent) bool {
if _, exists := idStore[key]; !exists {
deletedTorrents = append(deletedTorrents, key)
}
return true
})
idStore := make(map[string]struct{})
for _, t := range torrents {
idStore[t.Id] = struct{}{}
}
deletedTorrents := make([]string, 0)
c.torrents.Range(func(key string, _ *CachedTorrent) bool {
if _, exists := idStore[key]; !exists {
deletedTorrents = append(deletedTorrents, key)
}
return true
})
if len(deletedTorrents) > 0 {
c.DeleteTorrents(deletedTorrents)
c.logger.Info().Msgf("Deleted %d torrents", len(deletedTorrents))
}
if len(deletedTorrents) > 0 {
c.DeleteTorrents(deletedTorrents)
c.logger.Info().Msgf("Deleted %d torrents", len(deletedTorrents))
}
}

View File

@@ -450,16 +450,18 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
return nil, err
}
switch data.ErrorCode {
case 19:
return nil, request.HosterUnavailableError // File has been removed
case 23:
return nil, request.TrafficExceededError
case 24:
return nil, request.HosterUnavailableError // Link has been nerfed
case 19:
return nil, request.HosterUnavailableError // File has been removed
case 36:
return nil, request.TrafficExceededError // traffic exceeded
case 34:
return nil, request.TrafficExceededError // traffic exceeded
case 35:
return nil, request.HosterUnavailableError
case 36:
return nil, request.TrafficExceededError // traffic exceeded
default:
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
}
@@ -489,48 +491,36 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
if r.currentDownloadKey == "" {
// If no download key is set, use the first one
r.DownloadKeys.Range(func(key string, value types.Account) bool {
if !value.Disabled {
r.currentDownloadKey = value.Token
return false
}
return true
})
accounts := r.getActiveAccounts()
if len(accounts) < 1 {
// No active download keys. It's likely that the key has reached bandwidth limit
return nil, fmt.Errorf("no active download keys")
}
r.currentDownloadKey = accounts[0].Token
}
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.currentDownloadKey))
downloadLink, err := r._getDownloadLink(file)
retries := 0
if err != nil {
accountsFunc := func() (*types.DownloadLink, error) {
accounts := r.getActiveAccounts()
var err error
if len(accounts) < 1 {
// No active download keys. It's likely that the key has reached bandwidth limit
return nil, fmt.Errorf("no active download keys")
}
for _, account := range accounts {
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
downloadLink, err := r._getDownloadLink(file)
if err != nil {
if errors.Is(err, request.TrafficExceededError) {
continue
}
// If the error is not traffic exceeded, skip generating the link with a new key
return nil, err
} else {
// If we successfully generated a link, break the loop
downloadLink.AccountId = account.ID
return downloadLink, nil
}
}
// If we reach here, it means all keys have been exhausted
if errors.Is(err, request.TrafficExceededError) {
return nil, request.TrafficExceededError
}
return nil, fmt.Errorf("failed to generate download link: %w", err)
if errors.Is(err, request.TrafficExceededError) {
// Retries generating
retries = 4
} else {
// If the error is not traffic exceeded, return the error
return nil, err
}
return accountsFunc()
}
for retries > 0 {
downloadLink, err = r._getDownloadLink(file)
if err == nil {
return downloadLink, nil
}
if !errors.Is(err, request.TrafficExceededError) {
return nil, err
}
// Add a delay before retrying
time.Sleep(5 * time.Second)
}
return downloadLink, nil
}
@@ -718,11 +708,9 @@ func (r *RealDebrid) DisableAccount(accountId string) {
r.logger.Info().Msgf("Cannot disable last account: %s", accountId)
return
}
r.currentDownloadKey = ""
if value, ok := r.DownloadKeys.Load(accountId); ok {
value.Disabled = true
if value.Token == r.currentDownloadKey {
r.currentDownloadKey = ""
}
r.DownloadKeys.Store(accountId, value)
r.logger.Info().Msgf("Disabled account Index: %s", value.ID)
}

View File

@@ -159,7 +159,7 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, t
remainingFiles := make(map[string]debrid.File)
for _, file := range files {
remainingFiles[file.Name] = file
remainingFiles[utils.EscapePath(file.Name)] = file
}
ticker := time.NewTicker(100 * time.Millisecond)
@@ -181,7 +181,7 @@ func (q *QBit) createSymlinksWebdav(debridTorrent *debrid.Torrent, rclonePath, t
fullFilePath := filepath.Join(rclonePath, file.Name)
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil {
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
q.logger.Info().Msgf("File is ready: %s", file.Name)
@@ -225,7 +225,7 @@ func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrent
for _, file := range files {
pending[file.Path] = file
}
ticker := time.NewTicker(100 * time.Millisecond)
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
filePaths := make([]string, 0, len(pending))
@@ -236,7 +236,7 @@ func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrent
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
q.logger.Info().Msgf("File is ready: %s", file.Name)
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil {
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
}
filePaths = append(filePaths, fileSymlinkPath)
@@ -247,7 +247,7 @@ func (q *QBit) createSymlinks(debridTorrent *debrid.Torrent, rclonePath, torrent
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
q.logger.Info().Msgf("File is ready: %s", file.Path)
fileSymlinkPath := filepath.Join(symlinkPath, file.Path)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil {
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
}
filePaths = append(filePaths, fileSymlinkPath)

View File

@@ -119,6 +119,7 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr
if ok {
q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
// Use webdav to download the file
timer := time.Now()
if err := cache.AddTorrent(debridTorrent); err != nil {
q.logger.Error().Msgf("Error adding torrent to cache: %v", err)
q.MarkAsFailed(torrent)
@@ -126,9 +127,9 @@ func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debrid.Torrent, arr
}
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
timer := time.Now()
torrentSymlinkPath, err = q.createSymlinksWebdav(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
q.logger.Debug().Msgf("Symlink creation took %s", time.Since(timer))
q.logger.Debug().Msgf("Process Completed in %s", time.Since(timer))
} else {
// User is using either zurg or debrid webdav

View File

@@ -9,6 +9,7 @@ import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"golang.org/x/sync/errgroup"
@@ -28,7 +29,7 @@ type Repair struct {
Jobs map[string]*Job
arrs *arr.Storage
deb *debrid.Engine
duration time.Duration
interval string
runOnStart bool
ZurgURL string
IsZurg bool
@@ -42,10 +43,6 @@ type Repair struct {
func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
cfg := config.Get()
duration, err := parseSchedule(cfg.Repair.Interval)
if err != nil {
duration = time.Hour * 24
}
workers := runtime.NumCPU() * 20
if cfg.Repair.Workers > 0 {
workers = cfg.Repair.Workers
@@ -53,7 +50,7 @@ func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
r := &Repair{
arrs: arrs,
logger: logger.New("repair"),
duration: duration,
interval: cfg.Repair.Interval,
runOnStart: cfg.Repair.RunOnStart,
ZurgURL: cfg.Repair.ZurgURL,
useWebdav: cfg.Repair.UseWebDav,
@@ -73,7 +70,6 @@ func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
}
func (r *Repair) Start(ctx context.Context) error {
cfg := config.Get()
r.ctx = ctx
if r.runOnStart {
r.logger.Info().Msgf("Running initial repair")
@@ -84,30 +80,20 @@ func (r *Repair) Start(ctx context.Context) error {
}()
}
ticker := time.NewTicker(r.duration)
defer ticker.Stop()
r.logger.Info().Msgf("Starting repair worker with %v interval", r.duration)
for {
select {
case <-r.ctx.Done():
r.logger.Info().Msg("Repair worker stopped")
return nil
case t := <-ticker.C:
r.logger.Info().Msgf("Running repair at %v", t.Format("15:04:05"))
if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil {
r.logger.Error().Err(err).Msg("Error running repair")
}
// If using time-of-day schedule, reset the ticker for next day
if strings.Contains(cfg.Repair.Interval, ":") {
ticker.Reset(r.duration)
}
r.logger.Info().Msgf("Next scheduled repair at %v", t.Add(r.duration).Format("15:04:05"))
job, err := utils.ScheduleJob(r.ctx, r.interval, time.Local, func() {
r.logger.Info().Msgf("Repair job started at %s", time.Now().Format("15:04:05"))
if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil {
r.logger.Error().Err(err).Msg("Error running repair job")
}
})
if err != nil {
r.logger.Error().Err(err).Msg("Error scheduling repair job")
return err
}
if t, err := job.NextRun(); err == nil {
r.logger.Info().Msgf("Next repair job scheduled at %s", t.Format("15:04:05"))
}
return nil
}
type JobStatus string

View File

@@ -57,18 +57,21 @@ func (f *File) Close() error {
return nil
}
func (f *File) getDownloadLink() string {
func (f *File) getDownloadLink() (string, error) {
// Check if we already have a final URL cached
if f.downloadLink != "" && isValidURL(f.downloadLink) {
return f.downloadLink
return f.downloadLink, nil
}
downloadLink, err := f.cache.GetDownloadLink(f.torrentId, f.name, f.link)
if err != nil {
return "", err
}
downloadLink := f.cache.GetDownloadLink(f.torrentId, f.name, f.link)
if downloadLink != "" && isValidURL(downloadLink) {
f.downloadLink = downloadLink
return downloadLink
return downloadLink, nil
}
return ""
return "", fmt.Errorf("download link not found")
}
func (f *File) stream() (*http.Response, error) {
@@ -79,7 +82,11 @@ func (f *File) stream() (*http.Response, error) {
downloadLink string
)
downloadLink = f.getDownloadLink() // Uses the first API key
downloadLink, err = f.getDownloadLink()
if err != nil {
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err)
return nil, io.EOF
}
if downloadLink == "" {
_log.Trace().Msgf("Failed to get download link for %s. Empty download link", f.name)
return nil, io.EOF
@@ -101,9 +108,7 @@ func (f *File) stream() (*http.Response, error) {
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
f.downloadLink = ""
closeResp := func() {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
@@ -117,7 +122,7 @@ func (f *File) stream() (*http.Response, error) {
return nil, io.EOF
}
if strings.Contains(string(b), "You can not download this file because you have exceeded your traffic on this hoster") {
_log.Trace().Msgf("Failed to get download link for %s. Download link expired", f.name)
_log.Trace().Msgf("Bandwidth exceeded for %s. Download token will be disabled if you have more than one", f.name)
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "bandwidth_exceeded")
// Retry with a different API key if it's available
return f.stream()
@@ -132,7 +137,11 @@ func (f *File) stream() (*http.Response, error) {
// Regenerate a new download link
f.cache.MarkDownloadLinkAsInvalid(f.link, downloadLink, "link_not_found")
// Generate a new download link
downloadLink = f.getDownloadLink()
downloadLink, err = f.getDownloadLink()
if err != nil {
_log.Trace().Msgf("Failed to get download link for %s. %s", f.name, err)
return nil, io.EOF
}
if downloadLink == "" {
_log.Trace().Msgf("Failed to get download link for %s", f.name)
return nil, io.EOF

View File

@@ -262,16 +262,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// __all__ or torrents folder
// Manually build the xml
ttl = 30 * time.Second
//if served := h.serveFromCacheIfValid(w, r, cacheKey, ttl); served {
// return
//}
//// Refresh the parent XML
//h.cache.RefreshListings(false)
//// Check again if the cache is valid
//// If not, we will use the default WebDAV handler
//if served := h.serveFromCacheIfValid(w, r, cacheKey, ttl); served {
// return
//}
}
if served := h.serveFromCacheIfValid(w, r, cacheKey, ttl); served {