19 Commits

Author SHA1 Message Date
Mukhtar Akere
22dae9efad Add a new worker that checks if an account is opened
Some checks failed
Release Docker Build / docker (push) Has been cancelled
GoReleaser / goreleaser (push) Has been cancelled
2025-09-17 23:30:45 +01:00
Mukhtar Akere
3f0870cd1c torbox: fix pagination bug, fix download uncached bug 2025-09-16 21:34:58 +01:00
Mukhtar Akere
30b2db06e7 Rewrote account switching, fix some minor bugs here and there 2025-09-16 21:15:24 +01:00
Mukhtar Akere
76f5b85313 Fix issues with dir-cache-time, umask and wrongly set gid,uid, add extra vfs options 2025-09-05 16:11:22 +01:00
Mukhtar Akere
85cd37f29b Revert former beta changes 2025-08-30 04:10:18 +01:00
Mukhtar Akere
aff12c2e4b Fix Added bug in torrent 2025-08-28 03:26:43 +01:00
Mukhtar Akere
d76ca032ab hotfix config update 2025-08-28 01:30:54 +01:00
Mukhtar Akere
8bb786c689 hotfix nil downloadLink 2025-08-27 23:57:49 +01:00
Mukhtar Akere
83058489b6 Add callback URL for post-processing 2025-08-27 13:02:43 +01:00
Mukhtar Akere
267cc2d32b Fix issues with account switching 2025-08-26 15:31:24 +01:00
Mukhtar Akere
eefe8a3901 Hotfix for download link generation and account switching 2025-08-24 21:54:26 +01:00
Mukhtar Akere
618eb73067 - Add support for multi-season imports
- Improve in-memory storage, which reduces memory usage
- Fix issues with rclone integration
2025-08-24 16:25:37 +01:00
Mukhtar Akere
f8667938b6 Add more rclone flags, fix minor issues 2025-08-23 06:00:07 +01:00
Mukhtar Akere
b0a698f15e - Improve memory footprint
- Add batch processing for arr repairs
2025-08-21 03:32:46 +01:00
Mukhtar Akere
2548c21e5b Fix rclone file log
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-08-19 01:01:53 +01:00
Mukhtar Akere
1b03ccefbb Hotfix rclone logging flags 2025-08-19 00:55:43 +01:00
Mukhtar Akere
e3a249a9cc Fix issues with rclone mounting
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-08-18 22:12:26 +01:00
Mukhtar Akere
8696db42d2 - Add more rclone supports
- Add rclone log viewer
- Add more stats to Stats page
- Fix some minor bugs
2025-08-18 01:57:02 +01:00
Mukhtar Akere
742d8fb088 - Fix issues with cache dir
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
- Fix responsiveness issue with navbars
- Support user entry for users running as non-root
- Other minor fixes
2025-08-12 15:14:42 +01:00
76 changed files with 3458 additions and 1785 deletions

View File

@@ -42,8 +42,20 @@ LABEL org.opencontainers.image.authors = "sirrobot01"
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md" LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md"
# Install dependencies including rclone # Install dependencies including rclone
RUN apk add --no-cache fuse3 ca-certificates su-exec shadow rclone && \ RUN apk add --no-cache fuse3 ca-certificates su-exec shadow curl unzip && \
echo "user_allow_other" >> /etc/fuse.conf echo "user_allow_other" >> /etc/fuse.conf && \
case "$(uname -m)" in \
x86_64) ARCH=amd64 ;; \
aarch64) ARCH=arm64 ;; \
armv7l) ARCH=arm ;; \
*) echo "Unsupported architecture: $(uname -m)" && exit 1 ;; \
esac && \
curl -O "https://downloads.rclone.org/rclone-current-linux-${ARCH}.zip" && \
unzip "rclone-current-linux-${ARCH}.zip" && \
cp rclone-*/rclone /usr/local/bin/ && \
chmod +x /usr/local/bin/rclone && \
rm -rf rclone-* && \
apk del curl unzip
# Copy binaries and entrypoint # Copy binaries and entrypoint
COPY --from=builder /decypharr /usr/bin/decypharr COPY --from=builder /decypharr /usr/bin/decypharr

View File

@@ -7,10 +7,10 @@ import (
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit" "github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/server" "github.com/sirrobot01/decypharr/pkg/server"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/version" "github.com/sirrobot01/decypharr/pkg/version"
"github.com/sirrobot01/decypharr/pkg/web" "github.com/sirrobot01/decypharr/pkg/web"
"github.com/sirrobot01/decypharr/pkg/webdav" "github.com/sirrobot01/decypharr/pkg/webdav"
"github.com/sirrobot01/decypharr/pkg/wire"
"net/http" "net/http"
"os" "os"
"runtime" "runtime"
@@ -40,6 +40,7 @@ func Start(ctx context.Context) error {
svcCtx, cancelSvc := context.WithCancel(ctx) svcCtx, cancelSvc := context.WithCancel(ctx)
defer cancelSvc() defer cancelSvc()
// Create the logger path if it doesn't exist
for { for {
cfg := config.Get() cfg := config.Get()
_log := logger.Default() _log := logger.Default()
@@ -76,7 +77,7 @@ func Start(ctx context.Context) error {
reset := func() { reset := func() {
// Reset the store and services // Reset the store and services
qb.Reset() qb.Reset()
store.Reset() wire.Reset()
// refresh GC // refresh GC
runtime.GC() runtime.GC()
} }
@@ -150,24 +151,16 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
// Start rclone RC server if enabled // Start rclone RC server if enabled
safeGo(func() error { safeGo(func() error {
rcManager := store.Get().RcloneManager() rcManager := wire.Get().RcloneManager()
if rcManager == nil { if rcManager == nil {
return nil return nil
} }
return rcManager.Start(ctx) return rcManager.Start(ctx)
}) })
safeGo(func() error {
arr := store.Get().Arr()
if arr == nil {
return nil
}
return arr.StartSchedule(ctx)
})
if cfg := config.Get(); cfg.Repair.Enabled { if cfg := config.Get(); cfg.Repair.Enabled {
safeGo(func() error { safeGo(func() error {
repair := store.Get().Repair() repair := wire.Get().Repair()
if repair != nil { if repair != nil {
if err := repair.Start(ctx); err != nil { if err := repair.Start(ctx); err != nil {
_log.Error().Err(err).Msg("repair failed") _log.Error().Err(err).Msg("repair failed")
@@ -178,7 +171,8 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
} }
safeGo(func() error { safeGo(func() error {
return store.Get().StartQueueSchedule(ctx) wire.Get().StartWorkers(ctx)
return nil
}) })
go func() { go func() {

View File

@@ -51,8 +51,12 @@ services:
- /dev/fuse:/dev/fuse:rwm - /dev/fuse:/dev/fuse:rwm
cap_add: cap_add:
- SYS_ADMIN - SYS_ADMIN
security_opt:
- apparmor:unconfined
environment: environment:
- UMASK=002 - UMASK=002
- PUID=1000 # Change to your user ID
- PGID=1000 # Change to your group ID
``` ```
**Important Docker Notes:** **Important Docker Notes:**

1
go.mod
View File

@@ -34,6 +34,7 @@ require (
github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/puzpuzpuz/xsync/v4 v4.1.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect
golang.org/x/sys v0.33.0 // indirect golang.org/x/sys v0.33.0 // indirect
) )

2
go.sum
View File

@@ -186,6 +186,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=

View File

@@ -32,6 +32,7 @@ type Debrid struct {
APIKey string `json:"api_key,omitempty"` APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"` DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"` Folder string `json:"folder,omitempty"`
RcloneMountPath string `json:"rclone_mount_path,omitempty"` // Custom rclone mount path for this debrid service
DownloadUncached bool `json:"download_uncached,omitempty"` DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"` CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
@@ -90,6 +91,7 @@ type Rclone struct {
// Global mount folder where all providers will be mounted as subfolders // Global mount folder where all providers will be mounted as subfolders
Enabled bool `json:"enabled,omitempty"` Enabled bool `json:"enabled,omitempty"`
MountPath string `json:"mount_path,omitempty"` MountPath string `json:"mount_path,omitempty"`
RcPort string `json:"rc_port,omitempty"`
// Cache settings // Cache settings
CacheDir string `json:"cache_dir,omitempty"` CacheDir string `json:"cache_dir,omitempty"`
@@ -97,14 +99,21 @@ type Rclone struct {
// VFS settings // VFS settings
VfsCacheMode string `json:"vfs_cache_mode,omitempty"` // off, minimal, writes, full VfsCacheMode string `json:"vfs_cache_mode,omitempty"` // off, minimal, writes, full
VfsCacheMaxAge string `json:"vfs_cache_max_age,omitempty"` // Maximum age of objects in the cache (default 1h) VfsCacheMaxAge string `json:"vfs_cache_max_age,omitempty"` // Maximum age of objects in the cache (default 1h)
VfsDiskSpaceTotal string `json:"vfs_disk_space_total,omitempty"` // Total disk space available for the cache (default off)
VfsCacheMaxSize string `json:"vfs_cache_max_size,omitempty"` // Maximum size of the cache (default off) VfsCacheMaxSize string `json:"vfs_cache_max_size,omitempty"` // Maximum size of the cache (default off)
VfsCachePollInterval string `json:"vfs_cache_poll_interval,omitempty"` // How often to poll for changes (default 1m) VfsCachePollInterval string `json:"vfs_cache_poll_interval,omitempty"` // How often to poll for changes (default 1m)
VfsReadChunkSize string `json:"vfs_read_chunk_size,omitempty"` // Read chunk size (default 128M) VfsReadChunkSize string `json:"vfs_read_chunk_size,omitempty"` // Read chunk size (default 128M)
VfsReadChunkSizeLimit string `json:"vfs_read_chunk_size_limit,omitempty"` // Max chunk size (default off) VfsReadChunkSizeLimit string `json:"vfs_read_chunk_size_limit,omitempty"` // Max chunk size (default off)
VfsReadAhead string `json:"vfs_read_ahead,omitempty"` // read ahead size VfsReadAhead string `json:"vfs_read_ahead,omitempty"` // read ahead size
VfsPollInterval string `json:"vfs_poll_interval,omitempty"` // How often to rclone cleans the cache (default 1m)
BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M) BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M)
VfsCacheMinFreeSpace string `json:"vfs_cache_min_free_space,omitempty"`
VfsFastFingerprint bool `json:"vfs_fast_fingerprint,omitempty"`
VfsReadChunkStreams int `json:"vfs_read_chunk_streams,omitempty"`
AsyncRead *bool `json:"async_read,omitempty"` // Use async read for files
Transfers int `json:"transfers,omitempty"` // Number of transfers to use (default 4)
UseMmap bool `json:"use_mmap,omitempty"`
// File system settings // File system settings
UID uint32 `json:"uid,omitempty"` // User ID for mounted files UID uint32 `json:"uid,omitempty"` // User ID for mounted files
GID uint32 `json:"gid,omitempty"` // Group ID for mounted files GID uint32 `json:"gid,omitempty"` // Group ID for mounted files
@@ -117,6 +126,8 @@ type Rclone struct {
// Performance settings // Performance settings
NoModTime bool `json:"no_modtime,omitempty"` // Don't read/write modification time NoModTime bool `json:"no_modtime,omitempty"` // Don't read/write modification time
NoChecksum bool `json:"no_checksum,omitempty"` // Don't checksum files on upload NoChecksum bool `json:"no_checksum,omitempty"` // Don't checksum files on upload
LogLevel string `json:"log_level,omitempty"`
} }
type Config struct { type Config struct {
@@ -140,6 +151,7 @@ type Config struct {
Auth *Auth `json:"-"` Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"` DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"` RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
CallbackURL string `json:"callback_url,omitempty"`
} }
func (c *Config) JsonFile() string { func (c *Config) JsonFile() string {
@@ -296,6 +308,10 @@ func (c *Config) IsSizeAllowed(size int64) bool {
return true return true
} }
func (c *Config) SecretKey() string {
return cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
}
func (c *Config) GetAuth() *Auth { func (c *Config) GetAuth() *Auth {
if !c.UseAuth { if !c.UseAuth {
return nil return nil
@@ -326,10 +342,7 @@ func (c *Config) NeedsSetup() error {
} }
func (c *Config) NeedsAuth() bool { func (c *Config) NeedsAuth() bool {
if c.UseAuth { return !c.UseAuth && c.GetAuth().Username == ""
return c.GetAuth().Username == ""
}
return false
} }
func (c *Config) updateDebrid(d Debrid) Debrid { func (c *Config) updateDebrid(d Debrid) Debrid {
@@ -414,6 +427,11 @@ func (c *Config) setDefaults() {
// Rclone defaults // Rclone defaults
if c.Rclone.Enabled { if c.Rclone.Enabled {
c.Rclone.RcPort = cmp.Or(c.Rclone.RcPort, "5572")
if c.Rclone.AsyncRead == nil {
_asyncTrue := true
c.Rclone.AsyncRead = &_asyncTrue
}
c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off") c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off")
if c.Rclone.UID == 0 { if c.Rclone.UID == 0 {
c.Rclone.UID = uint32(os.Getuid()) c.Rclone.UID = uint32(os.Getuid())
@@ -426,10 +444,14 @@ func (c *Config) setDefaults() {
c.Rclone.GID = uint32(os.Getgid()) c.Rclone.GID = uint32(os.Getgid())
} }
} }
if c.Rclone.Transfers == 0 {
c.Rclone.Transfers = 4 // Default number of transfers
}
if c.Rclone.VfsCacheMode != "off" { if c.Rclone.VfsCacheMode != "off" {
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute
} }
c.Rclone.DirCacheTime = cmp.Or(c.Rclone.DirCacheTime, "5m") c.Rclone.DirCacheTime = cmp.Or(c.Rclone.DirCacheTime, "5m")
c.Rclone.LogLevel = cmp.Or(c.Rclone.LogLevel, "INFO")
} }
// Load the auth file // Load the auth file
c.Auth = c.GetAuth() c.Auth = c.GetAuth()

View File

@@ -26,7 +26,7 @@ func GetLogPath() string {
} }
} }
return filepath.Join(logsDir, "decypharr.log") return logsDir
} }
func New(prefix string) zerolog.Logger { func New(prefix string) zerolog.Logger {
@@ -34,7 +34,7 @@ func New(prefix string) zerolog.Logger {
level := config.Get().LogLevel level := config.Get().LogLevel
rotatingLogFile := &lumberjack.Logger{ rotatingLogFile := &lumberjack.Logger{
Filename: GetLogPath(), Filename: filepath.Join(GetLogPath(), "decypharr.log"),
MaxSize: 10, MaxSize: 10,
MaxAge: 15, MaxAge: 15,
Compress: true, Compress: true,

View File

@@ -45,6 +45,8 @@ func getDiscordHeader(event string) string {
return "[Decypharr] Repair Completed, Awaiting action" return "[Decypharr] Repair Completed, Awaiting action"
case "repair_complete": case "repair_complete":
return "[Decypharr] Repair Complete" return "[Decypharr] Repair Complete"
case "repair_cancelled":
return "[Decypharr] Repair Cancelled"
default: default:
// split the event string and capitalize the first letter of each word // split the event string and capitalize the first letter of each word
evs := strings.Split(event, "_") evs := strings.Split(event, "_")

View File

@@ -298,40 +298,7 @@ func New(options ...ClientOption) *Client {
} }
// Configure proxy if needed // Configure proxy if needed
if client.proxy != "" { SetProxy(transport, client.proxy)
if strings.HasPrefix(client.proxy, "socks5://") {
// Handle SOCKS5 proxy
socksURL, err := url.Parse(client.proxy)
if err != nil {
client.logger.Error().Msgf("Failed to parse SOCKS5 proxy URL: %v", err)
} else {
auth := &proxy.Auth{}
if socksURL.User != nil {
auth.User = socksURL.User.Username()
password, _ := socksURL.User.Password()
auth.Password = password
}
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
if err != nil {
client.logger.Error().Msgf("Failed to create SOCKS5 dialer: %v", err)
} else {
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialer.Dial(network, addr)
}
}
}
} else {
proxyURL, err := url.Parse(client.proxy)
if err != nil {
client.logger.Error().Msgf("Failed to parse proxy URL: %v", err)
} else {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
} else {
transport.Proxy = http.ProxyFromEnvironment
}
// Set the transport to the client // Set the transport to the client
client.client.Transport = transport client.client.Transport = transport
@@ -417,3 +384,41 @@ func isRetryableError(err error) bool {
// Not a retryable error // Not a retryable error
return false return false
} }
func SetProxy(transport *http.Transport, proxyURL string) {
if proxyURL != "" {
if strings.HasPrefix(proxyURL, "socks5://") {
// Handle SOCKS5 proxy
socksURL, err := url.Parse(proxyURL)
if err != nil {
return
} else {
auth := &proxy.Auth{}
if socksURL.User != nil {
auth.User = socksURL.User.Username()
password, _ := socksURL.User.Password()
auth.Password = password
}
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
if err != nil {
return
} else {
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialer.Dial(network, addr)
}
}
}
} else {
_proxy, err := url.Parse(proxyURL)
if err != nil {
return
} else {
transport.Proxy = http.ProxyURL(_proxy)
}
}
} else {
transport.Proxy = http.ProxyFromEnvironment
}
return
}

View File

@@ -22,3 +22,15 @@ func Contains(slice []string, value string) bool {
} }
return false return false
} }
func Mask(text string) string {
res := ""
if len(text) > 12 {
res = text[:8] + "****" + text[len(text)-4:]
} else if len(text) > 8 {
res = text[:4] + "****" + text[len(text)-2:]
} else {
res = "****"
}
return res
}

View File

@@ -1,7 +1,6 @@
package utils package utils
import ( import (
"context"
"fmt" "fmt"
"github.com/go-co-op/gocron/v2" "github.com/go-co-op/gocron/v2"
"github.com/robfig/cron/v3" "github.com/robfig/cron/v3"
@@ -10,25 +9,6 @@ import (
"time" "time"
) )
func ScheduleJob(ctx context.Context, interval string, loc *time.Location, jobFunc func()) (gocron.Scheduler, error) {
if loc == nil {
loc = time.Local
}
s, err := gocron.NewScheduler(gocron.WithLocation(loc))
if err != nil {
return s, fmt.Errorf("failed to create scheduler: %w", err)
}
jd, err := ConvertToJobDef(interval)
if err != nil {
return s, fmt.Errorf("failed to convert interval to job definition: %w", err)
}
// Schedule the job
if _, err = s.NewJob(jd, gocron.NewTask(jobFunc), gocron.WithContext(ctx)); err != nil {
return s, fmt.Errorf("failed to create job: %w", err)
}
return s, nil
}
// ConvertToJobDef converts a string interval to a gocron.JobDefinition. // ConvertToJobDef converts a string interval to a gocron.JobDefinition.
func ConvertToJobDef(interval string) (gocron.JobDefinition, error) { func ConvertToJobDef(interval string) (gocron.JobDefinition, error) {
// Parse the interval string // Parse the interval string

View File

@@ -6,15 +6,16 @@ import (
"crypto/tls" "crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"io" "io"
"net/http" "net/http"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
) )
// Type is a type of arr // Type is a type of arr
@@ -109,7 +110,7 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
func (a *Arr) Validate() error { func (a *Arr) Validate() error {
if a.Token == "" || a.Host == "" { if a.Token == "" || a.Host == "" {
return fmt.Errorf("arr not configured: %s", a.Name) return nil
} }
resp, err := a.Request("GET", "/api/v3/health", nil) resp, err := a.Request("GET", "/api/v3/health", nil)
if err != nil { if err != nil {
@@ -190,7 +191,7 @@ func (s *Storage) GetAll() []*Arr {
return arrs return arrs
} }
func (s *Storage) StartSchedule(ctx context.Context) error { func (s *Storage) StartWorker(ctx context.Context) error {
ticker := time.NewTicker(10 * time.Second) ticker := time.NewTicker(10 * time.Second)

View File

@@ -234,6 +234,35 @@ func (a *Arr) searchRadarr(files []ContentFile) error {
} }
func (a *Arr) SearchMissing(files []ContentFile) error { func (a *Arr) SearchMissing(files []ContentFile) error {
if len(files) == 0 {
return nil
}
return a.batchSearchMissing(files)
}
func (a *Arr) batchSearchMissing(files []ContentFile) error {
if len(files) == 0 {
return nil
}
BatchSize := 50
// Batch search for missing files
if len(files) > BatchSize {
for i := 0; i < len(files); i += BatchSize {
end := i + BatchSize
if end > len(files) {
end = len(files)
}
if err := a.searchMissing(files[i:end]); err != nil {
// continue searching the rest of the files
continue
}
}
return nil
}
return a.searchMissing(files)
}
func (a *Arr) searchMissing(files []ContentFile) error {
switch a.Type { switch a.Type {
case Sonarr: case Sonarr:
return a.searchSonarr(files) return a.searchSonarr(files)
@@ -245,6 +274,28 @@ func (a *Arr) SearchMissing(files []ContentFile) error {
} }
func (a *Arr) DeleteFiles(files []ContentFile) error { func (a *Arr) DeleteFiles(files []ContentFile) error {
if len(files) == 0 {
return nil
}
BatchSize := 50
// Batch delete files
if len(files) > BatchSize {
for i := 0; i < len(files); i += BatchSize {
end := i + BatchSize
if end > len(files) {
end = len(files)
}
if err := a.batchDeleteFiles(files[i:end]); err != nil {
// continue deleting the rest of the files
continue
}
}
return nil
}
return a.batchDeleteFiles(files)
}
func (a *Arr) batchDeleteFiles(files []ContentFile) error {
ids := make([]int, 0) ids := make([]int, 0)
for _, f := range files { for _, f := range files {
ids = append(ids, f.FileId) ids = append(ids, f.FileId)

View File

@@ -133,7 +133,7 @@ func (a *Arr) CleanupQueue() error {
messages := q.StatusMessages messages := q.StatusMessages
if len(messages) > 0 { if len(messages) > 0 {
for _, m := range messages { for _, m := range messages {
if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible for import in") { if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible") {
isMessedUp = true isMessedUp = true
break break
} }

View File

@@ -0,0 +1,119 @@
package account
import (
"fmt"
"net/http"
"sync/atomic"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
type Account struct {
Debrid string `json:"debrid"` // The debrid service name, e.g. "realdebrid"
links *xsync.Map[string, types.DownloadLink] // key is the sliced file link
Index int `json:"index"` // The index of the account in the config
Disabled atomic.Bool `json:"disabled"`
Token string `json:"token"`
TrafficUsed atomic.Int64 `json:"traffic_used"` // Traffic used in bytes
Username string `json:"username"` // Username for the account
httpClient *request.Client
// Account reactivation tracking
DisableCount atomic.Int32 `json:"disable_count"`
}
func (a *Account) Equals(other *Account) bool {
if other == nil {
return false
}
return a.Token == other.Token && a.Debrid == other.Debrid
}
func (a *Account) Client() *request.Client {
return a.httpClient
}
// slice download link
func (a *Account) sliceFileLink(fileLink string) string {
if a.Debrid != "realdebrid" {
return fileLink
}
if len(fileLink) < 39 {
return fileLink
}
return fileLink[0:39]
}
func (a *Account) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
slicedLink := a.sliceFileLink(fileLink)
dl, ok := a.links.Load(slicedLink)
if !ok {
return types.DownloadLink{}, types.ErrDownloadLinkNotFound
}
return dl, nil
}
func (a *Account) StoreDownloadLink(dl types.DownloadLink) {
slicedLink := a.sliceFileLink(dl.Link)
a.links.Store(slicedLink, dl)
}
func (a *Account) DeleteDownloadLink(fileLink string) {
slicedLink := a.sliceFileLink(fileLink)
a.links.Delete(slicedLink)
}
func (a *Account) ClearDownloadLinks() {
a.links.Clear()
}
func (a *Account) DownloadLinksCount() int {
return a.links.Size()
}
func (a *Account) StoreDownloadLinks(dls map[string]*types.DownloadLink) {
for _, dl := range dls {
a.StoreDownloadLink(*dl)
}
}
// MarkDisabled marks the account as disabled and increments the disable count
func (a *Account) MarkDisabled() {
a.Disabled.Store(true)
a.DisableCount.Add(1)
}
func (a *Account) Reset() {
a.DisableCount.Store(0)
a.Disabled.Store(false)
}
func (a *Account) CheckBandwidth() error {
// Get a one of the download links to check if the account is still valid
downloadLink := ""
a.links.Range(func(key string, dl types.DownloadLink) bool {
if dl.DownloadLink != "" {
downloadLink = dl.DownloadLink
return false
}
return true
})
if downloadLink == "" {
return fmt.Errorf("no download link found")
}
// Let's check the download link status
req, err := http.NewRequest(http.MethodGet, downloadLink, nil)
if err != nil {
return err
}
// Use a simple client
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return fmt.Errorf("account check failed with status code %d", resp.StatusCode)
}
return nil
}

View File

@@ -0,0 +1,239 @@
package account
import (
"fmt"
"slices"
"sync/atomic"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
)
const (
MaxDisableCount = 3
)
type Manager struct {
debrid string
current atomic.Pointer[Account]
accounts *xsync.Map[string, *Account]
logger zerolog.Logger
}
func NewManager(debridConf config.Debrid, downloadRL ratelimit.Limiter, logger zerolog.Logger) *Manager {
m := &Manager{
debrid: debridConf.Name,
accounts: xsync.NewMap[string, *Account](),
logger: logger,
}
var firstAccount *Account
for idx, token := range debridConf.DownloadAPIKeys {
if token == "" {
continue
}
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", token),
}
account := &Account{
Debrid: debridConf.Name,
Token: token,
Index: idx,
links: xsync.NewMap[string, types.DownloadLink](),
httpClient: request.New(
request.WithRateLimiter(downloadRL),
request.WithLogger(logger),
request.WithHeaders(headers),
request.WithMaxRetries(3),
request.WithRetryableStatus(429, 447, 502),
request.WithProxy(debridConf.Proxy),
),
}
m.accounts.Store(token, account)
if firstAccount == nil {
firstAccount = account
}
}
m.current.Store(firstAccount)
return m
}
func (m *Manager) Active() []*Account {
activeAccounts := make([]*Account, 0)
m.accounts.Range(func(key string, acc *Account) bool {
if !acc.Disabled.Load() {
activeAccounts = append(activeAccounts, acc)
}
return true
})
slices.SortFunc(activeAccounts, func(i, j *Account) int {
return i.Index - j.Index
})
return activeAccounts
}
func (m *Manager) All() []*Account {
allAccounts := make([]*Account, 0)
m.accounts.Range(func(key string, acc *Account) bool {
allAccounts = append(allAccounts, acc)
return true
})
slices.SortFunc(allAccounts, func(i, j *Account) int {
return i.Index - j.Index
})
return allAccounts
}
func (m *Manager) Current() *Account {
// Fast path - most common case
current := m.current.Load()
if current != nil && !current.Disabled.Load() {
return current
}
// Slow path - find new current account
activeAccounts := m.Active()
if len(activeAccounts) == 0 {
// No active accounts left, try to use disabled ones
m.logger.Warn().Str("debrid", m.debrid).Msg("No active accounts available, all accounts are disabled")
allAccounts := m.All()
if len(allAccounts) == 0 {
m.logger.Error().Str("debrid", m.debrid).Msg("No accounts configured")
m.current.Store(nil)
return nil
}
m.current.Store(allAccounts[0])
return allAccounts[0]
}
newCurrent := activeAccounts[0]
m.current.Store(newCurrent)
return newCurrent
}
func (m *Manager) Disable(account *Account) {
if account == nil {
return
}
account.MarkDisabled()
// If we're disabling the current account, it will be replaced
// on the next Current() call - no need to proactively update
current := m.current.Load()
if current != nil && current.Token == account.Token {
// Optional: immediately find replacement
activeAccounts := m.Active()
if len(activeAccounts) > 0 {
m.current.Store(activeAccounts[0])
} else {
m.current.Store(nil)
}
}
}
func (m *Manager) Reset() {
m.accounts.Range(func(key string, acc *Account) bool {
acc.Reset()
return true
})
// Set current to first active account
activeAccounts := m.Active()
if len(activeAccounts) > 0 {
m.current.Store(activeAccounts[0])
} else {
m.current.Store(nil)
}
}
func (m *Manager) GetAccount(token string) (*Account, error) {
if token == "" {
return nil, fmt.Errorf("token cannot be empty")
}
acc, ok := m.accounts.Load(token)
if !ok {
return nil, fmt.Errorf("account not found for token")
}
return acc, nil
}
func (m *Manager) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
current := m.Current()
if current == nil {
return types.DownloadLink{}, fmt.Errorf("no active account for debrid service %s", m.debrid)
}
return current.GetDownloadLink(fileLink)
}
func (m *Manager) GetAccountFromDownloadLink(downloadLink types.DownloadLink) (*Account, error) {
if downloadLink.Link == "" {
return nil, fmt.Errorf("cannot get account from empty download link")
}
if downloadLink.Token == "" {
return nil, fmt.Errorf("cannot get account from download link without token")
}
return m.GetAccount(downloadLink.Token)
}
func (m *Manager) StoreDownloadLink(downloadLink types.DownloadLink) {
if downloadLink.Link == "" || downloadLink.Token == "" {
return
}
account, err := m.GetAccount(downloadLink.Token)
if err != nil || account == nil {
return
}
account.StoreDownloadLink(downloadLink)
}
func (m *Manager) Stats() []map[string]any {
stats := make([]map[string]any, 0)
for _, acc := range m.All() {
maskedToken := utils.Mask(acc.Token)
accountDetail := map[string]any{
"in_use": acc.Equals(m.Current()),
"order": acc.Index,
"disabled": acc.Disabled.Load(),
"token_masked": maskedToken,
"username": acc.Username,
"traffic_used": acc.TrafficUsed.Load(),
"links_count": acc.DownloadLinksCount(),
"debrid": acc.Debrid,
}
stats = append(stats, accountDetail)
}
return stats
}
// CheckAndResetBandwidth probes every disabled account that has not exceeded
// MaxDisableCount and re-enables those whose bandwidth check now succeeds.
// If at least one account came back, the manager switches to the first
// active account.
func (m *Manager) CheckAndResetBandwidth() {
	reactivated := false
	m.accounts.Range(func(_ string, acc *Account) bool {
		// Skip healthy accounts and accounts disabled too many times.
		if !acc.Disabled.Load() || acc.DisableCount.Load() >= MaxDisableCount {
			return true
		}
		masked := utils.Mask(acc.Token)
		if err := acc.CheckBandwidth(); err != nil {
			m.logger.Debug().Err(err).Str("debrid", m.debrid).Str("token", masked).Msg("Account still disabled")
			return true
		}
		acc.Disabled.Store(false)
		reactivated = true
		m.logger.Info().Str("debrid", m.debrid).Str("token", masked).Msg("Re-activated disabled account")
		return true
	})
	if !reactivated {
		return
	}
	// At least one account was re-activated; reset current to first active.
	if active := m.Active(); len(active) > 0 {
		m.current.Store(active[0])
	}
}

View File

@@ -0,0 +1,30 @@
package common
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// Client is the provider-agnostic contract implemented by every debrid
// backend in this package tree (realdebrid, torbox, debridlink, alldebrid).
// NOTE(review): per-method semantics below are inferred from names and
// signatures; confirm against the concrete implementations.
type Client interface {
	// SubmitMagnet submits a magnet/torrent to the debrid service.
	SubmitMagnet(tr *types.Torrent) (*types.Torrent, error)
	// CheckStatus queries the provider-side state of the torrent.
	CheckStatus(tr *types.Torrent) (*types.Torrent, error)
	// GetFileDownloadLinks resolves download links for all files of the torrent.
	GetFileDownloadLinks(tr *types.Torrent) error
	// GetDownloadLink resolves a download link for a single file of the torrent.
	GetDownloadLink(tr *types.Torrent, file *types.File) (types.DownloadLink, error)
	// DeleteTorrent removes the torrent with the given id from the provider.
	DeleteTorrent(torrentId string) error
	// IsAvailable reports cached availability keyed by infohash.
	IsAvailable(infohashes []string) map[string]bool
	// GetDownloadUncached reports whether uncached downloads are enabled.
	GetDownloadUncached() bool
	// UpdateTorrent refreshes the torrent's details from the provider.
	UpdateTorrent(torrent *types.Torrent) error
	// GetTorrent fetches a single torrent by id.
	GetTorrent(torrentId string) (*types.Torrent, error)
	// GetTorrents lists the torrents known to the provider.
	GetTorrents() ([]*types.Torrent, error)
	// Name returns the provider's identifier (e.g. "realdebrid").
	Name() string
	// Logger returns the provider's logger.
	Logger() zerolog.Logger
	// GetDownloadingStatus returns the provider status strings that mean
	// "still downloading".
	GetDownloadingStatus() []string
	// RefreshDownloadLinks refreshes any cached download links.
	RefreshDownloadLinks() error
	// CheckLink verifies that a link is still valid on the provider.
	CheckLink(link string) error
	// GetMountPath returns the local mount path for this provider.
	GetMountPath() string
	AccountManager() *account.Manager // Returns the active download account/token manager
	// GetProfile returns the provider user profile.
	GetProfile() (*types.Profile, error)
	// GetAvailableSlots reports how many download/torrent slots are free.
	GetAvailableSlots() (int, error)
	SyncAccounts() error // Updates each accounts details(like traffic, username, etc.)
}

View File

@@ -1,29 +1,35 @@
package debrid package debrid
import ( import (
"cmp"
"context" "context"
"errors" "errors"
"fmt" "fmt"
"sync"
"time"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr" "github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid" "github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link" "github.com/sirrobot01/decypharr/pkg/debrid/providers/debridlink"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid" "github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox" "github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store" debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types" "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/rclone" "github.com/sirrobot01/decypharr/pkg/rclone"
"sync" "go.uber.org/ratelimit"
) )
type Debrid struct { type Debrid struct {
cache *debridStore.Cache // Could be nil if not using WebDAV cache *debridStore.Cache // Could be nil if not using WebDAV
client types.Client // HTTP client for making requests to the debrid service client common.Client // HTTP client for making requests to the debrid service
} }
func (de *Debrid) Client() types.Client { func (de *Debrid) Client() common.Client {
return de.client return de.client
} }
@@ -69,7 +75,7 @@ func NewStorage(rcManager *rclone.Manager) *Storage {
_log := client.Logger() _log := client.Logger()
if dc.UseWebDav { if dc.UseWebDav {
if cfg.Rclone.Enabled && rcManager != nil { if cfg.Rclone.Enabled && rcManager != nil {
mounter = rclone.NewMount(dc.Name, webdavUrl, rcManager) mounter = rclone.NewMount(dc.Name, dc.RcloneMountPath, webdavUrl, rcManager)
} }
cache = debridStore.NewDebridCache(dc, client, mounter) cache = debridStore.NewDebridCache(dc, client, mounter)
_log.Info().Msg("Debrid Service started with WebDAV") _log.Info().Msg("Debrid Service started with WebDAV")
@@ -98,6 +104,90 @@ func (d *Storage) Debrid(name string) *Debrid {
return nil return nil
} }
// StartWorker launches the storage background loops. A nil ctx falls back
// to context.Background(). Always returns nil.
func (d *Storage) StartWorker(ctx context.Context) error {
	if ctx == nil {
		ctx = context.Background()
	}
	go d.syncAccountsWorker(ctx)   // periodic account detail refresh
	go d.checkBandwidthWorker(ctx) // periodic re-check of disabled accounts
	return nil
}
// checkBandwidthWorker re-checks disabled accounts every 30 minutes so they
// can be re-enabled once their bandwidth quota resets. The loop runs in its
// own goroutine and exits when ctx is cancelled.
func (d *Storage) checkBandwidthWorker(ctx context.Context) {
	if ctx == nil {
		ctx = context.Background()
	}
	ticker := time.NewTicker(30 * time.Minute)
	go func() {
		// Fix: stop the ticker when the loop exits; the original leaked it
		// on ctx cancellation.
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				d.checkAccountBandwidth()
			}
		}
	}()
}
// checkAccountBandwidth asks each debrid's account manager to re-check its
// disabled accounts. The storage lock is held for the whole sweep so the
// debrids map cannot change mid-iteration.
func (d *Storage) checkAccountBandwidth() {
	d.mu.Lock()
	defer d.mu.Unlock()
	for _, deb := range d.debrids {
		if deb == nil || deb.client == nil {
			continue
		}
		if mgr := deb.client.AccountManager(); mgr != nil {
			mgr.CheckAndResetBandwidth()
		}
	}
}
// syncAccountsWorker refreshes account details for every debrid once
// immediately, then every 5 minutes, until ctx is cancelled.
func (d *Storage) syncAccountsWorker(ctx context.Context) {
	if ctx == nil {
		ctx = context.Background()
	}
	// Initial synchronous sync; per-provider failures are logged inside
	// syncAccounts, which always returns nil.
	_ = d.syncAccounts()
	ticker := time.NewTicker(5 * time.Minute)
	go func() {
		// Fix: stop the ticker when the loop exits; the original leaked it
		// on ctx cancellation.
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				_ = d.syncAccounts()
			}
		}
	}()
}
// syncAccounts calls SyncAccounts on every configured debrid client,
// logging (but never propagating) per-provider failures; it always returns
// nil. NOTE(review): provider calls happen while d.mu is held — confirm the
// lock hold time is acceptable.
func (d *Storage) syncAccounts() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	for name, deb := range d.debrids {
		if deb == nil || deb.client == nil {
			continue
		}
		log := deb.client.Logger()
		if err := deb.client.SyncAccounts(); err != nil {
			log.Error().Err(err).Msgf("Failed to sync account for %s", name)
		}
	}
	return nil
}
func (d *Storage) Debrids() map[string]*Debrid { func (d *Storage) Debrids() map[string]*Debrid {
d.mu.RLock() d.mu.RLock()
defer d.mu.RUnlock() defer d.mu.RUnlock()
@@ -110,7 +200,7 @@ func (d *Storage) Debrids() map[string]*Debrid {
return debridsCopy return debridsCopy
} }
func (d *Storage) Client(name string) types.Client { func (d *Storage) Client(name string) common.Client {
d.mu.RLock() d.mu.RLock()
defer d.mu.RUnlock() defer d.mu.RUnlock()
if client, exists := d.debrids[name]; exists { if client, exists := d.debrids[name]; exists {
@@ -135,10 +225,10 @@ func (d *Storage) Reset() {
d.lastUsed = "" d.lastUsed = ""
} }
func (d *Storage) Clients() map[string]types.Client { func (d *Storage) Clients() map[string]common.Client {
d.mu.RLock() d.mu.RLock()
defer d.mu.RUnlock() defer d.mu.RUnlock()
clientsCopy := make(map[string]types.Client) clientsCopy := make(map[string]common.Client)
for name, debrid := range d.debrids { for name, debrid := range d.debrids {
if debrid != nil && debrid.client != nil { if debrid != nil && debrid.client != nil {
clientsCopy[name] = debrid.client clientsCopy[name] = debrid.client
@@ -159,10 +249,10 @@ func (d *Storage) Caches() map[string]*debridStore.Cache {
return cachesCopy return cachesCopy
} }
func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client { func (d *Storage) FilterClients(filter func(common.Client) bool) map[string]common.Client {
d.mu.Lock() d.mu.Lock()
defer d.mu.Unlock() defer d.mu.Unlock()
filteredClients := make(map[string]types.Client) filteredClients := make(map[string]common.Client)
for name, client := range d.debrids { for name, client := range d.debrids {
if client != nil && filter(client.client) { if client != nil && filter(client.client) {
filteredClients[name] = client.client filteredClients[name] = client.client
@@ -171,18 +261,28 @@ func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types
return filteredClients return filteredClients
} }
func createDebridClient(dc config.Debrid) (types.Client, error) { func createDebridClient(dc config.Debrid) (common.Client, error) {
rateLimits := map[string]ratelimit.Limiter{}
mainRL := request.ParseRateLimit(dc.RateLimit)
repairRL := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
downloadRL := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
rateLimits["main"] = mainRL
rateLimits["repair"] = repairRL
rateLimits["download"] = downloadRL
switch dc.Name { switch dc.Name {
case "realdebrid": case "realdebrid":
return realdebrid.New(dc) return realdebrid.New(dc, rateLimits)
case "torbox": case "torbox":
return torbox.New(dc) return torbox.New(dc, rateLimits)
case "debridlink": case "debridlink":
return debrid_link.New(dc) return debridlink.New(dc, rateLimits)
case "alldebrid": case "alldebrid":
return alldebrid.New(dc) return alldebrid.New(dc, rateLimits)
default: default:
return realdebrid.New(dc) return realdebrid.New(dc, rateLimits)
} }
} }
@@ -197,7 +297,7 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet
Files: make(map[string]types.File), Files: make(map[string]types.File),
} }
clients := store.FilterClients(func(c types.Client) bool { clients := store.FilterClients(func(c common.Client) bool {
if selectedDebrid != "" && c.Name() != selectedDebrid { if selectedDebrid != "" && c.Name() != selectedDebrid {
return false return false
} }

View File

@@ -3,28 +3,32 @@ package alldebrid
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"net/http" "net/http"
gourl "net/url" gourl "net/url"
"path/filepath" "path/filepath"
"strconv" "strconv"
"sync" "sync"
"time" "time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
) )
type AllDebrid struct { type AllDebrid struct {
name string name string
Host string `json:"host"` Host string `json:"host"`
APIKey string APIKey string
accounts *types.Accounts accountsManager *account.Manager
autoExpiresLinksAfter time.Duration autoExpiresLinksAfter time.Duration
DownloadUncached bool DownloadUncached bool
client *request.Client client *request.Client
Profile *types.Profile `json:"profile"`
MountPath string MountPath string
logger zerolog.Logger logger zerolog.Logger
@@ -33,12 +37,7 @@ type AllDebrid struct {
minimumFreeSlot int minimumFreeSlot int
} }
func (ad *AllDebrid) GetProfile() (*types.Profile, error) { func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*AllDebrid, error) {
return nil, nil
}
func New(dc config.Debrid) (*AllDebrid, error) {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{ headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -47,7 +46,7 @@ func New(dc config.Debrid) (*AllDebrid, error) {
client := request.New( client := request.New(
request.WithHeaders(headers), request.WithHeaders(headers),
request.WithLogger(_log), request.WithLogger(_log),
request.WithRateLimiter(rl), request.WithRateLimiter(ratelimits["main"]),
request.WithProxy(dc.Proxy), request.WithProxy(dc.Proxy),
) )
@@ -59,7 +58,7 @@ func New(dc config.Debrid) (*AllDebrid, error) {
name: "alldebrid", name: "alldebrid",
Host: "http://api.alldebrid.com/v4.1", Host: "http://api.alldebrid.com/v4.1",
APIKey: dc.APIKey, APIKey: dc.APIKey,
accounts: types.NewAccounts(dc), accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached, DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter, autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client, client: client,
@@ -105,11 +104,12 @@ func (ad *AllDebrid) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error
} }
magnets := data.Data.Magnets magnets := data.Data.Magnets
if len(magnets) == 0 { if len(magnets) == 0 {
return nil, fmt.Errorf("error adding torrent") return nil, fmt.Errorf("error adding torrent. No magnets returned")
} }
magnet := magnets[0] magnet := magnets[0]
torrentId := strconv.Itoa(magnet.ID) torrentId := strconv.Itoa(magnet.ID)
torrent.Id = torrentId torrent.Id = torrentId
torrent.Added = time.Now().Format(time.RFC3339)
return torrent, nil return torrent, nil
} }
@@ -296,7 +296,7 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error { func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files)) filesCh := make(chan types.File, len(t.Files))
linksCh := make(chan *types.DownloadLink, len(t.Files)) linksCh := make(chan types.DownloadLink, len(t.Files))
errCh := make(chan error, len(t.Files)) errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup var wg sync.WaitGroup
@@ -309,10 +309,6 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
errCh <- err errCh <- err
return return
} }
if link == nil {
errCh <- fmt.Errorf("download link is empty")
return
}
linksCh <- link linksCh <- link
file.DownloadLink = link file.DownloadLink = link
filesCh <- file filesCh <- file
@@ -330,17 +326,14 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
} }
// Collect download links // Collect download links
links := make(map[string]*types.DownloadLink, len(t.Files)) links := make(map[string]types.DownloadLink, len(t.Files))
for link := range linksCh { for link := range linksCh {
if link == nil { if link.Empty() {
continue continue
} }
links[link.Link] = link links[link.Link] = link
} }
// Update the files with download links
ad.accounts.SetDownloadLinks(links)
// Check for errors // Check for errors
for err := range errCh { for err := range errCh {
if err != nil { if err != nil {
@@ -352,7 +345,7 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
return nil return nil
} }
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) { func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/link/unlock", ad.Host) url := fmt.Sprintf("%s/link/unlock", ad.Host)
query := gourl.Values{} query := gourl.Values{}
query.Add("link", file.Link) query.Add("link", file.Link)
@@ -360,22 +353,23 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
req, _ := http.NewRequest(http.MethodGet, url, nil) req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := ad.client.MakeRequest(req) resp, err := ad.client.MakeRequest(req)
if err != nil { if err != nil {
return nil, err return types.DownloadLink{}, err
} }
var data DownloadLink var data DownloadLink
if err = json.Unmarshal(resp, &data); err != nil { if err = json.Unmarshal(resp, &data); err != nil {
return nil, err return types.DownloadLink{}, err
} }
if data.Error != nil { if data.Error != nil {
return nil, fmt.Errorf("error getting download link: %s", data.Error.Message) return types.DownloadLink{}, fmt.Errorf("error getting download link: %s", data.Error.Message)
} }
link := data.Data.Link link := data.Data.Link
if link == "" { if link == "" {
return nil, fmt.Errorf("download link is empty") return types.DownloadLink{}, fmt.Errorf("download link is empty")
} }
now := time.Now() now := time.Now()
return &types.DownloadLink{ dl := types.DownloadLink{
Token: ad.APIKey,
Link: file.Link, Link: file.Link,
DownloadLink: link, DownloadLink: link,
Id: data.Data.Id, Id: data.Data.Id,
@@ -383,7 +377,10 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
Filename: file.Name, Filename: file.Name,
Generated: now, Generated: now,
ExpiresAt: now.Add(ad.autoExpiresLinksAfter), ExpiresAt: now.Add(ad.autoExpiresLinksAfter),
}, nil }
// Set the download link in the account
ad.accountsManager.StoreDownloadLink(dl)
return dl, nil
} }
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) { func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
@@ -419,8 +416,8 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
return torrents, nil return torrents, nil
} }
func (ad *AllDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) { func (ad *AllDebrid) RefreshDownloadLinks() error {
return nil, nil return nil
} }
func (ad *AllDebrid) GetDownloadingStatus() []string { func (ad *AllDebrid) GetDownloadingStatus() []string {
@@ -439,16 +436,64 @@ func (ad *AllDebrid) GetMountPath() string {
return ad.MountPath return ad.MountPath
} }
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
return nil
}
func (ad *AllDebrid) GetAvailableSlots() (int, error) { func (ad *AllDebrid) GetAvailableSlots() (int, error) {
// This function is a placeholder for AllDebrid // This function is a placeholder for AllDebrid
//TODO: Implement the logic to check available slots for AllDebrid //TODO: Implement the logic to check available slots for AllDebrid
return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid") return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid")
} }
func (ad *AllDebrid) Accounts() *types.Accounts { func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
return ad.accounts if ad.Profile != nil {
return ad.Profile, nil
}
url := fmt.Sprintf("%s/user", ad.Host)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := ad.client.MakeRequest(req)
if err != nil {
return nil, err
}
var res UserProfileResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Error().Err(err).Msgf("Error unmarshalling user profile")
return nil, err
}
if res.Status != "success" {
message := "unknown error"
if res.Error != nil {
message = res.Error.Message
}
return nil, fmt.Errorf("error getting user profile: %s", message)
}
userData := res.Data.User
expiration := time.Unix(userData.PremiumUntil, 0)
profile := &types.Profile{
Id: 1,
Name: ad.name,
Username: userData.Username,
Email: userData.Email,
Points: userData.FidelityPoints,
Premium: userData.PremiumUntil,
Expiration: expiration,
}
if userData.IsPremium {
profile.Type = "premium"
} else if userData.IsTrial {
profile.Type = "trial"
} else {
profile.Type = "free"
}
ad.Profile = profile
return profile, nil
}
func (ad *AllDebrid) AccountManager() *account.Manager {
return ad.accountsManager
}
func (ad *AllDebrid) SyncAccounts() error {
return nil
} }

View File

@@ -112,3 +112,22 @@ func (m *Magnets) UnmarshalJSON(data []byte) error {
} }
return fmt.Errorf("magnets: unsupported JSON format") return fmt.Errorf("magnets: unsupported JSON format")
} }
// UserProfileResponse models the AllDebrid GET /user payload as consumed by
// GetProfile: Status is compared against "success", Error.Message is used on
// failure, and Data.User supplies the profile fields.
// NOTE(review): field semantics beyond the JSON tags are inferred — confirm
// against the AllDebrid API reference.
type UserProfileResponse struct {
	Status string         `json:"status"`
	Error  *errorResponse `json:"error"`
	Data   struct {
		User struct {
			Username string `json:"username"`
			Email    string `json:"email"`
			IsPremium bool  `json:"isPremium"`
			IsSubscribed bool `json:"isSubscribed"`
			IsTrial      bool `json:"isTrial"`
			// PremiumUntil is read as a unix timestamp (seconds) by GetProfile.
			PremiumUntil int64  `json:"premiumUntil"`
			Lang         string `json:"lang"`
			FidelityPoints int  `json:"fidelityPoints"`
			LimitedHostersQuotas map[string]int `json:"limitedHostersQuotas"`
			Notifications        []string       `json:"notifications"`
		} `json:"user"`
	} `json:"data"`
}

View File

@@ -1,16 +1,19 @@
package debrid_link package debridlink
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"time"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types" "github.com/sirrobot01/decypharr/pkg/debrid/types"
"time" "go.uber.org/ratelimit"
"net/http" "net/http"
"strings" "strings"
@@ -20,7 +23,7 @@ type DebridLink struct {
name string name string
Host string `json:"host"` Host string `json:"host"`
APIKey string APIKey string
accounts *types.Accounts accountsManager *account.Manager
DownloadUncached bool DownloadUncached bool
client *request.Client client *request.Client
@@ -30,11 +33,11 @@ type DebridLink struct {
logger zerolog.Logger logger zerolog.Logger
checkCached bool checkCached bool
addSamples bool addSamples bool
Profile *types.Profile `json:"profile,omitempty"`
} }
func New(dc config.Debrid) (*DebridLink, error) { func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*DebridLink, error) {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{ headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json", "Content-Type": "application/json",
@@ -43,7 +46,7 @@ func New(dc config.Debrid) (*DebridLink, error) {
client := request.New( client := request.New(
request.WithHeaders(headers), request.WithHeaders(headers),
request.WithLogger(_log), request.WithLogger(_log),
request.WithRateLimiter(rl), request.WithRateLimiter(ratelimits["main"]),
request.WithProxy(dc.Proxy), request.WithProxy(dc.Proxy),
) )
@@ -55,7 +58,7 @@ func New(dc config.Debrid) (*DebridLink, error) {
name: "debridlink", name: "debridlink",
Host: "https://debrid-link.com/api/v2", Host: "https://debrid-link.com/api/v2",
APIKey: dc.APIKey, APIKey: dc.APIKey,
accounts: types.NewAccounts(dc), accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached, DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter, autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client, client: client,
@@ -66,10 +69,6 @@ func New(dc config.Debrid) (*DebridLink, error) {
}, nil }, nil
} }
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
return nil, nil
}
func (dl *DebridLink) Name() string { func (dl *DebridLink) Name() string {
return dl.name return dl.name
} }
@@ -223,7 +222,6 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = name t.OriginalFilename = name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339) t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
cfg := config.Get() cfg := config.Get()
links := make(map[string]*types.DownloadLink)
now := time.Now() now := time.Now()
for _, f := range data.Files { for _, f := range data.Files {
if !cfg.IsSizeAllowed(f.Size) { if !cfg.IsSizeAllowed(f.Size) {
@@ -237,19 +235,19 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
Path: f.Name, Path: f.Name,
Link: f.DownloadURL, Link: f.DownloadURL,
} }
link := &types.DownloadLink{ link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name, Filename: f.Name,
Link: f.DownloadURL, Link: f.DownloadURL,
DownloadLink: f.DownloadURL, DownloadLink: f.DownloadURL,
Generated: now, Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter), ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
} }
links[file.Link] = link
file.DownloadLink = link file.DownloadLink = link
t.Files[f.Name] = file t.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
} }
dl.accounts.SetDownloadLinks(links)
return nil return nil
} }
@@ -286,8 +284,6 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
t.MountPath = dl.MountPath t.MountPath = dl.MountPath
t.Debrid = dl.name t.Debrid = dl.name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339) t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
links := make(map[string]*types.DownloadLink)
now := time.Now() now := time.Now()
for _, f := range data.Files { for _, f := range data.Files {
file := types.File{ file := types.File{
@@ -299,20 +295,19 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
Link: f.DownloadURL, Link: f.DownloadURL,
Generated: now, Generated: now,
} }
link := &types.DownloadLink{ link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name, Filename: f.Name,
Link: f.DownloadURL, Link: f.DownloadURL,
DownloadLink: f.DownloadURL, DownloadLink: f.DownloadURL,
Generated: now, Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter), ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
} }
links[file.Link] = link
file.DownloadLink = link file.DownloadLink = link
t.Files[f.Name] = file t.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
} }
dl.accounts.SetDownloadLinks(links)
return t, nil return t, nil
} }
@@ -355,12 +350,12 @@ func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error {
return nil return nil
} }
func (dl *DebridLink) GetDownloadLinks() (map[string]*types.DownloadLink, error) { func (dl *DebridLink) RefreshDownloadLinks() error {
return nil, nil return nil
} }
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) { func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
return dl.accounts.GetDownloadLink(file.Link) return dl.accountsManager.GetDownloadLink(file.Link)
} }
func (dl *DebridLink) GetDownloadingStatus() []string { func (dl *DebridLink) GetDownloadingStatus() []string {
@@ -405,7 +400,6 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
} }
data := *res.Value data := *res.Value
links := make(map[string]*types.DownloadLink)
if len(data) == 0 { if len(data) == 0 {
return torrents, nil return torrents, nil
@@ -441,20 +435,20 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
Path: f.Name, Path: f.Name,
Link: f.DownloadURL, Link: f.DownloadURL,
} }
link := &types.DownloadLink{ link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name, Filename: f.Name,
Link: f.DownloadURL, Link: f.DownloadURL,
DownloadLink: f.DownloadURL, DownloadLink: f.DownloadURL,
Generated: now, Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter), ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
} }
links[file.Link] = link
file.DownloadLink = link file.DownloadLink = link
torrent.Files[f.Name] = file torrent.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
} }
torrents = append(torrents, torrent) torrents = append(torrents, torrent)
} }
dl.accounts.SetDownloadLinks(links)
return torrents, nil return torrents, nil
} }
@@ -467,15 +461,60 @@ func (dl *DebridLink) GetMountPath() string {
return dl.MountPath return dl.MountPath
} }
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
return nil
}
func (dl *DebridLink) GetAvailableSlots() (int, error) { func (dl *DebridLink) GetAvailableSlots() (int, error) {
//TODO: Implement the logic to check available slots for DebridLink //TODO: Implement the logic to check available slots for DebridLink
return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink") return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink")
} }
func (dl *DebridLink) Accounts() *types.Accounts { func (dl *DebridLink) GetProfile() (*types.Profile, error) {
return dl.accounts if dl.Profile != nil {
return dl.Profile, nil
}
url := fmt.Sprintf("%s/account/infos", dl.Host)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := dl.client.MakeRequest(req)
if err != nil {
return nil, err
}
var res UserInfo
err = json.Unmarshal(resp, &res)
if err != nil {
dl.logger.Error().Err(err).Msgf("Error unmarshalling user info")
return nil, err
}
if !res.Success || res.Value == nil {
return nil, fmt.Errorf("error getting user info")
}
data := *res.Value
expiration := time.Unix(data.PremiumLeft, 0)
profile := &types.Profile{
Id: 1,
Username: data.Username,
Name: dl.name,
Email: data.Email,
Points: data.Points,
Premium: data.PremiumLeft,
Expiration: expiration,
}
if expiration.IsZero() {
profile.Expiration = time.Now().AddDate(1, 0, 0) // Default to 1 year if no expiration
}
if data.PremiumLeft > 0 {
profile.Type = "premium"
} else {
profile.Type = "free"
}
dl.Profile = profile
return profile, nil
}
func (dl *DebridLink) AccountManager() *account.Manager {
return dl.accountsManager
}
func (dl *DebridLink) SyncAccounts() error {
return nil
} }

View File

@@ -1,4 +1,4 @@
package debrid_link package debridlink
type APIResponse[T any] struct { type APIResponse[T any] struct {
Success bool `json:"success"` Success bool `json:"success"`
@@ -43,3 +43,12 @@ type _torrentInfo struct {
type torrentInfo APIResponse[[]_torrentInfo] type torrentInfo APIResponse[[]_torrentInfo]
type SubmitTorrentInfo APIResponse[_torrentInfo] type SubmitTorrentInfo APIResponse[_torrentInfo]
// UserInfo models the Debrid-Link GET /account/infos payload used by
// GetProfile. NOTE(review): PremiumLeft is passed to time.Unix by the
// caller, i.e. treated as a timestamp — confirm upstream API semantics.
type UserInfo APIResponse[struct {
	Username     string `json:"username"`
	Email        string `json:"email"`
	AccountType  int    `json:"accountType"`
	PremiumLeft  int64  `json:"premiumLeft"`
	Points       int    `json:"pts"`
	Trafficshare int    `json:"trafficshare"`
}]

View File

@@ -2,11 +2,9 @@ package realdebrid
import ( import (
"bytes" "bytes"
"cmp"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io" "io"
"net/http" "net/http"
gourl "net/url" gourl "net/url"
@@ -16,6 +14,10 @@ import (
"sync" "sync"
"time" "time"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
@@ -28,12 +30,11 @@ type RealDebrid struct {
name string name string
Host string `json:"host"` Host string `json:"host"`
APIKey string APIKey string
accounts *types.Accounts accountsManager *account.Manager
DownloadUncached bool DownloadUncached bool
client *request.Client client *request.Client
downloadClient *request.Client
repairClient *request.Client repairClient *request.Client
autoExpiresLinksAfter time.Duration autoExpiresLinksAfter time.Duration
@@ -49,10 +50,7 @@ type RealDebrid struct {
limit int limit int
} }
func New(dc config.Debrid) (*RealDebrid, error) { func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*RealDebrid, error) {
rl := request.ParseRateLimit(dc.RateLimit)
repairRl := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
downloadRl := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
headers := map[string]string{ headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -68,27 +66,20 @@ func New(dc config.Debrid) (*RealDebrid, error) {
name: "realdebrid", name: "realdebrid",
Host: "https://api.real-debrid.com/rest/1.0", Host: "https://api.real-debrid.com/rest/1.0",
APIKey: dc.APIKey, APIKey: dc.APIKey,
accounts: types.NewAccounts(dc), accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached, DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter, autoExpiresLinksAfter: autoExpiresLinksAfter,
UnpackRar: dc.UnpackRar, UnpackRar: dc.UnpackRar,
client: request.New( client: request.New(
request.WithHeaders(headers), request.WithHeaders(headers),
request.WithRateLimiter(rl), request.WithRateLimiter(ratelimits["main"]),
request.WithLogger(_log), request.WithLogger(_log),
request.WithMaxRetries(10), request.WithMaxRetries(10),
request.WithRetryableStatus(429, 502), request.WithRetryableStatus(429, 502),
request.WithProxy(dc.Proxy), request.WithProxy(dc.Proxy),
), ),
downloadClient: request.New(
request.WithRateLimiter(downloadRl),
request.WithLogger(_log),
request.WithMaxRetries(10),
request.WithRetryableStatus(429, 447, 502),
request.WithProxy(dc.Proxy),
),
repairClient: request.New( repairClient: request.New(
request.WithRateLimiter(repairRl), request.WithRateLimiter(ratelimits["repair"]),
request.WithHeaders(headers), request.WithHeaders(headers),
request.WithLogger(_log), request.WithLogger(_log),
request.WithMaxRetries(4), request.WithMaxRetries(4),
@@ -161,6 +152,23 @@ func (r *RealDebrid) getSelectedFiles(t *types.Torrent, data torrentInfo) (map[s
return files, nil return files, nil
} }
// handleRarFallback represents an entire RAR archive as a single pseudo-file
// instead of unpacking it. It is the fallback used when RAR unpacking is
// disabled or fails, so that the torrent still exposes one downloadable entry.
//
// The pseudo-file is named "<torrent name>.rar", sized from the torrent's
// total byte count, and linked to the first restricted link of the torrent.
// Returns the single-entry file map keyed by file name, or an error when the
// torrent carries no links at all (previously this indexed data.Links[0]
// unconditionally and would panic on an empty slice).
func (r *RealDebrid) handleRarFallback(t *types.Torrent, data torrentInfo) (map[string]types.File, error) {
	if len(data.Links) == 0 {
		return nil, fmt.Errorf("no links available for RAR fallback on torrent %s", t.Id)
	}
	name := t.Name + ".rar"
	file := types.File{
		TorrentId: t.Id,
		Id:        "0",
		Name:      name,
		Size:      data.Bytes,
		IsRar:     true,
		ByteRange: nil,
		Path:      name,
		Link:      data.Links[0],
		Generated: time.Now(),
	}
	return map[string]types.File{name: file}, nil
}
// handleRarArchive processes RAR archives with multiple files // handleRarArchive processes RAR archives with multiple files
func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) { func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) {
// This will block if 2 RAR operations are already in progress // This will block if 2 RAR operations are already in progress
@@ -172,21 +180,8 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select
files := make(map[string]types.File) files := make(map[string]types.File)
if !r.UnpackRar { if !r.UnpackRar {
r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s", t.Name) r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s. Falling back to single file representation.", t.Name)
// Create a single file representing the RAR archive return r.handleRarFallback(t, data)
file := types.File{
TorrentId: t.Id,
Id: "0",
Name: t.Name + ".rar",
Size: 0,
IsRar: true,
ByteRange: nil,
Path: t.Name + ".rar",
Link: data.Links[0],
Generated: time.Now(),
}
files[file.Name] = file
return files, nil
} }
r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name) r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name)
@@ -194,20 +189,23 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select
downloadLinkObj, err := r.GetDownloadLink(t, linkFile) downloadLinkObj, err := r.GetDownloadLink(t, linkFile)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get download link for RAR file: %w", err) r.logger.Debug().Err(err).Msgf("Error getting download link for RAR file: %s. Falling back to single file representation.", t.Name)
return r.handleRarFallback(t, data)
} }
dlLink := downloadLinkObj.DownloadLink dlLink := downloadLinkObj.DownloadLink
reader, err := rar.NewReader(dlLink) reader, err := rar.NewReader(dlLink)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create RAR reader: %w", err) r.logger.Debug().Err(err).Msgf("Error creating RAR reader for %s. Falling back to single file representation.", t.Name)
return r.handleRarFallback(t, data)
} }
rarFiles, err := reader.GetFiles() rarFiles, err := reader.GetFiles()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read RAR files: %w", err) r.logger.Debug().Err(err).Msgf("Error reading RAR files for %s. Falling back to single file representation.", t.Name)
return r.handleRarFallback(t, data)
} }
// Create lookup map for faster matching // Create lookup map for faster matching
@@ -232,7 +230,11 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select
r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name()) r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name())
} }
} }
if len(files) == 0 {
r.logger.Warn().Msgf("No valid files found in RAR archive for torrent: %s", t.Name)
return r.handleRarFallback(t, data)
}
r.logger.Info().Msgf("Unpacked RAR archive for torrent: %s with %d files", t.Name, len(files))
return files, nil return files, nil
} }
@@ -349,7 +351,9 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
bodyBytes, _ := io.ReadAll(resp.Body) bodyBytes, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes)) return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
} }
defer resp.Body.Close() defer func(Body io.ReadCloser) {
_ = Body.Close()
}(resp.Body)
bodyBytes, err := io.ReadAll(resp.Body) bodyBytes, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, fmt.Errorf("reading response body: %w", err) return nil, fmt.Errorf("reading response body: %w", err)
@@ -360,6 +364,7 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
t.Id = data.Id t.Id = data.Id
t.Debrid = r.name t.Debrid = r.name
t.MountPath = r.MountPath t.MountPath = r.MountPath
t.Added = time.Now().Format(time.RFC3339)
return t, nil return t, nil
} }
@@ -395,6 +400,7 @@ func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
t.Id = data.Id t.Id = data.Id
t.Debrid = r.name t.Debrid = r.name
t.MountPath = r.MountPath t.MountPath = r.MountPath
t.Added = time.Now().Format(time.RFC3339)
return t, nil return t, nil
} }
@@ -476,7 +482,6 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
t.Links = data.Links t.Links = data.Links
t.MountPath = r.MountPath t.MountPath = r.MountPath
t.Debrid = r.name t.Debrid = r.name
t.Added = data.Added
t.Files, _ = r.getSelectedFiles(t, data) // Get selected files t.Files, _ = r.getSelectedFiles(t, data) // Get selected files
return nil return nil
@@ -508,6 +513,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
t.Status = status t.Status = status
t.Debrid = r.name t.Debrid = r.name
t.MountPath = r.MountPath t.MountPath = r.MountPath
t.Added = data.Added
if status == "waiting_files_selection" { if status == "waiting_files_selection" {
t.Files = r.getTorrentFiles(t, data) t.Files = r.getTorrentFiles(t, data)
if len(t.Files) == 0 { if len(t.Files) == 0 {
@@ -568,7 +574,7 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
var firstErr error var firstErr error
files := make(map[string]types.File) files := make(map[string]types.File)
links := make(map[string]*types.DownloadLink) links := make(map[string]types.DownloadLink)
_files := t.GetFiles() _files := t.GetFiles()
wg.Add(len(_files)) wg.Add(len(_files))
@@ -586,7 +592,7 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
mu.Unlock() mu.Unlock()
return return
} }
if link == nil { if link.Empty() {
mu.Lock() mu.Lock()
if firstErr == nil { if firstErr == nil {
firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name) firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name)
@@ -596,7 +602,6 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
} }
file.DownloadLink = link file.DownloadLink = link
mu.Lock() mu.Lock()
files[file.Name] = file files[file.Name] = file
links[link.Link] = link links[link.Link] = link
@@ -611,7 +616,6 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
} }
// Add links to cache // Add links to cache
r.accounts.SetDownloadLinks(links)
t.Files = files t.Files = files
return nil return nil
} }
@@ -632,8 +636,9 @@ func (r *RealDebrid) CheckLink(link string) error {
return nil return nil
} }
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) { func (r *RealDebrid) getDownloadLink(account *account.Account, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host) url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
emptyLink := types.DownloadLink{}
_link := file.Link _link := file.Link
if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 { if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 {
_link = file.Link[0:39] _link = file.Link[0:39]
@@ -642,70 +647,64 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
"link": {_link}, "link": {_link},
} }
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode())) req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.downloadClient.Do(req) resp, err := account.Client().Do(req)
if err != nil { if err != nil {
return nil, err return emptyLink, err
} }
defer resp.Body.Close() defer func(Body io.ReadCloser) {
_ = Body.Close()
}(resp.Body)
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
// Read the response body to get the error message // Read the response body to get the error message
b, err := io.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, err return emptyLink, err
} }
var data ErrorResponse var data ErrorResponse
if err = json.Unmarshal(b, &data); err != nil { if err = json.Unmarshal(b, &data); err != nil {
return nil, fmt.Errorf("error unmarshalling %d || %s \n %s", resp.StatusCode, err, string(b)) return emptyLink, fmt.Errorf("error unmarshalling %d || %s \n %s", resp.StatusCode, err, string(b))
} }
switch data.ErrorCode { switch data.ErrorCode {
case 19: case 19, 24, 35:
return nil, utils.HosterUnavailableError // File has been removed return emptyLink, utils.HosterUnavailableError // File has been removed
case 23: case 23, 34, 36:
return nil, utils.TrafficExceededError return emptyLink, utils.TrafficExceededError
case 24:
return nil, utils.HosterUnavailableError // Link has been nerfed
case 34:
return nil, utils.TrafficExceededError // traffic exceeded
case 35:
return nil, utils.HosterUnavailableError
case 36:
return nil, utils.TrafficExceededError // traffic exceeded
default: default:
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode) return emptyLink, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
} }
} }
b, err := io.ReadAll(resp.Body) b, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return nil, err return emptyLink, err
} }
var data UnrestrictResponse var data UnrestrictResponse
if err = json.Unmarshal(b, &data); err != nil { if err = json.Unmarshal(b, &data); err != nil {
return nil, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err) return emptyLink, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err)
} }
if data.Download == "" { if data.Download == "" {
return nil, fmt.Errorf("realdebrid API error: download link not found") return emptyLink, fmt.Errorf("realdebrid API error: download link not found")
} }
now := time.Now() now := time.Now()
return &types.DownloadLink{ dl := types.DownloadLink{
Token: account.Token,
Filename: data.Filename, Filename: data.Filename,
Size: data.Filesize, Size: data.Filesize,
Link: data.Link, Link: data.Link,
DownloadLink: data.Download, DownloadLink: data.Download,
Generated: now, Generated: now,
ExpiresAt: now.Add(r.autoExpiresLinksAfter), ExpiresAt: now.Add(r.autoExpiresLinksAfter),
}, nil }
// Store the link in the account
account.StoreDownloadLink(dl)
return dl, nil
} }
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) { func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
accounts := r.accountsManager.Active()
accounts := r.accounts.All() for _, _account := range accounts {
downloadLink, err := r.getDownloadLink(_account, file)
for _, account := range accounts {
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
downloadLink, err := r._getDownloadLink(file)
if err == nil { if err == nil {
return downloadLink, nil return downloadLink, nil
} }
@@ -716,16 +715,16 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
retries = 5 retries = 5
} else { } else {
// If the error is not traffic exceeded, return the error // If the error is not traffic exceeded, return the error
return nil, err return downloadLink, err
} }
backOff := 1 * time.Second backOff := 1 * time.Second
for retries > 0 { for retries > 0 {
downloadLink, err = r._getDownloadLink(file) downloadLink, err = r.getDownloadLink(_account, file)
if err == nil { if err == nil {
return downloadLink, nil return downloadLink, nil
} }
if !errors.Is(err, utils.TrafficExceededError) { if !errors.Is(err, utils.TrafficExceededError) {
return nil, err return downloadLink, err
} }
// Add a delay before retrying // Add a delay before retrying
time.Sleep(backOff) time.Sleep(backOff)
@@ -733,7 +732,7 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
retries-- retries--
} }
} }
return nil, fmt.Errorf("realdebrid API error: download link not found") return types.DownloadLink{}, fmt.Errorf("realdebrid API error: used all active accounts")
} }
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) { func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
@@ -830,49 +829,47 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
return allTorrents, nil return allTorrents, nil
} }
func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) { func (r *RealDebrid) RefreshDownloadLinks() error {
links := make(map[string]*types.DownloadLink) accounts := r.accountsManager.All()
offset := 0
limit := 1000
accounts := r.accounts.All() for _, _account := range accounts {
if _account == nil || _account.Token == "" {
if len(accounts) < 1 { continue
// No active download keys. It's likely that the key has reached bandwidth limit
return links, fmt.Errorf("no active download keys")
}
activeAccount := accounts[0]
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", activeAccount.Token))
for {
dl, err := r._getDownloads(offset, limit)
if err != nil {
break
} }
if len(dl) == 0 { offset := 0
break limit := 1000
} links := make(map[string]*types.DownloadLink)
for {
for _, d := range dl { dl, err := r.getDownloadLinks(_account, offset, limit)
if _, exists := links[d.Link]; exists { if err != nil {
// This is ordered by date, so we can skip the rest break
continue
} }
links[d.Link] = &d if len(dl) == 0 {
break
}
for _, d := range dl {
if _, exists := links[d.Link]; exists {
// This is ordered by date, so we can skip the rest
continue
}
links[d.Link] = &d
}
offset += len(dl)
} }
_account.StoreDownloadLinks(links)
offset += len(dl)
} }
return nil
return links, nil
} }
func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink, error) { func (r *RealDebrid) getDownloadLinks(account *account.Account, offset int, limit int) ([]types.DownloadLink, error) {
url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit) url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit)
if offset > 0 { if offset > 0 {
url = fmt.Sprintf("%s&offset=%d", url, offset) url = fmt.Sprintf("%s&offset=%d", url, offset)
} }
req, _ := http.NewRequest(http.MethodGet, url, nil) req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.downloadClient.MakeRequest(req) resp, err := account.Client().MakeRequest(req)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -883,6 +880,7 @@ func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink,
links := make([]types.DownloadLink, 0) links := make([]types.DownloadLink, 0)
for _, d := range data { for _, d := range data {
links = append(links, types.DownloadLink{ links = append(links, types.DownloadLink{
Token: account.Token,
Filename: d.Filename, Filename: d.Filename,
Size: d.Filesize, Size: d.Filesize,
Link: d.Link, Link: d.Link,
@@ -908,15 +906,6 @@ func (r *RealDebrid) GetMountPath() string {
return r.MountPath return r.MountPath
} }
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
req, _ := http.NewRequest(http.MethodDelete, url, nil)
if _, err := r.downloadClient.MakeRequest(req); err != nil {
return err
}
return nil
}
func (r *RealDebrid) GetProfile() (*types.Profile, error) { func (r *RealDebrid) GetProfile() (*types.Profile, error) {
if r.Profile != nil { if r.Profile != nil {
return r.Profile, nil return r.Profile, nil
@@ -933,6 +922,7 @@ func (r *RealDebrid) GetProfile() (*types.Profile, error) {
return nil, err return nil, err
} }
profile := &types.Profile{ profile := &types.Profile{
Name: r.name,
Id: data.Id, Id: data.Id,
Username: data.Username, Username: data.Username,
Email: data.Email, Email: data.Email,
@@ -946,8 +936,7 @@ func (r *RealDebrid) GetProfile() (*types.Profile, error) {
} }
func (r *RealDebrid) GetAvailableSlots() (int, error) { func (r *RealDebrid) GetAvailableSlots() (int, error) {
url := fmt.Sprintf("%s/torrents/activeCount", r.Host) req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/torrents/activeCount", r.Host), nil)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req) resp, err := r.client.MakeRequest(req)
if err != nil { if err != nil {
return 0, nil return 0, nil
@@ -959,6 +948,72 @@ func (r *RealDebrid) GetAvailableSlots() (int, error) {
return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain minimum active pots return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain minimum active pots
} }
func (r *RealDebrid) Accounts() *types.Accounts { func (r *RealDebrid) AccountManager() *account.Manager {
return r.accounts return r.accountsManager
}
// SyncAccounts refreshes profile and traffic information for every configured
// account via syncAccount. A failing account is logged and skipped so one bad
// token does not abort the sync of the others. Always returns nil.
func (r *RealDebrid) SyncAccounts() error {
	// Nothing to sync when no account is currently active.
	if len(r.accountsManager.Active()) == 0 {
		return nil
	}
	for _, acc := range r.accountsManager.All() {
		err := r.syncAccount(acc)
		if err == nil {
			continue
		}
		// Log and move on to the next account.
		r.logger.Error().Err(err).Msgf("Error syncing account %s", acc.Username)
	}
	return nil
}
func (r *RealDebrid) syncAccount(account *account.Account) error {
if account.Token == "" {
return fmt.Errorf("account %s has no token", account.Username)
}
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/user", r.Host), nil)
if err != nil {
return fmt.Errorf("error creating request for account %s: %w", account.Username, err)
}
resp, err := account.Client().Do(req)
if err != nil {
return fmt.Errorf("error checking account %s: %w", account.Username, err)
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
return fmt.Errorf("account %s is not valid, status code: %d", account.Username, resp.StatusCode)
}
defer resp.Body.Close()
var profile profileResponse
if err := json.NewDecoder(resp.Body).Decode(&profile); err != nil {
return fmt.Errorf("error decoding profile for account %s: %w", account.Username, err)
}
account.Username = profile.Username
// Get traffic usage
trafficReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/traffic/details", r.Host), nil)
if err != nil {
return fmt.Errorf("error creating request for traffic details for account %s: %w", account.Username, err)
}
trafficResp, err := account.Client().Do(trafficReq)
if err != nil {
return fmt.Errorf("error checking traffic for account %s: %w", account.Username, err)
}
if trafficResp.StatusCode != http.StatusOK {
trafficResp.Body.Close()
return fmt.Errorf("error checking traffic for account %s, status code: %d", account.Username, trafficResp.StatusCode)
}
defer trafficResp.Body.Close()
var trafficData TrafficResponse
if err := json.NewDecoder(trafficResp.Body).Decode(&trafficData); err != nil {
// Skip logging traffic error
account.TrafficUsed.Store(0)
} else {
today := time.Now().Format(time.DateOnly)
if todayData, exists := trafficData[today]; exists {
account.TrafficUsed.Store(todayData.Bytes)
}
}
//r.accountsManager.Update(account)
return nil
} }

View File

@@ -144,11 +144,11 @@ type profileResponse struct {
Id int64 `json:"id"` Id int64 `json:"id"`
Username string `json:"username"` Username string `json:"username"`
Email string `json:"email"` Email string `json:"email"`
Points int64 `json:"points"` Points int `json:"points"`
Locale string `json:"locale"` Locale string `json:"locale"`
Avatar string `json:"avatar"` Avatar string `json:"avatar"`
Type string `json:"type"` Type string `json:"type"`
Premium int `json:"premium"` Premium int64 `json:"premium"`
Expiration time.Time `json:"expiration"` Expiration time.Time `json:"expiration"`
} }
@@ -156,3 +156,10 @@ type AvailableSlotsResponse struct {
ActiveSlots int `json:"nb"` ActiveSlots int `json:"nb"`
TotalSlots int `json:"limit"` TotalSlots int `json:"limit"`
} }
// hostData is a single day's entry in the Real-Debrid /traffic/details
// response: byte counts broken down per host plus the day's total bytes.
type hostData struct {
Host map[string]int64 `json:"host"`
Bytes int64 `json:"bytes"`
}
// TrafficResponse maps a date key (formatted as time.DateOnly, i.e.
// "2006-01-02" — see syncAccount's lookup) to that day's traffic data.
type TrafficResponse map[string]hostData

View File

@@ -20,15 +20,17 @@ import (
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types" "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version" "github.com/sirrobot01/decypharr/pkg/version"
"go.uber.org/ratelimit"
) )
type Torbox struct { type Torbox struct {
name string name string
Host string `json:"host"` Host string `json:"host"`
APIKey string APIKey string
accounts *types.Accounts accountsManager *account.Manager
autoExpiresLinksAfter time.Duration autoExpiresLinksAfter time.Duration
DownloadUncached bool DownloadUncached bool
@@ -40,12 +42,7 @@ type Torbox struct {
addSamples bool addSamples bool
} }
func (tb *Torbox) GetProfile() (*types.Profile, error) { func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*Torbox, error) {
return nil, nil
}
func New(dc config.Debrid) (*Torbox, error) {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{ headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey), "Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -54,7 +51,7 @@ func New(dc config.Debrid) (*Torbox, error) {
_log := logger.New(dc.Name) _log := logger.New(dc.Name)
client := request.New( client := request.New(
request.WithHeaders(headers), request.WithHeaders(headers),
request.WithRateLimiter(rl), request.WithRateLimiter(ratelimits["main"]),
request.WithLogger(_log), request.WithLogger(_log),
request.WithProxy(dc.Proxy), request.WithProxy(dc.Proxy),
) )
@@ -67,7 +64,7 @@ func New(dc config.Debrid) (*Torbox, error) {
name: "torbox", name: "torbox",
Host: "https://api.torbox.app/v1", Host: "https://api.torbox.app/v1",
APIKey: dc.APIKey, APIKey: dc.APIKey,
accounts: types.NewAccounts(dc), accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached, DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter, autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client, client: client,
@@ -142,6 +139,9 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
payload := &bytes.Buffer{} payload := &bytes.Buffer{}
writer := multipart.NewWriter(payload) writer := multipart.NewWriter(payload)
_ = writer.WriteField("magnet", torrent.Magnet.Link) _ = writer.WriteField("magnet", torrent.Magnet.Link)
if !torrent.DownloadUncached {
_ = writer.WriteField("add_only_if_cached", "true")
}
err := writer.Close() err := writer.Close()
if err != nil { if err != nil {
return nil, err return nil, err
@@ -165,6 +165,7 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
torrent.Id = torrentId torrent.Id = torrentId
torrent.MountPath = tb.MountPath torrent.MountPath = tb.MountPath
torrent.Debrid = tb.name torrent.Debrid = tb.name
torrent.Added = time.Now().Format(time.RFC3339)
return torrent, nil return torrent, nil
} }
@@ -407,7 +408,7 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error {
func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error { func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files)) filesCh := make(chan types.File, len(t.Files))
linkCh := make(chan *types.DownloadLink) linkCh := make(chan types.DownloadLink)
errCh := make(chan error, len(t.Files)) errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup var wg sync.WaitGroup
@@ -420,7 +421,7 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
errCh <- err errCh <- err
return return
} }
if link != nil { if link.DownloadLink != "" {
linkCh <- link linkCh <- link
file.DownloadLink = link file.DownloadLink = link
} }
@@ -440,13 +441,6 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
files[file.Name] = file files[file.Name] = file
} }
// Collect download links
for link := range linkCh {
if link != nil {
tb.accounts.SetDownloadLink(link.Link, link)
}
}
// Check for errors // Check for errors
for err := range errCh { for err := range errCh {
if err != nil { if err != nil {
@@ -458,7 +452,7 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
return nil return nil
} }
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) { func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host) url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
query := gourl.Values{} query := gourl.Values{}
query.Add("torrent_id", t.Id) query.Add("torrent_id", t.Id)
@@ -474,7 +468,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Str("torrent_id", t.Id). Str("torrent_id", t.Id).
Str("file_id", file.Id). Str("file_id", file.Id).
Msg("Failed to make request to Torbox API") Msg("Failed to make request to Torbox API")
return nil, err return types.DownloadLink{}, err
} }
var data DownloadLinksResponse var data DownloadLinksResponse
@@ -484,7 +478,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Str("torrent_id", t.Id). Str("torrent_id", t.Id).
Str("file_id", file.Id). Str("file_id", file.Id).
Msg("Failed to unmarshal Torbox API response") Msg("Failed to unmarshal Torbox API response")
return nil, err return types.DownloadLink{}, err
} }
if data.Data == nil { if data.Data == nil {
@@ -495,7 +489,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Interface("error", data.Error). Interface("error", data.Error).
Str("detail", data.Detail). Str("detail", data.Detail).
Msg("Torbox API returned no data") Msg("Torbox API returned no data")
return nil, fmt.Errorf("error getting download links") return types.DownloadLink{}, fmt.Errorf("error getting download links")
} }
link := *data.Data link := *data.Data
@@ -504,11 +498,12 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Str("torrent_id", t.Id). Str("torrent_id", t.Id).
Str("file_id", file.Id). Str("file_id", file.Id).
Msg("Torbox API returned empty download link") Msg("Torbox API returned empty download link")
return nil, fmt.Errorf("error getting download links") return types.DownloadLink{}, fmt.Errorf("error getting download links")
} }
now := time.Now() now := time.Now()
downloadLink := &types.DownloadLink{ dl := types.DownloadLink{
Token: tb.APIKey,
Link: file.Link, Link: file.Link,
DownloadLink: link, DownloadLink: link,
Id: file.Id, Id: file.Id,
@@ -516,7 +511,9 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
ExpiresAt: now.Add(tb.autoExpiresLinksAfter), ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
} }
return downloadLink, nil tb.accountsManager.StoreDownloadLink(dl)
return dl, nil
} }
func (tb *Torbox) GetDownloadingStatus() []string { func (tb *Torbox) GetDownloadingStatus() []string {
@@ -524,7 +521,25 @@ func (tb *Torbox) GetDownloadingStatus() []string {
} }
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) { func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
url := fmt.Sprintf("%s/api/torrents/mylist", tb.Host) offset := 0
allTorrents := make([]*types.Torrent, 0)
for {
torrents, err := tb.getTorrents(offset)
if err != nil {
break
}
if len(torrents) == 0 {
break
}
allTorrents = append(allTorrents, torrents...)
offset += len(torrents)
}
return allTorrents, nil
}
func (tb *Torbox) getTorrents(offset int) ([]*types.Torrent, error) {
url := fmt.Sprintf("%s/api/torrents/mylist?offset=%d", tb.Host, offset)
req, _ := http.NewRequest(http.MethodGet, url, nil) req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req) resp, err := tb.client.MakeRequest(req)
if err != nil { if err != nil {
@@ -611,8 +626,8 @@ func (tb *Torbox) GetDownloadUncached() bool {
return tb.DownloadUncached return tb.DownloadUncached
} }
func (tb *Torbox) GetDownloadLinks() (map[string]*types.DownloadLink, error) { func (tb *Torbox) RefreshDownloadLinks() error {
return nil, nil return nil
} }
func (tb *Torbox) CheckLink(link string) error { func (tb *Torbox) CheckLink(link string) error {
@@ -623,15 +638,19 @@ func (tb *Torbox) GetMountPath() string {
return tb.MountPath return tb.MountPath
} }
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
return nil
}
func (tb *Torbox) GetAvailableSlots() (int, error) { func (tb *Torbox) GetAvailableSlots() (int, error) {
//TODO: Implement the logic to check available slots for Torbox //TODO: Implement the logic to check available slots for Torbox
return 0, fmt.Errorf("not implemented") return 0, fmt.Errorf("not implemented")
} }
func (tb *Torbox) Accounts() *types.Accounts { func (tb *Torbox) GetProfile() (*types.Profile, error) {
return tb.accounts return nil, nil
}
func (tb *Torbox) AccountManager() *account.Manager {
return tb.accountsManager
}
func (tb *Torbox) SyncAccounts() error {
return nil
} }

View File

@@ -4,9 +4,10 @@ import (
"bufio" "bufio"
"cmp" "cmp"
"context" "context"
"crypto/tls"
"errors" "errors"
"fmt" "fmt"
"github.com/sirrobot01/decypharr/pkg/rclone" "net/http"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -17,6 +18,10 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/rclone"
"github.com/sirrobot01/decypharr/pkg/debrid/types" "github.com/sirrobot01/decypharr/pkg/debrid/types"
"encoding/json" "encoding/json"
@@ -72,18 +77,17 @@ type RepairRequest struct {
type Cache struct { type Cache struct {
dir string dir string
client types.Client client common.Client
logger zerolog.Logger logger zerolog.Logger
torrents *torrentCache torrents *torrentCache
invalidDownloadLinks sync.Map folderNaming WebDavFolderNaming
folderNaming WebDavFolderNaming
listingDebouncer *utils.Debouncer[bool] listingDebouncer *utils.Debouncer[bool]
// monitors // monitors
repairRequest sync.Map invalidDownloadLinks *xsync.Map[string, string]
failedToReinsert sync.Map repairRequest *xsync.Map[string, *reInsertRequest]
downloadLinkRequests sync.Map failedToReinsert *xsync.Map[string, struct{}]
// repair // repair
repairChan chan RepairRequest repairChan chan RepairRequest
@@ -108,9 +112,10 @@ type Cache struct {
config config.Debrid config config.Debrid
customFolders []string customFolders []string
mounter *rclone.Mount mounter *rclone.Mount
httpClient *http.Client
} }
func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount) *Cache { func NewDebridCache(dc config.Debrid, client common.Client, mounter *rclone.Mount) *Cache {
cfg := config.Get() cfg := config.Get()
cet, err := time.LoadLocation("CET") cet, err := time.LoadLocation("CET")
if err != nil { if err != nil {
@@ -122,9 +127,13 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet)) cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet))
if err != nil { if err != nil {
// If we can't create a CET scheduler, fallback to local time // If we can't create a CET scheduler, fallback to local time
cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local)) cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local), gocron.WithGlobalJobOptions(
gocron.WithTags("decypharr-"+dc.Name)))
} }
scheduler, err := gocron.NewScheduler(gocron.WithLocation(time.Local)) scheduler, err := gocron.NewScheduler(
gocron.WithLocation(time.Local),
gocron.WithGlobalJobOptions(
gocron.WithTags("decypharr-"+dc.Name)))
if err != nil { if err != nil {
// If we can't create a local scheduler, fallback to CET // If we can't create a local scheduler, fallback to CET
scheduler = cetSc scheduler = cetSc
@@ -149,6 +158,18 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
} }
_log := logger.New(fmt.Sprintf("%s-webdav", client.Name())) _log := logger.New(fmt.Sprintf("%s-webdav", client.Name()))
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
MaxIdleConns: 10,
MaxIdleConnsPerHost: 2,
}
httpClient := &http.Client{
Transport: transport,
Timeout: 0,
}
c := &Cache{ c := &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
@@ -167,7 +188,12 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
customFolders: customFolders, customFolders: customFolders,
mounter: mounter, mounter: mounter,
ready: make(chan struct{}), ready: make(chan struct{}),
httpClient: httpClient,
invalidDownloadLinks: xsync.NewMap[string, string](),
repairRequest: xsync.NewMap[string, *reInsertRequest](),
failedToReinsert: xsync.NewMap[string, struct{}](),
repairChan: make(chan RepairRequest, 100), // Initialize the repair channel, max 100 requests buffered
} }
c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) { c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) {
@@ -218,10 +244,9 @@ func (c *Cache) Reset() {
c.torrents.reset() c.torrents.reset()
// 3. Clear any sync.Maps // 3. Clear any sync.Maps
c.invalidDownloadLinks = sync.Map{} c.invalidDownloadLinks = xsync.NewMap[string, string]()
c.repairRequest = sync.Map{} c.repairRequest = xsync.NewMap[string, *reInsertRequest]()
c.failedToReinsert = sync.Map{} c.failedToReinsert = xsync.NewMap[string, struct{}]()
c.downloadLinkRequests = sync.Map{}
// 5. Rebuild the listing debouncer // 5. Rebuild the listing debouncer
c.listingDebouncer = utils.NewDebouncer[bool]( c.listingDebouncer = utils.NewDebouncer[bool](
@@ -254,12 +279,6 @@ func (c *Cache) Start(ctx context.Context) error {
// initial download links // initial download links
go c.refreshDownloadLinks(ctx) go c.refreshDownloadLinks(ctx)
if err := c.StartSchedule(ctx); err != nil {
c.logger.Error().Err(err).Msg("Failed to start cache worker")
}
c.repairChan = make(chan RepairRequest, 100) // Initialize the repair channel, max 100 requests buffered
go c.repairWorker(ctx) go c.repairWorker(ctx)
cfg := config.Get() cfg := config.Get()
@@ -535,7 +554,7 @@ func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)
mergedFiles := mergeFiles(o, updatedTorrent) // Useful for merging files across multiple torrents, while keeping the most recent mergedFiles := mergeFiles(o, updatedTorrent) // Useful for merging files across multiple torrents, while keeping the most recent
updatedTorrent.Files = mergedFiles updatedTorrent.Files = mergedFiles
} }
c.torrents.set(torrentName, t, updatedTorrent) c.torrents.set(torrentName, t)
go c.SaveTorrent(t) go c.SaveTorrent(t)
if callback != nil { if callback != nil {
go callback(updatedTorrent) go callback(updatedTorrent)
@@ -551,7 +570,7 @@ func (c *Cache) setTorrents(torrents map[string]CachedTorrent, callback func())
mergedFiles := mergeFiles(o, updatedTorrent) mergedFiles := mergeFiles(o, updatedTorrent)
updatedTorrent.Files = mergedFiles updatedTorrent.Files = mergedFiles
} }
c.torrents.set(torrentName, t, updatedTorrent) c.torrents.set(torrentName, t)
} }
c.SaveTorrents() c.SaveTorrents()
if callback != nil { if callback != nil {
@@ -708,7 +727,7 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
Str("torrent_id", t.Id). Str("torrent_id", t.Id).
Str("torrent_name", t.Name). Str("torrent_name", t.Name).
Int("total_files", len(t.Files)). Int("total_files", len(t.Files)).
Msg("Torrent still not complete after refresh") Msg("Torrent still not complete after refresh, marking as bad")
} else { } else {
addedOn, err := time.Parse(time.RFC3339, t.Added) addedOn, err := time.Parse(time.RFC3339, t.Added)
@@ -751,7 +770,7 @@ func (c *Cache) Add(t *types.Torrent) error {
} }
func (c *Cache) Client() types.Client { func (c *Cache) Client() common.Client {
return c.client return c.client
} }
@@ -905,3 +924,7 @@ func (c *Cache) Logger() zerolog.Logger {
func (c *Cache) GetConfig() config.Debrid { func (c *Cache) GetConfig() config.Debrid {
return c.config return c.config
} }
func (c *Cache) Download(req *http.Request) (*http.Response, error) {
return c.httpClient.Do(req)
}

View File

@@ -3,6 +3,7 @@ package store
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types" "github.com/sirrobot01/decypharr/pkg/debrid/types"
) )
@@ -30,59 +31,44 @@ func (r *downloadLinkRequest) Wait() (string, error) {
return r.result, r.err return r.result, r.err
} }
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) { func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
// Check link cache // Check link cache
if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil { if dl, err := c.checkDownloadLink(fileLink); err == nil && !dl.Empty() {
return dl, nil return dl, nil
} }
if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
// Wait for the other request to complete and use its result
result := req.(*downloadLinkRequest)
return result.Wait()
}
// Create a new request object
req := newDownloadLinkRequest()
c.downloadLinkRequests.Store(fileLink, req)
dl, err := c.fetchDownloadLink(torrentName, filename, fileLink) dl, err := c.fetchDownloadLink(torrentName, filename, fileLink)
if err != nil { if err != nil {
req.Complete("", err) return types.DownloadLink{}, err
c.downloadLinkRequests.Delete(fileLink)
return "", err
} }
if dl == nil || dl.DownloadLink == "" { if dl.Empty() {
err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName) err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
req.Complete("", err) return types.DownloadLink{}, err
c.downloadLinkRequests.Delete(fileLink)
return "", err
} }
req.Complete(dl.DownloadLink, err) return dl, err
c.downloadLinkRequests.Delete(fileLink)
return dl.DownloadLink, err
} }
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*types.DownloadLink, error) { func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
emptyDownloadLink := types.DownloadLink{}
ct := c.GetTorrentByName(torrentName) ct := c.GetTorrentByName(torrentName)
if ct == nil { if ct == nil {
return nil, fmt.Errorf("torrent not found") return emptyDownloadLink, fmt.Errorf("torrent not found")
} }
file, ok := ct.GetFile(filename) file, ok := ct.GetFile(filename)
if !ok { if !ok {
return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName) return emptyDownloadLink, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
} }
if file.Link == "" { if file.Link == "" {
// file link is empty, refresh the torrent to get restricted links // file link is empty, refresh the torrent to get restricted links
ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
if ct == nil { if ct == nil {
return nil, fmt.Errorf("failed to refresh torrent") return emptyDownloadLink, fmt.Errorf("failed to refresh torrent")
} else { } else {
file, ok = ct.GetFile(filename) file, ok = ct.GetFile(filename)
if !ok { if !ok {
return nil, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName) return emptyDownloadLink, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
} }
} }
} }
@@ -92,12 +78,12 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
// Try to reinsert the torrent? // Try to reinsert the torrent?
newCt, err := c.reInsertTorrent(ct) newCt, err := c.reInsertTorrent(ct)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to reinsert torrent. %w", err) return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent. %w", err)
} }
ct = newCt ct = newCt
file, ok = ct.GetFile(filename) file, ok = ct.GetFile(filename)
if !ok { if !ok {
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName) return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
} }
} }
@@ -106,41 +92,39 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
if err != nil { if err != nil {
if errors.Is(err, utils.HosterUnavailableError) { if errors.Is(err, utils.HosterUnavailableError) {
c.logger.Trace(). c.logger.Trace().
Str("token", utils.Mask(downloadLink.Token)).
Str("filename", filename). Str("filename", filename).
Str("torrent_id", ct.Id). Str("torrent_id", ct.Id).
Msg("Hoster unavailable, attempting to reinsert torrent") Msg("Hoster unavailable, attempting to reinsert torrent")
newCt, err := c.reInsertTorrent(ct) newCt, err := c.reInsertTorrent(ct)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to reinsert torrent: %w", err) return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent: %w", err)
} }
ct = newCt ct = newCt
file, ok = ct.GetFile(filename) file, ok = ct.GetFile(filename)
if !ok { if !ok {
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName) return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
} }
// Retry getting the download link // Retry getting the download link
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file) downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil { if err != nil {
return nil, fmt.Errorf("retry failed to get download link: %w", err) return emptyDownloadLink, fmt.Errorf("retry failed to get download link: %w", err)
} }
if downloadLink == nil { if downloadLink.Empty() {
return nil, fmt.Errorf("download link is empty after retry") return emptyDownloadLink, fmt.Errorf("download link is empty after retry")
} }
return nil, nil return emptyDownloadLink, fmt.Errorf("download link is empty after retry")
} else if errors.Is(err, utils.TrafficExceededError) { } else if errors.Is(err, utils.TrafficExceededError) {
// This is likely a fair usage limit error // This is likely a fair usage limit error
return nil, err return emptyDownloadLink, err
} else { } else {
return nil, fmt.Errorf("failed to get download link: %w", err) return emptyDownloadLink, fmt.Errorf("failed to get download link: %w", err)
} }
} }
if downloadLink == nil { if downloadLink.Empty() {
return nil, fmt.Errorf("download link is empty") return emptyDownloadLink, fmt.Errorf("download link is empty")
} }
// Set link to cache
go c.client.Accounts().SetDownloadLink(fileLink, downloadLink)
return downloadLink, nil return downloadLink, nil
} }
@@ -151,28 +135,33 @@ func (c *Cache) GetFileDownloadLinks(t CachedTorrent) {
} }
} }
func (c *Cache) checkDownloadLink(link string) (string, error) { func (c *Cache) checkDownloadLink(link string) (types.DownloadLink, error) {
dl, err := c.client.AccountManager().GetDownloadLink(link)
dl, err := c.client.Accounts().GetDownloadLink(link)
if err != nil { if err != nil {
return "", err return dl, err
} }
if !c.downloadLinkIsInvalid(dl.DownloadLink) { if !c.downloadLinkIsInvalid(dl.DownloadLink) {
return dl.DownloadLink, nil return dl, nil
} }
return "", fmt.Errorf("download link not found for %s", link) return types.DownloadLink{}, fmt.Errorf("download link not found for %s", link)
} }
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) { func (c *Cache) MarkDownloadLinkAsInvalid(downloadLink types.DownloadLink, reason string) {
c.invalidDownloadLinks.Store(downloadLink, reason) c.invalidDownloadLinks.Store(downloadLink.DownloadLink, reason)
// Remove the download api key from active // Remove the download api key from active
if reason == "bandwidth_exceeded" { if reason == "bandwidth_exceeded" {
// Disable the account // Disable the account
_, account, err := c.client.Accounts().GetDownloadLinkWithAccount(link) accountManager := c.client.AccountManager()
account, err := accountManager.GetAccount(downloadLink.Token)
if err != nil { if err != nil {
c.logger.Error().Err(err).Str("token", utils.Mask(downloadLink.Token)).Msg("Failed to get account to disable")
return return
} }
c.client.Accounts().Disable(account) if account == nil {
c.logger.Error().Str("token", utils.Mask(downloadLink.Token)).Msg("Account not found to disable")
return
}
accountManager.Disable(account)
} }
} }
@@ -194,5 +183,10 @@ func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, e
} }
func (c *Cache) GetTotalActiveDownloadLinks() int { func (c *Cache) GetTotalActiveDownloadLinks() int {
return c.client.Accounts().GetLinksCount() total := 0
allAccounts := c.client.AccountManager().Active()
for _, acc := range allAccounts {
total += acc.DownloadLinksCount()
}
return total
} }

View File

@@ -3,13 +3,14 @@ package store
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io" "io"
"net/http" "net/http"
"os" "os"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
) )
type fileInfo struct { type fileInfo struct {
@@ -120,7 +121,7 @@ func (c *Cache) refreshTorrents(ctx context.Context) {
close(workChan) close(workChan)
wg.Wait() wg.Wait()
c.listingDebouncer.Call(false) c.listingDebouncer.Call(true)
c.logger.Debug().Msgf("Processed %d new torrents", counter) c.logger.Debug().Msgf("Processed %d new torrents", counter)
} }
@@ -243,14 +244,10 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) {
} }
defer c.downloadLinksRefreshMu.Unlock() defer c.downloadLinksRefreshMu.Unlock()
links, err := c.client.GetDownloadLinks() if err := c.client.RefreshDownloadLinks(); err != nil {
if err != nil {
c.logger.Error().Err(err).Msg("Failed to get download links") c.logger.Error().Err(err).Msg("Failed to get download links")
return return
} }
c.client.Accounts().SetDownloadLinks(links) c.logger.Debug().Msgf("Refreshed download links")
c.logger.Debug().Msgf("Refreshed download %d links", c.client.Accounts().GetLinksCount())
} }

View File

@@ -4,11 +4,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"sync"
"time"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types" "github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
"time"
) )
type reInsertRequest struct { type reInsertRequest struct {
@@ -59,6 +61,8 @@ func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
} }
} }
// GetBrokenFiles checks the files in the torrent for broken links.
// It also attempts to reinsert the torrent if any files are broken.
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string { func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
files := make(map[string]types.File) files := make(map[string]types.File)
repairStrategy := config.Get().Repair.Strategy repairStrategy := config.Get().Repair.Strategy
@@ -217,8 +221,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
if _, ok := c.failedToReinsert.Load(oldID); ok { if _, ok := c.failedToReinsert.Load(oldID); ok {
return ct, fmt.Errorf("can't retry re-insert for %s", torrent.Id) return ct, fmt.Errorf("can't retry re-insert for %s", torrent.Id)
} }
if reqI, inFlight := c.repairRequest.Load(oldID); inFlight { if req, inFlight := c.repairRequest.Load(oldID); inFlight {
req := reqI.(*reInsertRequest)
c.logger.Debug().Msgf("Waiting for existing reinsert request to complete for torrent %s", oldID) c.logger.Debug().Msgf("Waiting for existing reinsert request to complete for torrent %s", oldID)
return req.Wait() return req.Wait()
} }
@@ -232,12 +235,13 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
// Submit the magnet to the debrid service // Submit the magnet to the debrid service
newTorrent := &types.Torrent{ newTorrent := &types.Torrent{
Name: torrent.Name, Name: torrent.Name,
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name), Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
InfoHash: torrent.InfoHash, InfoHash: torrent.InfoHash,
Size: torrent.Size, Size: torrent.Size,
Files: make(map[string]types.File), Files: make(map[string]types.File),
Arr: torrent.Arr, Arr: torrent.Arr,
DownloadUncached: false,
} }
var err error var err error
newTorrent, err = c.client.SubmitMagnet(newTorrent) newTorrent, err = c.client.SubmitMagnet(newTorrent)
@@ -260,7 +264,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
_ = c.client.DeleteTorrent(newTorrent.Id) _ = c.client.DeleteTorrent(newTorrent.Id)
} }
c.markAsFailedToReinsert(oldID) c.markAsFailedToReinsert(oldID)
return ct, err return ct, fmt.Errorf("failed to check torrent: %w", err)
} }
// Update the torrent in the cache // Update the torrent in the cache
@@ -293,7 +297,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
} }
} }
req.Complete(ct, err) req.Complete(ct, nil)
c.markAsSuccessfullyReinserted(oldID) c.markAsSuccessfullyReinserted(oldID)
c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted") c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted")
@@ -303,9 +307,8 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
func (c *Cache) resetInvalidLinks(ctx context.Context) { func (c *Cache) resetInvalidLinks(ctx context.Context) {
c.logger.Debug().Msgf("Resetting accounts") c.logger.Debug().Msgf("Resetting accounts")
c.invalidDownloadLinks = sync.Map{} c.invalidDownloadLinks = xsync.NewMap[string, string]()
c.client.Accounts().Reset() // Reset the active download keys c.client.AccountManager().Reset() // Reset the active download keys
// Refresh the download links // Refresh the download links
c.refreshDownloadLinks(ctx) c.refreshDownloadLinks(ctx)
} }

View File

@@ -40,19 +40,27 @@ type directoryFilter struct {
ageThreshold time.Duration // only for last_added ageThreshold time.Duration // only for last_added
} }
type torrents struct {
sync.RWMutex
byID map[string]CachedTorrent
byName map[string]CachedTorrent
}
type folders struct { type folders struct {
sync.RWMutex sync.RWMutex
listing map[string][]os.FileInfo // folder name to file listing listing map[string][]os.FileInfo // folder name to file listing
} }
type CachedTorrentEntry struct {
CachedTorrent
deleted bool // Tombstone flag
}
type torrentCache struct { type torrentCache struct {
torrents torrents mu sync.RWMutex
torrents []CachedTorrentEntry // Changed to store entries with tombstone
// Lookup indices
idIndex map[string]int
nameIndex map[string]int
// Compaction tracking
deletedCount atomic.Int32
compactThreshold int // Trigger compaction when deletedCount exceeds this
listing atomic.Value listing atomic.Value
folders folders folders folders
@@ -69,12 +77,11 @@ type sortableFile struct {
} }
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache { func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
tc := &torrentCache{ tc := &torrentCache{
torrents: torrents{ torrents: []CachedTorrentEntry{},
byID: make(map[string]CachedTorrent), idIndex: make(map[string]int),
byName: make(map[string]CachedTorrent), nameIndex: make(map[string]int),
}, compactThreshold: 100, // Compact when 100+ deleted entries
folders: folders{ folders: folders{
listing: make(map[string][]os.FileInfo), listing: make(map[string][]os.FileInfo),
}, },
@@ -87,10 +94,12 @@ func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
} }
func (tc *torrentCache) reset() { func (tc *torrentCache) reset() {
tc.torrents.Lock() tc.mu.Lock()
tc.torrents.byID = make(map[string]CachedTorrent) tc.torrents = tc.torrents[:0] // Clear the slice
tc.torrents.byName = make(map[string]CachedTorrent) tc.idIndex = make(map[string]int) // Reset the ID index
tc.torrents.Unlock() tc.nameIndex = make(map[string]int) // Reset the name index
tc.deletedCount.Store(0)
tc.mu.Unlock()
// reset the sorted listing // reset the sorted listing
tc.sortNeeded.Store(false) tc.sortNeeded.Store(false)
@@ -103,62 +112,183 @@ func (tc *torrentCache) reset() {
} }
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) { func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
tc.torrents.RLock() tc.mu.RLock()
defer tc.torrents.RUnlock() defer tc.mu.RUnlock()
torrent, exists := tc.torrents.byID[id]
return torrent, exists if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
entry := tc.torrents[index]
if !entry.deleted {
return entry.CachedTorrent, true
}
}
return CachedTorrent{}, false
} }
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) { func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
tc.torrents.RLock() tc.mu.RLock()
defer tc.torrents.RUnlock() defer tc.mu.RUnlock()
torrent, exists := tc.torrents.byName[name]
return torrent, exists if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
entry := tc.torrents[index]
if !entry.deleted {
return entry.CachedTorrent, true
}
}
return CachedTorrent{}, false
} }
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) { func (tc *torrentCache) set(name string, torrent CachedTorrent) {
tc.torrents.Lock() tc.mu.Lock()
// Set the id first defer tc.mu.Unlock()
tc.torrents.byName[name] = torrent // Check if this torrent already exists (update case)
tc.torrents.byID[torrent.Id] = torrent // This is the unadulterated torrent if existingIndex, exists := tc.idIndex[torrent.Id]; exists && existingIndex < len(tc.torrents) {
tc.torrents.Unlock() if !tc.torrents[existingIndex].deleted {
// Update existing entry
tc.torrents[existingIndex].CachedTorrent = torrent
tc.sortNeeded.Store(true)
return
}
}
// Add new torrent
entry := CachedTorrentEntry{
CachedTorrent: torrent,
deleted: false,
}
tc.torrents = append(tc.torrents, entry)
index := len(tc.torrents) - 1
tc.idIndex[torrent.Id] = index
tc.nameIndex[name] = index
tc.sortNeeded.Store(true) tc.sortNeeded.Store(true)
} }
func (tc *torrentCache) getListing() []os.FileInfo { func (tc *torrentCache) removeId(id string) {
// Fast path: if we have a sorted list and no changes since last sort tc.mu.Lock()
if !tc.sortNeeded.Load() { defer tc.mu.Unlock()
return tc.listing.Load().([]os.FileInfo)
}
// Slow path: need to sort if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
tc.refreshListing() if !tc.torrents[index].deleted {
return tc.listing.Load().([]os.FileInfo) // Mark as deleted (tombstone)
tc.torrents[index].deleted = true
tc.deletedCount.Add(1)
// Remove from indices
delete(tc.idIndex, id)
// Find and remove from name index
for name, idx := range tc.nameIndex {
if idx == index {
delete(tc.nameIndex, name)
break
}
}
tc.sortNeeded.Store(true)
// Trigger compaction if threshold exceeded
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
go tc.compact()
}
}
}
} }
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo { func (tc *torrentCache) remove(name string) {
tc.folders.RLock() tc.mu.Lock()
defer tc.folders.RUnlock() defer tc.mu.Unlock()
if folderName == "" {
return tc.getListing() if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
if !tc.torrents[index].deleted {
// Mark as deleted (tombstone)
torrentID := tc.torrents[index].CachedTorrent.Id
tc.torrents[index].deleted = true
tc.deletedCount.Add(1)
// Remove from indices
delete(tc.nameIndex, name)
delete(tc.idIndex, torrentID)
tc.sortNeeded.Store(true)
// Trigger compaction if threshold exceeded
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
go tc.compact()
}
}
} }
if folder, ok := tc.folders.listing[folderName]; ok { }
return folder
// Compact removes tombstoned entries and rebuilds indices
func (tc *torrentCache) compact() {
tc.mu.Lock()
defer tc.mu.Unlock()
deletedCount := tc.deletedCount.Load()
if deletedCount == 0 {
return // Nothing to compact
} }
// If folder not found, return empty slice
return []os.FileInfo{} // Create new slice with only non-deleted entries
newTorrents := make([]CachedTorrentEntry, 0, len(tc.torrents)-int(deletedCount))
newIdIndex := make(map[string]int, len(tc.idIndex))
newNameIndex := make(map[string]int, len(tc.nameIndex))
// Copy non-deleted entries
for oldIndex, entry := range tc.torrents {
if !entry.deleted {
newIndex := len(newTorrents)
newTorrents = append(newTorrents, entry)
// Find the name for this torrent (reverse lookup)
for name, nameIndex := range tc.nameIndex {
if nameIndex == oldIndex {
newNameIndex[name] = newIndex
break
}
}
newIdIndex[entry.CachedTorrent.Id] = newIndex
}
}
// Replace old data with compacted data
tc.torrents = newTorrents
tc.idIndex = newIdIndex
tc.nameIndex = newNameIndex
tc.deletedCount.Store(0)
tc.sortNeeded.Store(true)
}
func (tc *torrentCache) ForceCompact() {
tc.compact()
}
func (tc *torrentCache) GetStats() (total, active, deleted int) {
tc.mu.RLock()
defer tc.mu.RUnlock()
total = len(tc.torrents)
deleted = int(tc.deletedCount.Load())
active = total - deleted
return total, active, deleted
} }
func (tc *torrentCache) refreshListing() { func (tc *torrentCache) refreshListing() {
tc.mu.RLock()
tc.torrents.RLock() all := make([]sortableFile, 0, len(tc.nameIndex))
all := make([]sortableFile, 0, len(tc.torrents.byName)) for name, index := range tc.nameIndex {
for name, t := range tc.torrents.byName { if index < len(tc.torrents) && !tc.torrents[index].deleted {
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad}) t := tc.torrents[index].CachedTorrent
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
}
} }
tc.sortNeeded.Store(false) tc.sortNeeded.Store(false)
tc.torrents.RUnlock() tc.mu.RUnlock()
sort.Slice(all, func(i, j int) bool { sort.Slice(all, func(i, j int) bool {
if all[i].name != all[j].name { if all[i].name != all[j].name {
@@ -234,8 +364,31 @@ func (tc *torrentCache) refreshListing() {
wg.Wait() wg.Wait()
} }
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool { func (tc *torrentCache) getListing() []os.FileInfo {
// Fast path: if we have a sorted list and no changes since last sort
if !tc.sortNeeded.Load() {
return tc.listing.Load().([]os.FileInfo)
}
// Slow path: need to sort
tc.refreshListing()
return tc.listing.Load().([]os.FileInfo)
}
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
tc.folders.RLock()
defer tc.folders.RUnlock()
if folderName == "" {
return tc.getListing()
}
if folder, ok := tc.folders.listing[folderName]; ok {
return folder
}
// If folder not found, return empty slice
return []os.FileInfo{}
}
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
torrentName := strings.ToLower(file.name) torrentName := strings.ToLower(file.name)
for _, filter := range filters { for _, filter := range filters {
matched := false matched := false
@@ -278,51 +431,46 @@ func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file so
} }
func (tc *torrentCache) getAll() map[string]CachedTorrent { func (tc *torrentCache) getAll() map[string]CachedTorrent {
tc.torrents.RLock() tc.mu.RLock()
defer tc.torrents.RUnlock() defer tc.mu.RUnlock()
result := make(map[string]CachedTorrent, len(tc.torrents.byID))
for name, torrent := range tc.torrents.byID { result := make(map[string]CachedTorrent)
result[name] = torrent for _, entry := range tc.torrents {
if !entry.deleted {
result[entry.CachedTorrent.Id] = entry.CachedTorrent
}
} }
return result return result
} }
func (tc *torrentCache) getAllCount() int { func (tc *torrentCache) getAllCount() int {
tc.torrents.RLock() tc.mu.RLock()
defer tc.torrents.RUnlock() defer tc.mu.RUnlock()
return len(tc.torrents.byID) return len(tc.torrents) - int(tc.deletedCount.Load())
} }
func (tc *torrentCache) getAllByName() map[string]CachedTorrent { func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
tc.torrents.RLock() tc.mu.RLock()
defer tc.torrents.RUnlock() defer tc.mu.RUnlock()
results := make(map[string]CachedTorrent, len(tc.torrents.byName))
for name, torrent := range tc.torrents.byName { results := make(map[string]CachedTorrent, len(tc.nameIndex))
results[name] = torrent for name, index := range tc.nameIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
results[name] = tc.torrents[index].CachedTorrent
}
} }
return results return results
} }
func (tc *torrentCache) getIdMaps() map[string]struct{} { func (tc *torrentCache) getIdMaps() map[string]struct{} {
tc.torrents.RLock() tc.mu.RLock()
defer tc.torrents.RUnlock() defer tc.mu.RUnlock()
res := make(map[string]struct{}, len(tc.torrents.byID))
for id := range tc.torrents.byID { res := make(map[string]struct{}, len(tc.idIndex))
res[id] = struct{}{} for id, index := range tc.idIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
res[id] = struct{}{}
}
} }
return res return res
} }
func (tc *torrentCache) removeId(id string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.byID, id)
tc.sortNeeded.Store(true)
}
func (tc *torrentCache) remove(name string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.byName, name)
tc.sortNeeded.Store(true)
}

View File

@@ -6,11 +6,11 @@ import (
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
) )
func (c *Cache) StartSchedule(ctx context.Context) error { func (c *Cache) StartWorker(ctx context.Context) error {
// For now, we just want to refresh the listing and download links // For now, we just want to refresh the listing and download links
// Stop any existing jobs before starting new ones // Stop any existing jobs before starting new ones
c.scheduler.RemoveByTags("decypharr") c.scheduler.RemoveByTags("decypharr-%s", c.GetConfig().Name)
// Schedule download link refresh job // Schedule download link refresh job
if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil { if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil {
@@ -19,7 +19,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job // Schedule the job
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() { if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
c.refreshDownloadLinks(ctx) c.refreshDownloadLinks(ctx)
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil { }), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create download link refresh job") c.logger.Error().Err(err).Msg("Failed to create download link refresh job")
} else { } else {
c.logger.Debug().Msgf("Download link refresh job scheduled for every %s", c.downloadLinksRefreshInterval) c.logger.Debug().Msgf("Download link refresh job scheduled for every %s", c.downloadLinksRefreshInterval)
@@ -33,7 +33,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job // Schedule the job
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() { if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
c.refreshTorrents(ctx) c.refreshTorrents(ctx)
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil { }), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create torrent refresh job") c.logger.Error().Err(err).Msg("Failed to create torrent refresh job")
} else { } else {
c.logger.Debug().Msgf("Torrent refresh job scheduled for every %s", c.torrentRefreshInterval) c.logger.Debug().Msgf("Torrent refresh job scheduled for every %s", c.torrentRefreshInterval)
@@ -49,7 +49,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job // Schedule the job
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() { if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
c.resetInvalidLinks(ctx) c.resetInvalidLinks(ctx)
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil { }), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create link reset job") c.logger.Error().Err(err).Msg("Failed to create link reset job")
} else { } else {
c.logger.Debug().Msgf("Link reset job scheduled for every midnight, CET") c.logger.Debug().Msgf("Link reset job scheduled for every midnight, CET")

View File

@@ -1,243 +0,0 @@
package types
import (
"github.com/sirrobot01/decypharr/internal/config"
"sync"
"time"
)
// Accounts manages the pool of debrid accounts for one provider and tracks
// which account is currently used for downloads.
type Accounts struct {
	current  *Account   // active account; nil when none is available
	accounts []*Account // all configured accounts, in config order
	mu       sync.RWMutex
}
// NewAccounts builds the account pool from the provider's configured
// download API keys, skipping empty entries. The first account (if any)
// becomes the current one.
func NewAccounts(debridConf config.Debrid) *Accounts {
	var accounts []*Account
	for idx, token := range debridConf.DownloadAPIKeys {
		if token == "" {
			continue
		}
		accounts = append(accounts, newAccount(debridConf.Name, token, idx))
	}
	pool := &Accounts{accounts: accounts}
	if len(accounts) > 0 {
		pool.current = accounts[0]
	}
	return pool
}
// Account is a single provider credential (one API token) together with its
// cache of generated download links.
type Account struct {
	Debrid   string // e.g., "realdebrid", "torbox", etc.
	Order    int    // position in the configured DownloadAPIKeys list
	Disabled bool   // written via disable()/Reset; guarded by the owning Accounts' mutex
	Token    string
	links    map[string]*DownloadLink // keyed by sliceFileLink(link); guarded by mu
	mu       sync.RWMutex
}
// All returns the accounts that are not disabled, in configuration order.
func (a *Accounts) All() []*Account {
	a.mu.RLock()
	defer a.mu.RUnlock()
	active := make([]*Account, 0, len(a.accounts))
	for _, acc := range a.accounts {
		if acc.Disabled {
			continue
		}
		active = append(active, acc)
	}
	return active
}
// Current returns the account to use for downloads, lazily re-electing one
// when none is set. Fast path: under the read lock, return current if it is
// already non-nil. Slow path: take the write lock, re-check (double-checked
// locking, since another goroutine may have elected an account between
// RUnlock and Lock), then promote the first non-disabled account. Returns
// nil when no account is configured or all are disabled.
func (a *Accounts) Current() *Account {
	a.mu.RLock()
	if a.current != nil {
		current := a.current
		a.mu.RUnlock()
		return current
	}
	a.mu.RUnlock()
	a.mu.Lock()
	defer a.mu.Unlock()
	// Double-check after acquiring write lock
	if a.current != nil {
		return a.current
	}
	activeAccounts := make([]*Account, 0)
	for _, acc := range a.accounts {
		if !acc.Disabled {
			activeAccounts = append(activeAccounts, acc)
		}
	}
	if len(activeAccounts) > 0 {
		a.current = activeAccounts[0]
	}
	return a.current
}
// Disable marks the given account unusable. If it was the current account,
// the first remaining non-disabled account (if any) takes its place;
// otherwise current becomes nil.
func (a *Accounts) Disable(account *Account) {
	a.mu.Lock()
	defer a.mu.Unlock()
	account.disable()
	if a.current != account {
		return
	}
	a.current = nil
	for _, acc := range a.accounts {
		if !acc.Disabled {
			a.current = acc
			break
		}
	}
}
// Reset re-enables every account, clears all cached download links, and
// restores the first configured account as current (nil when there are none).
func (a *Accounts) Reset() {
	a.mu.Lock()
	defer a.mu.Unlock()
	for _, acc := range a.accounts {
		acc.resetDownloadLinks()
		acc.Disabled = false
	}
	a.current = nil
	if len(a.accounts) > 0 {
		a.current = a.accounts[0]
	}
}
// GetDownloadLink returns the cached, non-expired download link for fileLink
// on the current account. Errors: NoActiveAccountsError when no account is
// usable, NoDownloadLinkError when nothing is cached, and
// DownloadLinkExpiredError / EmptyDownloadLinkError for stale or blank links.
func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) {
	// Capture the current account once: calling Current() per step is racy —
	// the account could be disabled (and become nil) between the nil check
	// and the use, causing a nil-pointer dereference. This also mirrors
	// GetDownloadLinkWithAccount.
	current := a.Current()
	if current == nil {
		return nil, NoActiveAccountsError
	}
	dl, ok := current.getLink(fileLink)
	if !ok {
		return nil, NoDownloadLinkError
	}
	if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
		return nil, DownloadLinkExpiredError
	}
	if dl.DownloadLink == "" {
		return nil, EmptyDownloadLinkError
	}
	return dl, nil
}
// GetDownloadLinkWithAccount is like GetDownloadLink but also returns the
// account the lookup was performed against, so callers can act on it (e.g.
// disable it) when the link is expired or empty.
func (a *Accounts) GetDownloadLinkWithAccount(fileLink string) (*DownloadLink, *Account, error) {
	account := a.Current()
	if account == nil {
		return nil, nil, NoActiveAccountsError
	}
	dl, found := account.getLink(fileLink)
	switch {
	case !found:
		return nil, nil, NoDownloadLinkError
	case dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()):
		return nil, account, DownloadLinkExpiredError
	case dl.DownloadLink == "":
		return nil, account, EmptyDownloadLinkError
	}
	return dl, account, nil
}
// SetDownloadLink caches dl for fileLink on the current account; it is a
// no-op when no account is active.
func (a *Accounts) SetDownloadLink(fileLink string, dl *DownloadLink) {
	// Capture Current() once: a second call could return nil if the account
	// is disabled concurrently, panicking on the method call.
	current := a.Current()
	if current == nil {
		return
	}
	current.setLink(fileLink, dl)
}
// DeleteDownloadLink removes the cached link for fileLink from the current
// account; it is a no-op when no account is active.
func (a *Accounts) DeleteDownloadLink(fileLink string) {
	// Capture Current() once to avoid a nil-deref race between the nil
	// check and the method call.
	current := a.Current()
	if current == nil {
		return
	}
	current.deleteLink(fileLink)
}
// GetLinksCount reports how many download links are cached on the current
// account, or 0 when no account is active.
func (a *Accounts) GetLinksCount() int {
	// Capture Current() once to avoid a nil-deref race between the nil
	// check and the method call.
	current := a.Current()
	if current == nil {
		return 0
	}
	return current.LinksCount()
}
// SetDownloadLinks merges a batch of links into the current account's cache;
// it is a no-op when no account is active.
func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
	// Capture Current() once to avoid a nil-deref race between the nil
	// check and the method call.
	current := a.Current()
	if current == nil {
		return
	}
	current.setLinks(links)
}
// newAccount constructs an enabled account for the given provider/token pair
// with an empty download-link cache. index records its config position.
func newAccount(debridName, token string, index int) *Account {
	acc := &Account{
		Debrid: debridName,
		Token:  token,
		Order:  index,
	}
	acc.links = make(map[string]*DownloadLink)
	return acc
}
// getLink looks up the cached download link for fileLink (normalized via
// sliceFileLink). The key is computed before taking the read lock, which is
// safe because sliceFileLink only reads the immutable Debrid field.
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
	key := a.sliceFileLink(fileLink)
	a.mu.RLock()
	defer a.mu.RUnlock()
	dl, ok := a.links[key]
	return dl, ok
}
// setLink caches dl under the normalized form of fileLink.
func (a *Account) setLink(fileLink string, dl *DownloadLink) {
	key := a.sliceFileLink(fileLink)
	a.mu.Lock()
	a.links[key] = dl
	a.mu.Unlock()
}
// deleteLink removes the cache entry for the normalized form of fileLink.
func (a *Account) deleteLink(fileLink string) {
	key := a.sliceFileLink(fileLink)
	a.mu.Lock()
	delete(a.links, key)
	a.mu.Unlock()
}
// resetDownloadLinks discards every cached download link by swapping in a
// fresh map.
func (a *Account) resetDownloadLinks() {
	a.mu.Lock()
	a.links = make(map[string]*DownloadLink)
	a.mu.Unlock()
}
// LinksCount reports the number of cached download links.
func (a *Account) LinksCount() int {
	a.mu.RLock()
	n := len(a.links)
	a.mu.RUnlock()
	return n
}
// disable marks the account unusable. It deliberately does not take a.mu:
// its only caller in this file, Accounts.Disable, holds the Accounts-level
// mutex, which is the same lock under which All/Current read Disabled.
func (a *Account) disable() {
	a.Disabled = true
}
// setLinks merges a batch of download links into the cache, keyed by the
// normalized link, silently dropping entries that have already expired
// (a zero ExpiresAt means "never expires" and is kept).
func (a *Account) setLinks(links map[string]*DownloadLink) {
	a.mu.Lock()
	defer a.mu.Unlock()
	now := time.Now()
	for _, dl := range links {
		expired := !dl.ExpiresAt.IsZero() && dl.ExpiresAt.Before(now)
		if expired {
			continue
		}
		a.links[a.sliceFileLink(dl.Link)] = dl
	}
}
// sliceFileLink normalizes a file link for use as a cache key. For
// realdebrid, links longer than 39 characters are truncated to their first
// 39 characters (presumably the stable, file-identifying prefix — matches
// the original keying behavior); all other providers use the link verbatim.
func (a *Account) sliceFileLink(fileLink string) string {
	const rdPrefixLen = 39
	if a.Debrid != "realdebrid" || len(fileLink) < rdPrefixLen {
		return fileLink
	}
	return fileLink[:rdPrefixLen]
}

View File

@@ -1,28 +0,0 @@
package types
import (
"github.com/rs/zerolog"
)
// Client is the provider-agnostic contract implemented by each debrid
// service integration (e.g. realdebrid, torbox).
type Client interface {
	// SubmitMagnet sends the torrent's magnet to the provider and returns
	// the provider-populated torrent record.
	SubmitMagnet(tr *Torrent) (*Torrent, error)
	// CheckStatus refreshes the torrent's state from the provider.
	CheckStatus(tr *Torrent) (*Torrent, error)
	GetFileDownloadLinks(tr *Torrent) error
	// GetDownloadLink resolves a direct download link for one file of tr.
	GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
	DeleteTorrent(torrentId string) error
	// IsAvailable reports availability keyed by infohash.
	IsAvailable(infohashes []string) map[string]bool
	GetDownloadUncached() bool
	UpdateTorrent(torrent *Torrent) error
	GetTorrent(torrentId string) (*Torrent, error)
	GetTorrents() ([]*Torrent, error)
	Name() string
	Logger() zerolog.Logger
	GetDownloadingStatus() []string
	GetDownloadLinks() (map[string]*DownloadLink, error)
	// CheckLink verifies that a link is still valid on the provider side.
	CheckLink(link string) error
	GetMountPath() string
	Accounts() *Accounts // Returns the active download account/token
	DeleteDownloadLink(linkId string) error
	GetProfile() (*Profile, error)
	GetAvailableSlots() (int, error)
}

View File

@@ -14,7 +14,7 @@ var NoActiveAccountsError = &Error{
Code: "no_active_accounts", Code: "no_active_accounts",
} }
var NoDownloadLinkError = &Error{ var ErrDownloadLinkNotFound = &Error{
Message: "No download link found", Message: "No download link found",
Code: "no_download_link", Code: "no_download_link",
} }

View File

@@ -2,6 +2,7 @@ package types
import ( import (
"fmt" "fmt"
"net/url"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
@@ -42,6 +43,38 @@ type Torrent struct {
sync.Mutex sync.Mutex
} }
// Copy returns a snapshot of the torrent taken under its lock. The Files
// map and Links slice are duplicated so the copy is safe to read while the
// original keeps mutating. The embedded Mutex is NOT copied (the returned
// value gets a fresh zero-value lock). Arr is copied by reference —
// NOTE(review): confirm callers treat the shared Arr as read-only.
func (t *Torrent) Copy() *Torrent {
	t.Lock()
	defer t.Unlock()
	// Duplicate the map; File values are copied by value.
	newFiles := make(map[string]File, len(t.Files))
	for k, v := range t.Files {
		newFiles[k] = v
	}
	return &Torrent{
		Id:               t.Id,
		InfoHash:         t.InfoHash,
		Name:             t.Name,
		Folder:           t.Folder,
		Filename:         t.Filename,
		OriginalFilename: t.OriginalFilename,
		Size:             t.Size,
		Bytes:            t.Bytes,
		Magnet:           t.Magnet,
		Files:            newFiles,
		Status:           t.Status,
		Added:            t.Added,
		Progress:         t.Progress,
		Speed:            t.Speed,
		Seeders:          t.Seeders,
		Links:            append([]string{}, t.Links...), // detached copy of the slice
		MountPath:        t.MountPath,
		Debrid:           t.Debrid,
		Arr:              t.Arr,
	}
}
func (t *Torrent) GetSymlinkFolder(parent string) string { func (t *Torrent) GetSymlinkFolder(parent string) string {
return filepath.Join(parent, t.Arr.Name, t.Folder) return filepath.Join(parent, t.Arr.Name, t.Folder)
} }
@@ -84,18 +117,18 @@ func (t *Torrent) GetFiles() []File {
} }
type File struct { type File struct {
TorrentId string `json:"torrent_id"` TorrentId string `json:"torrent_id"`
Id string `json:"id"` Id string `json:"id"`
Name string `json:"name"` Name string `json:"name"`
Size int64 `json:"size"` Size int64 `json:"size"`
IsRar bool `json:"is_rar"` IsRar bool `json:"is_rar"`
ByteRange *[2]int64 `json:"byte_range,omitempty"` ByteRange *[2]int64 `json:"byte_range,omitempty"`
Path string `json:"path"` Path string `json:"path"`
Link string `json:"link"` Link string `json:"link"`
AccountId string `json:"account_id"` AccountId string `json:"account_id"`
Generated time.Time `json:"generated"` Generated time.Time `json:"generated"`
Deleted bool `json:"deleted"` Deleted bool `json:"deleted"`
DownloadLink *DownloadLink `json:"-"` DownloadLink DownloadLink `json:"-"`
} }
func (t *Torrent) Cleanup(remove bool) { func (t *Torrent) Cleanup(remove bool) {
@@ -114,22 +147,32 @@ type IngestData struct {
Size int64 `json:"size"` Size int64 `json:"size"`
} }
// LibraryStats summarizes the state of a provider's torrent library.
type LibraryStats struct {
	Total       int `json:"total"`        // total torrents in the library
	Bad         int `json:"bad"`          // torrents considered bad/broken
	ActiveLinks int `json:"active_links"` // currently valid download links
}
// Stats aggregates everything reported for a debrid provider: the user
// profile, library summary, and per-account details.
type Stats struct {
	Profile  *Profile         `json:"profile"`
	Library  LibraryStats     `json:"library"`
	Accounts []map[string]any `json:"accounts"`
}
type Profile struct { type Profile struct {
Name string `json:"name"` Name string `json:"name"`
Id int64 `json:"id"` Id int64 `json:"id"`
Username string `json:"username"` Username string `json:"username"`
Email string `json:"email"` Email string `json:"email"`
Points int64 `json:"points"` Points int `json:"points"`
Type string `json:"type"` Type string `json:"type"`
Premium int `json:"premium"` Premium int64 `json:"premium"`
Expiration time.Time `json:"expiration"` Expiration time.Time `json:"expiration"`
LibrarySize int `json:"library_size"`
BadTorrents int `json:"bad_torrents"`
ActiveLinks int `json:"active_links"`
} }
type DownloadLink struct { type DownloadLink struct {
Debrid string `json:"debrid"`
Token string `json:"token"`
Filename string `json:"filename"` Filename string `json:"filename"`
Link string `json:"link"` Link string `json:"link"`
DownloadLink string `json:"download_link"` DownloadLink string `json:"download_link"`
@@ -139,6 +182,27 @@ type DownloadLink struct {
ExpiresAt time.Time ExpiresAt time.Time
} }
func (d *DownloadLink) String() string { func isValidURL(str string) bool {
return d.DownloadLink u, err := url.Parse(str)
// A valid URL should parse without error, and have a non-empty scheme and host.
return err == nil && u.Scheme != "" && u.Host != ""
}
// Valid reports whether the download link is usable: it must be non-empty
// and parse as a URL with both scheme and host. Returns
// EmptyDownloadLinkError or ErrDownloadLinkNotFound accordingly, nil when
// the link is valid.
func (dl *DownloadLink) Valid() error {
	if dl.Empty() {
		return EmptyDownloadLinkError
	}
	// Check if the link is actually a valid URL
	if !isValidURL(dl.DownloadLink) {
		return ErrDownloadLinkNotFound
	}
	return nil
}
// Empty reports whether no resolved download URL is present.
func (dl *DownloadLink) Empty() bool {
	return dl.DownloadLink == ""
}
func (dl *DownloadLink) String() string {
return dl.DownloadLink
} }

View File

@@ -2,16 +2,18 @@ package qbit
import ( import (
"context" "context"
"crypto/sha256"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"golang.org/x/crypto/bcrypt"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/wire"
"golang.org/x/crypto/bcrypt"
) )
type contextKey string type contextKey string
@@ -97,14 +99,22 @@ func decodeAuthHeader(header string) (string, string, error) {
bearer := string(bytes) bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":") colonIndex := strings.LastIndex(bearer, ":")
host := bearer[:colonIndex] username := bearer[:colonIndex]
token := bearer[colonIndex+1:] password := bearer[colonIndex+1:]
return host, token, nil if username == "" || password == "" {
return username, password, fmt.Errorf("empty username or password")
}
return strings.TrimSpace(username), strings.TrimSpace(password), nil
} }
func (q *QBit) categoryContext(next http.Handler) http.Handler { func (q *QBit) categoryContext(next http.Handler) http.Handler {
// Print full URL for debugging
// Try to get category from URL query first
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Print request method and URL
category := strings.Trim(r.URL.Query().Get("category"), "") category := strings.Trim(r.URL.Query().Get("category"), "")
if category == "" { if category == "" {
// Get from form // Get from form
@@ -127,49 +137,114 @@ func (q *QBit) categoryContext(next http.Handler) http.Handler {
// Only a valid host and token will be added to the context/config. The rest are manual // Only a valid host and token will be added to the context/config. The rest are manual
func (q *QBit) authContext(next http.Handler) http.Handler { func (q *QBit) authContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
cfg := config.Get()
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
category := getCategory(r.Context())
arrs := store.Get().Arr()
// Check if arr exists
a := arrs.Get(category)
if a == nil {
// Arr is not configured, create a new one
downloadUncached := false
a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
}
if err == nil {
host = strings.TrimSpace(host)
if host != "" {
a.Host = host
}
token = strings.TrimSpace(token)
if token != "" {
a.Token = token
}
}
if cfg.NeedsAuth() {
if a.Host == "" || a.Token == "" {
http.Error(w, "Unauthorized: Host and token are required for authentication", http.StatusUnauthorized)
return
}
// try to use either Arr validate, or user auth validation
if err := a.Validate(); err != nil {
// If this failed, try to use user auth validation
if !verifyAuth(host, token) {
http.Error(w, "Unauthorized: Invalid host or token", http.StatusUnauthorized)
return
}
}
}
a.Source = "auto" username, password, err := getUsernameAndPassword(r)
arrs.AddOrUpdate(a) if err != nil {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
category := getCategory(r.Context())
a, err := q.authenticate(category, username, password)
if err != nil {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
ctx := context.WithValue(r.Context(), arrKey, a) ctx := context.WithValue(r.Context(), arrKey, a)
next.ServeHTTP(w, r.WithContext(ctx)) next.ServeHTTP(w, r.WithContext(ctx))
}) })
} }
// getUsernameAndPassword resolves credentials for a qBittorrent API request.
// It tries the Basic-style Authorization header first, then falls back to
// the "sid"/"SID" session cookie created at login. When neither source
// yields credentials it deliberately returns empty strings with a nil error:
// downstream authentication decides whether empty credentials are acceptable
// (they are when auth is disabled). A present-but-invalid SID cookie is a
// hard error.
func getUsernameAndPassword(r *http.Request) (string, string, error) {
	// Try to get from authorization header
	username, password, err := decodeAuthHeader(r.Header.Get("Authorization"))
	if err == nil && username != "" {
		return username, password, err
	}
	// Try to get from cookie
	sid, err := r.Cookie("sid")
	if err != nil {
		// try SID (clients differ in cookie-name casing)
		sid, err = r.Cookie("SID")
	}
	if err == nil {
		username, password, err = extractFromSID(sid.Value)
		if err != nil {
			return "", "", err
		}
	}
	return username, password, nil
}
// authenticate resolves (or lazily creates) the arr for the given category
// and, when authentication is enabled, validates the supplied credentials.
// The username/password pair doubles as the arr's Host/Token. On success the
// arr is stored back into the registry with Source "auto".
func (q *QBit) authenticate(category, username, password string) (*arr.Arr, error) {
	cfg := config.Get()
	arrs := wire.Get().Arr()
	// Check if arr exists
	a := arrs.Get(category)
	if a == nil {
		// Arr is not configured, create a new one
		downloadUncached := false
		a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
	}
	// Overwrite host/token with the caller-supplied credentials — even when
	// they are empty and auth is disabled.
	a.Host = username
	a.Token = password
	if cfg.UseAuth {
		if a.Host == "" || a.Token == "" {
			return nil, fmt.Errorf("unauthorized: Host and token are required for authentication(you've enabled authentication)")
		}
		// try to use either Arr validate, or user auth validation
		if err := a.Validate(); err != nil {
			// If this failed, try to use user auth validation
			if !verifyAuth(username, password) {
				return nil, fmt.Errorf("unauthorized: invalid credentials")
			}
		}
	}
	a.Source = "auto"
	arrs.AddOrUpdate(a)
	return a, nil
}
// createSID builds a signed session token: base64url("user|pass|hash"),
// where hash is the first 16 hex characters of sha256("user|pass" + secret).
// NOTE(review): the cookie embeds the password itself (acceptable only over
// HTTPS), and a username containing '|' cannot round-trip through
// extractFromSID — confirm those inputs are rejected upstream.
func createSID(username, password string) string {
	// Create a verification hash
	cfg := config.Get()
	combined := fmt.Sprintf("%s|%s", username, password)
	hash := sha256.Sum256([]byte(combined + cfg.SecretKey()))
	hashStr := fmt.Sprintf("%x", hash)[:16] // First 16 chars
	// Base64 encode
	return base64.URLEncoding.EncodeToString([]byte(fmt.Sprintf("%s|%s", combined, hashStr)))
}
// extractFromSID decodes and verifies a session token produced by createSID
// (base64url of "username|password|hash") and returns the embedded
// credentials. The hash is recomputed from the credentials plus the server
// secret and must match, so a tampered token is rejected.
func extractFromSID(sid string) (string, string, error) {
	// Decode base64
	decoded, err := base64.URLEncoding.DecodeString(sid)
	if err != nil {
		return "", "", fmt.Errorf("invalid SID format")
	}
	payload := string(decoded)
	// Split from the ends rather than strings.Split into exactly 3 parts:
	// the hash never contains '|' (hex) and the username is taken up to the
	// first '|', so a password containing '|' still round-trips. (The old
	// 3-way split rejected such tokens as "invalid SID structure".)
	hashSep := strings.LastIndex(payload, "|")
	if hashSep < 0 {
		return "", "", fmt.Errorf("invalid SID structure")
	}
	providedHash := payload[hashSep+1:]
	creds := payload[:hashSep]
	userSep := strings.Index(creds, "|")
	if userSep < 0 {
		return "", "", fmt.Errorf("invalid SID structure")
	}
	username := creds[:userSep]
	password := creds[userSep+1:]
	// Verify hash
	cfg := config.Get()
	combined := fmt.Sprintf("%s|%s", username, password)
	expectedHash := sha256.Sum256([]byte(combined + cfg.SecretKey()))
	expectedHashStr := fmt.Sprintf("%x", expectedHash)[:16]
	if providedHash != expectedHashStr {
		return "", "", fmt.Errorf("invalid SID signature")
	}
	return username, password, nil
}
func hashesContext(next http.Handler) http.Handler { func hashesContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_hashes := chi.URLParam(r, "hashes") _hashes := chi.URLParam(r, "hashes")

View File

@@ -1,25 +1,33 @@
package qbit package qbit
import ( import (
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
"net/http" "net/http"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
) )
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) { func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() ctx := r.Context()
_arr := getArrFromContext(ctx) cfg := config.Get()
if _arr == nil { username := r.FormValue("username")
// Arr not in context, return OK password := r.FormValue("password")
_, _ = w.Write([]byte("Ok.")) a, err := q.authenticate(getCategory(ctx), username, password)
if err != nil {
http.Error(w, err.Error(), http.StatusUnauthorized)
return return
} }
if err := _arr.Validate(); err != nil { if cfg.UseAuth {
q.logger.Error().Err(err).Msgf("Error validating arr") cookie := &http.Cookie{
http.Error(w, "Invalid arr configuration", http.StatusBadRequest) Name: "sid",
return Value: createSID(a.Host, a.Token),
Path: "/",
SameSite: http.SameSiteNoneMode,
}
http.SetCookie(w, cookie)
} }
_, _ = w.Write([]byte("Ok.")) _, _ = w.Write([]byte("Ok."))
} }

View File

@@ -4,7 +4,7 @@ import (
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/wire"
) )
type QBit struct { type QBit struct {
@@ -12,7 +12,7 @@ type QBit struct {
Password string Password string
DownloadFolder string DownloadFolder string
Categories []string Categories []string
storage *store.TorrentStorage storage *wire.TorrentStorage
logger zerolog.Logger logger zerolog.Logger
Tags []string Tags []string
} }
@@ -25,7 +25,7 @@ func New() *QBit {
Password: cfg.Password, Password: cfg.Password,
DownloadFolder: cfg.DownloadFolder, DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories, Categories: cfg.Categories,
storage: store.Get().Torrents(), storage: wire.Get().Torrents(),
logger: logger.New("qbit"), logger: logger.New("qbit"),
} }
} }

View File

@@ -1,33 +1,50 @@
package qbit package qbit
import ( import (
"github.com/go-chi/chi/v5"
"net/http" "net/http"
"github.com/go-chi/chi/v5"
) )
func (q *QBit) Routes() http.Handler { func (q *QBit) Routes() http.Handler {
r := chi.NewRouter() r := chi.NewRouter()
r.Use(q.categoryContext) r.Use(q.categoryContext)
r.Group(func(r chi.Router) { r.Group(func(r chi.Router) {
r.Use(q.authContext)
r.Post("/auth/login", q.handleLogin) r.Post("/auth/login", q.handleLogin)
r.Route("/torrents", func(r chi.Router) { r.Route("/torrents", func(r chi.Router) {
r.Use(q.authContext)
r.Use(hashesContext) r.Use(hashesContext)
r.Get("/info", q.handleTorrentsInfo) r.Get("/info", q.handleTorrentsInfo)
r.Post("/info", q.handleTorrentsInfo)
r.Post("/add", q.handleTorrentsAdd) r.Post("/add", q.handleTorrentsAdd)
r.Post("/delete", q.handleTorrentsDelete) r.Post("/delete", q.handleTorrentsDelete)
r.Get("/categories", q.handleCategories) r.Get("/categories", q.handleCategories)
r.Post("/categories", q.handleCategories)
r.Post("/createCategory", q.handleCreateCategory) r.Post("/createCategory", q.handleCreateCategory)
r.Post("/setCategory", q.handleSetCategory) r.Post("/setCategory", q.handleSetCategory)
r.Post("/addTags", q.handleAddTorrentTags) r.Post("/addTags", q.handleAddTorrentTags)
r.Post("/removeTags", q.handleRemoveTorrentTags) r.Post("/removeTags", q.handleRemoveTorrentTags)
r.Post("/createTags", q.handleCreateTags) r.Post("/createTags", q.handleCreateTags)
r.Get("/tags", q.handleGetTags) r.Get("/tags", q.handleGetTags)
r.Get("/pause", q.handleTorrentsPause) r.Get("/pause", q.handleTorrentsPause)
r.Get("/resume", q.handleTorrentsResume) r.Get("/resume", q.handleTorrentsResume)
r.Get("/recheck", q.handleTorrentRecheck) r.Get("/recheck", q.handleTorrentRecheck)
r.Get("/properties", q.handleTorrentProperties) r.Get("/properties", q.handleTorrentProperties)
r.Get("/files", q.handleTorrentFiles) r.Get("/files", q.handleTorrentFiles)
// Create POST equivalents for pause, resume, recheck
r.Post("/tags", q.handleGetTags)
r.Post("/pause", q.handleTorrentsPause)
r.Post("/resume", q.handleTorrentsResume)
r.Post("/recheck", q.handleTorrentRecheck)
r.Post("/properties", q.handleTorrentProperties)
r.Post("/files", q.handleTorrentFiles)
}) })
r.Route("/app", func(r chi.Router) { r.Route("/app", func(r chi.Router) {

View File

@@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr" "github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/wire"
"io" "io"
"mime/multipart" "mime/multipart"
"strings" "strings"
@@ -18,9 +18,9 @@ func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid s
if err != nil { if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err) return fmt.Errorf("error parsing magnet link: %w", err)
} }
_store := store.Get() _store := wire.Get()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent) importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq) err = _store.AddTorrent(ctx, importReq)
if err != nil { if err != nil {
@@ -37,8 +37,8 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
if err != nil { if err != nil {
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err) return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
} }
_store := store.Get() _store := wire.Get()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent) importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq) err = _store.AddTorrent(ctx, importReq)
if err != nil { if err != nil {
return fmt.Errorf("failed to process torrent: %w", err) return fmt.Errorf("failed to process torrent: %w", err)
@@ -46,19 +46,19 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
return nil return nil
} }
func (q *QBit) ResumeTorrent(t *store.Torrent) bool { func (q *QBit) ResumeTorrent(t *wire.Torrent) bool {
return true return true
} }
func (q *QBit) PauseTorrent(t *store.Torrent) bool { func (q *QBit) PauseTorrent(t *wire.Torrent) bool {
return true return true
} }
func (q *QBit) RefreshTorrent(t *store.Torrent) bool { func (q *QBit) RefreshTorrent(t *wire.Torrent) bool {
return true return true
} }
func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties { func (q *QBit) GetTorrentProperties(t *wire.Torrent) *TorrentProperties {
return &TorrentProperties{ return &TorrentProperties{
AdditionDate: t.AddedOn, AdditionDate: t.AddedOn,
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>", Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
@@ -83,7 +83,7 @@ func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
} }
} }
func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool { func (q *QBit) setTorrentTags(t *wire.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",") torrentTags := strings.Split(t.Tags, ",")
for _, tag := range tags { for _, tag := range tags {
if tag == "" { if tag == "" {
@@ -101,7 +101,7 @@ func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
return true return true
} }
func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool { func (q *QBit) removeTorrentTags(t *wire.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",") torrentTags := strings.Split(t.Tags, ",")
newTorrentTags := utils.RemoveItem(torrentTags, tags...) newTorrentTags := utils.RemoveItem(torrentTags, tags...)
q.Tags = utils.RemoveItem(q.Tags, tags...) q.Tags = utils.RemoveItem(q.Tags, tags...)

View File

@@ -4,19 +4,19 @@ import (
"fmt" "fmt"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "strconv"
"time" "time"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
) )
// Mount creates a mount using the rclone RC API with retry logic // Mount creates a mount using the rclone RC API with retry logic
func (m *Manager) Mount(provider, webdavURL string) error { func (m *Manager) Mount(mountPath, provider, webdavURL string) error {
return m.mountWithRetry(provider, webdavURL, 3) return m.mountWithRetry(mountPath, provider, webdavURL, 3)
} }
// mountWithRetry attempts to mount with retry logic // mountWithRetry attempts to mount with retry logic
func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) error { func (m *Manager) mountWithRetry(mountPath, provider, webdavURL string, maxRetries int) error {
if !m.IsReady() { if !m.IsReady() {
if err := m.WaitForReady(30 * time.Second); err != nil { if err := m.WaitForReady(30 * time.Second); err != nil {
return fmt.Errorf("rclone RC server not ready: %w", err) return fmt.Errorf("rclone RC server not ready: %w", err)
@@ -34,7 +34,7 @@ func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) err
time.Sleep(wait) time.Sleep(wait)
} }
if err := m.performMount(provider, webdavURL); err != nil { if err := m.performMount(mountPath, provider, webdavURL); err != nil {
m.logger.Error(). m.logger.Error().
Err(err). Err(err).
Str("provider", provider). Str("provider", provider).
@@ -49,13 +49,8 @@ func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) err
} }
// performMount performs a single mount attempt // performMount performs a single mount attempt
func (m *Manager) performMount(provider, webdavURL string) error { func (m *Manager) performMount(mountPath, provider, webdavURL string) error {
cfg := config.Get() cfg := config.Get()
mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
cacheDir := ""
if cfg.Rclone.CacheDir != "" {
cacheDir = filepath.Join(cfg.Rclone.CacheDir, provider)
}
// Create mount directory // Create mount directory
if err := os.MkdirAll(mountPath, 0755); err != nil { if err := os.MkdirAll(mountPath, 0755); err != nil {
@@ -74,27 +69,40 @@ func (m *Manager) performMount(provider, webdavURL string) error {
// Clean up any stale mount first // Clean up any stale mount first
if exists && !existingMount.Mounted { if exists && !existingMount.Mounted {
m.forceUnmountPath(mountPath) err := m.forceUnmountPath(mountPath)
if err != nil {
return err
}
} }
// Create rclone config for this provider // Create rclone config for this provider
configName := fmt.Sprintf("decypharr-%s", provider) if err := m.createConfig(provider, webdavURL); err != nil {
if err := m.createConfig(configName, webdavURL); err != nil {
return fmt.Errorf("failed to create rclone config: %w", err) return fmt.Errorf("failed to create rclone config: %w", err)
} }
// Prepare mount arguments // Prepare mount arguments
mountArgs := map[string]interface{}{ mountArgs := map[string]interface{}{
"fs": fmt.Sprintf("%s:", configName), "fs": fmt.Sprintf("%s:", provider),
"mountPoint": mountPath, "mountPoint": mountPath,
"mountType": "mount", // Use standard FUSE mount }
"mountOpt": map[string]interface{}{ mountOpt := map[string]interface{}{
"AllowNonEmpty": true, "AllowNonEmpty": true,
"AllowOther": true, "AllowOther": true,
"DebugFUSE": false, "DebugFUSE": false,
"DeviceName": fmt.Sprintf("decypharr-%s", provider), "DeviceName": fmt.Sprintf("decypharr-%s", provider),
"VolumeName": fmt.Sprintf("decypharr-%s", provider), "VolumeName": fmt.Sprintf("decypharr-%s", provider),
}, }
if cfg.Rclone.AsyncRead != nil {
mountOpt["AsyncRead"] = *cfg.Rclone.AsyncRead
}
if cfg.Rclone.UseMmap {
mountOpt["UseMmap"] = cfg.Rclone.UseMmap
}
if cfg.Rclone.Transfers != 0 {
mountOpt["Transfers"] = cfg.Rclone.Transfers
} }
configOpts := make(map[string]interface{}) configOpts := make(map[string]interface{})
@@ -103,29 +111,29 @@ func (m *Manager) performMount(provider, webdavURL string) error {
configOpts["BufferSize"] = cfg.Rclone.BufferSize configOpts["BufferSize"] = cfg.Rclone.BufferSize
} }
if cacheDir != "" {
// Create cache directory if specified
if err := os.MkdirAll(cacheDir, 0755); err != nil {
m.logger.Warn().Str("cacheDir", cacheDir).Msg("Failed to create cache directory")
} else {
configOpts["CacheDir"] = cacheDir
}
}
if len(configOpts) > 0 { if len(configOpts) > 0 {
// Only add _config if there are options to set // Only add _config if there are options to set
mountArgs["_config"] = configOpts mountArgs["_config"] = configOpts
} }
vfsOpt := map[string]interface{}{
"CacheMode": cfg.Rclone.VfsCacheMode,
"DirCacheTime": cfg.Rclone.DirCacheTime,
}
vfsOpt["PollInterval"] = 0 // Poll interval not supported for webdav, set to 0
// Add VFS options if caching is enabled // Add VFS options if caching is enabled
if cfg.Rclone.VfsCacheMode != "off" { if cfg.Rclone.VfsCacheMode != "off" {
vfsOpt := map[string]interface{}{
"CacheMode": cfg.Rclone.VfsCacheMode,
}
if cfg.Rclone.VfsCacheMaxAge != "" { if cfg.Rclone.VfsCacheMaxAge != "" {
vfsOpt["CacheMaxAge"] = cfg.Rclone.VfsCacheMaxAge vfsOpt["CacheMaxAge"] = cfg.Rclone.VfsCacheMaxAge
} }
if cfg.Rclone.VfsDiskSpaceTotal != "" {
vfsOpt["DiskSpaceTotalSize"] = cfg.Rclone.VfsDiskSpaceTotal
}
if cfg.Rclone.VfsReadChunkSizeLimit != "" {
vfsOpt["ChunkSizeLimit"] = cfg.Rclone.VfsReadChunkSizeLimit
}
if cfg.Rclone.VfsCacheMaxSize != "" { if cfg.Rclone.VfsCacheMaxSize != "" {
vfsOpt["CacheMaxSize"] = cfg.Rclone.VfsCacheMaxSize vfsOpt["CacheMaxSize"] = cfg.Rclone.VfsCacheMaxSize
} }
@@ -138,28 +146,50 @@ func (m *Manager) performMount(provider, webdavURL string) error {
if cfg.Rclone.VfsReadAhead != "" { if cfg.Rclone.VfsReadAhead != "" {
vfsOpt["ReadAhead"] = cfg.Rclone.VfsReadAhead vfsOpt["ReadAhead"] = cfg.Rclone.VfsReadAhead
} }
if cfg.Rclone.VfsCacheMinFreeSpace != "" {
vfsOpt["CacheMinFreeSpace"] = cfg.Rclone.VfsCacheMinFreeSpace
}
if cfg.Rclone.VfsFastFingerprint {
vfsOpt["FastFingerprint"] = cfg.Rclone.VfsFastFingerprint
}
if cfg.Rclone.VfsReadChunkStreams != 0 {
vfsOpt["ChunkStreams"] = cfg.Rclone.VfsReadChunkStreams
}
if cfg.Rclone.NoChecksum { if cfg.Rclone.NoChecksum {
vfsOpt["NoChecksum"] = cfg.Rclone.NoChecksum vfsOpt["NoChecksum"] = cfg.Rclone.NoChecksum
} }
if cfg.Rclone.NoModTime { if cfg.Rclone.NoModTime {
vfsOpt["NoModTime"] = cfg.Rclone.NoModTime vfsOpt["NoModTime"] = cfg.Rclone.NoModTime
} }
mountArgs["vfsOpt"] = vfsOpt
} }
// Add mount options based on configuration // Add mount options based on configuration
if cfg.Rclone.UID != 0 { if cfg.Rclone.UID != 0 {
mountArgs["mountOpt"].(map[string]interface{})["UID"] = cfg.Rclone.UID vfsOpt["UID"] = cfg.Rclone.UID
} }
if cfg.Rclone.GID != 0 { if cfg.Rclone.GID != 0 {
mountArgs["mountOpt"].(map[string]interface{})["GID"] = cfg.Rclone.GID vfsOpt["GID"] = cfg.Rclone.GID
} }
if cfg.Rclone.AttrTimeout != "" {
if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil { if cfg.Rclone.Umask != "" {
mountArgs["mountOpt"].(map[string]interface{})["AttrTimeout"] = attrTimeout.String() umask, err := strconv.ParseInt(cfg.Rclone.Umask, 8, 32)
if err == nil {
vfsOpt["Umask"] = uint32(umask)
} }
} }
if cfg.Rclone.AttrTimeout != "" {
if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil {
mountOpt["AttrTimeout"] = attrTimeout.String()
}
}
mountArgs["vfsOpt"] = vfsOpt
mountArgs["mountOpt"] = mountOpt
// Make the mount request // Make the mount request
req := RCRequest{ req := RCRequest{
Command: "mount/mount", Command: "mount/mount",
@@ -169,7 +199,7 @@ func (m *Manager) performMount(provider, webdavURL string) error {
_, err := m.makeRequest(req, true) _, err := m.makeRequest(req, true)
if err != nil { if err != nil {
// Clean up mount point on failure // Clean up mount point on failure
m.forceUnmountPath(mountPath) _ = m.forceUnmountPath(mountPath)
return fmt.Errorf("failed to create mount for %s: %w", provider, err) return fmt.Errorf("failed to create mount for %s: %w", provider, err)
} }
@@ -180,7 +210,7 @@ func (m *Manager) performMount(provider, webdavURL string) error {
WebDAVURL: webdavURL, WebDAVURL: webdavURL,
Mounted: true, Mounted: true,
MountedAt: time.Now().Format(time.RFC3339), MountedAt: time.Now().Format(time.RFC3339),
ConfigName: configName, ConfigName: provider,
} }
m.mountsMutex.Lock() m.mountsMutex.Lock()
@@ -319,7 +349,7 @@ func (m *Manager) RefreshDir(provider string, dirs []string) error {
dirs = []string{"/"} dirs = []string{"/"}
} }
args := map[string]interface{}{ args := map[string]interface{}{
"fs": fmt.Sprintf("decypharr-%s:", provider), "fs": fmt.Sprintf("%s:", provider),
} }
for i, dir := range dirs { for i, dir := range dirs {
if dir != "" { if dir != "" {

View File

@@ -44,8 +44,8 @@ func (m *Manager) checkMountHealth(provider string) bool {
req := RCRequest{ req := RCRequest{
Command: "operations/list", Command: "operations/list",
Args: map[string]interface{}{ Args: map[string]interface{}{
"fs": fmt.Sprintf("decypharr-%s:", provider), "fs": fmt.Sprintf("%s:", provider),
"remote": "/", "remote": "",
}, },
} }
@@ -71,10 +71,10 @@ func (m *Manager) RecoverMount(provider string) error {
} }
// Wait a moment // Wait a moment
time.Sleep(2 * time.Second) time.Sleep(1 * time.Second)
// Try to remount // Try to remount
if err := m.Mount(provider, mountInfo.WebDAVURL); err != nil { if err := m.Mount(mountInfo.LocalPath, provider, mountInfo.WebDAVURL); err != nil {
return fmt.Errorf("failed to recover mount for %s: %w", provider, err) return fmt.Errorf("failed to recover mount for %s: %w", provider, err)
} }

View File

@@ -6,10 +6,13 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"net/http" "net/http"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"slices"
"strings"
"sync" "sync"
"time" "time"
@@ -24,7 +27,7 @@ type Manager struct {
rcPort string rcPort string
rcUser string rcUser string
rcPass string rcPass string
configDir string rcloneDir string
mounts map[string]*MountInfo mounts map[string]*MountInfo
mountsMutex sync.RWMutex mountsMutex sync.RWMutex
logger zerolog.Logger logger zerolog.Logger
@@ -61,10 +64,10 @@ func NewManager() *Manager {
cfg := config.Get() cfg := config.Get()
rcPort := "5572" rcPort := "5572"
configDir := filepath.Join(cfg.Path, "rclone") rcloneDir := filepath.Join(cfg.Path, "rclone")
// Ensure config directory exists // Ensure config directory exists
if err := os.MkdirAll(configDir, 0755); err != nil { if err := os.MkdirAll(rcloneDir, 0755); err != nil {
_logger := logger.New("rclone") _logger := logger.New("rclone")
_logger.Error().Err(err).Msg("Failed to create rclone config directory") _logger.Error().Err(err).Msg("Failed to create rclone config directory")
} }
@@ -73,12 +76,12 @@ func NewManager() *Manager {
return &Manager{ return &Manager{
rcPort: rcPort, rcPort: rcPort,
configDir: configDir, rcloneDir: rcloneDir,
mounts: make(map[string]*MountInfo), mounts: make(map[string]*MountInfo),
logger: logger.New("rclone"), logger: logger.New("rclone"),
ctx: ctx, ctx: ctx,
cancel: cancel, cancel: cancel,
httpClient: &http.Client{Timeout: 30 * time.Second}, httpClient: &http.Client{Timeout: 60 * time.Second},
serverReady: make(chan struct{}), serverReady: make(chan struct{}),
} }
} }
@@ -98,15 +101,37 @@ func (m *Manager) Start(ctx context.Context) error {
return nil return nil
} }
logFile := filepath.Join(logger.GetLogPath(), "rclone.log")
// Delete old log file if it exists
if _, err := os.Stat(logFile); err == nil {
if err := os.Remove(logFile); err != nil {
return fmt.Errorf("failed to remove old rclone log file: %w", err)
}
}
args := []string{ args := []string{
"rcd", "rcd",
"--rc-addr", ":" + m.rcPort, "--rc-addr", ":" + m.rcPort,
"--rc-no-auth", // We'll handle auth at the application level "--rc-no-auth", // We'll handle auth at the application level
"--config", filepath.Join(m.configDir, "rclone.conf"), "--config", filepath.Join(m.rcloneDir, "rclone.conf"),
"--log-level", "INFO", "--log-file", logFile,
}
logLevel := cfg.Rclone.LogLevel
if logLevel != "" {
if !slices.Contains([]string{"DEBUG", "INFO", "NOTICE", "ERROR"}, logLevel) {
logLevel = "INFO"
}
args = append(args, "--log-level", logLevel)
}
if cfg.Rclone.CacheDir != "" {
if err := os.MkdirAll(cfg.Rclone.CacheDir, 0755); err == nil {
args = append(args, "--cache-dir", cfg.Rclone.CacheDir)
}
} }
m.cmd = exec.CommandContext(ctx, "rclone", args...) m.cmd = exec.CommandContext(ctx, "rclone", args...)
m.cmd.Dir = m.configDir
// Capture output for debugging // Capture output for debugging
var stdout, stderr bytes.Buffer var stdout, stderr bytes.Buffer
@@ -114,9 +139,10 @@ func (m *Manager) Start(ctx context.Context) error {
m.cmd.Stderr = &stderr m.cmd.Stderr = &stderr
if err := m.cmd.Start(); err != nil { if err := m.cmd.Start(); err != nil {
m.logger.Error().Str("stderr", stderr.String()).Str("stdout", stdout.String()).
Err(err).Msg("Failed to start rclone RC server")
return fmt.Errorf("failed to start rclone RC server: %w", err) return fmt.Errorf("failed to start rclone RC server: %w", err)
} }
m.serverStarted = true m.serverStarted = true
// Wait for server to be ready in a goroutine // Wait for server to be ready in a goroutine
@@ -155,9 +181,12 @@ func (m *Manager) Start(ctx context.Context) error {
default: default:
if code, ok := ExitCode(err); ok { if code, ok := ExitCode(err); ok {
m.logger.Debug().Int("exit_code", code).Err(err). m.logger.Debug().Int("exit_code", code).Err(err).
Str("stderr", stderr.String()).
Str("stdout", stdout.String()).
Msg("Rclone RC server error") Msg("Rclone RC server error")
} else { } else {
m.logger.Debug().Err(err).Msg("Rclone RC server error (no exit code)") m.logger.Debug().Err(err).Str("stderr", stderr.String()).
Str("stdout", stdout.String()).Msg("Rclone RC server error (no exit code)")
} }
} }
}() }()
@@ -234,50 +263,35 @@ func (m *Manager) Stop() error {
case err := <-done: case err := <-done:
if err != nil && !errors.Is(err, context.Canceled) && !WasHardTerminated(err) { if err != nil && !errors.Is(err, context.Canceled) && !WasHardTerminated(err) {
m.logger.Warn().Err(err).Msg("Rclone process exited with error") m.logger.Warn().Err(err).Msg("Rclone process exited with error")
} else {
m.logger.Info().Msg("Rclone process exited gracefully")
} }
case <-time.After(10 * time.Second): case <-time.After(2 * time.Second):
m.logger.Warn().Msg("Timeout waiting for rclone to exit, force killing") m.logger.Warn().Msg("Timeout waiting for rclone to exit, force killing")
if err := m.cmd.Process.Kill(); err != nil { if err := m.cmd.Process.Kill(); err != nil {
m.logger.Error().Err(err).Msg("Failed to force kill rclone process") // Check if the process already finished
return err if !strings.Contains(err.Error(), "process already finished") {
m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
return err
}
m.logger.Info().Msg("Process already finished during kill attempt")
} }
// Wait a bit more for the kill to take effect
// Still wait for the Wait() to complete to clean up the process
select { select {
case <-done: case <-done:
m.logger.Info().Msg("Rclone process killed successfully") m.logger.Info().Msg("Rclone process cleanup completed")
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
m.logger.Error().Msg("Process may still be running after kill") m.logger.Error().Msg("Process cleanup timeout")
} }
} }
} }
// Clean up any remaining mount directories
cfg := config.Get()
if cfg.Rclone.MountPath != "" {
m.cleanupMountDirectories(cfg.Rclone.MountPath)
}
m.serverStarted = false m.serverStarted = false
m.logger.Info().Msg("Rclone RC server stopped") m.logger.Info().Msg("Rclone RC server stopped")
return nil return nil
} }
// cleanupMountDirectories removes empty mount directories
func (m *Manager) cleanupMountDirectories(_ string) {
m.mountsMutex.RLock()
defer m.mountsMutex.RUnlock()
for _, mount := range m.mounts {
if mount.LocalPath != "" {
// Try to remove the directory if it's empty
if err := os.Remove(mount.LocalPath); err == nil {
m.logger.Debug().Str("path", mount.LocalPath).Msg("Removed empty mount directory")
}
// Don't log errors here as the directory might not be empty, which is fine
}
}
}
// waitForServer waits for the RC server to become available // waitForServer waits for the RC server to become available
func (m *Manager) waitForServer() { func (m *Manager) waitForServer() {
maxAttempts := 30 maxAttempts := 30
@@ -325,7 +339,12 @@ func (m *Manager) makeRequest(req RCRequest, close bool) (*http.Response, error)
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
// Read the response body to get more details // Read the response body to get more details
defer resp.Body.Close() defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
m.logger.Debug().Err(err).Msg("Failed to close response body")
}
}(resp.Body)
var errorResp RCResponse var errorResp RCResponse
if err := json.NewDecoder(resp.Body).Decode(&errorResp); err != nil { if err := json.NewDecoder(resp.Body).Decode(&errorResp); err != nil {
return nil, fmt.Errorf("request failed with status %s, but could not decode error response: %w", resp.Status, err) return nil, fmt.Errorf("request failed with status %s, but could not decode error response: %w", resp.Status, err)

View File

@@ -3,10 +3,12 @@ package rclone
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"net/url" "net/url"
"path/filepath" "path/filepath"
"strings"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
) )
// Mount represents a mount using the rclone RC client // Mount represents a mount using the rclone RC client
@@ -19,15 +21,24 @@ type Mount struct {
} }
// NewMount creates a new RC-based mount // NewMount creates a new RC-based mount
func NewMount(provider, webdavURL string, rcManager *Manager) *Mount { func NewMount(provider, customRcloneMount, webdavURL string, rcManager *Manager) *Mount {
cfg := config.Get() cfg := config.Get()
mountPath := filepath.Join(cfg.Rclone.MountPath, provider) var mountPath string
if customRcloneMount != "" {
mountPath = customRcloneMount
} else {
mountPath = filepath.Join(cfg.Rclone.MountPath, provider)
}
_url, err := url.JoinPath(webdavURL, provider) _url, err := url.JoinPath(webdavURL, provider)
if err != nil { if err != nil {
_url = fmt.Sprintf("%s/%s", webdavURL, provider) _url = fmt.Sprintf("%s/%s", webdavURL, provider)
} }
if !strings.HasSuffix(_url, "/") {
_url += "/"
}
return &Mount{ return &Mount{
Provider: provider, Provider: provider,
LocalPath: mountPath, LocalPath: mountPath,
@@ -55,7 +66,7 @@ func (m *Mount) Mount(ctx context.Context) error {
Str("mount_path", m.LocalPath). Str("mount_path", m.LocalPath).
Msg("Creating mount via RC") Msg("Creating mount via RC")
if err := m.rcManager.Mount(m.Provider, m.WebDAVURL); err != nil { if err := m.rcManager.Mount(m.LocalPath, m.Provider, m.WebDAVURL); err != nil {
m.logger.Error().Str("provider", m.Provider).Msg("Mount operation failed") m.logger.Error().Str("provider", m.Provider).Msg("Mount operation failed")
return fmt.Errorf("mount failed for %s", m.Provider) return fmt.Errorf("mount failed for %s", m.Provider)
} }

View File

@@ -76,7 +76,7 @@ func (m *Manager) GetStats() (*Stats, error) {
} }
// Get bandwidth stats // Get bandwidth stats
bwStats, err := m.GetBandwidthStats() bwStats, err := m.GetBandwidthStats()
if err == nil { if err == nil && bwStats != nil {
stats.Bandwidth = *bwStats stats.Bandwidth = *bwStats
} else { } else {
fmt.Println("Failed to get rclone stats", err) fmt.Println("Failed to get rclone stats", err)

View File

@@ -2,11 +2,12 @@ package repair
import ( import (
"fmt" "fmt"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os" "os"
"path/filepath" "path/filepath"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
) )
func fileIsSymlinked(file string) bool { func fileIsSymlinked(file string) bool {
@@ -85,7 +86,7 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile {
return uniqueParents return uniqueParents
} }
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile { func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]common.Client, caches map[string]*store.Cache) []arr.ContentFile {
brokenFiles := make([]arr.ContentFile, 0) brokenFiles := make([]arr.ContentFile, 0)
emptyFiles := make([]arr.ContentFile, 0) emptyFiles := make([]arr.ContentFile, 0)
@@ -149,7 +150,7 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile,
return brokenFiles return brokenFiles
} }
func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string { func (r *Repair) findDebridForPath(dir string, clients map[string]common.Client) string {
// Check cache first // Check cache first
if debridName, exists := r.debridPathCache.Load(dir); exists { if debridName, exists := r.debridPathCache.Load(dir); exists {
return debridName.(string) return debridName.(string)

View File

@@ -2,17 +2,18 @@ package server
import ( import (
"fmt" "fmt"
"net/http"
"runtime"
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/request"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types" debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/wire"
"net/http"
"runtime"
) )
func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) { func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) {
ingests := make([]debridTypes.IngestData, 0) ingests := make([]debridTypes.IngestData, 0)
_store := store.Get() _store := wire.Get()
debrids := _store.Debrid() debrids := _store.Debrid()
if debrids == nil { if debrids == nil {
http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError) http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
@@ -42,7 +43,7 @@ func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) {
return return
} }
_store := store.Get() _store := wire.Get()
debrids := _store.Debrid() debrids := _store.Debrid()
if debrids == nil { if debrids == nil {
@@ -92,35 +93,40 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
"go_version": runtime.Version(), "go_version": runtime.Version(),
} }
debrids := store.Get().Debrid() debrids := wire.Get().Debrid()
if debrids == nil { if debrids != nil {
request.JSONResponse(w, stats, http.StatusOK) clients := debrids.Clients()
return caches := debrids.Caches()
} debridStats := make([]debridTypes.Stats, 0)
clients := debrids.Clients() for debridName, client := range clients {
caches := debrids.Caches() debridStat := debridTypes.Stats{}
profiles := make([]*debridTypes.Profile, 0) libraryStat := debridTypes.LibraryStats{}
for debridName, client := range clients { profile, err := client.GetProfile()
profile, err := client.GetProfile() if err != nil {
profile.Name = debridName s.logger.Error().Err(err).Str("debrid", debridName).Msg("Failed to get debrid profile")
if err != nil { profile = &debridTypes.Profile{
s.logger.Error().Err(err).Msg("Failed to get debrid profile") Name: debridName,
continue }
} }
cache, ok := caches[debridName] profile.Name = debridName
if ok { debridStat.Profile = profile
// Get torrent data cache, ok := caches[debridName]
profile.LibrarySize = cache.TotalTorrents() if ok {
profile.BadTorrents = len(cache.GetListing("__bad__")) // Get torrent data
profile.ActiveLinks = cache.GetTotalActiveDownloadLinks() libraryStat.Total = cache.TotalTorrents()
libraryStat.Bad = len(cache.GetListing("__bad__"))
libraryStat.ActiveLinks = cache.GetTotalActiveDownloadLinks()
}
debridStat.Library = libraryStat
debridStat.Accounts = client.AccountManager().Stats()
debridStats = append(debridStats, debridStat)
} }
profiles = append(profiles, profile) stats["debrids"] = debridStats
} }
stats["debrids"] = profiles
// Add rclone stats if available // Add rclone stats if available
if rcManager := store.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() { if rcManager := wire.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() {
rcStats, err := rcManager.GetStats() rcStats, err := rcManager.GetStats()
if err != nil { if err != nil {
s.logger.Error().Err(err).Msg("Failed to get rclone stats") s.logger.Error().Err(err).Msg("Failed to get rclone stats")

View File

@@ -12,6 +12,7 @@ import (
"io" "io"
"net/http" "net/http"
"os" "os"
"path/filepath"
) )
type Server struct { type Server struct {
@@ -36,11 +37,12 @@ func New(handlers map[string]http.Handler) *Server {
} }
//logs //logs
r.Get("/logs", s.getLogs) r.Get("/logs", s.getLogs) // deprecated, use /debug/logs
//debugs
r.Route("/debug", func(r chi.Router) { r.Route("/debug", func(r chi.Router) {
r.Get("/stats", s.handleStats) r.Get("/stats", s.handleStats)
r.Get("/logs", s.getLogs)
r.Get("/logs/rclone", s.getRcloneLogs)
r.Get("/ingests", s.handleIngests) r.Get("/ingests", s.handleIngests)
r.Get("/ingests/{debrid}", s.handleIngestsByDebrid) r.Get("/ingests/{debrid}", s.handleIngestsByDebrid)
}) })
@@ -75,7 +77,7 @@ func (s *Server) Start(ctx context.Context) error {
} }
func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) { func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
logFile := logger.GetLogPath() logFile := filepath.Join(logger.GetLogPath(), "decypharr.log")
// Open and read the file // Open and read the file
file, err := os.Open(logFile) file, err := os.Open(logFile)
@@ -98,5 +100,42 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Expires", "0") w.Header().Set("Expires", "0")
// Stream the file // Stream the file
_, _ = io.Copy(w, file) if _, err := io.Copy(w, file); err != nil {
s.logger.Error().Err(err).Msg("Error streaming log file")
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
return
}
}
func (s *Server) getRcloneLogs(w http.ResponseWriter, r *http.Request) {
// Rclone logs resides in the same directory as the application logs
logFile := filepath.Join(logger.GetLogPath(), "rclone.log")
// Open and read the file
file, err := os.Open(logFile)
if err != nil {
http.Error(w, "Error reading log file", http.StatusInternalServerError)
return
}
defer func(file *os.File) {
err := file.Close()
if err != nil {
s.logger.Error().Err(err).Msg("Error closing log file")
return
}
}(file)
// Set headers
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("Content-Disposition", "inline; filename=application.log")
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
w.Header().Set("Pragma", "no-cache")
w.Header().Set("Expires", "0")
// Stream the file
if _, err := io.Copy(w, file); err != nil {
s.logger.Error().Err(err).Msg("Error streaming log file")
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
return
}
} }

View File

@@ -3,7 +3,7 @@ package server
import ( import (
"cmp" "cmp"
"encoding/json" "encoding/json"
"github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/wire"
"net/http" "net/http"
) )
@@ -38,7 +38,7 @@ func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Invalid ID", http.StatusBadRequest) http.Error(w, "Invalid ID", http.StatusBadRequest)
return return
} }
repair := store.Get().Repair() repair := wire.Get().Repair()
mediaId := cmp.Or(payload.TmdbID, payload.TvdbID) mediaId := cmp.Or(payload.TmdbID, payload.TvdbID)

View File

@@ -1,318 +0,0 @@
package store
import (
"fmt"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/cavaliergopher/grab/v3"
"github.com/sirrobot01/decypharr/internal/utils"
)
// grabber downloads url into filename using the provided grab client,
// optionally restricting the transfer to a byte range, and reports
// incremental progress via progressCallback(deltaBytes, bytesPerSecond).
//
// The callback fires at most once every two seconds with the bytes
// completed since the previous report; a final call (speed 0) flushes
// any remainder once the transfer finishes. Returns the transfer error,
// if any.
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
	req, err := grab.NewRequest(filename, url)
	if err != nil {
		return err
	}
	if byterange != nil {
		// Restrict the download to the requested byte window.
		req.HTTPRequest.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byterange[0], byterange[1]))
	}

	resp := client.Do(req)

	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	var reported int64
	running := true
	for running {
		select {
		case <-ticker.C:
			completed := resp.BytesComplete()
			if completed != reported {
				if progressCallback != nil {
					progressCallback(completed-reported, int64(resp.BytesPerSecond()))
				}
				reported = completed
			}
		case <-resp.Done:
			running = false
		}
	}

	// Flush whatever was downloaded since the last tick (speed 0).
	if progressCallback != nil {
		progressCallback(resp.BytesComplete()-reported, 0)
	}
	return resp.Err()
}
// processDownload prepares the on-disk directory for a debrid torrent
// and downloads all of its files into it. Returns the directory path.
//
// NOTE(review): downloadFiles logs per-file failures but does not return
// them, so a partially failed download still yields a nil error here —
// confirm callers are happy with best-effort semantics.
func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
	s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))

	// Directory name is derived from the original torrent filename with
	// its extension and any invalid path characters removed.
	dir := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename))
	dir = utils.RemoveInvalidChars(dir)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", dir, err)
	}

	s.downloadFiles(torrent, debridTorrent, dir)
	return dir, nil
}
// downloadFiles downloads every file of debridTorrent into parent,
// bounded by the store's download semaphore, and streams aggregate
// progress (bytes downloaded, speed, percent) back into both torrent
// records via partialTorrentUpdate.
//
// Failures are logged and collected but not returned; callers cannot
// distinguish a partial failure from success.
//
// Fixes vs. the previous version: GetFiles() was called twice (once for
// the size tally, once for the download loop) while the error channel
// was sized from len(debridTorrent.Files) — if GetFiles() returned more
// entries than Files, sender goroutines could block forever on a full
// channel. The file list is now snapshotted once and sizes everything.
func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) {
	// Snapshot the file list once so the size tally, the error channel
	// capacity and the download loop all agree on the same set of files.
	files := debridTorrent.GetFiles()

	totalSize := int64(0)
	for _, file := range files {
		totalSize += file.Size
	}

	// Reset progress counters before starting.
	debridTorrent.Lock()
	debridTorrent.SizeDownloaded = 0
	debridTorrent.Progress = 0
	debridTorrent.Unlock()

	progressCallback := func(downloaded int64, speed int64) {
		debridTorrent.Lock()
		defer debridTorrent.Unlock()
		torrent.Lock()
		defer torrent.Unlock()

		// Update total downloaded bytes and recompute overall progress.
		debridTorrent.SizeDownloaded += downloaded
		debridTorrent.Speed = speed
		if totalSize > 0 {
			debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
		}
		s.partialTorrentUpdate(torrent, debridTorrent)
	}

	client := &grab.Client{
		UserAgent: "Decypharr[QBitTorrent]",
		HTTPClient: &http.Client{
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
			},
		},
	}

	var wg sync.WaitGroup
	// Capacity matches the maximum number of senders so no goroutine can
	// block on send after wg.Wait returns and the channel is closed.
	errChan := make(chan error, len(files))
	for _, file := range files {
		if file.DownloadLink == nil {
			s.logger.Info().Msgf("No download link found for %s", file.Name)
			continue
		}
		wg.Add(1)
		s.downloadSemaphore <- struct{}{} // bound concurrent downloads
		go func(file types.File) {
			defer wg.Done()
			defer func() { <-s.downloadSemaphore }()
			filename := file.Name
			err := grabber(
				client,
				file.DownloadLink.DownloadLink,
				filepath.Join(parent, filename),
				file.ByteRange,
				progressCallback,
			)
			if err != nil {
				s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
				errChan <- err
			} else {
				s.logger.Info().Msgf("Downloaded %s", filename)
			}
		}(file)
	}
	wg.Wait()
	close(errChan)

	// errs (not "errors") avoids shadowing the stdlib package name.
	errs := make([]error, 0)
	for err := range errChan {
		if err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		s.logger.Error().Msgf("Errors occurred during download: %v", errs)
		return
	}
	s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
// processSymlink waits for a torrent's files to appear on the rclone
// mount and mirrors them as symlinks under the torrent's save path.
// Returns the directory that contains the created symlinks.
//
// It polls every 200ms for up to 30 minutes; files still missing after
// the timeout produce an error (the partial symlink path is still
// returned). Unless skipPreCache is set, created links are pre-cached
// in a background goroutine (best effort; errors only logged).
func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
	files := debridTorrent.Files
	if len(files) == 0 {
		return "", fmt.Errorf("no valid files found")
	}
	s.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
	rCloneBase := debridTorrent.MountPath
	torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
	// This returns filename.ext for alldebrid instead of the parent folder filename/
	torrentFolder := torrentPath
	if err != nil {
		return "", fmt.Errorf("failed to get torrent path: %v", err)
	}
	// Check if the torrent path is a file
	torrentRclonePath := filepath.Join(rCloneBase, torrentPath) // leave it as is
	if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentPath) {
		// Alldebrid hotfix for single file torrents
		torrentFolder = utils.RemoveExtension(torrentFolder)
		torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
	}
	torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
	err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
	}
	// Map of on-disk filename -> path relative to the rclone torrent dir,
	// used to reconcile debrid metadata names with the actual mount layout.
	realPaths := make(map[string]string)
	err = filepath.WalkDir(torrentRclonePath, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			// Skip unreadable entries; the poll loop below retries anyway.
			return nil
		}
		if !d.IsDir() {
			filename := d.Name()
			rel, _ := filepath.Rel(torrentRclonePath, path)
			realPaths[filename] = rel
		}
		return nil
	})
	if err != nil {
		s.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
	}
	// Files still waiting to materialize on the mount, keyed by relative path.
	pending := make(map[string]types.File)
	for _, file := range files {
		if realRelPath, ok := realPaths[file.Name]; ok {
			file.Path = realRelPath
		}
		pending[file.Path] = file
	}
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(pending))
	for len(pending) > 0 {
		select {
		case <-ticker.C:
			for path, file := range pending {
				fullFilePath := filepath.Join(torrentRclonePath, file.Path)
				if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
					fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						s.logger.Warn().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						// Deleting while ranging over a map is safe in Go.
						delete(pending, path)
						s.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
			return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending))
		}
	}
	if s.skipPreCache {
		return torrentSymlinkPath, nil
	}
	// Pre-cache the linked files in the background (best effort).
	go func() {
		s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
		if err := utils.PreCacheFile(filePaths); err != nil {
			s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
		}
	}()
	return torrentSymlinkPath, nil
}
// createSymlinksWebdav waits for a torrent's files to appear under
// rclonePath and creates symlinks for them in torrentFolder beneath the
// torrent's save path. Returns the symlink directory.
//
// It re-lists the directory every 100ms for up to 30 minutes; files
// still missing after the timeout produce an error (the partial symlink
// path is still returned). Unless skipPreCache is set, created links
// are pre-cached in a background goroutine (best effort; errors only
// logged).
func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) {
	files := debridTorrent.Files
	symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
	err := os.MkdirAll(symlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
	}
	// Files not yet linked, keyed by filename as reported by the debrid.
	remainingFiles := make(map[string]types.File)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(files))
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			entries, err := os.ReadDir(rclonePath)
			if err != nil {
				// Directory may not be visible yet; retry on the next tick.
				continue
			}
			// Check which files exist in this batch
			for _, entry := range entries {
				filename := entry.Name()
				if file, exists := remainingFiles[filename]; exists {
					fullFilePath := filepath.Join(rclonePath, filename)
					fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						delete(remainingFiles, filename)
						s.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return symlinkPath, fmt.Errorf("timeout waiting for files")
		}
	}
	if s.skipPreCache {
		return symlinkPath, nil
	}
	go func() {
		s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
		if err := utils.PreCacheFile(filePaths); err != nil {
			s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
		}
	}() // Pre-cache the files in the background
	// Pre-cache the first 256KB and 1MB of the file
	return symlinkPath, nil
}
// getTorrentPath resolves the torrent's folder name under the rclone
// mount, polling every 100ms until the mount exposes it.
//
// The previous version looped forever; a 30-minute deadline (matching
// the symlink wait loops in this file) now bounds the wait so a torrent
// that never materializes cannot hang the caller indefinitely. Callers
// already handle an error return.
func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) {
	const pollInterval = 100 * time.Millisecond
	deadline := time.Now().Add(30 * time.Minute)
	for {
		torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
		if err == nil {
			s.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
			return torrentPath, nil
		}
		if time.Now().After(deadline) {
			return "", fmt.Errorf("timed out waiting for torrent folder in %s: %v", rclonePath, err)
		}
		time.Sleep(pollInterval)
	}
}

View File

@@ -2,7 +2,7 @@ package web
import ( import (
"fmt" "fmt"
"github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/wire"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
"net/http" "net/http"
"strings" "strings"
@@ -18,7 +18,7 @@ import (
) )
func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) { func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) {
_store := store.Get() _store := wire.Get()
request.JSONResponse(w, _store.Arr().GetAll(), http.StatusOK) request.JSONResponse(w, _store.Arr().GetAll(), http.StatusOK)
} }
@@ -28,9 +28,9 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
} }
_store := store.Get() _store := wire.Get()
results := make([]*store.ImportRequest, 0) results := make([]*wire.ImportRequest, 0)
errs := make([]string, 0) errs := make([]string, 0)
arrName := r.FormValue("arr") arrName := r.FormValue("arr")
@@ -66,7 +66,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue continue
} }
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI) importReq := wire.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, wire.ImportTypeAPI)
if err := _store.AddTorrent(ctx, importReq); err != nil { if err := _store.AddTorrent(ctx, importReq); err != nil {
wb.logger.Error().Err(err).Str("url", url).Msg("Failed to add torrent") wb.logger.Error().Err(err).Str("url", url).Msg("Failed to add torrent")
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err)) errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
@@ -91,7 +91,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue continue
} }
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI) importReq := wire.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, wire.ImportTypeAPI)
err = _store.AddTorrent(ctx, importReq) err = _store.AddTorrent(ctx, importReq)
if err != nil { if err != nil {
wb.logger.Error().Err(err).Str("file", fileHeader.Filename).Msg("Failed to add torrent") wb.logger.Error().Err(err).Str("file", fileHeader.Filename).Msg("Failed to add torrent")
@@ -103,8 +103,8 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
} }
request.JSONResponse(w, struct { request.JSONResponse(w, struct {
Results []*store.ImportRequest `json:"results"` Results []*wire.ImportRequest `json:"results"`
Errors []string `json:"errors,omitempty"` Errors []string `json:"errors,omitempty"`
}{ }{
Results: results, Results: results,
Errors: errs, Errors: errs,
@@ -118,7 +118,7 @@ func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
return return
} }
_store := store.Get() _store := wire.Get()
var arrs []string var arrs []string
@@ -186,7 +186,7 @@ func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) {
// Merge config arrs, with arr Storage // Merge config arrs, with arr Storage
unique := map[string]config.Arr{} unique := map[string]config.Arr{}
cfg := config.Get() cfg := config.Get()
arrStorage := store.Get().Arr() arrStorage := wire.Get().Arr()
// Add existing Arrs from storage // Add existing Arrs from storage
for _, a := range arrStorage.GetAll() { for _, a := range arrStorage.GetAll() {
@@ -256,6 +256,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
currentConfig.RemoveStalledAfter = updatedConfig.RemoveStalledAfter currentConfig.RemoveStalledAfter = updatedConfig.RemoveStalledAfter
currentConfig.AllowedExt = updatedConfig.AllowedExt currentConfig.AllowedExt = updatedConfig.AllowedExt
currentConfig.DiscordWebhook = updatedConfig.DiscordWebhook currentConfig.DiscordWebhook = updatedConfig.DiscordWebhook
currentConfig.CallbackURL = updatedConfig.CallbackURL
// Should this be added? // Should this be added?
currentConfig.URLBase = updatedConfig.URLBase currentConfig.URLBase = updatedConfig.URLBase
@@ -276,7 +277,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
} }
// Update Arrs through the service // Update Arrs through the service
storage := store.Get() storage := wire.Get()
arrStorage := storage.Arr() arrStorage := storage.Arr()
newConfigArrs := make([]config.Arr, 0) newConfigArrs := make([]config.Arr, 0)
@@ -320,7 +321,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
if restartFunc != nil { if restartFunc != nil {
go func() { go func() {
// Small delay to ensure the response is sent // Small delay to ensure the response is sent
time.Sleep(500 * time.Millisecond) time.Sleep(200 * time.Millisecond)
restartFunc() restartFunc()
}() }()
} }
@@ -330,7 +331,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
} }
func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) { func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) {
_store := store.Get() _store := wire.Get()
request.JSONResponse(w, _store.Repair().GetJobs(), http.StatusOK) request.JSONResponse(w, _store.Repair().GetJobs(), http.StatusOK)
} }
@@ -340,7 +341,7 @@ func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) {
http.Error(w, "No job ID provided", http.StatusBadRequest) http.Error(w, "No job ID provided", http.StatusBadRequest)
return return
} }
_store := store.Get() _store := wire.Get()
if err := _store.Repair().ProcessJob(id); err != nil { if err := _store.Repair().ProcessJob(id); err != nil {
wb.logger.Error().Err(err).Msg("Failed to process repair job") wb.logger.Error().Err(err).Msg("Failed to process repair job")
} }
@@ -361,7 +362,7 @@ func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) {
return return
} }
_store := store.Get() _store := wire.Get()
_store.Repair().DeleteJobs(req.IDs) _store.Repair().DeleteJobs(req.IDs)
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
} }
@@ -372,7 +373,7 @@ func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) {
http.Error(w, "No job ID provided", http.StatusBadRequest) http.Error(w, "No job ID provided", http.StatusBadRequest)
return return
} }
_store := store.Get() _store := wire.Get()
if err := _store.Repair().StopJob(id); err != nil { if err := _store.Repair().StopJob(id); err != nil {
wb.logger.Error().Err(err).Msg("Failed to stop repair job") wb.logger.Error().Err(err).Msg("Failed to stop repair job")
http.Error(w, "Failed to stop job: "+err.Error(), http.StatusInternalServerError) http.Error(w, "Failed to stop job: "+err.Error(), http.StatusInternalServerError)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -414,8 +414,91 @@ class DecypharrUtils {
} }
} }
// Mobile navigation dropdown handler
setupMobileNavigation() {
const mobileMenuBtn = document.querySelector('.navbar-start .dropdown [role="button"]');
const mobileMenu = document.querySelector('.navbar-start .dropdown .dropdown-content');
const dropdown = document.querySelector('.navbar-start .dropdown');
if (!mobileMenuBtn || !mobileMenu || !dropdown) return;
let isOpen = false;
const openDropdown = () => {
if (!isOpen) {
dropdown.classList.add('dropdown-open');
mobileMenuBtn.setAttribute('aria-expanded', 'true');
isOpen = true;
}
};
const closeDropdown = () => {
if (isOpen) {
dropdown.classList.remove('dropdown-open');
mobileMenuBtn.setAttribute('aria-expanded', 'false');
isOpen = false;
}
};
const toggleDropdown = (e) => {
e.preventDefault();
e.stopPropagation();
if (isOpen) {
closeDropdown();
} else {
openDropdown();
}
};
// Handle button clicks (both mouse and touch)
mobileMenuBtn.addEventListener('click', toggleDropdown);
mobileMenuBtn.addEventListener('touchend', (e) => {
e.preventDefault();
toggleDropdown(e);
});
// Close dropdown when clicking outside
document.addEventListener('click', (e) => {
if (isOpen && !dropdown.contains(e.target)) {
closeDropdown();
}
});
// Close dropdown when touching outside
document.addEventListener('touchend', (e) => {
if (isOpen && !dropdown.contains(e.target)) {
closeDropdown();
}
});
// Close dropdown when clicking menu items
mobileMenu.addEventListener('click', (e) => {
if (e.target.tagName === 'A') {
closeDropdown();
}
});
// Handle keyboard navigation
mobileMenuBtn.addEventListener('keydown', (e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
toggleDropdown(e);
} else if (e.key === 'Escape') {
closeDropdown();
}
});
// Set initial aria attributes
mobileMenuBtn.setAttribute('aria-expanded', 'false');
mobileMenuBtn.setAttribute('aria-haspopup', 'true');
}
// Global event listeners // Global event listeners
setupGlobalEventListeners() { setupGlobalEventListeners() {
// Setup mobile navigation dropdown
this.setupMobileNavigation();
// Smooth scroll for anchor links // Smooth scroll for anchor links
document.addEventListener('click', (e) => { document.addEventListener('click', (e) => {
const link = e.target.closest('a[href^="#"]'); const link = e.target.closest('a[href^="#"]');

View File

@@ -149,10 +149,11 @@ class ConfigManager {
if (!rcloneConfig) return; if (!rcloneConfig) return;
const fields = [ const fields = [
'enabled', 'mount_path', 'cache_dir', 'vfs_cache_mode', 'vfs_cache_max_size', 'vfs_cache_max_age', 'enabled', 'rc_port', 'mount_path', 'cache_dir', 'transfers', 'vfs_cache_mode', 'vfs_cache_max_size', 'vfs_cache_max_age',
'vfs_cache_poll_interval', 'vfs_read_chunk_size', 'vfs_read_chunk_size_limit', 'buffer_size', 'vfs_cache_poll_interval', 'vfs_read_chunk_size', 'vfs_read_chunk_size_limit', 'buffer_size',
'uid', 'gid', 'vfs_read_ahead', 'attr_timeout', 'dir_cache_time', 'poll_interval', 'umask', 'uid', 'gid', 'vfs_read_ahead', 'attr_timeout', 'dir_cache_time', 'poll_interval', 'umask',
'no_modtime', 'no_checksum' 'no_modtime', 'no_checksum', 'log_level', 'vfs_cache_min_free_space', 'vfs_fast_fingerprint', 'vfs_read_chunk_streams',
'async_read', 'use_mmap'
]; ];
fields.forEach(field => { fields.forEach(field => {
@@ -246,7 +247,7 @@ class ConfigManager {
<select class="select select-bordered" name="debrid[${index}].name" id="debrid[${index}].name" required> <select class="select select-bordered" name="debrid[${index}].name" id="debrid[${index}].name" required>
<option value="realdebrid">Real Debrid</option> <option value="realdebrid">Real Debrid</option>
<option value="alldebrid">AllDebrid</option> <option value="alldebrid">AllDebrid</option>
<option value="debrid_link">Debrid Link</option> <option value="debridlink">Debrid Link</option>
<option value="torbox">Torbox</option> <option value="torbox">Torbox</option>
</select> </select>
</div> </div>
@@ -273,7 +274,6 @@ class ConfigManager {
<div class="form-control flex-1"> <div class="form-control flex-1">
<label class="label" for="debrid[${index}].download_api_keys"> <label class="label" for="debrid[${index}].download_api_keys">
<span class="label-text font-medium">Download API Keys</span> <span class="label-text font-medium">Download API Keys</span>
<span class="badge badge-ghost badge-sm">Optional</span>
</label> </label>
<div class="password-toggle-container"> <div class="password-toggle-container">
<textarea class="textarea textarea-bordered has-toggle font-mono h-full min-h-[200px]" <textarea class="textarea textarea-bordered has-toggle font-mono h-full min-h-[200px]"
@@ -290,7 +290,7 @@ class ConfigManager {
</div> </div>
</div> </div>
<div class="space-y-4"> <div class="space-y-4">
<div class="grid grid-cols-1 lg:grid-cols-2 gap-6"> <div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control"> <div class="form-control">
<label class="label" for="debrid[${index}].folder"> <label class="label" for="debrid[${index}].folder">
<span class="label-text font-medium">Mount/Rclone Folder</span> <span class="label-text font-medium">Mount/Rclone Folder</span>
@@ -302,6 +302,21 @@ class ConfigManager {
<span class="label-text-alt">Path where debrid files are mounted</span> <span class="label-text-alt">Path where debrid files are mounted</span>
</div> </div>
</div> </div>
<div class="form-control">
<label class="label" for="debrid[${index}].rclone_mount_path">
<span class="label-text font-medium">Custom Rclone Mount Path</span>
<span class="badge badge-ghost badge-sm">Optional</span>
</label>
<input type="text" class="input input-bordered"
name="debrid[${index}].rclone_mount_path" id="debrid[${index}].rclone_mount_path"
placeholder="/custom/mount/path (leave empty for global mount path)">
<div class="label">
<span class="label-text-alt">Custom mount path for this debrid service. If empty, uses global rclone mount path.</span>
</div>
</div>
</div>
<div class="grid grid-cols-2 lg:grid-cols-3 gap-3">
<div class="form-control"> <div class="form-control">
<label class="label" for="debrid[${index}].rate_limit"> <label class="label" for="debrid[${index}].rate_limit">
<span class="label-text font-medium">Rate Limit</span> <span class="label-text font-medium">Rate Limit</span>
@@ -324,6 +339,17 @@ class ConfigManager {
<span class="label-text-alt">This proxy is used for this debrid account</span> <span class="label-text-alt">This proxy is used for this debrid account</span>
</div> </div>
</div> </div>
<div class="form-control">
<label class="label" for="debrid[${index}].minimum_free_slot">
<span class="label-text font-medium">Minimum Free Slot</span>
</label>
<input type="number" class="input input-bordered"
name="debrid[${index}].minimum_free_slot" id="debrid[${index}].minimum_free_slot"
placeholder="1" value="1">
<div class="label">
<span class="label-text-alt">Minimum free slot for this debrid</span>
</div>
</div>
</div> </div>
</div> </div>
@@ -531,11 +557,6 @@ class ConfigManager {
if (toggle.checked || forceShow) { if (toggle.checked || forceShow) {
webdavSection.classList.remove('hidden'); webdavSection.classList.remove('hidden');
// Add required attributes to key fields
webdavSection.querySelectorAll('input[name$=".torrents_refresh_interval"]').forEach(el => el.required = true);
webdavSection.querySelectorAll('input[name$=".download_links_refresh_interval"]').forEach(el => el.required = true);
webdavSection.querySelectorAll('input[name$=".auto_expire_links_after"]').forEach(el => el.required = true);
webdavSection.querySelectorAll('input[name$=".workers"]').forEach(el => el.required = true);
} else { } else {
webdavSection.classList.add('hidden'); webdavSection.classList.add('hidden');
// Remove required attributes // Remove required attributes
@@ -877,7 +898,7 @@ class ConfigManager {
<input type="hidden" name="arr[${index}].source" value="${data.source || ''}"> <input type="hidden" name="arr[${index}].source" value="${data.source || ''}">
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4"> <div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control"> <div class="form-control">
<label class="label" for="arr[${index}].name"> <label class="label" for="arr[${index}].name">
<span class="label-text font-medium">Service Name</span> <span class="label-text font-medium">Service Name</span>
@@ -912,9 +933,7 @@ class ConfigManager {
</button> </button>
</div> </div>
</div> </div>
</div>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4 mt-4">
<div class="form-control"> <div class="form-control">
<label class="label" for="arr[${index}].selected_debrid"> <label class="label" for="arr[${index}].selected_debrid">
<span class="label-text font-medium">Preferred Debrid Service</span> <span class="label-text font-medium">Preferred Debrid Service</span>
@@ -923,40 +942,38 @@ class ConfigManager {
<option value="" selected>Auto-select</option> <option value="" selected>Auto-select</option>
<option value="realdebrid">Real Debrid</option> <option value="realdebrid">Real Debrid</option>
<option value="alldebrid">AllDebrid</option> <option value="alldebrid">AllDebrid</option>
<option value="debrid_link">Debrid Link</option> <option value="debridlink">Debrid Link</option>
<option value="torbox">Torbox</option> <option value="torbox">Torbox</option>
</select> </select>
<div class="label"> <div class="label">
<span class="label-text-alt">Which debrid service this Arr should prefer</span> <span class="label-text-alt">Which debrid service this Arr should prefer</span>
</div> </div>
</div> </div>
</div>
<div class="flex flex-col justify-end"> <div class="grid grid-cols-3 gap-4">
<div class="grid grid-cols-3 gap-2"> <div class="form-control">
<div class="form-control"> <label class="label cursor-pointer justify-start gap-2">
<label class="label cursor-pointer justify-start gap-2"> <input type="checkbox" class="checkbox checkbox-sm"
<input type="checkbox" class="checkbox checkbox-sm" name="arr[${index}].cleanup" id="arr[${index}].cleanup">
name="arr[${index}].cleanup" id="arr[${index}].cleanup"> <span class="label-text text-sm">Cleanup Queue</span>
<span class="label-text text-sm">Cleanup Queue</span> </label>
</label> </div>
</div>
<div class="form-control"> <div class="form-control">
<label class="label cursor-pointer justify-start gap-2"> <label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm" <input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].skip_repair" id="arr[${index}].skip_repair"> name="arr[${index}].skip_repair" id="arr[${index}].skip_repair">
<span class="label-text text-sm">Skip Repair</span> <span class="label-text text-sm">Skip Repair</span>
</label> </label>
</div> </div>
<div class="form-control"> <div class="form-control">
<label class="label cursor-pointer justify-start gap-2"> <label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm" <input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].download_uncached" id="arr[${index}].download_uncached"> name="arr[${index}].download_uncached" id="arr[${index}].download_uncached">
<span class="label-text text-sm">Download Uncached</span> <span class="label-text text-sm">Download Uncached</span>
</label> </label>
</div>
</div>
</div> </div>
</div> </div>
</div> </div>
@@ -1064,6 +1081,7 @@ class ConfigManager {
min_file_size: document.getElementById('minFileSize').value, min_file_size: document.getElementById('minFileSize').value,
max_file_size: document.getElementById('maxFileSize').value, max_file_size: document.getElementById('maxFileSize').value,
remove_stalled_after: document.getElementById('removeStalledAfter').value, remove_stalled_after: document.getElementById('removeStalledAfter').value,
callback_url: document.getElementById('callbackUrl').value,
// Debrid configurations // Debrid configurations
debrids: this.collectDebridConfigs(), debrids: this.collectDebridConfigs(),
@@ -1094,6 +1112,8 @@ class ConfigManager {
api_key: document.querySelector(`[name="debrid[${i}].api_key"]`).value, api_key: document.querySelector(`[name="debrid[${i}].api_key"]`).value,
folder: document.querySelector(`[name="debrid[${i}].folder"]`).value, folder: document.querySelector(`[name="debrid[${i}].folder"]`).value,
rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value, rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value,
minimum_free_slot: parseInt(document.querySelector(`[name="debrid[${i}].minimum_free_slot"]`).value) || 0,
rclone_mount_path: document.querySelector(`[name="debrid[${i}].rclone_mount_path"]`).value,
proxy: document.querySelector(`[name="debrid[${i}].proxy"]`).value, proxy: document.querySelector(`[name="debrid[${i}].proxy"]`).value,
download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked, download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked,
unpack_rar: document.querySelector(`[name="debrid[${i}].unpack_rar"]`).checked, unpack_rar: document.querySelector(`[name="debrid[${i}].unpack_rar"]`).checked,
@@ -1222,15 +1242,22 @@ class ConfigManager {
return { return {
enabled: getElementValue('enabled', false), enabled: getElementValue('enabled', false),
rc_port: getElementValue('rc_port', "5572"),
mount_path: getElementValue('mount_path'), mount_path: getElementValue('mount_path'),
buffer_size: getElementValue('buffer_size'), buffer_size: getElementValue('buffer_size'),
cache_dir: getElementValue('cache_dir'), cache_dir: getElementValue('cache_dir'),
transfers: getElementValue('transfers', 8),
vfs_cache_mode: getElementValue('vfs_cache_mode', 'off'), vfs_cache_mode: getElementValue('vfs_cache_mode', 'off'),
vfs_cache_max_age: getElementValue('vfs_cache_max_age', '1h'), vfs_cache_max_age: getElementValue('vfs_cache_max_age', '1h'),
vfs_cache_max_size: getElementValue('vfs_cache_max_size'), vfs_cache_max_size: getElementValue('vfs_cache_max_size'),
vfs_cache_poll_interval: getElementValue('vfs_cache_poll_interval', '1m'), vfs_cache_poll_interval: getElementValue('vfs_cache_poll_interval', '1m'),
vfs_read_chunk_size: getElementValue('vfs_read_chunk_size', '128M'), vfs_read_chunk_size: getElementValue('vfs_read_chunk_size', '128M'),
vfs_read_chunk_size_limit: getElementValue('vfs_read_chunk_size_limit', 'off'), vfs_read_chunk_size_limit: getElementValue('vfs_read_chunk_size_limit', 'off'),
vfs_cache_min_free_space: getElementValue('vfs_cache_min_free_space', ''),
vfs_fast_fingerprint: getElementValue('vfs_fast_fingerprint', false),
vfs_read_chunk_streams: getElementValue('vfs_read_chunk_streams', 0),
use_mmap: getElementValue('use_mmap', false),
async_read: getElementValue('async_read', true),
uid: getElementValue('uid', 0), uid: getElementValue('uid', 0),
gid: getElementValue('gid', 0), gid: getElementValue('gid', 0),
umask: getElementValue('umask', ''), umask: getElementValue('umask', ''),
@@ -1239,6 +1266,7 @@ class ConfigManager {
dir_cache_time: getElementValue('dir_cache_time', '5m'), dir_cache_time: getElementValue('dir_cache_time', '5m'),
no_modtime: getElementValue('no_modtime', false), no_modtime: getElementValue('no_modtime', false),
no_checksum: getElementValue('no_checksum', false), no_checksum: getElementValue('no_checksum', false),
log_level: getElementValue('log_level', 'INFO'),
}; };
} }

View File

@@ -3,9 +3,10 @@ package web
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/sirrobot01/decypharr/internal/config"
"net/http" "net/http"
"strings" "strings"
"github.com/sirrobot01/decypharr/internal/config"
) )
func (wb *Web) setupMiddleware(next http.Handler) http.Handler { func (wb *Web) setupMiddleware(next http.Handler) http.Handler {
@@ -79,7 +80,7 @@ func (wb *Web) sendJSONError(w http.ResponseWriter, message string, statusCode i
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(statusCode) w.WriteHeader(statusCode)
json.NewEncoder(w).Encode(map[string]interface{}{ json.NewEncoder(w).Encode(map[string]interface{}{
"error": message, "error": message,
"status": statusCode, "status": statusCode,
}) })
} }

View File

@@ -120,7 +120,7 @@
</div> </div>
</div> </div>
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4"> <div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control"> <div class="form-control">
<label class="label" for="minFileSize"> <label class="label" for="minFileSize">
<span class="label-text font-medium">Minimum File Size</span> <span class="label-text font-medium">Minimum File Size</span>
@@ -150,6 +150,15 @@
<span class="label-text-alt">Duration before removing stalled torrents</span> <span class="label-text-alt">Duration before removing stalled torrents</span>
</div> </div>
</div> </div>
<div class="form-control">
<label class="label" for="callbackUrl">
<span class="label-text font-medium">Callback URL</span>
</label>
<input type="text" class="input input-bordered" id="callbackUrl" name="callback_url" placeholder="http://example.com/callback">
<div class="label">
<span class="label-text-alt">Optional callback URL for download status updates</span>
</div>
</div>
</div> </div>
<!-- Authentication Settings Section --> <!-- Authentication Settings Section -->
@@ -364,71 +373,68 @@
</div> </div>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4"> <div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="space-y-4"> <div class="form-control">
<div class="form-control"> <label class="label" for="repair.interval">
<label class="label" for="repair.interval"> <span class="label-text font-medium">Repair Interval</span>
<span class="label-text font-medium">Repair Interval</span> </label>
</label> <input type="text" class="input input-bordered" name="repair.interval" id="repair.interval" placeholder="24h">
<input type="text" class="input input-bordered" name="repair.interval" id="repair.interval" placeholder="24h"> <div class="label">
<div class="label"> <span class="label-text-alt">How often to run repair (e.g., 24h, 1d, 03:00, or crontab)</span>
<span class="label-text-alt">How often to run repair (e.g., 24h, 1d, 03:00, or crontab)</span>
</div>
</div>
<div class="form-control">
<label class="label" for="repair.workers">
<span class="label-text font-medium">Worker Threads</span>
</label>
<input type="number" class="input input-bordered" name="repair.workers" id="repair.workers" min="1" placeholder="40">
<div class="label">
<span class="label-text-alt">Number of concurrent repair workers</span>
</div>
</div>
<div class="form-control">
<label class="label" for="repair.strategy">
<span class="label-text font-medium">Repair Strategy</span>
</label>
<select class="select select-bordered" name="repair.strategy" id="repair.strategy">
<option value="per_torrent" selected>Per Torrent</option>
<option value="per_file">Per File</option>
</select>
<div class="label">
<span class="label-text-alt">How to handle repairs</span>
</div>
</div> </div>
</div> </div>
<div class="space-y-4"> <div class="form-control">
<div class="form-control"> <label class="label" for="repair.workers">
<label class="label" for="repair.zurg_url"> <span class="label-text font-medium">Worker Threads</span>
<span class="label-text font-medium">Zurg URL</span> </label>
</label> <input type="number" class="input input-bordered" name="repair.workers" id="repair.workers" min="1" placeholder="40">
<input type="url" class="input input-bordered" name="repair.zurg_url" id="repair.zurg_url" placeholder="http://zurg:9999"> <div class="label">
<div class="label"> <span class="label-text-alt">Number of concurrent repair workers</span>
<span class="label-text-alt">Optional Zurg instance to speed up repairs</span> </div>
</div>
<div class="form-control">
<label class="label" for="repair.strategy">
<span class="label-text font-medium">Repair Strategy</span>
</label>
<select class="select select-bordered" name="repair.strategy" id="repair.strategy">
<option value="per_torrent" selected>Per Torrent</option>
<option value="per_file">Per File</option>
</select>
<div class="label">
<span class="label-text-alt">How to handle repairs</span>
</div>
</div>
<div class="form-control">
<label class="label" for="repair.zurg_url">
<span class="label-text font-medium">Zurg URL</span>
</label>
<input type="url" class="input input-bordered" name="repair.zurg_url" id="repair.zurg_url" placeholder="http://zurg:9999">
<div class="label">
<span class="label-text-alt">Optional Zurg instance to speed up repairs</span>
</div>
</div>
</div>
<div class="grid grid-cols-2 lg:grid-cols-3 gap-4">
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="repair.use_webdav" id="repair.use_webdav">
<div>
<span class="label-text font-medium">Use WebDAV</span>
<div class="label-text-alt">Use internal WebDAV for repairs</div>
</div> </div>
</div> </label>
</div>
<div class="form-control"> <div class="form-control">
<label class="label cursor-pointer justify-start gap-3"> <label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="repair.use_webdav" id="repair.use_webdav"> <input type="checkbox" class="checkbox" name="repair.auto_process" id="repair.auto_process">
<div> <div>
<span class="label-text font-medium">Use WebDAV</span> <span class="label-text font-medium">Auto Process</span>
<div class="label-text-alt">Use internal WebDAV for repairs</div> <div class="label-text-alt">Automatically delete broken symlinks and re-search</div>
</div> </div>
</label> </label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="repair.auto_process" id="repair.auto_process">
<div>
<span class="label-text font-medium">Auto Process</span>
<div class="label-text-alt">Automatically delete broken symlinks and re-search</div>
</div>
</label>
</div>
</div> </div>
</div> </div>
</div> </div>
@@ -466,6 +472,25 @@
</div> </div>
</div> </div>
<div class="form-control">
<label class="label" for="rclone.rc_port">
<span class="label-text font-medium">RC Port</span>
</label>
<input type="text" class="input input-bordered" name="rclone.rc_port" id="rclone.rc_port">
</div>
<div class="form-control">
<label class="label" for="rclone.log_level">
<span class="label-text font-medium">Log Level</span>
</label>
<select class="select select-bordered" name="rclone.log_level" id="rclone.log_level">
<option value="INFO">INFO</option>
<option value="DEBUG">DEBUG</option>
<option value="NOTICE">NOTICE</option>
<option value="ERROR">ERROR</option>
</select>
</div>
<div class="form-control"> <div class="form-control">
<label class="label" for="rclone.uid"> <label class="label" for="rclone.uid">
<span class="label-text font-medium">User ID (PUID)</span> <span class="label-text font-medium">User ID (PUID)</span>
@@ -487,7 +512,7 @@
</div> </div>
<div class="form-control"> <div class="form-control">
<label class="label" for="rclone.umask"> <label class="label" for="rclone.umask">
<span class="label-text font-medium">Group ID (PGID)</span> <span class="label-text font-medium">UMASK</span>
</label> </label>
<input type="text" class="input input-bordered" name="rclone.umask" id="rclone.umask" placeholder="0022"> <input type="text" class="input input-bordered" name="rclone.umask" id="rclone.umask" placeholder="0022">
<div class="label"> <div class="label">
@@ -512,6 +537,15 @@
<span class="label-text-alt">How long the kernel caches the attributes (size, modification time, etc.)</span> <span class="label-text-alt">How long the kernel caches the attributes (size, modification time, etc.)</span>
</div> </div>
</div> </div>
<div class="form-control">
<label class="label" for="rclone.transfers">
<span class="label-text font-medium">Transfers</span>
</label>
<input type="number" class="input input-bordered" name="rclone.transfers" id="rclone.transfers" placeholder="8" min="1">
<div class="label">
<span class="label-text-alt">Number of file transfers to run in parallel</span>
</div>
</div>
</div> </div>
</div> </div>
</div> </div>
@@ -615,6 +649,36 @@
<span class="label-text-alt">How often VFS cache dir gets cleaned</span> <span class="label-text-alt">How often VFS cache dir gets cleaned</span>
</div> </div>
</div> </div>
<div class="form-control">
<label class="label" for="rclone.vfs_cache_min_free_space">
<span class="label-text font-medium">VFS Cache Min Free Space</span>
</label>
<input type="text" class="input input-bordered" name="rclone.vfs_cache_min_free_space" id="rclone.vfs_cache_min_free_space" placeholder="1G">
<div class="label">
<span class="label-text-alt">Target minimum free space on the disk containing the cache</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.vfs_disk_space_total">
<span class="label-text font-medium">VFS Disk Space Total</span>
</label>
<input type="text" class="input input-bordered" name="rclone.vfs_disk_space_total" id="rclone.vfs_disk_space_total" placeholder="1G">
<div class="label">
<span class="label-text-alt">Specify the total space of disk</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.vfs_read_chunk_streams">
<span class="label-text font-medium">VFS Read Chunk Streams</span>
</label>
<input type="number" class="input input-bordered" name="rclone.vfs_read_chunk_streams" id="rclone.vfs_read_chunk_streams" placeholder="4" min="0">
<div class="label">
<span class="label-text-alt">The number of parallel streams to read at once</span>
</div>
</div>
</div> </div>
</div> </div>
</div> </div>
@@ -624,7 +688,7 @@
<h3 class="text-lg font-semibold mb-4 flex items-center"> <h3 class="text-lg font-semibold mb-4 flex items-center">
<i class="bi bi-gear mr-2"></i>Advanced Settings <i class="bi bi-gear mr-2"></i>Advanced Settings
</h3> </h3>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4"> <div class="grid grid-cols-2 lg:grid-cols-3 gap-4">
<div class="form-control"> <div class="form-control">
<label class="label cursor-pointer justify-start gap-3"> <label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="rclone.no_modtime" id="rclone.no_modtime"> <input type="checkbox" class="checkbox" name="rclone.no_modtime" id="rclone.no_modtime">
@@ -644,6 +708,36 @@
</div> </div>
</label> </label>
</div> </div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3" for="rclone.async_read">
<input type="checkbox" class="checkbox" name="rclone.async_read" id="rclone.async_read">
<div>
<span class="label-text font-medium">Async Read</span>
<div class="label-text-alt">Use asynchronous reads</div>
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3" for="rclone.vfs_fast_fingerprint">
<input type="checkbox" class="checkbox" name="rclone.vfs_fast_fingerprint" id="rclone.vfs_fast_fingerprint">
<div>
<span class="label-text font-medium">VFS Fast Fingerprint</span>
<div class="label-text-alt">Use fast (less accurate) fingerprints for change detection</div>
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3" for="rclone.use_mmap">
<input type="checkbox" class="checkbox" name="rclone.use_mmap" id="rclone.use_mmap">
<div>
<span class="label-text font-medium">Use Mmap</span>
<div class="label-text-alt">Use memory-mapped (mmap) buffers when reading files</div>
</div>
</label>
</div>
</div> </div>
</div> </div>
</div> </div>

View File

@@ -60,9 +60,6 @@
<li><a href="{{.URLBase}}webdav" target="_blank"> <li><a href="{{.URLBase}}webdav" target="_blank">
<i class="bi bi-cloud text-success"></i>WebDAV <i class="bi bi-cloud text-success"></i>WebDAV
</a></li> </a></li>
<li><a href="{{.URLBase}}stats" class="{{if eq .Page "stats"}}active{{end}}">
<i class="bi bi-graph-up text-info"></i>Stats
</a></li>
<li><a href="{{.URLBase}}logs" target="_blank"> <li><a href="{{.URLBase}}logs" target="_blank">
<i class="bi bi-journal-text text-warning"></i>Logs <i class="bi bi-journal-text text-warning"></i>Logs
</a></li> </a></li>
@@ -96,7 +93,7 @@
<i class="bi bi-cloud"></i> <i class="bi bi-cloud"></i>
<span class="hidden xl:inline">WebDAV</span> <span class="hidden xl:inline">WebDAV</span>
</a></li> </a></li>
<li><a href="{{.URLBase}}logs" target="_blank" class="tooltip tooltip-bottom" data-tip="System Logs"> <li><a href="{{.URLBase}}debug/logs" target="_blank" class="tooltip tooltip-bottom" data-tip="System Logs">
<i class="bi bi-journal-text"></i> <i class="bi bi-journal-text"></i>
<span class="hidden xl:inline">Logs</span> <span class="hidden xl:inline">Logs</span>
</a></li> </a></li>

View File

@@ -72,10 +72,16 @@
<div class="card bg-base-100 shadow-xl" id="rclone-card"> <div class="card bg-base-100 shadow-xl" id="rclone-card">
<div class="card-header p-6 pb-3"> <div class="card-header p-6 pb-3">
<h2 class="card-title text-xl"> <div class="card-title text-xl justify-between items-center">
<i class="bi bi-cloud-arrow-up text-primary"></i> <h2>
Rclone Statistics <i class="bi bi-cloud-arrow-up text-primary"></i>
</h2> Rclone Statistics
</h2>
<a href="{{.URLBase}}debug/logs/rclone" class="btn btn-sm btn-outline" target="_blank">
<i class="bi bi-arrow-right"></i>
View Rclone Logs
</a>
</div>
<div class="badge" id="rclone-status">Unknown</div> <div class="badge" id="rclone-status">Unknown</div>
</div> </div>
<div class="card-body p-6 pt-3" id="rclone-content"> <div class="card-body p-6 pt-3" id="rclone-content">
@@ -179,9 +185,9 @@
<div class="stat-desc">Total: ${cs.totalChecks || 0}</div> <div class="stat-desc">Total: ${cs.totalChecks || 0}</div>
</div> </div>
<div class="stat"> <div class="stat">
<div class="stat-title">Elapsed Time</div> <div class="stat-title">Uptime</div>
<div class="stat-value text-accent">${((cs.elapsedTime || 0) / 60).toFixed(1)}m</div> <div class="stat-value text-accent">${window.decypharrUtils.formatDuration(cs.elapsedTime)}</div>
<div class="stat-desc">Transfer: ${((cs.transferTime || 0) / 60).toFixed(1)}m</div> <div class="stat-desc">Transfer: ${window.decypharrUtils.formatDuration(cs.transferTime)}</div>
</div> </div>
`; `;
} }
@@ -225,7 +231,7 @@
</div> </div>
<div class="flex justify-between text-xs text-base-content/60 mt-1"> <div class="flex justify-between text-xs text-base-content/60 mt-1">
<span>${window.decypharrUtils.formatBytes(transfer.bytes || 0)} / ${window.decypharrUtils.formatBytes(transfer.size || 0)}</span> <span>${window.decypharrUtils.formatBytes(transfer.bytes || 0)} / ${window.decypharrUtils.formatBytes(transfer.size || 0)}</span>
<span>ETA: ${transfer.eta ? Math.ceil(transfer.eta / 60) + 'm' : 'Unknown'}</span> <span>ETA: ${transfer.eta ? window.decypharrUtils.formatDuration(transfer.eta) : 'Unknown'}</span>
</div> </div>
</div> </div>
</div> </div>
@@ -312,37 +318,100 @@
let html = '<div class="space-y-4">'; let html = '<div class="space-y-4">';
debrids.forEach(debrid => { debrids.forEach(debrid => {
const profile = debrid.profile || {};
const library = debrid.library || {};
const accounts = debrid.accounts || [];
html += ` html += `
<div class="card bg-base-200"> <div class="card bg-base-200">
<div class="card-body p-4"> <div class="card-body p-4">
<div class="flex justify-between items-start"> <div class="flex justify-between items-start">
<div> <div>
<h3 class="card-title text-lg">${debrid.name || 'Unknown Service'}</h3> <h3 class="card-title text-lg">${profile.name || 'Unknown Service'}</h3>
<p class="text-sm text-base-content/70">${debrid.username || 'No username'}</p> <p class="text-sm text-base-content/70">${profile.username || 'No username'}</p>
</div> </div>
<div class="text-right"> <div class="text-right">
<div class="text-sm font-mono">${debrid.points} points</div> <div class="text-sm font-mono">${formatNumber(profile.points || 0)} points</div>
<div class="text-xs text-base-content/70">Expires: ${debrid.expiration || 'Unknown'}</div> <div class="text-xs text-base-content/70">Type: ${profile.type || 'Unknown'}</div>
<div class="text-xs text-base-content/70">Expires: ${profile.expiration ? new Date(profile.expiration).toLocaleDateString() : 'Unknown'}</div>
</div> </div>
</div> </div>
<div class="grid grid-cols-2 md:grid-cols-4 gap-3 mt-4"> <div class="grid grid-cols-2 md:grid-cols-4 gap-3 mt-4">
<div class="stat"> <div class="stat">
<div class="stat-title text-xs">Library Size</div> <div class="stat-title text-xs">Library Size</div>
<div class="stat-value text-sm">${formatNumber(debrid.library_size || 0)}</div> <div class="stat-value text-sm">${formatNumber(library.total || 0)}</div>
</div> </div>
<div class="stat"> <div class="stat">
<div class="stat-title text-xs">Bad Torrents</div> <div class="stat-title text-xs">Bad Torrents</div>
<div class="stat-value text-sm text-error">${formatNumber(debrid.bad_torrents || 0)}</div> <div class="stat-value text-sm text-error">${formatNumber(library.bad || 0)}</div>
</div> </div>
<div class="stat"> <div class="stat">
<div class="stat-title text-xs">Active Links</div> <div class="stat-title text-xs">Active Links</div>
<div class="stat-value text-sm text-success">${formatNumber(debrid.active_links || 0)}</div> <div class="stat-value text-sm text-success">${formatNumber(library.active_links || 0)}</div>
</div> </div>
<div class="stat"> <div class="stat">
<div class="stat-title text-xs">Type</div> <div class="stat-title text-xs">Total Accounts</div>
<div class="stat-value text-sm">${debrid.type || 'Unknown'}</div> <div class="stat-value text-sm text-info">${accounts.length}</div>
</div> </div>
</div> </div>
`;
// Add accounts section if there are accounts
if (accounts && accounts.length > 0) {
html += `
<div class="mt-6">
<h4 class="text-lg font-semibold mb-3">
<i class="bi bi-person-lines-fill text-primary"></i>
Accounts
</h4>
<div class="grid grid-cols-2 md:grid-cols-2 gap-2">
`;
accounts.forEach((account, index) => {
const statusBadge = account.disabled ?
'<span class="badge badge-error badge-sm">Disabled</span>' :
'<span class="badge badge-success badge-sm">Active</span>';
const inUseBadge = account.in_use ?
'<span class="badge badge-info badge-sm">In Use</span>' :
'';
html += `
<div class="card bg-base-100 compact">
<div class="card-body p-3">
<div class="flex justify-between items-start mb-2">
<div class="flex-1">
<div class="flex items-center gap-2">
<h5 class="font-medium text-sm">Account #${account.order + 1}</h5>
${statusBadge}
${inUseBadge}
</div>
<p class="text-xs text-base-content/70 mt-1">${account.username || 'No username'}</p>
</div>
<div class="text-right">
<div class="text-xs font-mono text-base-content/80">
Token: ${account.token_masked || '****'}
</div>
</div>
</div>
<div class="grid grid-cols-2 md:grid-cols-2 gap-2">
<div class="stat bg-base-200 rounded p-2">
<div class="stat-title text-xs">Traffic Used</div>
<div class="stat-value text-xs">${window.decypharrUtils.formatBytes(account.traffic_used || 0)}</div>
</div>
<div class="stat bg-base-200 rounded p-2">
<div class="stat-title text-xs">Links Count</div>
<div class="stat-value text-xs">${formatNumber(account.links_count || 0)}</div>
</div>
</div>
</div>
</div>
`;
});
html += '</div></div>';
}
html += `
</div> </div>
</div> </div>
`; `;

View File

@@ -2,9 +2,10 @@ package web
import ( import (
"encoding/json" "encoding/json"
"net/http"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
"net/http"
) )
func (wb *Web) LoginHandler(w http.ResponseWriter, r *http.Request) { func (wb *Web) LoginHandler(w http.ResponseWriter, r *http.Request) {

View File

@@ -1,14 +1,14 @@
package web package web
import ( import (
"cmp"
"embed" "embed"
"html/template"
"github.com/gorilla/sessions" "github.com/gorilla/sessions"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/wire"
"html/template"
"os"
) )
var restartFunc func() var restartFunc func()
@@ -60,10 +60,11 @@ type Web struct {
logger zerolog.Logger logger zerolog.Logger
cookie *sessions.CookieStore cookie *sessions.CookieStore
templates *template.Template templates *template.Template
torrents *store.TorrentStorage torrents *wire.TorrentStorage
} }
func New() *Web { func New() *Web {
cfg := config.Get()
templates := template.Must(template.ParseFS( templates := template.Must(template.ParseFS(
content, content,
"templates/layout.html", "templates/layout.html",
@@ -75,8 +76,7 @@ func New() *Web {
"templates/login.html", "templates/login.html",
"templates/register.html", "templates/register.html",
)) ))
secretKey := cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"") cookieStore := sessions.NewCookieStore([]byte(cfg.SecretKey()))
cookieStore := sessions.NewCookieStore([]byte(secretKey))
cookieStore.Options = &sessions.Options{ cookieStore.Options = &sessions.Options{
Path: "/", Path: "/",
MaxAge: 86400 * 7, MaxAge: 86400 * 7,
@@ -86,6 +86,6 @@ func New() *Web {
logger: logger.New("ui"), logger: logger.New("ui"),
templates: templates, templates: templates,
cookie: cookieStore, cookie: cookieStore,
torrents: store.Get().Torrents(), torrents: wire.Get().Torrents(),
} }
} }

View File

@@ -1,7 +1,6 @@
package webdav package webdav
import ( import (
"crypto/tls"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@@ -9,28 +8,23 @@ import (
"strings" "strings"
"time" "time"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/store" "github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
) )
var streamingTransport = &http.Transport{ type retryAction int
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
MaxIdleConns: 200,
MaxIdleConnsPerHost: 100,
MaxConnsPerHost: 200,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second, // give the upstream a minute to send headers
ExpectContinueTimeout: 1 * time.Second,
DisableKeepAlives: true, // close after each request
ForceAttemptHTTP2: false, // dont speak HTTP/2
// this line is what truly blocks HTTP/2:
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
}
var sharedClient = &http.Client{ const (
Transport: streamingTransport, noRetry retryAction = iota
Timeout: 0, retryWithLimit
} retryAlways
)
const (
MaxNetworkRetries = 3
MaxLinkRetries = 10
)
type streamError struct { type streamError struct {
Err error Err error
@@ -50,7 +44,7 @@ type File struct {
name string name string
torrentName string torrentName string
link string link string
downloadLink string downloadLink types.DownloadLink
size int64 size int64
isDir bool isDir bool
fileId string fileId string
@@ -76,26 +70,27 @@ func (f *File) Close() error {
// This is just to satisfy the os.File interface // This is just to satisfy the os.File interface
f.content = nil f.content = nil
f.children = nil f.children = nil
f.downloadLink = "" f.downloadLink = types.DownloadLink{}
f.readOffset = 0 f.readOffset = 0
return nil return nil
} }
func (f *File) getDownloadLink() (string, error) { func (f *File) getDownloadLink() (types.DownloadLink, error) {
// Check if we already have a final URL cached // Check if we already have a final URL cached
if f.downloadLink != "" && isValidURL(f.downloadLink) { if f.downloadLink.Valid() == nil {
return f.downloadLink, nil return f.downloadLink, nil
} }
downloadLink, err := f.cache.GetDownloadLink(f.torrentName, f.name, f.link) downloadLink, err := f.cache.GetDownloadLink(f.torrentName, f.name, f.link)
if err != nil { if err != nil {
return "", err return downloadLink, err
} }
if downloadLink != "" && isValidURL(downloadLink) { err = downloadLink.Valid()
f.downloadLink = downloadLink if err != nil {
return downloadLink, nil return types.DownloadLink{}, err
} }
return "", os.ErrNotExist f.downloadLink = downloadLink
return downloadLink, nil
} }
func (f *File) getDownloadByteRange() (*[2]int64, error) { func (f *File) getDownloadByteRange() (*[2]int64, error) {
@@ -106,7 +101,6 @@ func (f *File) getDownloadByteRange() (*[2]int64, error) {
return byteRange, nil return byteRange, nil
} }
// setVideoStreamingHeaders sets the necessary headers for video streaming
// It returns error and a boolean indicating if the request is a range request // It returns error and a boolean indicating if the request is a range request
func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error { func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
content := f.content content := f.content
@@ -140,70 +134,91 @@ func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) err
} }
func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error { func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error {
// Handle preloaded content files
if f.content != nil { if f.content != nil {
return f.servePreloadedContent(w, r) return f.servePreloadedContent(w, r)
} }
// Try streaming with retry logic return f.streamWithRetry(w, r, 0, 0)
return f.streamWithRetry(w, r, 0)
} }
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error { func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, networkRetries, recoverableRetries int) error {
const maxRetries = 3
_log := f.cache.Logger() _log := f.cache.Logger()
// Get download link (with caching optimization)
downloadLink, err := f.getDownloadLink() downloadLink, err := f.getDownloadLink()
if err != nil { if err != nil {
return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed} return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
} }
if downloadLink == "" { upstreamReq, err := http.NewRequest("GET", downloadLink.DownloadLink, nil)
return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
}
// Create upstream request with streaming optimizations
upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
if err != nil { if err != nil {
return &streamError{Err: err, StatusCode: http.StatusInternalServerError} return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
} }
setVideoStreamingHeaders(upstreamReq)
// Handle range requests (critical for video seeking)
isRangeRequest := f.handleRangeRequest(upstreamReq, r, w) isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
if isRangeRequest == -1 { if isRangeRequest == -1 {
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable} return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
} }
resp, err := sharedClient.Do(upstreamReq) resp, err := f.cache.Download(upstreamReq)
if err != nil { if err != nil {
// Network error - retry with limit
if networkRetries < MaxNetworkRetries {
_log.Debug().
Int("network_retries", networkRetries+1).
Err(err).
Msg("Network error, retrying")
return f.streamWithRetry(w, r, networkRetries+1, recoverableRetries)
}
return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable} return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
} }
defer resp.Body.Close() defer resp.Body.Close()
// Handle upstream errors with retry logic if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries) retryType, retryErr := f.handleUpstreamError(downloadLink, resp)
if shouldRetry && retryCount < maxRetries {
// Retry with new download link switch retryType {
_log.Debug(). case retryAlways:
Int("retry_count", retryCount+1). if recoverableRetries >= MaxLinkRetries {
Str("file", f.name). return &streamError{
Msg("Retrying stream request") Err: fmt.Errorf("max link retries exceeded (%d)", MaxLinkRetries),
return f.streamWithRetry(w, r, retryCount+1) StatusCode: http.StatusServiceUnavailable,
} }
if retryErr != nil { }
return retryErr
_log.Debug().
Int("recoverable_retries", recoverableRetries+1).
Str("file", f.name).
Msg("Recoverable error, retrying")
return f.streamWithRetry(w, r, 0, recoverableRetries+1) // Reset network retries
case retryWithLimit:
if networkRetries < MaxNetworkRetries {
_log.Debug().
Int("network_retries", networkRetries+1).
Str("file", f.name).
Msg("Network error, retrying")
return f.streamWithRetry(w, r, networkRetries+1, recoverableRetries)
}
fallthrough
case noRetry:
if retryErr != nil {
return retryErr
}
return &streamError{
Err: fmt.Errorf("non-retryable error: status %d", resp.StatusCode),
StatusCode: http.StatusBadGateway,
}
}
} }
// Determine status code based on range request // Success - stream the response
statusCode := http.StatusOK statusCode := http.StatusOK
if isRangeRequest == 1 { if isRangeRequest == 1 {
statusCode = http.StatusPartialContent statusCode = http.StatusPartialContent
} }
// Set headers before streaming
if contentLength := resp.Header.Get("Content-Length"); contentLength != "" { if contentLength := resp.Header.Get("Content-Length"); contentLength != "" {
w.Header().Set("Content-Length", contentLength) w.Header().Set("Content-Length", contentLength)
} }
@@ -212,10 +227,71 @@ func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCoun
w.Header().Set("Content-Range", contentRange) w.Header().Set("Content-Range", contentRange)
} }
if err := f.streamBuffer(w, resp.Body, statusCode); err != nil { return f.streamBuffer(w, resp.Body, statusCode)
return err }
func (f *File) handleUpstreamError(downloadLink types.DownloadLink, resp *http.Response) (retryAction, error) {
_log := f.cache.Logger()
cleanupResp := func(resp *http.Response) {
if resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
switch resp.StatusCode {
case http.StatusServiceUnavailable:
body, readErr := io.ReadAll(resp.Body)
cleanupResp(resp)
if readErr != nil {
_log.Error().Err(readErr).Msg("Failed to read response body")
return retryWithLimit, nil
}
bodyStr := string(body)
if strings.Contains(bodyStr, "you have exceeded your traffic") {
_log.Debug().
Str("token", utils.Mask(downloadLink.Token)).
Str("file", f.name).
Msg("Bandwidth exceeded for account, invalidating link")
f.cache.MarkDownloadLinkAsInvalid(f.downloadLink, "bandwidth_exceeded")
f.downloadLink = types.DownloadLink{}
return retryAlways, nil
}
return noRetry, &streamError{
Err: fmt.Errorf("service unavailable: %s", bodyStr),
StatusCode: http.StatusServiceUnavailable,
}
case http.StatusNotFound:
cleanupResp(resp)
_log.Debug().
Str("file", f.name).
Msg("Link not found, invalidating and regenerating")
f.cache.MarkDownloadLinkAsInvalid(f.downloadLink, "link_not_found")
f.downloadLink = types.DownloadLink{}
return retryAlways, nil
default:
body, _ := io.ReadAll(resp.Body)
cleanupResp(resp)
_log.Error().
Int("status_code", resp.StatusCode).
Str("file", f.name).
Str("response_body", string(body)).
Msg("Unexpected upstream error")
return retryWithLimit, &streamError{
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
StatusCode: http.StatusBadGateway,
}
} }
return nil
} }
func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int) error { func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int) error {
@@ -228,7 +304,7 @@ func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int
if n, err := src.Read(smallBuf); n > 0 { if n, err := src.Read(smallBuf); n > 0 {
// Write status code just before first successful write // Write status code just before first successful write
w.WriteHeader(statusCode) w.WriteHeader(statusCode)
if _, werr := w.Write(smallBuf[:n]); werr != nil { if _, werr := w.Write(smallBuf[:n]); werr != nil {
if isClientDisconnection(werr) { if isClientDisconnection(werr) {
return &streamError{Err: werr, StatusCode: 0, IsClientDisconnection: true} return &streamError{Err: werr, StatusCode: 0, IsClientDisconnection: true}
@@ -266,99 +342,6 @@ func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int
} }
} }
func (f *File) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
return false, nil
}
_log := f.cache.Logger()
// Clean up response body properly
cleanupResp := func(resp *http.Response) {
if resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
switch resp.StatusCode {
case http.StatusServiceUnavailable:
// Read the body to check for specific error messages
body, readErr := io.ReadAll(resp.Body)
cleanupResp(resp)
if readErr != nil {
_log.Error().Err(readErr).Msg("Failed to read response body")
return false, &streamError{
Err: fmt.Errorf("failed to read error response: %w", readErr),
StatusCode: http.StatusServiceUnavailable,
}
}
bodyStr := string(body)
if strings.Contains(bodyStr, "you have exceeded your traffic") {
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Bandwidth exceeded. Marking link as invalid")
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
// Retry with a different API key if available and we haven't exceeded retries
if retryCount < maxRetries {
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
StatusCode: http.StatusServiceUnavailable,
}
}
return false, &streamError{
Err: fmt.Errorf("service unavailable: %s", bodyStr),
StatusCode: http.StatusServiceUnavailable,
}
case http.StatusNotFound:
cleanupResp(resp)
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Link not found (404). Marking link as invalid and regenerating")
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
// Try to regenerate download link if we haven't exceeded retries
if retryCount < maxRetries {
// Clear cached link to force regeneration
f.downloadLink = ""
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("file not found after %d retries", retryCount),
StatusCode: http.StatusNotFound,
}
default:
body, _ := io.ReadAll(resp.Body)
cleanupResp(resp)
_log.Error().
Int("status_code", resp.StatusCode).
Str("file", f.name).
Str("response_body", string(body)).
Msg("Unexpected upstream error")
return false, &streamError{
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
StatusCode: http.StatusBadGateway,
}
}
}
func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int { func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
rangeHeader := r.Header.Get("Range") rangeHeader := r.Header.Get("Range")
if rangeHeader == "" { if rangeHeader == "" {

View File

@@ -4,8 +4,6 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"golang.org/x/net/webdav"
"mime" "mime"
"net/http" "net/http"
"os" "os"
@@ -15,6 +13,9 @@ import (
"strings" "strings"
"time" "time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"golang.org/x/net/webdav"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/store" "github.com/sirrobot01/decypharr/pkg/debrid/store"
@@ -451,15 +452,15 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
// Handle nginx proxy (X-Accel-Redirect) // Handle nginx proxy (X-Accel-Redirect)
if file.content == nil && !file.isRar && h.cache.StreamWithRclone() { if file.content == nil && !file.isRar && h.cache.StreamWithRclone() {
link, err := file.getDownloadLink() link, err := file.getDownloadLink()
if err != nil || link == "" { if err != nil || link.Empty() {
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed) http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
return return
} }
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name())) w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name()))
w.Header().Set("X-Accel-Redirect", link) w.Header().Set("X-Accel-Redirect", link.DownloadLink)
w.Header().Set("X-Accel-Buffering", "no") w.Header().Set("X-Accel-Buffering", "no")
http.Redirect(w, r, link, http.StatusFound) http.Redirect(w, r, link.DownloadLink, http.StatusFound)
return return
} }

View File

@@ -2,21 +2,15 @@ package webdav
import ( import (
"fmt" "fmt"
"github.com/stanNthe5/stringbuf"
"net/http" "net/http"
"net/url"
"os" "os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
"time" "time"
)
func isValidURL(str string) bool { "github.com/stanNthe5/stringbuf"
u, err := url.Parse(str) )
// A valid URL should parse without error, and have a non-empty scheme and host.
return err == nil && u.Scheme != "" && u.Host != ""
}
var pctHex = "0123456789ABCDEF" var pctHex = "0123456789ABCDEF"
@@ -218,12 +212,3 @@ func parseRange(s string, size int64) ([]httpRange, error) {
} }
return ranges, nil return ranges, nil
} }
func setVideoStreamingHeaders(req *http.Request) {
// Request optimizations for faster response
req.Header.Set("Accept", "*/*")
req.Header.Set("Accept-Encoding", "identity")
req.Header.Set("Connection", "keep-alive")
req.Header.Set("User-Agent", "VideoStream/1.0")
req.Header.Set("Priority", "u=1")
}

View File

@@ -7,7 +7,7 @@ import (
"github.com/go-chi/chi/v5" "github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware" "github.com/go-chi/chi/v5/middleware"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/store" "github.com/sirrobot01/decypharr/pkg/wire"
"html/template" "html/template"
"net/http" "net/http"
"net/url" "net/url"
@@ -97,7 +97,7 @@ func New() *WebDav {
Handlers: make([]*Handler, 0), Handlers: make([]*Handler, 0),
URLBase: urlBase, URLBase: urlBase,
} }
for name, c := range store.Get().Debrid().Caches() { for name, c := range wire.Get().Debrid().Caches() {
h := NewHandler(name, urlBase, c, c.Logger()) h := NewHandler(name, urlBase, c, c.Logger())
w.Handlers = append(w.Handlers, h) w.Handlers = append(w.Handlers, h)
} }

625
pkg/wire/downloader.go Normal file
View File

@@ -0,0 +1,625 @@
package wire
import (
	"crypto/md5"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/cavaliergopher/grab/v3"
	"github.com/google/uuid"

	"github.com/sirrobot01/decypharr/internal/utils"
	"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// Multi-season detection patterns
var (
	// Pre-compiled patterns for multi-season replacement. Each replacement is
	// a fmt template that receives the target season number. Order matters:
	// replaceMultiSeasonPattern applies the first pattern that matches and stops.
	multiSeasonReplacements = []multiSeasonPattern{
		// S01-08 -> S01 (or whatever target season)
		{regexp.MustCompile(`(?i)S(\d{1,2})-\d{1,2}`), "S%02d"},
		// S01-S08 -> S01
		{regexp.MustCompile(`(?i)S(\d{1,2})-S\d{1,2}`), "S%02d"},
		// Season 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Season\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},
		// Seasons 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Seasons\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},
		// Complete Series -> Season X
		{regexp.MustCompile(`(?i)Complete\.?Series`), "Season %02d"},
		// All Seasons -> Season X
		{regexp.MustCompile(`(?i)All\.?Seasons?`), "Season %02d"},
	}

	// Also pre-compile other patterns.
	// seasonPattern captures a 1-2 digit season number from forms like
	// "Season 3", "Season.3" or "S03" (case-insensitive).
	seasonPattern = regexp.MustCompile(`(?i)(?:season\.?\s*|s)(\d{1,2})`)
	// qualityIndicators marks where release-quality metadata begins in a
	// torrent name; used as an insertion anchor for season markers.
	qualityIndicators = regexp.MustCompile(`(?i)\b(2160p|1080p|720p|BluRay|WEB-DL|HDTV|x264|x265|HEVC)`)
	// multiSeasonIndicators are name-level hints that a torrent spans several
	// seasons (e.g. "Complete Series", "S01-S05", "Seasons 1-8").
	multiSeasonIndicators = []*regexp.Regexp{
		regexp.MustCompile(`(?i)complete\.?series`),
		regexp.MustCompile(`(?i)all\.?seasons?`),
		regexp.MustCompile(`(?i)season\.?\s*\d+\s*-\s*\d+`),
		regexp.MustCompile(`(?i)s\d+\s*-\s*s?\d+`),
		regexp.MustCompile(`(?i)seasons?\s*\d+\s*-\s*\d+`),
	}
)
// multiSeasonPattern pairs a compiled regexp with a fmt replacement template
// (e.g. "S%02d") that receives the target season number.
type multiSeasonPattern struct {
	pattern     *regexp.Regexp // multi-season form to detect in a torrent name
	replacement string         // fmt template producing the single-season form
}
// SeasonInfo describes one season split out of a multi-season torrent.
type SeasonInfo struct {
	SeasonNumber int          // season number extracted from file names/paths
	Files        []types.File // files belonging to this season
	InfoHash     string       // synthetic per-season hash derived from the torrent's info hash
	Name         string       // season-specific name with the original metadata preserved
}
// replaceMultiSeasonPattern rewrites a multi-season torrent name so it refers
// to a single target season. The first matching pre-compiled pattern wins; if
// none match, a season marker is inserted into the name instead.
func (s *Store) replaceMultiSeasonPattern(name string, targetSeason int) string {
	for _, rule := range multiSeasonReplacements {
		if !rule.pattern.MatchString(name) {
			continue
		}
		rewritten := rule.pattern.ReplaceAllString(name, fmt.Sprintf(rule.replacement, targetSeason))
		s.logger.Debug().Msgf("Applied pattern replacement: %s -> %s", name, rewritten)
		return rewritten
	}
	// No multi-season pattern matched; try to place season info intelligently.
	return s.insertSeasonIntoName(name, targetSeason)
}
// insertSeasonIntoName adds an "SNN" season marker to a torrent name that has
// no season information yet. If the name already contains season info it is
// returned unchanged; otherwise the marker is inserted just before the first
// quality indicator (2160p, BluRay, ...) or, failing that, appended at the end.
func (s *Store) insertSeasonIntoName(name string, seasonNum int) string {
	// Check if season info already exists.
	if seasonPattern.MatchString(name) {
		return name // Already has season info, keep as is
	}
	// Try to find a good insertion point (before quality indicators).
	if loc := qualityIndicators.FindStringIndex(name); loc != nil {
		before := strings.TrimSpace(name[:loc[0]])
		after := name[loc[0]:]
		if before == "" {
			// Quality indicator sits at the very start of the name; avoid
			// emitting a leading space ("" + " S01 ..." would do that).
			return fmt.Sprintf("S%02d %s", seasonNum, after)
		}
		return fmt.Sprintf("%s S%02d %s", before, seasonNum, after)
	}
	// If no quality indicators found, append at the end.
	return fmt.Sprintf("%s S%02d", name, seasonNum)
}
// detectMultiSeason decides whether a torrent packs multiple seasons of a
// show. It returns true plus one SeasonInfo per detected season (in ascending
// season order), or false when the torrent is single-season. The error result
// is currently always nil.
func (s *Store) detectMultiSeason(debridTorrent *types.Torrent) (bool, []SeasonInfo, error) {
	torrentName := debridTorrent.Name
	files := debridTorrent.GetFiles()
	s.logger.Debug().Msgf("Analyzing torrent for multi-season: %s", torrentName)

	// Find all seasons present in the files.
	seasonsFound := s.findAllSeasons(files)

	// Multi-season when the files span more than one season, or the torrent
	// name itself advertises it (e.g. "Complete Series", "S01-S05").
	isMultiSeason := len(seasonsFound) > 1 || s.hasMultiSeasonIndicators(torrentName)
	if !isMultiSeason {
		return false, nil, nil
	}
	s.logger.Info().Msgf("Multi-season torrent detected with seasons: %v", getSortedSeasons(seasonsFound))

	// Group files by season.
	seasonGroups := s.groupFilesBySeason(files, seasonsFound)

	// Iterate season numbers in sorted order so the returned slice is
	// deterministic; ranging over the map directly would yield a different
	// order on every call.
	seasonNumbers := make([]int, 0, len(seasonGroups))
	for seasonNum := range seasonGroups {
		seasonNumbers = append(seasonNumbers, seasonNum)
	}
	sort.Ints(seasonNumbers)

	// Create SeasonInfo objects with proper naming.
	seasons := make([]SeasonInfo, 0, len(seasonNumbers))
	for _, seasonNum := range seasonNumbers {
		seasonFiles := seasonGroups[seasonNum]
		if len(seasonFiles) == 0 {
			continue
		}
		// Generate season-specific name preserving all metadata.
		seasonName := s.generateSeasonSpecificName(torrentName, seasonNum)
		seasons = append(seasons, SeasonInfo{
			SeasonNumber: seasonNum,
			Files:        seasonFiles,
			InfoHash:     s.generateSeasonHash(debridTorrent.InfoHash, seasonNum),
			Name:         seasonName,
		})
	}
	return true, seasons, nil
}
// generateSeasonSpecificName derives a single-season name for seasonNum from
// the original torrent name, preserving all other metadata in the name.
func (s *Store) generateSeasonSpecificName(originalName string, seasonNum int) string {
	result := s.replaceMultiSeasonPattern(originalName, seasonNum)
	s.logger.Debug().Msgf("Generated season name for S%02d: %s", seasonNum, result)
	return result
}
// findAllSeasons scans every file's name (and, failing that, its full path)
// and returns the set of season numbers found.
func (s *Store) findAllSeasons(files []types.File) map[int]bool {
	found := make(map[int]bool)
	for _, f := range files {
		// Prefer the bare filename; fall back to the full path.
		season := s.extractSeason(f.Name)
		if season == 0 {
			season = s.extractSeason(f.Path)
		}
		if season > 0 {
			found[season] = true
		}
	}
	return found
}
// extractSeason returns the season number found in text via seasonPattern,
// or 0 when none is present. Only values in the range 1..99 are accepted.
func (s *Store) extractSeason(text string) int {
	m := seasonPattern.FindStringSubmatch(text)
	if len(m) < 2 {
		return 0
	}
	num, err := strconv.Atoi(m[1])
	if err != nil || num <= 0 || num >= 100 {
		return 0
	}
	return num
}
// hasMultiSeasonIndicators reports whether the torrent name itself advertises
// multiple seasons (e.g. "complete series", "all seasons", "S01-S05").
func (s *Store) hasMultiSeasonIndicators(torrentName string) bool {
	for _, re := range multiSeasonIndicators {
		if re.MatchString(torrentName) {
			return true
		}
	}
	return false
}
// groupFilesBySeason buckets files by the season they belong to. A file whose
// name/path yields no known season is attributed via its parent directories
// (inferSeasonFromPath), or to the single known season when exactly one
// exists; otherwise it is left out of every bucket.
func (s *Store) groupFilesBySeason(files []types.File, knownSeasons map[int]bool) map[int][]types.File {
	groups := make(map[int][]types.File, len(knownSeasons))
	for season := range knownSeasons {
		groups[season] = []types.File{}
	}
	for _, f := range files {
		// Season from filename first, then full path.
		season := s.extractSeason(f.Name)
		if season == 0 {
			season = s.extractSeason(f.Path)
		}
		if season > 0 && knownSeasons[season] {
			groups[season] = append(groups[season], f)
			continue
		}
		// No direct match: try directory-based inference.
		if inferred := s.inferSeasonFromPath(f.Path, knownSeasons); inferred > 0 {
			groups[inferred] = append(groups[inferred], f)
		} else if len(knownSeasons) == 1 {
			// Only one season exists; default the file to it.
			for only := range knownSeasons {
				groups[only] = append(groups[only], f)
			}
		}
	}
	return groups
}
// inferSeasonFromPath walks the slash-separated path components (e.g. a
// "Season 02" folder) and returns the first season number that is in
// knownSeasons, or 0 when none qualifies.
func (s *Store) inferSeasonFromPath(path string, knownSeasons map[int]bool) int {
	for _, part := range strings.Split(path, "/") {
		if season := s.extractSeason(part); season > 0 && knownSeasons[season] {
			return season
		}
	}
	return 0
}
// getSortedSeasons returns the season numbers from the set in ascending
// order. The previous implementation never sorted, despite its name and the
// "detected with seasons: %v" log line that relies on it, so log output was
// in random map-iteration order.
func getSortedSeasons(seasons map[int]bool) []int {
	result := make([]int, 0, len(seasons))
	for season := range seasons {
		result = append(result, season)
	}
	// Insertion sort: season counts are tiny and this avoids pulling in an
	// extra import for a log-formatting helper.
	for i := 1; i < len(result); i++ {
		for j := i; j > 0 && result[j] < result[j-1]; j-- {
			result[j], result[j-1] = result[j-1], result[j]
		}
	}
	return result
}
// generateSeasonHash derives a deterministic, season-unique info hash by
// MD5-hashing the original torrent hash together with the season number.
// (MD5 is used only as an ID generator here, not for security.)
func (s *Store) generateSeasonHash(originalHash string, seasonNumber int) string {
	sum := md5.Sum([]byte(fmt.Sprintf("%s-season-%d", originalHash, seasonNumber)))
	return fmt.Sprintf("%x", sum)
}
// grabber downloads url to filename using the given grab client, optionally
// restricting the transfer to a byte range. Every two seconds it reports the
// delta of bytes downloaded since the previous report plus the current speed
// via progressCallback (if non-nil), flushes the remainder once the transfer
// completes, and returns the transfer's final error.
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
	req, err := grab.NewRequest(filename, url)
	if err != nil {
		return err
	}
	if byterange != nil {
		// Restrict the request to the requested slice of the file.
		req.HTTPRequest.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byterange[0], byterange[1]))
	}

	resp := client.Do(req)
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	var reported int64
	for done := false; !done; {
		select {
		case <-ticker.C:
			current := resp.BytesComplete()
			if current != reported {
				if progressCallback != nil {
					progressCallback(current-reported, int64(resp.BytesPerSecond()))
				}
				reported = current
			}
		case <-resp.Done:
			done = true
		}
	}
	// Report the bytes downloaded since the last tick (speed 0: finished).
	if progressCallback != nil {
		progressCallback(resp.BytesComplete()-reported, 0)
	}
	return resp.Err()
}
// processDownload downloads all of debridTorrent's files into a directory
// derived from the torrent's save path and the torrent's original filename
// (sanitized), and returns that directory. Per-file failures are handled and
// logged inside downloadFiles and do not fail this call; only an unusable
// target directory returns an error.
func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
	s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
	torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename))
	torrentPath = utils.RemoveInvalidChars(torrentPath)
	// Wrap with %w so callers can inspect the underlying os error with
	// errors.Is/errors.As (the original used %v, which discards the chain).
	if err := os.MkdirAll(torrentPath, os.ModePerm); err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %w", torrentPath, err)
	}
	s.downloadFiles(torrent, debridTorrent, torrentPath)
	return torrentPath, nil
}
// downloadFiles downloads every file of debridTorrent that has a resolved
// download link into parent, one goroutine per file, bounded by
// s.downloadSemaphore. Progress deltas from all downloads are aggregated
// into debridTorrent and mirrored onto torrent via partialTorrentUpdate.
// Per-file failures are collected and logged at the end; the function
// returns nothing, so callers cannot distinguish partial failure from
// success.
func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) {
	var wg sync.WaitGroup
	// Total size across all files, used to derive overall progress below.
	totalSize := int64(0)
	for _, file := range debridTorrent.GetFiles() {
		totalSize += file.Size
	}
	debridTorrent.Lock()
	debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
	debridTorrent.Progress = 0       // Reset progress
	debridTorrent.Unlock()
	// Shared by all download goroutines. Locks debridTorrent first, then
	// torrent — this acquisition order must stay consistent to avoid deadlock.
	progressCallback := func(downloaded int64, speed int64) {
		debridTorrent.Lock()
		defer debridTorrent.Unlock()
		torrent.Lock()
		defer torrent.Unlock()
		// Update total downloaded bytes
		debridTorrent.SizeDownloaded += downloaded
		debridTorrent.Speed = speed
		// Calculate overall progress
		if totalSize > 0 {
			debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
		}
		s.partialTorrentUpdate(torrent, debridTorrent)
	}
	client := &grab.Client{
		UserAgent: "Decypharr[QBitTorrent]",
		HTTPClient: &http.Client{
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
			},
		},
	}
	// Buffered to the file count so workers never block on error reporting.
	errChan := make(chan error, len(debridTorrent.Files))
	for _, file := range debridTorrent.GetFiles() {
		if file.DownloadLink.Empty() {
			// Skip files without a link rather than failing the whole batch.
			s.logger.Info().Msgf("No download link found for %s", file.Name)
			continue
		}
		wg.Add(1)
		s.downloadSemaphore <- struct{}{} // acquire a download slot
		go func(file types.File) {
			defer wg.Done()
			defer func() { <-s.downloadSemaphore }() // release the slot
			filename := file.Name
			err := grabber(
				client,
				file.DownloadLink.DownloadLink,
				filepath.Join(parent, filename),
				file.ByteRange,
				progressCallback,
			)
			if err != nil {
				s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
				errChan <- err
			} else {
				s.logger.Info().Msgf("Downloaded %s", filename)
			}
		}(file)
	}
	wg.Wait()
	close(errChan)
	// Drain collected failures; they are logged but not propagated.
	var errors []error
	for err := range errChan {
		if err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		s.logger.Error().Msgf("Errors occurred during download: %v", errors)
		return
	}
	s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
// processSymlink waits for debridTorrent's files to appear under
// torrentRclonePath (polling every 100ms, up to 30 minutes) and creates one
// symlink per file inside torrentSymlinkPath. Files are matched by exact
// name, searched recursively through subdirectories. On success it
// optionally pre-caches the linked files in the background and returns the
// symlink directory; on timeout it returns the directory plus an error with
// the pending-file count.
func (s *Store) processSymlink(debridTorrent *types.Torrent, torrentRclonePath, torrentSymlinkPath string) (string, error) {
	files := debridTorrent.GetFiles()
	if len(files) == 0 {
		return "", fmt.Errorf("no valid files found")
	}
	s.logger.Info().Msgf("Creating symlinks for %d files ...", len(files))
	// Create symlink directory
	err := os.MkdirAll(torrentSymlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
	}
	// Track pending files by name; entries are removed as symlinks are made.
	remainingFiles := make(map[string]types.File)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(remainingFiles))
	var checkDirectory func(string) // Recursive function
	// checkDirectory scans dirPath (and subdirectories) for pending files and
	// symlinks any it finds into torrentSymlinkPath. Read errors are silently
	// ignored: the mount may not be ready yet and we retry on the next tick.
	checkDirectory = func(dirPath string) {
		entries, err := os.ReadDir(dirPath)
		if err != nil {
			return
		}
		for _, entry := range entries {
			entryName := entry.Name()
			fullPath := filepath.Join(dirPath, entryName)
			// Check if this matches a remaining file
			if file, exists := remainingFiles[entryName]; exists {
				fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
				// An already-existing symlink counts as success too.
				if err := os.Symlink(fullPath, fileSymlinkPath); err == nil || os.IsExist(err) {
					filePaths = append(filePaths, fileSymlinkPath)
					delete(remainingFiles, entryName)
					s.logger.Info().Msgf("File is ready: %s", file.Name)
				}
			} else if entry.IsDir() {
				// If not found and it's a directory, check inside
				checkDirectory(fullPath)
			}
		}
	}
	// Poll until every file has been linked, or give up after the timeout.
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			checkDirectory(torrentRclonePath)
		case <-timeout:
			s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(remainingFiles))
		}
	}
	// Pre-cache files if enabled
	if !s.skipPreCache && len(filePaths) > 0 {
		go func() {
			s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
			if err := utils.PreCacheFile(filePaths); err != nil {
				s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
			} else {
				s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
			}
		}()
	}
	return torrentSymlinkPath, nil
}
// getTorrentPaths resolves the rclone mount path and the symlink destination
// for debridTorrent, polling GetMountFolder every 100ms until the torrent's
// folder appears on the mount.
// NOTE(review): this loop has no timeout or cancellation — if the folder
// never materializes the call blocks forever (the declared error return is
// never used); consider bounding it like processSymlink's 30-minute timeout.
func (s *Store) getTorrentPaths(arrFolder string, debridTorrent *types.Torrent) (string, string, error) {
	for {
		torrentFolder, err := debridTorrent.GetMountFolder(debridTorrent.MountPath)
		if err == nil {
			// Found mountPath
			mountPath := filepath.Join(debridTorrent.MountPath, torrentFolder)
			// AllDebrid appears to expose single media files at the mount
			// root, so strip the extension and use the root as the mount
			// path — TODO confirm against the alldebrid mount layout.
			if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentFolder) {
				torrentFolder = utils.RemoveExtension(torrentFolder)
				mountPath = debridTorrent.MountPath
			}
			// Return mountPath and symlink path
			return mountPath, filepath.Join(arrFolder, torrentFolder), nil
		}
		time.Sleep(100 * time.Millisecond)
	}
}
// processMultiSeasonSymlinks splits a multi-season torrent into one symlinked
// torrent record per season: each season gets a copy of the torrent and of
// the debrid torrent restricted to that season's files (keyed by season info
// hash), symlinks are created from the rclone mount, and the season record is
// stored. The original torrent record is deleted at the end. importReq is
// currently unused.
func (s *Store) processMultiSeasonSymlinks(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
	for _, seasonInfo := range seasons {
		// Create a season-specific debrid torrent
		seasonDebridTorrent := debridTorrent.Copy()
		// Update the season torrent with season-specific data
		seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
		seasonDebridTorrent.Name = seasonInfo.Name
		// The season record reuses the derived info hash as both ID and hash.
		seasonTorrent := torrent.Copy()
		seasonTorrent.ID = seasonInfo.InfoHash
		seasonTorrent.Name = seasonInfo.Name
		seasonTorrent.Hash = seasonInfo.InfoHash
		torrentFiles := make([]*File, 0)
		size := int64(0)
		// Filter files to only include this season's files
		seasonFiles := make(map[string]types.File)
		for index, file := range seasonInfo.Files {
			seasonFiles[file.Name] = file
			torrentFiles = append(torrentFiles, &File{
				Index: index,
				Name:  file.Path,
				Size:  file.Size,
			})
			size += file.Size
		}
		seasonDebridTorrent.Files = seasonFiles
		seasonTorrent.Files = torrentFiles
		seasonTorrent.Size = size
		// Create a season-specific torrent record
		// Create season folder path using the extracted season name
		seasonFolderName := seasonInfo.Name
		s.logger.Info().Msgf("Processing season %s with %d files", seasonTorrent.Name, len(seasonInfo.Files))
		var err error
		cache := s.debrid.Debrid(debridTorrent.Debrid).Cache()
		var torrentRclonePath, torrentSymlinkPath string
		if cache != nil {
			// Internal webdav mode: the torrent folder comes from the cache
			// (note: resolved from the ORIGINAL debrid torrent, not the copy).
			torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent))
		} else {
			// Regular mount mode
			torrentRclonePath, _, err = s.getTorrentPaths(seasonTorrent.SavePath, seasonDebridTorrent)
			if err != nil {
				return err
			}
		}
		torrentSymlinkPath = filepath.Join(seasonTorrent.SavePath, seasonFolderName)
		torrentSymlinkPath, err = s.processSymlink(seasonDebridTorrent, torrentRclonePath, torrentSymlinkPath)
		if err != nil {
			return err
		}
		if torrentSymlinkPath == "" {
			return fmt.Errorf("no symlink found for season %d", seasonInfo.SeasonNumber)
		}
		// Update season torrent with final path
		seasonTorrent.TorrentPath = torrentSymlinkPath
		seasonTorrent.ContentPath = torrentSymlinkPath
		seasonTorrent.State = "pausedUP"
		// Add the season torrent to storage
		s.torrents.AddOrUpdate(seasonTorrent)
		s.logger.Info().Str("path", torrentSymlinkPath).Msgf("Successfully created season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
	}
	// NOTE(review): the download variant passes torrent.Category here; an
	// empty category relies on Delete matching by hash alone — confirm both
	// paths are intended to behave the same.
	s.torrents.Delete(torrent.Hash, "", false)
	s.logger.Info().Msgf("Multi-season processing completed for %s", debridTorrent.Name)
	return nil
}
// processMultiSeasonDownloads splits a multi-season torrent into one download
// record per season: for each season it builds a season-specific copy of the
// debrid torrent restricted to that season's files, fetches fresh download
// links, downloads the files, and stores the resulting season torrent. The
// original torrent record is deleted once every season has been processed.
// importReq is accepted for signature parity with the symlink path and is
// currently unused.
func (s *Store) processMultiSeasonDownloads(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
	s.logger.Info().Msgf("Creating separate download records for %d seasons", len(seasons))
	// The debrid client is the same for every season; resolve it once
	// instead of once per loop iteration.
	client := s.debrid.Debrid(debridTorrent.Debrid).Client()
	for _, seasonInfo := range seasons {
		// Season-specific copy of the debrid torrent, restricted to this
		// season's files.
		seasonDebridTorrent := debridTorrent.Copy()
		seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
		seasonDebridTorrent.Name = seasonInfo.Name
		seasonFiles := make(map[string]types.File, len(seasonInfo.Files))
		for _, file := range seasonInfo.Files {
			seasonFiles[file.Name] = file
		}
		seasonDebridTorrent.Files = seasonFiles
		// Season-specific torrent record with a fresh ID and the derived hash.
		seasonTorrent := torrent.Copy()
		seasonTorrent.ID = uuid.New().String()
		seasonTorrent.Name = seasonInfo.Name
		seasonTorrent.Hash = seasonInfo.InfoHash
		seasonTorrent.SavePath = torrent.SavePath
		s.logger.Info().Msgf("Downloading season %d with %d files", seasonInfo.SeasonNumber, len(seasonInfo.Files))
		// Generate download links for season files
		if err := client.GetFileDownloadLinks(seasonDebridTorrent); err != nil {
			s.logger.Error().Msgf("Failed to get download links for season %d: %v", seasonInfo.SeasonNumber, err)
			return fmt.Errorf("failed to get download links for season %d: %w", seasonInfo.SeasonNumber, err)
		}
		// Download files for this season
		seasonDownloadPath, err := s.processDownload(seasonTorrent, seasonDebridTorrent)
		if err != nil {
			s.logger.Error().Msgf("Failed to download season %d: %v", seasonInfo.SeasonNumber, err)
			return fmt.Errorf("failed to download season %d: %w", seasonInfo.SeasonNumber, err)
		}
		// Record the final location on the SEASON torrent. (Bug fix: the
		// previous code set ContentPath on the original torrent here,
		// unlike the symlink variant which updates the season record.)
		seasonTorrent.TorrentPath = seasonDownloadPath
		seasonTorrent.ContentPath = seasonDownloadPath
		seasonTorrent.State = "pausedUP"
		// Add the season torrent to storage
		s.torrents.AddOrUpdate(seasonTorrent)
		s.logger.Info().Msgf("Successfully downloaded season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
	}
	s.logger.Debug().Msgf("Deleting original torrent with hash: %s, category: %s", torrent.Hash, torrent.Category)
	s.torrents.Delete(torrent.Hash, torrent.Category, false)
	s.logger.Info().Msgf("Multi-season download processing completed for %s", debridTorrent.Name)
	return nil
}

View File

@@ -1,4 +1,4 @@
package store package wire
import ( import (
"os" "os"
@@ -25,6 +25,7 @@ func createTorrentFromMagnet(req *ImportRequest) *Torrent {
AutoTmm: false, AutoTmm: false,
Ratio: 1, Ratio: 1,
RatioLimit: 1, RatioLimit: 1,
TotalSize: magnet.Size,
SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator), SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator),
} }
return torrent return torrent

View File

@@ -1,8 +1,10 @@
package store package wire
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/go-co-op/gocron/v2"
"github.com/sirrobot01/decypharr/internal/utils"
"time" "time"
) )
@@ -25,58 +27,50 @@ func (s *Store) addToQueue(importReq *ImportRequest) error {
return nil return nil
} }
func (s *Store) StartQueueSchedule(ctx context.Context) error { func (s *Store) StartQueueWorkers(ctx context.Context) error {
// Start the slots processing in a separate goroutine // This function is responsible for starting the scheduled tasks
go func() { if ctx == nil {
if err := s.processSlotsQueue(ctx); err != nil { ctx = context.Background()
s.logger.Error().Err(err).Msg("Error processing slots queue") }
}
}()
// Start the remove stalled torrents processing in a separate goroutine s.scheduler.RemoveByTags("decypharr-store")
go func() {
if err := s.processRemoveStalledTorrents(ctx); err != nil {
s.logger.Error().Err(err).Msg("Error processing remove stalled torrents")
}
}()
return nil if jd, err := utils.ConvertToJobDef("30s"); err != nil {
} s.logger.Error().Err(err).Msg("Failed to convert slots tracking interval to job definition")
} else {
func (s *Store) processSlotsQueue(ctx context.Context) error { // Schedule the job
s.trackAvailableSlots(ctx) // Initial tracking of available slots if _, err := s.scheduler.NewJob(jd, gocron.NewTask(func() {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return nil
case <-ticker.C:
s.trackAvailableSlots(ctx) s.trackAvailableSlots(ctx)
}), gocron.WithContext(ctx)); err != nil {
s.logger.Error().Err(err).Msg("Failed to create slots tracking job")
} else {
s.logger.Trace().Msgf("Slots tracking job scheduled for every %s", "30s")
} }
} }
}
func (s *Store) processRemoveStalledTorrents(ctx context.Context) error { if s.removeStalledAfter > 0 {
if s.removeStalledAfter <= 0 { // Stalled torrents removal job
return nil // No need to remove stalled torrents if the duration is not set if jd, err := utils.ConvertToJobDef("1m"); err != nil {
} s.logger.Error().Err(err).Msg("Failed to convert remove stalled torrents interval to job definition")
} else {
ticker := time.NewTicker(time.Minute) // Schedule the job
defer ticker.Stop() if _, err := s.scheduler.NewJob(jd, gocron.NewTask(func() {
err := s.removeStalledTorrents(ctx)
for { if err != nil {
select { s.logger.Error().Err(err).Msg("Failed to process remove stalled torrents")
case <-ctx.Done(): }
return nil }), gocron.WithContext(ctx)); err != nil {
case <-ticker.C: s.logger.Error().Err(err).Msg("Failed to create remove stalled torrents job")
if err := s.removeStalledTorrents(ctx); err != nil { } else {
s.logger.Error().Err(err).Msg("Error removing stalled torrents") s.logger.Trace().Msgf("Remove stalled torrents job scheduled for every %s", "1m")
} }
} }
} }
// Start the scheduler
s.scheduler.Start()
s.logger.Debug().Msg("Store worker started")
return nil
} }
func (s *Store) trackAvailableSlots(ctx context.Context) { func (s *Store) trackAvailableSlots(ctx context.Context) {
@@ -91,13 +85,17 @@ func (s *Store) trackAvailableSlots(ctx context.Context) {
availableSlots[name] = slots availableSlots[name] = slots
} }
if len(availableSlots) == 0 {
s.logger.Debug().Msg("No debrid clients available or no slots found")
return // No debrid clients or slots available, nothing to process
}
if s.importsQueue.Size() <= 0 { if s.importsQueue.Size() <= 0 {
// Queue is empty, no need to process // Queue is empty, no need to process
return return
} }
for name, slots := range availableSlots { for name, slots := range availableSlots {
s.logger.Debug().Msgf("Available slots for %s: %d", name, slots) s.logger.Debug().Msgf("Available slots for %s: %d", name, slots)
// If slots are available, process the next import request from the queue // If slots are available, process the next import request from the queue
for slots > 0 { for slots > 0 {

View File

@@ -1,4 +1,4 @@
package store package wire
import ( import (
"bytes" "bytes"
@@ -7,6 +7,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request" "github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils" "github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr" "github.com/sirrobot01/decypharr/pkg/arr"
@@ -43,6 +44,8 @@ type ImportRequest struct {
} }
func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, action string, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest { func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, action string, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest {
cfg := config.Get()
callBackUrl = cmp.Or(callBackUrl, cfg.CallbackURL)
return &ImportRequest{ return &ImportRequest{
Id: uuid.New().String(), Id: uuid.New().String(),
Status: "started", Status: "started",

View File

@@ -1,8 +1,9 @@
package store package wire
import ( import (
"cmp" "cmp"
"context" "context"
"github.com/go-co-op/gocron/v2"
"github.com/rs/zerolog" "github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config" "github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger" "github.com/sirrobot01/decypharr/internal/logger"
@@ -26,6 +27,7 @@ type Store struct {
skipPreCache bool skipPreCache bool
downloadSemaphore chan struct{} downloadSemaphore chan struct{}
removeStalledAfter time.Duration // Duration after which stalled torrents are removed removeStalledAfter time.Duration // Duration after which stalled torrents are removed
scheduler gocron.Scheduler
} }
var ( var (
@@ -49,6 +51,11 @@ func Get() *Store {
arrs := arr.NewStorage() arrs := arr.NewStorage()
deb := debrid.NewStorage(rcManager) deb := debrid.NewStorage(rcManager)
scheduler, err := gocron.NewScheduler(gocron.WithLocation(time.Local), gocron.WithGlobalJobOptions(gocron.WithTags("decypharr-store")))
if err != nil {
scheduler, _ = gocron.NewScheduler(gocron.WithGlobalJobOptions(gocron.WithTags("decypharr-store")))
}
instance = &Store{ instance = &Store{
repair: repair.New(arrs, deb), repair: repair.New(arrs, deb),
arr: arrs, arr: arrs,
@@ -56,10 +63,11 @@ func Get() *Store {
rcloneManager: rcManager, rcloneManager: rcManager,
torrents: newTorrentStorage(cfg.TorrentsFile()), torrents: newTorrentStorage(cfg.TorrentsFile()),
logger: logger.Default(), // Use default logger [decypharr] logger: logger.Default(), // Use default logger [decypharr]
refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute, refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 30)) * time.Second,
skipPreCache: qbitCfg.SkipPreCache, skipPreCache: qbitCfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)), downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)),
importsQueue: NewImportQueue(context.Background(), 1000), importsQueue: NewImportQueue(context.Background(), 1000),
scheduler: scheduler,
} }
if cfg.RemoveStalledAfter != "" { if cfg.RemoveStalledAfter != "" {
removeStalledAfter, err := time.ParseDuration(cfg.RemoveStalledAfter) removeStalledAfter, err := time.ParseDuration(cfg.RemoveStalledAfter)
@@ -78,7 +86,10 @@ func Reset() {
} }
if instance.rcloneManager != nil { if instance.rcloneManager != nil {
instance.rcloneManager.Stop() err := instance.rcloneManager.Stop()
if err != nil {
instance.logger.Error().Err(err).Msg("Failed to stop rclone manager")
}
} }
if instance.importsQueue != nil { if instance.importsQueue != nil {
@@ -88,6 +99,11 @@ func Reset() {
// Close the semaphore channel to // Close the semaphore channel to
close(instance.downloadSemaphore) close(instance.downloadSemaphore)
} }
if instance.scheduler != nil {
_ = instance.scheduler.StopJobs()
_ = instance.scheduler.Shutdown()
}
} }
once = sync.Once{} once = sync.Once{}
instance = nil instance = nil
@@ -108,3 +124,7 @@ func (s *Store) Torrents() *TorrentStorage {
func (s *Store) RcloneManager() *rclone.Manager { func (s *Store) RcloneManager() *rclone.Manager {
return s.rcloneManager return s.rcloneManager
} }
func (s *Store) Scheduler() gocron.Scheduler {
return s.scheduler
}

View File

@@ -1,4 +1,4 @@
package store package wire
import ( import (
"cmp" "cmp"
@@ -6,7 +6,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"math" "math"
"os"
"path/filepath" "path/filepath"
"time" "time"
@@ -99,8 +98,7 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
backoff.Reset(nextInterval) backoff.Reset(nextInterval)
} }
} }
var torrentSymlinkPath string var torrentSymlinkPath, torrentRclonePath string
var err error
debridTorrent.Arr = _arr debridTorrent.Arr = _arr
// Check if debrid supports webdav by checking cache // Check if debrid supports webdav by checking cache
@@ -134,11 +132,20 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
}() }()
} }
// Check for multi-season torrent support
isMultiSeason, seasons, err := s.detectMultiSeason(debridTorrent)
if err != nil {
s.logger.Warn().Msgf("Error detecting multi-season for %s: %v", debridTorrent.Name, err)
// Continue with normal processing if detection fails
isMultiSeason = false
}
switch importReq.Action { switch importReq.Action {
case "symlink": case "symlink":
// Symlink action, we will create a symlink to the torrent // Symlink action, we will create a symlink to the torrent
s.logger.Debug().Msgf("Post-Download Action: Symlink") s.logger.Debug().Msgf("Post-Download Action: Symlink")
cache := deb.Cache() cache := deb.Cache()
if cache != nil { if cache != nil {
s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid) s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
// Use webdav to download the file // Use webdav to download the file
@@ -146,14 +153,45 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
onFailed(err) onFailed(err)
return return
} }
}
if isMultiSeason {
s.logger.Info().Msgf("Processing multi-season torrent with %d seasons", len(seasons))
// Remove any torrent already added
err := s.processMultiSeasonSymlinks(torrent, debridTorrent, seasons, importReq)
if err == nil {
// If an error occurred during multi-season processing, send it to normal processing
s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed
go func() {
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
s.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
go func() {
_arr.Refresh()
}()
return
}
}
if cache != nil {
torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentSymlinkPath = filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.Name)) // /mnt/symlinks/{category}/MyTVShow/
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
} else { } else {
// User is using either zurg or debrid webdav // User is using either zurg or debrid webdav
torrentSymlinkPath, err = s.processSymlink(torrent, debridTorrent) // /mnt/symlinks/{category}/MyTVShow/ torrentRclonePath, torrentSymlinkPath, err = s.getTorrentPaths(torrent.SavePath, debridTorrent)
if err != nil {
onFailed(err)
return
}
} }
torrentSymlinkPath, err = s.processSymlink(debridTorrent, torrentRclonePath, torrentSymlinkPath)
if err != nil { if err != nil {
onFailed(err) onFailed(err)
return return
@@ -168,6 +206,19 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
// Download action, we will download the torrent to the specified folder // Download action, we will download the torrent to the specified folder
// Generate download links // Generate download links
s.logger.Debug().Msgf("Post-Download Action: Download") s.logger.Debug().Msgf("Post-Download Action: Download")
if isMultiSeason {
s.logger.Info().Msgf("Processing multi-season download with %d seasons", len(seasons))
err := s.processMultiSeasonDownloads(torrent, debridTorrent, seasons, importReq)
if err != nil {
onFailed(err)
return
}
// Multi-season processing completed successfully
onSuccess(torrent.SavePath)
return
}
if err := client.GetFileDownloadLinks(debridTorrent); err != nil { if err := client.GetFileDownloadLinks(debridTorrent); err != nil {
onFailed(err) onFailed(err)
return return
@@ -241,6 +292,7 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
t.Files = files t.Files = files
t.Debrid = debridTorrent.Debrid t.Debrid = debridTorrent.Debrid
t.Size = totalSize t.Size = totalSize
t.TotalSize = totalSize
t.Completed = sizeCompleted t.Completed = sizeCompleted
t.NumSeeds = debridTorrent.Seeders t.NumSeeds = debridTorrent.Seeders
t.Downloaded = sizeCompleted t.Downloaded = sizeCompleted
@@ -252,7 +304,6 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
t.Eta = eta t.Eta = eta
t.Dlspeed = speed t.Dlspeed = speed
t.Upspeed = speed t.Upspeed = speed
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
return t return t
} }
@@ -267,7 +318,7 @@ func (s *Store) updateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent
} }
} }
t = s.partialTorrentUpdate(t, debridTorrent) t = s.partialTorrentUpdate(t, debridTorrent)
t.ContentPath = t.TorrentPath + string(os.PathSeparator) t.ContentPath = t.TorrentPath
if t.IsReady() { if t.IsReady() {
t.State = "pausedUP" t.State = "pausedUP"

View File

@@ -1,4 +1,4 @@
package store package wire
import ( import (
"encoding/json" "encoding/json"
@@ -167,44 +167,33 @@ func (ts *TorrentStorage) Update(torrent *Torrent) {
func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) { func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) {
ts.mu.Lock() ts.mu.Lock()
defer ts.mu.Unlock() defer ts.mu.Unlock()
key := keyPair(hash, category)
torrent, exists := ts.torrents[key] wireStore := Get()
if !exists && category == "" { for key, torrent := range ts.torrents {
// Remove the torrent without knowing the category if torrent == nil {
for k, t := range ts.torrents { continue
if t.Hash == hash { }
key = k if torrent.Hash == hash && (category == "" || torrent.Category == category) {
torrent = t if torrent.State == "queued" && torrent.ID != "" {
break // Remove the torrent from the import queue if it exists
wireStore.importsQueue.Delete(torrent.ID)
} }
} if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
} dbClient := wireStore.debrid.Client(torrent.Debrid)
if dbClient != nil {
_ = dbClient.DeleteTorrent(torrent.DebridID)
}
}
delete(ts.torrents, key)
if torrent == nil { // Delete the torrent folder
return if torrent.ContentPath != "" {
} err := os.RemoveAll(torrent.ContentPath)
st := Get() if err != nil {
// Check if torrent is queued for download return
}
if torrent.State == "queued" && torrent.ID != "" { }
// Remove the torrent from the import queue if it exists break
st.importsQueue.Delete(torrent.ID)
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
dbClient := st.debrid.Client(torrent.Debrid)
if dbClient != nil {
_ = dbClient.DeleteTorrent(torrent.DebridID)
}
}
delete(ts.torrents, key)
// Delete the torrent folder
if torrent.ContentPath != "" {
err := os.RemoveAll(torrent.ContentPath)
if err != nil {
return
} }
} }
go func() { go func() {
@@ -227,12 +216,11 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
if torrent == nil { if torrent == nil {
continue continue
} }
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if torrent.Hash == hash { if torrent.Hash == hash {
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" { if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
toDelete[torrent.DebridID] = torrent.Debrid toDelete[torrent.DebridID] = torrent.Debrid
} }
@@ -243,6 +231,7 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
return return
} }
} }
break
} }
} }
} }

View File

@@ -1,4 +1,4 @@
package store package wire
import ( import (
"fmt" "fmt"
@@ -72,6 +72,60 @@ type Torrent struct {
sync.Mutex sync.Mutex
} }
func (t *Torrent) Copy() *Torrent {
return &Torrent{
ID: t.ID,
DebridID: t.DebridID,
Debrid: t.Debrid,
TorrentPath: t.TorrentPath,
AddedOn: t.AddedOn,
AmountLeft: t.AmountLeft,
AutoTmm: t.AutoTmm,
Availability: t.Availability,
Category: t.Category,
Completed: t.Completed,
CompletionOn: t.CompletionOn,
ContentPath: t.ContentPath,
DlLimit: t.DlLimit,
Dlspeed: t.Dlspeed,
Downloaded: t.Downloaded,
DownloadedSession: t.DownloadedSession,
Eta: t.Eta,
FlPiecePrio: t.FlPiecePrio,
ForceStart: t.ForceStart,
Hash: t.Hash,
LastActivity: t.LastActivity,
MagnetUri: t.MagnetUri,
MaxRatio: t.MaxRatio,
MaxSeedingTime: t.MaxSeedingTime,
Name: t.Name,
NumComplete: t.NumComplete,
NumIncomplete: t.NumIncomplete,
NumLeechs: t.NumLeechs,
NumSeeds: t.NumSeeds,
Priority: t.Priority,
Progress: t.Progress,
Ratio: t.Ratio,
RatioLimit: t.RatioLimit,
SavePath: t.SavePath,
SeedingTimeLimit: t.SeedingTimeLimit,
SeenComplete: t.SeenComplete,
SeqDl: t.SeqDl,
Size: t.Size,
State: t.State,
SuperSeeding: t.SuperSeeding,
Tags: t.Tags,
TimeActive: t.TimeActive,
TotalSize: t.TotalSize,
Tracker: t.Tracker,
UpLimit: t.UpLimit,
Uploaded: t.Uploaded,
UploadedSession: t.UploadedSession,
Upspeed: t.Upspeed,
Source: t.Source,
}
}
func (t *Torrent) IsReady() bool { func (t *Torrent) IsReady() bool {
return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != "" return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != ""
} }

44
pkg/wire/worker.go Normal file
View File

@@ -0,0 +1,44 @@
package wire
import "context"
func (s *Store) StartWorkers(ctx context.Context) {
if ctx == nil {
ctx = context.Background()
}
// Start debrid workers
if err := s.Debrid().StartWorker(ctx); err != nil {
s.logger.Error().Err(err).Msg("Failed to start debrid worker")
} else {
s.logger.Debug().Msg("Started debrid worker")
}
// Cache workers
for _, cache := range s.Debrid().Caches() {
if cache == nil {
continue
}
go func() {
if err := cache.StartWorker(ctx); err != nil {
s.logger.Error().Err(err).Msg("Failed to start debrid cache worker")
} else {
s.logger.Debug().Msgf("Started debrid cache worker for %s", cache.GetConfig().Name)
}
}()
}
// Store queue workers
if err := s.StartQueueWorkers(ctx); err != nil {
s.logger.Error().Err(err).Msg("Failed to start store worker")
} else {
s.logger.Debug().Msg("Started store worker")
}
// Arr workers
if err := s.Arr().StartWorker(ctx); err != nil {
s.logger.Error().Err(err).Msg("Failed to start Arr worker")
} else {
s.logger.Debug().Msg("Started Arr worker")
}
}

View File

@@ -4,6 +4,38 @@ set -e
# Default values # Default values
PUID=${PUID:-1000} PUID=${PUID:-1000}
PGID=${PGID:-1000} PGID=${PGID:-1000}
UMASK=${UMASK:-022}
# Set umask
umask "$UMASK"
# Function to create directories and files
setup_directories() {
# Ensure directories exist
mkdir -p /app/logs /app/cache 2>/dev/null || true
# Create log file if it doesn't exist
touch /app/logs/decypharr.log 2>/dev/null || true
# Try to set permissions if possible
chmod 755 /app 2>/dev/null || true
chmod 666 /app/logs/decypharr.log 2>/dev/null || true
}
# Check if we're running as root
if [ "$(id -u)" != "0" ]; then
echo "Running as non-root user $(id -u):$(id -g) with umask $UMASK"
# Try to create directories as the current user
setup_directories
export USER="$(id -un)"
export HOME="/app"
exec "$@"
fi
echo "Running as root, setting up user $PUID:$PGID with umask $UMASK"
# Create group if it doesn't exist # Create group if it doesn't exist
if ! getent group "$PGID" > /dev/null 2>&1; then if ! getent group "$PGID" > /dev/null 2>&1; then
@@ -19,7 +51,7 @@ fi
USERNAME=$(getent passwd "$PUID" | cut -d: -f1) USERNAME=$(getent passwd "$PUID" | cut -d: -f1)
GROUPNAME=$(getent group "$PGID" | cut -d: -f1) GROUPNAME=$(getent group "$PGID" | cut -d: -f1)
# Ensure directories exist and have correct permissions # Create directories and set proper ownership
mkdir -p /app/logs /app/cache mkdir -p /app/logs /app/cache
chown -R "$PUID:$PGID" /app chown -R "$PUID:$PGID" /app
chmod 755 /app chmod 755 /app