19 Commits

Author SHA1 Message Date
Mukhtar Akere
22dae9efad Add a new worker that checks if an account is opened
Some checks failed
Release Docker Build / docker (push) Has been cancelled
GoReleaser / goreleaser (push) Has been cancelled
2025-09-17 23:30:45 +01:00
Mukhtar Akere
3f0870cd1c torbox: fix pagination bug, fix download uncached bug 2025-09-16 21:34:58 +01:00
Mukhtar Akere
30b2db06e7 Rewrote account switching, fix some minor bugs here and there 2025-09-16 21:15:24 +01:00
Mukhtar Akere
76f5b85313 Fix issues with dir-cache-time, umask and wrongly set gid,uid, add extra vfs options 2025-09-05 16:11:22 +01:00
Mukhtar Akere
85cd37f29b Revert former beta changes 2025-08-30 04:10:18 +01:00
Mukhtar Akere
aff12c2e4b Fix Added bug in torrent 2025-08-28 03:26:43 +01:00
Mukhtar Akere
d76ca032ab hotfix config update 2025-08-28 01:30:54 +01:00
Mukhtar Akere
8bb786c689 hotfix nil downloadLink 2025-08-27 23:57:49 +01:00
Mukhtar Akere
83058489b6 Add callback URL for post-processing 2025-08-27 13:02:43 +01:00
Mukhtar Akere
267cc2d32b Fix issues with account switching 2025-08-26 15:31:24 +01:00
Mukhtar Akere
eefe8a3901 Hotfix for download link generation and account switching 2025-08-24 21:54:26 +01:00
Mukhtar Akere
618eb73067 - Add support for multi-season imports
- Improve in-memory storage, which reduces memory usage
- Fix issues with rclone integration
2025-08-24 16:25:37 +01:00
Mukhtar Akere
f8667938b6 Add more rclone flags, fix minor issues 2025-08-23 06:00:07 +01:00
Mukhtar Akere
b0a698f15e - Improve memory footprint
- Add batch processing for arr repairs
2025-08-21 03:32:46 +01:00
Mukhtar Akere
2548c21e5b Fix rclone file log
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-08-19 01:01:53 +01:00
Mukhtar Akere
1b03ccefbb Hotfix rclone logging flags 2025-08-19 00:55:43 +01:00
Mukhtar Akere
e3a249a9cc Fix issues with rclone mounting
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-08-18 22:12:26 +01:00
Mukhtar Akere
8696db42d2 - Add more rclone supports
- Add rclone log viewer
- Add more stats to Stats page
- Fix some minor bugs
2025-08-18 01:57:02 +01:00
Mukhtar Akere
742d8fb088 - Fix issues with cache dir
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
- Fix responsiveness issue with navbars
- Support user entry for users running as non-root
- Other minor fixes
2025-08-12 15:14:42 +01:00
76 changed files with 3458 additions and 1785 deletions

View File

@@ -42,8 +42,20 @@ LABEL org.opencontainers.image.authors = "sirrobot01"
LABEL org.opencontainers.image.documentation = "https://github.com/sirrobot01/decypharr/blob/main/README.md"
# Install dependencies including rclone
RUN apk add --no-cache fuse3 ca-certificates su-exec shadow rclone && \
echo "user_allow_other" >> /etc/fuse.conf
RUN apk add --no-cache fuse3 ca-certificates su-exec shadow curl unzip && \
echo "user_allow_other" >> /etc/fuse.conf && \
case "$(uname -m)" in \
x86_64) ARCH=amd64 ;; \
aarch64) ARCH=arm64 ;; \
armv7l) ARCH=arm ;; \
*) echo "Unsupported architecture: $(uname -m)" && exit 1 ;; \
esac && \
curl -O "https://downloads.rclone.org/rclone-current-linux-${ARCH}.zip" && \
unzip "rclone-current-linux-${ARCH}.zip" && \
cp rclone-*/rclone /usr/local/bin/ && \
chmod +x /usr/local/bin/rclone && \
rm -rf rclone-* && \
apk del curl unzip
# Copy binaries and entrypoint
COPY --from=builder /decypharr /usr/bin/decypharr

View File

@@ -7,10 +7,10 @@ import (
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/server"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/version"
"github.com/sirrobot01/decypharr/pkg/web"
"github.com/sirrobot01/decypharr/pkg/webdav"
"github.com/sirrobot01/decypharr/pkg/wire"
"net/http"
"os"
"runtime"
@@ -40,6 +40,7 @@ func Start(ctx context.Context) error {
svcCtx, cancelSvc := context.WithCancel(ctx)
defer cancelSvc()
// Create the logger path if it doesn't exist
for {
cfg := config.Get()
_log := logger.Default()
@@ -76,7 +77,7 @@ func Start(ctx context.Context) error {
reset := func() {
// Reset the store and services
qb.Reset()
store.Reset()
wire.Reset()
// refresh GC
runtime.GC()
}
@@ -150,24 +151,16 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
// Start rclone RC server if enabled
safeGo(func() error {
rcManager := store.Get().RcloneManager()
rcManager := wire.Get().RcloneManager()
if rcManager == nil {
return nil
}
return rcManager.Start(ctx)
})
safeGo(func() error {
arr := store.Get().Arr()
if arr == nil {
return nil
}
return arr.StartSchedule(ctx)
})
if cfg := config.Get(); cfg.Repair.Enabled {
safeGo(func() error {
repair := store.Get().Repair()
repair := wire.Get().Repair()
if repair != nil {
if err := repair.Start(ctx); err != nil {
_log.Error().Err(err).Msg("repair failed")
@@ -178,7 +171,8 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
}
safeGo(func() error {
return store.Get().StartQueueSchedule(ctx)
wire.Get().StartWorkers(ctx)
return nil
})
go func() {

View File

@@ -51,8 +51,12 @@ services:
- /dev/fuse:/dev/fuse:rwm
cap_add:
- SYS_ADMIN
security_opt:
- apparmor:unconfined
environment:
- UMASK=002
- PUID=1000 # Change to your user ID
- PGID=1000 # Change to your group ID
```
**Important Docker Notes:**

1
go.mod
View File

@@ -34,6 +34,7 @@ require (
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/puzpuzpuz/xsync/v4 v4.1.0 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
golang.org/x/sys v0.33.0 // indirect
)

2
go.sum
View File

@@ -186,6 +186,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=

View File

@@ -32,6 +32,7 @@ type Debrid struct {
APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"`
RcloneMountPath string `json:"rclone_mount_path,omitempty"` // Custom rclone mount path for this debrid service
DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
@@ -90,6 +91,7 @@ type Rclone struct {
// Global mount folder where all providers will be mounted as subfolders
Enabled bool `json:"enabled,omitempty"`
MountPath string `json:"mount_path,omitempty"`
RcPort string `json:"rc_port,omitempty"`
// Cache settings
CacheDir string `json:"cache_dir,omitempty"`
@@ -97,14 +99,21 @@ type Rclone struct {
// VFS settings
VfsCacheMode string `json:"vfs_cache_mode,omitempty"` // off, minimal, writes, full
VfsCacheMaxAge string `json:"vfs_cache_max_age,omitempty"` // Maximum age of objects in the cache (default 1h)
VfsDiskSpaceTotal string `json:"vfs_disk_space_total,omitempty"` // Total disk space available for the cache (default off)
VfsCacheMaxSize string `json:"vfs_cache_max_size,omitempty"` // Maximum size of the cache (default off)
VfsCachePollInterval string `json:"vfs_cache_poll_interval,omitempty"` // How often to poll for changes (default 1m)
VfsReadChunkSize string `json:"vfs_read_chunk_size,omitempty"` // Read chunk size (default 128M)
VfsReadChunkSizeLimit string `json:"vfs_read_chunk_size_limit,omitempty"` // Max chunk size (default off)
VfsReadAhead string `json:"vfs_read_ahead,omitempty"` // read ahead size
VfsPollInterval string `json:"vfs_poll_interval,omitempty"` // How often to rclone cleans the cache (default 1m)
BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M)
VfsCacheMinFreeSpace string `json:"vfs_cache_min_free_space,omitempty"`
VfsFastFingerprint bool `json:"vfs_fast_fingerprint,omitempty"`
VfsReadChunkStreams int `json:"vfs_read_chunk_streams,omitempty"`
AsyncRead *bool `json:"async_read,omitempty"` // Use async read for files
Transfers int `json:"transfers,omitempty"` // Number of transfers to use (default 4)
UseMmap bool `json:"use_mmap,omitempty"`
// File system settings
UID uint32 `json:"uid,omitempty"` // User ID for mounted files
GID uint32 `json:"gid,omitempty"` // Group ID for mounted files
@@ -117,6 +126,8 @@ type Rclone struct {
// Performance settings
NoModTime bool `json:"no_modtime,omitempty"` // Don't read/write modification time
NoChecksum bool `json:"no_checksum,omitempty"` // Don't checksum files on upload
LogLevel string `json:"log_level,omitempty"`
}
type Config struct {
@@ -140,6 +151,7 @@ type Config struct {
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
CallbackURL string `json:"callback_url,omitempty"`
}
func (c *Config) JsonFile() string {
@@ -296,6 +308,10 @@ func (c *Config) IsSizeAllowed(size int64) bool {
return true
}
// SecretKey returns the key used to sign/verify session tokens.
// NOTE(review): when DECYPHARR_SECRET_KEY is unset this falls back to a
// secret hardcoded in the source — anyone with the code can forge tokens.
// Consider generating and persisting a random per-install secret instead.
func (c *Config) SecretKey() string {
	return cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
}
func (c *Config) GetAuth() *Auth {
if !c.UseAuth {
return nil
@@ -326,10 +342,7 @@ func (c *Config) NeedsSetup() error {
}
func (c *Config) NeedsAuth() bool {
if c.UseAuth {
return c.GetAuth().Username == ""
}
return false
return !c.UseAuth && c.GetAuth().Username == ""
}
func (c *Config) updateDebrid(d Debrid) Debrid {
@@ -414,6 +427,11 @@ func (c *Config) setDefaults() {
// Rclone defaults
if c.Rclone.Enabled {
c.Rclone.RcPort = cmp.Or(c.Rclone.RcPort, "5572")
if c.Rclone.AsyncRead == nil {
_asyncTrue := true
c.Rclone.AsyncRead = &_asyncTrue
}
c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off")
if c.Rclone.UID == 0 {
c.Rclone.UID = uint32(os.Getuid())
@@ -426,10 +444,14 @@ func (c *Config) setDefaults() {
c.Rclone.GID = uint32(os.Getgid())
}
}
if c.Rclone.Transfers == 0 {
c.Rclone.Transfers = 4 // Default number of transfers
}
if c.Rclone.VfsCacheMode != "off" {
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute
}
c.Rclone.DirCacheTime = cmp.Or(c.Rclone.DirCacheTime, "5m")
c.Rclone.LogLevel = cmp.Or(c.Rclone.LogLevel, "INFO")
}
// Load the auth file
c.Auth = c.GetAuth()

View File

@@ -26,7 +26,7 @@ func GetLogPath() string {
}
}
return filepath.Join(logsDir, "decypharr.log")
return logsDir
}
func New(prefix string) zerolog.Logger {
@@ -34,7 +34,7 @@ func New(prefix string) zerolog.Logger {
level := config.Get().LogLevel
rotatingLogFile := &lumberjack.Logger{
Filename: GetLogPath(),
Filename: filepath.Join(GetLogPath(), "decypharr.log"),
MaxSize: 10,
MaxAge: 15,
Compress: true,

View File

@@ -45,6 +45,8 @@ func getDiscordHeader(event string) string {
return "[Decypharr] Repair Completed, Awaiting action"
case "repair_complete":
return "[Decypharr] Repair Complete"
case "repair_cancelled":
return "[Decypharr] Repair Cancelled"
default:
// split the event string and capitalize the first letter of each word
evs := strings.Split(event, "_")

View File

@@ -298,40 +298,7 @@ func New(options ...ClientOption) *Client {
}
// Configure proxy if needed
if client.proxy != "" {
if strings.HasPrefix(client.proxy, "socks5://") {
// Handle SOCKS5 proxy
socksURL, err := url.Parse(client.proxy)
if err != nil {
client.logger.Error().Msgf("Failed to parse SOCKS5 proxy URL: %v", err)
} else {
auth := &proxy.Auth{}
if socksURL.User != nil {
auth.User = socksURL.User.Username()
password, _ := socksURL.User.Password()
auth.Password = password
}
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
if err != nil {
client.logger.Error().Msgf("Failed to create SOCKS5 dialer: %v", err)
} else {
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialer.Dial(network, addr)
}
}
}
} else {
proxyURL, err := url.Parse(client.proxy)
if err != nil {
client.logger.Error().Msgf("Failed to parse proxy URL: %v", err)
} else {
transport.Proxy = http.ProxyURL(proxyURL)
}
}
} else {
transport.Proxy = http.ProxyFromEnvironment
}
SetProxy(transport, client.proxy)
// Set the transport to the client
client.client.Transport = transport
@@ -417,3 +384,41 @@ func isRetryableError(err error) bool {
// Not a retryable error
return false
}
// SetProxy configures transport to route requests through proxyURL.
// Supported forms: "socks5://[user:pass@]host:port" (installs a custom
// dialer) and standard HTTP(S) proxy URLs. An empty proxyURL falls back
// to the environment settings (HTTP_PROXY/HTTPS_PROXY/NO_PROXY).
// Parse or dialer errors leave the transport unchanged; callers that
// need diagnostics should validate the URL before calling.
func SetProxy(transport *http.Transport, proxyURL string) {
	if proxyURL == "" {
		transport.Proxy = http.ProxyFromEnvironment
		return
	}
	if !strings.HasPrefix(proxyURL, "socks5://") {
		// Plain HTTP/HTTPS proxy.
		parsed, err := url.Parse(proxyURL)
		if err != nil {
			return // invalid URL; keep transport as-is
		}
		transport.Proxy = http.ProxyURL(parsed)
		return
	}
	// SOCKS5 proxy: install a custom dialer instead of transport.Proxy.
	socksURL, err := url.Parse(proxyURL)
	if err != nil {
		return
	}
	auth := &proxy.Auth{}
	if socksURL.User != nil {
		auth.User = socksURL.User.Username()
		password, _ := socksURL.User.Password()
		auth.Password = password
	}
	dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
	if err != nil {
		return
	}
	transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		return dialer.Dial(network, addr)
	}
}

View File

@@ -22,3 +22,15 @@ func Contains(slice []string, value string) bool {
}
return false
}
// Mask obscures the middle of a sensitive string (e.g. an API token),
// keeping a short prefix and suffix so the value stays identifiable in
// logs without exposing the full credential. Strings of 8 characters or
// fewer are fully masked.
func Mask(text string) string {
	switch n := len(text); {
	case n > 12:
		return text[:8] + "****" + text[n-4:]
	case n > 8:
		return text[:4] + "****" + text[n-2:]
	default:
		return "****"
	}
}

View File

@@ -1,7 +1,6 @@
package utils
import (
"context"
"fmt"
"github.com/go-co-op/gocron/v2"
"github.com/robfig/cron/v3"
@@ -10,25 +9,6 @@ import (
"time"
)
func ScheduleJob(ctx context.Context, interval string, loc *time.Location, jobFunc func()) (gocron.Scheduler, error) {
if loc == nil {
loc = time.Local
}
s, err := gocron.NewScheduler(gocron.WithLocation(loc))
if err != nil {
return s, fmt.Errorf("failed to create scheduler: %w", err)
}
jd, err := ConvertToJobDef(interval)
if err != nil {
return s, fmt.Errorf("failed to convert interval to job definition: %w", err)
}
// Schedule the job
if _, err = s.NewJob(jd, gocron.NewTask(jobFunc), gocron.WithContext(ctx)); err != nil {
return s, fmt.Errorf("failed to create job: %w", err)
}
return s, nil
}
// ConvertToJobDef converts a string interval to a gocron.JobDefinition.
func ConvertToJobDef(interval string) (gocron.JobDefinition, error) {
// Parse the interval string

View File

@@ -6,15 +6,16 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"io"
"net/http"
"strings"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
)
// Type is a type of arr
@@ -109,7 +110,7 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
func (a *Arr) Validate() error {
if a.Token == "" || a.Host == "" {
return fmt.Errorf("arr not configured: %s", a.Name)
return nil
}
resp, err := a.Request("GET", "/api/v3/health", nil)
if err != nil {
@@ -190,7 +191,7 @@ func (s *Storage) GetAll() []*Arr {
return arrs
}
func (s *Storage) StartSchedule(ctx context.Context) error {
func (s *Storage) StartWorker(ctx context.Context) error {
ticker := time.NewTicker(10 * time.Second)

View File

@@ -234,6 +234,35 @@ func (a *Arr) searchRadarr(files []ContentFile) error {
}
// SearchMissing kicks off a search for the given missing files,
// delegating to the batched implementation. An empty slice is a no-op.
func (a *Arr) SearchMissing(files []ContentFile) error {
	if len(files) != 0 {
		return a.batchSearchMissing(files)
	}
	return nil
}
// batchSearchMissing searches for missing content in chunks of at most
// 50 files so one oversized request cannot overwhelm the arr API.
// NOTE(review): error handling is asymmetric by design of the original —
// a single-batch call surfaces its error, while multi-batch runs are
// best-effort and always return nil. Confirm callers rely on this.
func (a *Arr) batchSearchMissing(files []ContentFile) error {
	if len(files) == 0 {
		return nil
	}
	const batchSize = 50
	if len(files) <= batchSize {
		// Single batch: surface the error directly.
		return a.searchMissing(files)
	}
	for i := 0; i < len(files); i += batchSize {
		end := min(i+batchSize, len(files))
		// Best-effort: a failed batch must not stop the remaining ones.
		_ = a.searchMissing(files[i:end])
	}
	return nil
}
func (a *Arr) searchMissing(files []ContentFile) error {
switch a.Type {
case Sonarr:
return a.searchSonarr(files)
@@ -245,6 +274,28 @@ func (a *Arr) SearchMissing(files []ContentFile) error {
}
// DeleteFiles removes the given files from the arr in chunks of at most
// 50 ids per request.
// NOTE(review): mirrors batchSearchMissing — a single-batch call surfaces
// its error, multi-batch runs are best-effort and always return nil.
func (a *Arr) DeleteFiles(files []ContentFile) error {
	if len(files) == 0 {
		return nil
	}
	const batchSize = 50
	if len(files) <= batchSize {
		// Single batch: surface the error directly.
		return a.batchDeleteFiles(files)
	}
	for i := 0; i < len(files); i += batchSize {
		end := min(i+batchSize, len(files))
		// Best-effort: a failed batch must not stop the remaining ones.
		_ = a.batchDeleteFiles(files[i:end])
	}
	return nil
}
func (a *Arr) batchDeleteFiles(files []ContentFile) error {
ids := make([]int, 0)
for _, f := range files {
ids = append(ids, f.FileId)

View File

@@ -133,7 +133,7 @@ func (a *Arr) CleanupQueue() error {
messages := q.StatusMessages
if len(messages) > 0 {
for _, m := range messages {
if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible for import in") {
if strings.Contains(strings.Join(m.Messages, " "), "No files found are eligible") {
isMessedUp = true
break
}

View File

@@ -0,0 +1,119 @@
package account
import (
	"fmt"
	"net/http"
	"sync/atomic"
	"time"

	"github.com/puzpuzpuz/xsync/v4"
	"github.com/sirrobot01/decypharr/internal/request"
	"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// Account is a single debrid download credential plus its per-account
// state: cached download links, traffic counters and the disable/strike
// flags used for automatic failover between accounts.
// NOTE(review): the atomic.* fields carry json tags but expose no fields,
// so they marshal as {} — confirm whether these tags are intentional.
type Account struct {
	Debrid string                                 `json:"debrid"` // The debrid service name, e.g. "realdebrid"
	links  *xsync.Map[string, types.DownloadLink] // key is the sliced file link
	Index    int         `json:"index"`    // The index of the account in the config
	Disabled atomic.Bool `json:"disabled"` // true while the account is benched (e.g. bandwidth exhausted)
	Token    string      `json:"token"`    // API token identifying this account
	TrafficUsed atomic.Int64 `json:"traffic_used"` // Traffic used in bytes
	Username    string       `json:"username"`     // Username for the account
	httpClient  *request.Client // per-account HTTP client (auth header, rate limit)
	// Account reactivation tracking
	DisableCount atomic.Int32 `json:"disable_count"` // strikes; compared against MaxDisableCount
}
// Equals reports whether both accounts refer to the same credential:
// the same token on the same debrid service. A nil other never matches.
func (a *Account) Equals(other *Account) bool {
	switch {
	case other == nil:
		return false
	case a.Token != other.Token:
		return false
	default:
		return a.Debrid == other.Debrid
	}
}
// Client returns the HTTP client bound to this account's token.
func (a *Account) Client() *request.Client {
	return a.httpClient
}
// sliceFileLink reduces a RealDebrid file link to its identifying
// 39-character prefix, which is used as the cache key; other providers
// (and links shorter than the prefix) pass through unchanged.
func (a *Account) sliceFileLink(fileLink string) string {
	if a.Debrid == "realdebrid" && len(fileLink) >= 39 {
		return fileLink[:39]
	}
	return fileLink
}
// GetDownloadLink returns the cached download link for fileLink, or
// types.ErrDownloadLinkNotFound when no entry exists for it.
func (a *Account) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
	slicedLink := a.sliceFileLink(fileLink)
	dl, ok := a.links.Load(slicedLink)
	if !ok {
		return types.DownloadLink{}, types.ErrDownloadLinkNotFound
	}
	return dl, nil
}
// StoreDownloadLink caches dl under its (sliced) file-link key,
// overwriting any previous entry for the same file.
func (a *Account) StoreDownloadLink(dl types.DownloadLink) {
	slicedLink := a.sliceFileLink(dl.Link)
	a.links.Store(slicedLink, dl)
}
// DeleteDownloadLink removes the cached entry for fileLink, if any.
func (a *Account) DeleteDownloadLink(fileLink string) {
	slicedLink := a.sliceFileLink(fileLink)
	a.links.Delete(slicedLink)
}
// ClearDownloadLinks drops every cached download link for this account.
func (a *Account) ClearDownloadLinks() {
	a.links.Clear()
}
// DownloadLinksCount reports how many download links are currently cached.
func (a *Account) DownloadLinksCount() int {
	return a.links.Size()
}
// StoreDownloadLinks bulk-caches a set of download links. Values are
// dereferenced and copied; a nil entry would panic, so callers must pass
// only valid links.
func (a *Account) StoreDownloadLinks(dls map[string]*types.DownloadLink) {
	for _, dl := range dls {
		a.StoreDownloadLink(*dl)
	}
}
// MarkDisabled marks the account as disabled and increments the disable
// count — the strike counter consulted by the reactivation worker.
func (a *Account) MarkDisabled() {
	a.Disabled.Store(true)
	a.DisableCount.Add(1)
}
// Reset clears the disabled flag and strike counter, making the account
// fully eligible for use again.
func (a *Account) Reset() {
	a.DisableCount.Store(0)
	a.Disabled.Store(false)
}
// CheckBandwidth probes one cached download link over HTTP to verify the
// account can still serve traffic (used to re-enable accounts benched
// for bandwidth exhaustion). Returns nil when the link answers 200 or
// 206; an error when no link is cached, the request fails, or any other
// status is returned.
func (a *Account) CheckBandwidth() error {
	// Get a one of the download links to check if the account is still valid
	downloadLink := ""
	a.links.Range(func(key string, dl types.DownloadLink) bool {
		if dl.DownloadLink != "" {
			downloadLink = dl.DownloadLink
			return false // stop at the first usable link
		}
		return true
	})
	if downloadLink == "" {
		return fmt.Errorf("no download link found")
	}
	// Let's check the download link status
	req, err := http.NewRequest(http.MethodGet, downloadLink, nil)
	if err != nil {
		return err
	}
	// Bounded client: http.DefaultClient has no timeout, so a stalled CDN
	// could hang the reactivation worker indefinitely.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		return fmt.Errorf("account check failed with status code %d", resp.StatusCode)
	}
	return nil
}

View File

@@ -0,0 +1,239 @@
package account
import (
"fmt"
"slices"
"sync/atomic"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
)
const (
MaxDisableCount = 3
)
// Manager tracks the pool of download accounts for a single debrid
// service: which accounts are enabled, which one is "current", and the
// download links cached on each.
type Manager struct {
	debrid   string                       // debrid service name, e.g. "realdebrid"
	current  atomic.Pointer[Account]      // preferred account for new requests (may hold nil)
	accounts *xsync.Map[string, *Account] // keyed by API token
	logger   zerolog.Logger
}
// NewManager builds the account pool for a debrid service from its
// config: one Account per non-empty download API key, each with its own
// authenticated, rate-limited HTTP client. The first configured account
// becomes the initial "current"; with no keys, current starts nil.
func NewManager(debridConf config.Debrid, downloadRL ratelimit.Limiter, logger zerolog.Logger) *Manager {
	m := &Manager{
		debrid:   debridConf.Name,
		accounts: xsync.NewMap[string, *Account](),
		logger:   logger,
	}
	var firstAccount *Account
	for idx, token := range debridConf.DownloadAPIKeys {
		if token == "" {
			continue // skip blank entries in the key list
		}
		headers := map[string]string{
			"Authorization": fmt.Sprintf("Bearer %s", token),
		}
		account := &Account{
			Debrid: debridConf.Name,
			Token:  token,
			Index:  idx,
			links:  xsync.NewMap[string, types.DownloadLink](),
			httpClient: request.New(
				request.WithRateLimiter(downloadRL),
				request.WithLogger(logger),
				request.WithHeaders(headers),
				request.WithMaxRetries(3),
				request.WithRetryableStatus(429, 447, 502),
				request.WithProxy(debridConf.Proxy),
			),
		}
		m.accounts.Store(token, account)
		if firstAccount == nil {
			firstAccount = account
		}
	}
	m.current.Store(firstAccount)
	return m
}
// Active returns every non-disabled account, ordered by config index.
func (m *Manager) Active() []*Account {
	active := make([]*Account, 0)
	m.accounts.Range(func(_ string, acc *Account) bool {
		if acc.Disabled.Load() {
			return true // skip benched accounts
		}
		active = append(active, acc)
		return true
	})
	slices.SortFunc(active, func(a, b *Account) int {
		return a.Index - b.Index
	})
	return active
}
// All returns every configured account, enabled or not, ordered by
// config index.
func (m *Manager) All() []*Account {
	all := make([]*Account, 0)
	m.accounts.Range(func(_ string, acc *Account) bool {
		all = append(all, acc)
		return true
	})
	slices.SortFunc(all, func(a, b *Account) int { return a.Index - b.Index })
	return all
}
// Current returns the account to use for new requests. Fast path: the
// cached pointer while that account stays enabled. Otherwise the
// lowest-index active account is promoted; when every account is
// disabled the first configured account is used as a last resort, and
// nil is returned only when no accounts exist at all.
func (m *Manager) Current() *Account {
	// Fast path - most common case
	current := m.current.Load()
	if current != nil && !current.Disabled.Load() {
		return current
	}
	// Slow path - find new current account
	activeAccounts := m.Active()
	if len(activeAccounts) == 0 {
		// No active accounts left, try to use disabled ones
		m.logger.Warn().Str("debrid", m.debrid).Msg("No active accounts available, all accounts are disabled")
		allAccounts := m.All()
		if len(allAccounts) == 0 {
			m.logger.Error().Str("debrid", m.debrid).Msg("No accounts configured")
			m.current.Store(nil)
			return nil
		}
		m.current.Store(allAccounts[0])
		return allAccounts[0]
	}
	newCurrent := activeAccounts[0]
	m.current.Store(newCurrent)
	return newCurrent
}
// Disable benches the given account (adding a strike). When it was the
// current account, a replacement is promoted immediately; current
// becomes nil when no active account remains. A nil account is a no-op.
func (m *Manager) Disable(account *Account) {
	if account == nil {
		return
	}
	account.MarkDisabled()
	// If we're disabling the current account, it will be replaced
	// on the next Current() call - no need to proactively update
	current := m.current.Load()
	if current != nil && current.Token == account.Token {
		// Optional: immediately find replacement
		activeAccounts := m.Active()
		if len(activeAccounts) > 0 {
			m.current.Store(activeAccounts[0])
		} else {
			m.current.Store(nil)
		}
	}
}
// Reset re-enables every account (clearing disable flags and strike
// counters) and points current back at the lowest-index active account,
// or nil when none exist.
func (m *Manager) Reset() {
	m.accounts.Range(func(key string, acc *Account) bool {
		acc.Reset()
		return true
	})
	// Set current to first active account
	activeAccounts := m.Active()
	if len(activeAccounts) > 0 {
		m.current.Store(activeAccounts[0])
	} else {
		m.current.Store(nil)
	}
}
// GetAccount looks up an account by its API token. It errors on an empty
// or unknown token; the token value itself is never echoed in the error.
func (m *Manager) GetAccount(token string) (*Account, error) {
	if token == "" {
		return nil, fmt.Errorf("token cannot be empty")
	}
	if acc, ok := m.accounts.Load(token); ok {
		return acc, nil
	}
	return nil, fmt.Errorf("account not found for token")
}
// GetDownloadLink fetches the cached download link for fileLink from the
// current account; errors when no account is active for this service.
func (m *Manager) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
	current := m.Current()
	if current == nil {
		return types.DownloadLink{}, fmt.Errorf("no active account for debrid service %s", m.debrid)
	}
	return current.GetDownloadLink(fileLink)
}
// GetAccountFromDownloadLink resolves the account that produced a
// download link, using the token recorded on the link.
func (m *Manager) GetAccountFromDownloadLink(downloadLink types.DownloadLink) (*Account, error) {
	switch {
	case downloadLink.Link == "":
		return nil, fmt.Errorf("cannot get account from empty download link")
	case downloadLink.Token == "":
		return nil, fmt.Errorf("cannot get account from download link without token")
	}
	return m.GetAccount(downloadLink.Token)
}
// StoreDownloadLink caches a resolved download link on the account that
// generated it. Links missing a URL or token are silently dropped, as
// are links whose token no longer maps to a configured account.
func (m *Manager) StoreDownloadLink(downloadLink types.DownloadLink) {
	if downloadLink.Link == "" || downloadLink.Token == "" {
		return
	}
	acc, err := m.GetAccount(downloadLink.Token)
	if err != nil || acc == nil {
		return
	}
	acc.StoreDownloadLink(downloadLink)
}
// Stats returns one map per account for the stats endpoint. Tokens are
// masked here so full credentials never leave this layer.
func (m *Manager) Stats() []map[string]any {
	stats := make([]map[string]any, 0)
	for _, acc := range m.All() {
		maskedToken := utils.Mask(acc.Token)
		accountDetail := map[string]any{
			"in_use":       acc.Equals(m.Current()),
			"order":        acc.Index,
			"disabled":     acc.Disabled.Load(),
			"token_masked": maskedToken,
			"username":     acc.Username,
			"traffic_used": acc.TrafficUsed.Load(),
			"links_count":  acc.DownloadLinksCount(),
			"debrid":       acc.Debrid,
		}
		stats = append(stats, accountDetail)
	}
	return stats
}
// CheckAndResetBandwidth probes each disabled account that still has
// fewer than MaxDisableCount strikes and re-enables those whose cached
// download links respond again. If any account was revived, current is
// reset to the lowest-index active account so ordering preference holds.
func (m *Manager) CheckAndResetBandwidth() {
	found := false
	m.accounts.Range(func(key string, acc *Account) bool {
		if acc.Disabled.Load() && acc.DisableCount.Load() < MaxDisableCount {
			if err := acc.CheckBandwidth(); err == nil {
				acc.Disabled.Store(false)
				found = true
				m.logger.Info().Str("debrid", m.debrid).Str("token", utils.Mask(acc.Token)).Msg("Re-activated disabled account")
			} else {
				m.logger.Debug().Err(err).Str("debrid", m.debrid).Str("token", utils.Mask(acc.Token)).Msg("Account still disabled")
			}
		}
		return true
	})
	if found {
		// If we re-activated any account, reset current to first active
		activeAccounts := m.Active()
		if len(activeAccounts) > 0 {
			m.current.Store(activeAccounts[0])
		}
	}
}

View File

@@ -0,0 +1,30 @@
package common
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// Client is the provider-agnostic contract implemented by every debrid
// backend (RealDebrid, TorBox, DebridLink, AllDebrid). It covers the
// torrent lifecycle, download-link resolution and account management.
type Client interface {
	// Torrent lifecycle
	SubmitMagnet(tr *types.Torrent) (*types.Torrent, error)
	CheckStatus(tr *types.Torrent) (*types.Torrent, error)
	GetFileDownloadLinks(tr *types.Torrent) error
	GetDownloadLink(tr *types.Torrent, file *types.File) (types.DownloadLink, error)
	DeleteTorrent(torrentId string) error
	IsAvailable(infohashes []string) map[string]bool
	GetDownloadUncached() bool
	UpdateTorrent(torrent *types.Torrent) error
	GetTorrent(torrentId string) (*types.Torrent, error)
	GetTorrents() ([]*types.Torrent, error)
	// Metadata and diagnostics
	Name() string
	Logger() zerolog.Logger
	GetDownloadingStatus() []string
	RefreshDownloadLinks() error
	CheckLink(link string) error
	GetMountPath() string
	// Accounts
	AccountManager() *account.Manager // Returns the active download account/token
	GetProfile() (*types.Profile, error)
	GetAvailableSlots() (int, error)
	SyncAccounts() error // Updates each account's details (traffic, username, etc.)
}

View File

@@ -1,29 +1,35 @@
package debrid
import (
"cmp"
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debridlink"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/rclone"
"sync"
"go.uber.org/ratelimit"
)
type Debrid struct {
cache *debridStore.Cache // Could be nil if not using WebDAV
client types.Client // HTTP client for making requests to the debrid service
client common.Client // HTTP client for making requests to the debrid service
}
func (de *Debrid) Client() types.Client {
func (de *Debrid) Client() common.Client {
return de.client
}
@@ -69,7 +75,7 @@ func NewStorage(rcManager *rclone.Manager) *Storage {
_log := client.Logger()
if dc.UseWebDav {
if cfg.Rclone.Enabled && rcManager != nil {
mounter = rclone.NewMount(dc.Name, webdavUrl, rcManager)
mounter = rclone.NewMount(dc.Name, dc.RcloneMountPath, webdavUrl, rcManager)
}
cache = debridStore.NewDebridCache(dc, client, mounter)
_log.Info().Msg("Debrid Service started with WebDAV")
@@ -98,6 +104,90 @@ func (d *Storage) Debrid(name string) *Debrid {
return nil
}
// StartWorker launches the background maintenance loops for all debrid
// clients: periodic account syncing and bandwidth re-checks of disabled
// accounts. It returns immediately; the workers stop when ctx is done.
func (d *Storage) StartWorker(ctx context.Context) error {
	if ctx == nil {
		ctx = context.Background()
	}
	// Start syncAccounts worker
	go d.syncAccountsWorker(ctx)
	// Start bandwidth reset worker
	go d.checkBandwidthWorker(ctx)
	return nil
}
// checkBandwidthWorker spawns a goroutine that re-checks disabled
// accounts' bandwidth every 30 minutes until ctx is cancelled.
func (d *Storage) checkBandwidthWorker(ctx context.Context) {
	if ctx == nil {
		ctx = context.Background()
	}
	ticker := time.NewTicker(30 * time.Minute)
	go func() {
		// Stop the ticker on shutdown so its resources are released;
		// the original leaked it after ctx cancellation.
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				d.checkAccountBandwidth()
			}
		}
	}()
}
// checkAccountBandwidth runs a bandwidth/reactivation check on every
// debrid client's account manager. The storage write lock is held for
// the whole pass so concurrent resets cannot race the iteration.
func (d *Storage) checkAccountBandwidth() {
	d.mu.Lock()
	defer d.mu.Unlock()
	for _, debrid := range d.debrids {
		if debrid == nil || debrid.client == nil {
			continue
		}
		accountManager := debrid.client.AccountManager()
		if accountManager == nil {
			continue
		}
		accountManager.CheckAndResetBandwidth()
	}
}
// syncAccountsWorker refreshes account details once immediately, then
// every 5 minutes from a background goroutine until ctx is cancelled.
func (d *Storage) syncAccountsWorker(ctx context.Context) {
	if ctx == nil {
		ctx = context.Background()
	}
	_ = d.syncAccounts() // initial sync; per-client errors are logged inside
	ticker := time.NewTicker(5 * time.Minute)
	go func() {
		// Stop the ticker on shutdown so its resources are released;
		// the original leaked it after ctx cancellation.
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				_ = d.syncAccounts()
			}
		}
	}()
}
// syncAccounts refreshes each debrid client's account details (traffic,
// username, etc.). Per-client failures are logged and skipped; the
// method itself always returns nil.
func (d *Storage) syncAccounts() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	for name, debrid := range d.debrids {
		if debrid == nil || debrid.client == nil {
			continue
		}
		_log := debrid.client.Logger()
		if err := debrid.client.SyncAccounts(); err != nil {
			_log.Error().Err(err).Msgf("Failed to sync account for %s", name)
			continue
		}
	}
	return nil
}
func (d *Storage) Debrids() map[string]*Debrid {
d.mu.RLock()
defer d.mu.RUnlock()
@@ -110,7 +200,7 @@ func (d *Storage) Debrids() map[string]*Debrid {
return debridsCopy
}
func (d *Storage) Client(name string) types.Client {
func (d *Storage) Client(name string) common.Client {
d.mu.RLock()
defer d.mu.RUnlock()
if client, exists := d.debrids[name]; exists {
@@ -135,10 +225,10 @@ func (d *Storage) Reset() {
d.lastUsed = ""
}
func (d *Storage) Clients() map[string]types.Client {
func (d *Storage) Clients() map[string]common.Client {
d.mu.RLock()
defer d.mu.RUnlock()
clientsCopy := make(map[string]types.Client)
clientsCopy := make(map[string]common.Client)
for name, debrid := range d.debrids {
if debrid != nil && debrid.client != nil {
clientsCopy[name] = debrid.client
@@ -159,10 +249,10 @@ func (d *Storage) Caches() map[string]*debridStore.Cache {
return cachesCopy
}
func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client {
func (d *Storage) FilterClients(filter func(common.Client) bool) map[string]common.Client {
d.mu.Lock()
defer d.mu.Unlock()
filteredClients := make(map[string]types.Client)
filteredClients := make(map[string]common.Client)
for name, client := range d.debrids {
if client != nil && filter(client.client) {
filteredClients[name] = client.client
@@ -171,18 +261,28 @@ func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types
return filteredClients
}
func createDebridClient(dc config.Debrid) (types.Client, error) {
func createDebridClient(dc config.Debrid) (common.Client, error) {
rateLimits := map[string]ratelimit.Limiter{}
mainRL := request.ParseRateLimit(dc.RateLimit)
repairRL := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
downloadRL := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
rateLimits["main"] = mainRL
rateLimits["repair"] = repairRL
rateLimits["download"] = downloadRL
switch dc.Name {
case "realdebrid":
return realdebrid.New(dc)
return realdebrid.New(dc, rateLimits)
case "torbox":
return torbox.New(dc)
return torbox.New(dc, rateLimits)
case "debridlink":
return debrid_link.New(dc)
return debridlink.New(dc, rateLimits)
case "alldebrid":
return alldebrid.New(dc)
return alldebrid.New(dc, rateLimits)
default:
return realdebrid.New(dc)
return realdebrid.New(dc, rateLimits)
}
}
@@ -197,7 +297,7 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet
Files: make(map[string]types.File),
}
clients := store.FilterClients(func(c types.Client) bool {
clients := store.FilterClients(func(c common.Client) bool {
if selectedDebrid != "" && c.Name() != selectedDebrid {
return false
}

View File

@@ -3,28 +3,32 @@ package alldebrid
import (
"encoding/json"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"net/http"
gourl "net/url"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
)
type AllDebrid struct {
name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
accountsManager *account.Manager
autoExpiresLinksAfter time.Duration
DownloadUncached bool
client *request.Client
Profile *types.Profile `json:"profile"`
MountPath string
logger zerolog.Logger
@@ -33,12 +37,7 @@ type AllDebrid struct {
minimumFreeSlot int
}
func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
return nil, nil
}
func New(dc config.Debrid) (*AllDebrid, error) {
rl := request.ParseRateLimit(dc.RateLimit)
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*AllDebrid, error) {
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -47,7 +46,7 @@ func New(dc config.Debrid) (*AllDebrid, error) {
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(rl),
request.WithRateLimiter(ratelimits["main"]),
request.WithProxy(dc.Proxy),
)
@@ -59,7 +58,7 @@ func New(dc config.Debrid) (*AllDebrid, error) {
name: "alldebrid",
Host: "http://api.alldebrid.com/v4.1",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
@@ -105,11 +104,12 @@ func (ad *AllDebrid) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error
}
magnets := data.Data.Magnets
if len(magnets) == 0 {
return nil, fmt.Errorf("error adding torrent")
return nil, fmt.Errorf("error adding torrent. No magnets returned")
}
magnet := magnets[0]
torrentId := strconv.Itoa(magnet.ID)
torrent.Id = torrentId
torrent.Added = time.Now().Format(time.RFC3339)
return torrent, nil
}
@@ -296,7 +296,7 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
linksCh := make(chan *types.DownloadLink, len(t.Files))
linksCh := make(chan types.DownloadLink, len(t.Files))
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
@@ -309,10 +309,6 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
errCh <- err
return
}
if link == nil {
errCh <- fmt.Errorf("download link is empty")
return
}
linksCh <- link
file.DownloadLink = link
filesCh <- file
@@ -330,17 +326,14 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
}
// Collect download links
links := make(map[string]*types.DownloadLink, len(t.Files))
links := make(map[string]types.DownloadLink, len(t.Files))
for link := range linksCh {
if link == nil {
if link.Empty() {
continue
}
links[link.Link] = link
}
// Update the files with download links
ad.accounts.SetDownloadLinks(links)
// Check for errors
for err := range errCh {
if err != nil {
@@ -352,7 +345,7 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
return nil
}
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/link/unlock", ad.Host)
query := gourl.Values{}
query.Add("link", file.Link)
@@ -360,22 +353,23 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := ad.client.MakeRequest(req)
if err != nil {
return nil, err
return types.DownloadLink{}, err
}
var data DownloadLink
if err = json.Unmarshal(resp, &data); err != nil {
return nil, err
return types.DownloadLink{}, err
}
if data.Error != nil {
return nil, fmt.Errorf("error getting download link: %s", data.Error.Message)
return types.DownloadLink{}, fmt.Errorf("error getting download link: %s", data.Error.Message)
}
link := data.Data.Link
if link == "" {
return nil, fmt.Errorf("download link is empty")
return types.DownloadLink{}, fmt.Errorf("download link is empty")
}
now := time.Now()
return &types.DownloadLink{
dl := types.DownloadLink{
Token: ad.APIKey,
Link: file.Link,
DownloadLink: link,
Id: data.Data.Id,
@@ -383,7 +377,10 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
Filename: file.Name,
Generated: now,
ExpiresAt: now.Add(ad.autoExpiresLinksAfter),
}, nil
}
// Set the download link in the account
ad.accountsManager.StoreDownloadLink(dl)
return dl, nil
}
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
@@ -419,8 +416,8 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
return torrents, nil
}
func (ad *AllDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
return nil, nil
func (ad *AllDebrid) RefreshDownloadLinks() error {
return nil
}
func (ad *AllDebrid) GetDownloadingStatus() []string {
@@ -439,16 +436,64 @@ func (ad *AllDebrid) GetMountPath() string {
return ad.MountPath
}
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
return nil
}
func (ad *AllDebrid) GetAvailableSlots() (int, error) {
// This function is a placeholder for AllDebrid
//TODO: Implement the logic to check available slots for AllDebrid
return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid")
}
func (ad *AllDebrid) Accounts() *types.Accounts {
return ad.accounts
// GetProfile returns the AllDebrid user profile, fetching it from the
// /user endpoint on first use and serving the memoized value afterwards.
//
// NOTE(review): ad.Profile is read and written without a lock; confirm
// callers are serialized if this can run concurrently.
func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
	// Serve the cached profile when it was already fetched.
	if ad.Profile != nil {
		return ad.Profile, nil
	}
	url := fmt.Sprintf("%s/user", ad.Host)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := ad.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var res UserProfileResponse
	err = json.Unmarshal(resp, &res)
	if err != nil {
		ad.logger.Error().Err(err).Msgf("Error unmarshalling user profile")
		return nil, err
	}
	// The API signals failure via the "status" field rather than HTTP codes.
	if res.Status != "success" {
		message := "unknown error"
		if res.Error != nil {
			message = res.Error.Message
		}
		return nil, fmt.Errorf("error getting user profile: %s", message)
	}
	userData := res.Data.User
	// premiumUntil is a unix timestamp in seconds.
	expiration := time.Unix(userData.PremiumUntil, 0)
	profile := &types.Profile{
		Id:         1, // UserProfileResponse carries no numeric user id, so a constant is used
		Name:       ad.name,
		Username:   userData.Username,
		Email:      userData.Email,
		Points:     userData.FidelityPoints,
		Premium:    userData.PremiumUntil,
		Expiration: expiration,
	}
	// Collapse the account flags into one profile type; premium wins over trial.
	if userData.IsPremium {
		profile.Type = "premium"
	} else if userData.IsTrial {
		profile.Type = "trial"
	} else {
		profile.Type = "free"
	}
	// Memoize for subsequent calls.
	ad.Profile = profile
	return profile, nil
}
// AccountManager exposes the client's account manager; the storage layer
// uses it for bandwidth checks and download-link bookkeeping.
func (ad *AllDebrid) AccountManager() *account.Manager {
	return ad.accountsManager
}
// SyncAccounts is a no-op for AllDebrid; account syncing is not
// implemented for this provider.
func (ad *AllDebrid) SyncAccounts() error {
	return nil
}

View File

@@ -112,3 +112,22 @@ func (m *Magnets) UnmarshalJSON(data []byte) error {
}
return fmt.Errorf("magnets: unsupported JSON format")
}
// UserProfileResponse models the AllDebrid /user endpoint envelope.
// Status is "success" on success; otherwise Error carries the details.
type UserProfileResponse struct {
	Status string         `json:"status"`
	Error  *errorResponse `json:"error"`
	Data   struct {
		User struct {
			Username     string `json:"username"`
			Email        string `json:"email"`
			IsPremium    bool   `json:"isPremium"`
			IsSubscribed bool   `json:"isSubscribed"`
			IsTrial      bool   `json:"isTrial"`
			// PremiumUntil is a unix timestamp in seconds (decoded with
			// time.Unix in GetProfile).
			PremiumUntil         int64          `json:"premiumUntil"`
			Lang                 string         `json:"lang"`
			FidelityPoints       int            `json:"fidelityPoints"`
			LimitedHostersQuotas map[string]int `json:"limitedHostersQuotas"`
			Notifications        []string       `json:"notifications"`
		} `json:"user"`
	} `json:"data"`
}

View File

@@ -1,16 +1,19 @@
package debrid_link
package debridlink
import (
"bytes"
"encoding/json"
"fmt"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"time"
"go.uber.org/ratelimit"
"net/http"
"strings"
@@ -20,7 +23,7 @@ type DebridLink struct {
name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
accountsManager *account.Manager
DownloadUncached bool
client *request.Client
@@ -30,11 +33,11 @@ type DebridLink struct {
logger zerolog.Logger
checkCached bool
addSamples bool
Profile *types.Profile `json:"profile,omitempty"`
}
func New(dc config.Debrid) (*DebridLink, error) {
rl := request.ParseRateLimit(dc.RateLimit)
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*DebridLink, error) {
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
@@ -43,7 +46,7 @@ func New(dc config.Debrid) (*DebridLink, error) {
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(rl),
request.WithRateLimiter(ratelimits["main"]),
request.WithProxy(dc.Proxy),
)
@@ -55,7 +58,7 @@ func New(dc config.Debrid) (*DebridLink, error) {
name: "debridlink",
Host: "https://debrid-link.com/api/v2",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
@@ -66,10 +69,6 @@ func New(dc config.Debrid) (*DebridLink, error) {
}, nil
}
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
return nil, nil
}
func (dl *DebridLink) Name() string {
return dl.name
}
@@ -223,7 +222,6 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
cfg := config.Get()
links := make(map[string]*types.DownloadLink)
now := time.Now()
for _, f := range data.Files {
if !cfg.IsSizeAllowed(f.Size) {
@@ -237,19 +235,19 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
Path: f.Name,
Link: f.DownloadURL,
}
link := &types.DownloadLink{
link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
links[file.Link] = link
file.DownloadLink = link
t.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
}
dl.accounts.SetDownloadLinks(links)
return nil
}
@@ -286,8 +284,6 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
t.MountPath = dl.MountPath
t.Debrid = dl.name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
links := make(map[string]*types.DownloadLink)
now := time.Now()
for _, f := range data.Files {
file := types.File{
@@ -299,20 +295,19 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
Link: f.DownloadURL,
Generated: now,
}
link := &types.DownloadLink{
link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
links[file.Link] = link
file.DownloadLink = link
t.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
}
dl.accounts.SetDownloadLinks(links)
return t, nil
}
@@ -355,12 +350,12 @@ func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error {
return nil
}
func (dl *DebridLink) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
return nil, nil
func (dl *DebridLink) RefreshDownloadLinks() error {
return nil
}
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
return dl.accounts.GetDownloadLink(file.Link)
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
return dl.accountsManager.GetDownloadLink(file.Link)
}
func (dl *DebridLink) GetDownloadingStatus() []string {
@@ -405,7 +400,6 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
}
data := *res.Value
links := make(map[string]*types.DownloadLink)
if len(data) == 0 {
return torrents, nil
@@ -441,20 +435,20 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
Path: f.Name,
Link: f.DownloadURL,
}
link := &types.DownloadLink{
link := types.DownloadLink{
Token: dl.APIKey,
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
links[file.Link] = link
file.DownloadLink = link
torrent.Files[f.Name] = file
dl.accountsManager.StoreDownloadLink(link)
}
torrents = append(torrents, torrent)
}
dl.accounts.SetDownloadLinks(links)
return torrents, nil
}
@@ -467,15 +461,60 @@ func (dl *DebridLink) GetMountPath() string {
return dl.MountPath
}
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
return nil
}
func (dl *DebridLink) GetAvailableSlots() (int, error) {
//TODO: Implement the logic to check available slots for DebridLink
return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink")
}
func (dl *DebridLink) Accounts() *types.Accounts {
return dl.accounts
// GetProfile returns the Debrid-Link account profile, fetching
// /account/infos on first use and memoizing the result in dl.Profile.
//
// NOTE(review): dl.Profile is cached without locking — confirm callers
// are serialized if this can run concurrently.
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
	if dl.Profile != nil {
		return dl.Profile, nil
	}
	url := fmt.Sprintf("%s/account/infos", dl.Host)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := dl.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var res UserInfo
	err = json.Unmarshal(resp, &res)
	if err != nil {
		dl.logger.Error().Err(err).Msgf("Error unmarshalling user info")
		return nil, err
	}
	if !res.Success || res.Value == nil {
		return nil, fmt.Errorf("error getting user info")
	}
	data := *res.Value
	// NOTE(review): premiumLeft is interpreted here as a unix timestamp;
	// the field name suggests "seconds remaining" instead — verify
	// against the Debrid-Link API documentation.
	expiration := time.Unix(data.PremiumLeft, 0)
	profile := &types.Profile{
		Id:         1, // no numeric id in the UserInfo payload; constant placeholder
		Username:   data.Username,
		Name:       dl.name,
		Email:      data.Email,
		Points:     data.Points,
		Premium:    data.PremiumLeft,
		Expiration: expiration,
	}
	// NOTE(review): time.Unix(0, 0) is not the zero time.Time, so this
	// branch may be unreachable — confirm the intent.
	if expiration.IsZero() {
		profile.Expiration = time.Now().AddDate(1, 0, 0) // Default to 1 year if no expiration
	}
	if data.PremiumLeft > 0 {
		profile.Type = "premium"
	} else {
		profile.Type = "free"
	}
	// Cache for subsequent calls.
	dl.Profile = profile
	return profile, nil
}
// AccountManager exposes the client's account manager; the storage layer
// uses it for bandwidth checks and download-link bookkeeping.
func (dl *DebridLink) AccountManager() *account.Manager {
	return dl.accountsManager
}
// SyncAccounts is a no-op for Debrid-Link; account syncing is not
// implemented for this provider.
func (dl *DebridLink) SyncAccounts() error {
	return nil
}

View File

@@ -1,4 +1,4 @@
package debrid_link
package debridlink
type APIResponse[T any] struct {
Success bool `json:"success"`
@@ -43,3 +43,12 @@ type _torrentInfo struct {
type torrentInfo APIResponse[[]_torrentInfo]
type SubmitTorrentInfo APIResponse[_torrentInfo]
// UserInfo is the Debrid-Link /account/infos response payload, wrapped
// in the generic success/value APIResponse envelope.
type UserInfo APIResponse[struct {
	Username    string `json:"username"`
	Email       string `json:"email"`
	AccountType int    `json:"accountType"`
	// PremiumLeft is consumed by GetProfile; see the review note there
	// about timestamp-vs-duration interpretation.
	PremiumLeft  int64 `json:"premiumLeft"`
	Points       int   `json:"pts"`
	Trafficshare int   `json:"trafficshare"`
}]

View File

@@ -2,11 +2,9 @@ package realdebrid
import (
"bytes"
"cmp"
"encoding/json"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
gourl "net/url"
@@ -16,6 +14,10 @@ import (
"sync"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"go.uber.org/ratelimit"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
@@ -28,12 +30,11 @@ type RealDebrid struct {
name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
APIKey string
accountsManager *account.Manager
DownloadUncached bool
client *request.Client
downloadClient *request.Client
repairClient *request.Client
autoExpiresLinksAfter time.Duration
@@ -49,10 +50,7 @@ type RealDebrid struct {
limit int
}
func New(dc config.Debrid) (*RealDebrid, error) {
rl := request.ParseRateLimit(dc.RateLimit)
repairRl := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
downloadRl := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*RealDebrid, error) {
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -68,27 +66,20 @@ func New(dc config.Debrid) (*RealDebrid, error) {
name: "realdebrid",
Host: "https://api.real-debrid.com/rest/1.0",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
UnpackRar: dc.UnpackRar,
client: request.New(
request.WithHeaders(headers),
request.WithRateLimiter(rl),
request.WithRateLimiter(ratelimits["main"]),
request.WithLogger(_log),
request.WithMaxRetries(10),
request.WithRetryableStatus(429, 502),
request.WithProxy(dc.Proxy),
),
downloadClient: request.New(
request.WithRateLimiter(downloadRl),
request.WithLogger(_log),
request.WithMaxRetries(10),
request.WithRetryableStatus(429, 447, 502),
request.WithProxy(dc.Proxy),
),
repairClient: request.New(
request.WithRateLimiter(repairRl),
request.WithRateLimiter(ratelimits["repair"]),
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithMaxRetries(4),
@@ -161,6 +152,23 @@ func (r *RealDebrid) getSelectedFiles(t *types.Torrent, data torrentInfo) (map[s
return files, nil
}
// handleRarFallback represents a RAR torrent that cannot (or should not)
// be unpacked as a single synthetic "<name>.rar" file entry, so the
// torrent remains usable without extraction.
//
// Returns an error instead of panicking when the torrent exposes no links.
func (r *RealDebrid) handleRarFallback(t *types.Torrent, data torrentInfo) (map[string]types.File, error) {
	// Guard: data.Links[0] below would panic on an empty slice.
	if len(data.Links) == 0 {
		return nil, fmt.Errorf("no links available for RAR fallback on torrent %s", t.Name)
	}
	files := make(map[string]types.File)
	file := types.File{
		TorrentId: t.Id,
		Id:        "0",
		Name:      t.Name + ".rar",
		Size:      data.Bytes,
		IsRar:     true,
		ByteRange: nil,
		Path:      t.Name + ".rar",
		Link:      data.Links[0],
		Generated: time.Now(),
	}
	files[file.Name] = file
	return files, nil
}
// handleRarArchive processes RAR archives with multiple files
func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) {
// This will block if 2 RAR operations are already in progress
@@ -172,21 +180,8 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select
files := make(map[string]types.File)
if !r.UnpackRar {
r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s", t.Name)
// Create a single file representing the RAR archive
file := types.File{
TorrentId: t.Id,
Id: "0",
Name: t.Name + ".rar",
Size: 0,
IsRar: true,
ByteRange: nil,
Path: t.Name + ".rar",
Link: data.Links[0],
Generated: time.Now(),
}
files[file.Name] = file
return files, nil
r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s. Falling back to single file representation.", t.Name)
return r.handleRarFallback(t, data)
}
r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name)
@@ -194,20 +189,23 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select
downloadLinkObj, err := r.GetDownloadLink(t, linkFile)
if err != nil {
return nil, fmt.Errorf("failed to get download link for RAR file: %w", err)
r.logger.Debug().Err(err).Msgf("Error getting download link for RAR file: %s. Falling back to single file representation.", t.Name)
return r.handleRarFallback(t, data)
}
dlLink := downloadLinkObj.DownloadLink
reader, err := rar.NewReader(dlLink)
if err != nil {
return nil, fmt.Errorf("failed to create RAR reader: %w", err)
r.logger.Debug().Err(err).Msgf("Error creating RAR reader for %s. Falling back to single file representation.", t.Name)
return r.handleRarFallback(t, data)
}
rarFiles, err := reader.GetFiles()
if err != nil {
return nil, fmt.Errorf("failed to read RAR files: %w", err)
r.logger.Debug().Err(err).Msgf("Error reading RAR files for %s. Falling back to single file representation.", t.Name)
return r.handleRarFallback(t, data)
}
// Create lookup map for faster matching
@@ -232,7 +230,11 @@ func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, select
r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name())
}
}
if len(files) == 0 {
r.logger.Warn().Msgf("No valid files found in RAR archive for torrent: %s", t.Name)
return r.handleRarFallback(t, data)
}
r.logger.Info().Msgf("Unpacked RAR archive for torrent: %s with %d files", t.Name, len(files))
return files, nil
}
@@ -349,7 +351,9 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
bodyBytes, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
}
defer resp.Body.Close()
defer func(Body io.ReadCloser) {
_ = Body.Close()
}(resp.Body)
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("reading response body: %w", err)
@@ -360,6 +364,7 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
t.Id = data.Id
t.Debrid = r.name
t.MountPath = r.MountPath
t.Added = time.Now().Format(time.RFC3339)
return t, nil
}
@@ -395,6 +400,7 @@ func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
t.Id = data.Id
t.Debrid = r.name
t.MountPath = r.MountPath
t.Added = time.Now().Format(time.RFC3339)
return t, nil
}
@@ -476,7 +482,6 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
t.Links = data.Links
t.MountPath = r.MountPath
t.Debrid = r.name
t.Added = data.Added
t.Files, _ = r.getSelectedFiles(t, data) // Get selected files
return nil
@@ -508,6 +513,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
t.Status = status
t.Debrid = r.name
t.MountPath = r.MountPath
t.Added = data.Added
if status == "waiting_files_selection" {
t.Files = r.getTorrentFiles(t, data)
if len(t.Files) == 0 {
@@ -568,7 +574,7 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
var firstErr error
files := make(map[string]types.File)
links := make(map[string]*types.DownloadLink)
links := make(map[string]types.DownloadLink)
_files := t.GetFiles()
wg.Add(len(_files))
@@ -586,7 +592,7 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
mu.Unlock()
return
}
if link == nil {
if link.Empty() {
mu.Lock()
if firstErr == nil {
firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name)
@@ -596,7 +602,6 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
}
file.DownloadLink = link
mu.Lock()
files[file.Name] = file
links[link.Link] = link
@@ -611,7 +616,6 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
}
// Add links to cache
r.accounts.SetDownloadLinks(links)
t.Files = files
return nil
}
@@ -632,8 +636,9 @@ func (r *RealDebrid) CheckLink(link string) error {
return nil
}
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
func (r *RealDebrid) getDownloadLink(account *account.Account, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
emptyLink := types.DownloadLink{}
_link := file.Link
if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 {
_link = file.Link[0:39]
@@ -642,70 +647,64 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
"link": {_link},
}
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.downloadClient.Do(req)
resp, err := account.Client().Do(req)
if err != nil {
return nil, err
return emptyLink, err
}
defer resp.Body.Close()
defer func(Body io.ReadCloser) {
_ = Body.Close()
}(resp.Body)
if resp.StatusCode != http.StatusOK {
// Read the response body to get the error message
b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
return emptyLink, err
}
var data ErrorResponse
if err = json.Unmarshal(b, &data); err != nil {
return nil, fmt.Errorf("error unmarshalling %d || %s \n %s", resp.StatusCode, err, string(b))
return emptyLink, fmt.Errorf("error unmarshalling %d || %s \n %s", resp.StatusCode, err, string(b))
}
switch data.ErrorCode {
case 19:
return nil, utils.HosterUnavailableError // File has been removed
case 23:
return nil, utils.TrafficExceededError
case 24:
return nil, utils.HosterUnavailableError // Link has been nerfed
case 34:
return nil, utils.TrafficExceededError // traffic exceeded
case 35:
return nil, utils.HosterUnavailableError
case 36:
return nil, utils.TrafficExceededError // traffic exceeded
case 19, 24, 35:
return emptyLink, utils.HosterUnavailableError // File has been removed
case 23, 34, 36:
return emptyLink, utils.TrafficExceededError
default:
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
return emptyLink, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
}
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
return emptyLink, err
}
var data UnrestrictResponse
if err = json.Unmarshal(b, &data); err != nil {
return nil, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err)
return emptyLink, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err)
}
if data.Download == "" {
return nil, fmt.Errorf("realdebrid API error: download link not found")
return emptyLink, fmt.Errorf("realdebrid API error: download link not found")
}
now := time.Now()
return &types.DownloadLink{
dl := types.DownloadLink{
Token: account.Token,
Filename: data.Filename,
Size: data.Filesize,
Link: data.Link,
DownloadLink: data.Download,
Generated: now,
ExpiresAt: now.Add(r.autoExpiresLinksAfter),
}, nil
}
// Store the link in the account
account.StoreDownloadLink(dl)
return dl, nil
}
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
accounts := r.accounts.All()
for _, account := range accounts {
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
downloadLink, err := r._getDownloadLink(file)
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
accounts := r.accountsManager.Active()
for _, _account := range accounts {
downloadLink, err := r.getDownloadLink(_account, file)
if err == nil {
return downloadLink, nil
}
@@ -716,16 +715,16 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
retries = 5
} else {
// If the error is not traffic exceeded, return the error
return nil, err
return downloadLink, err
}
backOff := 1 * time.Second
for retries > 0 {
downloadLink, err = r._getDownloadLink(file)
downloadLink, err = r.getDownloadLink(_account, file)
if err == nil {
return downloadLink, nil
}
if !errors.Is(err, utils.TrafficExceededError) {
return nil, err
return downloadLink, err
}
// Add a delay before retrying
time.Sleep(backOff)
@@ -733,7 +732,7 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
retries--
}
}
return nil, fmt.Errorf("realdebrid API error: download link not found")
return types.DownloadLink{}, fmt.Errorf("realdebrid API error: used all active accounts")
}
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
@@ -830,49 +829,47 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
return allTorrents, nil
}
func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
links := make(map[string]*types.DownloadLink)
offset := 0
limit := 1000
func (r *RealDebrid) RefreshDownloadLinks() error {
accounts := r.accountsManager.All()
accounts := r.accounts.All()
if len(accounts) < 1 {
// No active download keys. It's likely that the key has reached bandwidth limit
return links, fmt.Errorf("no active download keys")
}
activeAccount := accounts[0]
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", activeAccount.Token))
for {
dl, err := r._getDownloads(offset, limit)
if err != nil {
break
for _, _account := range accounts {
if _account == nil || _account.Token == "" {
continue
}
if len(dl) == 0 {
break
}
for _, d := range dl {
if _, exists := links[d.Link]; exists {
// This is ordered by date, so we can skip the rest
continue
offset := 0
limit := 1000
links := make(map[string]*types.DownloadLink)
for {
dl, err := r.getDownloadLinks(_account, offset, limit)
if err != nil {
break
}
links[d.Link] = &d
if len(dl) == 0 {
break
}
for _, d := range dl {
if _, exists := links[d.Link]; exists {
// This is ordered by date, so we can skip the rest
continue
}
links[d.Link] = &d
}
offset += len(dl)
}
offset += len(dl)
_account.StoreDownloadLinks(links)
}
return links, nil
return nil
}
func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink, error) {
func (r *RealDebrid) getDownloadLinks(account *account.Account, offset int, limit int) ([]types.DownloadLink, error) {
url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit)
if offset > 0 {
url = fmt.Sprintf("%s&offset=%d", url, offset)
}
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.downloadClient.MakeRequest(req)
resp, err := account.Client().MakeRequest(req)
if err != nil {
return nil, err
}
@@ -883,6 +880,7 @@ func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink,
links := make([]types.DownloadLink, 0)
for _, d := range data {
links = append(links, types.DownloadLink{
Token: account.Token,
Filename: d.Filename,
Size: d.Filesize,
Link: d.Link,
@@ -908,15 +906,6 @@ func (r *RealDebrid) GetMountPath() string {
return r.MountPath
}
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
req, _ := http.NewRequest(http.MethodDelete, url, nil)
if _, err := r.downloadClient.MakeRequest(req); err != nil {
return err
}
return nil
}
func (r *RealDebrid) GetProfile() (*types.Profile, error) {
if r.Profile != nil {
return r.Profile, nil
@@ -933,6 +922,7 @@ func (r *RealDebrid) GetProfile() (*types.Profile, error) {
return nil, err
}
profile := &types.Profile{
Name: r.name,
Id: data.Id,
Username: data.Username,
Email: data.Email,
@@ -946,8 +936,7 @@ func (r *RealDebrid) GetProfile() (*types.Profile, error) {
}
func (r *RealDebrid) GetAvailableSlots() (int, error) {
url := fmt.Sprintf("%s/torrents/activeCount", r.Host)
req, _ := http.NewRequest(http.MethodGet, url, nil)
req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/torrents/activeCount", r.Host), nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
return 0, nil
@@ -959,6 +948,72 @@ func (r *RealDebrid) GetAvailableSlots() (int, error) {
return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain minimum active pots
}
func (r *RealDebrid) Accounts() *types.Accounts {
return r.accounts
func (r *RealDebrid) AccountManager() *account.Manager {
return r.accountsManager
}
// SyncAccounts refreshes every configured account against the Real-Debrid
// API. Accounts that fail to sync are logged and skipped; the method itself
// always returns nil.
func (r *RealDebrid) SyncAccounts() error {
	// Nothing to do when no account is currently active.
	if len(r.accountsManager.Active()) == 0 {
		return nil
	}
	for _, acc := range r.accountsManager.All() {
		err := r.syncAccount(acc)
		if err == nil {
			continue
		}
		r.logger.Error().Err(err).Msgf("Error syncing account %s", acc.Username)
	}
	return nil
}
// syncAccount validates one account against the Real-Debrid API and
// refreshes its username and today's traffic usage in place.
//
// It returns an error when the token is missing, the /user endpoint
// rejects the token, or the profile cannot be decoded. Traffic-detail
// failures are tolerated: a decode error resets usage to 0, and a missing
// entry for today leaves the stored value unchanged.
func (r *RealDebrid) syncAccount(account *account.Account) error {
	if account.Token == "" {
		return fmt.Errorf("account %s has no token", account.Username)
	}
	req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/user", r.Host), nil)
	if err != nil {
		return fmt.Errorf("error creating request for account %s: %w", account.Username, err)
	}
	resp, err := account.Client().Do(req)
	if err != nil {
		return fmt.Errorf("error checking account %s: %w", account.Username, err)
	}
	// Defer immediately after the error check so the body is closed on
	// every path (the original closed manually on the non-200 branch and
	// only deferred afterwards).
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("account %s is not valid, status code: %d", account.Username, resp.StatusCode)
	}
	var profile profileResponse
	if err := json.NewDecoder(resp.Body).Decode(&profile); err != nil {
		return fmt.Errorf("error decoding profile for account %s: %w", account.Username, err)
	}
	account.Username = profile.Username

	// Get today's traffic usage.
	trafficReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/traffic/details", r.Host), nil)
	if err != nil {
		return fmt.Errorf("error creating request for traffic details for account %s: %w", account.Username, err)
	}
	trafficResp, err := account.Client().Do(trafficReq)
	if err != nil {
		return fmt.Errorf("error checking traffic for account %s: %w", account.Username, err)
	}
	defer trafficResp.Body.Close()
	if trafficResp.StatusCode != http.StatusOK {
		return fmt.Errorf("error checking traffic for account %s, status code: %d", account.Username, trafficResp.StatusCode)
	}
	var trafficData TrafficResponse
	if err := json.NewDecoder(trafficResp.Body).Decode(&trafficData); err != nil {
		// Best effort: a malformed traffic payload resets usage to zero.
		account.TrafficUsed.Store(0)
	} else {
		today := time.Now().Format(time.DateOnly)
		if todayData, exists := trafficData[today]; exists {
			account.TrafficUsed.Store(todayData.Bytes)
		}
	}
	//r.accountsManager.Update(account)
	return nil
}

View File

@@ -144,11 +144,11 @@ type profileResponse struct {
Id int64 `json:"id"`
Username string `json:"username"`
Email string `json:"email"`
Points int64 `json:"points"`
Points int `json:"points"`
Locale string `json:"locale"`
Avatar string `json:"avatar"`
Type string `json:"type"`
Premium int `json:"premium"`
Premium int64 `json:"premium"`
Expiration time.Time `json:"expiration"`
}
@@ -156,3 +156,10 @@ type AvailableSlotsResponse struct {
ActiveSlots int `json:"nb"`
TotalSlots int `json:"limit"`
}
// hostData is one entry of the Real-Debrid /traffic/details response:
// per-host byte counters plus the day's total.
type hostData struct {
	Host  map[string]int64 `json:"host"`
	Bytes int64            `json:"bytes"`
}

// TrafficResponse maps a date string (formatted as time.DateOnly,
// i.e. YYYY-MM-DD — see syncAccount) to that day's usage.
type TrafficResponse map[string]hostData

View File

@@ -20,15 +20,17 @@ import (
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/account"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version"
"go.uber.org/ratelimit"
)
type Torbox struct {
name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
accountsManager *account.Manager
autoExpiresLinksAfter time.Duration
DownloadUncached bool
@@ -40,12 +42,7 @@ type Torbox struct {
addSamples bool
}
// GetProfile is not implemented for Torbox; it always returns (nil, nil).
func (tb *Torbox) GetProfile() (*types.Profile, error) {
	return nil, nil
}
func New(dc config.Debrid) (*Torbox, error) {
rl := request.ParseRateLimit(dc.RateLimit)
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*Torbox, error) {
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
@@ -54,7 +51,7 @@ func New(dc config.Debrid) (*Torbox, error) {
_log := logger.New(dc.Name)
client := request.New(
request.WithHeaders(headers),
request.WithRateLimiter(rl),
request.WithRateLimiter(ratelimits["main"]),
request.WithLogger(_log),
request.WithProxy(dc.Proxy),
)
@@ -67,7 +64,7 @@ func New(dc config.Debrid) (*Torbox, error) {
name: "torbox",
Host: "https://api.torbox.app/v1",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
@@ -142,6 +139,9 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
payload := &bytes.Buffer{}
writer := multipart.NewWriter(payload)
_ = writer.WriteField("magnet", torrent.Magnet.Link)
if !torrent.DownloadUncached {
_ = writer.WriteField("add_only_if_cached", "true")
}
err := writer.Close()
if err != nil {
return nil, err
@@ -165,6 +165,7 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
torrent.Id = torrentId
torrent.MountPath = tb.MountPath
torrent.Debrid = tb.name
torrent.Added = time.Now().Format(time.RFC3339)
return torrent, nil
}
@@ -407,7 +408,7 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error {
func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
linkCh := make(chan *types.DownloadLink)
linkCh := make(chan types.DownloadLink)
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
@@ -420,7 +421,7 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
errCh <- err
return
}
if link != nil {
if link.DownloadLink != "" {
linkCh <- link
file.DownloadLink = link
}
@@ -440,13 +441,6 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
files[file.Name] = file
}
// Collect download links
for link := range linkCh {
if link != nil {
tb.accounts.SetDownloadLink(link.Link, link)
}
}
// Check for errors
for err := range errCh {
if err != nil {
@@ -458,7 +452,7 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
return nil
}
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
query := gourl.Values{}
query.Add("torrent_id", t.Id)
@@ -474,7 +468,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to make request to Torbox API")
return nil, err
return types.DownloadLink{}, err
}
var data DownloadLinksResponse
@@ -484,7 +478,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to unmarshal Torbox API response")
return nil, err
return types.DownloadLink{}, err
}
if data.Data == nil {
@@ -495,7 +489,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Interface("error", data.Error).
Str("detail", data.Detail).
Msg("Torbox API returned no data")
return nil, fmt.Errorf("error getting download links")
return types.DownloadLink{}, fmt.Errorf("error getting download links")
}
link := *data.Data
@@ -504,11 +498,12 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Torbox API returned empty download link")
return nil, fmt.Errorf("error getting download links")
return types.DownloadLink{}, fmt.Errorf("error getting download links")
}
now := time.Now()
downloadLink := &types.DownloadLink{
dl := types.DownloadLink{
Token: tb.APIKey,
Link: file.Link,
DownloadLink: link,
Id: file.Id,
@@ -516,7 +511,9 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
}
return downloadLink, nil
tb.accountsManager.StoreDownloadLink(dl)
return dl, nil
}
func (tb *Torbox) GetDownloadingStatus() []string {
@@ -524,7 +521,25 @@ func (tb *Torbox) GetDownloadingStatus() []string {
}
// GetTorrents pages through the Torbox torrent list via getTorrents until
// an empty page or an error, returning everything collected so far.
// Errors mid-pagination are swallowed deliberately: partial results are
// still returned with a nil error.
//
// Fix: removed a stray, unused `url := fmt.Sprintf(...)` left over from
// the pre-pagination version (declared-and-unused is a compile error).
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
	offset := 0
	allTorrents := make([]*types.Torrent, 0)
	for {
		torrents, err := tb.getTorrents(offset)
		if err != nil {
			break
		}
		if len(torrents) == 0 {
			break
		}
		allTorrents = append(allTorrents, torrents...)
		offset += len(torrents)
	}
	return allTorrents, nil
}
func (tb *Torbox) getTorrents(offset int) ([]*types.Torrent, error) {
url := fmt.Sprintf("%s/api/torrents/mylist?offset=%d", tb.Host, offset)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
@@ -611,8 +626,8 @@ func (tb *Torbox) GetDownloadUncached() bool {
return tb.DownloadUncached
}
func (tb *Torbox) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
return nil, nil
// RefreshDownloadLinks is a no-op for Torbox: links are stored on demand
// when GetDownloadLink is called, so there is nothing to bulk-refresh.
func (tb *Torbox) RefreshDownloadLinks() error {
	return nil
}
func (tb *Torbox) CheckLink(link string) error {
@@ -623,15 +638,19 @@ func (tb *Torbox) GetMountPath() string {
return tb.MountPath
}
// DeleteDownloadLink is a no-op for Torbox; generated links are simply
// left to expire (see autoExpiresLinksAfter in GetDownloadLink).
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
	return nil
}
// GetAvailableSlots is not yet implemented for Torbox and always returns
// an error.
func (tb *Torbox) GetAvailableSlots() (int, error) {
	//TODO: Implement the logic to check available slots for Torbox
	return 0, fmt.Errorf("not implemented")
}
func (tb *Torbox) Accounts() *types.Accounts {
return tb.accounts
func (tb *Torbox) GetProfile() (*types.Profile, error) {
return nil, nil
}
func (tb *Torbox) AccountManager() *account.Manager {
return tb.accountsManager
}
func (tb *Torbox) SyncAccounts() error {
return nil
}

View File

@@ -4,9 +4,10 @@ import (
"bufio"
"cmp"
"context"
"crypto/tls"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/rclone"
"net/http"
"os"
"path"
"path/filepath"
@@ -17,6 +18,10 @@ import (
"sync/atomic"
"time"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/rclone"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"encoding/json"
@@ -72,18 +77,17 @@ type RepairRequest struct {
type Cache struct {
dir string
client types.Client
client common.Client
logger zerolog.Logger
torrents *torrentCache
invalidDownloadLinks sync.Map
folderNaming WebDavFolderNaming
torrents *torrentCache
folderNaming WebDavFolderNaming
listingDebouncer *utils.Debouncer[bool]
// monitors
repairRequest sync.Map
failedToReinsert sync.Map
downloadLinkRequests sync.Map
invalidDownloadLinks *xsync.Map[string, string]
repairRequest *xsync.Map[string, *reInsertRequest]
failedToReinsert *xsync.Map[string, struct{}]
// repair
repairChan chan RepairRequest
@@ -108,9 +112,10 @@ type Cache struct {
config config.Debrid
customFolders []string
mounter *rclone.Mount
httpClient *http.Client
}
func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount) *Cache {
func NewDebridCache(dc config.Debrid, client common.Client, mounter *rclone.Mount) *Cache {
cfg := config.Get()
cet, err := time.LoadLocation("CET")
if err != nil {
@@ -122,9 +127,13 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet))
if err != nil {
// If we can't create a CET scheduler, fallback to local time
cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local))
cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local), gocron.WithGlobalJobOptions(
gocron.WithTags("decypharr-"+dc.Name)))
}
scheduler, err := gocron.NewScheduler(gocron.WithLocation(time.Local))
scheduler, err := gocron.NewScheduler(
gocron.WithLocation(time.Local),
gocron.WithGlobalJobOptions(
gocron.WithTags("decypharr-"+dc.Name)))
if err != nil {
// If we can't create a local scheduler, fallback to CET
scheduler = cetSc
@@ -149,6 +158,18 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
}
_log := logger.New(fmt.Sprintf("%s-webdav", client.Name()))
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
MaxIdleConns: 10,
MaxIdleConnsPerHost: 2,
}
httpClient := &http.Client{
Transport: transport,
Timeout: 0,
}
c := &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
@@ -167,7 +188,12 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
customFolders: customFolders,
mounter: mounter,
ready: make(chan struct{}),
ready: make(chan struct{}),
httpClient: httpClient,
invalidDownloadLinks: xsync.NewMap[string, string](),
repairRequest: xsync.NewMap[string, *reInsertRequest](),
failedToReinsert: xsync.NewMap[string, struct{}](),
repairChan: make(chan RepairRequest, 100), // Initialize the repair channel, max 100 requests buffered
}
c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) {
@@ -218,10 +244,9 @@ func (c *Cache) Reset() {
c.torrents.reset()
// 3. Clear any sync.Maps
c.invalidDownloadLinks = sync.Map{}
c.repairRequest = sync.Map{}
c.failedToReinsert = sync.Map{}
c.downloadLinkRequests = sync.Map{}
c.invalidDownloadLinks = xsync.NewMap[string, string]()
c.repairRequest = xsync.NewMap[string, *reInsertRequest]()
c.failedToReinsert = xsync.NewMap[string, struct{}]()
// 5. Rebuild the listing debouncer
c.listingDebouncer = utils.NewDebouncer[bool](
@@ -254,12 +279,6 @@ func (c *Cache) Start(ctx context.Context) error {
// initial download links
go c.refreshDownloadLinks(ctx)
if err := c.StartSchedule(ctx); err != nil {
c.logger.Error().Err(err).Msg("Failed to start cache worker")
}
c.repairChan = make(chan RepairRequest, 100) // Initialize the repair channel, max 100 requests buffered
go c.repairWorker(ctx)
cfg := config.Get()
@@ -535,7 +554,7 @@ func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)
mergedFiles := mergeFiles(o, updatedTorrent) // Useful for merging files across multiple torrents, while keeping the most recent
updatedTorrent.Files = mergedFiles
}
c.torrents.set(torrentName, t, updatedTorrent)
c.torrents.set(torrentName, t)
go c.SaveTorrent(t)
if callback != nil {
go callback(updatedTorrent)
@@ -551,7 +570,7 @@ func (c *Cache) setTorrents(torrents map[string]CachedTorrent, callback func())
mergedFiles := mergeFiles(o, updatedTorrent)
updatedTorrent.Files = mergedFiles
}
c.torrents.set(torrentName, t, updatedTorrent)
c.torrents.set(torrentName, t)
}
c.SaveTorrents()
if callback != nil {
@@ -708,7 +727,7 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
Str("torrent_id", t.Id).
Str("torrent_name", t.Name).
Int("total_files", len(t.Files)).
Msg("Torrent still not complete after refresh")
Msg("Torrent still not complete after refresh, marking as bad")
} else {
addedOn, err := time.Parse(time.RFC3339, t.Added)
@@ -751,7 +770,7 @@ func (c *Cache) Add(t *types.Torrent) error {
}
func (c *Cache) Client() types.Client {
func (c *Cache) Client() common.Client {
return c.client
}
@@ -905,3 +924,7 @@ func (c *Cache) Logger() zerolog.Logger {
func (c *Cache) GetConfig() config.Debrid {
return c.config
}
// Download executes req with the cache's dedicated HTTP client (the one
// built in NewDebridCache with its own transport and no overall timeout),
// returning the raw response so callers can stream the body.
func (c *Cache) Download(req *http.Request) (*http.Response, error) {
	return c.httpClient.Do(req)
}

View File

@@ -3,6 +3,7 @@ package store
import (
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
@@ -30,59 +31,44 @@ func (r *downloadLinkRequest) Wait() (string, error) {
return r.result, r.err
}
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
// Check link cache
if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil {
if dl, err := c.checkDownloadLink(fileLink); err == nil && !dl.Empty() {
return dl, nil
}
if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
// Wait for the other request to complete and use its result
result := req.(*downloadLinkRequest)
return result.Wait()
}
// Create a new request object
req := newDownloadLinkRequest()
c.downloadLinkRequests.Store(fileLink, req)
dl, err := c.fetchDownloadLink(torrentName, filename, fileLink)
if err != nil {
req.Complete("", err)
c.downloadLinkRequests.Delete(fileLink)
return "", err
return types.DownloadLink{}, err
}
if dl == nil || dl.DownloadLink == "" {
if dl.Empty() {
err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
req.Complete("", err)
c.downloadLinkRequests.Delete(fileLink)
return "", err
return types.DownloadLink{}, err
}
req.Complete(dl.DownloadLink, err)
c.downloadLinkRequests.Delete(fileLink)
return dl.DownloadLink, err
return dl, err
}
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*types.DownloadLink, error) {
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
emptyDownloadLink := types.DownloadLink{}
ct := c.GetTorrentByName(torrentName)
if ct == nil {
return nil, fmt.Errorf("torrent not found")
return emptyDownloadLink, fmt.Errorf("torrent not found")
}
file, ok := ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
return emptyDownloadLink, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
}
if file.Link == "" {
// file link is empty, refresh the torrent to get restricted links
ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
if ct == nil {
return nil, fmt.Errorf("failed to refresh torrent")
return emptyDownloadLink, fmt.Errorf("failed to refresh torrent")
} else {
file, ok = ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
return emptyDownloadLink, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
}
}
}
@@ -92,12 +78,12 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
// Try to reinsert the torrent?
newCt, err := c.reInsertTorrent(ct)
if err != nil {
return nil, fmt.Errorf("failed to reinsert torrent. %w", err)
return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent. %w", err)
}
ct = newCt
file, ok = ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
}
}
@@ -106,41 +92,39 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
if err != nil {
if errors.Is(err, utils.HosterUnavailableError) {
c.logger.Trace().
Str("token", utils.Mask(downloadLink.Token)).
Str("filename", filename).
Str("torrent_id", ct.Id).
Msg("Hoster unavailable, attempting to reinsert torrent")
newCt, err := c.reInsertTorrent(ct)
if err != nil {
return nil, fmt.Errorf("failed to reinsert torrent: %w", err)
return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent: %w", err)
}
ct = newCt
file, ok = ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
}
// Retry getting the download link
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
return nil, fmt.Errorf("retry failed to get download link: %w", err)
return emptyDownloadLink, fmt.Errorf("retry failed to get download link: %w", err)
}
if downloadLink == nil {
return nil, fmt.Errorf("download link is empty after retry")
if downloadLink.Empty() {
return emptyDownloadLink, fmt.Errorf("download link is empty after retry")
}
return nil, nil
return emptyDownloadLink, fmt.Errorf("download link is empty after retry")
} else if errors.Is(err, utils.TrafficExceededError) {
// This is likely a fair usage limit error
return nil, err
return emptyDownloadLink, err
} else {
return nil, fmt.Errorf("failed to get download link: %w", err)
return emptyDownloadLink, fmt.Errorf("failed to get download link: %w", err)
}
}
if downloadLink == nil {
return nil, fmt.Errorf("download link is empty")
if downloadLink.Empty() {
return emptyDownloadLink, fmt.Errorf("download link is empty")
}
// Set link to cache
go c.client.Accounts().SetDownloadLink(fileLink, downloadLink)
return downloadLink, nil
}
@@ -151,28 +135,33 @@ func (c *Cache) GetFileDownloadLinks(t CachedTorrent) {
}
}
func (c *Cache) checkDownloadLink(link string) (string, error) {
dl, err := c.client.Accounts().GetDownloadLink(link)
func (c *Cache) checkDownloadLink(link string) (types.DownloadLink, error) {
dl, err := c.client.AccountManager().GetDownloadLink(link)
if err != nil {
return "", err
return dl, err
}
if !c.downloadLinkIsInvalid(dl.DownloadLink) {
return dl.DownloadLink, nil
return dl, nil
}
return "", fmt.Errorf("download link not found for %s", link)
return types.DownloadLink{}, fmt.Errorf("download link not found for %s", link)
}
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
c.invalidDownloadLinks.Store(downloadLink, reason)
func (c *Cache) MarkDownloadLinkAsInvalid(downloadLink types.DownloadLink, reason string) {
c.invalidDownloadLinks.Store(downloadLink.DownloadLink, reason)
// Remove the download api key from active
if reason == "bandwidth_exceeded" {
// Disable the account
_, account, err := c.client.Accounts().GetDownloadLinkWithAccount(link)
accountManager := c.client.AccountManager()
account, err := accountManager.GetAccount(downloadLink.Token)
if err != nil {
c.logger.Error().Err(err).Str("token", utils.Mask(downloadLink.Token)).Msg("Failed to get account to disable")
return
}
c.client.Accounts().Disable(account)
if account == nil {
c.logger.Error().Str("token", utils.Mask(downloadLink.Token)).Msg("Account not found to disable")
return
}
accountManager.Disable(account)
}
}
@@ -194,5 +183,10 @@ func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, e
}
func (c *Cache) GetTotalActiveDownloadLinks() int {
return c.client.Accounts().GetLinksCount()
total := 0
allAccounts := c.client.AccountManager().Active()
for _, acc := range allAccounts {
total += acc.DownloadLinksCount()
}
return total
}

View File

@@ -3,13 +3,14 @@ package store
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
type fileInfo struct {
@@ -120,7 +121,7 @@ func (c *Cache) refreshTorrents(ctx context.Context) {
close(workChan)
wg.Wait()
c.listingDebouncer.Call(false)
c.listingDebouncer.Call(true)
c.logger.Debug().Msgf("Processed %d new torrents", counter)
}
@@ -243,14 +244,10 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) {
}
defer c.downloadLinksRefreshMu.Unlock()
links, err := c.client.GetDownloadLinks()
if err != nil {
if err := c.client.RefreshDownloadLinks(); err != nil {
c.logger.Error().Err(err).Msg("Failed to get download links")
return
}
c.client.Accounts().SetDownloadLinks(links)
c.logger.Debug().Msgf("Refreshed download %d links", c.client.Accounts().GetLinksCount())
c.logger.Debug().Msgf("Refreshed download links")
}

View File

@@ -4,11 +4,13 @@ import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/puzpuzpuz/xsync/v4"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
"time"
)
type reInsertRequest struct {
@@ -59,6 +61,8 @@ func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
}
}
// GetBrokenFiles checks the files in the torrent for broken links.
// It also attempts to reinsert the torrent if any files are broken.
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
files := make(map[string]types.File)
repairStrategy := config.Get().Repair.Strategy
@@ -217,8 +221,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
if _, ok := c.failedToReinsert.Load(oldID); ok {
return ct, fmt.Errorf("can't retry re-insert for %s", torrent.Id)
}
if reqI, inFlight := c.repairRequest.Load(oldID); inFlight {
req := reqI.(*reInsertRequest)
if req, inFlight := c.repairRequest.Load(oldID); inFlight {
c.logger.Debug().Msgf("Waiting for existing reinsert request to complete for torrent %s", oldID)
return req.Wait()
}
@@ -232,12 +235,13 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
// Submit the magnet to the debrid service
newTorrent := &types.Torrent{
Name: torrent.Name,
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
InfoHash: torrent.InfoHash,
Size: torrent.Size,
Files: make(map[string]types.File),
Arr: torrent.Arr,
Name: torrent.Name,
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
InfoHash: torrent.InfoHash,
Size: torrent.Size,
Files: make(map[string]types.File),
Arr: torrent.Arr,
DownloadUncached: false,
}
var err error
newTorrent, err = c.client.SubmitMagnet(newTorrent)
@@ -260,7 +264,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
_ = c.client.DeleteTorrent(newTorrent.Id)
}
c.markAsFailedToReinsert(oldID)
return ct, err
return ct, fmt.Errorf("failed to check torrent: %w", err)
}
// Update the torrent in the cache
@@ -293,7 +297,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
}
}
req.Complete(ct, err)
req.Complete(ct, nil)
c.markAsSuccessfullyReinserted(oldID)
c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted")
@@ -303,9 +307,8 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
// resetInvalidLinks clears the invalid-link blacklist, re-enables the
// account pool, and kicks off a download-link refresh.
//
// Fix: the visible span assigned both `sync.Map{}` (legacy) and a fresh
// xsync map to invalidDownloadLinks — the former no longer type-checks
// against the *xsync.Map field; only the current form is kept.
func (c *Cache) resetInvalidLinks(ctx context.Context) {
	c.logger.Debug().Msgf("Resetting accounts")
	c.invalidDownloadLinks = xsync.NewMap[string, string]()
	c.client.AccountManager().Reset() // Reset the active download keys

	// Refresh the download links
	c.refreshDownloadLinks(ctx)
}

View File

@@ -40,19 +40,27 @@ type directoryFilter struct {
ageThreshold time.Duration // only for last_added
}
type torrents struct {
sync.RWMutex
byID map[string]CachedTorrent
byName map[string]CachedTorrent
}
type folders struct {
sync.RWMutex
listing map[string][]os.FileInfo // folder name to file listing
}
type CachedTorrentEntry struct {
CachedTorrent
deleted bool // Tombstone flag
}
type torrentCache struct {
torrents torrents
mu sync.RWMutex
torrents []CachedTorrentEntry // Changed to store entries with tombstone
// Lookup indices
idIndex map[string]int
nameIndex map[string]int
// Compaction tracking
deletedCount atomic.Int32
compactThreshold int // Trigger compaction when deletedCount exceeds this
listing atomic.Value
folders folders
@@ -69,12 +77,11 @@ type sortableFile struct {
}
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
tc := &torrentCache{
torrents: torrents{
byID: make(map[string]CachedTorrent),
byName: make(map[string]CachedTorrent),
},
torrents: []CachedTorrentEntry{},
idIndex: make(map[string]int),
nameIndex: make(map[string]int),
compactThreshold: 100, // Compact when 100+ deleted entries
folders: folders{
listing: make(map[string][]os.FileInfo),
},
@@ -87,10 +94,12 @@ func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
}
func (tc *torrentCache) reset() {
tc.torrents.Lock()
tc.torrents.byID = make(map[string]CachedTorrent)
tc.torrents.byName = make(map[string]CachedTorrent)
tc.torrents.Unlock()
tc.mu.Lock()
tc.torrents = tc.torrents[:0] // Clear the slice
tc.idIndex = make(map[string]int) // Reset the ID index
tc.nameIndex = make(map[string]int) // Reset the name index
tc.deletedCount.Store(0)
tc.mu.Unlock()
// reset the sorted listing
tc.sortNeeded.Store(false)
@@ -103,62 +112,183 @@ func (tc *torrentCache) reset() {
}
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
torrent, exists := tc.torrents.byID[id]
return torrent, exists
tc.mu.RLock()
defer tc.mu.RUnlock()
if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
entry := tc.torrents[index]
if !entry.deleted {
return entry.CachedTorrent, true
}
}
return CachedTorrent{}, false
}
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
torrent, exists := tc.torrents.byName[name]
return torrent, exists
tc.mu.RLock()
defer tc.mu.RUnlock()
if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
entry := tc.torrents[index]
if !entry.deleted {
return entry.CachedTorrent, true
}
}
return CachedTorrent{}, false
}
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) {
tc.torrents.Lock()
// Set the id first
func (tc *torrentCache) set(name string, torrent CachedTorrent) {
tc.mu.Lock()
defer tc.mu.Unlock()
tc.torrents.byName[name] = torrent
tc.torrents.byID[torrent.Id] = torrent // This is the unadulterated torrent
tc.torrents.Unlock()
// Check if this torrent already exists (update case)
if existingIndex, exists := tc.idIndex[torrent.Id]; exists && existingIndex < len(tc.torrents) {
if !tc.torrents[existingIndex].deleted {
// Update existing entry
tc.torrents[existingIndex].CachedTorrent = torrent
tc.sortNeeded.Store(true)
return
}
}
// Add new torrent
entry := CachedTorrentEntry{
CachedTorrent: torrent,
deleted: false,
}
tc.torrents = append(tc.torrents, entry)
index := len(tc.torrents) - 1
tc.idIndex[torrent.Id] = index
tc.nameIndex[name] = index
tc.sortNeeded.Store(true)
}
func (tc *torrentCache) getListing() []os.FileInfo {
// Fast path: if we have a sorted list and no changes since last sort
if !tc.sortNeeded.Load() {
return tc.listing.Load().([]os.FileInfo)
}
func (tc *torrentCache) removeId(id string) {
tc.mu.Lock()
defer tc.mu.Unlock()
// Slow path: need to sort
tc.refreshListing()
return tc.listing.Load().([]os.FileInfo)
if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
if !tc.torrents[index].deleted {
// Mark as deleted (tombstone)
tc.torrents[index].deleted = true
tc.deletedCount.Add(1)
// Remove from indices
delete(tc.idIndex, id)
// Find and remove from name index
for name, idx := range tc.nameIndex {
if idx == index {
delete(tc.nameIndex, name)
break
}
}
tc.sortNeeded.Store(true)
// Trigger compaction if threshold exceeded
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
go tc.compact()
}
}
}
}
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
tc.folders.RLock()
defer tc.folders.RUnlock()
if folderName == "" {
return tc.getListing()
func (tc *torrentCache) remove(name string) {
tc.mu.Lock()
defer tc.mu.Unlock()
if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
if !tc.torrents[index].deleted {
// Mark as deleted (tombstone)
torrentID := tc.torrents[index].CachedTorrent.Id
tc.torrents[index].deleted = true
tc.deletedCount.Add(1)
// Remove from indices
delete(tc.nameIndex, name)
delete(tc.idIndex, torrentID)
tc.sortNeeded.Store(true)
// Trigger compaction if threshold exceeded
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
go tc.compact()
}
}
}
if folder, ok := tc.folders.listing[folderName]; ok {
return folder
}
// compact rebuilds the torrent slice without tombstoned entries and
// regenerates both lookup indices so they point at the new positions.
// It acquires tc.mu itself, so callers must not hold the lock.
//
// Fixes: removed merge-residue lines (`return []os.FileInfo{}` inside a
// func with no results — a compile error) and replaced the O(n) reverse
// scan of nameIndex per surviving entry with a one-shot reverse map,
// taking the rebuild from O(n²) to O(n).
func (tc *torrentCache) compact() {
	tc.mu.Lock()
	defer tc.mu.Unlock()

	deleted := tc.deletedCount.Load()
	if deleted == 0 {
		return // Nothing to compact
	}

	// Reverse map from slice position to name so each surviving entry's
	// name-index can be fixed up in O(1).
	nameAt := make(map[int]string, len(tc.nameIndex))
	for name, idx := range tc.nameIndex {
		nameAt[idx] = name
	}

	// Copy non-deleted entries into fresh storage.
	newTorrents := make([]CachedTorrentEntry, 0, len(tc.torrents)-int(deleted))
	newIDIndex := make(map[string]int, len(tc.idIndex))
	newNameIndex := make(map[string]int, len(tc.nameIndex))
	for oldIndex, entry := range tc.torrents {
		if entry.deleted {
			continue
		}
		newIndex := len(newTorrents)
		newTorrents = append(newTorrents, entry)
		newIDIndex[entry.CachedTorrent.Id] = newIndex
		if name, ok := nameAt[oldIndex]; ok {
			newNameIndex[name] = newIndex
		}
	}

	// Replace old data with compacted data.
	tc.torrents = newTorrents
	tc.idIndex = newIDIndex
	tc.nameIndex = newNameIndex
	tc.deletedCount.Store(0)
	tc.sortNeeded.Store(true)
}
// ForceCompact runs a compaction immediately, regardless of the tombstone
// threshold (compact itself still no-ops when there is nothing deleted).
func (tc *torrentCache) ForceCompact() {
	tc.compact()
}
// GetStats reports cache occupancy: total slots in the backing slice, live
// entries, and tombstoned entries still awaiting compaction.
func (tc *torrentCache) GetStats() (total, active, deleted int) {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	deleted = int(tc.deletedCount.Load())
	total = len(tc.torrents)
	active = total - deleted
	return
}
func (tc *torrentCache) refreshListing() {
tc.torrents.RLock()
all := make([]sortableFile, 0, len(tc.torrents.byName))
for name, t := range tc.torrents.byName {
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
tc.mu.RLock()
all := make([]sortableFile, 0, len(tc.nameIndex))
for name, index := range tc.nameIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
t := tc.torrents[index].CachedTorrent
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
}
}
tc.sortNeeded.Store(false)
tc.torrents.RUnlock()
tc.mu.RUnlock()
sort.Slice(all, func(i, j int) bool {
if all[i].name != all[j].name {
@@ -234,8 +364,31 @@ func (tc *torrentCache) refreshListing() {
wg.Wait()
}
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
// getListing returns the cached, sorted root listing, rebuilding it only when
// a mutation has invalidated the cached ordering since the last sort.
func (tc *torrentCache) getListing() []os.FileInfo {
	if tc.sortNeeded.Load() {
		// Slow path: rebuild and re-sort before serving.
		tc.refreshListing()
	}
	return tc.listing.Load().([]os.FileInfo)
}
// getFolderListing returns the listing for the named virtual folder. An empty
// name means the root listing; an unknown folder yields an empty (non-nil)
// slice rather than nil.
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
	tc.folders.RLock()
	defer tc.folders.RUnlock()
	if folderName == "" {
		return tc.getListing()
	}
	folder, ok := tc.folders.listing[folderName]
	if !ok {
		return []os.FileInfo{}
	}
	return folder
}
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
torrentName := strings.ToLower(file.name)
for _, filter := range filters {
matched := false
@@ -278,51 +431,46 @@ func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file so
}
func (tc *torrentCache) getAll() map[string]CachedTorrent {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
result := make(map[string]CachedTorrent, len(tc.torrents.byID))
for name, torrent := range tc.torrents.byID {
result[name] = torrent
tc.mu.RLock()
defer tc.mu.RUnlock()
result := make(map[string]CachedTorrent)
for _, entry := range tc.torrents {
if !entry.deleted {
result[entry.CachedTorrent.Id] = entry.CachedTorrent
}
}
return result
}
// getAllCount reports the number of live torrents (total minus tombstones).
//
// Fix: removed old-version residue (byID-map lock and length) merged into the
// body; kept the tombstone-aware computation.
func (tc *torrentCache) getAllCount() int {
	tc.mu.RLock()
	defer tc.mu.RUnlock()
	return len(tc.torrents) - int(tc.deletedCount.Load())
}
func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
results := make(map[string]CachedTorrent, len(tc.torrents.byName))
for name, torrent := range tc.torrents.byName {
results[name] = torrent
tc.mu.RLock()
defer tc.mu.RUnlock()
results := make(map[string]CachedTorrent, len(tc.nameIndex))
for name, index := range tc.nameIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
results[name] = tc.torrents[index].CachedTorrent
}
}
return results
}
func (tc *torrentCache) getIdMaps() map[string]struct{} {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
res := make(map[string]struct{}, len(tc.torrents.byID))
for id := range tc.torrents.byID {
res[id] = struct{}{}
tc.mu.RLock()
defer tc.mu.RUnlock()
res := make(map[string]struct{}, len(tc.idIndex))
for id, index := range tc.idIndex {
if index < len(tc.torrents) && !tc.torrents[index].deleted {
res[id] = struct{}{}
}
}
return res
}
func (tc *torrentCache) removeId(id string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.byID, id)
tc.sortNeeded.Store(true)
}
func (tc *torrentCache) remove(name string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.byName, name)
tc.sortNeeded.Store(true)
}

View File

@@ -6,11 +6,11 @@ import (
"github.com/sirrobot01/decypharr/internal/utils"
)
func (c *Cache) StartSchedule(ctx context.Context) error {
func (c *Cache) StartWorker(ctx context.Context) error {
// For now, we just want to refresh the listing and download links
// Stop any existing jobs before starting new ones
c.scheduler.RemoveByTags("decypharr")
c.scheduler.RemoveByTags("decypharr-%s", c.GetConfig().Name)
// Schedule download link refresh job
if jd, err := utils.ConvertToJobDef(c.downloadLinksRefreshInterval); err != nil {
@@ -19,7 +19,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
c.refreshDownloadLinks(ctx)
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
}), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create download link refresh job")
} else {
c.logger.Debug().Msgf("Download link refresh job scheduled for every %s", c.downloadLinksRefreshInterval)
@@ -33,7 +33,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job
if _, err := c.scheduler.NewJob(jd, gocron.NewTask(func() {
c.refreshTorrents(ctx)
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
}), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create torrent refresh job")
} else {
c.logger.Debug().Msgf("Torrent refresh job scheduled for every %s", c.torrentRefreshInterval)
@@ -49,7 +49,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
// Schedule the job
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
c.resetInvalidLinks(ctx)
}), gocron.WithContext(ctx), gocron.WithTags("decypharr")); err != nil {
}), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create link reset job")
} else {
c.logger.Debug().Msgf("Link reset job scheduled for every midnight, CET")

View File

@@ -1,243 +0,0 @@
package types
import (
"github.com/sirrobot01/decypharr/internal/config"
"sync"
"time"
)
// Accounts manages the pool of debrid accounts for a single provider and
// tracks which account is currently used to generate download links.
// mu guards both fields.
type Accounts struct {
	current  *Account   // account new links are created under; nil when all are disabled
	accounts []*Account // every configured account, in config order
	mu       sync.RWMutex
}
// NewAccounts builds the account pool from the configured download API keys,
// skipping blank tokens. The first account (if any) becomes current.
func NewAccounts(debridConf config.Debrid) *Accounts {
	accounts := make([]*Account, 0, len(debridConf.DownloadAPIKeys))
	for idx, token := range debridConf.DownloadAPIKeys {
		if token == "" {
			continue
		}
		accounts = append(accounts, newAccount(debridConf.Name, token, idx))
	}
	result := &Accounts{accounts: accounts}
	if len(accounts) > 0 {
		result.current = accounts[0]
	}
	return result
}
// Account is a single debrid credential plus its per-account cache of
// generated download links. mu guards the links map only; Disabled is
// written by Accounts.Disable while holding the Accounts-level lock.
type Account struct {
	Debrid   string // e.g., "realdebrid", "torbox", etc.
	Order    int    // position in the configured key list
	Disabled bool   // set when the account is exhausted/invalid
	Token    string
	links    map[string]*DownloadLink // keyed by sliceFileLink(link)
	mu       sync.RWMutex
}
// All returns every account that has not been disabled, in config order.
func (a *Accounts) All() []*Account {
	a.mu.RLock()
	defer a.mu.RUnlock()
	active := make([]*Account, 0, len(a.accounts))
	for _, acc := range a.accounts {
		if acc.Disabled {
			continue
		}
		active = append(active, acc)
	}
	return active
}
// Current returns the account used for new download links. The fast path is
// a read-locked load; when no current account is set it upgrades to a write
// lock and promotes the first enabled account (double-checked locking).
// Returns nil when every account is disabled.
func (a *Accounts) Current() *Account {
	a.mu.RLock()
	if a.current != nil {
		current := a.current
		a.mu.RUnlock()
		return current
	}
	a.mu.RUnlock()
	a.mu.Lock()
	defer a.mu.Unlock()
	// Double-check after acquiring write lock
	if a.current != nil {
		return a.current
	}
	activeAccounts := make([]*Account, 0)
	for _, acc := range a.accounts {
		if !acc.Disabled {
			activeAccounts = append(activeAccounts, acc)
		}
	}
	if len(activeAccounts) > 0 {
		a.current = activeAccounts[0]
	}
	// May still be nil when every account is disabled.
	return a.current
}
// Disable marks the account as disabled and, if it was the current one,
// promotes the first remaining enabled account (or nil if none are left).
func (a *Accounts) Disable(account *Account) {
	a.mu.Lock()
	defer a.mu.Unlock()
	account.disable()
	if a.current != account {
		return
	}
	a.current = nil
	for _, acc := range a.accounts {
		if !acc.Disabled {
			a.current = acc
			return
		}
	}
}
// Reset re-enables every account, clears their cached download links, and
// restores the first configured account as current (nil when none exist).
func (a *Accounts) Reset() {
	a.mu.Lock()
	defer a.mu.Unlock()
	for _, acc := range a.accounts {
		acc.resetDownloadLinks()
		acc.Disabled = false
	}
	a.current = nil
	if len(a.accounts) > 0 {
		a.current = a.accounts[0]
	}
}
// GetDownloadLink returns the cached download link for fileLink from the
// current account, or a typed error when no account is active or the link is
// unknown, expired, or empty.
//
// Fix: the original called a.Current() twice (nil check, then use), taking
// the lock twice and racing against a concurrent account switch between the
// calls. Snapshot it once, matching GetDownloadLinkWithAccount.
func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) {
	current := a.Current()
	if current == nil {
		return nil, NoActiveAccountsError
	}
	dl, ok := current.getLink(fileLink)
	if !ok {
		return nil, NoDownloadLinkError
	}
	if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
		return nil, DownloadLinkExpiredError
	}
	if dl.DownloadLink == "" {
		return nil, EmptyDownloadLinkError
	}
	return dl, nil
}
// GetDownloadLinkWithAccount behaves like GetDownloadLink but also returns
// the account the link was resolved against, so callers can disable or retry
// that specific account when the link is stale.
func (a *Accounts) GetDownloadLinkWithAccount(fileLink string) (*DownloadLink, *Account, error) {
	account := a.Current()
	if account == nil {
		return nil, nil, NoActiveAccountsError
	}
	dl, found := account.getLink(fileLink)
	if !found {
		return nil, nil, NoDownloadLinkError
	}
	switch {
	case dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()):
		return nil, account, DownloadLinkExpiredError
	case dl.DownloadLink == "":
		return nil, account, EmptyDownloadLinkError
	}
	return dl, account, nil
}
// SetDownloadLink caches dl for fileLink on the current account; it is a
// no-op when no account is active.
//
// Fix: single Current() call instead of two (avoids re-locking and an
// account swap between the nil check and the use).
func (a *Accounts) SetDownloadLink(fileLink string, dl *DownloadLink) {
	if current := a.Current(); current != nil {
		current.setLink(fileLink, dl)
	}
}
// DeleteDownloadLink removes the cached link for fileLink from the current
// account; it is a no-op when no account is active.
//
// Fix: single Current() call instead of two (avoids re-locking and an
// account swap between the nil check and the use).
func (a *Accounts) DeleteDownloadLink(fileLink string) {
	if current := a.Current(); current != nil {
		current.deleteLink(fileLink)
	}
}
// GetLinksCount reports how many links the current account has cached,
// or 0 when no account is active.
//
// Fix: single Current() call instead of two (avoids re-locking and an
// account swap between the nil check and the use).
func (a *Accounts) GetLinksCount() int {
	current := a.Current()
	if current == nil {
		return 0
	}
	return current.LinksCount()
}
// SetDownloadLinks bulk-merges links into the current account's cache
// (expired entries are skipped by setLinks); no-op when no account is active.
//
// Fix: single Current() call instead of two (avoids re-locking and an
// account swap between the nil check and the use).
func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
	if current := a.Current(); current != nil {
		current.setLinks(links)
	}
}
// newAccount constructs an enabled account for the given provider token,
// preserving its configured ordering index and an empty link cache.
func newAccount(debridName, token string, index int) *Account {
	acc := &Account{
		Debrid: debridName,
		Token:  token,
		Order:  index,
	}
	acc.links = make(map[string]*DownloadLink)
	return acc
}
// getLink looks up the cached download link for fileLink under its
// provider-normalized key (see sliceFileLink).
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
	a.mu.RLock()
	defer a.mu.RUnlock()
	link, found := a.links[a.sliceFileLink(fileLink)]
	return link, found
}
// setLink stores dl under the provider-normalized key for fileLink.
func (a *Account) setLink(fileLink string, dl *DownloadLink) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.links[a.sliceFileLink(fileLink)] = dl
}
// deleteLink drops the cached entry for fileLink, if present.
func (a *Account) deleteLink(fileLink string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	delete(a.links, a.sliceFileLink(fileLink))
}
// resetDownloadLinks discards the entire link cache for this account.
func (a *Account) resetDownloadLinks() {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.links = make(map[string]*DownloadLink)
}
// LinksCount reports the number of cached download links for this account.
func (a *Account) LinksCount() int {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return len(a.links)
}
// disable flags the account as disabled. NOTE(review): Disabled is written
// without taking a.mu here — callers appear to hold the Accounts-level lock
// (see Accounts.Disable); confirm no other writer exists.
func (a *Account) disable() {
	a.Disabled = true
}
// setLinks merges the provided links into the account cache, skipping any
// that have already expired at merge time.
func (a *Account) setLinks(links map[string]*DownloadLink) {
	a.mu.Lock()
	defer a.mu.Unlock()
	now := time.Now()
	for _, dl := range links {
		expired := !dl.ExpiresAt.IsZero() && dl.ExpiresAt.Before(now)
		if expired {
			continue
		}
		a.links[a.sliceFileLink(dl.Link)] = dl
	}
}
// sliceFileLink normalizes a link into its cache key. For real-debrid the
// link is truncated to its first 39 characters — presumably so URL variants
// of the same file share one cache entry (TODO confirm the 39-char prefix
// semantics against the real-debrid link format). Other providers use the
// link verbatim.
func (a *Account) sliceFileLink(fileLink string) string {
	if a.Debrid != "realdebrid" || len(fileLink) < 39 {
		return fileLink
	}
	return fileLink[:39]
}

View File

@@ -1,28 +0,0 @@
package types
import (
"github.com/rs/zerolog"
)
// Client abstracts a debrid provider (real-debrid, torbox, …): torrent
// lifecycle management, download-link generation, and account/profile
// introspection.
type Client interface {
	// Torrent lifecycle.
	SubmitMagnet(tr *Torrent) (*Torrent, error)
	CheckStatus(tr *Torrent) (*Torrent, error)
	GetFileDownloadLinks(tr *Torrent) error
	GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
	DeleteTorrent(torrentId string) error
	// IsAvailable reports, per infohash, whether the provider has it cached.
	IsAvailable(infohashes []string) map[string]bool
	GetDownloadUncached() bool
	UpdateTorrent(torrent *Torrent) error
	GetTorrent(torrentId string) (*Torrent, error)
	GetTorrents() ([]*Torrent, error)
	// Provider identity and diagnostics.
	Name() string
	Logger() zerolog.Logger
	GetDownloadingStatus() []string
	// Download-link management.
	GetDownloadLinks() (map[string]*DownloadLink, error)
	CheckLink(link string) error
	GetMountPath() string
	Accounts() *Accounts // Returns the active download account/token
	DeleteDownloadLink(linkId string) error
	// Account/profile introspection.
	GetProfile() (*Profile, error)
	GetAvailableSlots() (int, error)
}

View File

@@ -14,7 +14,7 @@ var NoActiveAccountsError = &Error{
Code: "no_active_accounts",
}
var NoDownloadLinkError = &Error{
var ErrDownloadLinkNotFound = &Error{
Message: "No download link found",
Code: "no_download_link",
}

View File

@@ -2,6 +2,7 @@ package types
import (
"fmt"
"net/url"
"os"
"path/filepath"
"sync"
@@ -42,6 +43,38 @@ type Torrent struct {
sync.Mutex
}
// Copy returns a snapshot of the torrent taken under its lock: the Files map
// and Links slice are cloned, scalar fields are copied by value, and the
// embedded mutex is deliberately left at its zero value on the clone.
func (t *Torrent) Copy() *Torrent {
	t.Lock()
	defer t.Unlock()
	files := make(map[string]File, len(t.Files))
	for name, f := range t.Files {
		files[name] = f
	}
	links := append([]string{}, t.Links...)
	clone := &Torrent{
		Id:               t.Id,
		InfoHash:         t.InfoHash,
		Name:             t.Name,
		Folder:           t.Folder,
		Filename:         t.Filename,
		OriginalFilename: t.OriginalFilename,
		Size:             t.Size,
		Bytes:            t.Bytes,
		Magnet:           t.Magnet,
		Files:            files,
		Status:           t.Status,
		Added:            t.Added,
		Progress:         t.Progress,
		Speed:            t.Speed,
		Seeders:          t.Seeders,
		Links:            links,
		MountPath:        t.MountPath,
		Debrid:           t.Debrid,
		Arr:              t.Arr,
	}
	return clone
}
// GetSymlinkFolder returns the symlink destination for this torrent:
// <parent>/<arr name>/<torrent folder>.
func (t *Torrent) GetSymlinkFolder(parent string) string {
	return filepath.Join(parent, t.Arr.Name, t.Folder)
}
@@ -84,18 +117,18 @@ func (t *Torrent) GetFiles() []File {
}
type File struct {
TorrentId string `json:"torrent_id"`
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
IsRar bool `json:"is_rar"`
ByteRange *[2]int64 `json:"byte_range,omitempty"`
Path string `json:"path"`
Link string `json:"link"`
AccountId string `json:"account_id"`
Generated time.Time `json:"generated"`
Deleted bool `json:"deleted"`
DownloadLink *DownloadLink `json:"-"`
TorrentId string `json:"torrent_id"`
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
IsRar bool `json:"is_rar"`
ByteRange *[2]int64 `json:"byte_range,omitempty"`
Path string `json:"path"`
Link string `json:"link"`
AccountId string `json:"account_id"`
Generated time.Time `json:"generated"`
Deleted bool `json:"deleted"`
DownloadLink DownloadLink `json:"-"`
}
func (t *Torrent) Cleanup(remove bool) {
@@ -114,22 +147,32 @@ type IngestData struct {
Size int64 `json:"size"`
}
type LibraryStats struct {
Total int `json:"total"`
Bad int `json:"bad"`
ActiveLinks int `json:"active_links"`
}
type Stats struct {
Profile *Profile `json:"profile"`
Library LibraryStats `json:"library"`
Accounts []map[string]any `json:"accounts"`
}
type Profile struct {
Name string `json:"name"`
Id int64 `json:"id"`
Username string `json:"username"`
Email string `json:"email"`
Points int64 `json:"points"`
Points int `json:"points"`
Type string `json:"type"`
Premium int `json:"premium"`
Premium int64 `json:"premium"`
Expiration time.Time `json:"expiration"`
LibrarySize int `json:"library_size"`
BadTorrents int `json:"bad_torrents"`
ActiveLinks int `json:"active_links"`
}
type DownloadLink struct {
Debrid string `json:"debrid"`
Token string `json:"token"`
Filename string `json:"filename"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
@@ -139,6 +182,27 @@ type DownloadLink struct {
ExpiresAt time.Time
}
func (d *DownloadLink) String() string {
return d.DownloadLink
// isValidURL reports whether str parses as an absolute URL carrying both a
// scheme and a host component.
func isValidURL(str string) bool {
	parsed, err := url.Parse(str)
	if err != nil {
		return false
	}
	return parsed.Scheme != "" && parsed.Host != ""
}
// Valid reports whether the download link is usable: non-empty and a
// well-formed absolute URL. A typed error describes which check failed.
func (dl *DownloadLink) Valid() error {
	switch {
	case dl.Empty():
		return EmptyDownloadLinkError
	case !isValidURL(dl.DownloadLink):
		return ErrDownloadLinkNotFound
	default:
		return nil
	}
}
// Empty reports whether no resolved download URL is present.
func (dl *DownloadLink) Empty() bool {
	return dl.DownloadLink == ""
}
// String implements fmt.Stringer, yielding the resolved download URL.
func (dl *DownloadLink) String() string {
	return dl.DownloadLink
}

View File

@@ -2,16 +2,18 @@ package qbit
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"golang.org/x/crypto/bcrypt"
"net/http"
"net/url"
"strings"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/wire"
"golang.org/x/crypto/bcrypt"
)
type contextKey string
@@ -97,14 +99,22 @@ func decodeAuthHeader(header string) (string, string, error) {
bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":")
host := bearer[:colonIndex]
token := bearer[colonIndex+1:]
username := bearer[:colonIndex]
password := bearer[colonIndex+1:]
return host, token, nil
if username == "" || password == "" {
return username, password, fmt.Errorf("empty username or password")
}
return strings.TrimSpace(username), strings.TrimSpace(password), nil
}
func (q *QBit) categoryContext(next http.Handler) http.Handler {
// Print full URL for debugging
// Try to get category from URL query first
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Print request method and URL
category := strings.Trim(r.URL.Query().Get("category"), "")
if category == "" {
// Get from form
@@ -127,49 +137,114 @@ func (q *QBit) categoryContext(next http.Handler) http.Handler {
// Only a valid host and token will be added to the context/config. The rest are manual
// authContext is middleware that authenticates the request — via the
// Authorization header or a qBittorrent SID cookie — and stores the resolved
// *arr.Arr under arrKey in the request context for downstream handlers.
//
// Fix: the body contained the entire previous implementation spliced ahead of
// the current one (a bad merge); only the getUsernameAndPassword/authenticate
// version is kept.
func (q *QBit) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		username, password, err := getUsernameAndPassword(r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		category := getCategory(r.Context())
		a, err := q.authenticate(category, username, password)
		if err != nil {
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		ctx := context.WithValue(r.Context(), arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// getUsernameAndPassword extracts credentials from the request, preferring
// the Authorization header and falling back to the qBittorrent session
// cookie ("sid" or "SID").
//
// NOTE(review): when neither source yields credentials this deliberately
// returns ("", "", nil) — empty credentials with no error — so that
// unauthenticated requests still pass through when auth is disabled;
// authenticate() rejects empties only when cfg.UseAuth is set. Confirm
// decodeAuthHeader returns empty strings alongside its error (it is defined
// elsewhere in this package).
func getUsernameAndPassword(r *http.Request) (string, string, error) {
	// Try to get from authorization header
	username, password, err := decodeAuthHeader(r.Header.Get("Authorization"))
	if err == nil && username != "" {
		return username, password, err
	}
	// Try to get from cookie
	sid, err := r.Cookie("sid")
	if err != nil {
		// try SID (some clients upper-case the cookie name)
		sid, err = r.Cookie("SID")
	}
	if err == nil {
		// A cookie was found; an invalid/forged SID is a hard failure.
		username, password, err = extractFromSID(sid.Value)
		if err != nil {
			return "", "", err
		}
	}
	return username, password, nil
}
// authenticate resolves (creating if necessary) the arr registered for the
// category and, when auth is enabled, validates the supplied credentials —
// first as an arr host/token pair via a.Validate(), then falling back to the
// local user check via verifyAuth. On success the arr is marked as
// auto-discovered and persisted back to the shared registry.
//
// NOTE(review): username/password are assigned to a.Host/a.Token
// unconditionally — this assumes qBittorrent-compatible clients send the arr
// host as the username and its API token as the password; confirm against
// the Sonarr/Radarr download-client setup.
func (q *QBit) authenticate(category, username, password string) (*arr.Arr, error) {
	cfg := config.Get()
	arrs := wire.Get().Arr()
	// Check if arr exists
	a := arrs.Get(category)
	if a == nil {
		// Arr is not configured, create a new one
		downloadUncached := false
		a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
	}
	a.Host = username
	a.Token = password
	if cfg.UseAuth {
		if a.Host == "" || a.Token == "" {
			return nil, fmt.Errorf("unauthorized: Host and token are required for authentication(you've enabled authentication)")
		}
		// try to use either Arr validate, or user auth validation
		if err := a.Validate(); err != nil {
			// If this failed, try to use user auth validation
			if !verifyAuth(username, password) {
				return nil, fmt.Errorf("unauthorized: invalid credentials")
			}
		}
	}
	a.Source = "auto"
	arrs.AddOrUpdate(a)
	return a, nil
}
// createSID builds the session cookie value: "username|password" followed by
// a truncated SHA-256 signature keyed on the server secret, all
// base64url-encoded. extractFromSID is the inverse.
func createSID(username, password string) string {
	cfg := config.Get()
	combined := fmt.Sprintf("%s|%s", username, password)
	digest := sha256.Sum256([]byte(combined + cfg.SecretKey()))
	signature := fmt.Sprintf("%x", digest)[:16] // First 16 chars
	payload := fmt.Sprintf("%s|%s", combined, signature)
	return base64.URLEncoding.EncodeToString([]byte(payload))
}
// extractFromSID validates a SID cookie produced by createSID and returns the
// embedded username and password. The decoded payload has the shape
// "username|password|hash" (the old comment wrongly said colons).
//
// Fix: the previous implementation split on every '|' and required exactly
// three parts, so any password containing '|' was rejected even though
// createSID happily encodes one. We now peel the signature off the LAST
// separator, verify it over the credential portion, and only then split the
// credentials on their FIRST separator. (A '|' inside the username itself
// remains ambiguous by construction of the format.)
func extractFromSID(sid string) (string, string, error) {
	// Decode base64
	decoded, err := base64.URLEncoding.DecodeString(sid)
	if err != nil {
		return "", "", fmt.Errorf("invalid SID format")
	}
	payload := string(decoded)
	sep := strings.LastIndex(payload, "|")
	if sep < 0 {
		return "", "", fmt.Errorf("invalid SID structure")
	}
	combined, providedHash := payload[:sep], payload[sep+1:]
	// Verify the signature over the credential portion.
	cfg := config.Get()
	expectedHash := sha256.Sum256([]byte(combined + cfg.SecretKey()))
	expectedHashStr := fmt.Sprintf("%x", expectedHash)[:16]
	if providedHash != expectedHashStr {
		return "", "", fmt.Errorf("invalid SID signature")
	}
	username, password, ok := strings.Cut(combined, "|")
	if !ok {
		return "", "", fmt.Errorf("invalid SID structure")
	}
	return username, password, nil
}
func hashesContext(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_hashes := chi.URLParam(r, "hashes")

View File

@@ -1,25 +1,33 @@
package qbit
import (
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
"net/http"
"path/filepath"
"strings"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
)
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
_arr := getArrFromContext(ctx)
if _arr == nil {
// Arr not in context, return OK
_, _ = w.Write([]byte("Ok."))
cfg := config.Get()
username := r.FormValue("username")
password := r.FormValue("password")
a, err := q.authenticate(getCategory(ctx), username, password)
if err != nil {
http.Error(w, err.Error(), http.StatusUnauthorized)
return
}
if err := _arr.Validate(); err != nil {
q.logger.Error().Err(err).Msgf("Error validating arr")
http.Error(w, "Invalid arr configuration", http.StatusBadRequest)
return
if cfg.UseAuth {
cookie := &http.Cookie{
Name: "sid",
Value: createSID(a.Host, a.Token),
Path: "/",
SameSite: http.SameSiteNoneMode,
}
http.SetCookie(w, cookie)
}
_, _ = w.Write([]byte("Ok."))
}

View File

@@ -4,7 +4,7 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/wire"
)
type QBit struct {
@@ -12,7 +12,7 @@ type QBit struct {
Password string
DownloadFolder string
Categories []string
storage *store.TorrentStorage
storage *wire.TorrentStorage
logger zerolog.Logger
Tags []string
}
@@ -25,7 +25,7 @@ func New() *QBit {
Password: cfg.Password,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
storage: store.Get().Torrents(),
storage: wire.Get().Torrents(),
logger: logger.New("qbit"),
}
}

View File

@@ -1,33 +1,50 @@
package qbit
import (
"github.com/go-chi/chi/v5"
"net/http"
"github.com/go-chi/chi/v5"
)
func (q *QBit) Routes() http.Handler {
r := chi.NewRouter()
r.Use(q.categoryContext)
r.Group(func(r chi.Router) {
r.Use(q.authContext)
r.Post("/auth/login", q.handleLogin)
r.Route("/torrents", func(r chi.Router) {
r.Use(q.authContext)
r.Use(hashesContext)
r.Get("/info", q.handleTorrentsInfo)
r.Post("/info", q.handleTorrentsInfo)
r.Post("/add", q.handleTorrentsAdd)
r.Post("/delete", q.handleTorrentsDelete)
r.Get("/categories", q.handleCategories)
r.Post("/categories", q.handleCategories)
r.Post("/createCategory", q.handleCreateCategory)
r.Post("/setCategory", q.handleSetCategory)
r.Post("/addTags", q.handleAddTorrentTags)
r.Post("/removeTags", q.handleRemoveTorrentTags)
r.Post("/createTags", q.handleCreateTags)
r.Get("/tags", q.handleGetTags)
r.Get("/pause", q.handleTorrentsPause)
r.Get("/resume", q.handleTorrentsResume)
r.Get("/recheck", q.handleTorrentRecheck)
r.Get("/properties", q.handleTorrentProperties)
r.Get("/files", q.handleTorrentFiles)
// Create POST equivalents for pause, resume, recheck
r.Post("/tags", q.handleGetTags)
r.Post("/pause", q.handleTorrentsPause)
r.Post("/resume", q.handleTorrentsResume)
r.Post("/recheck", q.handleTorrentRecheck)
r.Post("/properties", q.handleTorrentProperties)
r.Post("/files", q.handleTorrentFiles)
})
r.Route("/app", func(r chi.Router) {

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/wire"
"io"
"mime/multipart"
"strings"
@@ -18,9 +18,9 @@ func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid s
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
_store := store.Get()
_store := wire.Get()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
@@ -37,8 +37,8 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
if err != nil {
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
}
_store := store.Get()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
_store := wire.Get()
importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
return fmt.Errorf("failed to process torrent: %w", err)
@@ -46,19 +46,19 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
return nil
}
func (q *QBit) ResumeTorrent(t *store.Torrent) bool {
func (q *QBit) ResumeTorrent(t *wire.Torrent) bool {
return true
}
func (q *QBit) PauseTorrent(t *store.Torrent) bool {
func (q *QBit) PauseTorrent(t *wire.Torrent) bool {
return true
}
func (q *QBit) RefreshTorrent(t *store.Torrent) bool {
func (q *QBit) RefreshTorrent(t *wire.Torrent) bool {
return true
}
func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
func (q *QBit) GetTorrentProperties(t *wire.Torrent) *TorrentProperties {
return &TorrentProperties{
AdditionDate: t.AddedOn,
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
@@ -83,7 +83,7 @@ func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
}
}
func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
func (q *QBit) setTorrentTags(t *wire.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
for _, tag := range tags {
if tag == "" {
@@ -101,7 +101,7 @@ func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
return true
}
func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool {
func (q *QBit) removeTorrentTags(t *wire.Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
q.Tags = utils.RemoveItem(q.Tags, tags...)

View File

@@ -4,19 +4,19 @@ import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"time"
"github.com/sirrobot01/decypharr/internal/config"
)
// Mount creates a mount using the rclone RC API with retry logic
func (m *Manager) Mount(provider, webdavURL string) error {
return m.mountWithRetry(provider, webdavURL, 3)
func (m *Manager) Mount(mountPath, provider, webdavURL string) error {
return m.mountWithRetry(mountPath, provider, webdavURL, 3)
}
// mountWithRetry attempts to mount with retry logic
func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) error {
func (m *Manager) mountWithRetry(mountPath, provider, webdavURL string, maxRetries int) error {
if !m.IsReady() {
if err := m.WaitForReady(30 * time.Second); err != nil {
return fmt.Errorf("rclone RC server not ready: %w", err)
@@ -34,7 +34,7 @@ func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) err
time.Sleep(wait)
}
if err := m.performMount(provider, webdavURL); err != nil {
if err := m.performMount(mountPath, provider, webdavURL); err != nil {
m.logger.Error().
Err(err).
Str("provider", provider).
@@ -49,13 +49,8 @@ func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) err
}
// performMount performs a single mount attempt
func (m *Manager) performMount(provider, webdavURL string) error {
func (m *Manager) performMount(mountPath, provider, webdavURL string) error {
cfg := config.Get()
mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
cacheDir := ""
if cfg.Rclone.CacheDir != "" {
cacheDir = filepath.Join(cfg.Rclone.CacheDir, provider)
}
// Create mount directory
if err := os.MkdirAll(mountPath, 0755); err != nil {
@@ -74,27 +69,40 @@ func (m *Manager) performMount(provider, webdavURL string) error {
// Clean up any stale mount first
if exists && !existingMount.Mounted {
m.forceUnmountPath(mountPath)
err := m.forceUnmountPath(mountPath)
if err != nil {
return err
}
}
// Create rclone config for this provider
configName := fmt.Sprintf("decypharr-%s", provider)
if err := m.createConfig(configName, webdavURL); err != nil {
if err := m.createConfig(provider, webdavURL); err != nil {
return fmt.Errorf("failed to create rclone config: %w", err)
}
// Prepare mount arguments
mountArgs := map[string]interface{}{
"fs": fmt.Sprintf("%s:", configName),
"fs": fmt.Sprintf("%s:", provider),
"mountPoint": mountPath,
"mountType": "mount", // Use standard FUSE mount
"mountOpt": map[string]interface{}{
"AllowNonEmpty": true,
"AllowOther": true,
"DebugFUSE": false,
"DeviceName": fmt.Sprintf("decypharr-%s", provider),
"VolumeName": fmt.Sprintf("decypharr-%s", provider),
},
}
mountOpt := map[string]interface{}{
"AllowNonEmpty": true,
"AllowOther": true,
"DebugFUSE": false,
"DeviceName": fmt.Sprintf("decypharr-%s", provider),
"VolumeName": fmt.Sprintf("decypharr-%s", provider),
}
if cfg.Rclone.AsyncRead != nil {
mountOpt["AsyncRead"] = *cfg.Rclone.AsyncRead
}
if cfg.Rclone.UseMmap {
mountOpt["UseMmap"] = cfg.Rclone.UseMmap
}
if cfg.Rclone.Transfers != 0 {
mountOpt["Transfers"] = cfg.Rclone.Transfers
}
configOpts := make(map[string]interface{})
@@ -103,29 +111,29 @@ func (m *Manager) performMount(provider, webdavURL string) error {
configOpts["BufferSize"] = cfg.Rclone.BufferSize
}
if cacheDir != "" {
// Create cache directory if specified
if err := os.MkdirAll(cacheDir, 0755); err != nil {
m.logger.Warn().Str("cacheDir", cacheDir).Msg("Failed to create cache directory")
} else {
configOpts["CacheDir"] = cacheDir
}
}
if len(configOpts) > 0 {
// Only add _config if there are options to set
mountArgs["_config"] = configOpts
}
vfsOpt := map[string]interface{}{
"CacheMode": cfg.Rclone.VfsCacheMode,
"DirCacheTime": cfg.Rclone.DirCacheTime,
}
vfsOpt["PollInterval"] = 0 // Poll interval not supported for webdav, set to 0
// Add VFS options if caching is enabled
if cfg.Rclone.VfsCacheMode != "off" {
vfsOpt := map[string]interface{}{
"CacheMode": cfg.Rclone.VfsCacheMode,
}
if cfg.Rclone.VfsCacheMaxAge != "" {
vfsOpt["CacheMaxAge"] = cfg.Rclone.VfsCacheMaxAge
}
if cfg.Rclone.VfsDiskSpaceTotal != "" {
vfsOpt["DiskSpaceTotalSize"] = cfg.Rclone.VfsDiskSpaceTotal
}
if cfg.Rclone.VfsReadChunkSizeLimit != "" {
vfsOpt["ChunkSizeLimit"] = cfg.Rclone.VfsReadChunkSizeLimit
}
if cfg.Rclone.VfsCacheMaxSize != "" {
vfsOpt["CacheMaxSize"] = cfg.Rclone.VfsCacheMaxSize
}
@@ -138,28 +146,50 @@ func (m *Manager) performMount(provider, webdavURL string) error {
if cfg.Rclone.VfsReadAhead != "" {
vfsOpt["ReadAhead"] = cfg.Rclone.VfsReadAhead
}
if cfg.Rclone.VfsCacheMinFreeSpace != "" {
vfsOpt["CacheMinFreeSpace"] = cfg.Rclone.VfsCacheMinFreeSpace
}
if cfg.Rclone.VfsFastFingerprint {
vfsOpt["FastFingerprint"] = cfg.Rclone.VfsFastFingerprint
}
if cfg.Rclone.VfsReadChunkStreams != 0 {
vfsOpt["ChunkStreams"] = cfg.Rclone.VfsReadChunkStreams
}
if cfg.Rclone.NoChecksum {
vfsOpt["NoChecksum"] = cfg.Rclone.NoChecksum
}
if cfg.Rclone.NoModTime {
vfsOpt["NoModTime"] = cfg.Rclone.NoModTime
}
mountArgs["vfsOpt"] = vfsOpt
}
// Add mount options based on configuration
if cfg.Rclone.UID != 0 {
mountArgs["mountOpt"].(map[string]interface{})["UID"] = cfg.Rclone.UID
vfsOpt["UID"] = cfg.Rclone.UID
}
if cfg.Rclone.GID != 0 {
mountArgs["mountOpt"].(map[string]interface{})["GID"] = cfg.Rclone.GID
vfsOpt["GID"] = cfg.Rclone.GID
}
if cfg.Rclone.AttrTimeout != "" {
if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil {
mountArgs["mountOpt"].(map[string]interface{})["AttrTimeout"] = attrTimeout.String()
if cfg.Rclone.Umask != "" {
umask, err := strconv.ParseInt(cfg.Rclone.Umask, 8, 32)
if err == nil {
vfsOpt["Umask"] = uint32(umask)
}
}
if cfg.Rclone.AttrTimeout != "" {
if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil {
mountOpt["AttrTimeout"] = attrTimeout.String()
}
}
mountArgs["vfsOpt"] = vfsOpt
mountArgs["mountOpt"] = mountOpt
// Make the mount request
req := RCRequest{
Command: "mount/mount",
@@ -169,7 +199,7 @@ func (m *Manager) performMount(provider, webdavURL string) error {
_, err := m.makeRequest(req, true)
if err != nil {
// Clean up mount point on failure
m.forceUnmountPath(mountPath)
_ = m.forceUnmountPath(mountPath)
return fmt.Errorf("failed to create mount for %s: %w", provider, err)
}
@@ -180,7 +210,7 @@ func (m *Manager) performMount(provider, webdavURL string) error {
WebDAVURL: webdavURL,
Mounted: true,
MountedAt: time.Now().Format(time.RFC3339),
ConfigName: configName,
ConfigName: provider,
}
m.mountsMutex.Lock()
@@ -319,7 +349,7 @@ func (m *Manager) RefreshDir(provider string, dirs []string) error {
dirs = []string{"/"}
}
args := map[string]interface{}{
"fs": fmt.Sprintf("decypharr-%s:", provider),
"fs": fmt.Sprintf("%s:", provider),
}
for i, dir := range dirs {
if dir != "" {

View File

@@ -44,8 +44,8 @@ func (m *Manager) checkMountHealth(provider string) bool {
req := RCRequest{
Command: "operations/list",
Args: map[string]interface{}{
"fs": fmt.Sprintf("decypharr-%s:", provider),
"remote": "/",
"fs": fmt.Sprintf("%s:", provider),
"remote": "",
},
}
@@ -71,10 +71,10 @@ func (m *Manager) RecoverMount(provider string) error {
}
// Wait a moment
time.Sleep(2 * time.Second)
time.Sleep(1 * time.Second)
// Try to remount
if err := m.Mount(provider, mountInfo.WebDAVURL); err != nil {
if err := m.Mount(mountInfo.LocalPath, provider, mountInfo.WebDAVURL); err != nil {
return fmt.Errorf("failed to recover mount for %s: %w", provider, err)
}

View File

@@ -6,10 +6,13 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"slices"
"strings"
"sync"
"time"
@@ -24,7 +27,7 @@ type Manager struct {
rcPort string
rcUser string
rcPass string
configDir string
rcloneDir string
mounts map[string]*MountInfo
mountsMutex sync.RWMutex
logger zerolog.Logger
@@ -61,10 +64,10 @@ func NewManager() *Manager {
cfg := config.Get()
rcPort := "5572"
configDir := filepath.Join(cfg.Path, "rclone")
rcloneDir := filepath.Join(cfg.Path, "rclone")
// Ensure config directory exists
if err := os.MkdirAll(configDir, 0755); err != nil {
if err := os.MkdirAll(rcloneDir, 0755); err != nil {
_logger := logger.New("rclone")
_logger.Error().Err(err).Msg("Failed to create rclone config directory")
}
@@ -73,12 +76,12 @@ func NewManager() *Manager {
return &Manager{
rcPort: rcPort,
configDir: configDir,
rcloneDir: rcloneDir,
mounts: make(map[string]*MountInfo),
logger: logger.New("rclone"),
ctx: ctx,
cancel: cancel,
httpClient: &http.Client{Timeout: 30 * time.Second},
httpClient: &http.Client{Timeout: 60 * time.Second},
serverReady: make(chan struct{}),
}
}
@@ -98,15 +101,37 @@ func (m *Manager) Start(ctx context.Context) error {
return nil
}
logFile := filepath.Join(logger.GetLogPath(), "rclone.log")
// Delete old log file if it exists
if _, err := os.Stat(logFile); err == nil {
if err := os.Remove(logFile); err != nil {
return fmt.Errorf("failed to remove old rclone log file: %w", err)
}
}
args := []string{
"rcd",
"--rc-addr", ":" + m.rcPort,
"--rc-no-auth", // We'll handle auth at the application level
"--config", filepath.Join(m.configDir, "rclone.conf"),
"--log-level", "INFO",
"--config", filepath.Join(m.rcloneDir, "rclone.conf"),
"--log-file", logFile,
}
logLevel := cfg.Rclone.LogLevel
if logLevel != "" {
if !slices.Contains([]string{"DEBUG", "INFO", "NOTICE", "ERROR"}, logLevel) {
logLevel = "INFO"
}
args = append(args, "--log-level", logLevel)
}
if cfg.Rclone.CacheDir != "" {
if err := os.MkdirAll(cfg.Rclone.CacheDir, 0755); err == nil {
args = append(args, "--cache-dir", cfg.Rclone.CacheDir)
}
}
m.cmd = exec.CommandContext(ctx, "rclone", args...)
m.cmd.Dir = m.configDir
// Capture output for debugging
var stdout, stderr bytes.Buffer
@@ -114,9 +139,10 @@ func (m *Manager) Start(ctx context.Context) error {
m.cmd.Stderr = &stderr
if err := m.cmd.Start(); err != nil {
m.logger.Error().Str("stderr", stderr.String()).Str("stdout", stdout.String()).
Err(err).Msg("Failed to start rclone RC server")
return fmt.Errorf("failed to start rclone RC server: %w", err)
}
m.serverStarted = true
// Wait for server to be ready in a goroutine
@@ -155,9 +181,12 @@ func (m *Manager) Start(ctx context.Context) error {
default:
if code, ok := ExitCode(err); ok {
m.logger.Debug().Int("exit_code", code).Err(err).
Str("stderr", stderr.String()).
Str("stdout", stdout.String()).
Msg("Rclone RC server error")
} else {
m.logger.Debug().Err(err).Msg("Rclone RC server error (no exit code)")
m.logger.Debug().Err(err).Str("stderr", stderr.String()).
Str("stdout", stdout.String()).Msg("Rclone RC server error (no exit code)")
}
}
}()
@@ -234,50 +263,35 @@ func (m *Manager) Stop() error {
case err := <-done:
if err != nil && !errors.Is(err, context.Canceled) && !WasHardTerminated(err) {
m.logger.Warn().Err(err).Msg("Rclone process exited with error")
} else {
m.logger.Info().Msg("Rclone process exited gracefully")
}
case <-time.After(10 * time.Second):
case <-time.After(2 * time.Second):
m.logger.Warn().Msg("Timeout waiting for rclone to exit, force killing")
if err := m.cmd.Process.Kill(); err != nil {
m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
return err
// Check if the process already finished
if !strings.Contains(err.Error(), "process already finished") {
m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
return err
}
m.logger.Info().Msg("Process already finished during kill attempt")
}
// Wait a bit more for the kill to take effect
// Still wait for the Wait() to complete to clean up the process
select {
case <-done:
m.logger.Info().Msg("Rclone process killed successfully")
m.logger.Info().Msg("Rclone process cleanup completed")
case <-time.After(5 * time.Second):
m.logger.Error().Msg("Process may still be running after kill")
m.logger.Error().Msg("Process cleanup timeout")
}
}
}
// Clean up any remaining mount directories
cfg := config.Get()
if cfg.Rclone.MountPath != "" {
m.cleanupMountDirectories(cfg.Rclone.MountPath)
}
m.serverStarted = false
m.logger.Info().Msg("Rclone RC server stopped")
return nil
}
// cleanupMountDirectories removes empty mount directories left behind by
// previous mounts. The string argument is unused and retained only so the
// call site stays unchanged.
func (m *Manager) cleanupMountDirectories(_ string) {
	m.mountsMutex.RLock()
	defer m.mountsMutex.RUnlock()
	for _, info := range m.mounts {
		path := info.LocalPath
		if path == "" {
			continue
		}
		// os.Remove only succeeds on empty directories; a failure simply
		// means the directory is non-empty or busy, which is fine and is
		// deliberately not logged.
		if err := os.Remove(path); err != nil {
			continue
		}
		m.logger.Debug().Str("path", path).Msg("Removed empty mount directory")
	}
}
// waitForServer waits for the RC server to become available
func (m *Manager) waitForServer() {
maxAttempts := 30
@@ -325,7 +339,12 @@ func (m *Manager) makeRequest(req RCRequest, close bool) (*http.Response, error)
if resp.StatusCode != http.StatusOK {
// Read the response body to get more details
defer resp.Body.Close()
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
m.logger.Debug().Err(err).Msg("Failed to close response body")
}
}(resp.Body)
var errorResp RCResponse
if err := json.NewDecoder(resp.Body).Decode(&errorResp); err != nil {
return nil, fmt.Errorf("request failed with status %s, but could not decode error response: %w", resp.Status, err)

View File

@@ -3,10 +3,12 @@ package rclone
import (
"context"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"net/url"
"path/filepath"
"strings"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
)
// Mount represents a mount using the rclone RC client
@@ -19,15 +21,24 @@ type Mount struct {
}
// NewMount creates a new RC-based mount
func NewMount(provider, webdavURL string, rcManager *Manager) *Mount {
func NewMount(provider, customRcloneMount, webdavURL string, rcManager *Manager) *Mount {
cfg := config.Get()
mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
var mountPath string
if customRcloneMount != "" {
mountPath = customRcloneMount
} else {
mountPath = filepath.Join(cfg.Rclone.MountPath, provider)
}
_url, err := url.JoinPath(webdavURL, provider)
if err != nil {
_url = fmt.Sprintf("%s/%s", webdavURL, provider)
}
if !strings.HasSuffix(_url, "/") {
_url += "/"
}
return &Mount{
Provider: provider,
LocalPath: mountPath,
@@ -55,7 +66,7 @@ func (m *Mount) Mount(ctx context.Context) error {
Str("mount_path", m.LocalPath).
Msg("Creating mount via RC")
if err := m.rcManager.Mount(m.Provider, m.WebDAVURL); err != nil {
if err := m.rcManager.Mount(m.LocalPath, m.Provider, m.WebDAVURL); err != nil {
m.logger.Error().Str("provider", m.Provider).Msg("Mount operation failed")
return fmt.Errorf("mount failed for %s", m.Provider)
}

View File

@@ -76,7 +76,7 @@ func (m *Manager) GetStats() (*Stats, error) {
}
// Get bandwidth stats
bwStats, err := m.GetBandwidthStats()
if err == nil {
if err == nil && bwStats != nil {
stats.Bandwidth = *bwStats
} else {
fmt.Println("Failed to get rclone stats", err)

View File

@@ -2,11 +2,12 @@ package repair
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"path/filepath"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/common"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
)
func fileIsSymlinked(file string) bool {
@@ -85,7 +86,7 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile {
return uniqueParents
}
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile {
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]common.Client, caches map[string]*store.Cache) []arr.ContentFile {
brokenFiles := make([]arr.ContentFile, 0)
emptyFiles := make([]arr.ContentFile, 0)
@@ -149,7 +150,7 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile,
return brokenFiles
}
func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string {
func (r *Repair) findDebridForPath(dir string, clients map[string]common.Client) string {
// Check cache first
if debridName, exists := r.debridPathCache.Load(dir); exists {
return debridName.(string)

View File

@@ -2,17 +2,18 @@ package server
import (
"fmt"
"net/http"
"runtime"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/request"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"runtime"
"github.com/sirrobot01/decypharr/pkg/wire"
)
func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) {
ingests := make([]debridTypes.IngestData, 0)
_store := store.Get()
_store := wire.Get()
debrids := _store.Debrid()
if debrids == nil {
http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
@@ -42,7 +43,7 @@ func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) {
return
}
_store := store.Get()
_store := wire.Get()
debrids := _store.Debrid()
if debrids == nil {
@@ -92,35 +93,40 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
"go_version": runtime.Version(),
}
debrids := store.Get().Debrid()
if debrids == nil {
request.JSONResponse(w, stats, http.StatusOK)
return
}
clients := debrids.Clients()
caches := debrids.Caches()
profiles := make([]*debridTypes.Profile, 0)
for debridName, client := range clients {
profile, err := client.GetProfile()
profile.Name = debridName
if err != nil {
s.logger.Error().Err(err).Msg("Failed to get debrid profile")
continue
}
cache, ok := caches[debridName]
if ok {
// Get torrent data
profile.LibrarySize = cache.TotalTorrents()
profile.BadTorrents = len(cache.GetListing("__bad__"))
profile.ActiveLinks = cache.GetTotalActiveDownloadLinks()
debrids := wire.Get().Debrid()
if debrids != nil {
clients := debrids.Clients()
caches := debrids.Caches()
debridStats := make([]debridTypes.Stats, 0)
for debridName, client := range clients {
debridStat := debridTypes.Stats{}
libraryStat := debridTypes.LibraryStats{}
profile, err := client.GetProfile()
if err != nil {
s.logger.Error().Err(err).Str("debrid", debridName).Msg("Failed to get debrid profile")
profile = &debridTypes.Profile{
Name: debridName,
}
}
profile.Name = debridName
debridStat.Profile = profile
cache, ok := caches[debridName]
if ok {
// Get torrent data
libraryStat.Total = cache.TotalTorrents()
libraryStat.Bad = len(cache.GetListing("__bad__"))
libraryStat.ActiveLinks = cache.GetTotalActiveDownloadLinks()
}
debridStat.Library = libraryStat
debridStat.Accounts = client.AccountManager().Stats()
debridStats = append(debridStats, debridStat)
}
profiles = append(profiles, profile)
stats["debrids"] = debridStats
}
stats["debrids"] = profiles
// Add rclone stats if available
if rcManager := store.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() {
if rcManager := wire.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() {
rcStats, err := rcManager.GetStats()
if err != nil {
s.logger.Error().Err(err).Msg("Failed to get rclone stats")

View File

@@ -12,6 +12,7 @@ import (
"io"
"net/http"
"os"
"path/filepath"
)
type Server struct {
@@ -36,11 +37,12 @@ func New(handlers map[string]http.Handler) *Server {
}
//logs
r.Get("/logs", s.getLogs)
r.Get("/logs", s.getLogs) // deprecated, use /debug/logs
//debugs
r.Route("/debug", func(r chi.Router) {
r.Get("/stats", s.handleStats)
r.Get("/logs", s.getLogs)
r.Get("/logs/rclone", s.getRcloneLogs)
r.Get("/ingests", s.handleIngests)
r.Get("/ingests/{debrid}", s.handleIngestsByDebrid)
})
@@ -75,7 +77,7 @@ func (s *Server) Start(ctx context.Context) error {
}
func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
logFile := logger.GetLogPath()
logFile := filepath.Join(logger.GetLogPath(), "decypharr.log")
// Open and read the file
file, err := os.Open(logFile)
@@ -98,5 +100,42 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Expires", "0")
// Stream the file
_, _ = io.Copy(w, file)
if _, err := io.Copy(w, file); err != nil {
s.logger.Error().Err(err).Msg("Error streaming log file")
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
return
}
}
// getRcloneLogs streams the rclone log file to the client as plain text.
// Rclone logs reside in the same directory as the application logs.
func (s *Server) getRcloneLogs(w http.ResponseWriter, r *http.Request) {
	logFile := filepath.Join(logger.GetLogPath(), "rclone.log")

	// Open and read the file
	file, err := os.Open(logFile)
	if err != nil {
		http.Error(w, "Error reading log file", http.StatusInternalServerError)
		return
	}
	defer func(file *os.File) {
		if err := file.Close(); err != nil {
			s.logger.Error().Err(err).Msg("Error closing log file")
		}
	}(file)

	// Set headers before any body bytes are written.
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	// Fix: this handler serves the rclone log, not the application log.
	w.Header().Set("Content-Disposition", "inline; filename=rclone.log")
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")

	// Stream the file. Once io.Copy has written anything, the status line and
	// headers are already committed, so a late http.Error would be ignored by
	// the client and only trigger a "superfluous WriteHeader" warning — just
	// log the failure instead.
	if _, err := io.Copy(w, file); err != nil {
		s.logger.Error().Err(err).Msg("Error streaming rclone log file")
	}
}

View File

@@ -3,7 +3,7 @@ package server
import (
"cmp"
"encoding/json"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/wire"
"net/http"
)
@@ -38,7 +38,7 @@ func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Invalid ID", http.StatusBadRequest)
return
}
repair := store.Get().Repair()
repair := wire.Get().Repair()
mediaId := cmp.Or(payload.TmdbID, payload.TvdbID)

View File

@@ -1,318 +0,0 @@
package store
import (
"fmt"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/cavaliergopher/grab/v3"
"github.com/sirrobot01/decypharr/internal/utils"
)
// grabber downloads url to filename using the supplied grab client, optionally
// restricted to a byte range, and reports progress roughly every two seconds.
//
// progressCallback (may be nil) receives (bytesSinceLastReport, bytesPerSecond);
// the deltas across all calls sum to the total bytes downloaded. A final call
// with speed 0 flushes any remainder once the transfer completes. Returns the
// transfer error from the grab response, if any.
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
	req, err := grab.NewRequest(filename, url)
	if err != nil {
		return err
	}
	// Set byte range if specified
	if byterange != nil {
		byterangeStr := fmt.Sprintf("%d-%d", byterange[0], byterange[1])
		req.HTTPRequest.Header.Set("Range", "bytes="+byterangeStr)
	}
	// client.Do is asynchronous; resp tracks the in-flight transfer and
	// resp.Done is closed when it finishes (successfully or not).
	resp := client.Do(req)
	t := time.NewTicker(time.Second * 2)
	defer t.Stop()
	// lastReported is the byte count at the previous callback, so each
	// callback carries only the delta since then.
	var lastReported int64
Loop:
	for {
		select {
		case <-t.C:
			current := resp.BytesComplete()
			speed := int64(resp.BytesPerSecond())
			if current != lastReported {
				if progressCallback != nil {
					progressCallback(current-lastReported, speed)
				}
				lastReported = current
			}
		case <-resp.Done:
			break Loop
		}
	}
	// Report final bytes
	if progressCallback != nil {
		progressCallback(resp.BytesComplete()-lastReported, 0)
	}
	return resp.Err()
}
// processDownload prepares the on-disk destination directory for a debrid
// torrent and downloads all of its files into it, returning the directory path.
func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
	s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
	// Destination is <save path>/<original name without extension>, sanitized.
	dir := utils.RemoveInvalidChars(
		filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename)),
	)
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		// add the previous error to the error and return
		return "", fmt.Errorf("failed to create directory: %s: %v", dir, err)
	}
	s.downloadFiles(torrent, debridTorrent, dir)
	return dir, nil
}
// downloadFiles downloads every file of debridTorrent into parent, with
// concurrency bounded by s.downloadSemaphore. Per-file progress is aggregated
// across all workers into debridTorrent and mirrored onto torrent via
// partialTorrentUpdate. Errors are collected and logged; the method does not
// return them.
func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) {
	var wg sync.WaitGroup
	// Total bytes over all files; used to derive an overall percentage.
	totalSize := int64(0)
	for _, file := range debridTorrent.GetFiles() {
		totalSize += file.Size
	}
	debridTorrent.Lock()
	debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
	debridTorrent.Progress = 0       // Reset progress
	debridTorrent.Unlock()
	// progressCallback is invoked concurrently from every worker; it takes
	// both locks so the aggregate counters and the mirrored torrent stay
	// consistent. `downloaded` is a delta since the worker's last report.
	progressCallback := func(downloaded int64, speed int64) {
		debridTorrent.Lock()
		defer debridTorrent.Unlock()
		torrent.Lock()
		defer torrent.Unlock()
		// Update total downloaded bytes
		debridTorrent.SizeDownloaded += downloaded
		debridTorrent.Speed = speed
		// Calculate overall progress
		if totalSize > 0 {
			debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
		}
		s.partialTorrentUpdate(torrent, debridTorrent)
	}
	// One shared client for all workers; honors proxy environment variables.
	client := &grab.Client{
		UserAgent: "Decypharr[QBitTorrent]",
		HTTPClient: &http.Client{
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
			},
		},
	}
	// Buffered so workers never block on error delivery.
	errChan := make(chan error, len(debridTorrent.Files))
	for _, file := range debridTorrent.GetFiles() {
		if file.DownloadLink == nil {
			s.logger.Info().Msgf("No download link found for %s", file.Name)
			continue
		}
		wg.Add(1)
		// Acquire the semaphore before spawning so in-flight downloads are
		// capped at the semaphore's capacity.
		s.downloadSemaphore <- struct{}{}
		go func(file types.File) {
			defer wg.Done()
			defer func() { <-s.downloadSemaphore }()
			filename := file.Name
			err := grabber(
				client,
				file.DownloadLink.DownloadLink,
				filepath.Join(parent, filename),
				file.ByteRange,
				progressCallback,
			)
			if err != nil {
				s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
				errChan <- err
			} else {
				s.logger.Info().Msgf("Downloaded %s", filename)
			}
		}(file)
	}
	wg.Wait()
	close(errChan)
	var errors []error
	for err := range errChan {
		if err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		s.logger.Error().Msgf("Errors occurred during download: %v", errors)
		return
	}
	s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
// processSymlink mirrors a debrid torrent from its rclone mount into the
// torrent's save path via symlinks, polling (up to 30 minutes) for files to
// appear on the mount. Returns the directory that holds the symlinks.
func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
	files := debridTorrent.Files
	if len(files) == 0 {
		return "", fmt.Errorf("no valid files found")
	}
	s.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
	rCloneBase := debridTorrent.MountPath
	torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
	// This returns filename.ext for alldebrid instead of the parent folder filename/
	torrentFolder := torrentPath
	if err != nil {
		return "", fmt.Errorf("failed to get torrent path: %v", err)
	}
	// Check if the torrent path is a file
	torrentRclonePath := filepath.Join(rCloneBase, torrentPath) // leave it as is
	if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentPath) {
		// Alldebrid hotfix for single file torrents
		torrentFolder = utils.RemoveExtension(torrentFolder)
		torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
	}
	torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
	err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
	}
	// One-time scan of the mount: base filename -> path relative to
	// torrentRclonePath, so files nested in subfolders can be resolved.
	realPaths := make(map[string]string)
	err = filepath.WalkDir(torrentRclonePath, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			// Skip unreadable entries rather than aborting the walk.
			return nil
		}
		if !d.IsDir() {
			filename := d.Name()
			rel, _ := filepath.Rel(torrentRclonePath, path)
			realPaths[filename] = rel
		}
		return nil
	})
	if err != nil {
		s.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
	}
	// pending tracks files still waiting to appear on the mount, keyed by
	// their (possibly corrected) relative path.
	pending := make(map[string]types.File)
	for _, file := range files {
		if realRelPath, ok := realPaths[file.Name]; ok {
			file.Path = realRelPath
		}
		pending[file.Path] = file
	}
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(pending))
	// Poll until every pending file exists on the mount and has a symlink,
	// or the timeout fires.
	for len(pending) > 0 {
		select {
		case <-ticker.C:
			for path, file := range pending {
				fullFilePath := filepath.Join(torrentRclonePath, file.Path)
				if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
					fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
					// An already-existing symlink counts as success.
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						s.logger.Warn().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						delete(pending, path)
						s.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
			return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending))
		}
	}
	if s.skipPreCache {
		return torrentSymlinkPath, nil
	}
	// Warm the cache for the new symlinks in the background.
	go func() {
		s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
		if err := utils.PreCacheFile(filePaths); err != nil {
			s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
		}
	}()
	return torrentSymlinkPath, nil
}
// createSymlinksWebdav symlinks the torrent's files from a flat rclone/webdav
// directory (rclonePath) into <save path>/<torrentFolder>, polling (up to 30
// minutes) for the files to appear. Returns the symlink directory.
func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) {
	files := debridTorrent.Files
	symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
	err := os.MkdirAll(symlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
	}
	// remainingFiles tracks files not yet seen in the directory, keyed by
	// base filename (the webdav layout is assumed flat — no subfolders).
	remainingFiles := make(map[string]types.File)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(files))
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			entries, err := os.ReadDir(rclonePath)
			if err != nil {
				// Mount may not be ready yet; retry on the next tick.
				continue
			}
			// Check which files exist in this batch
			for _, entry := range entries {
				filename := entry.Name()
				if file, exists := remainingFiles[filename]; exists {
					fullFilePath := filepath.Join(rclonePath, filename)
					fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
					// An already-existing symlink counts as success.
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						delete(remainingFiles, filename)
						s.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return symlinkPath, fmt.Errorf("timeout waiting for files")
		}
	}
	if s.skipPreCache {
		return symlinkPath, nil
	}
	go func() {
		s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
		if err := utils.PreCacheFile(filePaths); err != nil {
			s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
		}
	}() // Pre-cache the files in the background
	// Pre-cache the first 256KB and 1MB of the file
	return symlinkPath, nil
}
// getTorrentPath polls the rclone mount until the torrent's folder appears and
// returns its path relative to rclonePath.
//
// The mount can lag behind the debrid API, so a short retry loop is expected.
// Fix: the loop previously had no exit condition and could spin forever when
// the folder never shows up; a 30-minute deadline (matching the symlink
// waiters in this file) now bounds the wait and surfaces the last error.
func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) {
	deadline := time.Now().Add(30 * time.Minute)
	for {
		torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
		if err == nil {
			s.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
			return torrentPath, nil
		}
		if time.Now().After(deadline) {
			return "", fmt.Errorf("timeout waiting for torrent folder in %s: %v", rclonePath, err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

View File

@@ -2,7 +2,7 @@ package web
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/wire"
"golang.org/x/crypto/bcrypt"
"net/http"
"strings"
@@ -18,7 +18,7 @@ import (
)
func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) {
_store := store.Get()
_store := wire.Get()
request.JSONResponse(w, _store.Arr().GetAll(), http.StatusOK)
}
@@ -28,9 +28,9 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
_store := store.Get()
_store := wire.Get()
results := make([]*store.ImportRequest, 0)
results := make([]*wire.ImportRequest, 0)
errs := make([]string, 0)
arrName := r.FormValue("arr")
@@ -66,7 +66,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue
}
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI)
importReq := wire.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, wire.ImportTypeAPI)
if err := _store.AddTorrent(ctx, importReq); err != nil {
wb.logger.Error().Err(err).Str("url", url).Msg("Failed to add torrent")
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
@@ -91,7 +91,7 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
continue
}
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI)
importReq := wire.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, wire.ImportTypeAPI)
err = _store.AddTorrent(ctx, importReq)
if err != nil {
wb.logger.Error().Err(err).Str("file", fileHeader.Filename).Msg("Failed to add torrent")
@@ -103,8 +103,8 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
}
request.JSONResponse(w, struct {
Results []*store.ImportRequest `json:"results"`
Errors []string `json:"errors,omitempty"`
Results []*wire.ImportRequest `json:"results"`
Errors []string `json:"errors,omitempty"`
}{
Results: results,
Errors: errs,
@@ -118,7 +118,7 @@ func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
return
}
_store := store.Get()
_store := wire.Get()
var arrs []string
@@ -186,7 +186,7 @@ func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) {
// Merge config arrs, with arr Storage
unique := map[string]config.Arr{}
cfg := config.Get()
arrStorage := store.Get().Arr()
arrStorage := wire.Get().Arr()
// Add existing Arrs from storage
for _, a := range arrStorage.GetAll() {
@@ -256,6 +256,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
currentConfig.RemoveStalledAfter = updatedConfig.RemoveStalledAfter
currentConfig.AllowedExt = updatedConfig.AllowedExt
currentConfig.DiscordWebhook = updatedConfig.DiscordWebhook
currentConfig.CallbackURL = updatedConfig.CallbackURL
// Should this be added?
currentConfig.URLBase = updatedConfig.URLBase
@@ -276,7 +277,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
}
// Update Arrs through the service
storage := store.Get()
storage := wire.Get()
arrStorage := storage.Arr()
newConfigArrs := make([]config.Arr, 0)
@@ -320,7 +321,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
if restartFunc != nil {
go func() {
// Small delay to ensure the response is sent
time.Sleep(500 * time.Millisecond)
time.Sleep(200 * time.Millisecond)
restartFunc()
}()
}
@@ -330,7 +331,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
}
func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) {
_store := store.Get()
_store := wire.Get()
request.JSONResponse(w, _store.Repair().GetJobs(), http.StatusOK)
}
@@ -340,7 +341,7 @@ func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) {
http.Error(w, "No job ID provided", http.StatusBadRequest)
return
}
_store := store.Get()
_store := wire.Get()
if err := _store.Repair().ProcessJob(id); err != nil {
wb.logger.Error().Err(err).Msg("Failed to process repair job")
}
@@ -361,7 +362,7 @@ func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) {
return
}
_store := store.Get()
_store := wire.Get()
_store.Repair().DeleteJobs(req.IDs)
w.WriteHeader(http.StatusOK)
}
@@ -372,7 +373,7 @@ func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) {
http.Error(w, "No job ID provided", http.StatusBadRequest)
return
}
_store := store.Get()
_store := wire.Get()
if err := _store.Repair().StopJob(id); err != nil {
wb.logger.Error().Err(err).Msg("Failed to stop repair job")
http.Error(w, "Failed to stop job: "+err.Error(), http.StatusInternalServerError)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -414,8 +414,91 @@ class DecypharrUtils {
}
}
// Mobile navigation dropdown handler.
// Wires up open/close behavior for the navbar's mobile dropdown: toggling via
// click/touch/keyboard on the trigger button, closing on outside interaction
// or menu-item selection, and keeping aria-expanded in sync for accessibility.
setupMobileNavigation() {
    const mobileMenuBtn = document.querySelector('.navbar-start .dropdown [role="button"]');
    const mobileMenu = document.querySelector('.navbar-start .dropdown .dropdown-content');
    const dropdown = document.querySelector('.navbar-start .dropdown');
    // Bail out silently if the mobile nav markup isn't present on this page.
    if (!mobileMenuBtn || !mobileMenu || !dropdown) return;
    // Single source of truth for the dropdown's open state.
    let isOpen = false;
    const openDropdown = () => {
        if (!isOpen) {
            dropdown.classList.add('dropdown-open');
            mobileMenuBtn.setAttribute('aria-expanded', 'true');
            isOpen = true;
        }
    };
    const closeDropdown = () => {
        if (isOpen) {
            dropdown.classList.remove('dropdown-open');
            mobileMenuBtn.setAttribute('aria-expanded', 'false');
            isOpen = false;
        }
    };
    const toggleDropdown = (e) => {
        e.preventDefault();
        e.stopPropagation();
        if (isOpen) {
            closeDropdown();
        } else {
            openDropdown();
        }
    };
    // Handle button clicks (both mouse and touch)
    mobileMenuBtn.addEventListener('click', toggleDropdown);
    mobileMenuBtn.addEventListener('touchend', (e) => {
        // preventDefault on touchend — presumably to suppress the synthetic
        // click that would otherwise toggle twice; TODO confirm on devices.
        e.preventDefault();
        toggleDropdown(e);
    });
    // Close dropdown when clicking outside
    document.addEventListener('click', (e) => {
        if (isOpen && !dropdown.contains(e.target)) {
            closeDropdown();
        }
    });
    // Close dropdown when touching outside
    document.addEventListener('touchend', (e) => {
        if (isOpen && !dropdown.contains(e.target)) {
            closeDropdown();
        }
    });
    // Close dropdown when clicking menu items
    mobileMenu.addEventListener('click', (e) => {
        if (e.target.tagName === 'A') {
            closeDropdown();
        }
    });
    // Handle keyboard navigation
    mobileMenuBtn.addEventListener('keydown', (e) => {
        if (e.key === 'Enter' || e.key === ' ') {
            e.preventDefault();
            toggleDropdown(e);
        } else if (e.key === 'Escape') {
            closeDropdown();
        }
    });
    // Set initial aria attributes
    mobileMenuBtn.setAttribute('aria-expanded', 'false');
    mobileMenuBtn.setAttribute('aria-haspopup', 'true');
}
// Global event listeners
setupGlobalEventListeners() {
// Setup mobile navigation dropdown
this.setupMobileNavigation();
// Smooth scroll for anchor links
document.addEventListener('click', (e) => {
const link = e.target.closest('a[href^="#"]');

View File

@@ -149,10 +149,11 @@ class ConfigManager {
if (!rcloneConfig) return;
const fields = [
'enabled', 'mount_path', 'cache_dir', 'vfs_cache_mode', 'vfs_cache_max_size', 'vfs_cache_max_age',
'enabled', 'rc_port', 'mount_path', 'cache_dir', 'transfers', 'vfs_cache_mode', 'vfs_cache_max_size', 'vfs_cache_max_age',
'vfs_cache_poll_interval', 'vfs_read_chunk_size', 'vfs_read_chunk_size_limit', 'buffer_size',
'uid', 'gid', 'vfs_read_ahead', 'attr_timeout', 'dir_cache_time', 'poll_interval', 'umask',
'no_modtime', 'no_checksum'
'no_modtime', 'no_checksum', 'log_level', 'vfs_cache_min_free_space', 'vfs_fast_fingerprint', 'vfs_read_chunk_streams',
'async_read', 'use_mmap'
];
fields.forEach(field => {
@@ -246,7 +247,7 @@ class ConfigManager {
<select class="select select-bordered" name="debrid[${index}].name" id="debrid[${index}].name" required>
<option value="realdebrid">Real Debrid</option>
<option value="alldebrid">AllDebrid</option>
<option value="debrid_link">Debrid Link</option>
<option value="debridlink">Debrid Link</option>
<option value="torbox">Torbox</option>
</select>
</div>
@@ -273,7 +274,6 @@ class ConfigManager {
<div class="form-control flex-1">
<label class="label" for="debrid[${index}].download_api_keys">
<span class="label-text font-medium">Download API Keys</span>
<span class="badge badge-ghost badge-sm">Optional</span>
</label>
<div class="password-toggle-container">
<textarea class="textarea textarea-bordered has-toggle font-mono h-full min-h-[200px]"
@@ -290,7 +290,7 @@ class ConfigManager {
</div>
</div>
<div class="space-y-4">
<div class="grid grid-cols-1 lg:grid-cols-2 gap-6">
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control">
<label class="label" for="debrid[${index}].folder">
<span class="label-text font-medium">Mount/Rclone Folder</span>
@@ -302,6 +302,21 @@ class ConfigManager {
<span class="label-text-alt">Path where debrid files are mounted</span>
</div>
</div>
<div class="form-control">
<label class="label" for="debrid[${index}].rclone_mount_path">
<span class="label-text font-medium">Custom Rclone Mount Path</span>
<span class="badge badge-ghost badge-sm">Optional</span>
</label>
<input type="text" class="input input-bordered"
name="debrid[${index}].rclone_mount_path" id="debrid[${index}].rclone_mount_path"
placeholder="/custom/mount/path (leave empty for global mount path)">
<div class="label">
<span class="label-text-alt">Custom mount path for this debrid service. If empty, uses global rclone mount path.</span>
</div>
</div>
</div>
<div class="grid grid-cols-2 lg:grid-cols-3 gap-3">
<div class="form-control">
<label class="label" for="debrid[${index}].rate_limit">
<span class="label-text font-medium">Rate Limit</span>
@@ -324,6 +339,17 @@ class ConfigManager {
<span class="label-text-alt">This proxy is used for this debrid account</span>
</div>
</div>
<div class="form-control">
<label class="label" for="debrid[${index}].minimum_free_slot">
<span class="label-text font-medium">Minimum Free Slot</span>
</label>
<input type="number" class="input input-bordered"
name="debrid[${index}].minimum_free_slot" id="debrid[${index}].minimum_free_slot"
placeholder="1" value="1">
<div class="label">
<span class="label-text-alt">Minimum free slot for this debrid</span>
</div>
</div>
</div>
</div>
@@ -531,11 +557,6 @@ class ConfigManager {
if (toggle.checked || forceShow) {
webdavSection.classList.remove('hidden');
// Add required attributes to key fields
webdavSection.querySelectorAll('input[name$=".torrents_refresh_interval"]').forEach(el => el.required = true);
webdavSection.querySelectorAll('input[name$=".download_links_refresh_interval"]').forEach(el => el.required = true);
webdavSection.querySelectorAll('input[name$=".auto_expire_links_after"]').forEach(el => el.required = true);
webdavSection.querySelectorAll('input[name$=".workers"]').forEach(el => el.required = true);
} else {
webdavSection.classList.add('hidden');
// Remove required attributes
@@ -877,7 +898,7 @@ class ConfigManager {
<input type="hidden" name="arr[${index}].source" value="${data.source || ''}">
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4">
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control">
<label class="label" for="arr[${index}].name">
<span class="label-text font-medium">Service Name</span>
@@ -912,9 +933,7 @@ class ConfigManager {
</button>
</div>
</div>
</div>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4 mt-4">
<div class="form-control">
<label class="label" for="arr[${index}].selected_debrid">
<span class="label-text font-medium">Preferred Debrid Service</span>
@@ -923,40 +942,38 @@ class ConfigManager {
<option value="" selected>Auto-select</option>
<option value="realdebrid">Real Debrid</option>
<option value="alldebrid">AllDebrid</option>
<option value="debrid_link">Debrid Link</option>
<option value="debridlink">Debrid Link</option>
<option value="torbox">Torbox</option>
</select>
<div class="label">
<span class="label-text-alt">Which debrid service this Arr should prefer</span>
</div>
</div>
</div>
<div class="flex flex-col justify-end">
<div class="grid grid-cols-3 gap-2">
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].cleanup" id="arr[${index}].cleanup">
<span class="label-text text-sm">Cleanup Queue</span>
</label>
</div>
<div class="grid grid-cols-3 gap-4">
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].cleanup" id="arr[${index}].cleanup">
<span class="label-text text-sm">Cleanup Queue</span>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].skip_repair" id="arr[${index}].skip_repair">
<span class="label-text text-sm">Skip Repair</span>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].skip_repair" id="arr[${index}].skip_repair">
<span class="label-text text-sm">Skip Repair</span>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].download_uncached" id="arr[${index}].download_uncached">
<span class="label-text text-sm">Download Uncached</span>
</label>
</div>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-2">
<input type="checkbox" class="checkbox checkbox-sm"
name="arr[${index}].download_uncached" id="arr[${index}].download_uncached">
<span class="label-text text-sm">Download Uncached</span>
</label>
</div>
</div>
</div>
@@ -1064,6 +1081,7 @@ class ConfigManager {
min_file_size: document.getElementById('minFileSize').value,
max_file_size: document.getElementById('maxFileSize').value,
remove_stalled_after: document.getElementById('removeStalledAfter').value,
callback_url: document.getElementById('callbackUrl').value,
// Debrid configurations
debrids: this.collectDebridConfigs(),
@@ -1094,6 +1112,8 @@ class ConfigManager {
api_key: document.querySelector(`[name="debrid[${i}].api_key"]`).value,
folder: document.querySelector(`[name="debrid[${i}].folder"]`).value,
rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value,
minimum_free_slot: parseInt(document.querySelector(`[name="debrid[${i}].minimum_free_slot"]`).value) || 0,
rclone_mount_path: document.querySelector(`[name="debrid[${i}].rclone_mount_path"]`).value,
proxy: document.querySelector(`[name="debrid[${i}].proxy"]`).value,
download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked,
unpack_rar: document.querySelector(`[name="debrid[${i}].unpack_rar"]`).checked,
@@ -1222,15 +1242,22 @@ class ConfigManager {
return {
enabled: getElementValue('enabled', false),
rc_port: getElementValue('rc_port', "5572"),
mount_path: getElementValue('mount_path'),
buffer_size: getElementValue('buffer_size'),
cache_dir: getElementValue('cache_dir'),
transfers: getElementValue('transfers', 8),
vfs_cache_mode: getElementValue('vfs_cache_mode', 'off'),
vfs_cache_max_age: getElementValue('vfs_cache_max_age', '1h'),
vfs_cache_max_size: getElementValue('vfs_cache_max_size'),
vfs_cache_poll_interval: getElementValue('vfs_cache_poll_interval', '1m'),
vfs_read_chunk_size: getElementValue('vfs_read_chunk_size', '128M'),
vfs_read_chunk_size_limit: getElementValue('vfs_read_chunk_size_limit', 'off'),
vfs_cache_min_free_space: getElementValue('vfs_cache_min_free_space', ''),
vfs_fast_fingerprint: getElementValue('vfs_fast_fingerprint', false),
vfs_read_chunk_streams: getElementValue('vfs_read_chunk_streams', 0),
use_mmap: getElementValue('use_mmap', false),
async_read: getElementValue('async_read', true),
uid: getElementValue('uid', 0),
gid: getElementValue('gid', 0),
umask: getElementValue('umask', ''),
@@ -1239,6 +1266,7 @@ class ConfigManager {
dir_cache_time: getElementValue('dir_cache_time', '5m'),
no_modtime: getElementValue('no_modtime', false),
no_checksum: getElementValue('no_checksum', false),
log_level: getElementValue('log_level', 'INFO'),
};
}

View File

@@ -3,9 +3,10 @@ package web
import (
"encoding/json"
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"net/http"
"strings"
"github.com/sirrobot01/decypharr/internal/config"
)
func (wb *Web) setupMiddleware(next http.Handler) http.Handler {
@@ -79,7 +80,7 @@ func (wb *Web) sendJSONError(w http.ResponseWriter, message string, statusCode i
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(statusCode)
json.NewEncoder(w).Encode(map[string]interface{}{
"error": message,
"status": statusCode,
"error": message,
"status": statusCode,
})
}
}

View File

@@ -120,7 +120,7 @@
</div>
</div>
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4">
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="form-control">
<label class="label" for="minFileSize">
<span class="label-text font-medium">Minimum File Size</span>
@@ -150,6 +150,15 @@
<span class="label-text-alt">Duration before removing stalled torrents</span>
</div>
</div>
<div class="form-control">
<label class="label" for="callbackUrl">
<span class="label-text font-medium">Callback URL</span>
</label>
<input type="text" class="input input-bordered" id="callbackUrl" name="callback_url" placeholder="http://example.com/callback">
<div class="label">
<span class="label-text-alt">Optional callback URL for download status updates</span>
</div>
</div>
</div>
<!-- Authentication Settings Section -->
@@ -364,71 +373,68 @@
</div>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="space-y-4">
<div class="form-control">
<label class="label" for="repair.interval">
<span class="label-text font-medium">Repair Interval</span>
</label>
<input type="text" class="input input-bordered" name="repair.interval" id="repair.interval" placeholder="24h">
<div class="label">
<span class="label-text-alt">How often to run repair (e.g., 24h, 1d, 03:00, or crontab)</span>
</div>
</div>
<div class="form-control">
<label class="label" for="repair.workers">
<span class="label-text font-medium">Worker Threads</span>
</label>
<input type="number" class="input input-bordered" name="repair.workers" id="repair.workers" min="1" placeholder="40">
<div class="label">
<span class="label-text-alt">Number of concurrent repair workers</span>
</div>
</div>
<div class="form-control">
<label class="label" for="repair.strategy">
<span class="label-text font-medium">Repair Strategy</span>
</label>
<select class="select select-bordered" name="repair.strategy" id="repair.strategy">
<option value="per_torrent" selected>Per Torrent</option>
<option value="per_file">Per File</option>
</select>
<div class="label">
<span class="label-text-alt">How to handle repairs</span>
</div>
<div class="form-control">
<label class="label" for="repair.interval">
<span class="label-text font-medium">Repair Interval</span>
</label>
<input type="text" class="input input-bordered" name="repair.interval" id="repair.interval" placeholder="24h">
<div class="label">
<span class="label-text-alt">How often to run repair (e.g., 24h, 1d, 03:00, or crontab)</span>
</div>
</div>
<div class="space-y-4">
<div class="form-control">
<label class="label" for="repair.zurg_url">
<span class="label-text font-medium">Zurg URL</span>
</label>
<input type="url" class="input input-bordered" name="repair.zurg_url" id="repair.zurg_url" placeholder="http://zurg:9999">
<div class="label">
<span class="label-text-alt">Optional Zurg instance to speed up repairs</span>
<div class="form-control">
<label class="label" for="repair.workers">
<span class="label-text font-medium">Worker Threads</span>
</label>
<input type="number" class="input input-bordered" name="repair.workers" id="repair.workers" min="1" placeholder="40">
<div class="label">
<span class="label-text-alt">Number of concurrent repair workers</span>
</div>
</div>
<div class="form-control">
<label class="label" for="repair.strategy">
<span class="label-text font-medium">Repair Strategy</span>
</label>
<select class="select select-bordered" name="repair.strategy" id="repair.strategy">
<option value="per_torrent" selected>Per Torrent</option>
<option value="per_file">Per File</option>
</select>
<div class="label">
<span class="label-text-alt">How to handle repairs</span>
</div>
</div>
<div class="form-control">
<label class="label" for="repair.zurg_url">
<span class="label-text font-medium">Zurg URL</span>
</label>
<input type="url" class="input input-bordered" name="repair.zurg_url" id="repair.zurg_url" placeholder="http://zurg:9999">
<div class="label">
<span class="label-text-alt">Optional Zurg instance to speed up repairs</span>
</div>
</div>
</div>
<div class="grid grid-cols-2 lg:grid-cols-3 gap-4">
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="repair.use_webdav" id="repair.use_webdav">
<div>
<span class="label-text font-medium">Use WebDAV</span>
<div class="label-text-alt">Use internal WebDAV for repairs</div>
</div>
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="repair.use_webdav" id="repair.use_webdav">
<div>
<span class="label-text font-medium">Use WebDAV</span>
<div class="label-text-alt">Use internal WebDAV for repairs</div>
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="repair.auto_process" id="repair.auto_process">
<div>
<span class="label-text font-medium">Auto Process</span>
<div class="label-text-alt">Automatically delete broken symlinks and re-search</div>
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="repair.auto_process" id="repair.auto_process">
<div>
<span class="label-text font-medium">Auto Process</span>
<div class="label-text-alt">Automatically delete broken symlinks and re-search</div>
</div>
</label>
</div>
</div>
</div>
@@ -466,6 +472,25 @@
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.rc_port">
<span class="label-text font-medium">RC Port</span>
</label>
<input type="text" class="input input-bordered" name="rclone.rc_port" id="rclone.rc_port">
</div>
<div class="form-control">
<label class="label" for="rclone.log_level">
<span class="label-text font-medium">Log Level</span>
</label>
<select class="select select-bordered" name="rclone.log_level" id="rclone.log_level">
<option value="INFO">INFO</option>
<option value="DEBUG">DEBUG</option>
<option value="NOTICE">NOTICE</option>
<option value="ERROR">ERROR</option>
</select>
</div>
<div class="form-control">
<label class="label" for="rclone.uid">
<span class="label-text font-medium">User ID (PUID)</span>
@@ -487,7 +512,7 @@
</div>
<div class="form-control">
<label class="label" for="rclone.umask">
<span class="label-text font-medium">Group ID (PGID)</span>
<span class="label-text font-medium">UMASK</span>
</label>
<input type="text" class="input input-bordered" name="rclone.umask" id="rclone.umask" placeholder="0022">
<div class="label">
@@ -512,6 +537,15 @@
<span class="label-text-alt">How long the kernel caches the attributes (size, modification time, etc.)</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.transfers">
<span class="label-text font-medium">Transfers</span>
</label>
<input type="number" class="input input-bordered" name="rclone.transfers" id="rclone.transfers" placeholder="8" min="1">
<div class="label">
<span class="label-text-alt">Number of file transfers to run in parallel</span>
</div>
</div>
</div>
</div>
</div>
@@ -615,6 +649,36 @@
<span class="label-text-alt">How often VFS cache dir gets cleaned</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.vfs_cache_min_free_space">
<span class="label-text font-medium">VFS Cache Min Free Space</span>
</label>
<input type="text" class="input input-bordered" name="rclone.vfs_cache_min_free_space" id="rclone.vfs_cache_min_free_space" placeholder="1G">
<div class="label">
<span class="label-text-alt">Target minimum free space on the disk containing the cache</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.vfs_disk_space_total">
<span class="label-text font-medium">VFS Disk Space Total</span>
</label>
<input type="text" class="input input-bordered" name="rclone.vfs_disk_space_total" id="rclone.vfs_disk_space_total" placeholder="1G">
<div class="label">
<span class="label-text-alt">Specify the total space of disk</span>
</div>
</div>
<div class="form-control">
<label class="label" for="rclone.vfs_read_chunk_streams">
<span class="label-text font-medium">VFS Read Chunk Streams</span>
</label>
<input type="number" class="input input-bordered" name="rclone.vfs_read_chunk_streams" id="rclone.vfs_read_chunk_streams" placeholder="4" min="0">
<div class="label">
<span class="label-text-alt">The number of parallel streams to read at once</span>
</div>
</div>
</div>
</div>
</div>
@@ -624,7 +688,7 @@
<h3 class="text-lg font-semibold mb-4 flex items-center">
<i class="bi bi-gear mr-2"></i>Advanced Settings
</h3>
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
<div class="grid grid-cols-2 lg:grid-cols-3 gap-4">
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3">
<input type="checkbox" class="checkbox" name="rclone.no_modtime" id="rclone.no_modtime">
@@ -644,6 +708,36 @@
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3" for="rclone.async_read">
<input type="checkbox" class="checkbox" name="rclone.async_read" id="rclone.async_read">
<div>
<span class="label-text font-medium">Async Read</span>
<div class="label-text-alt">Use asynchronous reads</div>
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3" for="rclone.vfs_fast_fingerprint">
<input type="checkbox" class="checkbox" name="rclone.vfs_fast_fingerprint" id="rclone.vfs_fast_fingerprint">
<div>
<span class="label-text font-medium">VFS Fast Fingerprint</span>
<div class="label-text-alt">Use fast (less accurate) fingerprints for change detection</div>
</div>
</label>
</div>
<div class="form-control">
<label class="label cursor-pointer justify-start gap-3" for="rclone.use_mmap">
<input type="checkbox" class="checkbox" name="rclone.use_mmap" id="rclone.use_mmap">
<div>
<span class="label-text font-medium">Use Mmap</span>
<div class="label-text-alt">Use fast (less accurate) fingerprints for change detection</div>
</div>
</label>
</div>
</div>
</div>
</div>

View File

@@ -60,9 +60,6 @@
<li><a href="{{.URLBase}}webdav" target="_blank">
<i class="bi bi-cloud text-success"></i>WebDAV
</a></li>
<li><a href="{{.URLBase}}stats" class="{{if eq .Page "stats"}}active{{end}}">
<i class="bi bi-graph-up text-info"></i>Stats
</a></li>
<li><a href="{{.URLBase}}logs" target="_blank">
<i class="bi bi-journal-text text-warning"></i>Logs
</a></li>
@@ -96,7 +93,7 @@
<i class="bi bi-cloud"></i>
<span class="hidden xl:inline">WebDAV</span>
</a></li>
<li><a href="{{.URLBase}}logs" target="_blank" class="tooltip tooltip-bottom" data-tip="System Logs">
<li><a href="{{.URLBase}}debug/logs" target="_blank" class="tooltip tooltip-bottom" data-tip="System Logs">
<i class="bi bi-journal-text"></i>
<span class="hidden xl:inline">Logs</span>
</a></li>

View File

@@ -72,10 +72,16 @@
<div class="card bg-base-100 shadow-xl" id="rclone-card">
<div class="card-header p-6 pb-3">
<h2 class="card-title text-xl">
<i class="bi bi-cloud-arrow-up text-primary"></i>
Rclone Statistics
</h2>
<div class="card-title text-xl justify-between items-center">
<h2>
<i class="bi bi-cloud-arrow-up text-primary"></i>
Rclone Statistics
</h2>
<a href="{{.URLBase}}debug/logs/rclone" class="btn btn-sm btn-outline" target="_blank">
<i class="bi bi-arrow-right"></i>
View Rclone Logs
</a>
</div>
<div class="badge" id="rclone-status">Unknown</div>
</div>
<div class="card-body p-6 pt-3" id="rclone-content">
@@ -179,9 +185,9 @@
<div class="stat-desc">Total: ${cs.totalChecks || 0}</div>
</div>
<div class="stat">
<div class="stat-title">Elapsed Time</div>
<div class="stat-value text-accent">${((cs.elapsedTime || 0) / 60).toFixed(1)}m</div>
<div class="stat-desc">Transfer: ${((cs.transferTime || 0) / 60).toFixed(1)}m</div>
<div class="stat-title">Uptime</div>
<div class="stat-value text-accent">${window.decypharrUtils.formatDuration(cs.elapsedTime)}</div>
<div class="stat-desc">Transfer: ${window.decypharrUtils.formatDuration(cs.transferTime)}</div>
</div>
`;
}
@@ -225,7 +231,7 @@
</div>
<div class="flex justify-between text-xs text-base-content/60 mt-1">
<span>${window.decypharrUtils.formatBytes(transfer.bytes || 0)} / ${window.decypharrUtils.formatBytes(transfer.size || 0)}</span>
<span>ETA: ${transfer.eta ? Math.ceil(transfer.eta / 60) + 'm' : 'Unknown'}</span>
<span>ETA: ${transfer.eta ? window.decypharrUtils.formatDuration(transfer.eta) : 'Unknown'}</span>
</div>
</div>
</div>
@@ -312,37 +318,100 @@
let html = '<div class="space-y-4">';
debrids.forEach(debrid => {
const profile = debrid.profile || {};
const library = debrid.library || {};
const accounts = debrid.accounts || [];
html += `
<div class="card bg-base-200">
<div class="card-body p-4">
<div class="flex justify-between items-start">
<div>
<h3 class="card-title text-lg">${debrid.name || 'Unknown Service'}</h3>
<p class="text-sm text-base-content/70">${debrid.username || 'No username'}</p>
<h3 class="card-title text-lg">${profile.name || 'Unknown Service'}</h3>
<p class="text-sm text-base-content/70">${profile.username || 'No username'}</p>
</div>
<div class="text-right">
<div class="text-sm font-mono">${debrid.points} points</div>
<div class="text-xs text-base-content/70">Expires: ${debrid.expiration || 'Unknown'}</div>
<div class="text-sm font-mono">${formatNumber(profile.points || 0)} points</div>
<div class="text-xs text-base-content/70">Type: ${profile.type || 'Unknown'}</div>
<div class="text-xs text-base-content/70">Expires: ${profile.expiration ? new Date(profile.expiration).toLocaleDateString() : 'Unknown'}</div>
</div>
</div>
<div class="grid grid-cols-2 md:grid-cols-4 gap-3 mt-4">
<div class="stat">
<div class="stat-title text-xs">Library Size</div>
<div class="stat-value text-sm">${formatNumber(debrid.library_size || 0)}</div>
<div class="stat-value text-sm">${formatNumber(library.total || 0)}</div>
</div>
<div class="stat">
<div class="stat-title text-xs">Bad Torrents</div>
<div class="stat-value text-sm text-error">${formatNumber(debrid.bad_torrents || 0)}</div>
<div class="stat-value text-sm text-error">${formatNumber(library.bad || 0)}</div>
</div>
<div class="stat">
<div class="stat-title text-xs">Active Links</div>
<div class="stat-value text-sm text-success">${formatNumber(debrid.active_links || 0)}</div>
<div class="stat-value text-sm text-success">${formatNumber(library.active_links || 0)}</div>
</div>
<div class="stat">
<div class="stat-title text-xs">Type</div>
<div class="stat-value text-sm">${debrid.type || 'Unknown'}</div>
<div class="stat-title text-xs">Total Accounts</div>
<div class="stat-value text-sm text-info">${accounts.length}</div>
</div>
</div>
`;
// Add accounts section if there are accounts
if (accounts && accounts.length > 0) {
html += `
<div class="mt-6">
<h4 class="text-lg font-semibold mb-3">
<i class="bi bi-person-lines-fill text-primary"></i>
Accounts
</h4>
<div class="grid grid-cols-2 md:grid-cols-2 gap-2">
`;
accounts.forEach((account, index) => {
const statusBadge = account.disabled ?
'<span class="badge badge-error badge-sm">Disabled</span>' :
'<span class="badge badge-success badge-sm">Active</span>';
const inUseBadge = account.in_use ?
'<span class="badge badge-info badge-sm">In Use</span>' :
'';
html += `
<div class="card bg-base-100 compact">
<div class="card-body p-3">
<div class="flex justify-between items-start mb-2">
<div class="flex-1">
<div class="flex items-center gap-2">
<h5 class="font-medium text-sm">Account #${account.order + 1}</h5>
${statusBadge}
${inUseBadge}
</div>
<p class="text-xs text-base-content/70 mt-1">${account.username || 'No username'}</p>
</div>
<div class="text-right">
<div class="text-xs font-mono text-base-content/80">
Token: ${account.token_masked || '****'}
</div>
</div>
</div>
<div class="grid grid-cols-2 md:grid-cols-2 gap-2">
<div class="stat bg-base-200 rounded p-2">
<div class="stat-title text-xs">Traffic Used</div>
<div class="stat-value text-xs">${window.decypharrUtils.formatBytes(account.traffic_used || 0)}</div>
</div>
<div class="stat bg-base-200 rounded p-2">
<div class="stat-title text-xs">Links Count</div>
<div class="stat-value text-xs">${formatNumber(account.links_count || 0)}</div>
</div>
</div>
</div>
</div>
`;
});
html += '</div></div>';
}
html += `
</div>
</div>
`;

View File

@@ -2,9 +2,10 @@ package web
import (
"encoding/json"
"net/http"
"github.com/sirrobot01/decypharr/internal/config"
"golang.org/x/crypto/bcrypt"
"net/http"
)
func (wb *Web) LoginHandler(w http.ResponseWriter, r *http.Request) {

View File

@@ -1,14 +1,14 @@
package web
import (
"cmp"
"embed"
"html/template"
"github.com/gorilla/sessions"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"html/template"
"os"
"github.com/sirrobot01/decypharr/pkg/wire"
)
var restartFunc func()
@@ -60,10 +60,11 @@ type Web struct {
logger zerolog.Logger
cookie *sessions.CookieStore
templates *template.Template
torrents *store.TorrentStorage
torrents *wire.TorrentStorage
}
func New() *Web {
cfg := config.Get()
templates := template.Must(template.ParseFS(
content,
"templates/layout.html",
@@ -75,8 +76,7 @@ func New() *Web {
"templates/login.html",
"templates/register.html",
))
secretKey := cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
cookieStore := sessions.NewCookieStore([]byte(secretKey))
cookieStore := sessions.NewCookieStore([]byte(cfg.SecretKey()))
cookieStore.Options = &sessions.Options{
Path: "/",
MaxAge: 86400 * 7,
@@ -86,6 +86,6 @@ func New() *Web {
logger: logger.New("ui"),
templates: templates,
cookie: cookieStore,
torrents: store.Get().Torrents(),
torrents: wire.Get().Torrents(),
}
}

View File

@@ -1,7 +1,6 @@
package webdav
import (
"crypto/tls"
"fmt"
"io"
"net/http"
@@ -9,28 +8,23 @@ import (
"strings"
"time"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
var streamingTransport = &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
MaxIdleConns: 200,
MaxIdleConnsPerHost: 100,
MaxConnsPerHost: 200,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second, // give the upstream a minute to send headers
ExpectContinueTimeout: 1 * time.Second,
DisableKeepAlives: true, // close after each request
ForceAttemptHTTP2: false, // dont speak HTTP/2
// this line is what truly blocks HTTP/2:
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
}
type retryAction int
var sharedClient = &http.Client{
Transport: streamingTransport,
Timeout: 0,
}
const (
noRetry retryAction = iota
retryWithLimit
retryAlways
)
const (
MaxNetworkRetries = 3
MaxLinkRetries = 10
)
type streamError struct {
Err error
@@ -50,7 +44,7 @@ type File struct {
name string
torrentName string
link string
downloadLink string
downloadLink types.DownloadLink
size int64
isDir bool
fileId string
@@ -76,26 +70,27 @@ func (f *File) Close() error {
// This is just to satisfy the os.File interface
f.content = nil
f.children = nil
f.downloadLink = ""
f.downloadLink = types.DownloadLink{}
f.readOffset = 0
return nil
}
func (f *File) getDownloadLink() (string, error) {
func (f *File) getDownloadLink() (types.DownloadLink, error) {
// Check if we already have a final URL cached
if f.downloadLink != "" && isValidURL(f.downloadLink) {
if f.downloadLink.Valid() == nil {
return f.downloadLink, nil
}
downloadLink, err := f.cache.GetDownloadLink(f.torrentName, f.name, f.link)
if err != nil {
return "", err
return downloadLink, err
}
if downloadLink != "" && isValidURL(downloadLink) {
f.downloadLink = downloadLink
return downloadLink, nil
err = downloadLink.Valid()
if err != nil {
return types.DownloadLink{}, err
}
return "", os.ErrNotExist
f.downloadLink = downloadLink
return downloadLink, nil
}
func (f *File) getDownloadByteRange() (*[2]int64, error) {
@@ -106,7 +101,6 @@ func (f *File) getDownloadByteRange() (*[2]int64, error) {
return byteRange, nil
}
// setVideoStreamingHeaders sets the necessary headers for video streaming
// It returns error and a boolean indicating if the request is a range request
func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
content := f.content
@@ -140,70 +134,91 @@ func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) err
}
func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error {
// Handle preloaded content files
if f.content != nil {
return f.servePreloadedContent(w, r)
}
// Try streaming with retry logic
return f.streamWithRetry(w, r, 0)
return f.streamWithRetry(w, r, 0, 0)
}
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
const maxRetries = 3
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, networkRetries, recoverableRetries int) error {
_log := f.cache.Logger()
// Get download link (with caching optimization)
downloadLink, err := f.getDownloadLink()
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
}
if downloadLink == "" {
return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
}
// Create upstream request with streaming optimizations
upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
upstreamReq, err := http.NewRequest("GET", downloadLink.DownloadLink, nil)
if err != nil {
return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
}
setVideoStreamingHeaders(upstreamReq)
// Handle range requests (critical for video seeking)
isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
if isRangeRequest == -1 {
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
}
resp, err := sharedClient.Do(upstreamReq)
resp, err := f.cache.Download(upstreamReq)
if err != nil {
// Network error - retry with limit
if networkRetries < MaxNetworkRetries {
_log.Debug().
Int("network_retries", networkRetries+1).
Err(err).
Msg("Network error, retrying")
return f.streamWithRetry(w, r, networkRetries+1, recoverableRetries)
}
return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
}
defer resp.Body.Close()
// Handle upstream errors with retry logic
shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries)
if shouldRetry && retryCount < maxRetries {
// Retry with new download link
_log.Debug().
Int("retry_count", retryCount+1).
Str("file", f.name).
Msg("Retrying stream request")
return f.streamWithRetry(w, r, retryCount+1)
}
if retryErr != nil {
return retryErr
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
retryType, retryErr := f.handleUpstreamError(downloadLink, resp)
switch retryType {
case retryAlways:
if recoverableRetries >= MaxLinkRetries {
return &streamError{
Err: fmt.Errorf("max link retries exceeded (%d)", MaxLinkRetries),
StatusCode: http.StatusServiceUnavailable,
}
}
_log.Debug().
Int("recoverable_retries", recoverableRetries+1).
Str("file", f.name).
Msg("Recoverable error, retrying")
return f.streamWithRetry(w, r, 0, recoverableRetries+1) // Reset network retries
case retryWithLimit:
if networkRetries < MaxNetworkRetries {
_log.Debug().
Int("network_retries", networkRetries+1).
Str("file", f.name).
Msg("Network error, retrying")
return f.streamWithRetry(w, r, networkRetries+1, recoverableRetries)
}
fallthrough
case noRetry:
if retryErr != nil {
return retryErr
}
return &streamError{
Err: fmt.Errorf("non-retryable error: status %d", resp.StatusCode),
StatusCode: http.StatusBadGateway,
}
}
}
// Determine status code based on range request
// Success - stream the response
statusCode := http.StatusOK
if isRangeRequest == 1 {
statusCode = http.StatusPartialContent
}
// Set headers before streaming
if contentLength := resp.Header.Get("Content-Length"); contentLength != "" {
w.Header().Set("Content-Length", contentLength)
}
@@ -212,10 +227,71 @@ func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCoun
w.Header().Set("Content-Range", contentRange)
}
if err := f.streamBuffer(w, resp.Body, statusCode); err != nil {
return err
return f.streamBuffer(w, resp.Body, statusCode)
}
func (f *File) handleUpstreamError(downloadLink types.DownloadLink, resp *http.Response) (retryAction, error) {
_log := f.cache.Logger()
cleanupResp := func(resp *http.Response) {
if resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
switch resp.StatusCode {
case http.StatusServiceUnavailable:
body, readErr := io.ReadAll(resp.Body)
cleanupResp(resp)
if readErr != nil {
_log.Error().Err(readErr).Msg("Failed to read response body")
return retryWithLimit, nil
}
bodyStr := string(body)
if strings.Contains(bodyStr, "you have exceeded your traffic") {
_log.Debug().
Str("token", utils.Mask(downloadLink.Token)).
Str("file", f.name).
Msg("Bandwidth exceeded for account, invalidating link")
f.cache.MarkDownloadLinkAsInvalid(f.downloadLink, "bandwidth_exceeded")
f.downloadLink = types.DownloadLink{}
return retryAlways, nil
}
return noRetry, &streamError{
Err: fmt.Errorf("service unavailable: %s", bodyStr),
StatusCode: http.StatusServiceUnavailable,
}
case http.StatusNotFound:
cleanupResp(resp)
_log.Debug().
Str("file", f.name).
Msg("Link not found, invalidating and regenerating")
f.cache.MarkDownloadLinkAsInvalid(f.downloadLink, "link_not_found")
f.downloadLink = types.DownloadLink{}
return retryAlways, nil
default:
body, _ := io.ReadAll(resp.Body)
cleanupResp(resp)
_log.Error().
Int("status_code", resp.StatusCode).
Str("file", f.name).
Str("response_body", string(body)).
Msg("Unexpected upstream error")
return retryWithLimit, &streamError{
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
StatusCode: http.StatusBadGateway,
}
}
return nil
}
func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int) error {
@@ -228,7 +304,7 @@ func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int
if n, err := src.Read(smallBuf); n > 0 {
// Write status code just before first successful write
w.WriteHeader(statusCode)
if _, werr := w.Write(smallBuf[:n]); werr != nil {
if isClientDisconnection(werr) {
return &streamError{Err: werr, StatusCode: 0, IsClientDisconnection: true}
@@ -266,99 +342,6 @@ func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int
}
}
func (f *File) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
return false, nil
}
_log := f.cache.Logger()
// Clean up response body properly
cleanupResp := func(resp *http.Response) {
if resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
switch resp.StatusCode {
case http.StatusServiceUnavailable:
// Read the body to check for specific error messages
body, readErr := io.ReadAll(resp.Body)
cleanupResp(resp)
if readErr != nil {
_log.Error().Err(readErr).Msg("Failed to read response body")
return false, &streamError{
Err: fmt.Errorf("failed to read error response: %w", readErr),
StatusCode: http.StatusServiceUnavailable,
}
}
bodyStr := string(body)
if strings.Contains(bodyStr, "you have exceeded your traffic") {
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Bandwidth exceeded. Marking link as invalid")
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
// Retry with a different API key if available and we haven't exceeded retries
if retryCount < maxRetries {
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
StatusCode: http.StatusServiceUnavailable,
}
}
return false, &streamError{
Err: fmt.Errorf("service unavailable: %s", bodyStr),
StatusCode: http.StatusServiceUnavailable,
}
case http.StatusNotFound:
cleanupResp(resp)
_log.Debug().
Str("file", f.name).
Int("retry_count", retryCount).
Msg("Link not found (404). Marking link as invalid and regenerating")
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
// Try to regenerate download link if we haven't exceeded retries
if retryCount < maxRetries {
// Clear cached link to force regeneration
f.downloadLink = ""
return true, nil
}
return false, &streamError{
Err: fmt.Errorf("file not found after %d retries", retryCount),
StatusCode: http.StatusNotFound,
}
default:
body, _ := io.ReadAll(resp.Body)
cleanupResp(resp)
_log.Error().
Int("status_code", resp.StatusCode).
Str("file", f.name).
Str("response_body", string(body)).
Msg("Unexpected upstream error")
return false, &streamError{
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
StatusCode: http.StatusBadGateway,
}
}
}
func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
rangeHeader := r.Header.Get("Range")
if rangeHeader == "" {

View File

@@ -4,8 +4,6 @@ import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"golang.org/x/net/webdav"
"mime"
"net/http"
"os"
@@ -15,6 +13,9 @@ import (
"strings"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"golang.org/x/net/webdav"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
@@ -451,15 +452,15 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
// Handle nginx proxy (X-Accel-Redirect)
if file.content == nil && !file.isRar && h.cache.StreamWithRclone() {
link, err := file.getDownloadLink()
if err != nil || link == "" {
if err != nil || link.Empty() {
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
return
}
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name()))
w.Header().Set("X-Accel-Redirect", link)
w.Header().Set("X-Accel-Redirect", link.DownloadLink)
w.Header().Set("X-Accel-Buffering", "no")
http.Redirect(w, r, link, http.StatusFound)
http.Redirect(w, r, link.DownloadLink, http.StatusFound)
return
}

View File

@@ -2,21 +2,15 @@ package webdav
import (
"fmt"
"github.com/stanNthe5/stringbuf"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
)
func isValidURL(str string) bool {
u, err := url.Parse(str)
// A valid URL should parse without error, and have a non-empty scheme and host.
return err == nil && u.Scheme != "" && u.Host != ""
}
"github.com/stanNthe5/stringbuf"
)
var pctHex = "0123456789ABCDEF"
@@ -218,12 +212,3 @@ func parseRange(s string, size int64) ([]httpRange, error) {
}
return ranges, nil
}
func setVideoStreamingHeaders(req *http.Request) {
// Request optimizations for faster response
req.Header.Set("Accept", "*/*")
req.Header.Set("Accept-Encoding", "identity")
req.Header.Set("Connection", "keep-alive")
req.Header.Set("User-Agent", "VideoStream/1.0")
req.Header.Set("Priority", "u=1")
}

View File

@@ -7,7 +7,7 @@ import (
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/wire"
"html/template"
"net/http"
"net/url"
@@ -97,7 +97,7 @@ func New() *WebDav {
Handlers: make([]*Handler, 0),
URLBase: urlBase,
}
for name, c := range store.Get().Debrid().Caches() {
for name, c := range wire.Get().Debrid().Caches() {
h := NewHandler(name, urlBase, c, c.Logger())
w.Handlers = append(w.Handlers, h)
}

625
pkg/wire/downloader.go Normal file
View File

@@ -0,0 +1,625 @@
package wire
import (
	"crypto/md5"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/cavaliergopher/grab/v3"
	"github.com/google/uuid"
	"github.com/sirrobot01/decypharr/internal/utils"
	"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// Multi-season detection patterns.
//
// All regexps are compiled once at package init so per-torrent analysis never
// pays a regexp.Compile cost on the hot path.
var (
	// Pre-compiled patterns for multi-season replacement.
	// Each entry pairs a pattern that matches a multi-season tag in a release
	// name with a printf template for its single-season replacement; the %02d
	// is later filled with the target season number. Patterns are tried in
	// order and only the first match is applied.
	multiSeasonReplacements = []multiSeasonPattern{
		// S01-08 -> S01 (or whatever target season)
		{regexp.MustCompile(`(?i)S(\d{1,2})-\d{1,2}`), "S%02d"},
		// S01-S08 -> S01
		{regexp.MustCompile(`(?i)S(\d{1,2})-S\d{1,2}`), "S%02d"},
		// Season 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Season\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},
		// Seasons 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Seasons\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},
		// Complete Series -> Season X
		{regexp.MustCompile(`(?i)Complete\.?Series`), "Season %02d"},
		// All Seasons -> Season X
		{regexp.MustCompile(`(?i)All\.?Seasons?`), "Season %02d"},
	}

	// Also pre-compile other patterns.
	// seasonPattern extracts a single season number from "Season 3", "S03", etc.
	seasonPattern = regexp.MustCompile(`(?i)(?:season\.?\s*|s)(\d{1,2})`)
	// qualityIndicators locates quality metadata so a season tag can be
	// inserted just before it (see insertSeasonIntoName).
	qualityIndicators = regexp.MustCompile(`(?i)\b(2160p|1080p|720p|BluRay|WEB-DL|HDTV|x264|x265|HEVC)`)
	// multiSeasonIndicators flags names that advertise more than one season.
	multiSeasonIndicators = []*regexp.Regexp{
		regexp.MustCompile(`(?i)complete\.?series`),
		regexp.MustCompile(`(?i)all\.?seasons?`),
		regexp.MustCompile(`(?i)season\.?\s*\d+\s*-\s*\d+`),
		regexp.MustCompile(`(?i)s\d+\s*-\s*s?\d+`),
		regexp.MustCompile(`(?i)seasons?\s*\d+\s*-\s*\d+`),
	}
)
// multiSeasonPattern pairs a compiled multi-season regexp with the printf
// template used to rewrite matches into a single-season tag.
type multiSeasonPattern struct {
	pattern     *regexp.Regexp // matches the multi-season tag in a release name
	replacement string         // printf template; %02d receives the target season
}
// SeasonInfo describes one season extracted from a multi-season torrent:
// its files, a derived info-hash, and a season-specific display name.
type SeasonInfo struct {
	SeasonNumber int          // 1-based season number parsed from file names/paths
	Files        []types.File // files belonging to this season only
	InfoHash     string       // deterministic per-season hash (see generateSeasonHash)
	Name         string       // season-specific release name (see generateSeasonSpecificName)
}
// replaceMultiSeasonPattern rewrites a multi-season release name so that it
// refers to a single target season, e.g. "Show S01-S08 1080p" -> "Show S03 1080p".
// When none of the known multi-season patterns match, the season tag is
// inserted into the name instead (see insertSeasonIntoName).
func (s *Store) replaceMultiSeasonPattern(name string, targetSeason int) string {
	for _, candidate := range multiSeasonReplacements {
		if !candidate.pattern.MatchString(name) {
			continue
		}
		// Only the first matching pattern is applied.
		replacement := fmt.Sprintf(candidate.replacement, targetSeason)
		rewritten := candidate.pattern.ReplaceAllString(name, replacement)
		s.logger.Debug().Msgf("Applied pattern replacement: %s -> %s", name, rewritten)
		return rewritten
	}
	// No multi-season pattern found; fall back to inserting the season tag.
	return s.insertSeasonIntoName(name, targetSeason)
}
// insertSeasonIntoName adds an "S%02d" season tag to a name that lacks one.
// The tag is placed just before any quality metadata (1080p, BluRay, ...)
// when present, otherwise appended at the end. Names that already contain a
// season tag are returned unchanged.
func (s *Store) insertSeasonIntoName(name string, seasonNum int) string {
	// Nothing to do when the name already carries season info.
	if seasonPattern.MatchString(name) {
		return name
	}
	// Prefer inserting ahead of quality indicators so the tag reads naturally.
	if loc := qualityIndicators.FindStringIndex(name); loc != nil {
		head := strings.TrimSpace(name[:loc[0]])
		tail := name[loc[0]:]
		return fmt.Sprintf("%s S%02d %s", head, seasonNum, tail)
	}
	// No quality indicators: append the tag.
	return fmt.Sprintf("%s S%02d", name, seasonNum)
}
// detectMultiSeason reports whether debridTorrent spans multiple seasons and,
// if so, returns one SeasonInfo per season containing that season's files, a
// derived per-season info-hash, and a season-specific display name.
//
// A torrent counts as multi-season when its files reference more than one
// season number, or when its name matches a known multi-season indicator
// ("Complete Series", "S01-S08", ...). Returns (false, nil, nil) otherwise.
// The error result is currently always nil; it exists for future use.
func (s *Store) detectMultiSeason(debridTorrent *types.Torrent) (bool, []SeasonInfo, error) {
	torrentName := debridTorrent.Name
	files := debridTorrent.GetFiles()

	s.logger.Debug().Msgf("Analyzing torrent for multi-season: %s", torrentName)

	// Find all seasons present in the files
	seasonsFound := s.findAllSeasons(files)

	// Check if this is actually a multi-season torrent
	isMultiSeason := len(seasonsFound) > 1 || s.hasMultiSeasonIndicators(torrentName)
	if !isMultiSeason {
		return false, nil, nil
	}

	s.logger.Info().Msgf("Multi-season torrent detected with seasons: %v", getSortedSeasons(seasonsFound))

	// Group files by season
	seasonGroups := s.groupFilesBySeason(files, seasonsFound)

	// Create SeasonInfo objects with proper naming.
	// NOTE(review): iterating the map makes the order of the returned seasons
	// non-deterministic; confirm callers do not depend on ordering.
	var seasons []SeasonInfo
	for seasonNum, seasonFiles := range seasonGroups {
		if len(seasonFiles) == 0 {
			// A known season may end up empty if no file could be attributed to it.
			continue
		}

		// Generate season-specific name preserving all metadata
		seasonName := s.generateSeasonSpecificName(torrentName, seasonNum)

		seasons = append(seasons, SeasonInfo{
			SeasonNumber: seasonNum,
			Files:        seasonFiles,
			InfoHash:     s.generateSeasonHash(debridTorrent.InfoHash, seasonNum),
			Name:         seasonName,
		})
	}

	return true, seasons, nil
}
// generateSeasonSpecificName derives a single-season release name from the
// original multi-season torrent name, preserving all other metadata
// (quality, codec, group tags) untouched.
func (s *Store) generateSeasonSpecificName(originalName string, seasonNum int) string {
	name := s.replaceMultiSeasonPattern(originalName, seasonNum)
	s.logger.Debug().Msgf("Generated season name for S%02d: %s", seasonNum, name)
	return name
}
// findAllSeasons scans every file and collects the set of season numbers it
// can parse. The filename is checked first; the full path is used as a
// fallback (season folders often carry the tag when filenames do not).
func (s *Store) findAllSeasons(files []types.File) map[int]bool {
	seasons := make(map[int]bool)
	for _, f := range files {
		season := s.extractSeason(f.Name)
		if season == 0 {
			// Filename had no tag; fall back to the full path.
			season = s.extractSeason(f.Path)
		}
		if season > 0 {
			seasons[season] = true
		}
	}
	return seasons
}
// extractSeason pulls a season number out of text using seasonPattern
// ("Season 3", "S03", ...). Returns 0 when no plausible season (1-99) is
// found.
func (s *Store) extractSeason(text string) int {
	m := seasonPattern.FindStringSubmatch(text)
	if len(m) < 2 {
		return 0
	}
	num, err := strconv.Atoi(m[1])
	// Reject non-numeric, zero, and implausibly large values.
	if err != nil || num <= 0 || num >= 100 {
		return 0
	}
	return num
}
// hasMultiSeasonIndicators reports whether the torrent name advertises more
// than one season ("complete series", "S01-S08", "Seasons 1-5", ...).
func (s *Store) hasMultiSeasonIndicators(torrentName string) bool {
	for _, indicator := range multiSeasonIndicators {
		if indicator.MatchString(torrentName) {
			return true
		}
	}
	return false
}
// groupFilesBySeason buckets files by season number. Attribution order:
// filename tag, then path tag, then per-segment path inference; when nothing
// matches and exactly one season is known, the file defaults to that season.
// Files that cannot be attributed at all are dropped.
func (s *Store) groupFilesBySeason(files []types.File, knownSeasons map[int]bool) map[int][]types.File {
	groups := make(map[int][]types.File, len(knownSeasons))
	// Initialize a bucket per known season so empty seasons are represented.
	for season := range knownSeasons {
		groups[season] = []types.File{}
	}

	for _, f := range files {
		season := s.extractSeason(f.Name)
		if season == 0 {
			season = s.extractSeason(f.Path)
		}

		if season > 0 && knownSeasons[season] {
			groups[season] = append(groups[season], f)
			continue
		}

		// No direct tag: walk the path segments for a known season.
		if inferred := s.inferSeasonFromPath(f.Path, knownSeasons); inferred > 0 {
			groups[inferred] = append(groups[inferred], f)
			continue
		}

		// Last resort: a single known season absorbs untagged files.
		if len(knownSeasons) == 1 {
			for only := range knownSeasons {
				groups[only] = append(groups[only], f)
			}
		}
	}
	return groups
}
// inferSeasonFromPath walks the slash-separated segments of path looking for
// a season tag that matches one of the known seasons. Returns 0 when none of
// the segments yields a known season.
func (s *Store) inferSeasonFromPath(path string, knownSeasons map[int]bool) int {
	for _, segment := range strings.Split(path, "/") {
		if season := s.extractSeason(segment); season > 0 && knownSeasons[season] {
			return season
		}
	}
	return 0
}
// getSortedSeasons returns the season numbers present in the set in
// ascending order, primarily for deterministic log output.
//
// BUG FIX: the original collected the keys but never sorted them — Go map
// iteration order is random, so the "sorted season list for logging" comment
// was not honored and log lines were nondeterministic.
func getSortedSeasons(seasons map[int]bool) []int {
	result := make([]int, 0, len(seasons))
	for season := range seasons {
		result = append(result, season)
	}
	sort.Ints(result)
	return result
}
// generateSeasonHash derives a deterministic, season-specific identifier from
// the original torrent's info-hash so each synthesized season torrent gets a
// stable, unique hash of its own.
func (s *Store) generateSeasonHash(originalHash string, seasonNumber int) string {
	seed := fmt.Sprintf("%s-season-%d", originalHash, seasonNumber)
	sum := md5.Sum([]byte(seed))
	return fmt.Sprintf("%x", sum)
}
// grabber downloads url into filename using the supplied grab client and
// reports incremental progress roughly every two seconds.
//
// byterange, when non-nil, is sent as an HTTP Range header ("bytes=start-end",
// inclusive) so only part of the remote file is fetched.
// progressCallback, when non-nil, receives (bytesSinceLastReport, bytesPerSec);
// after the transfer completes a final call flushes any remainder with speed 0.
// Returns the transfer error from grab, or nil on success.
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
	req, err := grab.NewRequest(filename, url)
	if err != nil {
		return err
	}
	// Set byte range if specified
	if byterange != nil {
		byterangeStr := fmt.Sprintf("%d-%d", byterange[0], byterange[1])
		req.HTTPRequest.Header.Set("Range", "bytes="+byterangeStr)
	}
	resp := client.Do(req)
	t := time.NewTicker(time.Second * 2)
	defer t.Stop()

	var lastReported int64
Loop:
	for {
		select {
		case <-t.C:
			// Report only the delta since the previous tick so the callback's
			// aggregate byte counter stays monotonic.
			current := resp.BytesComplete()
			speed := int64(resp.BytesPerSecond())
			if current != lastReported {
				if progressCallback != nil {
					progressCallback(current-lastReported, speed)
				}
				lastReported = current
			}
		case <-resp.Done:
			// Transfer finished (successfully or not); resp.Err() below tells which.
			break Loop
		}
	}
	// Report final bytes
	if progressCallback != nil {
		progressCallback(resp.BytesComplete()-lastReported, 0)
	}
	return resp.Err()
}
// processDownload creates the on-disk destination folder for a torrent and
// kicks off the per-file downloads into it, returning the folder path.
//
// The folder is derived from the qBittorrent save path plus the torrent's
// original filename (extension and invalid characters stripped).
// NOTE(review): downloadFiles only logs per-file failures, so this function
// returns the path with a nil error even when some files failed — confirm
// callers tolerate partial downloads.
func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
	s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
	torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename))
	torrentPath = utils.RemoveInvalidChars(torrentPath)
	err := os.MkdirAll(torrentPath, os.ModePerm)
	if err != nil {
		// add the previous error to the error and return
		return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err)
	}
	s.downloadFiles(torrent, debridTorrent, torrentPath)
	return torrentPath, nil
}
// downloadFiles downloads every linked file of debridTorrent into parent,
// with concurrency bounded by s.downloadSemaphore. Aggregate progress
// (bytes, speed, percent) is maintained on debridTorrent under its lock and
// mirrored into the qBittorrent-facing torrent via partialTorrentUpdate.
//
// Files without a download link are skipped with a log line. Per-file
// failures are collected and logged, but the function returns nothing —
// callers cannot distinguish a partial download from a complete one.
func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) {
	var wg sync.WaitGroup
	totalSize := int64(0)
	for _, file := range debridTorrent.GetFiles() {
		totalSize += file.Size
	}
	debridTorrent.Lock()
	debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
	debridTorrent.Progress = 0       // Reset progress
	debridTorrent.Unlock()
	// Shared by all download goroutines; serialized by the two torrent locks
	// taken below (debridTorrent first, then torrent — keep this order).
	progressCallback := func(downloaded int64, speed int64) {
		debridTorrent.Lock()
		defer debridTorrent.Unlock()
		torrent.Lock()
		defer torrent.Unlock()

		// Update total downloaded bytes
		debridTorrent.SizeDownloaded += downloaded
		debridTorrent.Speed = speed

		// Calculate overall progress
		if totalSize > 0 {
			debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
		}
		s.partialTorrentUpdate(torrent, debridTorrent)
	}

	client := &grab.Client{
		UserAgent: "Decypharr[QBitTorrent]",
		HTTPClient: &http.Client{
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
			},
		},
	}
	// Buffered so no worker blocks on send; sized by the file count.
	errChan := make(chan error, len(debridTorrent.Files))

	for _, file := range debridTorrent.GetFiles() {
		// Skip files the debrid service produced no link for.
		if file.DownloadLink.Empty() {
			s.logger.Info().Msgf("No download link found for %s", file.Name)
			continue
		}
		wg.Add(1)
		s.downloadSemaphore <- struct{}{} // acquire a download slot (blocks when full)
		go func(file types.File) {
			defer wg.Done()
			defer func() { <-s.downloadSemaphore }() // release the slot
			filename := file.Name

			err := grabber(
				client,
				file.DownloadLink.DownloadLink,
				filepath.Join(parent, filename),
				file.ByteRange,
				progressCallback,
			)

			if err != nil {
				s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
				errChan <- err
			} else {
				s.logger.Info().Msgf("Downloaded %s", filename)
			}
		}(file)
	}
	wg.Wait()
	close(errChan)

	// Drain collected failures; they are logged but not propagated.
	var errors []error
	for err := range errChan {
		if err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		s.logger.Error().Msgf("Errors occurred during download: %v", errors)
		return
	}
	s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
// processSymlink waits for a torrent's files to appear under the rclone
// mount (torrentRclonePath) and creates one symlink per file inside
// torrentSymlinkPath, returning that folder.
//
// The mount is polled every 100ms with a recursive directory scan; files are
// removed from the pending set as their symlinks are created. If any file is
// still missing after 30 minutes the function returns the (partially
// populated) symlink path together with a timeout error. When pre-caching is
// enabled, created links are warmed asynchronously in a background goroutine.
func (s *Store) processSymlink(debridTorrent *types.Torrent, torrentRclonePath, torrentSymlinkPath string) (string, error) {
	files := debridTorrent.GetFiles()
	if len(files) == 0 {
		return "", fmt.Errorf("no valid files found")
	}
	s.logger.Info().Msgf("Creating symlinks for %d files ...", len(files))
	// Create symlink directory
	err := os.MkdirAll(torrentSymlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
	}

	// Track pending files, keyed by bare filename (matching directory entries).
	remainingFiles := make(map[string]types.File)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)

	filePaths := make([]string, 0, len(remainingFiles))

	var checkDirectory func(string) // Recursive function
	checkDirectory = func(dirPath string) {
		entries, err := os.ReadDir(dirPath)
		if err != nil {
			// The mount may not have exposed this directory yet; retry next tick.
			return
		}

		for _, entry := range entries {
			entryName := entry.Name()
			fullPath := filepath.Join(dirPath, entryName)

			// Check if this matches a remaining file
			if file, exists := remainingFiles[entryName]; exists {
				fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
				// An already-existing symlink counts as success (idempotent retries).
				if err := os.Symlink(fullPath, fileSymlinkPath); err == nil || os.IsExist(err) {
					filePaths = append(filePaths, fileSymlinkPath)
					delete(remainingFiles, entryName)
					s.logger.Info().Msgf("File is ready: %s", file.Name)
				}
			} else if entry.IsDir() {
				// If not found and it's a directory, check inside
				checkDirectory(fullPath)
			}
		}
	}

	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			checkDirectory(torrentRclonePath)
		case <-timeout:
			s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(remainingFiles))
		}
	}

	// Pre-cache files if enabled
	if !s.skipPreCache && len(filePaths) > 0 {
		go func() {
			s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
			if err := utils.PreCacheFile(filePaths); err != nil {
				s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
			} else {
				s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
			}
		}()
	}
	return torrentSymlinkPath, nil
}
// getTorrentPaths resolves the rclone mount path and the symlink destination
// for a torrent. It polls until the torrent folder appears inside the mount
// (rclone may expose it with a delay).
//
// BUG FIX: the original looped forever with a bare 100ms sleep, so a torrent
// whose folder never appears (dead mount, removed torrent) would block its
// caller indefinitely. The wait is now bounded at 30 minutes — the same
// budget processSymlink uses — after which an error is returned; the error
// result was previously declared but never produced.
func (s *Store) getTorrentPaths(arrFolder string, debridTorrent *types.Torrent) (string, string, error) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)

	for {
		torrentFolder, err := debridTorrent.GetMountFolder(debridTorrent.MountPath)
		if err == nil {
			// Found mountPath
			mountPath := filepath.Join(debridTorrent.MountPath, torrentFolder)
			// AllDebrid exposes single media files at the mount root: strip the
			// extension for the symlink folder name and point at the root itself.
			if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentFolder) {
				torrentFolder = utils.RemoveExtension(torrentFolder)
				mountPath = debridTorrent.MountPath
			}
			// Return mountPath and symlink path
			return mountPath, filepath.Join(arrFolder, torrentFolder), nil
		}
		select {
		case <-ticker.C:
			// Folder not visible yet; poll again.
		case <-timeout:
			return "", "", fmt.Errorf("timed out waiting for mount folder of %s: %w", debridTorrent.Name, err)
		}
	}
}
// processMultiSeasonSymlinks splits a multi-season torrent into one synthetic
// torrent per season and symlinks each season's files into its own folder.
//
// Each per-season record uses the derived season info-hash as both ID and
// Hash (the download variant, processMultiSeasonDownloads, uses a fresh UUID
// for ID instead — TODO(review): confirm which is intended). On success the
// original torrent record is deleted; the first failing season aborts the
// whole operation. importReq is currently unused — kept for signature parity
// with processMultiSeasonDownloads.
func (s *Store) processMultiSeasonSymlinks(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
	for _, seasonInfo := range seasons {
		// Create a season-specific debrid torrent
		seasonDebridTorrent := debridTorrent.Copy()

		// Update the season torrent with season-specific data
		seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
		seasonDebridTorrent.Name = seasonInfo.Name

		seasonTorrent := torrent.Copy()
		seasonTorrent.ID = seasonInfo.InfoHash
		seasonTorrent.Name = seasonInfo.Name
		seasonTorrent.Hash = seasonInfo.InfoHash

		torrentFiles := make([]*File, 0)
		size := int64(0)

		// Filter files to only include this season's files
		seasonFiles := make(map[string]types.File)
		for index, file := range seasonInfo.Files {
			seasonFiles[file.Name] = file
			torrentFiles = append(torrentFiles, &File{
				Index: index,
				Name:  file.Path,
				Size:  file.Size,
			})
			size += file.Size
		}
		seasonDebridTorrent.Files = seasonFiles
		seasonTorrent.Files = torrentFiles
		seasonTorrent.Size = size

		// Create a season-specific torrent record
		// Create season folder path using the extracted season name
		seasonFolderName := seasonInfo.Name

		s.logger.Info().Msgf("Processing season %s with %d files", seasonTorrent.Name, len(seasonInfo.Files))
		var err error

		cache := s.debrid.Debrid(debridTorrent.Debrid).Cache()
		var torrentRclonePath, torrentSymlinkPath string
		if cache != nil {
			// Cache/WebDAV mode: the cache decides the torrent folder name.
			torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent))
		} else {
			// Regular mount mode
			torrentRclonePath, _, err = s.getTorrentPaths(seasonTorrent.SavePath, seasonDebridTorrent)
			if err != nil {
				return err
			}
		}
		torrentSymlinkPath = filepath.Join(seasonTorrent.SavePath, seasonFolderName)
		torrentSymlinkPath, err = s.processSymlink(seasonDebridTorrent, torrentRclonePath, torrentSymlinkPath)
		if err != nil {
			return err
		}
		if torrentSymlinkPath == "" {
			return fmt.Errorf("no symlink found for season %d", seasonInfo.SeasonNumber)
		}

		// Update season torrent with final path
		seasonTorrent.TorrentPath = torrentSymlinkPath
		seasonTorrent.ContentPath = torrentSymlinkPath
		seasonTorrent.State = "pausedUP"

		// Add the season torrent to storage
		s.torrents.AddOrUpdate(seasonTorrent)
		s.logger.Info().Str("path", torrentSymlinkPath).Msgf("Successfully created season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
	}
	// NOTE(review): category is "" here but torrent.Category in the download
	// variant — confirm which the torrent store expects for deletion.
	s.torrents.Delete(torrent.Hash, "", false)
	s.logger.Info().Msgf("Multi-season processing completed for %s", debridTorrent.Name)
	return nil
}
// processMultiSeasonDownloads handles multi-season torrent downloading: it
// splits the original torrent into one synthetic torrent per season, fetches
// fresh download links for each season's files, and downloads each season
// into its own folder.
//
// On success the original multi-season record is deleted and one record per
// season (state "pausedUP") is stored in its place. The first season that
// fails aborts the whole operation. importReq is currently unused — kept for
// signature parity with processMultiSeasonSymlinks.
func (s *Store) processMultiSeasonDownloads(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
	s.logger.Info().Msgf("Creating separate download records for %d seasons", len(seasons))
	for _, seasonInfo := range seasons {
		// Create a season-specific debrid torrent
		seasonDebridTorrent := debridTorrent.Copy()

		// Update the season torrent with season-specific data
		seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
		seasonDebridTorrent.Name = seasonInfo.Name

		// Filter files to only include this season's files
		seasonFiles := make(map[string]types.File)
		for _, file := range seasonInfo.Files {
			seasonFiles[file.Name] = file
		}
		seasonDebridTorrent.Files = seasonFiles

		// Create a season-specific torrent record
		seasonTorrent := torrent.Copy()
		seasonTorrent.ID = uuid.New().String()
		seasonTorrent.Name = seasonInfo.Name
		seasonTorrent.Hash = seasonInfo.InfoHash
		seasonTorrent.SavePath = torrent.SavePath

		s.logger.Info().Msgf("Downloading season %d with %d files", seasonInfo.SeasonNumber, len(seasonInfo.Files))

		// Generate download links for season files
		client := s.debrid.Debrid(debridTorrent.Debrid).Client()
		if err := client.GetFileDownloadLinks(seasonDebridTorrent); err != nil {
			s.logger.Error().Msgf("Failed to get download links for season %d: %v", seasonInfo.SeasonNumber, err)
			return fmt.Errorf("failed to get download links for season %d: %v", seasonInfo.SeasonNumber, err)
		}

		// Download files for this season
		seasonDownloadPath, err := s.processDownload(seasonTorrent, seasonDebridTorrent)
		if err != nil {
			s.logger.Error().Msgf("Failed to download season %d: %v", seasonInfo.SeasonNumber, err)
			return fmt.Errorf("failed to download season %d: %v", seasonInfo.SeasonNumber, err)
		}

		// Update season torrent with final path.
		// BUG FIX: the content path must be set on the per-season record, not on
		// the original torrent (which is deleted below). The original assigned
		// torrent.ContentPath, leaving every season record without a ContentPath
		// — processMultiSeasonSymlinks sets it on seasonTorrent, as done here.
		seasonTorrent.TorrentPath = seasonDownloadPath
		seasonTorrent.ContentPath = seasonDownloadPath
		seasonTorrent.State = "pausedUP"

		// Add the season torrent to storage
		s.torrents.AddOrUpdate(seasonTorrent)
		s.logger.Info().Msgf("Successfully downloaded season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
	}

	s.logger.Debug().Msgf("Deleting original torrent with hash: %s, category: %s", torrent.Hash, torrent.Category)
	s.torrents.Delete(torrent.Hash, torrent.Category, false)
	s.logger.Info().Msgf("Multi-season download processing completed for %s", debridTorrent.Name)
	return nil
}

View File

@@ -1,4 +1,4 @@
package store
package wire
import (
"os"
@@ -25,6 +25,7 @@ func createTorrentFromMagnet(req *ImportRequest) *Torrent {
AutoTmm: false,
Ratio: 1,
RatioLimit: 1,
TotalSize: magnet.Size,
SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator),
}
return torrent

View File

@@ -1,8 +1,10 @@
package store
package wire
import (
"context"
"fmt"
"github.com/go-co-op/gocron/v2"
"github.com/sirrobot01/decypharr/internal/utils"
"time"
)
@@ -25,58 +27,50 @@ func (s *Store) addToQueue(importReq *ImportRequest) error {
return nil
}
func (s *Store) StartQueueSchedule(ctx context.Context) error {
// Start the slots processing in a separate goroutine
go func() {
if err := s.processSlotsQueue(ctx); err != nil {
s.logger.Error().Err(err).Msg("Error processing slots queue")
}
}()
func (s *Store) StartQueueWorkers(ctx context.Context) error {
// This function is responsible for starting the scheduled tasks
if ctx == nil {
ctx = context.Background()
}
// Start the remove stalled torrents processing in a separate goroutine
go func() {
if err := s.processRemoveStalledTorrents(ctx); err != nil {
s.logger.Error().Err(err).Msg("Error processing remove stalled torrents")
}
}()
s.scheduler.RemoveByTags("decypharr-store")
return nil
}
func (s *Store) processSlotsQueue(ctx context.Context) error {
s.trackAvailableSlots(ctx) // Initial tracking of available slots
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return nil
case <-ticker.C:
if jd, err := utils.ConvertToJobDef("30s"); err != nil {
s.logger.Error().Err(err).Msg("Failed to convert slots tracking interval to job definition")
} else {
// Schedule the job
if _, err := s.scheduler.NewJob(jd, gocron.NewTask(func() {
s.trackAvailableSlots(ctx)
}), gocron.WithContext(ctx)); err != nil {
s.logger.Error().Err(err).Msg("Failed to create slots tracking job")
} else {
s.logger.Trace().Msgf("Slots tracking job scheduled for every %s", "30s")
}
}
}
func (s *Store) processRemoveStalledTorrents(ctx context.Context) error {
if s.removeStalledAfter <= 0 {
return nil // No need to remove stalled torrents if the duration is not set
}
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return nil
case <-ticker.C:
if err := s.removeStalledTorrents(ctx); err != nil {
s.logger.Error().Err(err).Msg("Error removing stalled torrents")
if s.removeStalledAfter > 0 {
// Stalled torrents removal job
if jd, err := utils.ConvertToJobDef("1m"); err != nil {
s.logger.Error().Err(err).Msg("Failed to convert remove stalled torrents interval to job definition")
} else {
// Schedule the job
if _, err := s.scheduler.NewJob(jd, gocron.NewTask(func() {
err := s.removeStalledTorrents(ctx)
if err != nil {
s.logger.Error().Err(err).Msg("Failed to process remove stalled torrents")
}
}), gocron.WithContext(ctx)); err != nil {
s.logger.Error().Err(err).Msg("Failed to create remove stalled torrents job")
} else {
s.logger.Trace().Msgf("Remove stalled torrents job scheduled for every %s", "1m")
}
}
}
// Start the scheduler
s.scheduler.Start()
s.logger.Debug().Msg("Store worker started")
return nil
}
func (s *Store) trackAvailableSlots(ctx context.Context) {
@@ -91,13 +85,17 @@ func (s *Store) trackAvailableSlots(ctx context.Context) {
availableSlots[name] = slots
}
if len(availableSlots) == 0 {
s.logger.Debug().Msg("No debrid clients available or no slots found")
return // No debrid clients or slots available, nothing to process
}
if s.importsQueue.Size() <= 0 {
// Queue is empty, no need to process
return
}
for name, slots := range availableSlots {
s.logger.Debug().Msgf("Available slots for %s: %d", name, slots)
// If slots are available, process the next import request from the queue
for slots > 0 {

View File

@@ -1,4 +1,4 @@
package store
package wire
import (
"bytes"
@@ -7,6 +7,7 @@ import (
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
@@ -43,6 +44,8 @@ type ImportRequest struct {
}
func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, action string, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest {
cfg := config.Get()
callBackUrl = cmp.Or(callBackUrl, cfg.CallbackURL)
return &ImportRequest{
Id: uuid.New().String(),
Status: "started",

View File

@@ -1,8 +1,9 @@
package store
package wire
import (
"cmp"
"context"
"github.com/go-co-op/gocron/v2"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
@@ -26,6 +27,7 @@ type Store struct {
skipPreCache bool
downloadSemaphore chan struct{}
removeStalledAfter time.Duration // Duration after which stalled torrents are removed
scheduler gocron.Scheduler
}
var (
@@ -49,6 +51,11 @@ func Get() *Store {
arrs := arr.NewStorage()
deb := debrid.NewStorage(rcManager)
scheduler, err := gocron.NewScheduler(gocron.WithLocation(time.Local), gocron.WithGlobalJobOptions(gocron.WithTags("decypharr-store")))
if err != nil {
scheduler, _ = gocron.NewScheduler(gocron.WithGlobalJobOptions(gocron.WithTags("decypharr-store")))
}
instance = &Store{
repair: repair.New(arrs, deb),
arr: arrs,
@@ -56,10 +63,11 @@ func Get() *Store {
rcloneManager: rcManager,
torrents: newTorrentStorage(cfg.TorrentsFile()),
logger: logger.Default(), // Use default logger [decypharr]
refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 10)) * time.Minute,
refreshInterval: time.Duration(cmp.Or(qbitCfg.RefreshInterval, 30)) * time.Second,
skipPreCache: qbitCfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(qbitCfg.MaxDownloads, 5)),
importsQueue: NewImportQueue(context.Background(), 1000),
scheduler: scheduler,
}
if cfg.RemoveStalledAfter != "" {
removeStalledAfter, err := time.ParseDuration(cfg.RemoveStalledAfter)
@@ -78,7 +86,10 @@ func Reset() {
}
if instance.rcloneManager != nil {
instance.rcloneManager.Stop()
err := instance.rcloneManager.Stop()
if err != nil {
instance.logger.Error().Err(err).Msg("Failed to stop rclone manager")
}
}
if instance.importsQueue != nil {
@@ -88,6 +99,11 @@ func Reset() {
// Close the semaphore channel to
close(instance.downloadSemaphore)
}
if instance.scheduler != nil {
_ = instance.scheduler.StopJobs()
_ = instance.scheduler.Shutdown()
}
}
once = sync.Once{}
instance = nil
@@ -108,3 +124,7 @@ func (s *Store) Torrents() *TorrentStorage {
// RcloneManager returns the store's rclone manager, which owns the
// rclone mounts used to expose debrid content on the filesystem.
func (s *Store) RcloneManager() *rclone.Manager {
	return s.rcloneManager
}
// Scheduler returns the store's gocron scheduler, used to register
// periodic background jobs (slot tracking, stalled-torrent removal, …).
func (s *Store) Scheduler() gocron.Scheduler {
	return s.scheduler
}

View File

@@ -1,4 +1,4 @@
package store
package wire
import (
"cmp"
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"math"
"os"
"path/filepath"
"time"
@@ -99,8 +98,7 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
backoff.Reset(nextInterval)
}
}
var torrentSymlinkPath string
var err error
var torrentSymlinkPath, torrentRclonePath string
debridTorrent.Arr = _arr
// Check if debrid supports webdav by checking cache
@@ -134,11 +132,20 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
}()
}
// Check for multi-season torrent support
isMultiSeason, seasons, err := s.detectMultiSeason(debridTorrent)
if err != nil {
s.logger.Warn().Msgf("Error detecting multi-season for %s: %v", debridTorrent.Name, err)
// Continue with normal processing if detection fails
isMultiSeason = false
}
switch importReq.Action {
case "symlink":
// Symlink action, we will create a symlink to the torrent
s.logger.Debug().Msgf("Post-Download Action: Symlink")
cache := deb.Cache()
if cache != nil {
s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
// Use webdav to download the file
@@ -146,14 +153,45 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
onFailed(err)
return
}
}
if isMultiSeason {
s.logger.Info().Msgf("Processing multi-season torrent with %d seasons", len(seasons))
// Remove any torrent already added
err := s.processMultiSeasonSymlinks(torrent, debridTorrent, seasons, importReq)
if err == nil {
// Multi-season processing succeeded; mark the import complete and fire notifications
s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed
go func() {
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
s.logger.Error().Msgf("Error sending discord message: %v", err)
}
}()
go func() {
_arr.Refresh()
}()
return
}
}
if cache != nil {
torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentSymlinkPath = filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.Name)) // /mnt/symlinks/{category}/MyTVShow/
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
} else {
// User is using either zurg or debrid webdav
torrentSymlinkPath, err = s.processSymlink(torrent, debridTorrent) // /mnt/symlinks/{category}/MyTVShow/
torrentRclonePath, torrentSymlinkPath, err = s.getTorrentPaths(torrent.SavePath, debridTorrent)
if err != nil {
onFailed(err)
return
}
}
torrentSymlinkPath, err = s.processSymlink(debridTorrent, torrentRclonePath, torrentSymlinkPath)
if err != nil {
onFailed(err)
return
@@ -168,6 +206,19 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
// Download action, we will download the torrent to the specified folder
// Generate download links
s.logger.Debug().Msgf("Post-Download Action: Download")
if isMultiSeason {
s.logger.Info().Msgf("Processing multi-season download with %d seasons", len(seasons))
err := s.processMultiSeasonDownloads(torrent, debridTorrent, seasons, importReq)
if err != nil {
onFailed(err)
return
}
// Multi-season processing completed successfully
onSuccess(torrent.SavePath)
return
}
if err := client.GetFileDownloadLinks(debridTorrent); err != nil {
onFailed(err)
return
@@ -241,6 +292,7 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
t.Files = files
t.Debrid = debridTorrent.Debrid
t.Size = totalSize
t.TotalSize = totalSize
t.Completed = sizeCompleted
t.NumSeeds = debridTorrent.Seeders
t.Downloaded = sizeCompleted
@@ -252,7 +304,6 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
t.Eta = eta
t.Dlspeed = speed
t.Upspeed = speed
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
return t
}
@@ -267,7 +318,7 @@ func (s *Store) updateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent
}
}
t = s.partialTorrentUpdate(t, debridTorrent)
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
t.ContentPath = t.TorrentPath
if t.IsReady() {
t.State = "pausedUP"

View File

@@ -1,4 +1,4 @@
package store
package wire
import (
"encoding/json"
@@ -167,44 +167,33 @@ func (ts *TorrentStorage) Update(torrent *Torrent) {
func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) {
ts.mu.Lock()
defer ts.mu.Unlock()
key := keyPair(hash, category)
torrent, exists := ts.torrents[key]
if !exists && category == "" {
// Remove the torrent without knowing the category
for k, t := range ts.torrents {
if t.Hash == hash {
key = k
torrent = t
break
wireStore := Get()
for key, torrent := range ts.torrents {
if torrent == nil {
continue
}
if torrent.Hash == hash && (category == "" || torrent.Category == category) {
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
wireStore.importsQueue.Delete(torrent.ID)
}
}
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
dbClient := wireStore.debrid.Client(torrent.Debrid)
if dbClient != nil {
_ = dbClient.DeleteTorrent(torrent.DebridID)
}
}
delete(ts.torrents, key)
if torrent == nil {
return
}
st := Get()
// Check if torrent is queued for download
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
dbClient := st.debrid.Client(torrent.Debrid)
if dbClient != nil {
_ = dbClient.DeleteTorrent(torrent.DebridID)
}
}
delete(ts.torrents, key)
// Delete the torrent folder
if torrent.ContentPath != "" {
err := os.RemoveAll(torrent.ContentPath)
if err != nil {
return
// Delete the torrent folder
if torrent.ContentPath != "" {
err := os.RemoveAll(torrent.ContentPath)
if err != nil {
return
}
}
break
}
}
go func() {
@@ -227,12 +216,11 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
if torrent == nil {
continue
}
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if torrent.Hash == hash {
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
toDelete[torrent.DebridID] = torrent.Debrid
}
@@ -243,6 +231,7 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
return
}
}
break
}
}
}

View File

@@ -1,4 +1,4 @@
package store
package wire
import (
"fmt"
@@ -72,6 +72,60 @@ type Torrent struct {
sync.Mutex
}
// Copy returns a field-by-field duplicate of the torrent.
// Fields are listed explicitly (rather than dereferencing with *t) so
// that the embedded sync.Mutex is NOT copied: the clone starts with a
// fresh, unlocked mutex, avoiding the "copies lock value" vet error and
// the bug of duplicating a possibly-held lock.
// NOTE(review): fields not listed here are left at their zero value in
// the copy — in particular Files (assigned elsewhere in this package)
// does not appear below; confirm callers do not expect it carried over.
func (t *Torrent) Copy() *Torrent {
	return &Torrent{
		ID:                t.ID,
		DebridID:          t.DebridID,
		Debrid:            t.Debrid,
		TorrentPath:       t.TorrentPath,
		AddedOn:           t.AddedOn,
		AmountLeft:        t.AmountLeft,
		AutoTmm:           t.AutoTmm,
		Availability:      t.Availability,
		Category:          t.Category,
		Completed:         t.Completed,
		CompletionOn:      t.CompletionOn,
		ContentPath:       t.ContentPath,
		DlLimit:           t.DlLimit,
		Dlspeed:           t.Dlspeed,
		Downloaded:        t.Downloaded,
		DownloadedSession: t.DownloadedSession,
		Eta:               t.Eta,
		FlPiecePrio:       t.FlPiecePrio,
		ForceStart:        t.ForceStart,
		Hash:              t.Hash,
		LastActivity:      t.LastActivity,
		MagnetUri:         t.MagnetUri,
		MaxRatio:          t.MaxRatio,
		MaxSeedingTime:    t.MaxSeedingTime,
		Name:              t.Name,
		NumComplete:       t.NumComplete,
		NumIncomplete:     t.NumIncomplete,
		NumLeechs:         t.NumLeechs,
		NumSeeds:          t.NumSeeds,
		Priority:          t.Priority,
		Progress:          t.Progress,
		Ratio:             t.Ratio,
		RatioLimit:        t.RatioLimit,
		SavePath:          t.SavePath,
		SeedingTimeLimit:  t.SeedingTimeLimit,
		SeenComplete:      t.SeenComplete,
		SeqDl:             t.SeqDl,
		Size:              t.Size,
		State:             t.State,
		SuperSeeding:      t.SuperSeeding,
		Tags:              t.Tags,
		TimeActive:        t.TimeActive,
		TotalSize:         t.TotalSize,
		Tracker:           t.Tracker,
		UpLimit:           t.UpLimit,
		Uploaded:          t.Uploaded,
		UploadedSession:   t.UploadedSession,
		Upspeed:           t.Upspeed,
		Source:            t.Source,
	}
}
// IsReady reports whether the torrent has finished transferring (nothing
// left to download, or progress at 100%) and has a path assigned on disk.
func (t *Torrent) IsReady() bool {
	if t.TorrentPath == "" {
		return false
	}
	return t.AmountLeft <= 0 || t.Progress == 1
}

44
pkg/wire/worker.go Normal file
View File

@@ -0,0 +1,44 @@
package wire
import "context"
// StartWorkers boots every background worker owned by the store: the
// debrid worker, one goroutine per debrid cache worker, the store's
// queue workers, and the Arr worker. A nil ctx falls back to
// context.Background(). Failures are logged per worker and do not stop
// the remaining workers from starting.
func (s *Store) StartWorkers(ctx context.Context) {
	if ctx == nil {
		ctx = context.Background()
	}

	// Start debrid workers
	if err := s.Debrid().StartWorker(ctx); err != nil {
		s.logger.Error().Err(err).Msg("Failed to start debrid worker")
	} else {
		s.logger.Debug().Msg("Started debrid worker")
	}

	// Cache workers: one goroutine per cache. Shadow the loop variable so
	// each goroutine captures its own cache — required on toolchains
	// before Go 1.22 (where the variable is shared across iterations) and
	// harmless after.
	for _, cache := range s.Debrid().Caches() {
		if cache == nil {
			continue
		}
		cache := cache
		go func() {
			if err := cache.StartWorker(ctx); err != nil {
				s.logger.Error().Err(err).Msg("Failed to start debrid cache worker")
			} else {
				s.logger.Debug().Msgf("Started debrid cache worker for %s", cache.GetConfig().Name)
			}
		}()
	}

	// Store queue workers
	if err := s.StartQueueWorkers(ctx); err != nil {
		s.logger.Error().Err(err).Msg("Failed to start store worker")
	} else {
		s.logger.Debug().Msg("Started store worker")
	}

	// Arr workers
	if err := s.Arr().StartWorker(ctx); err != nil {
		s.logger.Error().Err(err).Msg("Failed to start Arr worker")
	} else {
		s.logger.Debug().Msg("Started Arr worker")
	}
}

View File

@@ -4,6 +4,38 @@ set -e
# Default values
PUID=${PUID:-1000}
PGID=${PGID:-1000}
UMASK=${UMASK:-022}
# Set umask
umask "$UMASK"
# Function to create directories and files
# Function to create directories and files.
# Best-effort: every step tolerates failure (2>/dev/null, || true) so a
# read-only or permission-restricted /app mount does not abort startup
# (the script runs under `set -e`).
setup_directories() {
    # Ensure directories exist
    mkdir -p /app/logs /app/cache 2>/dev/null || true
    # Create log file if it doesn't exist
    touch /app/logs/decypharr.log 2>/dev/null || true
    # Try to set permissions if possible
    chmod 755 /app 2>/dev/null || true
    # Log file is world-writable so the app can append regardless of UID
    chmod 666 /app/logs/decypharr.log 2>/dev/null || true
}
# Check if we're running as root
if [ "$(id -u)" != "0" ]; then
echo "Running as non-root user $(id -u):$(id -g) with umask $UMASK"
# Try to create directories as the current user
setup_directories
export USER="$(id -un)"
export HOME="/app"
exec "$@"
fi
echo "Running as root, setting up user $PUID:$PGID with umask $UMASK"
# Create group if it doesn't exist
if ! getent group "$PGID" > /dev/null 2>&1; then
@@ -19,7 +51,7 @@ fi
USERNAME=$(getent passwd "$PUID" | cut -d: -f1)
GROUPNAME=$(getent group "$PGID" | cut -d: -f1)
# Ensure directories exist and have correct permissions
# Create directories and set proper ownership
mkdir -p /app/logs /app/cache
chown -R "$PUID:$PGID" /app
chmod 755 /app