Compare commits
32 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
207d43b13f | ||
|
|
9f9a85d302 | ||
|
|
2712315108 | ||
|
|
1f384ba4f7 | ||
|
|
7db79e99ba | ||
|
|
ad394c86ee | ||
|
|
7af90ebe47 | ||
|
|
7032cc368b | ||
|
|
f21f5cad94 | ||
|
|
f93d1a5913 | ||
|
|
2a4f09c06d | ||
|
|
b1b6353fb3 | ||
|
|
df7979c430 | ||
|
|
726f97e13c | ||
|
|
ab485adfc8 | ||
|
|
700d00b802 | ||
|
|
22dae9efad | ||
|
|
3f0870cd1c | ||
|
|
30b2db06e7 | ||
|
|
76f5b85313 | ||
|
|
85cd37f29b | ||
|
|
aff12c2e4b | ||
|
|
d76ca032ab | ||
|
|
8bb786c689 | ||
|
|
83058489b6 | ||
|
|
267cc2d32b | ||
|
|
eefe8a3901 | ||
|
|
618eb73067 | ||
|
|
f8667938b6 | ||
|
|
b0a698f15e | ||
|
|
2548c21e5b | ||
|
|
1b03ccefbb |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -3,7 +3,9 @@ config.json
|
||||
.idea/
|
||||
.DS_Store
|
||||
*.torrent
|
||||
!testdata/*.torrent
|
||||
*.magnet
|
||||
!testdata/*.magnet
|
||||
*.db
|
||||
*.log
|
||||
*.log.*
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/pkg/qbit"
|
||||
"github.com/sirrobot01/decypharr/pkg/server"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"github.com/sirrobot01/decypharr/pkg/version"
|
||||
"github.com/sirrobot01/decypharr/pkg/web"
|
||||
"github.com/sirrobot01/decypharr/pkg/webdav"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
@@ -77,7 +77,7 @@ func Start(ctx context.Context) error {
|
||||
reset := func() {
|
||||
// Reset the store and services
|
||||
qb.Reset()
|
||||
store.Reset()
|
||||
wire.Reset()
|
||||
// refresh GC
|
||||
runtime.GC()
|
||||
}
|
||||
@@ -151,7 +151,7 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
|
||||
|
||||
// Start rclone RC server if enabled
|
||||
safeGo(func() error {
|
||||
rcManager := store.Get().RcloneManager()
|
||||
rcManager := wire.Get().RcloneManager()
|
||||
if rcManager == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -160,7 +160,7 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
|
||||
|
||||
if cfg := config.Get(); cfg.Repair.Enabled {
|
||||
safeGo(func() error {
|
||||
repair := store.Get().Repair()
|
||||
repair := wire.Get().Repair()
|
||||
if repair != nil {
|
||||
if err := repair.Start(ctx); err != nil {
|
||||
_log.Error().Err(err).Msg("repair failed")
|
||||
@@ -171,7 +171,7 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
|
||||
}
|
||||
|
||||
safeGo(func() error {
|
||||
store.Get().StartWorkers(ctx)
|
||||
wire.Get().StartWorkers(ctx)
|
||||
return nil
|
||||
})
|
||||
|
||||
|
||||
117
docs/docs/features/private-tracker-downloads.md
Normal file
117
docs/docs/features/private-tracker-downloads.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# Private Tracker Downloads
|
||||
|
||||
It is against the rules of most private trackers to download using debrid services. That's because debrid services do not seed back.
|
||||
|
||||
Despite that, **many torrents from private trackers are cached on debrid services**.
|
||||
|
||||
This can happen if the exact same torrent is uploaded to a public tracker or if another user downloads the torrent from the private tracker using their debrid account.
|
||||
|
||||
However, you do **_NOT_** want to be the first person who downloads and caches the private tracker torrent because it is a very quick way to get your private tracker account banned.
|
||||
|
||||
Fortunately, decypharr offers a feature that allows you to check whether a private tracker torrent has _already_ been cached.
|
||||
|
||||
In a way, this feature lets you use your private trackers to find hashes for the latest releases that have not yet been indexed by zilean, torrentio, and other debrid-focused indexers.
|
||||
|
||||
This allows you to add private tracker torrents to your debrid account without breaking the most common private tracker rules. This significantly reduces the chance of account bans, **but please read the `Risks` section below** for more details and other precautions you should make.
|
||||
|
||||
## Risks
|
||||
|
||||
A lot of care has gone into ensuring this feature is compliant with most private tracker rules:
|
||||
|
||||
- The passkey is not leaked
|
||||
- The private tracker announce URLs are not leaked
|
||||
- The private tracker swarm is not leaked
|
||||
- Even the torrent content is not leaked (by you)
|
||||
|
||||
You are merely downloading it from another source. It's not much different than downloading a torrent that has been uploaded to MegaUpload or another file hoster.
|
||||
|
||||
**But it is NOT completely risk-free.**
|
||||
|
||||
### Suspicious-looking activity
|
||||
|
||||
To use this feature, you must download the `.torrent` file from the private tracker. But since you will never leech the content, it can make your account look suspicious.
|
||||
|
||||
In fact, there is a strictly forbidden technique called `ghostleeching` that also requires downloading of the `.torrent` file, and tracker admins might suspect that this is what you are doing.
|
||||
|
||||
We know of one user who got banned from a Unit3D-based tracker for this.
|
||||
|
||||
**Here is what is recommended:**
|
||||
|
||||
- Be a good private tracker user in general. Perma-seed, upload, contribute
|
||||
- Only enable `Interactive Search` in the arrs (disable `Automatic Search`)
|
||||
- Only use it for content that is not on public sources yet, and you need to watch **RIGHT NOW** without having time to wait for the download to finish
|
||||
- Do **NOT** use it to avoid seeding
|
||||
|
||||
### Accidentally disable this feature
|
||||
|
||||
Another big risk is that you might accidentally disable the feature. The consequence will be that you actually leech the torrent from the tracker, don't seed it, and expose the private swarm to an untrusted third party.
|
||||
|
||||
You should avoid this at all costs.
|
||||
|
||||
Therefore, to reduce the risk further, it is recommended to enable the feature using both methods:
|
||||
|
||||
1. Using the global `Always Remove Tracker URLs` setting in your decypharr `config.json`
|
||||
2. And by enabling the `First and Last First` setting in Radarr / Sonarr
|
||||
|
||||
This way, if one of them gets disabled, you have another backup.
|
||||
|
||||
## How to enable this feature
|
||||
|
||||
### Always Remove Tracker URLs
|
||||
|
||||
- In the web UI under `Settings -> QBitTorrent -> Always Remove Tracker URLs`
|
||||
- Or in your `config.json` by setting the `qbittorrent.always_rm_tracker_url` to `true`
|
||||
|
||||
This ensures that the Tracker URLs are removed from **ALL torrents** (regardless of whether they are public, private, or how they were added).
|
||||
|
||||
But this can make downloads of uncached torrents slower or stall because the tracker helps the client find peers to download from.
|
||||
|
||||
If the torrent file has no tracker URLs, the torrent client can try to find peers for public torrents using [DHT](https://en.wikipedia.org/wiki/Mainline_DHT). However, this may be less efficient than connecting to a tracker, and the downloads may be slower or stall.
|
||||
|
||||
If you only download cached torrents, there is no further downside to enabling this option.
|
||||
|
||||
### Only on specific Arr-app clients and indexers
|
||||
|
||||
Alternatively, you can toggle it only for specific download clients and indexers in the Arr-apps...
|
||||
|
||||
- Enable `Show Advanced Settings` in your Arr app
|
||||
- Add a new download client in `Settings -> Download Clients` and call it something like `Decypharr (Private)`
|
||||
- Enable the `First and Last First` checkbox, which will tell Decypharr to remove the tracker URLs
|
||||
- Add a duplicate version of your private tracker indexer for Decypharr downloads
|
||||
- Untick `Enable Automatic Search`
|
||||
- Tick `Enable Interactive Search`
|
||||
- Set `Download Client` to your new `Decypharr (Private)` client (requires `Show Advanced Settings`)
|
||||
|
||||
If you are using Prowlarr to sync your indexers, you can't set the `Download Client` in Prowlarr. You must update it directly in your Arr-apps after the indexers get synced. But future updates to the indexers won't reset the setting.
|
||||
|
||||
### Test it
|
||||
|
||||
After enabling the feature, try adding a [public torrent](https://ubuntu.com/download/alternative-downloads) through the Decypharr UI and a **public torrent** through your Arr-apps.
|
||||
|
||||
Then check the decypharr log for a log entry like...
|
||||
|
||||
```log
|
||||
Removed 2 tracker URLs from torrent file
|
||||
```
|
||||
|
||||
If you see this log entry, it means the tracker URLs are being stripped from your torrents and you can safely enable it on private tracker indexers.
|
||||
|
||||
## How it works
|
||||
|
||||
When you add a new torrent through the QBitTorrent API or through the Web UI, decypharr converts your torrent into a magnet link and then uses your debrid service's API to download that magnet link.
|
||||
|
||||
The torrent magnet link contains:
|
||||
|
||||
1. The `info hash` that uniquely identifies the torrent, files, and file names
|
||||
2. The torrent name
|
||||
3. The URLs of the tracker to connect to
|
||||
|
||||
Private tracker URLs in torrents contain a `passkey`. This is a unique identifier that ties the torrent file to your private tracker account.
|
||||
|
||||
Only if the `passkey` is valid will the tracker allow the torrent client to connect and download the files. This is also how private torrent trackers measure your downloads and uploads.
|
||||
|
||||
The `Remove Tracker URLs` feature removes all the tracker URLs (which include your private `passkey`). This means when decypharr attempts to download the torrent, it only passes the `info hash` and torrent name to the debrid service.
|
||||
|
||||
Without the tracker URLs, your debrid service has no way to connect to the private tracker to download the files, and your `passkey` and the private torrent tracker swarm are not exposed.
|
||||
|
||||
**But if the torrent is already cached, it's immediately added to your account.**
|
||||
@@ -21,6 +21,7 @@ If it's the first time you're accessing the UI, you will be prompted to set up y
|
||||
|
||||
- Click on **Qbittorrent** in the tab
|
||||
- Set the **Download Folder** to where you want Decypharr to save downloaded files. These files will be symlinked to the mount folder you configured earlier.
|
||||
- Set **Always Remove Tracker URLs** if you want to always remove the tracker URLs from torrents and magnet links. This is useful if you want to [download private tracker torrents](features/private-tracker-downloads.md) without breaking the rules, but will make uncached torrents always stall.
|
||||
You can leave the remaining settings as default for now.
|
||||
|
||||
### Arrs Configuration
|
||||
@@ -42,6 +43,7 @@ To connect Decypharr to your Sonarr or Radarr instance:
|
||||
- **Category**: e.g., `sonarr`, `radarr` (match what you configured in Decypharr)
|
||||
- **Use SSL**: `No`
|
||||
- **Sequential Download**: `No` or `Yes` (if you want to download torrents locally instead of symlink)
|
||||
- **First and Last First**: `No` by default or `Yes` if you want to remove torrent tracker URLs from the torrents. This can make it possible to [download private tracker torrents without breaking the rules](features/private-tracker-downloads.md).
|
||||
3. Click **Test** to verify the connection
|
||||
4. Click **Save** to add the download client
|
||||
|
||||
|
||||
@@ -66,6 +66,7 @@ nav:
|
||||
- Features:
|
||||
- Overview: features/index.md
|
||||
- Repair Worker: features/repair-worker.md
|
||||
- Private Tracker Downloads: features/private-tracker-downloads.md
|
||||
- Guides:
|
||||
- Overview: guides/index.md
|
||||
- Manual Downloading: guides/downloading.md
|
||||
|
||||
1
go.mod
1
go.mod
@@ -11,6 +11,7 @@ require (
|
||||
github.com/go-co-op/gocron/v2 v2.16.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/sessions v1.4.0
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0
|
||||
github.com/robfig/cron/v3 v3.0.1
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/stanNthe5/stringbuf v0.0.3
|
||||
|
||||
2
go.sum
2
go.sum
@@ -186,6 +186,8 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
|
||||
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
|
||||
19
internal/config/auth.go
Normal file
19
internal/config/auth.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package config
|
||||
|
||||
import "golang.org/x/crypto/bcrypt"
|
||||
|
||||
func VerifyAuth(username, password string) bool {
|
||||
// If you're storing hashed password, use bcrypt to compare
|
||||
if username == "" {
|
||||
return false
|
||||
}
|
||||
auth := Get().GetAuth()
|
||||
if auth == nil {
|
||||
return false
|
||||
}
|
||||
if username != auth.Username {
|
||||
return false
|
||||
}
|
||||
err := bcrypt.CompareHashAndPassword([]byte(auth.Password), []byte(password))
|
||||
return err == nil
|
||||
}
|
||||
@@ -49,14 +49,15 @@ type Debrid struct {
|
||||
}
|
||||
|
||||
type QBitTorrent struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Port string `json:"port,omitempty"` // deprecated
|
||||
DownloadFolder string `json:"download_folder,omitempty"`
|
||||
Categories []string `json:"categories,omitempty"`
|
||||
RefreshInterval int `json:"refresh_interval,omitempty"`
|
||||
SkipPreCache bool `json:"skip_pre_cache,omitempty"`
|
||||
MaxDownloads int `json:"max_downloads,omitempty"`
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Port string `json:"port,omitempty"` // deprecated
|
||||
DownloadFolder string `json:"download_folder,omitempty"`
|
||||
Categories []string `json:"categories,omitempty"`
|
||||
RefreshInterval int `json:"refresh_interval,omitempty"`
|
||||
SkipPreCache bool `json:"skip_pre_cache,omitempty"`
|
||||
MaxDownloads int `json:"max_downloads,omitempty"`
|
||||
AlwaysRmTrackerUrls bool `json:"always_rm_tracker_urls,omitempty"`
|
||||
}
|
||||
|
||||
type Arr struct {
|
||||
@@ -91,6 +92,7 @@ type Rclone struct {
|
||||
// Global mount folder where all providers will be mounted as subfolders
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
MountPath string `json:"mount_path,omitempty"`
|
||||
RcPort string `json:"rc_port,omitempty"`
|
||||
|
||||
// Cache settings
|
||||
CacheDir string `json:"cache_dir,omitempty"`
|
||||
@@ -98,13 +100,21 @@ type Rclone struct {
|
||||
// VFS settings
|
||||
VfsCacheMode string `json:"vfs_cache_mode,omitempty"` // off, minimal, writes, full
|
||||
VfsCacheMaxAge string `json:"vfs_cache_max_age,omitempty"` // Maximum age of objects in the cache (default 1h)
|
||||
VfsDiskSpaceTotal string `json:"vfs_disk_space_total,omitempty"` // Total disk space available for the cache (default off)
|
||||
VfsCacheMaxSize string `json:"vfs_cache_max_size,omitempty"` // Maximum size of the cache (default off)
|
||||
VfsCachePollInterval string `json:"vfs_cache_poll_interval,omitempty"` // How often to poll for changes (default 1m)
|
||||
VfsReadChunkSize string `json:"vfs_read_chunk_size,omitempty"` // Read chunk size (default 128M)
|
||||
VfsReadChunkSizeLimit string `json:"vfs_read_chunk_size_limit,omitempty"` // Max chunk size (default off)
|
||||
VfsReadAhead string `json:"vfs_read_ahead,omitempty"` // read ahead size
|
||||
VfsPollInterval string `json:"vfs_poll_interval,omitempty"` // How often to rclone cleans the cache (default 1m)
|
||||
BufferSize string `json:"buffer_size,omitempty"` // Buffer size for reading files (default 16M)
|
||||
BwLimit string `json:"bw_limit,omitempty"` // Bandwidth limit (default off)
|
||||
|
||||
VfsCacheMinFreeSpace string `json:"vfs_cache_min_free_space,omitempty"`
|
||||
VfsFastFingerprint bool `json:"vfs_fast_fingerprint,omitempty"`
|
||||
VfsReadChunkStreams int `json:"vfs_read_chunk_streams,omitempty"`
|
||||
AsyncRead *bool `json:"async_read,omitempty"` // Use async read for files
|
||||
Transfers int `json:"transfers,omitempty"` // Number of transfers to use (default 4)
|
||||
UseMmap bool `json:"use_mmap,omitempty"`
|
||||
|
||||
// File system settings
|
||||
UID uint32 `json:"uid,omitempty"` // User ID for mounted files
|
||||
@@ -143,6 +153,8 @@ type Config struct {
|
||||
Auth *Auth `json:"-"`
|
||||
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
|
||||
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
|
||||
CallbackURL string `json:"callback_url,omitempty"`
|
||||
EnableWebdavAuth bool `json:"enable_webdav_auth,omitempty"`
|
||||
}
|
||||
|
||||
func (c *Config) JsonFile() string {
|
||||
@@ -299,6 +311,10 @@ func (c *Config) IsSizeAllowed(size int64) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Config) SecretKey() string {
|
||||
return cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
|
||||
}
|
||||
|
||||
func (c *Config) GetAuth() *Auth {
|
||||
if !c.UseAuth {
|
||||
return nil
|
||||
@@ -324,15 +340,12 @@ func (c *Config) SaveAuth(auth *Auth) error {
|
||||
return os.WriteFile(c.AuthFile(), data, 0644)
|
||||
}
|
||||
|
||||
func (c *Config) NeedsSetup() error {
|
||||
func (c *Config) CheckSetup() error {
|
||||
return ValidateConfig(c)
|
||||
}
|
||||
|
||||
func (c *Config) NeedsAuth() bool {
|
||||
if c.UseAuth {
|
||||
return c.GetAuth().Username == ""
|
||||
}
|
||||
return false
|
||||
return c.UseAuth && (c.Auth == nil || c.Auth.Username == "" || c.Auth.Password == "")
|
||||
}
|
||||
|
||||
func (c *Config) updateDebrid(d Debrid) Debrid {
|
||||
@@ -417,6 +430,11 @@ func (c *Config) setDefaults() {
|
||||
|
||||
// Rclone defaults
|
||||
if c.Rclone.Enabled {
|
||||
c.Rclone.RcPort = cmp.Or(c.Rclone.RcPort, "5572")
|
||||
if c.Rclone.AsyncRead == nil {
|
||||
_asyncTrue := true
|
||||
c.Rclone.AsyncRead = &_asyncTrue
|
||||
}
|
||||
c.Rclone.VfsCacheMode = cmp.Or(c.Rclone.VfsCacheMode, "off")
|
||||
if c.Rclone.UID == 0 {
|
||||
c.Rclone.UID = uint32(os.Getuid())
|
||||
@@ -429,6 +447,9 @@ func (c *Config) setDefaults() {
|
||||
c.Rclone.GID = uint32(os.Getgid())
|
||||
}
|
||||
}
|
||||
if c.Rclone.Transfers == 0 {
|
||||
c.Rclone.Transfers = 4 // Default number of transfers
|
||||
}
|
||||
if c.Rclone.VfsCacheMode != "off" {
|
||||
c.Rclone.VfsCachePollInterval = cmp.Or(c.Rclone.VfsCachePollInterval, "1m") // Clean cache every minute
|
||||
}
|
||||
|
||||
@@ -45,6 +45,8 @@ func getDiscordHeader(event string) string {
|
||||
return "[Decypharr] Repair Completed, Awaiting action"
|
||||
case "repair_complete":
|
||||
return "[Decypharr] Repair Complete"
|
||||
case "repair_cancelled":
|
||||
return "[Decypharr] Repair Cancelled"
|
||||
default:
|
||||
// split the event string and capitalize the first letter of each word
|
||||
evs := strings.Split(event, "_")
|
||||
|
||||
@@ -7,10 +7,6 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"go.uber.org/ratelimit"
|
||||
"golang.org/x/net/proxy"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
@@ -20,6 +16,11 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"go.uber.org/ratelimit"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
func JoinURL(base string, paths ...string) (string, error) {
|
||||
@@ -298,40 +299,7 @@ func New(options ...ClientOption) *Client {
|
||||
}
|
||||
|
||||
// Configure proxy if needed
|
||||
if client.proxy != "" {
|
||||
if strings.HasPrefix(client.proxy, "socks5://") {
|
||||
// Handle SOCKS5 proxy
|
||||
socksURL, err := url.Parse(client.proxy)
|
||||
if err != nil {
|
||||
client.logger.Error().Msgf("Failed to parse SOCKS5 proxy URL: %v", err)
|
||||
} else {
|
||||
auth := &proxy.Auth{}
|
||||
if socksURL.User != nil {
|
||||
auth.User = socksURL.User.Username()
|
||||
password, _ := socksURL.User.Password()
|
||||
auth.Password = password
|
||||
}
|
||||
|
||||
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
|
||||
if err != nil {
|
||||
client.logger.Error().Msgf("Failed to create SOCKS5 dialer: %v", err)
|
||||
} else {
|
||||
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
return dialer.Dial(network, addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
proxyURL, err := url.Parse(client.proxy)
|
||||
if err != nil {
|
||||
client.logger.Error().Msgf("Failed to parse proxy URL: %v", err)
|
||||
} else {
|
||||
transport.Proxy = http.ProxyURL(proxyURL)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
transport.Proxy = http.ProxyFromEnvironment
|
||||
}
|
||||
SetProxy(transport, client.proxy)
|
||||
|
||||
// Set the transport to the client
|
||||
client.client.Transport = transport
|
||||
@@ -417,3 +385,80 @@ func isRetryableError(err error) bool {
|
||||
// Not a retryable error
|
||||
return false
|
||||
}
|
||||
|
||||
func SetProxy(transport *http.Transport, proxyURL string) {
|
||||
if proxyURL != "" {
|
||||
if strings.HasPrefix(proxyURL, "socks5://") {
|
||||
// Handle SOCKS5 proxy
|
||||
socksURL, err := url.Parse(proxyURL)
|
||||
if err != nil {
|
||||
return
|
||||
} else {
|
||||
auth := &proxy.Auth{}
|
||||
if socksURL.User != nil {
|
||||
auth.User = socksURL.User.Username()
|
||||
password, _ := socksURL.User.Password()
|
||||
auth.Password = password
|
||||
}
|
||||
|
||||
dialer, err := proxy.SOCKS5("tcp", socksURL.Host, auth, proxy.Direct)
|
||||
if err != nil {
|
||||
return
|
||||
} else {
|
||||
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
return dialer.Dial(network, addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
_proxy, err := url.Parse(proxyURL)
|
||||
if err != nil {
|
||||
return
|
||||
} else {
|
||||
transport.Proxy = http.ProxyURL(_proxy)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
transport.Proxy = http.ProxyFromEnvironment
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ValidateURL reports whether urlStr is an acceptable service address.
// Two forms pass: a full http/https URL, or a bare "host:port" pair with no
// scheme. Anything else yields a descriptive error.
func ValidateURL(urlStr string) error {
	if urlStr == "" {
		return fmt.Errorf("URL cannot be empty")
	}

	// First attempt: a complete URL carrying both scheme and host.
	if u, err := url.Parse(urlStr); err == nil && u.Scheme != "" && u.Host != "" {
		if u.Scheme != "http" && u.Scheme != "https" {
			return fmt.Errorf("URL scheme must be http or https")
		}
		return nil
	}

	// Second attempt: bare host:port (has a colon but no "://" separator).
	// Prefix a scheme so url.Parse treats it as an authority.
	if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
		u, err := url.Parse("http://" + urlStr)
		if err != nil {
			return fmt.Errorf("invalid host:port format: %w", err)
		}
		switch {
		case u.Host == "":
			return fmt.Errorf("host is required in host:port format")
		case u.Port() == "":
			return fmt.Errorf("port is required in host:port format")
		}
		return nil
	}

	return fmt.Errorf("invalid URL format: %s", urlStr)
}
|
||||
|
||||
45
internal/testutil/testutil.go
Normal file
45
internal/testutil/testutil.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GetTestDataPath returns the relative path to the project-root testdata
// directory (assumes the caller's package sits two directories below root).
func GetTestDataPath() string {
	return filepath.Join("..", "..", "testdata")
}

// GetTestDataFilePath resolves filename inside the testdata directory.
func GetTestDataFilePath(filename string) string {
	return filepath.Join(GetTestDataPath(), filename)
}

// GetTestTorrentPath returns the path of the Ubuntu sample .torrent fixture.
func GetTestTorrentPath() string {
	return GetTestDataFilePath("ubuntu-25.04-desktop-amd64.iso.torrent")
}

// GetTestMagnetPath returns the path of the Ubuntu sample .magnet fixture.
func GetTestMagnetPath() string {
	return GetTestDataFilePath("ubuntu-25.04-desktop-amd64.iso.magnet")
}

// GetTestDataBytes loads a testdata file and returns its raw bytes.
func GetTestDataBytes(filename string) ([]byte, error) {
	return os.ReadFile(GetTestDataFilePath(filename))
}

// GetTestDataContent loads a testdata file as a string with surrounding
// whitespace trimmed. The read error, if any, is returned alongside.
func GetTestDataContent(filename string) (string, error) {
	raw, err := GetTestDataBytes(filename)
	return strings.TrimSpace(string(raw)), err
}

// GetTestMagnetContent returns the trimmed contents of the Ubuntu sample
// .magnet fixture.
func GetTestMagnetContent() (string, error) {
	return GetTestDataContent("ubuntu-25.04-desktop-amd64.iso.magnet")
}
|
||||
@@ -84,3 +84,54 @@ func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize i
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureDir makes sure dirPath exists and is a directory, creating it (and
// any missing parents) with 0755 permissions when absent. It returns an
// error if the path is empty, cannot be created/stat'ed, or exists but is
// not a directory.
func EnsureDir(dirPath string) error {
	if dirPath == "" {
		return fmt.Errorf("directory path is empty")
	}
	info, err := os.Stat(dirPath)
	if os.IsNotExist(err) {
		// Directory does not exist, create it
		if err := os.MkdirAll(dirPath, 0755); err != nil {
			return fmt.Errorf("failed to create directory: %v", err)
		}
		return nil
	}
	if err != nil {
		return err
	}
	// The path exists: reject anything that is not a directory, since
	// callers expect a usable directory (the old code silently accepted a
	// regular file here).
	if !info.IsDir() {
		return fmt.Errorf("path exists but is not a directory: %s", dirPath)
	}
	return nil
}
|
||||
|
||||
// FormatSize renders a byte count as a human-readable string using binary
// (1024-based) units: bytes, KB, MB, GB or TB. Values below 1 KB are
// printed without decimals; larger values use two decimal places.
func FormatSize(bytes int64) string {
	const (
		KB = int64(1024)
		MB = 1024 * KB
		GB = 1024 * MB
		TB = 1024 * GB
	)

	// Walk the unit table from largest to smallest and pick the first one
	// the value reaches.
	steps := []struct {
		limit int64
		unit  string
	}{
		{TB, "TB"},
		{GB, "GB"},
		{MB, "MB"},
		{KB, "KB"},
	}
	for _, s := range steps {
		if bytes >= s.limit {
			return fmt.Sprintf("%.2f %s", float64(bytes)/float64(s.limit), s.unit)
		}
	}

	// Below 1 KB: plain byte count, no decimal places.
	return fmt.Sprintf("%.0f bytes", float64(bytes))
}
|
||||
|
||||
@@ -7,17 +7,17 @@ import (
|
||||
"encoding/base32"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/anacrolix/torrent/metainfo"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -36,7 +36,18 @@ func (m *Magnet) IsTorrent() bool {
|
||||
return m.File != nil
|
||||
}
|
||||
|
||||
func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
||||
// stripTrackersFromMagnet removes trackers from a magnet and returns a modified copy
|
||||
func stripTrackersFromMagnet(mi metainfo.Magnet, fileType string) metainfo.Magnet {
|
||||
originalTrackerCount := len(mi.Trackers)
|
||||
if len(mi.Trackers) > 0 {
|
||||
log := logger.Default()
|
||||
mi.Trackers = nil
|
||||
log.Printf("Removed %d tracker URLs from %s", originalTrackerCount, fileType)
|
||||
}
|
||||
return mi
|
||||
}
|
||||
|
||||
func GetMagnetFromFile(file io.Reader, filePath string, rmTrackerUrls bool) (*Magnet, error) {
|
||||
var (
|
||||
m *Magnet
|
||||
err error
|
||||
@@ -46,14 +57,14 @@ func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m, err = GetMagnetFromBytes(torrentData)
|
||||
m, err = GetMagnetFromBytes(torrentData, rmTrackerUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// .magnet file
|
||||
magnetLink := ReadMagnetFile(file)
|
||||
m, err = GetMagnetInfo(magnetLink)
|
||||
m, err = GetMagnetInfo(magnetLink, rmTrackerUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -62,52 +73,42 @@ func GetMagnetFromFile(file io.Reader, filePath string) (*Magnet, error) {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func GetMagnetFromUrl(url string) (*Magnet, error) {
|
||||
func GetMagnetFromUrl(url string, rmTrackerUrls bool) (*Magnet, error) {
|
||||
if strings.HasPrefix(url, "magnet:") {
|
||||
return GetMagnetInfo(url)
|
||||
return GetMagnetInfo(url, rmTrackerUrls)
|
||||
} else if strings.HasPrefix(url, "http") {
|
||||
return OpenMagnetHttpURL(url)
|
||||
return OpenMagnetHttpURL(url, rmTrackerUrls)
|
||||
}
|
||||
return nil, fmt.Errorf("invalid url")
|
||||
}
|
||||
|
||||
func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
|
||||
func GetMagnetFromBytes(torrentData []byte, rmTrackerUrls bool) (*Magnet, error) {
|
||||
// Create a scanner to read the file line by line
|
||||
mi, err := metainfo.Load(bytes.NewReader(torrentData))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hash := mi.HashInfoBytes()
|
||||
infoHash := hash.HexString()
|
||||
info, err := mi.UnmarshalInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
magnetMeta := mi.Magnet(&hash, &info)
|
||||
if rmTrackerUrls {
|
||||
magnetMeta = stripTrackersFromMagnet(magnetMeta, "torrent file")
|
||||
}
|
||||
magnet := &Magnet{
|
||||
InfoHash: infoHash,
|
||||
Name: info.Name,
|
||||
Size: info.Length,
|
||||
Link: mi.Magnet(&hash, &info).String(),
|
||||
Link: magnetMeta.String(),
|
||||
File: torrentData,
|
||||
}
|
||||
return magnet, nil
|
||||
}
|
||||
|
||||
func OpenMagnetFile(filePath string) string {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
log.Println("Error opening file:", err)
|
||||
return ""
|
||||
}
|
||||
defer func(file *os.File) {
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}(file) // Ensure the file is closed after the function ends
|
||||
return ReadMagnetFile(file)
|
||||
}
|
||||
|
||||
func ReadMagnetFile(file io.Reader) string {
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
@@ -119,12 +120,13 @@ func ReadMagnetFile(file io.Reader) string {
|
||||
|
||||
// Check for any errors during scanning
|
||||
if err := scanner.Err(); err != nil {
|
||||
log := logger.Default()
|
||||
log.Println("Error reading file:", err)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func OpenMagnetHttpURL(magnetLink string) (*Magnet, error) {
|
||||
func OpenMagnetHttpURL(magnetLink string, rmTrackerUrls bool) (*Magnet, error) {
|
||||
resp, err := http.Get(magnetLink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error making GET request: %v", err)
|
||||
@@ -139,34 +141,35 @@ func OpenMagnetHttpURL(magnetLink string) (*Magnet, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
return GetMagnetFromBytes(torrentData)
|
||||
return GetMagnetFromBytes(torrentData, rmTrackerUrls)
|
||||
}
|
||||
|
||||
func GetMagnetInfo(magnetLink string) (*Magnet, error) {
|
||||
func GetMagnetInfo(magnetLink string, rmTrackerUrls bool) (*Magnet, error) {
|
||||
if magnetLink == "" {
|
||||
return nil, fmt.Errorf("error getting magnet from file")
|
||||
}
|
||||
|
||||
magnetURI, err := url.Parse(magnetLink)
|
||||
mi, err := metainfo.ParseMagnetUri(magnetLink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing magnet link")
|
||||
return nil, fmt.Errorf("error parsing magnet link: %w", err)
|
||||
}
|
||||
|
||||
query := magnetURI.Query()
|
||||
xt := query.Get("xt")
|
||||
dn := query.Get("dn")
|
||||
|
||||
// Extract BTIH
|
||||
parts := strings.Split(xt, ":")
|
||||
btih := ""
|
||||
if len(parts) > 2 {
|
||||
btih = parts[2]
|
||||
// Strip all announce URLs if requested
|
||||
if rmTrackerUrls {
|
||||
mi = stripTrackersFromMagnet(mi, "magnet link")
|
||||
}
|
||||
|
||||
btih := mi.InfoHash.HexString()
|
||||
dn := mi.DisplayName
|
||||
|
||||
// Reconstruct the magnet link using the (possibly modified) spec
|
||||
finalLink := mi.String()
|
||||
|
||||
magnet := &Magnet{
|
||||
InfoHash: btih,
|
||||
Name: dn,
|
||||
Size: 0,
|
||||
Link: magnetLink,
|
||||
Link: finalLink,
|
||||
}
|
||||
return magnet, nil
|
||||
}
|
||||
|
||||
198
internal/utils/magnet_test.go
Normal file
198
internal/utils/magnet_test.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/testutil"
|
||||
)
|
||||
|
||||
// checkMagnet is a helper function that verifies magnet properties
|
||||
func checkMagnet(t *testing.T, magnet *Magnet, expectedInfoHash, expectedName, expectedLink string, expectedTrackerCount int, shouldBeTorrent bool) {
|
||||
t.Helper() // This marks the function as a test helper
|
||||
|
||||
// Verify basic properties
|
||||
if magnet.Name != expectedName {
|
||||
t.Errorf("Expected name '%s', got '%s'", expectedName, magnet.Name)
|
||||
}
|
||||
if magnet.InfoHash != expectedInfoHash {
|
||||
t.Errorf("Expected InfoHash '%s', got '%s'", expectedInfoHash, magnet.InfoHash)
|
||||
}
|
||||
if magnet.Link != expectedLink {
|
||||
t.Errorf("Expected Link '%s', got '%s'", expectedLink, magnet.Link)
|
||||
}
|
||||
|
||||
// Verify the magnet link contains the essential info hash
|
||||
if !strings.Contains(magnet.Link, "xt=urn:btih:"+expectedInfoHash) {
|
||||
t.Error("Magnet link should contain info hash")
|
||||
}
|
||||
|
||||
// Verify tracker count
|
||||
trCount := strings.Count(magnet.Link, "tr=")
|
||||
if trCount != expectedTrackerCount {
|
||||
t.Errorf("Expected %d tracker URLs, got %d", expectedTrackerCount, trCount)
|
||||
}
|
||||
}
|
||||
|
||||
// testMagnetFromFile is a helper function for tests that use GetMagnetFromFile with file operations
|
||||
func testMagnetFromFile(t *testing.T, filePath string, rmTrackerUrls bool, expectedInfoHash, expectedName, expectedLink string, expectedTrackerCount int) {
|
||||
t.Helper()
|
||||
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open torrent file %s: %v", filePath, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
magnet, err := GetMagnetFromFile(file, filepath.Base(filePath), rmTrackerUrls)
|
||||
if err != nil {
|
||||
t.Fatalf("GetMagnetFromFile failed: %v", err)
|
||||
}
|
||||
|
||||
checkMagnet(t, magnet, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount, true)
|
||||
|
||||
// Log the result
|
||||
if rmTrackerUrls {
|
||||
t.Logf("Generated clean magnet link: %s", magnet.Link)
|
||||
} else {
|
||||
t.Logf("Generated magnet link with trackers: %s", magnet.Link)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMagnetFromFile_RealTorrentFile_StripTrue(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedTrackerCount := 0 // Should be 0 when stripping trackers
|
||||
|
||||
torrentPath := testutil.GetTestTorrentPath()
|
||||
testMagnetFromFile(t, torrentPath, true, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount)
|
||||
}
|
||||
|
||||
func TestGetMagnetFromFile_RealTorrentFile_StripFalse(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso&tr=https%3A%2F%2Ftorrent.ubuntu.com%2Fannounce&tr=https%3A%2F%2Fipv6.torrent.ubuntu.com%2Fannounce"
|
||||
expectedTrackerCount := 2 // Should be 2 when preserving trackers
|
||||
|
||||
torrentPath := testutil.GetTestTorrentPath()
|
||||
testMagnetFromFile(t, torrentPath, false, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount)
|
||||
}
|
||||
|
||||
func TestGetMagnetFromFile_MagnetFile_StripTrue(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedTrackerCount := 0 // Should be 0 when stripping trackers
|
||||
|
||||
torrentPath := testutil.GetTestMagnetPath()
|
||||
testMagnetFromFile(t, torrentPath, true, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount)
|
||||
}
|
||||
|
||||
func TestGetMagnetFromFile_MagnetFile_StripFalse(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso&tr=https%3A%2F%2Fipv6.torrent.ubuntu.com%2Fannounce&tr=https%3A%2F%2Ftorrent.ubuntu.com%2Fannounce"
|
||||
expectedTrackerCount := 2
|
||||
|
||||
torrentPath := testutil.GetTestMagnetPath()
|
||||
testMagnetFromFile(t, torrentPath, false, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount)
|
||||
}
|
||||
|
||||
func TestGetMagnetFromUrl_MagnetLink_StripTrue(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedTrackerCount := 0
|
||||
|
||||
// Load the magnet URL from the test file
|
||||
magnetUrl, err := testutil.GetTestMagnetContent()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load magnet URL from test file: %v", err)
|
||||
}
|
||||
|
||||
magnet, err := GetMagnetFromUrl(magnetUrl, true)
|
||||
if err != nil {
|
||||
t.Fatalf("GetMagnetFromUrl failed: %v", err)
|
||||
}
|
||||
|
||||
checkMagnet(t, magnet, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount, false)
|
||||
t.Logf("Generated clean magnet link: %s", magnet.Link)
|
||||
}
|
||||
|
||||
func TestGetMagnetFromUrl_MagnetLink_StripFalse(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso&tr=https%3A%2F%2Fipv6.torrent.ubuntu.com%2Fannounce&tr=https%3A%2F%2Ftorrent.ubuntu.com%2Fannounce"
|
||||
expectedTrackerCount := 2
|
||||
|
||||
// Load the magnet URL from the test file
|
||||
magnetUrl, err := testutil.GetTestMagnetContent()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to load magnet URL from test file: %v", err)
|
||||
}
|
||||
|
||||
magnet, err := GetMagnetFromUrl(magnetUrl, false)
|
||||
if err != nil {
|
||||
t.Fatalf("GetMagnetFromUrl failed: %v", err)
|
||||
}
|
||||
|
||||
checkMagnet(t, magnet, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount, false)
|
||||
t.Logf("Generated magnet link with trackers: %s", magnet.Link)
|
||||
}
|
||||
|
||||
|
||||
// testMagnetFromHttpTorrent is a helper function for tests that use GetMagnetFromUrl with HTTP torrent links
|
||||
func testMagnetFromHttpTorrent(t *testing.T, torrentPath string, rmTrackerUrls bool, expectedInfoHash, expectedName, expectedLink string, expectedTrackerCount int) {
|
||||
t.Helper()
|
||||
|
||||
// Read the torrent file content
|
||||
torrentData, err := testutil.GetTestDataBytes(torrentPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read torrent file: %v", err)
|
||||
}
|
||||
|
||||
// Create a test HTTP server that serves the torrent file
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/x-bittorrent")
|
||||
w.Write(torrentData)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
// Test the function with the mock server URL
|
||||
magnet, err := GetMagnetFromUrl(server.URL, rmTrackerUrls)
|
||||
if err != nil {
|
||||
t.Fatalf("GetMagnetFromUrl failed: %v", err)
|
||||
}
|
||||
|
||||
checkMagnet(t, magnet, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount, true)
|
||||
|
||||
// Log the result
|
||||
if rmTrackerUrls {
|
||||
t.Logf("Generated clean magnet link from HTTP torrent: %s", magnet.Link)
|
||||
} else {
|
||||
t.Logf("Generated magnet link with trackers from HTTP torrent: %s", magnet.Link)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetMagnetFromUrl_TorrentLink_StripTrue(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedTrackerCount := 0
|
||||
|
||||
testMagnetFromHttpTorrent(t, "ubuntu-25.04-desktop-amd64.iso.torrent", true, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount)
|
||||
}
|
||||
|
||||
func TestGetMagnetFromUrl_TorrentLink_StripFalse(t *testing.T) {
|
||||
expectedInfoHash := "8a19577fb5f690970ca43a57ff1011ae202244b8"
|
||||
expectedName := "ubuntu-25.04-desktop-amd64.iso"
|
||||
expectedLink := "magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso&tr=https%3A%2F%2Ftorrent.ubuntu.com%2Fannounce&tr=https%3A%2F%2Fipv6.torrent.ubuntu.com%2Fannounce"
|
||||
expectedTrackerCount := 2
|
||||
|
||||
testMagnetFromHttpTorrent(t, "ubuntu-25.04-desktop-amd64.iso.torrent", false, expectedInfoHash, expectedName, expectedLink, expectedTrackerCount)
|
||||
}
|
||||
@@ -22,3 +22,15 @@ func Contains(slice []string, value string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func Mask(text string) string {
|
||||
res := ""
|
||||
if len(text) > 12 {
|
||||
res = text[:8] + "****" + text[len(text)-4:]
|
||||
} else if len(text) > 8 {
|
||||
res = text[:4] + "****" + text[len(text)-2:]
|
||||
} else {
|
||||
res = "****"
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
118
pkg/arr/arr.go
118
pkg/arr/arr.go
@@ -2,19 +2,21 @@ package arr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
)
|
||||
|
||||
// Type is a type of arr
|
||||
@@ -32,12 +34,14 @@ const (
|
||||
Radarr Type = "radarr"
|
||||
Lidarr Type = "lidarr"
|
||||
Readarr Type = "readarr"
|
||||
Others Type = "others"
|
||||
)
|
||||
|
||||
type Arr struct {
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
Token string `json:"token"`
|
||||
Name string `json:"name"`
|
||||
Host string `json:"host"`
|
||||
Token string `json:"token"`
|
||||
|
||||
Type Type `json:"type"`
|
||||
Cleanup bool `json:"cleanup"`
|
||||
SkipRepair bool `json:"skip_repair"`
|
||||
@@ -109,7 +113,11 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
|
||||
|
||||
func (a *Arr) Validate() error {
|
||||
if a.Token == "" || a.Host == "" {
|
||||
return fmt.Errorf("arr not configured: %s", a.Name)
|
||||
return fmt.Errorf("arr not configured")
|
||||
}
|
||||
|
||||
if request.ValidateURL(a.Host) != nil {
|
||||
return fmt.Errorf("invalid arr host URL")
|
||||
}
|
||||
resp, err := a.Request("GET", "/api/v3/health", nil)
|
||||
if err != nil {
|
||||
@@ -146,7 +154,7 @@ func InferType(host, name string) Type {
|
||||
case strings.Contains(host, "readarr") || strings.Contains(name, "readarr"):
|
||||
return Readarr
|
||||
default:
|
||||
return ""
|
||||
return Others
|
||||
}
|
||||
}
|
||||
|
||||
@@ -157,7 +165,11 @@ func NewStorage() *Storage {
|
||||
continue // Skip if host or token is not set
|
||||
}
|
||||
name := a.Name
|
||||
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
|
||||
as := New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
|
||||
if request.ValidateURL(as.Host) != nil {
|
||||
continue
|
||||
}
|
||||
arrs[a.Name] = as
|
||||
}
|
||||
return &Storage{
|
||||
Arrs: arrs,
|
||||
@@ -171,6 +183,11 @@ func (s *Storage) AddOrUpdate(arr *Arr) {
|
||||
if arr.Host == "" || arr.Token == "" || arr.Name == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Check the host URL
|
||||
if request.ValidateURL(arr.Host) != nil {
|
||||
return
|
||||
}
|
||||
s.Arrs[arr.Name] = arr
|
||||
}
|
||||
|
||||
@@ -190,6 +207,87 @@ func (s *Storage) GetAll() []*Arr {
|
||||
return arrs
|
||||
}
|
||||
|
||||
func (s *Storage) SyncToConfig() []config.Arr {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
cfg := config.Get()
|
||||
arrConfigs := make(map[string]config.Arr)
|
||||
for _, a := range cfg.Arrs {
|
||||
if a.Host == "" || a.Token == "" {
|
||||
continue // Skip empty arrs
|
||||
}
|
||||
arrConfigs[a.Name] = a
|
||||
}
|
||||
|
||||
for name, arr := range s.Arrs {
|
||||
exists, ok := arrConfigs[name]
|
||||
if ok {
|
||||
// Update existing arr config
|
||||
// Check if the host URL is valid
|
||||
if request.ValidateURL(arr.Host) == nil {
|
||||
exists.Host = arr.Host
|
||||
}
|
||||
exists.Token = cmp.Or(exists.Token, arr.Token)
|
||||
exists.Cleanup = arr.Cleanup
|
||||
exists.SkipRepair = arr.SkipRepair
|
||||
exists.DownloadUncached = arr.DownloadUncached
|
||||
exists.SelectedDebrid = arr.SelectedDebrid
|
||||
arrConfigs[name] = exists
|
||||
} else {
|
||||
// Add new arr config
|
||||
arrConfigs[name] = config.Arr{
|
||||
Name: arr.Name,
|
||||
Host: arr.Host,
|
||||
Token: arr.Token,
|
||||
Cleanup: arr.Cleanup,
|
||||
SkipRepair: arr.SkipRepair,
|
||||
DownloadUncached: arr.DownloadUncached,
|
||||
SelectedDebrid: arr.SelectedDebrid,
|
||||
Source: arr.Source,
|
||||
}
|
||||
}
|
||||
}
|
||||
// Convert map to slice
|
||||
arrs := make([]config.Arr, 0, len(arrConfigs))
|
||||
for _, a := range arrConfigs {
|
||||
arrs = append(arrs, a)
|
||||
}
|
||||
return arrs
|
||||
}
|
||||
|
||||
func (s *Storage) SyncFromConfig(arrs []config.Arr) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
arrConfigs := make(map[string]*Arr)
|
||||
for _, a := range arrs {
|
||||
arrConfigs[a.Name] = New(a.Name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
|
||||
}
|
||||
|
||||
// Add or update arrs from config
|
||||
for name, arr := range s.Arrs {
|
||||
if ac, ok := arrConfigs[name]; ok {
|
||||
// Update existing arr
|
||||
// is the host URL valid?
|
||||
if request.ValidateURL(ac.Host) == nil {
|
||||
ac.Host = arr.Host
|
||||
}
|
||||
ac.Token = cmp.Or(ac.Token, arr.Token)
|
||||
ac.Cleanup = arr.Cleanup
|
||||
ac.SkipRepair = arr.SkipRepair
|
||||
ac.DownloadUncached = arr.DownloadUncached
|
||||
ac.SelectedDebrid = arr.SelectedDebrid
|
||||
ac.Source = arr.Source
|
||||
arrConfigs[name] = ac
|
||||
} else {
|
||||
arrConfigs[name] = arr
|
||||
}
|
||||
}
|
||||
|
||||
// Replace the arrs map
|
||||
s.Arrs = arrConfigs
|
||||
|
||||
}
|
||||
|
||||
func (s *Storage) StartWorker(ctx context.Context) error {
|
||||
|
||||
ticker := time.NewTicker(10 * time.Second)
|
||||
|
||||
@@ -234,6 +234,35 @@ func (a *Arr) searchRadarr(files []ContentFile) error {
|
||||
}
|
||||
|
||||
func (a *Arr) SearchMissing(files []ContentFile) error {
|
||||
if len(files) == 0 {
|
||||
return nil
|
||||
}
|
||||
return a.batchSearchMissing(files)
|
||||
}
|
||||
|
||||
func (a *Arr) batchSearchMissing(files []ContentFile) error {
|
||||
if len(files) == 0 {
|
||||
return nil
|
||||
}
|
||||
BatchSize := 50
|
||||
// Batch search for missing files
|
||||
if len(files) > BatchSize {
|
||||
for i := 0; i < len(files); i += BatchSize {
|
||||
end := i + BatchSize
|
||||
if end > len(files) {
|
||||
end = len(files)
|
||||
}
|
||||
if err := a.searchMissing(files[i:end]); err != nil {
|
||||
// continue searching the rest of the files
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return a.searchMissing(files)
|
||||
}
|
||||
|
||||
func (a *Arr) searchMissing(files []ContentFile) error {
|
||||
switch a.Type {
|
||||
case Sonarr:
|
||||
return a.searchSonarr(files)
|
||||
@@ -245,6 +274,28 @@ func (a *Arr) SearchMissing(files []ContentFile) error {
|
||||
}
|
||||
|
||||
func (a *Arr) DeleteFiles(files []ContentFile) error {
|
||||
if len(files) == 0 {
|
||||
return nil
|
||||
}
|
||||
BatchSize := 50
|
||||
// Batch delete files
|
||||
if len(files) > BatchSize {
|
||||
for i := 0; i < len(files); i += BatchSize {
|
||||
end := i + BatchSize
|
||||
if end > len(files) {
|
||||
end = len(files)
|
||||
}
|
||||
if err := a.batchDeleteFiles(files[i:end]); err != nil {
|
||||
// continue deleting the rest of the files
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return a.batchDeleteFiles(files)
|
||||
}
|
||||
|
||||
func (a *Arr) batchDeleteFiles(files []ContentFile) error {
|
||||
ids := make([]int, 0)
|
||||
for _, f := range files {
|
||||
ids = append(ids, f.FileId)
|
||||
|
||||
119
pkg/debrid/account/account.go
Normal file
119
pkg/debrid/account/account.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v4"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
type Account struct {
|
||||
Debrid string `json:"debrid"` // The debrid service name, e.g. "realdebrid"
|
||||
links *xsync.Map[string, types.DownloadLink] // key is the sliced file link
|
||||
Index int `json:"index"` // The index of the account in the config
|
||||
Disabled atomic.Bool `json:"disabled"`
|
||||
Token string `json:"token"`
|
||||
TrafficUsed atomic.Int64 `json:"traffic_used"` // Traffic used in bytes
|
||||
Username string `json:"username"` // Username for the account
|
||||
httpClient *request.Client
|
||||
|
||||
// Account reactivation tracking
|
||||
DisableCount atomic.Int32 `json:"disable_count"`
|
||||
}
|
||||
|
||||
func (a *Account) Equals(other *Account) bool {
|
||||
if other == nil {
|
||||
return false
|
||||
}
|
||||
return a.Token == other.Token && a.Debrid == other.Debrid
|
||||
}
|
||||
|
||||
func (a *Account) Client() *request.Client {
|
||||
return a.httpClient
|
||||
}
|
||||
|
||||
// slice download link
|
||||
func (a *Account) sliceFileLink(fileLink string) string {
|
||||
if a.Debrid != "realdebrid" {
|
||||
return fileLink
|
||||
}
|
||||
if len(fileLink) < 39 {
|
||||
return fileLink
|
||||
}
|
||||
return fileLink[0:39]
|
||||
}
|
||||
|
||||
func (a *Account) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
|
||||
slicedLink := a.sliceFileLink(fileLink)
|
||||
dl, ok := a.links.Load(slicedLink)
|
||||
if !ok {
|
||||
return types.DownloadLink{}, types.ErrDownloadLinkNotFound
|
||||
}
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
func (a *Account) StoreDownloadLink(dl types.DownloadLink) {
|
||||
slicedLink := a.sliceFileLink(dl.Link)
|
||||
a.links.Store(slicedLink, dl)
|
||||
}
|
||||
func (a *Account) DeleteDownloadLink(fileLink string) {
|
||||
slicedLink := a.sliceFileLink(fileLink)
|
||||
a.links.Delete(slicedLink)
|
||||
}
|
||||
func (a *Account) ClearDownloadLinks() {
|
||||
a.links.Clear()
|
||||
}
|
||||
func (a *Account) DownloadLinksCount() int {
|
||||
return a.links.Size()
|
||||
}
|
||||
func (a *Account) StoreDownloadLinks(dls map[string]*types.DownloadLink) {
|
||||
for _, dl := range dls {
|
||||
a.StoreDownloadLink(*dl)
|
||||
}
|
||||
}
|
||||
|
||||
// MarkDisabled marks the account as disabled and increments the disable count
|
||||
func (a *Account) MarkDisabled() {
|
||||
a.Disabled.Store(true)
|
||||
a.DisableCount.Add(1)
|
||||
}
|
||||
|
||||
func (a *Account) Reset() {
|
||||
a.DisableCount.Store(0)
|
||||
a.Disabled.Store(false)
|
||||
}
|
||||
|
||||
func (a *Account) CheckBandwidth() error {
|
||||
// Get a one of the download links to check if the account is still valid
|
||||
downloadLink := ""
|
||||
a.links.Range(func(key string, dl types.DownloadLink) bool {
|
||||
if dl.DownloadLink != "" {
|
||||
downloadLink = dl.DownloadLink
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if downloadLink == "" {
|
||||
return fmt.Errorf("no download link found")
|
||||
}
|
||||
|
||||
// Let's check the download link status
|
||||
req, err := http.NewRequest(http.MethodGet, downloadLink, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Use a simple client
|
||||
client := http.DefaultClient
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||
return fmt.Errorf("account check failed with status code %d", resp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
239
pkg/debrid/account/manager.go
Normal file
239
pkg/debrid/account/manager.go
Normal file
@@ -0,0 +1,239 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v4"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"go.uber.org/ratelimit"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxDisableCount = 3
|
||||
)
|
||||
|
||||
type Manager struct {
|
||||
debrid string
|
||||
current atomic.Pointer[Account]
|
||||
accounts *xsync.Map[string, *Account]
|
||||
logger zerolog.Logger
|
||||
}
|
||||
|
||||
func NewManager(debridConf config.Debrid, downloadRL ratelimit.Limiter, logger zerolog.Logger) *Manager {
|
||||
m := &Manager{
|
||||
debrid: debridConf.Name,
|
||||
accounts: xsync.NewMap[string, *Account](),
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
var firstAccount *Account
|
||||
for idx, token := range debridConf.DownloadAPIKeys {
|
||||
if token == "" {
|
||||
continue
|
||||
}
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", token),
|
||||
}
|
||||
account := &Account{
|
||||
Debrid: debridConf.Name,
|
||||
Token: token,
|
||||
Index: idx,
|
||||
links: xsync.NewMap[string, types.DownloadLink](),
|
||||
httpClient: request.New(
|
||||
request.WithRateLimiter(downloadRL),
|
||||
request.WithLogger(logger),
|
||||
request.WithHeaders(headers),
|
||||
request.WithMaxRetries(3),
|
||||
request.WithRetryableStatus(429, 447, 502),
|
||||
request.WithProxy(debridConf.Proxy),
|
||||
),
|
||||
}
|
||||
m.accounts.Store(token, account)
|
||||
if firstAccount == nil {
|
||||
firstAccount = account
|
||||
}
|
||||
}
|
||||
m.current.Store(firstAccount)
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Manager) Active() []*Account {
|
||||
activeAccounts := make([]*Account, 0)
|
||||
m.accounts.Range(func(key string, acc *Account) bool {
|
||||
if !acc.Disabled.Load() {
|
||||
activeAccounts = append(activeAccounts, acc)
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
slices.SortFunc(activeAccounts, func(i, j *Account) int {
|
||||
return i.Index - j.Index
|
||||
})
|
||||
return activeAccounts
|
||||
}
|
||||
|
||||
func (m *Manager) All() []*Account {
|
||||
allAccounts := make([]*Account, 0)
|
||||
m.accounts.Range(func(key string, acc *Account) bool {
|
||||
allAccounts = append(allAccounts, acc)
|
||||
return true
|
||||
})
|
||||
|
||||
slices.SortFunc(allAccounts, func(i, j *Account) int {
|
||||
return i.Index - j.Index
|
||||
})
|
||||
return allAccounts
|
||||
}
|
||||
|
||||
func (m *Manager) Current() *Account {
|
||||
// Fast path - most common case
|
||||
current := m.current.Load()
|
||||
if current != nil && !current.Disabled.Load() {
|
||||
return current
|
||||
}
|
||||
|
||||
// Slow path - find new current account
|
||||
activeAccounts := m.Active()
|
||||
if len(activeAccounts) == 0 {
|
||||
// No active accounts left, try to use disabled ones
|
||||
m.logger.Warn().Str("debrid", m.debrid).Msg("No active accounts available, all accounts are disabled")
|
||||
allAccounts := m.All()
|
||||
if len(allAccounts) == 0 {
|
||||
m.logger.Error().Str("debrid", m.debrid).Msg("No accounts configured")
|
||||
m.current.Store(nil)
|
||||
return nil
|
||||
}
|
||||
m.current.Store(allAccounts[0])
|
||||
return allAccounts[0]
|
||||
}
|
||||
|
||||
newCurrent := activeAccounts[0]
|
||||
m.current.Store(newCurrent)
|
||||
return newCurrent
|
||||
}
|
||||
|
||||
func (m *Manager) Disable(account *Account) {
|
||||
if account == nil {
|
||||
return
|
||||
}
|
||||
|
||||
account.MarkDisabled()
|
||||
|
||||
// If we're disabling the current account, it will be replaced
|
||||
// on the next Current() call - no need to proactively update
|
||||
current := m.current.Load()
|
||||
if current != nil && current.Token == account.Token {
|
||||
// Optional: immediately find replacement
|
||||
activeAccounts := m.Active()
|
||||
if len(activeAccounts) > 0 {
|
||||
m.current.Store(activeAccounts[0])
|
||||
} else {
|
||||
m.current.Store(nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) Reset() {
|
||||
m.accounts.Range(func(key string, acc *Account) bool {
|
||||
acc.Reset()
|
||||
return true
|
||||
})
|
||||
|
||||
// Set current to first active account
|
||||
activeAccounts := m.Active()
|
||||
if len(activeAccounts) > 0 {
|
||||
m.current.Store(activeAccounts[0])
|
||||
} else {
|
||||
m.current.Store(nil)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) GetAccount(token string) (*Account, error) {
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("token cannot be empty")
|
||||
}
|
||||
acc, ok := m.accounts.Load(token)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("account not found for token")
|
||||
}
|
||||
return acc, nil
|
||||
}
|
||||
|
||||
func (m *Manager) GetDownloadLink(fileLink string) (types.DownloadLink, error) {
|
||||
current := m.Current()
|
||||
if current == nil {
|
||||
return types.DownloadLink{}, fmt.Errorf("no active account for debrid service %s", m.debrid)
|
||||
}
|
||||
return current.GetDownloadLink(fileLink)
|
||||
}
|
||||
|
||||
func (m *Manager) GetAccountFromDownloadLink(downloadLink types.DownloadLink) (*Account, error) {
|
||||
if downloadLink.Link == "" {
|
||||
return nil, fmt.Errorf("cannot get account from empty download link")
|
||||
}
|
||||
if downloadLink.Token == "" {
|
||||
return nil, fmt.Errorf("cannot get account from download link without token")
|
||||
}
|
||||
return m.GetAccount(downloadLink.Token)
|
||||
}
|
||||
|
||||
func (m *Manager) StoreDownloadLink(downloadLink types.DownloadLink) {
|
||||
if downloadLink.Link == "" || downloadLink.Token == "" {
|
||||
return
|
||||
}
|
||||
account, err := m.GetAccount(downloadLink.Token)
|
||||
if err != nil || account == nil {
|
||||
return
|
||||
}
|
||||
account.StoreDownloadLink(downloadLink)
|
||||
}
|
||||
|
||||
func (m *Manager) Stats() []map[string]any {
|
||||
stats := make([]map[string]any, 0)
|
||||
|
||||
for _, acc := range m.All() {
|
||||
maskedToken := utils.Mask(acc.Token)
|
||||
accountDetail := map[string]any{
|
||||
"in_use": acc.Equals(m.Current()),
|
||||
"order": acc.Index,
|
||||
"disabled": acc.Disabled.Load(),
|
||||
"token_masked": maskedToken,
|
||||
"username": acc.Username,
|
||||
"traffic_used": acc.TrafficUsed.Load(),
|
||||
"links_count": acc.DownloadLinksCount(),
|
||||
"debrid": acc.Debrid,
|
||||
}
|
||||
stats = append(stats, accountDetail)
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (m *Manager) CheckAndResetBandwidth() {
|
||||
found := false
|
||||
m.accounts.Range(func(key string, acc *Account) bool {
|
||||
if acc.Disabled.Load() && acc.DisableCount.Load() < MaxDisableCount {
|
||||
if err := acc.CheckBandwidth(); err == nil {
|
||||
acc.Disabled.Store(false)
|
||||
found = true
|
||||
m.logger.Info().Str("debrid", m.debrid).Str("token", utils.Mask(acc.Token)).Msg("Re-activated disabled account")
|
||||
} else {
|
||||
m.logger.Debug().Err(err).Str("debrid", m.debrid).Str("token", utils.Mask(acc.Token)).Msg("Account still disabled")
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if found {
|
||||
// If we re-activated any account, reset current to first active
|
||||
activeAccounts := m.Active()
|
||||
if len(activeAccounts) > 0 {
|
||||
m.current.Store(activeAccounts[0])
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
31
pkg/debrid/common/interface.go
Normal file
31
pkg/debrid/common/interface.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/account"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
SubmitMagnet(tr *types.Torrent) (*types.Torrent, error)
|
||||
CheckStatus(tr *types.Torrent) (*types.Torrent, error)
|
||||
GetFileDownloadLinks(tr *types.Torrent) error
|
||||
GetDownloadLink(tr *types.Torrent, file *types.File) (types.DownloadLink, error)
|
||||
DeleteTorrent(torrentId string) error
|
||||
IsAvailable(infohashes []string) map[string]bool
|
||||
GetDownloadUncached() bool
|
||||
UpdateTorrent(torrent *types.Torrent) error
|
||||
GetTorrent(torrentId string) (*types.Torrent, error)
|
||||
GetTorrents() ([]*types.Torrent, error)
|
||||
Name() string
|
||||
Logger() zerolog.Logger
|
||||
GetDownloadingStatus() []string
|
||||
RefreshDownloadLinks() error
|
||||
CheckLink(link string) error
|
||||
GetMountPath() string
|
||||
AccountManager() *account.Manager // Returns the active download account/token
|
||||
GetProfile() (*types.Profile, error)
|
||||
GetAvailableSlots() (int, error)
|
||||
SyncAccounts() error // Updates each accounts details(like traffic, username, etc.)
|
||||
DeleteDownloadLink(account *account.Account, downloadLink types.DownloadLink) error
|
||||
}
|
||||
@@ -1,13 +1,19 @@
|
||||
package debrid
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/common"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debridlink"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
|
||||
@@ -15,16 +21,15 @@ import (
|
||||
debridStore "github.com/sirrobot01/decypharr/pkg/debrid/store"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||
"sync"
|
||||
"time"
|
||||
"go.uber.org/ratelimit"
|
||||
)
|
||||
|
||||
type Debrid struct {
|
||||
cache *debridStore.Cache // Could be nil if not using WebDAV
|
||||
client types.Client // HTTP client for making requests to the debrid service
|
||||
client common.Client // HTTP client for making requests to the debrid service
|
||||
}
|
||||
|
||||
func (de *Debrid) Client() types.Client {
|
||||
func (de *Debrid) Client() common.Client {
|
||||
return de.client
|
||||
}
|
||||
|
||||
@@ -104,11 +109,54 @@ func (d *Storage) StartWorker(ctx context.Context) error {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
// Start all debrid syncAccounts
|
||||
// Runs every 1m
|
||||
if err := d.syncAccounts(); err != nil {
|
||||
return err
|
||||
// Start syncAccounts worker
|
||||
go d.syncAccountsWorker(ctx)
|
||||
|
||||
// Start bandwidth reset worker
|
||||
go d.checkBandwidthWorker(ctx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Storage) checkBandwidthWorker(ctx context.Context) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
ticker := time.NewTicker(30 * time.Minute)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
d.checkAccountBandwidth()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (d *Storage) checkAccountBandwidth() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
for _, debrid := range d.debrids {
|
||||
if debrid == nil || debrid.client == nil {
|
||||
continue
|
||||
}
|
||||
accountManager := debrid.client.AccountManager()
|
||||
if accountManager == nil {
|
||||
continue
|
||||
}
|
||||
accountManager.CheckAndResetBandwidth()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Storage) syncAccountsWorker(ctx context.Context) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
_ = d.syncAccounts()
|
||||
ticker := time.NewTicker(5 * time.Minute)
|
||||
go func() {
|
||||
for {
|
||||
@@ -120,7 +168,7 @@ func (d *Storage) StartWorker(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (d *Storage) syncAccounts() error {
|
||||
@@ -152,7 +200,7 @@ func (d *Storage) Debrids() map[string]*Debrid {
|
||||
return debridsCopy
|
||||
}
|
||||
|
||||
func (d *Storage) Client(name string) types.Client {
|
||||
func (d *Storage) Client(name string) common.Client {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
if client, exists := d.debrids[name]; exists {
|
||||
@@ -177,10 +225,10 @@ func (d *Storage) Reset() {
|
||||
d.lastUsed = ""
|
||||
}
|
||||
|
||||
func (d *Storage) Clients() map[string]types.Client {
|
||||
func (d *Storage) Clients() map[string]common.Client {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
clientsCopy := make(map[string]types.Client)
|
||||
clientsCopy := make(map[string]common.Client)
|
||||
for name, debrid := range d.debrids {
|
||||
if debrid != nil && debrid.client != nil {
|
||||
clientsCopy[name] = debrid.client
|
||||
@@ -201,10 +249,10 @@ func (d *Storage) Caches() map[string]*debridStore.Cache {
|
||||
return cachesCopy
|
||||
}
|
||||
|
||||
func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client {
|
||||
func (d *Storage) FilterClients(filter func(common.Client) bool) map[string]common.Client {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
filteredClients := make(map[string]types.Client)
|
||||
filteredClients := make(map[string]common.Client)
|
||||
for name, client := range d.debrids {
|
||||
if client != nil && filter(client.client) {
|
||||
filteredClients[name] = client.client
|
||||
@@ -213,18 +261,28 @@ func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types
|
||||
return filteredClients
|
||||
}
|
||||
|
||||
func createDebridClient(dc config.Debrid) (types.Client, error) {
|
||||
func createDebridClient(dc config.Debrid) (common.Client, error) {
|
||||
rateLimits := map[string]ratelimit.Limiter{}
|
||||
|
||||
mainRL := request.ParseRateLimit(dc.RateLimit)
|
||||
repairRL := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
|
||||
downloadRL := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
|
||||
|
||||
rateLimits["main"] = mainRL
|
||||
rateLimits["repair"] = repairRL
|
||||
rateLimits["download"] = downloadRL
|
||||
|
||||
switch dc.Name {
|
||||
case "realdebrid":
|
||||
return realdebrid.New(dc)
|
||||
return realdebrid.New(dc, rateLimits)
|
||||
case "torbox":
|
||||
return torbox.New(dc)
|
||||
return torbox.New(dc, rateLimits)
|
||||
case "debridlink":
|
||||
return debridlink.New(dc)
|
||||
return debridlink.New(dc, rateLimits)
|
||||
case "alldebrid":
|
||||
return alldebrid.New(dc)
|
||||
return alldebrid.New(dc, rateLimits)
|
||||
default:
|
||||
return realdebrid.New(dc)
|
||||
return realdebrid.New(dc, rateLimits)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -239,7 +297,7 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet
|
||||
Files: make(map[string]types.File),
|
||||
}
|
||||
|
||||
clients := store.FilterClients(func(c types.Client) bool {
|
||||
clients := store.FilterClients(func(c common.Client) bool {
|
||||
if selectedDebrid != "" && c.Name() != selectedDebrid {
|
||||
return false
|
||||
}
|
||||
@@ -254,13 +312,9 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet
|
||||
|
||||
// Override first, arr second, debrid third
|
||||
|
||||
if overrideDownloadUncached {
|
||||
debridTorrent.DownloadUncached = true
|
||||
} else if a.DownloadUncached != nil {
|
||||
if !overrideDownloadUncached && a.DownloadUncached != nil {
|
||||
// Arr cached is set
|
||||
debridTorrent.DownloadUncached = *a.DownloadUncached
|
||||
} else {
|
||||
debridTorrent.DownloadUncached = false
|
||||
overrideDownloadUncached = *a.DownloadUncached
|
||||
}
|
||||
|
||||
for _, db := range clients {
|
||||
@@ -273,8 +327,9 @@ func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet
|
||||
Str("Action", action).
|
||||
Msg("Processing torrent")
|
||||
|
||||
if !overrideDownloadUncached && a.DownloadUncached == nil {
|
||||
debridTorrent.DownloadUncached = db.GetDownloadUncached()
|
||||
// If debrid.DownloadUnached is true, it overrides everything
|
||||
if db.GetDownloadUncached() || overrideDownloadUncached {
|
||||
debridTorrent.DownloadUncached = true
|
||||
}
|
||||
|
||||
dbt, err := db.SubmitMagnet(debridTorrent)
|
||||
|
||||
@@ -3,25 +3,28 @@ package alldebrid
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/account"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"go.uber.org/ratelimit"
|
||||
)
|
||||
|
||||
type AllDebrid struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
accountsManager *account.Manager
|
||||
autoExpiresLinksAfter time.Duration
|
||||
DownloadUncached bool
|
||||
client *request.Client
|
||||
@@ -34,8 +37,7 @@ type AllDebrid struct {
|
||||
minimumFreeSlot int
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*AllDebrid, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*AllDebrid, error) {
|
||||
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
@@ -44,7 +46,7 @@ func New(dc config.Debrid) (*AllDebrid, error) {
|
||||
client := request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithLogger(_log),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithRateLimiter(ratelimits["main"]),
|
||||
request.WithProxy(dc.Proxy),
|
||||
)
|
||||
|
||||
@@ -56,7 +58,7 @@ func New(dc config.Debrid) (*AllDebrid, error) {
|
||||
name: "alldebrid",
|
||||
Host: "http://api.alldebrid.com/v4.1",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
client: client,
|
||||
@@ -102,11 +104,12 @@ func (ad *AllDebrid) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error
|
||||
}
|
||||
magnets := data.Data.Magnets
|
||||
if len(magnets) == 0 {
|
||||
return nil, fmt.Errorf("error adding torrent")
|
||||
return nil, fmt.Errorf("error adding torrent. No magnets returned")
|
||||
}
|
||||
magnet := magnets[0]
|
||||
torrentId := strconv.Itoa(magnet.ID)
|
||||
torrent.Id = torrentId
|
||||
torrent.Added = time.Now().Format(time.RFC3339)
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
@@ -293,7 +296,7 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
|
||||
|
||||
func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
filesCh := make(chan types.File, len(t.Files))
|
||||
linksCh := make(chan *types.DownloadLink, len(t.Files))
|
||||
linksCh := make(chan types.DownloadLink, len(t.Files))
|
||||
errCh := make(chan error, len(t.Files))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
@@ -306,10 +309,6 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if link == nil {
|
||||
errCh <- fmt.Errorf("download link is empty")
|
||||
return
|
||||
}
|
||||
linksCh <- link
|
||||
file.DownloadLink = link
|
||||
filesCh <- file
|
||||
@@ -327,17 +326,14 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
}
|
||||
|
||||
// Collect download links
|
||||
links := make(map[string]*types.DownloadLink, len(t.Files))
|
||||
links := make(map[string]types.DownloadLink, len(t.Files))
|
||||
|
||||
for link := range linksCh {
|
||||
if link == nil {
|
||||
if link.Empty() {
|
||||
continue
|
||||
}
|
||||
links[link.Link] = link
|
||||
}
|
||||
// Update the files with download links
|
||||
ad.accounts.SetDownloadLinks(links)
|
||||
|
||||
// Check for errors
|
||||
for err := range errCh {
|
||||
if err != nil {
|
||||
@@ -349,7 +345,7 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/link/unlock", ad.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("link", file.Link)
|
||||
@@ -357,22 +353,23 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := ad.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
var data DownloadLink
|
||||
if err = json.Unmarshal(resp, &data); err != nil {
|
||||
return nil, err
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
|
||||
if data.Error != nil {
|
||||
return nil, fmt.Errorf("error getting download link: %s", data.Error.Message)
|
||||
return types.DownloadLink{}, fmt.Errorf("error getting download link: %s", data.Error.Message)
|
||||
}
|
||||
link := data.Data.Link
|
||||
if link == "" {
|
||||
return nil, fmt.Errorf("download link is empty")
|
||||
return types.DownloadLink{}, fmt.Errorf("download link is empty")
|
||||
}
|
||||
now := time.Now()
|
||||
return &types.DownloadLink{
|
||||
dl := types.DownloadLink{
|
||||
Token: ad.APIKey,
|
||||
Link: file.Link,
|
||||
DownloadLink: link,
|
||||
Id: data.Data.Id,
|
||||
@@ -380,7 +377,10 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
|
||||
Filename: file.Name,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(ad.autoExpiresLinksAfter),
|
||||
}, nil
|
||||
}
|
||||
// Set the download link in the account
|
||||
ad.accountsManager.StoreDownloadLink(dl)
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||
@@ -416,8 +416,8 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||
return torrents, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
return nil, nil
|
||||
func (ad *AllDebrid) RefreshDownloadLinks() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetDownloadingStatus() []string {
|
||||
@@ -436,10 +436,6 @@ func (ad *AllDebrid) GetMountPath() string {
|
||||
return ad.MountPath
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) GetAvailableSlots() (int, error) {
|
||||
// This function is a placeholder for AllDebrid
|
||||
//TODO: Implement the logic to check available slots for AllDebrid
|
||||
@@ -494,10 +490,15 @@ func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) Accounts() *types.Accounts {
|
||||
return ad.accounts
|
||||
func (ad *AllDebrid) AccountManager() *account.Manager {
|
||||
return ad.accountsManager
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) SyncAccounts() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ad *AllDebrid) DeleteDownloadLink(account *account.Account, downloadLink types.DownloadLink) error {
|
||||
account.DeleteDownloadLink(downloadLink.Link)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,13 +4,16 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/account"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"time"
|
||||
"go.uber.org/ratelimit"
|
||||
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -20,7 +23,7 @@ type DebridLink struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
accountsManager *account.Manager
|
||||
DownloadUncached bool
|
||||
client *request.Client
|
||||
|
||||
@@ -34,9 +37,7 @@ type DebridLink struct {
|
||||
Profile *types.Profile `json:"profile,omitempty"`
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*DebridLink, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
|
||||
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*DebridLink, error) {
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
"Content-Type": "application/json",
|
||||
@@ -45,7 +46,7 @@ func New(dc config.Debrid) (*DebridLink, error) {
|
||||
client := request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithLogger(_log),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithRateLimiter(ratelimits["main"]),
|
||||
request.WithProxy(dc.Proxy),
|
||||
)
|
||||
|
||||
@@ -57,7 +58,7 @@ func New(dc config.Debrid) (*DebridLink, error) {
|
||||
name: "debridlink",
|
||||
Host: "https://debrid-link.com/api/v2",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
client: client,
|
||||
@@ -221,7 +222,6 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
|
||||
t.OriginalFilename = name
|
||||
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
||||
cfg := config.Get()
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
now := time.Now()
|
||||
for _, f := range data.Files {
|
||||
if !cfg.IsSizeAllowed(f.Size) {
|
||||
@@ -235,19 +235,19 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
|
||||
Path: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
}
|
||||
link := &types.DownloadLink{
|
||||
link := types.DownloadLink{
|
||||
Token: dl.APIKey,
|
||||
Filename: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
DownloadLink: f.DownloadURL,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||
}
|
||||
links[file.Link] = link
|
||||
file.DownloadLink = link
|
||||
t.Files[f.Name] = file
|
||||
dl.accountsManager.StoreDownloadLink(link)
|
||||
}
|
||||
|
||||
dl.accounts.SetDownloadLinks(links)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -284,8 +284,6 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||
t.MountPath = dl.MountPath
|
||||
t.Debrid = dl.name
|
||||
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
|
||||
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
now := time.Now()
|
||||
for _, f := range data.Files {
|
||||
file := types.File{
|
||||
@@ -297,20 +295,19 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||
Link: f.DownloadURL,
|
||||
Generated: now,
|
||||
}
|
||||
link := &types.DownloadLink{
|
||||
link := types.DownloadLink{
|
||||
Token: dl.APIKey,
|
||||
Filename: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
DownloadLink: f.DownloadURL,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||
}
|
||||
links[file.Link] = link
|
||||
file.DownloadLink = link
|
||||
t.Files[f.Name] = file
|
||||
dl.accountsManager.StoreDownloadLink(link)
|
||||
}
|
||||
|
||||
dl.accounts.SetDownloadLinks(links)
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
@@ -353,12 +350,12 @@ func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
return nil, nil
|
||||
func (dl *DebridLink) RefreshDownloadLinks() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
return dl.accounts.GetDownloadLink(file.Link)
|
||||
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
|
||||
return dl.accountsManager.GetDownloadLink(file.Link)
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetDownloadingStatus() []string {
|
||||
@@ -403,7 +400,6 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
|
||||
}
|
||||
|
||||
data := *res.Value
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
|
||||
if len(data) == 0 {
|
||||
return torrents, nil
|
||||
@@ -439,20 +435,20 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
|
||||
Path: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
}
|
||||
link := &types.DownloadLink{
|
||||
link := types.DownloadLink{
|
||||
Token: dl.APIKey,
|
||||
Filename: f.Name,
|
||||
Link: f.DownloadURL,
|
||||
DownloadLink: f.DownloadURL,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
|
||||
}
|
||||
links[file.Link] = link
|
||||
file.DownloadLink = link
|
||||
torrent.Files[f.Name] = file
|
||||
dl.accountsManager.StoreDownloadLink(link)
|
||||
}
|
||||
torrents = append(torrents, torrent)
|
||||
}
|
||||
dl.accounts.SetDownloadLinks(links)
|
||||
|
||||
return torrents, nil
|
||||
}
|
||||
@@ -465,10 +461,6 @@ func (dl *DebridLink) GetMountPath() string {
|
||||
return dl.MountPath
|
||||
}
|
||||
|
||||
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) GetAvailableSlots() (int, error) {
|
||||
//TODO: Implement the logic to check available slots for DebridLink
|
||||
return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink")
|
||||
@@ -519,10 +511,15 @@ func (dl *DebridLink) GetProfile() (*types.Profile, error) {
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) Accounts() *types.Accounts {
|
||||
return dl.accounts
|
||||
func (dl *DebridLink) AccountManager() *account.Manager {
|
||||
return dl.accountsManager
|
||||
}
|
||||
|
||||
func (dl *DebridLink) SyncAccounts() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DebridLink) DeleteDownloadLink(account *account.Account, downloadLink types.DownloadLink) error {
|
||||
account.DeleteDownloadLink(downloadLink.Link)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,11 +2,9 @@ package realdebrid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"io"
|
||||
"net/http"
|
||||
gourl "net/url"
|
||||
@@ -16,6 +14,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/account"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"go.uber.org/ratelimit"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
@@ -28,12 +30,11 @@ type RealDebrid struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
APIKey string
|
||||
accountsManager *account.Manager
|
||||
|
||||
DownloadUncached bool
|
||||
client *request.Client
|
||||
downloadClient *request.Client
|
||||
repairClient *request.Client
|
||||
autoExpiresLinksAfter time.Duration
|
||||
|
||||
@@ -49,10 +50,7 @@ type RealDebrid struct {
|
||||
limit int
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*RealDebrid, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
repairRl := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
|
||||
downloadRl := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
|
||||
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*RealDebrid, error) {
|
||||
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
@@ -68,27 +66,20 @@ func New(dc config.Debrid) (*RealDebrid, error) {
|
||||
name: "realdebrid",
|
||||
Host: "https://api.real-debrid.com/rest/1.0",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
UnpackRar: dc.UnpackRar,
|
||||
client: request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithRateLimiter(ratelimits["main"]),
|
||||
request.WithLogger(_log),
|
||||
request.WithMaxRetries(10),
|
||||
request.WithRetryableStatus(429, 502),
|
||||
request.WithProxy(dc.Proxy),
|
||||
),
|
||||
downloadClient: request.New(
|
||||
request.WithRateLimiter(downloadRl),
|
||||
request.WithLogger(_log),
|
||||
request.WithMaxRetries(10),
|
||||
request.WithRetryableStatus(429, 447, 502),
|
||||
request.WithProxy(dc.Proxy),
|
||||
),
|
||||
repairClient: request.New(
|
||||
request.WithRateLimiter(repairRl),
|
||||
request.WithRateLimiter(ratelimits["repair"]),
|
||||
request.WithHeaders(headers),
|
||||
request.WithLogger(_log),
|
||||
request.WithMaxRetries(4),
|
||||
@@ -356,21 +347,17 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
|
||||
if resp.StatusCode == 509 {
|
||||
return nil, utils.TooManyActiveDownloadsError
|
||||
}
|
||||
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if err = json.Unmarshal(bodyBytes, &data); err != nil {
|
||||
defer func(Body io.ReadCloser) {
|
||||
_ = Body.Close()
|
||||
}(resp.Body)
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.Id = data.Id
|
||||
t.Debrid = r.name
|
||||
t.MountPath = r.MountPath
|
||||
t.Added = time.Now().Format(time.RFC3339)
|
||||
return t, nil
|
||||
}
|
||||
|
||||
@@ -385,6 +372,7 @@ func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
|
||||
// Handle multiple_downloads
|
||||
|
||||
@@ -392,20 +380,16 @@ func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
|
||||
return nil, utils.TooManyActiveDownloadsError
|
||||
}
|
||||
|
||||
bodyBytes, _ := io.ReadAll(resp.Body)
|
||||
bodyBytes, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if err = json.Unmarshal(bodyBytes, &data); err != nil {
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.Id = data.Id
|
||||
t.Debrid = r.name
|
||||
t.MountPath = r.MountPath
|
||||
t.Added = time.Now().Format(time.RFC3339)
|
||||
return t, nil
|
||||
}
|
||||
|
||||
@@ -417,19 +401,15 @@ func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
bodyBytes, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return nil, utils.TorrentNotFoundError
|
||||
}
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
var data torrentInfo
|
||||
err = json.Unmarshal(bodyBytes, &data)
|
||||
if err != nil {
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t := &types.Torrent{
|
||||
@@ -460,19 +440,15 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bodyBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return utils.TorrentNotFoundError
|
||||
}
|
||||
bodyBytes, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||
return fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
|
||||
}
|
||||
var data torrentInfo
|
||||
err = json.Unmarshal(bodyBytes, &data)
|
||||
if err != nil {
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return err
|
||||
}
|
||||
t.Name = data.Filename
|
||||
@@ -487,7 +463,6 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
|
||||
t.Links = data.Links
|
||||
t.MountPath = r.MountPath
|
||||
t.Debrid = r.name
|
||||
t.Added = data.Added
|
||||
t.Files, _ = r.getSelectedFiles(t, data) // Get selected files
|
||||
|
||||
return nil
|
||||
@@ -519,6 +494,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
|
||||
t.Status = status
|
||||
t.Debrid = r.name
|
||||
t.MountPath = r.MountPath
|
||||
t.Added = data.Added
|
||||
if status == "waiting_files_selection" {
|
||||
t.Files = r.getTorrentFiles(t, data)
|
||||
if len(t.Files) == 0 {
|
||||
@@ -579,7 +555,7 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
var firstErr error
|
||||
|
||||
files := make(map[string]types.File)
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
links := make(map[string]types.DownloadLink)
|
||||
|
||||
_files := t.GetFiles()
|
||||
wg.Add(len(_files))
|
||||
@@ -597,7 +573,7 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
if link == nil {
|
||||
if link.Empty() {
|
||||
mu.Lock()
|
||||
if firstErr == nil {
|
||||
firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name)
|
||||
@@ -607,7 +583,6 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
}
|
||||
|
||||
file.DownloadLink = link
|
||||
|
||||
mu.Lock()
|
||||
files[file.Name] = file
|
||||
links[link.Link] = link
|
||||
@@ -622,7 +597,6 @@ func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
}
|
||||
|
||||
// Add links to cache
|
||||
r.accounts.SetDownloadLinks(links)
|
||||
t.Files = files
|
||||
return nil
|
||||
}
|
||||
@@ -643,8 +617,9 @@ func (r *RealDebrid) CheckLink(link string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
|
||||
func (r *RealDebrid) getDownloadLink(account *account.Account, file *types.File) (types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
|
||||
emptyLink := types.DownloadLink{}
|
||||
_link := file.Link
|
||||
if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 {
|
||||
_link = file.Link[0:39]
|
||||
@@ -653,70 +628,56 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
|
||||
"link": {_link},
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
|
||||
resp, err := r.downloadClient.Do(req)
|
||||
resp, err := account.Client().Do(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return emptyLink, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
defer func(Body io.ReadCloser) {
|
||||
_ = Body.Close()
|
||||
}(resp.Body)
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
// Read the response body to get the error message
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data ErrorResponse
|
||||
if err = json.Unmarshal(b, &data); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshalling %d || %s \n %s", resp.StatusCode, err, string(b))
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return emptyLink, fmt.Errorf("error unmarshalling %d || %s", resp.StatusCode, err)
|
||||
}
|
||||
switch data.ErrorCode {
|
||||
case 19:
|
||||
return nil, utils.HosterUnavailableError // File has been removed
|
||||
case 23:
|
||||
return nil, utils.TrafficExceededError
|
||||
case 24:
|
||||
return nil, utils.HosterUnavailableError // Link has been nerfed
|
||||
case 34:
|
||||
return nil, utils.TrafficExceededError // traffic exceeded
|
||||
case 35:
|
||||
return nil, utils.HosterUnavailableError
|
||||
case 36:
|
||||
return nil, utils.TrafficExceededError // traffic exceeded
|
||||
case 19, 24, 35:
|
||||
return emptyLink, utils.HosterUnavailableError // File has been removed
|
||||
case 23, 34, 36:
|
||||
return emptyLink, utils.TrafficExceededError
|
||||
default:
|
||||
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
|
||||
return emptyLink, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
|
||||
}
|
||||
}
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data UnrestrictResponse
|
||||
if err = json.Unmarshal(b, &data); err != nil {
|
||||
return nil, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err)
|
||||
if err = json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return emptyLink, fmt.Errorf("realdebrid API error: Error unmarshalling response: %w", err)
|
||||
}
|
||||
if data.Download == "" {
|
||||
return nil, fmt.Errorf("realdebrid API error: download link not found")
|
||||
return emptyLink, fmt.Errorf("realdebrid API error: download link not found")
|
||||
}
|
||||
now := time.Now()
|
||||
return &types.DownloadLink{
|
||||
dl := types.DownloadLink{
|
||||
Token: account.Token,
|
||||
Filename: data.Filename,
|
||||
Size: data.Filesize,
|
||||
Link: data.Link,
|
||||
DownloadLink: data.Download,
|
||||
Generated: now,
|
||||
ExpiresAt: now.Add(r.autoExpiresLinksAfter),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Store the link in the account
|
||||
account.StoreDownloadLink(dl)
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
|
||||
accounts := r.accounts.Active()
|
||||
|
||||
for _, account := range accounts {
|
||||
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
|
||||
downloadLink, err := r._getDownloadLink(file)
|
||||
|
||||
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
|
||||
accounts := r.accountsManager.Active()
|
||||
for _, _account := range accounts {
|
||||
downloadLink, err := r.getDownloadLink(_account, file)
|
||||
if err == nil {
|
||||
return downloadLink, nil
|
||||
}
|
||||
@@ -727,16 +688,16 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
|
||||
retries = 5
|
||||
} else {
|
||||
// If the error is not traffic exceeded, return the error
|
||||
return nil, err
|
||||
return downloadLink, err
|
||||
}
|
||||
backOff := 1 * time.Second
|
||||
for retries > 0 {
|
||||
downloadLink, err = r._getDownloadLink(file)
|
||||
downloadLink, err = r.getDownloadLink(_account, file)
|
||||
if err == nil {
|
||||
return downloadLink, nil
|
||||
}
|
||||
if !errors.Is(err, utils.TrafficExceededError) {
|
||||
return nil, err
|
||||
return downloadLink, err
|
||||
}
|
||||
// Add a delay before retrying
|
||||
time.Sleep(backOff)
|
||||
@@ -744,7 +705,7 @@ func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
|
||||
retries--
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("realdebrid API error: download link not found")
|
||||
return types.DownloadLink{}, fmt.Errorf("realdebrid API error: used all active accounts")
|
||||
}
|
||||
|
||||
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
|
||||
@@ -770,14 +731,10 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent,
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return 0, torrents, err
|
||||
}
|
||||
totalItems, _ := strconv.Atoi(resp.Header.Get("X-Total-Count"))
|
||||
var data []TorrentsResponse
|
||||
if err = json.Unmarshal(body, &data); err != nil {
|
||||
return 0, torrents, err
|
||||
if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
filenames := map[string]struct{}{}
|
||||
for _, t := range data {
|
||||
@@ -841,49 +798,47 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
|
||||
return allTorrents, nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
offset := 0
|
||||
limit := 1000
|
||||
func (r *RealDebrid) RefreshDownloadLinks() error {
|
||||
accounts := r.accountsManager.All()
|
||||
|
||||
accounts := r.accounts.Active()
|
||||
|
||||
if len(accounts) < 1 {
|
||||
// No active download keys. It's likely that the key has reached bandwidth limit
|
||||
return links, fmt.Errorf("no active download keys")
|
||||
}
|
||||
activeAccount := accounts[0]
|
||||
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", activeAccount.Token))
|
||||
for {
|
||||
dl, err := r._getDownloads(offset, limit)
|
||||
if err != nil {
|
||||
break
|
||||
for _, _account := range accounts {
|
||||
if _account == nil || _account.Token == "" {
|
||||
continue
|
||||
}
|
||||
if len(dl) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
for _, d := range dl {
|
||||
if _, exists := links[d.Link]; exists {
|
||||
// This is ordered by date, so we can skip the rest
|
||||
continue
|
||||
offset := 0
|
||||
limit := 1000
|
||||
links := make(map[string]*types.DownloadLink)
|
||||
for {
|
||||
dl, err := r.getDownloadLinks(_account, offset, limit)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
links[d.Link] = &d
|
||||
if len(dl) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
for _, d := range dl {
|
||||
if _, exists := links[d.Link]; exists {
|
||||
// This is ordered by date, so we can skip the rest
|
||||
continue
|
||||
}
|
||||
links[d.Link] = &d
|
||||
}
|
||||
|
||||
offset += len(dl)
|
||||
}
|
||||
|
||||
offset += len(dl)
|
||||
_account.StoreDownloadLinks(links)
|
||||
}
|
||||
|
||||
return links, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink, error) {
|
||||
func (r *RealDebrid) getDownloadLinks(account *account.Account, offset int, limit int) ([]types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/downloads?limit=%d", r.Host, limit)
|
||||
if offset > 0 {
|
||||
url = fmt.Sprintf("%s&offset=%d", url, offset)
|
||||
}
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := r.downloadClient.MakeRequest(req)
|
||||
resp, err := account.Client().MakeRequest(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -894,6 +849,7 @@ func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink,
|
||||
links := make([]types.DownloadLink, 0)
|
||||
for _, d := range data {
|
||||
links = append(links, types.DownloadLink{
|
||||
Token: account.Token,
|
||||
Filename: d.Filename,
|
||||
Size: d.Filesize,
|
||||
Link: d.Link,
|
||||
@@ -919,15 +875,6 @@ func (r *RealDebrid) GetMountPath() string {
|
||||
return r.MountPath
|
||||
}
|
||||
|
||||
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
|
||||
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||
if _, err := r.downloadClient.MakeRequest(req); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetProfile() (*types.Profile, error) {
|
||||
if r.Profile != nil {
|
||||
return r.Profile, nil
|
||||
@@ -958,8 +905,7 @@ func (r *RealDebrid) GetProfile() (*types.Profile, error) {
|
||||
}
|
||||
|
||||
func (r *RealDebrid) GetAvailableSlots() (int, error) {
|
||||
url := fmt.Sprintf("%s/torrents/activeCount", r.Host)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/torrents/activeCount", r.Host), nil)
|
||||
resp, err := r.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
@@ -971,35 +917,33 @@ func (r *RealDebrid) GetAvailableSlots() (int, error) {
|
||||
return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain minimum active pots
|
||||
}
|
||||
|
||||
func (r *RealDebrid) Accounts() *types.Accounts {
|
||||
return r.accounts
|
||||
func (r *RealDebrid) AccountManager() *account.Manager {
|
||||
return r.accountsManager
|
||||
}
|
||||
|
||||
func (r *RealDebrid) SyncAccounts() error {
|
||||
// Sync accounts with the current configuration
|
||||
if len(r.accounts.Active()) == 0 {
|
||||
if len(r.accountsManager.Active()) == 0 {
|
||||
return nil
|
||||
}
|
||||
for idx, account := range r.accounts.Active() {
|
||||
if err := r.syncAccount(idx, account); err != nil {
|
||||
r.logger.Error().Err(err).Msgf("Error syncing account %s", account.Username)
|
||||
for _, _account := range r.accountsManager.All() {
|
||||
if err := r.syncAccount(_account); err != nil {
|
||||
r.logger.Error().Err(err).Msgf("Error syncing account %s", _account.Username)
|
||||
continue // Skip this account and continue with the next
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) syncAccount(index int, account *types.Account) error {
|
||||
func (r *RealDebrid) syncAccount(account *account.Account) error {
|
||||
if account.Token == "" {
|
||||
return fmt.Errorf("account %s has no token", account.Username)
|
||||
}
|
||||
client := http.DefaultClient
|
||||
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/user", r.Host), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating request for account %s: %w", account.Username, err)
|
||||
}
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", account.Token))
|
||||
resp, err := client.Do(req)
|
||||
resp, err := account.Client().Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking account %s: %w", account.Username, err)
|
||||
}
|
||||
@@ -1019,8 +963,7 @@ func (r *RealDebrid) syncAccount(index int, account *types.Account) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating request for traffic details for account %s: %w", account.Username, err)
|
||||
}
|
||||
trafficReq.Header.Set("Authorization", fmt.Sprintf("Bearer %s", account.Token))
|
||||
trafficResp, err := client.Do(trafficReq)
|
||||
trafficResp, err := account.Client().Do(trafficReq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking traffic for account %s: %w", account.Username, err)
|
||||
}
|
||||
@@ -1032,13 +975,31 @@ func (r *RealDebrid) syncAccount(index int, account *types.Account) error {
|
||||
defer trafficResp.Body.Close()
|
||||
var trafficData TrafficResponse
|
||||
if err := json.NewDecoder(trafficResp.Body).Decode(&trafficData); err != nil {
|
||||
return fmt.Errorf("error decoding traffic details for account %s: %w", account.Username, err)
|
||||
// Skip logging traffic error
|
||||
account.TrafficUsed.Store(0)
|
||||
} else {
|
||||
today := time.Now().Format(time.DateOnly)
|
||||
if todayData, exists := trafficData[today]; exists {
|
||||
account.TrafficUsed.Store(todayData.Bytes)
|
||||
}
|
||||
}
|
||||
today := time.Now().Format(time.DateOnly)
|
||||
if todayData, exists := trafficData[today]; exists {
|
||||
account.TrafficUsed = todayData.Bytes
|
||||
}
|
||||
|
||||
r.accounts.Update(index, account)
|
||||
//r.accountsManager.Update(account)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RealDebrid) DeleteDownloadLink(account *account.Account, downloadLink types.DownloadLink) error {
|
||||
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, downloadLink.Id)
|
||||
req, _ := http.NewRequest(http.MethodDelete, url, nil)
|
||||
resp, err := account.Client().Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func(Body io.ReadCloser) {
|
||||
_ = Body.Close()
|
||||
}(resp.Body)
|
||||
if resp.StatusCode != http.StatusNoContent {
|
||||
return fmt.Errorf("realdebrid API error: %d", resp.StatusCode)
|
||||
}
|
||||
account.DeleteDownloadLink(downloadLink.Link)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,15 +20,17 @@ import (
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/account"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"github.com/sirrobot01/decypharr/pkg/version"
|
||||
"go.uber.org/ratelimit"
|
||||
)
|
||||
|
||||
type Torbox struct {
|
||||
name string
|
||||
Host string `json:"host"`
|
||||
APIKey string
|
||||
accounts *types.Accounts
|
||||
accountsManager *account.Manager
|
||||
autoExpiresLinksAfter time.Duration
|
||||
|
||||
DownloadUncached bool
|
||||
@@ -40,8 +42,7 @@ type Torbox struct {
|
||||
addSamples bool
|
||||
}
|
||||
|
||||
func New(dc config.Debrid) (*Torbox, error) {
|
||||
rl := request.ParseRateLimit(dc.RateLimit)
|
||||
func New(dc config.Debrid, ratelimits map[string]ratelimit.Limiter) (*Torbox, error) {
|
||||
|
||||
headers := map[string]string{
|
||||
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
|
||||
@@ -50,7 +51,7 @@ func New(dc config.Debrid) (*Torbox, error) {
|
||||
_log := logger.New(dc.Name)
|
||||
client := request.New(
|
||||
request.WithHeaders(headers),
|
||||
request.WithRateLimiter(rl),
|
||||
request.WithRateLimiter(ratelimits["main"]),
|
||||
request.WithLogger(_log),
|
||||
request.WithProxy(dc.Proxy),
|
||||
)
|
||||
@@ -63,7 +64,7 @@ func New(dc config.Debrid) (*Torbox, error) {
|
||||
name: "torbox",
|
||||
Host: "https://api.torbox.app/v1",
|
||||
APIKey: dc.APIKey,
|
||||
accounts: types.NewAccounts(dc),
|
||||
accountsManager: account.NewManager(dc, ratelimits["download"], _log),
|
||||
DownloadUncached: dc.DownloadUncached,
|
||||
autoExpiresLinksAfter: autoExpiresLinksAfter,
|
||||
client: client,
|
||||
@@ -138,6 +139,9 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
|
||||
payload := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(payload)
|
||||
_ = writer.WriteField("magnet", torrent.Magnet.Link)
|
||||
if !torrent.DownloadUncached {
|
||||
_ = writer.WriteField("add_only_if_cached", "true")
|
||||
}
|
||||
err := writer.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -161,6 +165,7 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
|
||||
torrent.Id = torrentId
|
||||
torrent.MountPath = tb.MountPath
|
||||
torrent.Debrid = tb.name
|
||||
torrent.Added = time.Now().Format(time.RFC3339)
|
||||
|
||||
return torrent, nil
|
||||
}
|
||||
@@ -403,7 +408,7 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error {
|
||||
|
||||
func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
filesCh := make(chan types.File, len(t.Files))
|
||||
linkCh := make(chan *types.DownloadLink)
|
||||
linkCh := make(chan types.DownloadLink)
|
||||
errCh := make(chan error, len(t.Files))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
@@ -416,7 +421,7 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if link != nil {
|
||||
if link.DownloadLink != "" {
|
||||
linkCh <- link
|
||||
file.DownloadLink = link
|
||||
}
|
||||
@@ -436,13 +441,6 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
files[file.Name] = file
|
||||
}
|
||||
|
||||
// Collect download links
|
||||
for link := range linkCh {
|
||||
if link != nil {
|
||||
tb.accounts.SetDownloadLink(link.Link, link)
|
||||
}
|
||||
}
|
||||
|
||||
// Check for errors
|
||||
for err := range errCh {
|
||||
if err != nil {
|
||||
@@ -454,7 +452,7 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
|
||||
func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (types.DownloadLink, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/requestdl/", tb.Host)
|
||||
query := gourl.Values{}
|
||||
query.Add("torrent_id", t.Id)
|
||||
@@ -470,7 +468,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
|
||||
Str("torrent_id", t.Id).
|
||||
Str("file_id", file.Id).
|
||||
Msg("Failed to make request to Torbox API")
|
||||
return nil, err
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
|
||||
var data DownloadLinksResponse
|
||||
@@ -480,7 +478,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
|
||||
Str("torrent_id", t.Id).
|
||||
Str("file_id", file.Id).
|
||||
Msg("Failed to unmarshal Torbox API response")
|
||||
return nil, err
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
|
||||
if data.Data == nil {
|
||||
@@ -491,7 +489,7 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
|
||||
Interface("error", data.Error).
|
||||
Str("detail", data.Detail).
|
||||
Msg("Torbox API returned no data")
|
||||
return nil, fmt.Errorf("error getting download links")
|
||||
return types.DownloadLink{}, fmt.Errorf("error getting download links")
|
||||
}
|
||||
|
||||
link := *data.Data
|
||||
@@ -500,11 +498,12 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
|
||||
Str("torrent_id", t.Id).
|
||||
Str("file_id", file.Id).
|
||||
Msg("Torbox API returned empty download link")
|
||||
return nil, fmt.Errorf("error getting download links")
|
||||
return types.DownloadLink{}, fmt.Errorf("error getting download links")
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
downloadLink := &types.DownloadLink{
|
||||
dl := types.DownloadLink{
|
||||
Token: tb.APIKey,
|
||||
Link: file.Link,
|
||||
DownloadLink: link,
|
||||
Id: file.Id,
|
||||
@@ -512,7 +511,9 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
|
||||
ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
|
||||
}
|
||||
|
||||
return downloadLink, nil
|
||||
tb.accountsManager.StoreDownloadLink(dl)
|
||||
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadingStatus() []string {
|
||||
@@ -520,7 +521,25 @@ func (tb *Torbox) GetDownloadingStatus() []string {
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/mylist", tb.Host)
|
||||
offset := 0
|
||||
allTorrents := make([]*types.Torrent, 0)
|
||||
|
||||
for {
|
||||
torrents, err := tb.getTorrents(offset)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(torrents) == 0 {
|
||||
break
|
||||
}
|
||||
allTorrents = append(allTorrents, torrents...)
|
||||
offset += len(torrents)
|
||||
}
|
||||
return allTorrents, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) getTorrents(offset int) ([]*types.Torrent, error) {
|
||||
url := fmt.Sprintf("%s/api/torrents/mylist?offset=%d", tb.Host, offset)
|
||||
req, _ := http.NewRequest(http.MethodGet, url, nil)
|
||||
resp, err := tb.client.MakeRequest(req)
|
||||
if err != nil {
|
||||
@@ -607,8 +626,8 @@ func (tb *Torbox) GetDownloadUncached() bool {
|
||||
return tb.DownloadUncached
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
|
||||
return nil, nil
|
||||
func (tb *Torbox) RefreshDownloadLinks() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) CheckLink(link string) error {
|
||||
@@ -619,10 +638,6 @@ func (tb *Torbox) GetMountPath() string {
|
||||
return tb.MountPath
|
||||
}
|
||||
|
||||
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) GetAvailableSlots() (int, error) {
|
||||
//TODO: Implement the logic to check available slots for Torbox
|
||||
return 0, fmt.Errorf("not implemented")
|
||||
@@ -632,10 +647,15 @@ func (tb *Torbox) GetProfile() (*types.Profile, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) Accounts() *types.Accounts {
|
||||
return tb.accounts
|
||||
func (tb *Torbox) AccountManager() *account.Manager {
|
||||
return tb.accountsManager
|
||||
}
|
||||
|
||||
func (tb *Torbox) SyncAccounts() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tb *Torbox) DeleteDownloadLink(account *account.Account, downloadLink types.DownloadLink) error {
|
||||
account.DeleteDownloadLink(downloadLink.Link)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,9 +4,10 @@ import (
|
||||
"bufio"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
@@ -17,7 +18,12 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v4"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/common"
|
||||
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"golang.org/x/sync/singleflight"
|
||||
|
||||
"encoding/json"
|
||||
_ "time/tzdata"
|
||||
@@ -72,18 +78,18 @@ type RepairRequest struct {
|
||||
|
||||
type Cache struct {
|
||||
dir string
|
||||
client types.Client
|
||||
client common.Client
|
||||
logger zerolog.Logger
|
||||
|
||||
torrents *torrentCache
|
||||
invalidDownloadLinks sync.Map
|
||||
folderNaming WebDavFolderNaming
|
||||
torrents *torrentCache
|
||||
folderNaming WebDavFolderNaming
|
||||
|
||||
listingDebouncer *utils.Debouncer[bool]
|
||||
// monitors
|
||||
repairRequest sync.Map
|
||||
failedToReinsert sync.Map
|
||||
downloadLinkRequests sync.Map
|
||||
invalidDownloadLinks *xsync.Map[string, string]
|
||||
repairRequest *xsync.Map[string, *reInsertRequest]
|
||||
failedToReinsert *xsync.Map[string, struct{}]
|
||||
failedLinksCounter *xsync.Map[string, atomic.Int32] // link -> counter
|
||||
|
||||
// repair
|
||||
repairChan chan RepairRequest
|
||||
@@ -108,9 +114,11 @@ type Cache struct {
|
||||
config config.Debrid
|
||||
customFolders []string
|
||||
mounter *rclone.Mount
|
||||
downloadSG singleflight.Group
|
||||
streamClient *http.Client
|
||||
}
|
||||
|
||||
func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount) *Cache {
|
||||
func NewDebridCache(dc config.Debrid, client common.Client, mounter *rclone.Mount) *Cache {
|
||||
cfg := config.Get()
|
||||
cet, err := time.LoadLocation("CET")
|
||||
if err != nil {
|
||||
@@ -153,6 +161,21 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
|
||||
|
||||
}
|
||||
_log := logger.New(fmt.Sprintf("%s-webdav", client.Name()))
|
||||
transport := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
TLSHandshakeTimeout: 30 * time.Second,
|
||||
ResponseHeaderTimeout: 60 * time.Second,
|
||||
MaxIdleConns: 100,
|
||||
MaxIdleConnsPerHost: 20,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
DisableKeepAlives: false,
|
||||
ForceAttemptHTTP2: false,
|
||||
}
|
||||
httpClient := &http.Client{
|
||||
Transport: transport,
|
||||
Timeout: 0,
|
||||
}
|
||||
|
||||
c := &Cache{
|
||||
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
|
||||
|
||||
@@ -171,7 +194,13 @@ func NewDebridCache(dc config.Debrid, client types.Client, mounter *rclone.Mount
|
||||
customFolders: customFolders,
|
||||
mounter: mounter,
|
||||
|
||||
ready: make(chan struct{}),
|
||||
ready: make(chan struct{}),
|
||||
invalidDownloadLinks: xsync.NewMap[string, string](),
|
||||
repairRequest: xsync.NewMap[string, *reInsertRequest](),
|
||||
failedToReinsert: xsync.NewMap[string, struct{}](),
|
||||
failedLinksCounter: xsync.NewMap[string, atomic.Int32](),
|
||||
streamClient: httpClient,
|
||||
repairChan: make(chan RepairRequest, 100), // Initialize the repair channel, max 100 requests buffered
|
||||
}
|
||||
|
||||
c.listingDebouncer = utils.NewDebouncer[bool](100*time.Millisecond, func(refreshRclone bool) {
|
||||
@@ -202,14 +231,12 @@ func (c *Cache) Reset() {
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.scheduler.StopJobs(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to stop scheduler jobs")
|
||||
}
|
||||
|
||||
if err := c.scheduler.Shutdown(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to stop scheduler")
|
||||
}
|
||||
|
||||
go func() {
|
||||
// Shutdown the scheduler (this will stop all jobs)
|
||||
if err := c.scheduler.Shutdown(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to stop scheduler")
|
||||
}
|
||||
}()
|
||||
// Stop the listing debouncer
|
||||
c.listingDebouncer.Stop()
|
||||
|
||||
@@ -222,10 +249,9 @@ func (c *Cache) Reset() {
|
||||
c.torrents.reset()
|
||||
|
||||
// 3. Clear any sync.Maps
|
||||
c.invalidDownloadLinks = sync.Map{}
|
||||
c.repairRequest = sync.Map{}
|
||||
c.failedToReinsert = sync.Map{}
|
||||
c.downloadLinkRequests = sync.Map{}
|
||||
c.invalidDownloadLinks = xsync.NewMap[string, string]()
|
||||
c.repairRequest = xsync.NewMap[string, *reInsertRequest]()
|
||||
c.failedToReinsert = xsync.NewMap[string, struct{}]()
|
||||
|
||||
// 5. Rebuild the listing debouncer
|
||||
c.listingDebouncer = utils.NewDebouncer[bool](
|
||||
@@ -258,7 +284,6 @@ func (c *Cache) Start(ctx context.Context) error {
|
||||
|
||||
// initial download links
|
||||
go c.refreshDownloadLinks(ctx)
|
||||
c.repairChan = make(chan RepairRequest, 100) // Initialize the repair channel, max 100 requests buffered
|
||||
go c.repairWorker(ctx)
|
||||
|
||||
cfg := config.Get()
|
||||
@@ -534,7 +559,7 @@ func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)
|
||||
mergedFiles := mergeFiles(o, updatedTorrent) // Useful for merging files across multiple torrents, while keeping the most recent
|
||||
updatedTorrent.Files = mergedFiles
|
||||
}
|
||||
c.torrents.set(torrentName, t, updatedTorrent)
|
||||
c.torrents.set(torrentName, t)
|
||||
go c.SaveTorrent(t)
|
||||
if callback != nil {
|
||||
go callback(updatedTorrent)
|
||||
@@ -550,7 +575,7 @@ func (c *Cache) setTorrents(torrents map[string]CachedTorrent, callback func())
|
||||
mergedFiles := mergeFiles(o, updatedTorrent)
|
||||
updatedTorrent.Files = mergedFiles
|
||||
}
|
||||
c.torrents.set(torrentName, t, updatedTorrent)
|
||||
c.torrents.set(torrentName, t)
|
||||
}
|
||||
c.SaveTorrents()
|
||||
if callback != nil {
|
||||
@@ -750,7 +775,7 @@ func (c *Cache) Add(t *types.Torrent) error {
|
||||
|
||||
}
|
||||
|
||||
func (c *Cache) Client() types.Client {
|
||||
func (c *Cache) Client() common.Client {
|
||||
return c.client
|
||||
}
|
||||
|
||||
|
||||
@@ -3,86 +3,72 @@ package store
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
type downloadLinkRequest struct {
|
||||
result string
|
||||
err error
|
||||
done chan struct{}
|
||||
}
|
||||
const (
|
||||
MaxLinkFailures = 10
|
||||
)
|
||||
|
||||
func newDownloadLinkRequest() *downloadLinkRequest {
|
||||
return &downloadLinkRequest{
|
||||
done: make(chan struct{}),
|
||||
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
|
||||
// Check
|
||||
counter, ok := c.failedLinksCounter.Load(fileLink)
|
||||
if ok && counter.Load() >= MaxLinkFailures {
|
||||
return types.DownloadLink{}, fmt.Errorf("file link %s has failed %d times, not retrying", fileLink, counter.Load())
|
||||
}
|
||||
}
|
||||
|
||||
func (r *downloadLinkRequest) Complete(result string, err error) {
|
||||
r.result = result
|
||||
r.err = err
|
||||
close(r.done)
|
||||
}
|
||||
// Use singleflight to deduplicate concurrent requests
|
||||
v, err, _ := c.downloadSG.Do(fileLink, func() (interface{}, error) {
|
||||
// Double-check cache inside singleflight (another goroutine might have filled it)
|
||||
if dl, err := c.checkDownloadLink(fileLink); err == nil && !dl.Empty() {
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
func (r *downloadLinkRequest) Wait() (string, error) {
|
||||
<-r.done
|
||||
return r.result, r.err
|
||||
}
|
||||
// Fetch the download link
|
||||
dl, err := c.fetchDownloadLink(torrentName, filename, fileLink)
|
||||
if err != nil {
|
||||
c.downloadSG.Forget(fileLink)
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
|
||||
if dl.Empty() {
|
||||
c.downloadSG.Forget(fileLink)
|
||||
err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
|
||||
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
|
||||
// Check link cache
|
||||
if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil {
|
||||
return dl, nil
|
||||
}
|
||||
})
|
||||
|
||||
if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
|
||||
// Wait for the other request to complete and use its result
|
||||
result := req.(*downloadLinkRequest)
|
||||
return result.Wait()
|
||||
}
|
||||
|
||||
// Create a new request object
|
||||
req := newDownloadLinkRequest()
|
||||
c.downloadLinkRequests.Store(fileLink, req)
|
||||
|
||||
dl, err := c.fetchDownloadLink(torrentName, filename, fileLink)
|
||||
if err != nil {
|
||||
req.Complete("", err)
|
||||
c.downloadLinkRequests.Delete(fileLink)
|
||||
return "", err
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
|
||||
if dl == nil || dl.DownloadLink == "" {
|
||||
err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
|
||||
req.Complete("", err)
|
||||
c.downloadLinkRequests.Delete(fileLink)
|
||||
return "", err
|
||||
}
|
||||
req.Complete(dl.DownloadLink, err)
|
||||
c.downloadLinkRequests.Delete(fileLink)
|
||||
return dl.DownloadLink, err
|
||||
return v.(types.DownloadLink), nil
|
||||
}
|
||||
|
||||
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*types.DownloadLink, error) {
|
||||
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (types.DownloadLink, error) {
|
||||
emptyDownloadLink := types.DownloadLink{}
|
||||
ct := c.GetTorrentByName(torrentName)
|
||||
if ct == nil {
|
||||
return nil, fmt.Errorf("torrent not found")
|
||||
return emptyDownloadLink, fmt.Errorf("torrent not found")
|
||||
}
|
||||
file, ok := ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
|
||||
return emptyDownloadLink, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
|
||||
}
|
||||
|
||||
if file.Link == "" {
|
||||
// file link is empty, refresh the torrent to get restricted links
|
||||
ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
|
||||
if ct == nil {
|
||||
return nil, fmt.Errorf("failed to refresh torrent")
|
||||
return emptyDownloadLink, fmt.Errorf("failed to refresh torrent")
|
||||
} else {
|
||||
file, ok = ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
|
||||
return emptyDownloadLink, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -92,12 +78,12 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
|
||||
// Try to reinsert the torrent?
|
||||
newCt, err := c.reInsertTorrent(ct)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reinsert torrent. %w", err)
|
||||
return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent. %w", err)
|
||||
}
|
||||
ct = newCt
|
||||
file, ok = ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||
return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,41 +92,39 @@ func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*type
|
||||
if err != nil {
|
||||
if errors.Is(err, utils.HosterUnavailableError) {
|
||||
c.logger.Trace().
|
||||
Str("token", utils.Mask(downloadLink.Token)).
|
||||
Str("filename", filename).
|
||||
Str("torrent_id", ct.Id).
|
||||
Msg("Hoster unavailable, attempting to reinsert torrent")
|
||||
|
||||
newCt, err := c.reInsertTorrent(ct)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to reinsert torrent: %w", err)
|
||||
return emptyDownloadLink, fmt.Errorf("failed to reinsert torrent: %w", err)
|
||||
}
|
||||
ct = newCt
|
||||
file, ok = ct.GetFile(filename)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||
return emptyDownloadLink, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
|
||||
}
|
||||
// Retry getting the download link
|
||||
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retry failed to get download link: %w", err)
|
||||
return emptyDownloadLink, fmt.Errorf("retry failed to get download link: %w", err)
|
||||
}
|
||||
if downloadLink == nil {
|
||||
return nil, fmt.Errorf("download link is empty after retry")
|
||||
if downloadLink.Empty() {
|
||||
return emptyDownloadLink, fmt.Errorf("download link is empty after retry")
|
||||
}
|
||||
return nil, nil
|
||||
return emptyDownloadLink, fmt.Errorf("download link is empty after retry")
|
||||
} else if errors.Is(err, utils.TrafficExceededError) {
|
||||
// This is likely a fair usage limit error
|
||||
return nil, err
|
||||
return emptyDownloadLink, err
|
||||
} else {
|
||||
return nil, fmt.Errorf("failed to get download link: %w", err)
|
||||
return emptyDownloadLink, fmt.Errorf("failed to get download link: %w", err)
|
||||
}
|
||||
}
|
||||
if downloadLink == nil {
|
||||
return nil, fmt.Errorf("download link is empty")
|
||||
if downloadLink.Empty() {
|
||||
return emptyDownloadLink, fmt.Errorf("download link is empty")
|
||||
}
|
||||
|
||||
// Set link to cache
|
||||
go c.client.Accounts().SetDownloadLink(fileLink, downloadLink)
|
||||
return downloadLink, nil
|
||||
}
|
||||
|
||||
@@ -151,34 +135,65 @@ func (c *Cache) GetFileDownloadLinks(t CachedTorrent) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) checkDownloadLink(link string) (string, error) {
|
||||
|
||||
dl, err := c.client.Accounts().GetDownloadLink(link)
|
||||
func (c *Cache) checkDownloadLink(link string) (types.DownloadLink, error) {
|
||||
dl, err := c.client.AccountManager().GetDownloadLink(link)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return dl, err
|
||||
}
|
||||
if !c.downloadLinkIsInvalid(dl.DownloadLink) {
|
||||
return dl.DownloadLink, nil
|
||||
return dl, nil
|
||||
}
|
||||
return "", fmt.Errorf("download link not found for %s", link)
|
||||
return types.DownloadLink{}, fmt.Errorf("download link not found for %s", link)
|
||||
}
|
||||
|
||||
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
|
||||
c.invalidDownloadLinks.Store(downloadLink, reason)
|
||||
func (c *Cache) IncrementFailedLinkCounter(link string) int32 {
|
||||
counter, _ := c.failedLinksCounter.LoadOrCompute(link, func() (atomic.Int32, bool) {
|
||||
return atomic.Int32{}, true
|
||||
})
|
||||
return counter.Add(1)
|
||||
}
|
||||
|
||||
func (c *Cache) MarkLinkAsInvalid(downloadLink types.DownloadLink, reason string) {
|
||||
// Increment file link error counter
|
||||
c.IncrementFailedLinkCounter(downloadLink.Link)
|
||||
|
||||
c.invalidDownloadLinks.Store(downloadLink.DownloadLink, reason)
|
||||
// Remove the download api key from active
|
||||
if reason == "bandwidth_exceeded" {
|
||||
// Disable the account
|
||||
_, account, err := c.client.Accounts().GetDownloadLinkWithAccount(link)
|
||||
accountManager := c.client.AccountManager()
|
||||
account, err := accountManager.GetAccount(downloadLink.Token)
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Str("token", utils.Mask(downloadLink.Token)).Msg("Failed to get account to disable")
|
||||
return
|
||||
}
|
||||
if account == nil {
|
||||
c.logger.Error().Str("token", utils.Mask(downloadLink.Token)).Msg("Account not found to disable")
|
||||
return
|
||||
}
|
||||
accountManager.Disable(account)
|
||||
} else if reason == "link_not_found" {
|
||||
// Let's try to delete the download link from the account, so we can fetch a new one next time
|
||||
accountManager := c.client.AccountManager()
|
||||
account, err := accountManager.GetAccount(downloadLink.Token)
|
||||
if err != nil {
|
||||
c.logger.Error().Err(err).Str("token", utils.Mask(downloadLink.Token)).Msg("Failed to get account to delete download link")
|
||||
return
|
||||
}
|
||||
if account == nil {
|
||||
c.logger.Error().Str("token", utils.Mask(downloadLink.Token)).Msg("Account not found to delete download link")
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.client.DeleteDownloadLink(account, downloadLink); err != nil {
|
||||
c.logger.Error().Err(err).Str("token", utils.Mask(downloadLink.Token)).Msg("Failed to delete download link from account")
|
||||
return
|
||||
}
|
||||
c.client.Accounts().Disable(account)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) downloadLinkIsInvalid(downloadLink string) bool {
|
||||
if reason, ok := c.invalidDownloadLinks.Load(downloadLink); ok {
|
||||
c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
|
||||
if _, ok := c.invalidDownloadLinks.Load(downloadLink); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
@@ -194,5 +209,10 @@ func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, e
|
||||
}
|
||||
|
||||
func (c *Cache) GetTotalActiveDownloadLinks() int {
|
||||
return c.client.Accounts().GetLinksCount()
|
||||
total := 0
|
||||
allAccounts := c.client.AccountManager().Active()
|
||||
for _, acc := range allAccounts {
|
||||
total += acc.DownloadLinksCount()
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"sort"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
// MergeFiles merges the files from multiple torrents into a single map.
|
||||
|
||||
@@ -3,13 +3,14 @@ package store
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
type fileInfo struct {
|
||||
@@ -120,7 +121,7 @@ func (c *Cache) refreshTorrents(ctx context.Context) {
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
c.listingDebouncer.Call(false)
|
||||
c.listingDebouncer.Call(true)
|
||||
|
||||
c.logger.Debug().Msgf("Processed %d new torrents", counter)
|
||||
}
|
||||
@@ -243,14 +244,10 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) {
|
||||
}
|
||||
defer c.downloadLinksRefreshMu.Unlock()
|
||||
|
||||
links, err := c.client.GetDownloadLinks()
|
||||
|
||||
if err != nil {
|
||||
if err := c.client.RefreshDownloadLinks(); err != nil {
|
||||
c.logger.Error().Err(err).Msg("Failed to get download links")
|
||||
return
|
||||
}
|
||||
|
||||
c.client.Accounts().SetDownloadLinks(links)
|
||||
|
||||
c.logger.Debug().Msgf("Refreshed download %d links", c.client.Accounts().GetLinksCount())
|
||||
c.logger.Debug().Msgf("Refreshed download links")
|
||||
}
|
||||
|
||||
@@ -4,11 +4,13 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v4"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type reInsertRequest struct {
|
||||
@@ -59,6 +61,8 @@ func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
|
||||
}
|
||||
}
|
||||
|
||||
// GetBrokenFiles checks the files in the torrent for broken links.
|
||||
// It also attempts to reinsert the torrent if any files are broken.
|
||||
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
|
||||
files := make(map[string]types.File)
|
||||
repairStrategy := config.Get().Repair.Strategy
|
||||
@@ -217,8 +221,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
||||
if _, ok := c.failedToReinsert.Load(oldID); ok {
|
||||
return ct, fmt.Errorf("can't retry re-insert for %s", torrent.Id)
|
||||
}
|
||||
if reqI, inFlight := c.repairRequest.Load(oldID); inFlight {
|
||||
req := reqI.(*reInsertRequest)
|
||||
if req, inFlight := c.repairRequest.Load(oldID); inFlight {
|
||||
c.logger.Debug().Msgf("Waiting for existing reinsert request to complete for torrent %s", oldID)
|
||||
return req.Wait()
|
||||
}
|
||||
@@ -232,12 +235,13 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
||||
|
||||
// Submit the magnet to the debrid service
|
||||
newTorrent := &types.Torrent{
|
||||
Name: torrent.Name,
|
||||
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
|
||||
InfoHash: torrent.InfoHash,
|
||||
Size: torrent.Size,
|
||||
Files: make(map[string]types.File),
|
||||
Arr: torrent.Arr,
|
||||
Name: torrent.Name,
|
||||
Magnet: utils.ConstructMagnet(torrent.InfoHash, torrent.Name),
|
||||
InfoHash: torrent.InfoHash,
|
||||
Size: torrent.Size,
|
||||
Files: make(map[string]types.File),
|
||||
Arr: torrent.Arr,
|
||||
DownloadUncached: false,
|
||||
}
|
||||
var err error
|
||||
newTorrent, err = c.client.SubmitMagnet(newTorrent)
|
||||
@@ -260,7 +264,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
||||
_ = c.client.DeleteTorrent(newTorrent.Id)
|
||||
}
|
||||
c.markAsFailedToReinsert(oldID)
|
||||
return ct, err
|
||||
return ct, fmt.Errorf("failed to check torrent: %w", err)
|
||||
}
|
||||
|
||||
// Update the torrent in the cache
|
||||
@@ -293,7 +297,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
||||
}
|
||||
}
|
||||
|
||||
req.Complete(ct, err)
|
||||
req.Complete(ct, nil)
|
||||
c.markAsSuccessfullyReinserted(oldID)
|
||||
|
||||
c.logger.Debug().Str("torrentId", torrent.Id).Msg("Torrent successfully reinserted")
|
||||
@@ -303,9 +307,8 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
|
||||
|
||||
func (c *Cache) resetInvalidLinks(ctx context.Context) {
|
||||
c.logger.Debug().Msgf("Resetting accounts")
|
||||
c.invalidDownloadLinks = sync.Map{}
|
||||
c.client.Accounts().Reset() // Reset the active download keys
|
||||
|
||||
c.invalidDownloadLinks = xsync.NewMap[string, string]()
|
||||
c.client.AccountManager().Reset() // Reset the active download keys
|
||||
// Refresh the download links
|
||||
c.refreshDownloadLinks(ctx)
|
||||
}
|
||||
|
||||
236
pkg/debrid/store/stream.go
Normal file
236
pkg/debrid/store/stream.go
Normal file
@@ -0,0 +1,236 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxNetworkRetries = 5
|
||||
MaxLinkRetries = 10
|
||||
)
|
||||
|
||||
type StreamError struct {
|
||||
Err error
|
||||
Retryable bool
|
||||
LinkError bool // true if we should try a new link
|
||||
}
|
||||
|
||||
func (e StreamError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// isConnectionError checks if the error is related to connection issues
|
||||
func (c *Cache) isConnectionError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
errStr := err.Error()
|
||||
// Check for common connection errors
|
||||
if strings.Contains(errStr, "EOF") ||
|
||||
strings.Contains(errStr, "connection reset by peer") ||
|
||||
strings.Contains(errStr, "broken pipe") ||
|
||||
strings.Contains(errStr, "connection refused") {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for net.Error types
|
||||
var netErr net.Error
|
||||
return errors.As(err, &netErr)
|
||||
}
|
||||
|
||||
func (c *Cache) Stream(ctx context.Context, start, end int64, linkFunc func() (types.DownloadLink, error)) (*http.Response, error) {
|
||||
|
||||
var lastErr error
|
||||
|
||||
downloadLink, err := linkFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get download link: %w", err)
|
||||
}
|
||||
|
||||
// Outer loop: Link retries
|
||||
for retry := 0; retry < MaxLinkRetries; retry++ {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
resp, err := c.doRequest(ctx, downloadLink.DownloadLink, start, end)
|
||||
if err != nil {
|
||||
// Network/connection error
|
||||
lastErr = err
|
||||
c.logger.Trace().
|
||||
Int("retries", retry).
|
||||
Err(err).
|
||||
Msg("Network request failed, retrying")
|
||||
|
||||
// Backoff and continue network retry
|
||||
if retry < MaxLinkRetries {
|
||||
backoff := time.Duration(retry+1) * time.Second
|
||||
jitter := time.Duration(rand.Intn(1000)) * time.Millisecond
|
||||
select {
|
||||
case <-time.After(backoff + jitter):
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
return nil, fmt.Errorf("network request failed after retries: %w", lastErr)
|
||||
}
|
||||
}
|
||||
|
||||
// Got response - check status
|
||||
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Bad status code - handle error
|
||||
streamErr := c.handleHTTPError(resp, downloadLink)
|
||||
resp.Body.Close()
|
||||
|
||||
if !streamErr.Retryable {
|
||||
return nil, streamErr // Fatal error
|
||||
}
|
||||
|
||||
if streamErr.LinkError {
|
||||
lastErr = streamErr
|
||||
// Try new link
|
||||
downloadLink, err = linkFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get download link: %w", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Retryable HTTP error (429, 503, 404 etc.) - retry network
|
||||
lastErr = streamErr
|
||||
c.logger.Trace().
|
||||
Err(lastErr).
|
||||
Str("downloadLink", downloadLink.DownloadLink).
|
||||
Str("link", downloadLink.Link).
|
||||
Int("retries", retry).
|
||||
Int("statusCode", resp.StatusCode).
|
||||
Msg("HTTP error, retrying")
|
||||
|
||||
if retry < MaxNetworkRetries-1 {
|
||||
backoff := time.Duration(retry+1) * time.Second
|
||||
jitter := time.Duration(rand.Intn(1000)) * time.Millisecond
|
||||
select {
|
||||
case <-time.After(backoff + jitter):
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("stream failed after %d link retries: %w", MaxLinkRetries, lastErr)
|
||||
}
|
||||
|
||||
func (c *Cache) StreamReader(ctx context.Context, start, end int64, linkFunc func() (types.DownloadLink, error)) (io.ReadCloser, error) {
|
||||
resp, err := c.Stream(ctx, start, end, linkFunc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Validate we got the expected content
|
||||
if resp.ContentLength == 0 {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("received empty response")
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
func (c *Cache) doRequest(ctx context.Context, url string, start, end int64) (*http.Response, error) {
|
||||
var lastErr error
|
||||
// Retry loop specifically for connection-level failures (EOF, reset, etc.)
|
||||
for connRetry := 0; connRetry < 3; connRetry++ {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, StreamError{Err: err, Retryable: false}
|
||||
}
|
||||
|
||||
// Set range header
|
||||
if start > 0 || end > 0 {
|
||||
rangeHeader := fmt.Sprintf("bytes=%d-", start)
|
||||
if end > 0 {
|
||||
rangeHeader = fmt.Sprintf("bytes=%d-%d", start, end)
|
||||
}
|
||||
req.Header.Set("Range", rangeHeader)
|
||||
}
|
||||
|
||||
// Set optimized headers for streaming
|
||||
req.Header.Set("Connection", "keep-alive")
|
||||
req.Header.Set("Accept-Encoding", "identity") // Disable compression for streaming
|
||||
req.Header.Set("Cache-Control", "no-cache")
|
||||
|
||||
resp, err := c.streamClient.Do(req)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
|
||||
// Check if it's a connection error that we should retry
|
||||
if c.isConnectionError(err) && connRetry < 2 {
|
||||
// Brief backoff before retrying with fresh connection
|
||||
time.Sleep(time.Duration(connRetry+1) * 100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, StreamError{Err: err, Retryable: true}
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
return nil, StreamError{Err: fmt.Errorf("connection retry exhausted: %w", lastErr), Retryable: true}
|
||||
}
|
||||
|
||||
func (c *Cache) handleHTTPError(resp *http.Response, downloadLink types.DownloadLink) StreamError {
|
||||
switch resp.StatusCode {
|
||||
case http.StatusNotFound:
|
||||
c.MarkLinkAsInvalid(downloadLink, "link_not_found")
|
||||
return StreamError{
|
||||
Err: errors.New("download link not found"),
|
||||
Retryable: true,
|
||||
LinkError: true,
|
||||
}
|
||||
|
||||
case http.StatusServiceUnavailable:
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
bodyStr := strings.ToLower(string(body))
|
||||
if strings.Contains(bodyStr, "bandwidth") || strings.Contains(bodyStr, "traffic") {
|
||||
c.MarkLinkAsInvalid(downloadLink, "bandwidth_exceeded")
|
||||
return StreamError{
|
||||
Err: errors.New("bandwidth limit exceeded"),
|
||||
Retryable: true,
|
||||
LinkError: true,
|
||||
}
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case http.StatusTooManyRequests:
|
||||
return StreamError{
|
||||
Err: fmt.Errorf("HTTP %d: rate limited", resp.StatusCode),
|
||||
Retryable: true,
|
||||
LinkError: false,
|
||||
}
|
||||
|
||||
default:
|
||||
retryable := resp.StatusCode >= 500
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return StreamError{
|
||||
Err: fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)),
|
||||
Retryable: retryable,
|
||||
LinkError: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -40,19 +40,27 @@ type directoryFilter struct {
|
||||
ageThreshold time.Duration // only for last_added
|
||||
}
|
||||
|
||||
type torrents struct {
|
||||
sync.RWMutex
|
||||
byID map[string]CachedTorrent
|
||||
byName map[string]CachedTorrent
|
||||
}
|
||||
|
||||
type folders struct {
|
||||
sync.RWMutex
|
||||
listing map[string][]os.FileInfo // folder name to file listing
|
||||
}
|
||||
|
||||
type CachedTorrentEntry struct {
|
||||
CachedTorrent
|
||||
deleted bool // Tombstone flag
|
||||
}
|
||||
|
||||
type torrentCache struct {
|
||||
torrents torrents
|
||||
mu sync.RWMutex
|
||||
torrents []CachedTorrentEntry // Changed to store entries with tombstone
|
||||
|
||||
// Lookup indices
|
||||
idIndex map[string]int
|
||||
nameIndex map[string]int
|
||||
|
||||
// Compaction tracking
|
||||
deletedCount atomic.Int32
|
||||
compactThreshold int // Trigger compaction when deletedCount exceeds this
|
||||
|
||||
listing atomic.Value
|
||||
folders folders
|
||||
@@ -69,12 +77,11 @@ type sortableFile struct {
|
||||
}
|
||||
|
||||
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
|
||||
|
||||
tc := &torrentCache{
|
||||
torrents: torrents{
|
||||
byID: make(map[string]CachedTorrent),
|
||||
byName: make(map[string]CachedTorrent),
|
||||
},
|
||||
torrents: []CachedTorrentEntry{},
|
||||
idIndex: make(map[string]int),
|
||||
nameIndex: make(map[string]int),
|
||||
compactThreshold: 100, // Compact when 100+ deleted entries
|
||||
folders: folders{
|
||||
listing: make(map[string][]os.FileInfo),
|
||||
},
|
||||
@@ -87,10 +94,12 @@ func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
|
||||
}
|
||||
|
||||
func (tc *torrentCache) reset() {
|
||||
tc.torrents.Lock()
|
||||
tc.torrents.byID = make(map[string]CachedTorrent)
|
||||
tc.torrents.byName = make(map[string]CachedTorrent)
|
||||
tc.torrents.Unlock()
|
||||
tc.mu.Lock()
|
||||
tc.torrents = tc.torrents[:0] // Clear the slice
|
||||
tc.idIndex = make(map[string]int) // Reset the ID index
|
||||
tc.nameIndex = make(map[string]int) // Reset the name index
|
||||
tc.deletedCount.Store(0)
|
||||
tc.mu.Unlock()
|
||||
|
||||
// reset the sorted listing
|
||||
tc.sortNeeded.Store(false)
|
||||
@@ -103,62 +112,183 @@ func (tc *torrentCache) reset() {
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
torrent, exists := tc.torrents.byID[id]
|
||||
return torrent, exists
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
|
||||
entry := tc.torrents[index]
|
||||
if !entry.deleted {
|
||||
return entry.CachedTorrent, true
|
||||
}
|
||||
}
|
||||
return CachedTorrent{}, false
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
torrent, exists := tc.torrents.byName[name]
|
||||
return torrent, exists
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
|
||||
entry := tc.torrents[index]
|
||||
if !entry.deleted {
|
||||
return entry.CachedTorrent, true
|
||||
}
|
||||
}
|
||||
return CachedTorrent{}, false
|
||||
}
|
||||
|
||||
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) {
|
||||
tc.torrents.Lock()
|
||||
// Set the id first
|
||||
func (tc *torrentCache) set(name string, torrent CachedTorrent) {
|
||||
tc.mu.Lock()
|
||||
defer tc.mu.Unlock()
|
||||
|
||||
tc.torrents.byName[name] = torrent
|
||||
tc.torrents.byID[torrent.Id] = torrent // This is the unadulterated torrent
|
||||
tc.torrents.Unlock()
|
||||
// Check if this torrent already exists (update case)
|
||||
if existingIndex, exists := tc.idIndex[torrent.Id]; exists && existingIndex < len(tc.torrents) {
|
||||
if !tc.torrents[existingIndex].deleted {
|
||||
// Update existing entry
|
||||
tc.torrents[existingIndex].CachedTorrent = torrent
|
||||
tc.sortNeeded.Store(true)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Add new torrent
|
||||
entry := CachedTorrentEntry{
|
||||
CachedTorrent: torrent,
|
||||
deleted: false,
|
||||
}
|
||||
|
||||
tc.torrents = append(tc.torrents, entry)
|
||||
index := len(tc.torrents) - 1
|
||||
|
||||
tc.idIndex[torrent.Id] = index
|
||||
tc.nameIndex[name] = index
|
||||
tc.sortNeeded.Store(true)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getListing() []os.FileInfo {
|
||||
// Fast path: if we have a sorted list and no changes since last sort
|
||||
if !tc.sortNeeded.Load() {
|
||||
return tc.listing.Load().([]os.FileInfo)
|
||||
}
|
||||
func (tc *torrentCache) removeId(id string) {
|
||||
tc.mu.Lock()
|
||||
defer tc.mu.Unlock()
|
||||
|
||||
// Slow path: need to sort
|
||||
tc.refreshListing()
|
||||
return tc.listing.Load().([]os.FileInfo)
|
||||
if index, exists := tc.idIndex[id]; exists && index < len(tc.torrents) {
|
||||
if !tc.torrents[index].deleted {
|
||||
// Mark as deleted (tombstone)
|
||||
tc.torrents[index].deleted = true
|
||||
tc.deletedCount.Add(1)
|
||||
|
||||
// Remove from indices
|
||||
delete(tc.idIndex, id)
|
||||
|
||||
// Find and remove from name index
|
||||
for name, idx := range tc.nameIndex {
|
||||
if idx == index {
|
||||
delete(tc.nameIndex, name)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
tc.sortNeeded.Store(true)
|
||||
|
||||
// Trigger compaction if threshold exceeded
|
||||
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
|
||||
go tc.compact()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
|
||||
tc.folders.RLock()
|
||||
defer tc.folders.RUnlock()
|
||||
if folderName == "" {
|
||||
return tc.getListing()
|
||||
func (tc *torrentCache) remove(name string) {
|
||||
tc.mu.Lock()
|
||||
defer tc.mu.Unlock()
|
||||
|
||||
if index, exists := tc.nameIndex[name]; exists && index < len(tc.torrents) {
|
||||
if !tc.torrents[index].deleted {
|
||||
// Mark as deleted (tombstone)
|
||||
torrentID := tc.torrents[index].CachedTorrent.Id
|
||||
tc.torrents[index].deleted = true
|
||||
tc.deletedCount.Add(1)
|
||||
|
||||
// Remove from indices
|
||||
delete(tc.nameIndex, name)
|
||||
delete(tc.idIndex, torrentID)
|
||||
|
||||
tc.sortNeeded.Store(true)
|
||||
|
||||
// Trigger compaction if threshold exceeded
|
||||
if tc.deletedCount.Load() > int32(tc.compactThreshold) {
|
||||
go tc.compact()
|
||||
}
|
||||
}
|
||||
}
|
||||
if folder, ok := tc.folders.listing[folderName]; ok {
|
||||
return folder
|
||||
}
|
||||
|
||||
// Compact removes tombstoned entries and rebuilds indices
|
||||
func (tc *torrentCache) compact() {
|
||||
tc.mu.Lock()
|
||||
defer tc.mu.Unlock()
|
||||
|
||||
deletedCount := tc.deletedCount.Load()
|
||||
if deletedCount == 0 {
|
||||
return // Nothing to compact
|
||||
}
|
||||
// If folder not found, return empty slice
|
||||
return []os.FileInfo{}
|
||||
|
||||
// Create new slice with only non-deleted entries
|
||||
newTorrents := make([]CachedTorrentEntry, 0, len(tc.torrents)-int(deletedCount))
|
||||
newIdIndex := make(map[string]int, len(tc.idIndex))
|
||||
newNameIndex := make(map[string]int, len(tc.nameIndex))
|
||||
|
||||
// Copy non-deleted entries
|
||||
for oldIndex, entry := range tc.torrents {
|
||||
if !entry.deleted {
|
||||
newIndex := len(newTorrents)
|
||||
newTorrents = append(newTorrents, entry)
|
||||
|
||||
// Find the name for this torrent (reverse lookup)
|
||||
for name, nameIndex := range tc.nameIndex {
|
||||
if nameIndex == oldIndex {
|
||||
newNameIndex[name] = newIndex
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
newIdIndex[entry.CachedTorrent.Id] = newIndex
|
||||
}
|
||||
}
|
||||
|
||||
// Replace old data with compacted data
|
||||
tc.torrents = newTorrents
|
||||
tc.idIndex = newIdIndex
|
||||
tc.nameIndex = newNameIndex
|
||||
|
||||
tc.deletedCount.Store(0)
|
||||
tc.sortNeeded.Store(true)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) ForceCompact() {
|
||||
tc.compact()
|
||||
}
|
||||
|
||||
func (tc *torrentCache) GetStats() (total, active, deleted int) {
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
total = len(tc.torrents)
|
||||
deleted = int(tc.deletedCount.Load())
|
||||
active = total - deleted
|
||||
|
||||
return total, active, deleted
|
||||
}
|
||||
|
||||
func (tc *torrentCache) refreshListing() {
|
||||
|
||||
tc.torrents.RLock()
|
||||
all := make([]sortableFile, 0, len(tc.torrents.byName))
|
||||
for name, t := range tc.torrents.byName {
|
||||
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
|
||||
tc.mu.RLock()
|
||||
all := make([]sortableFile, 0, len(tc.nameIndex))
|
||||
for name, index := range tc.nameIndex {
|
||||
if index < len(tc.torrents) && !tc.torrents[index].deleted {
|
||||
t := tc.torrents[index].CachedTorrent
|
||||
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
|
||||
}
|
||||
}
|
||||
tc.sortNeeded.Store(false)
|
||||
tc.torrents.RUnlock()
|
||||
tc.mu.RUnlock()
|
||||
|
||||
sort.Slice(all, func(i, j int) bool {
|
||||
if all[i].name != all[j].name {
|
||||
@@ -234,8 +364,31 @@ func (tc *torrentCache) refreshListing() {
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
|
||||
func (tc *torrentCache) getListing() []os.FileInfo {
|
||||
// Fast path: if we have a sorted list and no changes since last sort
|
||||
if !tc.sortNeeded.Load() {
|
||||
return tc.listing.Load().([]os.FileInfo)
|
||||
}
|
||||
|
||||
// Slow path: need to sort
|
||||
tc.refreshListing()
|
||||
return tc.listing.Load().([]os.FileInfo)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
|
||||
tc.folders.RLock()
|
||||
defer tc.folders.RUnlock()
|
||||
if folderName == "" {
|
||||
return tc.getListing()
|
||||
}
|
||||
if folder, ok := tc.folders.listing[folderName]; ok {
|
||||
return folder
|
||||
}
|
||||
// If folder not found, return empty slice
|
||||
return []os.FileInfo{}
|
||||
}
|
||||
|
||||
func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file sortableFile, now time.Time) bool {
|
||||
torrentName := strings.ToLower(file.name)
|
||||
for _, filter := range filters {
|
||||
matched := false
|
||||
@@ -278,51 +431,46 @@ func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file so
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getAll() map[string]CachedTorrent {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
result := make(map[string]CachedTorrent, len(tc.torrents.byID))
|
||||
for name, torrent := range tc.torrents.byID {
|
||||
result[name] = torrent
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
result := make(map[string]CachedTorrent)
|
||||
for _, entry := range tc.torrents {
|
||||
if !entry.deleted {
|
||||
result[entry.CachedTorrent.Id] = entry.CachedTorrent
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getAllCount() int {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
return len(tc.torrents.byID)
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
return len(tc.torrents) - int(tc.deletedCount.Load())
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
results := make(map[string]CachedTorrent, len(tc.torrents.byName))
|
||||
for name, torrent := range tc.torrents.byName {
|
||||
results[name] = torrent
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
results := make(map[string]CachedTorrent, len(tc.nameIndex))
|
||||
for name, index := range tc.nameIndex {
|
||||
if index < len(tc.torrents) && !tc.torrents[index].deleted {
|
||||
results[name] = tc.torrents[index].CachedTorrent
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
func (tc *torrentCache) getIdMaps() map[string]struct{} {
|
||||
tc.torrents.RLock()
|
||||
defer tc.torrents.RUnlock()
|
||||
res := make(map[string]struct{}, len(tc.torrents.byID))
|
||||
for id := range tc.torrents.byID {
|
||||
res[id] = struct{}{}
|
||||
tc.mu.RLock()
|
||||
defer tc.mu.RUnlock()
|
||||
|
||||
res := make(map[string]struct{}, len(tc.idIndex))
|
||||
for id, index := range tc.idIndex {
|
||||
if index < len(tc.torrents) && !tc.torrents[index].deleted {
|
||||
res[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (tc *torrentCache) removeId(id string) {
|
||||
tc.torrents.Lock()
|
||||
defer tc.torrents.Unlock()
|
||||
delete(tc.torrents.byID, id)
|
||||
tc.sortNeeded.Store(true)
|
||||
}
|
||||
|
||||
func (tc *torrentCache) remove(name string) {
|
||||
tc.torrents.Lock()
|
||||
defer tc.torrents.Unlock()
|
||||
delete(tc.torrents.byName, name)
|
||||
tc.sortNeeded.Store(true)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/go-co-op/gocron/v2"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
)
|
||||
|
||||
@@ -1,267 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Accounts struct {
|
||||
current *Account
|
||||
accounts []*Account
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewAccounts(debridConf config.Debrid) *Accounts {
|
||||
accounts := make([]*Account, 0)
|
||||
for idx, token := range debridConf.DownloadAPIKeys {
|
||||
if token == "" {
|
||||
continue
|
||||
}
|
||||
account := newAccount(debridConf.Name, token, idx)
|
||||
accounts = append(accounts, account)
|
||||
}
|
||||
|
||||
var current *Account
|
||||
if len(accounts) > 0 {
|
||||
current = accounts[0]
|
||||
}
|
||||
return &Accounts{
|
||||
accounts: accounts,
|
||||
current: current,
|
||||
}
|
||||
}
|
||||
|
||||
type Account struct {
|
||||
Debrid string // e.g., "realdebrid", "torbox", etc.
|
||||
Order int
|
||||
Disabled bool
|
||||
Token string `json:"token"`
|
||||
links map[string]*DownloadLink
|
||||
mu sync.RWMutex
|
||||
TrafficUsed int64 `json:"traffic_used"` // Traffic used in bytes
|
||||
Username string `json:"username"` // Username for the account
|
||||
}
|
||||
|
||||
func (a *Accounts) Active() []*Account {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
activeAccounts := make([]*Account, 0)
|
||||
for _, acc := range a.accounts {
|
||||
if !acc.Disabled {
|
||||
activeAccounts = append(activeAccounts, acc)
|
||||
}
|
||||
}
|
||||
return activeAccounts
|
||||
}
|
||||
|
||||
func (a *Accounts) All() []*Account {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
return a.accounts
|
||||
}
|
||||
|
||||
func (a *Accounts) Current() *Account {
|
||||
a.mu.RLock()
|
||||
if a.current != nil {
|
||||
current := a.current
|
||||
a.mu.RUnlock()
|
||||
return current
|
||||
}
|
||||
a.mu.RUnlock()
|
||||
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
// Double-check after acquiring write lock
|
||||
if a.current != nil {
|
||||
return a.current
|
||||
}
|
||||
|
||||
activeAccounts := make([]*Account, 0)
|
||||
for _, acc := range a.accounts {
|
||||
if !acc.Disabled {
|
||||
activeAccounts = append(activeAccounts, acc)
|
||||
}
|
||||
}
|
||||
|
||||
if len(activeAccounts) > 0 {
|
||||
a.current = activeAccounts[0]
|
||||
}
|
||||
return a.current
|
||||
}
|
||||
|
||||
func (a *Accounts) Disable(account *Account) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
account.disable()
|
||||
|
||||
if a.current == account {
|
||||
var newCurrent *Account
|
||||
for _, acc := range a.accounts {
|
||||
if !acc.Disabled {
|
||||
newCurrent = acc
|
||||
break
|
||||
}
|
||||
}
|
||||
a.current = newCurrent
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Accounts) Reset() {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
for _, acc := range a.accounts {
|
||||
acc.resetDownloadLinks()
|
||||
acc.Disabled = false
|
||||
}
|
||||
if len(a.accounts) > 0 {
|
||||
a.current = a.accounts[0]
|
||||
} else {
|
||||
a.current = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) {
|
||||
if a.Current() == nil {
|
||||
return nil, NoActiveAccountsError
|
||||
}
|
||||
dl, ok := a.Current().getLink(fileLink)
|
||||
if !ok {
|
||||
return nil, NoDownloadLinkError
|
||||
}
|
||||
if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
|
||||
return nil, DownloadLinkExpiredError
|
||||
}
|
||||
if dl.DownloadLink == "" {
|
||||
return nil, EmptyDownloadLinkError
|
||||
}
|
||||
return dl, nil
|
||||
}
|
||||
|
||||
func (a *Accounts) GetDownloadLinkWithAccount(fileLink string) (*DownloadLink, *Account, error) {
|
||||
currentAccount := a.Current()
|
||||
if currentAccount == nil {
|
||||
return nil, nil, NoActiveAccountsError
|
||||
}
|
||||
dl, ok := currentAccount.getLink(fileLink)
|
||||
if !ok {
|
||||
return nil, nil, NoDownloadLinkError
|
||||
}
|
||||
if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
|
||||
return nil, currentAccount, DownloadLinkExpiredError
|
||||
}
|
||||
if dl.DownloadLink == "" {
|
||||
return nil, currentAccount, EmptyDownloadLinkError
|
||||
}
|
||||
return dl, currentAccount, nil
|
||||
}
|
||||
|
||||
func (a *Accounts) SetDownloadLink(fileLink string, dl *DownloadLink) {
|
||||
if a.Current() == nil {
|
||||
return
|
||||
}
|
||||
a.Current().setLink(fileLink, dl)
|
||||
}
|
||||
|
||||
func (a *Accounts) DeleteDownloadLink(fileLink string) {
|
||||
if a.Current() == nil {
|
||||
return
|
||||
}
|
||||
a.Current().deleteLink(fileLink)
|
||||
}
|
||||
|
||||
func (a *Accounts) GetLinksCount() int {
|
||||
if a.Current() == nil {
|
||||
return 0
|
||||
}
|
||||
return a.Current().LinksCount()
|
||||
}
|
||||
|
||||
func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
|
||||
if a.Current() == nil {
|
||||
return
|
||||
}
|
||||
a.Current().setLinks(links)
|
||||
}
|
||||
|
||||
func (a *Accounts) Update(index int, account *Account) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
if index < 0 || index >= len(a.accounts) {
|
||||
return // Index out of bounds
|
||||
}
|
||||
|
||||
// Update the account at the specified index
|
||||
a.accounts[index] = account
|
||||
|
||||
// If the updated account is the current one, update the current reference
|
||||
if a.current == nil || a.current.Order == index {
|
||||
a.current = account
|
||||
}
|
||||
}
|
||||
|
||||
func newAccount(debridName, token string, index int) *Account {
|
||||
return &Account{
|
||||
Debrid: debridName,
|
||||
Token: token,
|
||||
Order: index,
|
||||
links: make(map[string]*DownloadLink),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
dl, ok := a.links[a.sliceFileLink(fileLink)]
|
||||
return dl, ok
|
||||
}
|
||||
func (a *Account) setLink(fileLink string, dl *DownloadLink) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
a.links[a.sliceFileLink(fileLink)] = dl
|
||||
}
|
||||
func (a *Account) deleteLink(fileLink string) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
delete(a.links, a.sliceFileLink(fileLink))
|
||||
}
|
||||
func (a *Account) resetDownloadLinks() {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
a.links = make(map[string]*DownloadLink)
|
||||
}
|
||||
func (a *Account) LinksCount() int {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
return len(a.links)
|
||||
}
|
||||
func (a *Account) disable() {
|
||||
a.Disabled = true
|
||||
}
|
||||
|
||||
func (a *Account) setLinks(links map[string]*DownloadLink) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
now := time.Now()
|
||||
for _, dl := range links {
|
||||
if !dl.ExpiresAt.IsZero() && dl.ExpiresAt.Before(now) {
|
||||
// Expired, continue
|
||||
continue
|
||||
}
|
||||
a.links[a.sliceFileLink(dl.Link)] = dl
|
||||
}
|
||||
}
|
||||
|
||||
// slice download link
|
||||
func (a *Account) sliceFileLink(fileLink string) string {
|
||||
if a.Debrid != "realdebrid" {
|
||||
return fileLink
|
||||
}
|
||||
if len(fileLink) < 39 {
|
||||
return fileLink
|
||||
}
|
||||
return fileLink[0:39]
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
SubmitMagnet(tr *Torrent) (*Torrent, error)
|
||||
CheckStatus(tr *Torrent) (*Torrent, error)
|
||||
GetFileDownloadLinks(tr *Torrent) error
|
||||
GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
|
||||
DeleteTorrent(torrentId string) error
|
||||
IsAvailable(infohashes []string) map[string]bool
|
||||
GetDownloadUncached() bool
|
||||
UpdateTorrent(torrent *Torrent) error
|
||||
GetTorrent(torrentId string) (*Torrent, error)
|
||||
GetTorrents() ([]*Torrent, error)
|
||||
Name() string
|
||||
Logger() zerolog.Logger
|
||||
GetDownloadingStatus() []string
|
||||
GetDownloadLinks() (map[string]*DownloadLink, error)
|
||||
CheckLink(link string) error
|
||||
GetMountPath() string
|
||||
Accounts() *Accounts // Returns the active download account/token
|
||||
DeleteDownloadLink(linkId string) error
|
||||
GetProfile() (*Profile, error)
|
||||
GetAvailableSlots() (int, error)
|
||||
SyncAccounts() error // Updates each accounts details(like traffic, username, etc.)
|
||||
}
|
||||
@@ -14,7 +14,7 @@ var NoActiveAccountsError = &Error{
|
||||
Code: "no_active_accounts",
|
||||
}
|
||||
|
||||
var NoDownloadLinkError = &Error{
|
||||
var ErrDownloadLinkNotFound = &Error{
|
||||
Message: "No download link found",
|
||||
Code: "no_download_link",
|
||||
}
|
||||
|
||||
@@ -42,6 +42,38 @@ type Torrent struct {
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (t *Torrent) Copy() *Torrent {
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
newFiles := make(map[string]File, len(t.Files))
|
||||
for k, v := range t.Files {
|
||||
newFiles[k] = v
|
||||
}
|
||||
|
||||
return &Torrent{
|
||||
Id: t.Id,
|
||||
InfoHash: t.InfoHash,
|
||||
Name: t.Name,
|
||||
Folder: t.Folder,
|
||||
Filename: t.Filename,
|
||||
OriginalFilename: t.OriginalFilename,
|
||||
Size: t.Size,
|
||||
Bytes: t.Bytes,
|
||||
Magnet: t.Magnet,
|
||||
Files: newFiles,
|
||||
Status: t.Status,
|
||||
Added: t.Added,
|
||||
Progress: t.Progress,
|
||||
Speed: t.Speed,
|
||||
Seeders: t.Seeders,
|
||||
Links: append([]string{}, t.Links...),
|
||||
MountPath: t.MountPath,
|
||||
Debrid: t.Debrid,
|
||||
Arr: t.Arr,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Torrent) GetSymlinkFolder(parent string) string {
|
||||
return filepath.Join(parent, t.Arr.Name, t.Folder)
|
||||
}
|
||||
@@ -84,18 +116,18 @@ func (t *Torrent) GetFiles() []File {
|
||||
}
|
||||
|
||||
type File struct {
|
||||
TorrentId string `json:"torrent_id"`
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsRar bool `json:"is_rar"`
|
||||
ByteRange *[2]int64 `json:"byte_range,omitempty"`
|
||||
Path string `json:"path"`
|
||||
Link string `json:"link"`
|
||||
AccountId string `json:"account_id"`
|
||||
Generated time.Time `json:"generated"`
|
||||
Deleted bool `json:"deleted"`
|
||||
DownloadLink *DownloadLink `json:"-"`
|
||||
TorrentId string `json:"torrent_id"`
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsRar bool `json:"is_rar"`
|
||||
ByteRange *[2]int64 `json:"byte_range,omitempty"`
|
||||
Path string `json:"path"`
|
||||
Link string `json:"link"`
|
||||
AccountId string `json:"account_id"`
|
||||
Generated time.Time `json:"generated"`
|
||||
Deleted bool `json:"deleted"`
|
||||
DownloadLink DownloadLink `json:"-"`
|
||||
}
|
||||
|
||||
func (t *Torrent) Cleanup(remove bool) {
|
||||
@@ -138,6 +170,8 @@ type Profile struct {
|
||||
}
|
||||
|
||||
type DownloadLink struct {
|
||||
Debrid string `json:"debrid"`
|
||||
Token string `json:"token"`
|
||||
Filename string `json:"filename"`
|
||||
Link string `json:"link"`
|
||||
DownloadLink string `json:"download_link"`
|
||||
@@ -147,6 +181,17 @@ type DownloadLink struct {
|
||||
ExpiresAt time.Time
|
||||
}
|
||||
|
||||
func (d *DownloadLink) String() string {
|
||||
return d.DownloadLink
|
||||
func (dl *DownloadLink) Valid() error {
|
||||
if dl.Empty() {
|
||||
return EmptyDownloadLinkError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dl *DownloadLink) Empty() bool {
|
||||
return dl.DownloadLink == ""
|
||||
}
|
||||
|
||||
func (dl *DownloadLink) String() string {
|
||||
return dl.DownloadLink
|
||||
}
|
||||
|
||||
@@ -2,16 +2,16 @@ package qbit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
)
|
||||
|
||||
type contextKey string
|
||||
@@ -22,45 +22,6 @@ const (
|
||||
arrKey contextKey = "arr"
|
||||
)
|
||||
|
||||
func validateServiceURL(urlStr string) error {
|
||||
if urlStr == "" {
|
||||
return fmt.Errorf("URL cannot be empty")
|
||||
}
|
||||
|
||||
// Try parsing as full URL first
|
||||
u, err := url.Parse(urlStr)
|
||||
if err == nil && u.Scheme != "" && u.Host != "" {
|
||||
// It's a full URL, validate scheme
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
return fmt.Errorf("URL scheme must be http or https")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if it's a host:port format (no scheme)
|
||||
if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
|
||||
// Try parsing with http:// prefix
|
||||
testURL := "http://" + urlStr
|
||||
u, err := url.Parse(testURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid host:port format: %w", err)
|
||||
}
|
||||
|
||||
if u.Host == "" {
|
||||
return fmt.Errorf("host is required in host:port format")
|
||||
}
|
||||
|
||||
// Validate port number
|
||||
if u.Port() == "" {
|
||||
return fmt.Errorf("port is required in host:port format")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid URL format: %s", urlStr)
|
||||
}
|
||||
|
||||
func getCategory(ctx context.Context) string {
|
||||
if category, ok := ctx.Value(categoryKey).(string); ok {
|
||||
return category
|
||||
@@ -97,14 +58,22 @@ func decodeAuthHeader(header string) (string, string, error) {
|
||||
bearer := string(bytes)
|
||||
|
||||
colonIndex := strings.LastIndex(bearer, ":")
|
||||
host := bearer[:colonIndex]
|
||||
token := bearer[colonIndex+1:]
|
||||
username := bearer[:colonIndex]
|
||||
password := bearer[colonIndex+1:]
|
||||
|
||||
return host, token, nil
|
||||
if username == "" || password == "" {
|
||||
return username, password, fmt.Errorf("empty username or password")
|
||||
}
|
||||
|
||||
return strings.TrimSpace(username), strings.TrimSpace(password), nil
|
||||
}
|
||||
|
||||
func (q *QBit) categoryContext(next http.Handler) http.Handler {
|
||||
// Print full URL for debugging
|
||||
|
||||
// Try to get category from URL query first
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Print request method and URL
|
||||
category := strings.Trim(r.URL.Query().Get("category"), "")
|
||||
if category == "" {
|
||||
// Get from form
|
||||
@@ -127,49 +96,116 @@ func (q *QBit) categoryContext(next http.Handler) http.Handler {
|
||||
// Only a valid host and token will be added to the context/config. The rest are manual
|
||||
func (q *QBit) authContext(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
cfg := config.Get()
|
||||
host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
|
||||
category := getCategory(r.Context())
|
||||
arrs := store.Get().Arr()
|
||||
// Check if arr exists
|
||||
a := arrs.Get(category)
|
||||
if a == nil {
|
||||
// Arr is not configured, create a new one
|
||||
downloadUncached := false
|
||||
a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
|
||||
}
|
||||
if err == nil {
|
||||
host = strings.TrimSpace(host)
|
||||
if host != "" {
|
||||
a.Host = host
|
||||
}
|
||||
token = strings.TrimSpace(token)
|
||||
if token != "" {
|
||||
a.Token = token
|
||||
}
|
||||
}
|
||||
if cfg.NeedsAuth() {
|
||||
if a.Host == "" || a.Token == "" {
|
||||
http.Error(w, "Unauthorized: Host and token are required for authentication", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
// try to use either Arr validate, or user auth validation
|
||||
if err := a.Validate(); err != nil {
|
||||
// If this failed, try to use user auth validation
|
||||
if !verifyAuth(host, token) {
|
||||
http.Error(w, "Unauthorized: Invalid host or token", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
a.Source = "auto"
|
||||
arrs.AddOrUpdate(a)
|
||||
username, password, err := getUsernameAndPassword(r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
category := getCategory(r.Context())
|
||||
a, err := q.authenticate(category, username, password)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
ctx := context.WithValue(r.Context(), arrKey, a)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func getUsernameAndPassword(r *http.Request) (string, string, error) {
|
||||
// Try to get from authorization header
|
||||
username, password, err := decodeAuthHeader(r.Header.Get("Authorization"))
|
||||
if err == nil && username != "" {
|
||||
return username, password, err
|
||||
}
|
||||
// Try to get from cookie
|
||||
sid, err := r.Cookie("sid")
|
||||
if err != nil {
|
||||
// try SID
|
||||
sid, err = r.Cookie("SID")
|
||||
}
|
||||
if err == nil {
|
||||
username, password, err = extractFromSID(sid.Value)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
}
|
||||
return username, password, nil
|
||||
}
|
||||
|
||||
func (q *QBit) authenticate(category, username, password string) (*arr.Arr, error) {
|
||||
cfg := config.Get()
|
||||
arrs := wire.Get().Arr()
|
||||
// Check if arr exists
|
||||
a := arrs.Get(category)
|
||||
if a == nil {
|
||||
// Arr is not configured, create a new one
|
||||
downloadUncached := false
|
||||
a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
|
||||
}
|
||||
a.Host = username
|
||||
a.Token = password
|
||||
arrValidated := false // This is a flag to indicate if arr validation was successful
|
||||
if (a.Host == "" || a.Token == "") && cfg.UseAuth {
|
||||
return nil, fmt.Errorf("unauthorized: Host and token are required for authentication(you've enabled authentication)")
|
||||
}
|
||||
if err := a.Validate(); err == nil {
|
||||
arrValidated = true
|
||||
}
|
||||
|
||||
if !arrValidated && cfg.UseAuth {
|
||||
// If arr validation failed, try to use user auth validation
|
||||
if !config.VerifyAuth(username, password) {
|
||||
return nil, fmt.Errorf("unauthorized: invalid credentials")
|
||||
}
|
||||
}
|
||||
a.Source = "auto"
|
||||
arrs.AddOrUpdate(a)
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func createSID(username, password string) string {
|
||||
// Create a verification hash
|
||||
cfg := config.Get()
|
||||
combined := fmt.Sprintf("%s|%s", username, password)
|
||||
hash := sha256.Sum256([]byte(combined + cfg.SecretKey()))
|
||||
hashStr := fmt.Sprintf("%x", hash)[:16] // First 16 chars
|
||||
// Base64 encode
|
||||
return base64.URLEncoding.EncodeToString([]byte(fmt.Sprintf("%s|%s", combined, hashStr)))
|
||||
}
|
||||
|
||||
func extractFromSID(sid string) (string, string, error) {
|
||||
// Decode base64
|
||||
decoded, err := base64.URLEncoding.DecodeString(sid)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("invalid SID format")
|
||||
}
|
||||
|
||||
// Split into parts: username:password:hash
|
||||
parts := strings.Split(string(decoded), "|")
|
||||
if len(parts) != 3 {
|
||||
return "", "", fmt.Errorf("invalid SID structure")
|
||||
}
|
||||
|
||||
username := parts[0]
|
||||
password := parts[1]
|
||||
providedHash := parts[2]
|
||||
|
||||
// Verify hash
|
||||
cfg := config.Get()
|
||||
combined := fmt.Sprintf("%s|%s", username, password)
|
||||
expectedHash := sha256.Sum256([]byte(combined + cfg.SecretKey()))
|
||||
expectedHashStr := fmt.Sprintf("%x", expectedHash)[:16]
|
||||
|
||||
if providedHash != expectedHashStr {
|
||||
return "", "", fmt.Errorf("invalid SID signature")
|
||||
}
|
||||
|
||||
return username, password, nil
|
||||
}
|
||||
|
||||
func hashesContext(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_hashes := chi.URLParam(r, "hashes")
|
||||
@@ -189,19 +225,3 @@ func hashesContext(next http.Handler) http.Handler {
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func verifyAuth(username, password string) bool {
|
||||
// If you're storing hashed password, use bcrypt to compare
|
||||
if username == "" {
|
||||
return false
|
||||
}
|
||||
auth := config.Get().GetAuth()
|
||||
if auth == nil {
|
||||
return false
|
||||
}
|
||||
if username != auth.Username {
|
||||
return false
|
||||
}
|
||||
err := bcrypt.CompareHashAndPassword([]byte(auth.Password), []byte(password))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
@@ -1,25 +1,33 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
)
|
||||
|
||||
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
_arr := getArrFromContext(ctx)
|
||||
if _arr == nil {
|
||||
// Arr not in context, return OK
|
||||
_, _ = w.Write([]byte("Ok."))
|
||||
cfg := config.Get()
|
||||
username := r.FormValue("username")
|
||||
password := r.FormValue("password")
|
||||
a, err := q.authenticate(getCategory(ctx), username, password)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
if err := _arr.Validate(); err != nil {
|
||||
q.logger.Error().Err(err).Msgf("Error validating arr")
|
||||
http.Error(w, "Invalid arr configuration", http.StatusBadRequest)
|
||||
return
|
||||
if cfg.UseAuth {
|
||||
cookie := &http.Cookie{
|
||||
Name: "sid",
|
||||
Value: createSID(a.Host, a.Token),
|
||||
Path: "/",
|
||||
SameSite: http.SameSiteNoneMode,
|
||||
}
|
||||
http.SetCookie(w, cookie)
|
||||
}
|
||||
_, _ = w.Write([]byte("Ok."))
|
||||
}
|
||||
@@ -94,6 +102,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.ToLower(r.FormValue("sequentialDownload")) == "true" {
|
||||
action = "download"
|
||||
}
|
||||
rmTrackerUrls := strings.ToLower(r.FormValue("firstLastPiecePrio")) == "true"
|
||||
|
||||
// Check config setting - if always remove tracker URLs is enabled, force it to true
|
||||
if q.AlwaysRmTrackerUrls {
|
||||
rmTrackerUrls = true
|
||||
}
|
||||
|
||||
debridName := r.FormValue("debrid")
|
||||
category := r.FormValue("category")
|
||||
_arr := getArrFromContext(ctx)
|
||||
@@ -110,7 +125,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
||||
urlList = append(urlList, strings.TrimSpace(u))
|
||||
}
|
||||
for _, url := range urlList {
|
||||
if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil {
|
||||
if err := q.addMagnet(ctx, url, _arr, debridName, action, rmTrackerUrls); err != nil {
|
||||
q.logger.Debug().Msgf("Error adding magnet: %s", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
@@ -123,7 +138,7 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
|
||||
if r.MultipartForm != nil && r.MultipartForm.File != nil {
|
||||
if files := r.MultipartForm.File["torrents"]; len(files) > 0 {
|
||||
for _, fileHeader := range files {
|
||||
if err := q.addTorrent(ctx, fileHeader, _arr, debridName, action); err != nil {
|
||||
if err := q.addTorrent(ctx, fileHeader, _arr, debridName, action, rmTrackerUrls); err != nil {
|
||||
q.logger.Debug().Err(err).Msgf("Error adding torrent")
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
|
||||
@@ -4,29 +4,31 @@ import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
)
|
||||
|
||||
type QBit struct {
|
||||
Username string
|
||||
Password string
|
||||
DownloadFolder string
|
||||
Categories []string
|
||||
storage *store.TorrentStorage
|
||||
logger zerolog.Logger
|
||||
Tags []string
|
||||
Username string
|
||||
Password string
|
||||
DownloadFolder string
|
||||
Categories []string
|
||||
AlwaysRmTrackerUrls bool
|
||||
storage *wire.TorrentStorage
|
||||
logger zerolog.Logger
|
||||
Tags []string
|
||||
}
|
||||
|
||||
func New() *QBit {
|
||||
_cfg := config.Get()
|
||||
cfg := _cfg.QBitTorrent
|
||||
return &QBit{
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
DownloadFolder: cfg.DownloadFolder,
|
||||
Categories: cfg.Categories,
|
||||
storage: store.Get().Torrents(),
|
||||
logger: logger.New("qbit"),
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
DownloadFolder: cfg.DownloadFolder,
|
||||
Categories: cfg.Categories,
|
||||
AlwaysRmTrackerUrls: cfg.AlwaysRmTrackerUrls,
|
||||
storage: wire.Get().Torrents(),
|
||||
logger: logger.New("qbit"),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,33 +1,50 @@
|
||||
package qbit
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
func (q *QBit) Routes() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
r.Use(q.categoryContext)
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(q.authContext)
|
||||
r.Post("/auth/login", q.handleLogin)
|
||||
r.Route("/torrents", func(r chi.Router) {
|
||||
r.Use(q.authContext)
|
||||
r.Use(hashesContext)
|
||||
|
||||
r.Get("/info", q.handleTorrentsInfo)
|
||||
r.Post("/info", q.handleTorrentsInfo)
|
||||
|
||||
r.Post("/add", q.handleTorrentsAdd)
|
||||
r.Post("/delete", q.handleTorrentsDelete)
|
||||
|
||||
r.Get("/categories", q.handleCategories)
|
||||
r.Post("/categories", q.handleCategories)
|
||||
|
||||
r.Post("/createCategory", q.handleCreateCategory)
|
||||
r.Post("/setCategory", q.handleSetCategory)
|
||||
r.Post("/addTags", q.handleAddTorrentTags)
|
||||
r.Post("/removeTags", q.handleRemoveTorrentTags)
|
||||
r.Post("/createTags", q.handleCreateTags)
|
||||
|
||||
r.Get("/tags", q.handleGetTags)
|
||||
r.Get("/pause", q.handleTorrentsPause)
|
||||
r.Get("/resume", q.handleTorrentsResume)
|
||||
r.Get("/recheck", q.handleTorrentRecheck)
|
||||
r.Get("/properties", q.handleTorrentProperties)
|
||||
r.Get("/files", q.handleTorrentFiles)
|
||||
|
||||
// Create POST equivalents for pause, resume, recheck
|
||||
r.Post("/tags", q.handleGetTags)
|
||||
r.Post("/pause", q.handleTorrentsPause)
|
||||
r.Post("/resume", q.handleTorrentsResume)
|
||||
r.Post("/recheck", q.handleTorrentRecheck)
|
||||
r.Post("/properties", q.handleTorrentProperties)
|
||||
r.Post("/files", q.handleTorrentFiles)
|
||||
|
||||
})
|
||||
|
||||
r.Route("/app", func(r chi.Router) {
|
||||
|
||||
@@ -3,24 +3,25 @@ package qbit
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
)
|
||||
|
||||
// All torrent-related helpers goes here
|
||||
func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, action string) error {
|
||||
magnet, err := utils.GetMagnetFromUrl(url)
|
||||
func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, action string, rmTrackerUrls bool) error {
|
||||
magnet, err := utils.GetMagnetFromUrl(url, rmTrackerUrls)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing magnet link: %w", err)
|
||||
}
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
|
||||
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
|
||||
importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent, false)
|
||||
|
||||
err = _store.AddTorrent(ctx, importReq)
|
||||
if err != nil {
|
||||
@@ -29,16 +30,16 @@ func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid s
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, action string) error {
|
||||
func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, action string, rmTrackerUrls bool) error {
|
||||
file, _ := fileHeader.Open()
|
||||
defer file.Close()
|
||||
var reader io.Reader = file
|
||||
magnet, err := utils.GetMagnetFromFile(reader, fileHeader.Filename)
|
||||
magnet, err := utils.GetMagnetFromFile(reader, fileHeader.Filename, rmTrackerUrls)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
|
||||
}
|
||||
_store := store.Get()
|
||||
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
|
||||
_store := wire.Get()
|
||||
importReq := wire.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", wire.ImportTypeQBitTorrent, false)
|
||||
err = _store.AddTorrent(ctx, importReq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to process torrent: %w", err)
|
||||
@@ -46,19 +47,19 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (q *QBit) ResumeTorrent(t *store.Torrent) bool {
|
||||
func (q *QBit) ResumeTorrent(t *wire.Torrent) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) PauseTorrent(t *store.Torrent) bool {
|
||||
func (q *QBit) PauseTorrent(t *wire.Torrent) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) RefreshTorrent(t *store.Torrent) bool {
|
||||
func (q *QBit) RefreshTorrent(t *wire.Torrent) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
|
||||
func (q *QBit) GetTorrentProperties(t *wire.Torrent) *TorrentProperties {
|
||||
return &TorrentProperties{
|
||||
AdditionDate: t.AddedOn,
|
||||
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
|
||||
@@ -83,7 +84,7 @@ func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
|
||||
}
|
||||
}
|
||||
|
||||
func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
|
||||
func (q *QBit) setTorrentTags(t *wire.Torrent, tags []string) bool {
|
||||
torrentTags := strings.Split(t.Tags, ",")
|
||||
for _, tag := range tags {
|
||||
if tag == "" {
|
||||
@@ -101,7 +102,7 @@ func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool {
|
||||
func (q *QBit) removeTorrentTags(t *wire.Torrent, tags []string) bool {
|
||||
torrentTags := strings.Split(t.Tags, ",")
|
||||
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
|
||||
q.Tags = utils.RemoveItem(q.Tags, tags...)
|
||||
|
||||
@@ -191,7 +191,6 @@ func (f *HttpFile) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
bytesRead, err := io.ReadFull(resp.Body, p)
|
||||
return bytesRead, err
|
||||
case http.StatusOK:
|
||||
// Some servers return the full content instead of partial
|
||||
fullData, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
|
||||
@@ -684,18 +683,3 @@ func (r *Reader) ExtractFile(file *File) ([]byte, error) {
|
||||
|
||||
return r.readBytes(file.DataOffset, int(file.CompressedSize))
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -4,19 +4,20 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
)
|
||||
|
||||
// Mount creates a mount using the rclone RC API with retry logic
|
||||
func (m *Manager) Mount(provider, webdavURL string) error {
|
||||
return m.mountWithRetry(provider, webdavURL, 3)
|
||||
func (m *Manager) Mount(mountPath, provider, webdavURL string) error {
|
||||
return m.mountWithRetry(mountPath, provider, webdavURL, 3)
|
||||
}
|
||||
|
||||
// mountWithRetry attempts to mount with retry logic
|
||||
func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) error {
|
||||
func (m *Manager) mountWithRetry(mountPath, provider, webdavURL string, maxRetries int) error {
|
||||
if !m.IsReady() {
|
||||
if err := m.WaitForReady(30 * time.Second); err != nil {
|
||||
return fmt.Errorf("rclone RC server not ready: %w", err)
|
||||
@@ -34,7 +35,7 @@ func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) err
|
||||
time.Sleep(wait)
|
||||
}
|
||||
|
||||
if err := m.performMount(provider, webdavURL); err != nil {
|
||||
if err := m.performMount(mountPath, provider, webdavURL); err != nil {
|
||||
m.logger.Error().
|
||||
Err(err).
|
||||
Str("provider", provider).
|
||||
@@ -49,13 +50,17 @@ func (m *Manager) mountWithRetry(provider, webdavURL string, maxRetries int) err
|
||||
}
|
||||
|
||||
// performMount performs a single mount attempt
|
||||
func (m *Manager) performMount(provider, webdavURL string) error {
|
||||
func (m *Manager) performMount(mountPath, provider, webdavURL string) error {
|
||||
cfg := config.Get()
|
||||
mountPath := filepath.Join(cfg.Rclone.MountPath, provider)
|
||||
|
||||
// Create mount directory
|
||||
if err := os.MkdirAll(mountPath, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create mount directory %s: %w", mountPath, err)
|
||||
// Create mount directory(except on Windows, cos winFSP handles it)
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := os.MkdirAll(mountPath, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create mount directory %s: %w", mountPath, err)
|
||||
}
|
||||
} else {
|
||||
// In fact, delete the mount if it exists, to avoid issues
|
||||
_ = os.Remove(mountPath) // Ignore error
|
||||
}
|
||||
|
||||
// Check if already mounted
|
||||
@@ -94,18 +99,35 @@ func (m *Manager) performMount(provider, webdavURL string) error {
|
||||
"VolumeName": fmt.Sprintf("decypharr-%s", provider),
|
||||
}
|
||||
|
||||
if cfg.Rclone.AsyncRead != nil {
|
||||
mountOpt["AsyncRead"] = *cfg.Rclone.AsyncRead
|
||||
}
|
||||
|
||||
if cfg.Rclone.UseMmap {
|
||||
mountOpt["UseMmap"] = cfg.Rclone.UseMmap
|
||||
}
|
||||
|
||||
if cfg.Rclone.Transfers != 0 {
|
||||
mountOpt["Transfers"] = cfg.Rclone.Transfers
|
||||
}
|
||||
|
||||
configOpts := make(map[string]interface{})
|
||||
|
||||
if cfg.Rclone.BufferSize != "" {
|
||||
configOpts["BufferSize"] = cfg.Rclone.BufferSize
|
||||
}
|
||||
|
||||
if cfg.Rclone.BwLimit != "" {
|
||||
configOpts["BwLimit"] = cfg.Rclone.BwLimit
|
||||
}
|
||||
|
||||
if len(configOpts) > 0 {
|
||||
// Only add _config if there are options to set
|
||||
mountArgs["_config"] = configOpts
|
||||
}
|
||||
vfsOpt := map[string]interface{}{
|
||||
"CacheMode": cfg.Rclone.VfsCacheMode,
|
||||
"CacheMode": cfg.Rclone.VfsCacheMode,
|
||||
"DirCacheTime": cfg.Rclone.DirCacheTime,
|
||||
}
|
||||
vfsOpt["PollInterval"] = 0 // Poll interval not supported for webdav, set to 0
|
||||
|
||||
@@ -115,6 +137,13 @@ func (m *Manager) performMount(provider, webdavURL string) error {
|
||||
if cfg.Rclone.VfsCacheMaxAge != "" {
|
||||
vfsOpt["CacheMaxAge"] = cfg.Rclone.VfsCacheMaxAge
|
||||
}
|
||||
if cfg.Rclone.VfsDiskSpaceTotal != "" {
|
||||
vfsOpt["DiskSpaceTotalSize"] = cfg.Rclone.VfsDiskSpaceTotal
|
||||
}
|
||||
if cfg.Rclone.VfsReadChunkSizeLimit != "" {
|
||||
vfsOpt["ChunkSizeLimit"] = cfg.Rclone.VfsReadChunkSizeLimit
|
||||
}
|
||||
|
||||
if cfg.Rclone.VfsCacheMaxSize != "" {
|
||||
vfsOpt["CacheMaxSize"] = cfg.Rclone.VfsCacheMaxSize
|
||||
}
|
||||
@@ -127,6 +156,19 @@ func (m *Manager) performMount(provider, webdavURL string) error {
|
||||
if cfg.Rclone.VfsReadAhead != "" {
|
||||
vfsOpt["ReadAhead"] = cfg.Rclone.VfsReadAhead
|
||||
}
|
||||
|
||||
if cfg.Rclone.VfsCacheMinFreeSpace != "" {
|
||||
vfsOpt["CacheMinFreeSpace"] = cfg.Rclone.VfsCacheMinFreeSpace
|
||||
}
|
||||
|
||||
if cfg.Rclone.VfsFastFingerprint {
|
||||
vfsOpt["FastFingerprint"] = cfg.Rclone.VfsFastFingerprint
|
||||
}
|
||||
|
||||
if cfg.Rclone.VfsReadChunkStreams != 0 {
|
||||
vfsOpt["ChunkStreams"] = cfg.Rclone.VfsReadChunkStreams
|
||||
}
|
||||
|
||||
if cfg.Rclone.NoChecksum {
|
||||
vfsOpt["NoChecksum"] = cfg.Rclone.NoChecksum
|
||||
}
|
||||
@@ -137,11 +179,19 @@ func (m *Manager) performMount(provider, webdavURL string) error {
|
||||
|
||||
// Add mount options based on configuration
|
||||
if cfg.Rclone.UID != 0 {
|
||||
mountOpt["UID"] = cfg.Rclone.UID
|
||||
vfsOpt["UID"] = cfg.Rclone.UID
|
||||
}
|
||||
if cfg.Rclone.GID != 0 {
|
||||
mountOpt["GID"] = cfg.Rclone.GID
|
||||
vfsOpt["GID"] = cfg.Rclone.GID
|
||||
}
|
||||
|
||||
if cfg.Rclone.Umask != "" {
|
||||
umask, err := strconv.ParseInt(cfg.Rclone.Umask, 8, 32)
|
||||
if err == nil {
|
||||
vfsOpt["Umask"] = uint32(umask)
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.Rclone.AttrTimeout != "" {
|
||||
if attrTimeout, err := time.ParseDuration(cfg.Rclone.AttrTimeout); err == nil {
|
||||
mountOpt["AttrTimeout"] = attrTimeout.String()
|
||||
@@ -159,7 +209,7 @@ func (m *Manager) performMount(provider, webdavURL string) error {
|
||||
_, err := m.makeRequest(req, true)
|
||||
if err != nil {
|
||||
// Clean up mount point on failure
|
||||
m.forceUnmountPath(mountPath)
|
||||
_ = m.forceUnmountPath(mountPath)
|
||||
return fmt.Errorf("failed to create mount for %s: %w", provider, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -74,7 +74,7 @@ func (m *Manager) RecoverMount(provider string) error {
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Try to remount
|
||||
if err := m.Mount(provider, mountInfo.WebDAVURL); err != nil {
|
||||
if err := m.Mount(mountInfo.LocalPath, provider, mountInfo.WebDAVURL); err != nil {
|
||||
return fmt.Errorf("failed to recover mount for %s: %w", provider, err)
|
||||
}
|
||||
|
||||
|
||||
@@ -6,10 +6,13 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -78,7 +81,7 @@ func NewManager() *Manager {
|
||||
logger: logger.New("rclone"),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
httpClient: &http.Client{Timeout: 30 * time.Second},
|
||||
httpClient: &http.Client{Timeout: 60 * time.Second},
|
||||
serverReady: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
@@ -112,9 +115,17 @@ func (m *Manager) Start(ctx context.Context) error {
|
||||
"--rc-addr", ":" + m.rcPort,
|
||||
"--rc-no-auth", // We'll handle auth at the application level
|
||||
"--config", filepath.Join(m.rcloneDir, "rclone.conf"),
|
||||
"--log-level", cfg.Rclone.LogLevel,
|
||||
"--log-file", logFile,
|
||||
}
|
||||
|
||||
logLevel := cfg.Rclone.LogLevel
|
||||
if logLevel != "" {
|
||||
if !slices.Contains([]string{"DEBUG", "INFO", "NOTICE", "ERROR"}, logLevel) {
|
||||
logLevel = "INFO"
|
||||
}
|
||||
args = append(args, "--log-level", logLevel)
|
||||
}
|
||||
|
||||
if cfg.Rclone.CacheDir != "" {
|
||||
if err := os.MkdirAll(cfg.Rclone.CacheDir, 0755); err == nil {
|
||||
args = append(args, "--cache-dir", cfg.Rclone.CacheDir)
|
||||
@@ -170,9 +181,12 @@ func (m *Manager) Start(ctx context.Context) error {
|
||||
default:
|
||||
if code, ok := ExitCode(err); ok {
|
||||
m.logger.Debug().Int("exit_code", code).Err(err).
|
||||
Str("stderr", stderr.String()).
|
||||
Str("stdout", stdout.String()).
|
||||
Msg("Rclone RC server error")
|
||||
} else {
|
||||
m.logger.Debug().Err(err).Msg("Rclone RC server error (no exit code)")
|
||||
m.logger.Debug().Err(err).Str("stderr", stderr.String()).
|
||||
Str("stdout", stdout.String()).Msg("Rclone RC server error (no exit code)")
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -249,50 +263,35 @@ func (m *Manager) Stop() error {
|
||||
case err := <-done:
|
||||
if err != nil && !errors.Is(err, context.Canceled) && !WasHardTerminated(err) {
|
||||
m.logger.Warn().Err(err).Msg("Rclone process exited with error")
|
||||
} else {
|
||||
m.logger.Info().Msg("Rclone process exited gracefully")
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
case <-time.After(2 * time.Second):
|
||||
m.logger.Warn().Msg("Timeout waiting for rclone to exit, force killing")
|
||||
if err := m.cmd.Process.Kill(); err != nil {
|
||||
m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
|
||||
return err
|
||||
// Check if the process already finished
|
||||
if !strings.Contains(err.Error(), "process already finished") {
|
||||
m.logger.Error().Err(err).Msg("Failed to force kill rclone process")
|
||||
return err
|
||||
}
|
||||
m.logger.Info().Msg("Process already finished during kill attempt")
|
||||
}
|
||||
// Wait a bit more for the kill to take effect
|
||||
|
||||
// Still wait for the Wait() to complete to clean up the process
|
||||
select {
|
||||
case <-done:
|
||||
m.logger.Info().Msg("Rclone process killed successfully")
|
||||
m.logger.Info().Msg("Rclone process cleanup completed")
|
||||
case <-time.After(5 * time.Second):
|
||||
m.logger.Error().Msg("Process may still be running after kill")
|
||||
m.logger.Error().Msg("Process cleanup timeout")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up any remaining mount directories
|
||||
cfg := config.Get()
|
||||
if cfg.Rclone.MountPath != "" {
|
||||
m.cleanupMountDirectories(cfg.Rclone.MountPath)
|
||||
}
|
||||
|
||||
m.serverStarted = false
|
||||
m.logger.Info().Msg("Rclone RC server stopped")
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupMountDirectories removes empty mount directories
|
||||
func (m *Manager) cleanupMountDirectories(_ string) {
|
||||
m.mountsMutex.RLock()
|
||||
defer m.mountsMutex.RUnlock()
|
||||
|
||||
for _, mount := range m.mounts {
|
||||
if mount.LocalPath != "" {
|
||||
// Try to remove the directory if it's empty
|
||||
if err := os.Remove(mount.LocalPath); err == nil {
|
||||
m.logger.Debug().Str("path", mount.LocalPath).Msg("Removed empty mount directory")
|
||||
}
|
||||
// Don't log errors here as the directory might not be empty, which is fine
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// waitForServer waits for the RC server to become available
|
||||
func (m *Manager) waitForServer() {
|
||||
maxAttempts := 30
|
||||
@@ -340,7 +339,12 @@ func (m *Manager) makeRequest(req RCRequest, close bool) (*http.Response, error)
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
// Read the response body to get more details
|
||||
defer resp.Body.Close()
|
||||
defer func(Body io.ReadCloser) {
|
||||
err := Body.Close()
|
||||
if err != nil {
|
||||
m.logger.Debug().Err(err).Msg("Failed to close response body")
|
||||
}
|
||||
}(resp.Body)
|
||||
var errorResp RCResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&errorResp); err != nil {
|
||||
return nil, fmt.Errorf("request failed with status %s, but could not decode error response: %w", resp.Status, err)
|
||||
|
||||
@@ -3,11 +3,12 @@ package rclone
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
)
|
||||
|
||||
// Mount represents a mount using the rclone RC client
|
||||
@@ -65,7 +66,7 @@ func (m *Mount) Mount(ctx context.Context) error {
|
||||
Str("mount_path", m.LocalPath).
|
||||
Msg("Creating mount via RC")
|
||||
|
||||
if err := m.rcManager.Mount(m.Provider, m.WebDAVURL); err != nil {
|
||||
if err := m.rcManager.Mount(m.LocalPath, m.Provider, m.WebDAVURL); err != nil {
|
||||
m.logger.Error().Str("provider", m.Provider).Msg("Mount operation failed")
|
||||
return fmt.Errorf("mount failed for %s", m.Provider)
|
||||
}
|
||||
|
||||
@@ -2,11 +2,12 @@ package repair
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/store"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/common"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/store"
|
||||
)
|
||||
|
||||
func fileIsSymlinked(file string) bool {
|
||||
@@ -85,7 +86,7 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile {
|
||||
return uniqueParents
|
||||
}
|
||||
|
||||
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile {
|
||||
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]common.Client, caches map[string]*store.Cache) []arr.ContentFile {
|
||||
brokenFiles := make([]arr.ContentFile, 0)
|
||||
|
||||
emptyFiles := make([]arr.ContentFile, 0)
|
||||
@@ -149,7 +150,7 @@ func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile,
|
||||
return brokenFiles
|
||||
}
|
||||
|
||||
func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string {
|
||||
func (r *Repair) findDebridForPath(dir string, clients map[string]common.Client) string {
|
||||
// Check cache first
|
||||
if debridName, exists := r.debridPathCache.Load(dir); exists {
|
||||
return debridName.(string)
|
||||
|
||||
@@ -5,16 +5,6 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/go-co-op/gocron/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -25,6 +15,17 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-co-op/gocron/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type Repair struct {
|
||||
@@ -105,10 +106,6 @@ func New(arrs *arr.Storage, engine *debrid.Storage) *Repair {
|
||||
func (r *Repair) Reset() {
|
||||
// Stop scheduler
|
||||
if r.scheduler != nil {
|
||||
if err := r.scheduler.StopJobs(); err != nil {
|
||||
r.logger.Error().Err(err).Msg("Error stopping scheduler")
|
||||
}
|
||||
|
||||
if err := r.scheduler.Shutdown(); err != nil {
|
||||
r.logger.Error().Err(err).Msg("Error shutting down scheduler")
|
||||
}
|
||||
|
||||
@@ -2,17 +2,18 @@ package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
)
|
||||
|
||||
func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) {
|
||||
ingests := make([]debridTypes.IngestData, 0)
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
debrids := _store.Debrid()
|
||||
if debrids == nil {
|
||||
http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
|
||||
@@ -42,7 +43,7 @@ func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
debrids := _store.Debrid()
|
||||
|
||||
if debrids == nil {
|
||||
@@ -92,68 +93,40 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
|
||||
"go_version": runtime.Version(),
|
||||
}
|
||||
|
||||
debrids := store.Get().Debrid()
|
||||
if debrids == nil {
|
||||
request.JSONResponse(w, stats, http.StatusOK)
|
||||
return
|
||||
}
|
||||
clients := debrids.Clients()
|
||||
caches := debrids.Caches()
|
||||
debridStats := make([]debridTypes.Stats, 0)
|
||||
for debridName, client := range clients {
|
||||
debridStat := debridTypes.Stats{}
|
||||
libraryStat := debridTypes.LibraryStats{}
|
||||
profile, err := client.GetProfile()
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Str("debrid", debridName).Msg("Failed to get debrid profile")
|
||||
profile = &debridTypes.Profile{
|
||||
Name: debridName,
|
||||
debrids := wire.Get().Debrid()
|
||||
if debrids != nil {
|
||||
clients := debrids.Clients()
|
||||
caches := debrids.Caches()
|
||||
debridStats := make([]debridTypes.Stats, 0)
|
||||
for debridName, client := range clients {
|
||||
debridStat := debridTypes.Stats{}
|
||||
libraryStat := debridTypes.LibraryStats{}
|
||||
profile, err := client.GetProfile()
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Str("debrid", debridName).Msg("Failed to get debrid profile")
|
||||
profile = &debridTypes.Profile{
|
||||
Name: debridName,
|
||||
}
|
||||
}
|
||||
}
|
||||
profile.Name = debridName
|
||||
debridStat.Profile = profile
|
||||
cache, ok := caches[debridName]
|
||||
if ok {
|
||||
// Get torrent data
|
||||
libraryStat.Total = cache.TotalTorrents()
|
||||
libraryStat.Bad = len(cache.GetListing("__bad__"))
|
||||
libraryStat.ActiveLinks = cache.GetTotalActiveDownloadLinks()
|
||||
profile.Name = debridName
|
||||
debridStat.Profile = profile
|
||||
cache, ok := caches[debridName]
|
||||
if ok {
|
||||
// Get torrent data
|
||||
libraryStat.Total = cache.TotalTorrents()
|
||||
libraryStat.Bad = len(cache.GetListing("__bad__"))
|
||||
libraryStat.ActiveLinks = cache.GetTotalActiveDownloadLinks()
|
||||
|
||||
}
|
||||
debridStat.Library = libraryStat
|
||||
|
||||
// Get detailed account information
|
||||
accounts := client.Accounts().All()
|
||||
accountDetails := make([]map[string]any, 0)
|
||||
for _, account := range accounts {
|
||||
// Mask token - show first 8 characters and last 4 characters
|
||||
maskedToken := ""
|
||||
if len(account.Token) > 12 {
|
||||
maskedToken = account.Token[:8] + "****" + account.Token[len(account.Token)-4:]
|
||||
} else if len(account.Token) > 8 {
|
||||
maskedToken = account.Token[:4] + "****" + account.Token[len(account.Token)-2:]
|
||||
} else {
|
||||
maskedToken = "****"
|
||||
}
|
||||
|
||||
accountDetail := map[string]any{
|
||||
"order": account.Order,
|
||||
"disabled": account.Disabled,
|
||||
"token_masked": maskedToken,
|
||||
"username": account.Username,
|
||||
"traffic_used": account.TrafficUsed,
|
||||
"links_count": account.LinksCount(),
|
||||
"debrid": account.Debrid,
|
||||
}
|
||||
accountDetails = append(accountDetails, accountDetail)
|
||||
debridStat.Library = libraryStat
|
||||
debridStat.Accounts = client.AccountManager().Stats()
|
||||
debridStats = append(debridStats, debridStat)
|
||||
}
|
||||
debridStat.Accounts = accountDetails
|
||||
debridStats = append(debridStats, debridStat)
|
||||
stats["debrids"] = debridStats
|
||||
}
|
||||
stats["debrids"] = debridStats
|
||||
|
||||
// Add rclone stats if available
|
||||
if rcManager := store.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() {
|
||||
if rcManager := wire.Get().RcloneManager(); rcManager != nil && rcManager.IsReady() {
|
||||
rcStats, err := rcManager.GetStats()
|
||||
if err != nil {
|
||||
s.logger.Error().Err(err).Msg("Failed to get rclone stats")
|
||||
|
||||
@@ -4,15 +4,16 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
@@ -24,6 +25,8 @@ func New(handlers map[string]http.Handler) *Server {
|
||||
l := logger.New("http")
|
||||
r := chi.NewRouter()
|
||||
r.Use(middleware.Recoverer)
|
||||
r.Use(middleware.StripSlashes)
|
||||
r.Use(middleware.RedirectSlashes)
|
||||
|
||||
cfg := config.Get()
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ package server
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
@@ -38,7 +38,7 @@ func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "Invalid ID", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
repair := store.Get().Repair()
|
||||
repair := wire.Get().Repair()
|
||||
|
||||
mediaId := cmp.Or(payload.TmdbID, payload.TvdbID)
|
||||
|
||||
|
||||
@@ -1,318 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
|
||||
"github.com/cavaliergopher/grab/v3"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
)
|
||||
|
||||
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
|
||||
req, err := grab.NewRequest(filename, url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set byte range if specified
|
||||
if byterange != nil {
|
||||
byterangeStr := fmt.Sprintf("%d-%d", byterange[0], byterange[1])
|
||||
req.HTTPRequest.Header.Set("Range", "bytes="+byterangeStr)
|
||||
}
|
||||
|
||||
resp := client.Do(req)
|
||||
|
||||
t := time.NewTicker(time.Second * 2)
|
||||
defer t.Stop()
|
||||
|
||||
var lastReported int64
|
||||
Loop:
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
current := resp.BytesComplete()
|
||||
speed := int64(resp.BytesPerSecond())
|
||||
if current != lastReported {
|
||||
if progressCallback != nil {
|
||||
progressCallback(current-lastReported, speed)
|
||||
}
|
||||
lastReported = current
|
||||
}
|
||||
case <-resp.Done:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
// Report final bytes
|
||||
if progressCallback != nil {
|
||||
progressCallback(resp.BytesComplete()-lastReported, 0)
|
||||
}
|
||||
|
||||
return resp.Err()
|
||||
}
|
||||
|
||||
func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
|
||||
s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
|
||||
torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename))
|
||||
torrentPath = utils.RemoveInvalidChars(torrentPath)
|
||||
err := os.MkdirAll(torrentPath, os.ModePerm)
|
||||
if err != nil {
|
||||
// add the previous error to the error and return
|
||||
return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err)
|
||||
}
|
||||
s.downloadFiles(torrent, debridTorrent, torrentPath)
|
||||
return torrentPath, nil
|
||||
}
|
||||
|
||||
func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
totalSize := int64(0)
|
||||
for _, file := range debridTorrent.GetFiles() {
|
||||
totalSize += file.Size
|
||||
}
|
||||
debridTorrent.Lock()
|
||||
debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
|
||||
debridTorrent.Progress = 0 // Reset progress
|
||||
debridTorrent.Unlock()
|
||||
progressCallback := func(downloaded int64, speed int64) {
|
||||
debridTorrent.Lock()
|
||||
defer debridTorrent.Unlock()
|
||||
torrent.Lock()
|
||||
defer torrent.Unlock()
|
||||
|
||||
// Update total downloaded bytes
|
||||
debridTorrent.SizeDownloaded += downloaded
|
||||
debridTorrent.Speed = speed
|
||||
|
||||
// Calculate overall progress
|
||||
if totalSize > 0 {
|
||||
debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
|
||||
}
|
||||
s.partialTorrentUpdate(torrent, debridTorrent)
|
||||
}
|
||||
client := &grab.Client{
|
||||
UserAgent: "Decypharr[QBitTorrent]",
|
||||
HTTPClient: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
},
|
||||
}
|
||||
errChan := make(chan error, len(debridTorrent.Files))
|
||||
for _, file := range debridTorrent.GetFiles() {
|
||||
if file.DownloadLink == nil {
|
||||
s.logger.Info().Msgf("No download link found for %s", file.Name)
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
s.downloadSemaphore <- struct{}{}
|
||||
go func(file types.File) {
|
||||
defer wg.Done()
|
||||
defer func() { <-s.downloadSemaphore }()
|
||||
filename := file.Name
|
||||
|
||||
err := grabber(
|
||||
client,
|
||||
file.DownloadLink.DownloadLink,
|
||||
filepath.Join(parent, filename),
|
||||
file.ByteRange,
|
||||
progressCallback,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
|
||||
errChan <- err
|
||||
} else {
|
||||
s.logger.Info().Msgf("Downloaded %s", filename)
|
||||
}
|
||||
}(file)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
close(errChan)
|
||||
var errors []error
|
||||
for err := range errChan {
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
s.logger.Error().Msgf("Errors occurred during download: %v", errors)
|
||||
return
|
||||
}
|
||||
s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
|
||||
}
|
||||
|
||||
func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
|
||||
files := debridTorrent.Files
|
||||
if len(files) == 0 {
|
||||
return "", fmt.Errorf("no valid files found")
|
||||
}
|
||||
s.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
|
||||
rCloneBase := debridTorrent.MountPath
|
||||
torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
|
||||
// This returns filename.ext for alldebrid instead of the parent folder filename/
|
||||
torrentFolder := torrentPath
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get torrent path: %v", err)
|
||||
}
|
||||
// Check if the torrent path is a file
|
||||
torrentRclonePath := filepath.Join(rCloneBase, torrentPath) // leave it as is
|
||||
if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentPath) {
|
||||
// Alldebrid hotfix for single file torrents
|
||||
torrentFolder = utils.RemoveExtension(torrentFolder)
|
||||
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
|
||||
}
|
||||
torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
||||
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
|
||||
}
|
||||
|
||||
realPaths := make(map[string]string)
|
||||
err = filepath.WalkDir(torrentRclonePath, func(path string, d os.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if !d.IsDir() {
|
||||
filename := d.Name()
|
||||
rel, _ := filepath.Rel(torrentRclonePath, path)
|
||||
realPaths[filename] = rel
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
s.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
|
||||
}
|
||||
|
||||
pending := make(map[string]types.File)
|
||||
for _, file := range files {
|
||||
if realRelPath, ok := realPaths[file.Name]; ok {
|
||||
file.Path = realRelPath
|
||||
}
|
||||
pending[file.Path] = file
|
||||
}
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
timeout := time.After(30 * time.Minute)
|
||||
filePaths := make([]string, 0, len(pending))
|
||||
|
||||
for len(pending) > 0 {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
for path, file := range pending {
|
||||
fullFilePath := filepath.Join(torrentRclonePath, file.Path)
|
||||
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
|
||||
fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
|
||||
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
|
||||
s.logger.Warn().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
|
||||
} else {
|
||||
filePaths = append(filePaths, fileSymlinkPath)
|
||||
delete(pending, path)
|
||||
s.logger.Info().Msgf("File is ready: %s", file.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-timeout:
|
||||
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
|
||||
return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending))
|
||||
}
|
||||
}
|
||||
if s.skipPreCache {
|
||||
return torrentSymlinkPath, nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
|
||||
if err := utils.PreCacheFile(filePaths); err != nil {
|
||||
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
|
||||
} else {
|
||||
s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
|
||||
}
|
||||
}()
|
||||
return torrentSymlinkPath, nil
|
||||
}
|
||||
|
||||
func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) {
|
||||
files := debridTorrent.Files
|
||||
symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
|
||||
err := os.MkdirAll(symlinkPath, os.ModePerm)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
|
||||
}
|
||||
|
||||
remainingFiles := make(map[string]types.File)
|
||||
for _, file := range files {
|
||||
remainingFiles[file.Name] = file
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
timeout := time.After(30 * time.Minute)
|
||||
filePaths := make([]string, 0, len(files))
|
||||
|
||||
for len(remainingFiles) > 0 {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
entries, err := os.ReadDir(rclonePath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check which files exist in this batch
|
||||
for _, entry := range entries {
|
||||
filename := entry.Name()
|
||||
if file, exists := remainingFiles[filename]; exists {
|
||||
fullFilePath := filepath.Join(rclonePath, filename)
|
||||
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
|
||||
|
||||
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
|
||||
s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
|
||||
} else {
|
||||
filePaths = append(filePaths, fileSymlinkPath)
|
||||
delete(remainingFiles, filename)
|
||||
s.logger.Info().Msgf("File is ready: %s", file.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case <-timeout:
|
||||
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
|
||||
return symlinkPath, fmt.Errorf("timeout waiting for files")
|
||||
}
|
||||
}
|
||||
|
||||
if s.skipPreCache {
|
||||
return symlinkPath, nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
|
||||
if err := utils.PreCacheFile(filePaths); err != nil {
|
||||
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
|
||||
} else {
|
||||
s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
|
||||
}
|
||||
}() // Pre-cache the files in the background
|
||||
// Pre-cache the first 256KB and 1MB of the file
|
||||
return symlinkPath, nil
|
||||
}
|
||||
|
||||
func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) {
|
||||
for {
|
||||
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
|
||||
if err == nil {
|
||||
s.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
|
||||
return torrentPath, err
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
111
pkg/web/api.go
111
pkg/web/api.go
@@ -2,13 +2,15 @@ package web
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
@@ -18,8 +20,8 @@ import (
|
||||
)
|
||||
|
||||
func (wb *Web) handleGetArrs(w http.ResponseWriter, r *http.Request) {
|
||||
_store := store.Get()
|
||||
request.JSONResponse(w, _store.Arr().GetAll(), http.StatusOK)
|
||||
arrStorage := wire.Get().Arr()
|
||||
request.JSONResponse(w, arrStorage.GetAll(), http.StatusOK)
|
||||
}
|
||||
|
||||
func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -28,9 +30,9 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
|
||||
results := make([]*store.ImportRequest, 0)
|
||||
results := make([]*wire.ImportRequest, 0)
|
||||
errs := make([]string, 0)
|
||||
|
||||
arrName := r.FormValue("arr")
|
||||
@@ -41,8 +43,16 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||
if downloadFolder == "" {
|
||||
downloadFolder = config.Get().QBitTorrent.DownloadFolder
|
||||
}
|
||||
skipMultiSeason := r.FormValue("skipMultiSeason") == "true"
|
||||
|
||||
downloadUncached := r.FormValue("downloadUncached") == "true"
|
||||
rmTrackerUrls := r.FormValue("rmTrackerUrls") == "true"
|
||||
|
||||
// Check config setting - if always remove tracker URLs is enabled, force it to true
|
||||
cfg := config.Get()
|
||||
if cfg.QBitTorrent.AlwaysRmTrackerUrls {
|
||||
rmTrackerUrls = true
|
||||
}
|
||||
|
||||
_arr := _store.Arr().Get(arrName)
|
||||
if _arr == nil {
|
||||
@@ -60,13 +70,13 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
for _, url := range urlList {
|
||||
magnet, err := utils.GetMagnetFromUrl(url)
|
||||
magnet, err := utils.GetMagnetFromUrl(url, rmTrackerUrls)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("Failed to parse URL %s: %v", url, err))
|
||||
continue
|
||||
}
|
||||
|
||||
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI)
|
||||
importReq := wire.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, wire.ImportTypeAPI, skipMultiSeason)
|
||||
if err := _store.AddTorrent(ctx, importReq); err != nil {
|
||||
wb.logger.Error().Err(err).Str("url", url).Msg("Failed to add torrent")
|
||||
errs = append(errs, fmt.Sprintf("URL %s: %v", url, err))
|
||||
@@ -85,13 +95,13 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||
continue
|
||||
}
|
||||
|
||||
magnet, err := utils.GetMagnetFromFile(file, fileHeader.Filename)
|
||||
magnet, err := utils.GetMagnetFromFile(file, fileHeader.Filename, rmTrackerUrls)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Sprintf("Failed to parse torrent file %s: %v", fileHeader.Filename, err))
|
||||
continue
|
||||
}
|
||||
|
||||
importReq := store.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, store.ImportTypeAPI)
|
||||
importReq := wire.NewImportRequest(debridName, downloadFolder, magnet, _arr, action, downloadUncached, callbackUrl, wire.ImportTypeAPI, skipMultiSeason)
|
||||
err = _store.AddTorrent(ctx, importReq)
|
||||
if err != nil {
|
||||
wb.logger.Error().Err(err).Str("file", fileHeader.Filename).Msg("Failed to add torrent")
|
||||
@@ -103,8 +113,8 @@ func (wb *Web) handleAddContent(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
request.JSONResponse(w, struct {
|
||||
Results []*store.ImportRequest `json:"results"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Results []*wire.ImportRequest `json:"results"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
}{
|
||||
Results: results,
|
||||
Errors: errs,
|
||||
@@ -118,7 +128,7 @@ func (wb *Web) handleRepairMedia(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
|
||||
var arrs []string
|
||||
|
||||
@@ -183,38 +193,9 @@ func (wb *Web) handleDeleteTorrents(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (wb *Web) handleGetConfig(w http.ResponseWriter, r *http.Request) {
|
||||
// Merge config arrs, with arr Storage
|
||||
unique := map[string]config.Arr{}
|
||||
arrStorage := wire.Get().Arr()
|
||||
cfg := config.Get()
|
||||
arrStorage := store.Get().Arr()
|
||||
|
||||
// Add existing Arrs from storage
|
||||
for _, a := range arrStorage.GetAll() {
|
||||
if _, ok := unique[a.Name]; !ok {
|
||||
// Only add if not already in the unique map
|
||||
unique[a.Name] = config.Arr{
|
||||
Name: a.Name,
|
||||
Host: a.Host,
|
||||
Token: a.Token,
|
||||
Cleanup: a.Cleanup,
|
||||
SkipRepair: a.SkipRepair,
|
||||
DownloadUncached: a.DownloadUncached,
|
||||
SelectedDebrid: a.SelectedDebrid,
|
||||
Source: a.Source,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, a := range cfg.Arrs {
|
||||
if a.Host == "" || a.Token == "" {
|
||||
continue // Skip empty arrs
|
||||
}
|
||||
unique[a.Name] = a
|
||||
}
|
||||
cfg.Arrs = make([]config.Arr, 0, len(unique))
|
||||
for _, a := range unique {
|
||||
cfg.Arrs = append(cfg.Arrs, a)
|
||||
}
|
||||
cfg.Arrs = arrStorage.SyncToConfig()
|
||||
|
||||
// Create response with API token info
|
||||
type ConfigResponse struct {
|
||||
@@ -256,6 +237,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
|
||||
currentConfig.RemoveStalledAfter = updatedConfig.RemoveStalledAfter
|
||||
currentConfig.AllowedExt = updatedConfig.AllowedExt
|
||||
currentConfig.DiscordWebhook = updatedConfig.DiscordWebhook
|
||||
currentConfig.CallbackURL = updatedConfig.CallbackURL
|
||||
|
||||
// Should this be added?
|
||||
currentConfig.URLBase = updatedConfig.URLBase
|
||||
@@ -270,13 +252,10 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
|
||||
currentConfig.Rclone = updatedConfig.Rclone
|
||||
|
||||
// Update Debrids
|
||||
if len(updatedConfig.Debrids) > 0 {
|
||||
currentConfig.Debrids = updatedConfig.Debrids
|
||||
// Clear legacy single debrid if using array
|
||||
}
|
||||
currentConfig.Debrids = updatedConfig.Debrids
|
||||
|
||||
// Update Arrs through the service
|
||||
storage := store.Get()
|
||||
storage := wire.Get()
|
||||
arrStorage := storage.Arr()
|
||||
|
||||
newConfigArrs := make([]config.Arr, 0)
|
||||
@@ -289,28 +268,8 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
currentConfig.Arrs = newConfigArrs
|
||||
|
||||
// Add config arr into the config
|
||||
for _, a := range currentConfig.Arrs {
|
||||
if a.Host == "" || a.Token == "" {
|
||||
continue // Skip empty arrs
|
||||
}
|
||||
existingArr := arrStorage.Get(a.Name)
|
||||
if existingArr != nil {
|
||||
// Update existing Arr
|
||||
existingArr.Host = a.Host
|
||||
existingArr.Token = a.Token
|
||||
existingArr.Cleanup = a.Cleanup
|
||||
existingArr.SkipRepair = a.SkipRepair
|
||||
existingArr.DownloadUncached = a.DownloadUncached
|
||||
existingArr.SelectedDebrid = a.SelectedDebrid
|
||||
existingArr.Source = a.Source
|
||||
arrStorage.AddOrUpdate(existingArr)
|
||||
} else {
|
||||
// Create new Arr if it doesn't exist
|
||||
newArr := arr.New(a.Name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
|
||||
arrStorage.AddOrUpdate(newArr)
|
||||
}
|
||||
}
|
||||
// Sync arrStorage with the new arrs
|
||||
arrStorage.SyncFromConfig(currentConfig.Arrs)
|
||||
|
||||
if err := currentConfig.Save(); err != nil {
|
||||
http.Error(w, "Error saving config: "+err.Error(), http.StatusInternalServerError)
|
||||
@@ -320,7 +279,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
|
||||
if restartFunc != nil {
|
||||
go func() {
|
||||
// Small delay to ensure the response is sent
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
restartFunc()
|
||||
}()
|
||||
}
|
||||
@@ -330,7 +289,7 @@ func (wb *Web) handleUpdateConfig(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (wb *Web) handleGetRepairJobs(w http.ResponseWriter, r *http.Request) {
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
request.JSONResponse(w, _store.Repair().GetJobs(), http.StatusOK)
|
||||
}
|
||||
|
||||
@@ -340,7 +299,7 @@ func (wb *Web) handleProcessRepairJob(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "No job ID provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
if err := _store.Repair().ProcessJob(id); err != nil {
|
||||
wb.logger.Error().Err(err).Msg("Failed to process repair job")
|
||||
}
|
||||
@@ -361,7 +320,7 @@ func (wb *Web) handleDeleteRepairJob(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
_store.Repair().DeleteJobs(req.IDs)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
@@ -372,7 +331,7 @@ func (wb *Web) handleStopRepairJob(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "No job ID provided", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
_store := store.Get()
|
||||
_store := wire.Get()
|
||||
if err := _store.Repair().StopJob(id); err != nil {
|
||||
wb.logger.Error().Err(err).Msg("Failed to stop repair job")
|
||||
http.Error(w, "Failed to stop job: "+err.Error(), http.StatusInternalServerError)
|
||||
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -388,7 +388,7 @@ class DecypharrUtils {
|
||||
|
||||
if (versionBadge) {
|
||||
versionBadge.innerHTML = `
|
||||
<a href="https://github.com/sirrobot01/decypharr/releases/tag/${data.version}"
|
||||
<a href="https://github.com/sirrobot01/decypharr/releases/tag/v${data.version}"
|
||||
target="_blank"
|
||||
class="text-current hover:text-primary transition-colors">
|
||||
${data.channel}-${data.version}
|
||||
@@ -718,4 +718,4 @@ window.createToast = (message, type, duration) => window.decypharrUtils.createTo
|
||||
// Export for ES6 modules if needed
|
||||
if (typeof module !== 'undefined' && module.exports) {
|
||||
module.exports = DecypharrUtils;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,7 +114,7 @@ class ConfigManager {
|
||||
populateQBittorrentSettings(qbitConfig) {
|
||||
if (!qbitConfig) return;
|
||||
|
||||
const fields = ['download_folder', 'refresh_interval', 'max_downloads', 'skip_pre_cache'];
|
||||
const fields = ['download_folder', 'refresh_interval', 'max_downloads', 'skip_pre_cache', 'always_rm_tracker_urls'];
|
||||
|
||||
fields.forEach(field => {
|
||||
const element = document.querySelector(`[name="qbit.${field}"]`);
|
||||
@@ -149,10 +149,11 @@ class ConfigManager {
|
||||
if (!rcloneConfig) return;
|
||||
|
||||
const fields = [
|
||||
'enabled', 'mount_path', 'cache_dir', 'vfs_cache_mode', 'vfs_cache_max_size', 'vfs_cache_max_age',
|
||||
'vfs_cache_poll_interval', 'vfs_read_chunk_size', 'vfs_read_chunk_size_limit', 'buffer_size',
|
||||
'enabled', 'rc_port', 'mount_path', 'cache_dir', 'transfers', 'vfs_cache_mode', 'vfs_cache_max_size', 'vfs_cache_max_age',
|
||||
'vfs_cache_poll_interval', 'vfs_read_chunk_size', 'vfs_read_chunk_size_limit', 'buffer_size', 'bw_limit',
|
||||
'uid', 'gid', 'vfs_read_ahead', 'attr_timeout', 'dir_cache_time', 'poll_interval', 'umask',
|
||||
'no_modtime', 'no_checksum', 'log_level'
|
||||
'no_modtime', 'no_checksum', 'log_level', 'vfs_cache_min_free_space', 'vfs_fast_fingerprint', 'vfs_read_chunk_streams',
|
||||
'async_read', 'use_mmap'
|
||||
];
|
||||
|
||||
fields.forEach(field => {
|
||||
@@ -273,7 +274,6 @@ class ConfigManager {
|
||||
<div class="form-control flex-1">
|
||||
<label class="label" for="debrid[${index}].download_api_keys">
|
||||
<span class="label-text font-medium">Download API Keys</span>
|
||||
<span class="badge badge-ghost badge-sm">Optional</span>
|
||||
</label>
|
||||
<div class="password-toggle-container">
|
||||
<textarea class="textarea textarea-bordered has-toggle font-mono h-full min-h-[200px]"
|
||||
@@ -290,7 +290,7 @@ class ConfigManager {
|
||||
</div>
|
||||
</div>
|
||||
<div class="space-y-4">
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-6">
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid[${index}].folder">
|
||||
<span class="label-text font-medium">Mount/Rclone Folder</span>
|
||||
@@ -302,17 +302,6 @@ class ConfigManager {
|
||||
<span class="label-text-alt">Path where debrid files are mounted</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid[${index}].rate_limit">
|
||||
<span class="label-text font-medium">Rate Limit</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered"
|
||||
name="debrid[${index}].rate_limit" id="debrid[${index}].rate_limit"
|
||||
placeholder="250/minute" value="250/minute">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">API rate limit for this service</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid[${index}].rclone_mount_path">
|
||||
<span class="label-text font-medium">Custom Rclone Mount Path</span>
|
||||
@@ -324,8 +313,21 @@ class ConfigManager {
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Custom mount path for this debrid service. If empty, uses global rclone mount path.</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="grid grid-cols-2 lg:grid-cols-3 gap-3">
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid[${index}].rate_limit">
|
||||
<span class="label-text font-medium">Rate Limit</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered"
|
||||
name="debrid[${index}].rate_limit" id="debrid[${index}].rate_limit"
|
||||
placeholder="250/minute" value="250/minute">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">API rate limit for this service</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid[${index}].proxy">
|
||||
<span class="label-text font-medium">Proxy</span>
|
||||
@@ -337,6 +339,17 @@ class ConfigManager {
|
||||
<span class="label-text-alt">This proxy is used for this debrid account</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid[${index}].minimum_free_slot">
|
||||
<span class="label-text font-medium">Minimum Free Slot</span>
|
||||
</label>
|
||||
<input type="number" class="input input-bordered"
|
||||
name="debrid[${index}].minimum_free_slot" id="debrid[${index}].minimum_free_slot"
|
||||
placeholder="1" value="1">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Minimum free slot for this debrid</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
@@ -885,7 +898,7 @@ class ConfigManager {
|
||||
|
||||
<input type="hidden" name="arr[${index}].source" value="${data.source || ''}">
|
||||
|
||||
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4">
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
|
||||
<div class="form-control">
|
||||
<label class="label" for="arr[${index}].name">
|
||||
<span class="label-text font-medium">Service Name</span>
|
||||
@@ -920,9 +933,7 @@ class ConfigManager {
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4 mt-4">
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="arr[${index}].selected_debrid">
|
||||
<span class="label-text font-medium">Preferred Debrid Service</span>
|
||||
@@ -938,33 +949,31 @@ class ConfigManager {
|
||||
<span class="label-text-alt">Which debrid service this Arr should prefer</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-col justify-end">
|
||||
<div class="grid grid-cols-3 gap-2">
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-2">
|
||||
<input type="checkbox" class="checkbox checkbox-sm"
|
||||
name="arr[${index}].cleanup" id="arr[${index}].cleanup">
|
||||
<span class="label-text text-sm">Cleanup Queue</span>
|
||||
</label>
|
||||
</div>
|
||||
<div class="grid grid-cols-3 gap-4">
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-2">
|
||||
<input type="checkbox" class="checkbox checkbox-sm"
|
||||
name="arr[${index}].cleanup" id="arr[${index}].cleanup">
|
||||
<span class="label-text text-sm">Cleanup Queue</span>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-2">
|
||||
<input type="checkbox" class="checkbox checkbox-sm"
|
||||
name="arr[${index}].skip_repair" id="arr[${index}].skip_repair">
|
||||
<span class="label-text text-sm">Skip Repair</span>
|
||||
</label>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-2">
|
||||
<input type="checkbox" class="checkbox checkbox-sm"
|
||||
name="arr[${index}].skip_repair" id="arr[${index}].skip_repair">
|
||||
<span class="label-text text-sm">Skip Repair</span>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-2">
|
||||
<input type="checkbox" class="checkbox checkbox-sm"
|
||||
name="arr[${index}].download_uncached" id="arr[${index}].download_uncached">
|
||||
<span class="label-text text-sm">Download Uncached</span>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-2">
|
||||
<input type="checkbox" class="checkbox checkbox-sm"
|
||||
name="arr[${index}].download_uncached" id="arr[${index}].download_uncached">
|
||||
<span class="label-text text-sm">Download Uncached</span>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1072,6 +1081,7 @@ class ConfigManager {
|
||||
min_file_size: document.getElementById('minFileSize').value,
|
||||
max_file_size: document.getElementById('maxFileSize').value,
|
||||
remove_stalled_after: document.getElementById('removeStalledAfter').value,
|
||||
callback_url: document.getElementById('callbackUrl').value,
|
||||
|
||||
// Debrid configurations
|
||||
debrids: this.collectDebridConfigs(),
|
||||
@@ -1102,6 +1112,7 @@ class ConfigManager {
|
||||
api_key: document.querySelector(`[name="debrid[${i}].api_key"]`).value,
|
||||
folder: document.querySelector(`[name="debrid[${i}].folder"]`).value,
|
||||
rate_limit: document.querySelector(`[name="debrid[${i}].rate_limit"]`).value,
|
||||
minimum_free_slot: parseInt(document.querySelector(`[name="debrid[${i}].minimum_free_slot"]`).value) || 0,
|
||||
rclone_mount_path: document.querySelector(`[name="debrid[${i}].rclone_mount_path"]`).value,
|
||||
proxy: document.querySelector(`[name="debrid[${i}].proxy"]`).value,
|
||||
download_uncached: document.querySelector(`[name="debrid[${i}].download_uncached"]`).checked,
|
||||
@@ -1172,7 +1183,8 @@ class ConfigManager {
|
||||
download_folder: document.querySelector('[name="qbit.download_folder"]').value,
|
||||
refresh_interval: parseInt(document.querySelector('[name="qbit.refresh_interval"]').value) || 30,
|
||||
max_downloads: parseInt(document.querySelector('[name="qbit.max_downloads"]').value) || 0,
|
||||
skip_pre_cache: document.querySelector('[name="qbit.skip_pre_cache"]').checked
|
||||
skip_pre_cache: document.querySelector('[name="qbit.skip_pre_cache"]').checked,
|
||||
always_rm_tracker_urls: document.querySelector('[name="qbit.always_rm_tracker_urls"]').checked
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1231,15 +1243,23 @@ class ConfigManager {
|
||||
|
||||
return {
|
||||
enabled: getElementValue('enabled', false),
|
||||
rc_port: getElementValue('rc_port', "5572"),
|
||||
mount_path: getElementValue('mount_path'),
|
||||
buffer_size: getElementValue('buffer_size'),
|
||||
bw_limit: getElementValue('bw_limit'),
|
||||
cache_dir: getElementValue('cache_dir'),
|
||||
transfers: getElementValue('transfers', 8),
|
||||
vfs_cache_mode: getElementValue('vfs_cache_mode', 'off'),
|
||||
vfs_cache_max_age: getElementValue('vfs_cache_max_age', '1h'),
|
||||
vfs_cache_max_size: getElementValue('vfs_cache_max_size'),
|
||||
vfs_cache_poll_interval: getElementValue('vfs_cache_poll_interval', '1m'),
|
||||
vfs_read_chunk_size: getElementValue('vfs_read_chunk_size', '128M'),
|
||||
vfs_read_chunk_size_limit: getElementValue('vfs_read_chunk_size_limit', 'off'),
|
||||
vfs_cache_min_free_space: getElementValue('vfs_cache_min_free_space', ''),
|
||||
vfs_fast_fingerprint: getElementValue('vfs_fast_fingerprint', false),
|
||||
vfs_read_chunk_streams: getElementValue('vfs_read_chunk_streams', 0),
|
||||
use_mmap: getElementValue('use_mmap', false),
|
||||
async_read: getElementValue('async_read', true),
|
||||
uid: getElementValue('uid', 0),
|
||||
gid: getElementValue('gid', 0),
|
||||
umask: getElementValue('umask', ''),
|
||||
|
||||
@@ -9,6 +9,7 @@ class DownloadManager {
|
||||
arr: document.getElementById('arr'),
|
||||
downloadAction: document.getElementById('downloadAction'),
|
||||
downloadUncached: document.getElementById('downloadUncached'),
|
||||
rmTrackerUrls: document.getElementById('rmTrackerUrls'),
|
||||
downloadFolder: document.getElementById('downloadFolder'),
|
||||
debrid: document.getElementById('debrid'),
|
||||
submitBtn: document.getElementById('submitDownload'),
|
||||
@@ -34,6 +35,7 @@ class DownloadManager {
|
||||
this.refs.arr.addEventListener('change', () => this.saveOptions());
|
||||
this.refs.downloadAction.addEventListener('change', () => this.saveOptions());
|
||||
this.refs.downloadUncached.addEventListener('change', () => this.saveOptions());
|
||||
this.refs.rmTrackerUrls.addEventListener('change', () => this.saveOptions());
|
||||
this.refs.downloadFolder.addEventListener('change', () => this.saveOptions());
|
||||
|
||||
// File input enhancement
|
||||
@@ -48,12 +50,14 @@ class DownloadManager {
|
||||
category: localStorage.getItem('downloadCategory') || '',
|
||||
action: localStorage.getItem('downloadAction') || 'symlink',
|
||||
uncached: localStorage.getItem('downloadUncached') === 'true',
|
||||
rmTrackerUrls: localStorage.getItem('rmTrackerUrls') === 'true',
|
||||
folder: localStorage.getItem('downloadFolder') || this.downloadFolder
|
||||
};
|
||||
|
||||
this.refs.arr.value = savedOptions.category;
|
||||
this.refs.downloadAction.value = savedOptions.action;
|
||||
this.refs.downloadUncached.checked = savedOptions.uncached;
|
||||
this.refs.rmTrackerUrls.checked = savedOptions.rmTrackerUrls;
|
||||
this.refs.downloadFolder.value = savedOptions.folder;
|
||||
}
|
||||
|
||||
@@ -61,6 +65,12 @@ class DownloadManager {
|
||||
localStorage.setItem('downloadCategory', this.refs.arr.value);
|
||||
localStorage.setItem('downloadAction', this.refs.downloadAction.value);
|
||||
localStorage.setItem('downloadUncached', this.refs.downloadUncached.checked.toString());
|
||||
|
||||
// Only save rmTrackerUrls if not disabled (i.e., not forced by config)
|
||||
if (!this.refs.rmTrackerUrls.disabled) {
|
||||
localStorage.setItem('rmTrackerUrls', this.refs.rmTrackerUrls.checked.toString());
|
||||
}
|
||||
|
||||
localStorage.setItem('downloadFolder', this.refs.downloadFolder.value);
|
||||
}
|
||||
|
||||
@@ -114,6 +124,7 @@ class DownloadManager {
|
||||
formData.append('downloadFolder', this.refs.downloadFolder.value);
|
||||
formData.append('action', this.refs.downloadAction.value);
|
||||
formData.append('downloadUncached', this.refs.downloadUncached.checked);
|
||||
formData.append('rmTrackerUrls', this.refs.rmTrackerUrls.checked);
|
||||
|
||||
if (this.refs.debrid) {
|
||||
formData.append('debrid', this.refs.debrid.value);
|
||||
|
||||
@@ -3,24 +3,25 @@ package web
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
)
|
||||
|
||||
func (wb *Web) setupMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
cfg := config.Get()
|
||||
needsAuth := cfg.NeedsSetup()
|
||||
if needsAuth != nil && r.URL.Path != "/config" && r.URL.Path != "/api/config" {
|
||||
http.Redirect(w, r, fmt.Sprintf("/config?inco=%s", needsAuth.Error()), http.StatusSeeOther)
|
||||
needsSetup := cfg.CheckSetup()
|
||||
if needsSetup != nil && r.URL.Path != "/settings" && r.URL.Path != "/api/config" {
|
||||
http.Redirect(w, r, fmt.Sprintf("/settings?inco=%s", needsSetup.Error()), http.StatusSeeOther)
|
||||
return
|
||||
}
|
||||
|
||||
// strip inco from URL
|
||||
if inco := r.URL.Query().Get("inco"); inco != "" && needsAuth == nil && r.URL.Path == "/config" {
|
||||
if inco := r.URL.Query().Get("inco"); inco != "" && needsSetup == nil && r.URL.Path == "/settings" {
|
||||
// redirect to the same URL without the inco parameter
|
||||
http.Redirect(w, r, "/config", http.StatusSeeOther)
|
||||
http.Redirect(w, r, "/settings", http.StatusSeeOther)
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
@@ -78,8 +79,11 @@ func (wb *Web) isAPIRequest(r *http.Request) bool {
|
||||
func (wb *Web) sendJSONError(w http.ResponseWriter, message string, statusCode int) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(statusCode)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"error": message,
|
||||
"status": statusCode,
|
||||
err := json.NewEncoder(w).Encode(map[string]interface{}{
|
||||
"error": message,
|
||||
"status": statusCode,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,53 +1,58 @@
|
||||
package web
|
||||
|
||||
import (
|
||||
"github.com/go-chi/chi/v5"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
)
|
||||
|
||||
func (wb *Web) Routes() http.Handler {
|
||||
r := chi.NewRouter()
|
||||
|
||||
// Load static files from embedded filesystem
|
||||
staticFS, err := fs.Sub(assetsEmbed, "assets/build")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
imagesFS, err := fs.Sub(imagesEmbed, "assets/images")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r.Handle("/assets/*", http.StripPrefix("/assets/", http.FileServer(http.FS(staticFS))))
|
||||
r.Handle("/images/*", http.StripPrefix("/images/", http.FileServer(http.FS(imagesFS))))
|
||||
// Static assets - always public
|
||||
staticFS, _ := fs.Sub(assetsEmbed, "assets/build")
|
||||
imagesFS, _ := fs.Sub(imagesEmbed, "assets/images")
|
||||
r.Handle("/assets/*", http.StripPrefix(wb.urlBase+"assets/", http.FileServer(http.FS(staticFS))))
|
||||
r.Handle("/images/*", http.StripPrefix(wb.urlBase+"images/", http.FileServer(http.FS(imagesFS))))
|
||||
|
||||
// Public routes - no auth needed
|
||||
r.Get("/version", wb.handleGetVersion)
|
||||
r.Get("/login", wb.LoginHandler)
|
||||
r.Post("/login", wb.LoginHandler)
|
||||
r.Get("/register", wb.RegisterHandler)
|
||||
r.Post("/register", wb.RegisterHandler)
|
||||
r.Get("/skip-auth", wb.skipAuthHandler)
|
||||
r.Get("/version", wb.handleGetVersion)
|
||||
r.Post("/skip-auth", wb.skipAuthHandler)
|
||||
|
||||
// Protected routes - require auth
|
||||
r.Group(func(r chi.Router) {
|
||||
r.Use(wb.authMiddleware)
|
||||
r.Use(wb.setupMiddleware)
|
||||
// Web pages
|
||||
r.Get("/", wb.IndexHandler)
|
||||
r.Get("/download", wb.DownloadHandler)
|
||||
r.Get("/repair", wb.RepairHandler)
|
||||
r.Get("/stats", wb.StatsHandler)
|
||||
r.Get("/config", wb.ConfigHandler)
|
||||
r.Get("/settings", wb.ConfigHandler)
|
||||
|
||||
// API routes
|
||||
r.Route("/api", func(r chi.Router) {
|
||||
// Arr management
|
||||
r.Get("/arrs", wb.handleGetArrs)
|
||||
r.Post("/add", wb.handleAddContent)
|
||||
|
||||
// Repair operations
|
||||
r.Post("/repair", wb.handleRepairMedia)
|
||||
r.Get("/repair/jobs", wb.handleGetRepairJobs)
|
||||
r.Post("/repair/jobs/{id}/process", wb.handleProcessRepairJob)
|
||||
r.Post("/repair/jobs/{id}/stop", wb.handleStopRepairJob)
|
||||
r.Delete("/repair/jobs", wb.handleDeleteRepairJob)
|
||||
|
||||
// Torrent management
|
||||
r.Get("/torrents", wb.handleGetTorrents)
|
||||
r.Delete("/torrents/{category}/{hash}", wb.handleDeleteTorrent)
|
||||
r.Delete("/torrents/", wb.handleDeleteTorrents)
|
||||
r.Delete("/torrents", wb.handleDeleteTorrents) // Fixed trailing slash
|
||||
|
||||
// Config/Auth
|
||||
r.Get("/config", wb.handleGetConfig)
|
||||
r.Post("/config", wb.handleUpdateConfig)
|
||||
r.Post("/refresh-token", wb.handleRefreshAPIToken)
|
||||
|
||||
@@ -1,5 +1,15 @@
|
||||
{{ define "config" }}
|
||||
<div class="space-y-6">
|
||||
{{ if .NeedSetup }}
|
||||
<div role="alert" class="alert alert-warning">
|
||||
<i class="bi bi-exclamation-triangle text-xl"></i>
|
||||
<div>
|
||||
<h3 class="font-bold">Configuration Required</h3>
|
||||
<div class="text-sm">Your configuration is incomplete. Please complete the setup below.</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
<form id="configForm" class="space-y-6">
|
||||
<div class="card bg-base-100 shadow-xl">
|
||||
<div class="card-body">
|
||||
@@ -120,7 +130,7 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="grid grid-cols-1 lg:grid-cols-3 gap-4">
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
|
||||
<div class="form-control">
|
||||
<label class="label" for="minFileSize">
|
||||
<span class="label-text font-medium">Minimum File Size</span>
|
||||
@@ -150,6 +160,15 @@
|
||||
<span class="label-text-alt">Duration before removing stalled torrents</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="callbackUrl">
|
||||
<span class="label-text font-medium">Callback URL</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" id="callbackUrl" name="callback_url" placeholder="http://example.com/callback">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Optional callback URL for download status updates</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Authentication Settings Section -->
|
||||
@@ -327,6 +346,16 @@
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="qbit.always_rm_tracker_urls" id="qbit.always_rm_tracker_urls">
|
||||
<div>
|
||||
<span class="label-text font-medium">Always Remove Tracker URLs</span>
|
||||
<div class="label-text-alt">Allows you to <a href="https://sirrobot01.github.io/decypharr/features/repair-worker/private-tracker-downloads" class="link link-hover font-semibold" target="_blank">download private tracker torrents</a> with lower risk</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -364,71 +393,68 @@
|
||||
</div>
|
||||
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
|
||||
<div class="space-y-4">
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.interval">
|
||||
<span class="label-text font-medium">Repair Interval</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" name="repair.interval" id="repair.interval" placeholder="24h">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">How often to run repair (e.g., 24h, 1d, 03:00, or crontab)</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.workers">
|
||||
<span class="label-text font-medium">Worker Threads</span>
|
||||
</label>
|
||||
<input type="number" class="input input-bordered" name="repair.workers" id="repair.workers" min="1" placeholder="40">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Number of concurrent repair workers</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.strategy">
|
||||
<span class="label-text font-medium">Repair Strategy</span>
|
||||
</label>
|
||||
<select class="select select-bordered" name="repair.strategy" id="repair.strategy">
|
||||
<option value="per_torrent" selected>Per Torrent</option>
|
||||
<option value="per_file">Per File</option>
|
||||
</select>
|
||||
<div class="label">
|
||||
<span class="label-text-alt">How to handle repairs</span>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.interval">
|
||||
<span class="label-text font-medium">Repair Interval</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" name="repair.interval" id="repair.interval" placeholder="24h">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">How often to run repair (e.g., 24h, 1d, 03:00, or crontab)</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="space-y-4">
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.zurg_url">
|
||||
<span class="label-text font-medium">Zurg URL</span>
|
||||
</label>
|
||||
<input type="url" class="input input-bordered" name="repair.zurg_url" id="repair.zurg_url" placeholder="http://zurg:9999">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Optional Zurg instance to speed up repairs</span>
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.workers">
|
||||
<span class="label-text font-medium">Worker Threads</span>
|
||||
</label>
|
||||
<input type="number" class="input input-bordered" name="repair.workers" id="repair.workers" min="1" placeholder="40">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Number of concurrent repair workers</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.strategy">
|
||||
<span class="label-text font-medium">Repair Strategy</span>
|
||||
</label>
|
||||
<select class="select select-bordered" name="repair.strategy" id="repair.strategy">
|
||||
<option value="per_torrent" selected>Per Torrent</option>
|
||||
<option value="per_file">Per File</option>
|
||||
</select>
|
||||
<div class="label">
|
||||
<span class="label-text-alt">How to handle repairs</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="repair.zurg_url">
|
||||
<span class="label-text font-medium">Zurg URL</span>
|
||||
</label>
|
||||
<input type="url" class="input input-bordered" name="repair.zurg_url" id="repair.zurg_url" placeholder="http://zurg:9999">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Optional Zurg instance to speed up repairs</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="grid grid-cols-2 lg:grid-cols-3 gap-4">
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="repair.use_webdav" id="repair.use_webdav">
|
||||
<div>
|
||||
<span class="label-text font-medium">Use WebDAV</span>
|
||||
<div class="label-text-alt">Use internal WebDAV for repairs</div>
|
||||
</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="repair.use_webdav" id="repair.use_webdav">
|
||||
<div>
|
||||
<span class="label-text font-medium">Use WebDAV</span>
|
||||
<div class="label-text-alt">Use internal WebDAV for repairs</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="repair.auto_process" id="repair.auto_process">
|
||||
<div>
|
||||
<span class="label-text font-medium">Auto Process</span>
|
||||
<div class="label-text-alt">Automatically delete broken symlinks and re-search</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="repair.auto_process" id="repair.auto_process">
|
||||
<div>
|
||||
<span class="label-text font-medium">Auto Process</span>
|
||||
<div class="label-text-alt">Automatically delete broken symlinks and re-search</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -455,7 +481,7 @@
|
||||
<h3 class="text-lg font-semibold mb-4 flex items-center">
|
||||
<i class="bi bi-folder mr-2"></i>Mount Configuration
|
||||
</h3>
|
||||
<div class="grid grid-cols-3 gap-4">
|
||||
<div class="grid grid-cols-1 lg:grid-cols-4 gap-4">
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.mount_path">
|
||||
<span class="label-text font-medium">Global Mount Path</span>
|
||||
@@ -466,6 +492,13 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.rc_port">
|
||||
<span class="label-text font-medium">RC Port</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" name="rclone.rc_port" id="rclone.rc_port">
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.log_level">
|
||||
<span class="label-text font-medium">Log Level</span>
|
||||
@@ -473,9 +506,8 @@
|
||||
<select class="select select-bordered" name="rclone.log_level" id="rclone.log_level">
|
||||
<option value="INFO">INFO</option>
|
||||
<option value="DEBUG">DEBUG</option>
|
||||
<option value="WARN">WARN</option>
|
||||
<option value="NOTICE">NOTICE</option>
|
||||
<option value="ERROR">ERROR</option>
|
||||
<option value="TRACE">TRACE</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
@@ -511,11 +543,20 @@
|
||||
<label class="label" for="rclone.buffer_size">
|
||||
<span class="label-text font-medium">Buffer Size</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" name="rclone.buffer_size" id="rclone.buffer_size" placeholder="10M" min="0">
|
||||
<input type="text" class="input input-bordered" name="rclone.buffer_size" id="rclone.buffer_size" placeholder="10M">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Buffer Size(This caches to memory, be wary!!)</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.bw_limit">
|
||||
<span class="label-text font-medium">Bandwidth Limit</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" name="rclone.bw_limit" id="rclone.bw_limit" placeholder="100M">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Bandwidth limit (e.g., 100M, 1G, leave empty for unlimited)</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.attr_timeout">
|
||||
<span class="label-text font-medium">Attribute Caching Timeout</span>
|
||||
@@ -525,6 +566,15 @@
|
||||
<span class="label-text-alt">How long the kernel caches the attributes (size, modification time, etc.)</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.transfers">
|
||||
<span class="label-text font-medium">Transfers</span>
|
||||
</label>
|
||||
<input type="number" class="input input-bordered" name="rclone.transfers" id="rclone.transfers" placeholder="8" min="1">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Number of file transfers to run in parallel</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -628,6 +678,36 @@
|
||||
<span class="label-text-alt">How often VFS cache dir gets cleaned</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.vfs_cache_min_free_space">
|
||||
<span class="label-text font-medium">VFS Cache Min Free Space</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" name="rclone.vfs_cache_min_free_space" id="rclone.vfs_cache_min_free_space" placeholder="1G">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Target minimum free space on the disk containing the cache</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.vfs_disk_space_total">
|
||||
<span class="label-text font-medium">VFS Disk Space Total</span>
|
||||
</label>
|
||||
<input type="text" class="input input-bordered" name="rclone.vfs_disk_space_total" id="rclone.vfs_disk_space_total" placeholder="1G">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Specify the total space of disk</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="rclone.vfs_read_chunk_streams">
|
||||
<span class="label-text font-medium">VFS Read Chunk Streams</span>
|
||||
</label>
|
||||
<input type="number" class="input input-bordered" name="rclone.vfs_read_chunk_streams" id="rclone.vfs_read_chunk_streams" placeholder="4" min="0">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">The number of parallel streams to read at once</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -637,7 +717,7 @@
|
||||
<h3 class="text-lg font-semibold mb-4 flex items-center">
|
||||
<i class="bi bi-gear mr-2"></i>Advanced Settings
|
||||
</h3>
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-4">
|
||||
<div class="grid grid-cols-2 lg:grid-cols-3 gap-4">
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="rclone.no_modtime" id="rclone.no_modtime">
|
||||
@@ -657,6 +737,36 @@
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3" for="rclone.async_read">
|
||||
<input type="checkbox" class="checkbox" name="rclone.async_read" id="rclone.async_read">
|
||||
<div>
|
||||
<span class="label-text font-medium">Async Read</span>
|
||||
<div class="label-text-alt">Use asynchronous reads</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3" for="rclone.vfs_fast_fingerprint">
|
||||
<input type="checkbox" class="checkbox" name="rclone.vfs_fast_fingerprint" id="rclone.vfs_fast_fingerprint">
|
||||
<div>
|
||||
<span class="label-text font-medium">VFS Fast Fingerprint</span>
|
||||
<div class="label-text-alt">Use fast (less accurate) fingerprints for change detection</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3" for="rclone.use_mmap">
|
||||
<input type="checkbox" class="checkbox" name="rclone.use_mmap" id="rclone.use_mmap">
|
||||
<div>
|
||||
<span class="label-text font-medium">Use Mmap</span>
|
||||
<div class="label-text-alt">Use fast (less accurate) fingerprints for change detection</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,10 +1,22 @@
|
||||
{{ define "download" }}
|
||||
<div class="space-y-6">
|
||||
{{ if .NeedSetup }}
|
||||
<div role="alert" class="alert alert-warning">
|
||||
<i class="bi bi-exclamation-triangle text-xl"></i>
|
||||
<div>
|
||||
<h3 class="font-bold">Configuration Required</h3>
|
||||
<div class="text-sm">Your configuration is incomplete. Please complete the setup in the <a
|
||||
href="{{.URLBase}}settings" class="link link-hover font-semibold">Settings page</a>.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
<div class="card bg-base-100 shadow-xl">
|
||||
<div class="card-body">
|
||||
<form id="downloadForm" enctype="multipart/form-data" class="space-y-3">
|
||||
<div class="space-y-2">
|
||||
<div class="form-control">
|
||||
<form id="downloadForm" enctype="multipart/form-data" class="space-y-6">
|
||||
<div class="flex gap-4">
|
||||
<div class="form-control flex-1">
|
||||
<label class="label" for="magnetURI">
|
||||
<span class="label-text font-semibold">
|
||||
<i class="bi bi-magnet mr-2 text-primary"></i>Torrent Links
|
||||
@@ -17,9 +29,7 @@
|
||||
placeholder="Paste your magnet links or torrent URLs here, one per line..."></textarea>
|
||||
</div>
|
||||
|
||||
<div class="divider">OR</div>
|
||||
|
||||
<div class="form-control">
|
||||
<div class="form-control flex-1">
|
||||
<label class="label">
|
||||
<span class="label-text font-semibold">
|
||||
<i class="bi bi-file-earmark-arrow-up mr-2 text-secondary"></i>Upload Torrent Files
|
||||
@@ -40,86 +50,93 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="divider"></div>
|
||||
<div class="divider">Download Settings</div>
|
||||
|
||||
<div class="grid grid-cols-1 lg:grid-cols-2 gap-3">
|
||||
<div class="space-y-2">
|
||||
<h3 class="text-lg font-semibold flex items-center">
|
||||
<i class="bi bi-gear mr-2 text-info"></i>Download Settings
|
||||
</h3>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="downloadAction">
|
||||
<span class="label-text">Post Download Action</span>
|
||||
</label>
|
||||
<select class="select select-bordered" id="downloadAction" name="downloadAction">
|
||||
<option value="symlink" selected>Create Symlink</option>
|
||||
<option value="download">Download Files</option>
|
||||
<option value="none">No Action</option>
|
||||
</select>
|
||||
<div class="label">
|
||||
<span class="label-text-alt">How to handle files after download completion</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="downloadFolder">
|
||||
<span class="label-text">Download Folder</span>
|
||||
</label>
|
||||
<input type="text"
|
||||
class="input input-bordered"
|
||||
id="downloadFolder"
|
||||
name="downloadFolder"
|
||||
placeholder="/downloads/torrents">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Leave empty to use default qBittorrent folder</span>
|
||||
</div>
|
||||
<div class="grid grid-cols-1 lg:grid-cols-3 gap-3 space-y-4">
|
||||
<div class="form-control">
|
||||
<label class="label" for="downloadAction">
|
||||
<span class="label-text">Post Download Action</span>
|
||||
</label>
|
||||
<select class="select select-bordered" id="downloadAction" name="downloadAction">
|
||||
<option value="symlink" selected>Create Symlink</option>
|
||||
<option value="download">Download Files</option>
|
||||
<option value="none">No Action</option>
|
||||
</select>
|
||||
<div class="label">
|
||||
<span class="label-text-alt">How to handle files after download completion</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="space-y-2">
|
||||
<h3 class="text-lg font-semibold flex items-center">
|
||||
<i class="bi bi-tags mr-2 text-warning"></i>Categorization
|
||||
</h3>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="arr">
|
||||
<span class="label-text">Arr Category</span>
|
||||
</label>
|
||||
<input type="text"
|
||||
class="input input-bordered"
|
||||
id="arr"
|
||||
name="arr"
|
||||
placeholder="sonarr, radarr, etc.">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Optional: Specify which Arr service should handle this</span>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label" for="downloadFolder">
|
||||
<span class="label-text">Download Folder</span>
|
||||
</label>
|
||||
<input type="text"
|
||||
class="input input-bordered"
|
||||
id="downloadFolder"
|
||||
name="downloadFolder"
|
||||
placeholder="/downloads/torrents">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Leave empty to use default qBittorrent folder</span>
|
||||
</div>
|
||||
|
||||
{{ if .HasMultiDebrid }}
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid">
|
||||
<span class="label-text">Debrid Service</span>
|
||||
</label>
|
||||
<select class="select select-bordered" id="debrid" name="debrid">
|
||||
{{ range $index, $debrid := .Debrids }}
|
||||
<option value="{{ $debrid }}" {{ if eq $index 0 }}selected{{end}}>
|
||||
{{ $debrid }}
|
||||
</option>
|
||||
{{ end }}
|
||||
</select>
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Choose which debrid service to use</span>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
</div>
|
||||
|
||||
<div class="form-control">
|
||||
<label class="label" for="arr">
|
||||
<span class="label-text">Arr Category</span>
|
||||
</label>
|
||||
<input type="text"
|
||||
class="input input-bordered"
|
||||
id="arr"
|
||||
name="arr"
|
||||
placeholder="sonarr, radarr, etc.">
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Optional: Specify which Arr service should handle this</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{ if .HasMultiDebrid }}
|
||||
<div class="form-control">
|
||||
<label class="label" for="debrid">
|
||||
<span class="label-text">Debrid Service</span>
|
||||
</label>
|
||||
<select class="select select-bordered" id="debrid" name="debrid">
|
||||
{{ range $index, $debrid := .Debrids }}
|
||||
<option value="{{ $debrid }}" {{ if eq $index 0 }}selected{{end}}>
|
||||
{{ $debrid }}
|
||||
</option>
|
||||
{{ end }}
|
||||
</select>
|
||||
<div class="label">
|
||||
<span class="label-text-alt">Choose which debrid service to use</span>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="downloadUncached" id="downloadUncached">
|
||||
<div>
|
||||
<span class="label-text font-medium">Download Uncached Content</span>
|
||||
<div class="label-text-alt">Allow downloading of content not cached by debrid service</div>
|
||||
<div class="label-text-alt">Allow downloading of content not cached by debrid service
|
||||
</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="skipMultiSeason" id="skipMultiSeason">
|
||||
<div>
|
||||
<span class="label-text font-medium">Skip Multi-Season Checker</span>
|
||||
<div class="label-text-alt">Skip the multi-season episode checker for TV shows</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
<div class="form-control">
|
||||
<label class="label cursor-pointer justify-start gap-3">
|
||||
<input type="checkbox" class="checkbox" name="rmTrackerUrls" id="rmTrackerUrls" {{ if .AlwaysRmTrackerUrls }}checked disabled{{ end }}>
|
||||
<div>
|
||||
<span class="label-text font-medium">Remove Tracker</span>
|
||||
<div class="label-text-alt">Allows you to <a href="https://sirrobot01.github.io/decypharr/features/repair-worker/private-tracker-downloads" class="link link-hover font-semibold" target="_blank">download private tracker torrents</a> with lower risk</div>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
@@ -1,6 +1,16 @@
|
||||
{{ define "index" }}
|
||||
<div class="space-y-6">
|
||||
|
||||
{{ if .NeedSetup }}
|
||||
<div role="alert" class="alert alert-warning">
|
||||
<i class="bi bi-exclamation-triangle text-xl"></i>
|
||||
<div>
|
||||
<h3 class="font-bold">Configuration Required</h3>
|
||||
<div class="text-sm">Your configuration is incomplete. Please complete the setup in the <a href="{{.URLBase}}settings" class="link link-hover font-semibold">Settings page</a>.</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
<div class="card bg-base-100 shadow-xl">
|
||||
<div class="card-body">
|
||||
<div class="flex flex-col lg:flex-row justify-between items-start lg:items-center gap-4">
|
||||
|
||||
@@ -54,7 +54,7 @@
|
||||
<li><a href="{{.URLBase}}repair" class="{{if eq .Page "repair"}}active{{end}}">
|
||||
<i class="bi bi-wrench-adjustable text-accent"></i>Repair
|
||||
</a></li>
|
||||
<li><a href="{{.URLBase}}config" class="{{if eq .Page "config"}}active{{end}}">
|
||||
<li><a href="{{.URLBase}}settings" class="{{if eq .Page "config"}}active{{end}}">
|
||||
<i class="bi bi-gear text-info"></i>Settings
|
||||
</a></li>
|
||||
<li><a href="{{.URLBase}}webdav" target="_blank">
|
||||
@@ -85,7 +85,7 @@
|
||||
<i class="bi bi-wrench-adjustable"></i>
|
||||
<span class="hidden xl:inline">Repair</span>
|
||||
</a></li>
|
||||
<li><a href="{{.URLBase}}config" class="{{if eq .Page "config"}}active{{end}} tooltip tooltip-bottom" data-tip="Settings">
|
||||
<li><a href="{{.URLBase}}settings" class="{{if eq .Page "config"}}active{{end}} tooltip tooltip-bottom" data-tip="Settings">
|
||||
<i class="bi bi-gear"></i>
|
||||
<span class="hidden xl:inline">Settings</span>
|
||||
</a></li>
|
||||
|
||||
@@ -75,7 +75,7 @@
|
||||
|
||||
// Handle skip auth button
|
||||
skipAuthBtn.addEventListener('click', function() {
|
||||
window.decypharrUtils.fetcher('/skip-auth', { method: 'GET' })
|
||||
window.decypharrUtils.fetcher('/skip-auth', { method: 'POST' })
|
||||
.then(response => {
|
||||
if (response.ok) {
|
||||
window.location.href = window.decypharrUtils.joinURL(window.urlBase, '/');
|
||||
|
||||
@@ -1,5 +1,15 @@
|
||||
{{ define "repair" }}
|
||||
<div class="space-y-6">
|
||||
{{ if .NeedSetup }}
|
||||
<div role="alert" class="alert alert-warning">
|
||||
<i class="bi bi-exclamation-triangle text-xl"></i>
|
||||
<div>
|
||||
<h3 class="font-bold">Configuration Required</h3>
|
||||
<div class="text-sm">Your configuration is incomplete. Please complete the setup in the <a href="{{.URLBase}}settings" class="link link-hover font-semibold">Settings page</a>.</div>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
|
||||
<div class="card bg-base-100 shadow-xl">
|
||||
<div class="card-body">
|
||||
<h2 class="card-title text-2xl mb-6">
|
||||
|
||||
@@ -371,6 +371,9 @@
|
||||
const statusBadge = account.disabled ?
|
||||
'<span class="badge badge-error badge-sm">Disabled</span>' :
|
||||
'<span class="badge badge-success badge-sm">Active</span>';
|
||||
const inUseBadge = account.in_use ?
|
||||
'<span class="badge badge-info badge-sm">In Use</span>' :
|
||||
'';
|
||||
|
||||
html += `
|
||||
<div class="card bg-base-100 compact">
|
||||
@@ -380,6 +383,7 @@
|
||||
<div class="flex items-center gap-2">
|
||||
<h5 class="font-medium text-sm">Account #${account.order + 1}</h5>
|
||||
${statusBadge}
|
||||
${inUseBadge}
|
||||
</div>
|
||||
<p class="text-xs text-base-content/70 mt-1">${account.username || 'No username'}</p>
|
||||
</div>
|
||||
|
||||
@@ -2,9 +2,10 @@ package web
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (wb *Web) LoginHandler(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -113,9 +114,11 @@ func (wb *Web) RegisterHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (wb *Web) IndexHandler(w http.ResponseWriter, r *http.Request) {
|
||||
cfg := config.Get()
|
||||
data := map[string]interface{}{
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "index",
|
||||
"Title": "Torrents",
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "index",
|
||||
"Title": "Torrents",
|
||||
"NeedSetup": cfg.CheckSetup() != nil,
|
||||
"SetupError": cfg.CheckSetup(),
|
||||
}
|
||||
_ = wb.templates.ExecuteTemplate(w, "layout", data)
|
||||
}
|
||||
@@ -127,12 +130,15 @@ func (wb *Web) DownloadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
debrids = append(debrids, d.Name)
|
||||
}
|
||||
data := map[string]interface{}{
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "download",
|
||||
"Title": "Download",
|
||||
"Debrids": debrids,
|
||||
"HasMultiDebrid": len(debrids) > 1,
|
||||
"DownloadFolder": cfg.QBitTorrent.DownloadFolder,
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "download",
|
||||
"Title": "Download",
|
||||
"Debrids": debrids,
|
||||
"HasMultiDebrid": len(debrids) > 1,
|
||||
"DownloadFolder": cfg.QBitTorrent.DownloadFolder,
|
||||
"AlwaysRmTrackerUrls": cfg.QBitTorrent.AlwaysRmTrackerUrls,
|
||||
"NeedSetup": cfg.CheckSetup() != nil,
|
||||
"SetupError": cfg.CheckSetup(),
|
||||
}
|
||||
_ = wb.templates.ExecuteTemplate(w, "layout", data)
|
||||
}
|
||||
@@ -140,9 +146,11 @@ func (wb *Web) DownloadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (wb *Web) RepairHandler(w http.ResponseWriter, r *http.Request) {
|
||||
cfg := config.Get()
|
||||
data := map[string]interface{}{
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "repair",
|
||||
"Title": "Repair",
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "repair",
|
||||
"Title": "Repair",
|
||||
"NeedSetup": cfg.CheckSetup() != nil,
|
||||
"SetupError": cfg.CheckSetup(),
|
||||
}
|
||||
_ = wb.templates.ExecuteTemplate(w, "layout", data)
|
||||
}
|
||||
@@ -150,9 +158,11 @@ func (wb *Web) RepairHandler(w http.ResponseWriter, r *http.Request) {
|
||||
func (wb *Web) ConfigHandler(w http.ResponseWriter, r *http.Request) {
|
||||
cfg := config.Get()
|
||||
data := map[string]interface{}{
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "config",
|
||||
"Title": "Config",
|
||||
"URLBase": cfg.URLBase,
|
||||
"Page": "config",
|
||||
"Title": "Config",
|
||||
"NeedSetup": cfg.CheckSetup() != nil,
|
||||
"SetupError": cfg.CheckSetup(),
|
||||
}
|
||||
_ = wb.templates.ExecuteTemplate(w, "layout", data)
|
||||
}
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
package web
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"embed"
|
||||
"html/template"
|
||||
|
||||
"github.com/gorilla/sessions"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/logger"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"html/template"
|
||||
"os"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
)
|
||||
|
||||
var restartFunc func()
|
||||
@@ -60,10 +60,12 @@ type Web struct {
|
||||
logger zerolog.Logger
|
||||
cookie *sessions.CookieStore
|
||||
templates *template.Template
|
||||
torrents *store.TorrentStorage
|
||||
torrents *wire.TorrentStorage
|
||||
urlBase string
|
||||
}
|
||||
|
||||
func New() *Web {
|
||||
cfg := config.Get()
|
||||
templates := template.Must(template.ParseFS(
|
||||
content,
|
||||
"templates/layout.html",
|
||||
@@ -75,8 +77,7 @@ func New() *Web {
|
||||
"templates/login.html",
|
||||
"templates/register.html",
|
||||
))
|
||||
secretKey := cmp.Or(os.Getenv("DECYPHARR_SECRET_KEY"), "\"wqj(v%lj*!-+kf@4&i95rhh_!5_px5qnuwqbr%cjrvrozz_r*(\"")
|
||||
cookieStore := sessions.NewCookieStore([]byte(secretKey))
|
||||
cookieStore := sessions.NewCookieStore([]byte(cfg.SecretKey()))
|
||||
cookieStore.Options = &sessions.Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 7,
|
||||
@@ -86,6 +87,7 @@ func New() *Web {
|
||||
logger: logger.New("ui"),
|
||||
templates: templates,
|
||||
cookie: cookieStore,
|
||||
torrents: store.Get().Torrents(),
|
||||
torrents: wire.Get().Torrents(),
|
||||
urlBase: cfg.URLBase,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,36 +1,20 @@
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/store"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
var streamingTransport = &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
MaxIdleConns: 200,
|
||||
MaxIdleConnsPerHost: 100,
|
||||
MaxConnsPerHost: 200,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ResponseHeaderTimeout: 60 * time.Second, // give the upstream a minute to send headers
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
DisableKeepAlives: true, // close after each request
|
||||
ForceAttemptHTTP2: false, // don’t speak HTTP/2
|
||||
// this line is what truly blocks HTTP/2:
|
||||
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
|
||||
}
|
||||
|
||||
var sharedClient = &http.Client{
|
||||
Transport: streamingTransport,
|
||||
Timeout: 0,
|
||||
}
|
||||
const (
|
||||
MaxNetworkRetries = 3
|
||||
MaxLinkRetries = 10
|
||||
)
|
||||
|
||||
type streamError struct {
|
||||
Err error
|
||||
@@ -50,7 +34,6 @@ type File struct {
|
||||
name string
|
||||
torrentName string
|
||||
link string
|
||||
downloadLink string
|
||||
size int64
|
||||
isDir bool
|
||||
fileId string
|
||||
@@ -76,26 +59,21 @@ func (f *File) Close() error {
|
||||
// This is just to satisfy the os.File interface
|
||||
f.content = nil
|
||||
f.children = nil
|
||||
f.downloadLink = ""
|
||||
f.readOffset = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) getDownloadLink() (string, error) {
|
||||
func (f *File) getDownloadLink() (types.DownloadLink, error) {
|
||||
// Check if we already have a final URL cached
|
||||
|
||||
if f.downloadLink != "" && isValidURL(f.downloadLink) {
|
||||
return f.downloadLink, nil
|
||||
}
|
||||
downloadLink, err := f.cache.GetDownloadLink(f.torrentName, f.name, f.link)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return downloadLink, err
|
||||
}
|
||||
if downloadLink != "" && isValidURL(downloadLink) {
|
||||
f.downloadLink = downloadLink
|
||||
return downloadLink, nil
|
||||
err = downloadLink.Valid()
|
||||
if err != nil {
|
||||
return types.DownloadLink{}, err
|
||||
}
|
||||
return "", os.ErrNotExist
|
||||
return downloadLink, nil
|
||||
}
|
||||
|
||||
func (f *File) getDownloadByteRange() (*[2]int64, error) {
|
||||
@@ -106,7 +84,6 @@ func (f *File) getDownloadByteRange() (*[2]int64, error) {
|
||||
return byteRange, nil
|
||||
}
|
||||
|
||||
// setVideoStreamingHeaders sets the necessary headers for video streaming
|
||||
// It returns error and a boolean indicating if the request is a range request
|
||||
func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) error {
|
||||
content := f.content
|
||||
@@ -140,82 +117,45 @@ func (f *File) servePreloadedContent(w http.ResponseWriter, r *http.Request) err
|
||||
}
|
||||
|
||||
func (f *File) StreamResponse(w http.ResponseWriter, r *http.Request) error {
|
||||
// Handle preloaded content files
|
||||
if f.content != nil {
|
||||
return f.servePreloadedContent(w, r)
|
||||
}
|
||||
_logger := f.cache.Logger()
|
||||
|
||||
// Try streaming with retry logic
|
||||
return f.streamWithRetry(w, r, 0)
|
||||
start, end := f.getRange(r)
|
||||
|
||||
resp, err := f.cache.Stream(r.Context(), start, end, f.getDownloadLink)
|
||||
if err != nil {
|
||||
_logger.Error().Err(err).Str("file", f.name).Msg("Failed to stream with initial link")
|
||||
return &streamError{Err: err, StatusCode: http.StatusRequestedRangeNotSatisfiable}
|
||||
}
|
||||
defer func(Body io.ReadCloser) {
|
||||
_ = Body.Close()
|
||||
}(resp.Body)
|
||||
return f.handleSuccessfulResponse(w, resp, start, end)
|
||||
}
|
||||
|
||||
func (f *File) streamWithRetry(w http.ResponseWriter, r *http.Request, retryCount int) error {
|
||||
const maxRetries = 3
|
||||
_log := f.cache.Logger()
|
||||
|
||||
// Get download link (with caching optimization)
|
||||
downloadLink, err := f.getDownloadLink()
|
||||
if err != nil {
|
||||
return &streamError{Err: err, StatusCode: http.StatusPreconditionFailed}
|
||||
}
|
||||
|
||||
if downloadLink == "" {
|
||||
return &streamError{Err: fmt.Errorf("empty download link"), StatusCode: http.StatusNotFound}
|
||||
}
|
||||
|
||||
// Create upstream request with streaming optimizations
|
||||
upstreamReq, err := http.NewRequest("GET", downloadLink, nil)
|
||||
if err != nil {
|
||||
return &streamError{Err: err, StatusCode: http.StatusInternalServerError}
|
||||
}
|
||||
|
||||
setVideoStreamingHeaders(upstreamReq)
|
||||
|
||||
// Handle range requests (critical for video seeking)
|
||||
isRangeRequest := f.handleRangeRequest(upstreamReq, r, w)
|
||||
if isRangeRequest == -1 {
|
||||
return &streamError{Err: fmt.Errorf("invalid range"), StatusCode: http.StatusRequestedRangeNotSatisfiable}
|
||||
}
|
||||
|
||||
resp, err := sharedClient.Do(upstreamReq)
|
||||
if err != nil {
|
||||
return &streamError{Err: err, StatusCode: http.StatusServiceUnavailable}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Handle upstream errors with retry logic
|
||||
shouldRetry, retryErr := f.handleUpstream(resp, retryCount, maxRetries)
|
||||
if shouldRetry && retryCount < maxRetries {
|
||||
// Retry with new download link
|
||||
_log.Debug().
|
||||
Int("retry_count", retryCount+1).
|
||||
Str("file", f.name).
|
||||
Msg("Retrying stream request")
|
||||
return f.streamWithRetry(w, r, retryCount+1)
|
||||
}
|
||||
if retryErr != nil {
|
||||
return retryErr
|
||||
}
|
||||
|
||||
// Determine status code based on range request
|
||||
func (f *File) handleSuccessfulResponse(w http.ResponseWriter, resp *http.Response, start, end int64) error {
|
||||
statusCode := http.StatusOK
|
||||
if isRangeRequest == 1 {
|
||||
if start > 0 || end > 0 {
|
||||
statusCode = http.StatusPartialContent
|
||||
}
|
||||
|
||||
// Set headers before streaming
|
||||
// Copy relevant headers
|
||||
if contentLength := resp.Header.Get("Content-Length"); contentLength != "" {
|
||||
w.Header().Set("Content-Length", contentLength)
|
||||
}
|
||||
|
||||
if contentRange := resp.Header.Get("Content-Range"); contentRange != "" && isRangeRequest == 1 {
|
||||
if contentRange := resp.Header.Get("Content-Range"); contentRange != "" && statusCode == http.StatusPartialContent {
|
||||
w.Header().Set("Content-Range", contentRange)
|
||||
}
|
||||
|
||||
if err := f.streamBuffer(w, resp.Body, statusCode); err != nil {
|
||||
return err
|
||||
// Copy other important headers
|
||||
if contentType := resp.Header.Get("Content-Type"); contentType != "" {
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
}
|
||||
return nil
|
||||
|
||||
return f.streamBuffer(w, resp.Body, statusCode)
|
||||
}
|
||||
|
||||
func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int) error {
|
||||
@@ -228,7 +168,7 @@ func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int
|
||||
if n, err := src.Read(smallBuf); n > 0 {
|
||||
// Write status code just before first successful write
|
||||
w.WriteHeader(statusCode)
|
||||
|
||||
|
||||
if _, werr := w.Write(smallBuf[:n]); werr != nil {
|
||||
if isClientDisconnection(werr) {
|
||||
return &streamError{Err: werr, StatusCode: 0, IsClientDisconnection: true}
|
||||
@@ -266,114 +206,21 @@ func (f *File) streamBuffer(w http.ResponseWriter, src io.Reader, statusCode int
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) handleUpstream(resp *http.Response, retryCount, maxRetries int) (shouldRetry bool, err error) {
|
||||
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
_log := f.cache.Logger()
|
||||
|
||||
// Clean up response body properly
|
||||
cleanupResp := func(resp *http.Response) {
|
||||
if resp.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
}
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusServiceUnavailable:
|
||||
// Read the body to check for specific error messages
|
||||
body, readErr := io.ReadAll(resp.Body)
|
||||
cleanupResp(resp)
|
||||
|
||||
if readErr != nil {
|
||||
_log.Error().Err(readErr).Msg("Failed to read response body")
|
||||
return false, &streamError{
|
||||
Err: fmt.Errorf("failed to read error response: %w", readErr),
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
}
|
||||
}
|
||||
|
||||
bodyStr := string(body)
|
||||
if strings.Contains(bodyStr, "you have exceeded your traffic") {
|
||||
_log.Debug().
|
||||
Str("file", f.name).
|
||||
Int("retry_count", retryCount).
|
||||
Msg("Bandwidth exceeded. Marking link as invalid")
|
||||
|
||||
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "bandwidth_exceeded")
|
||||
|
||||
// Retry with a different API key if available and we haven't exceeded retries
|
||||
if retryCount < maxRetries {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, &streamError{
|
||||
Err: fmt.Errorf("bandwidth exceeded after %d retries", retryCount),
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
}
|
||||
}
|
||||
|
||||
return false, &streamError{
|
||||
Err: fmt.Errorf("service unavailable: %s", bodyStr),
|
||||
StatusCode: http.StatusServiceUnavailable,
|
||||
}
|
||||
|
||||
case http.StatusNotFound:
|
||||
cleanupResp(resp)
|
||||
|
||||
_log.Debug().
|
||||
Str("file", f.name).
|
||||
Int("retry_count", retryCount).
|
||||
Msg("Link not found (404). Marking link as invalid and regenerating")
|
||||
|
||||
f.cache.MarkDownloadLinkAsInvalid(f.link, f.downloadLink, "link_not_found")
|
||||
|
||||
// Try to regenerate download link if we haven't exceeded retries
|
||||
if retryCount < maxRetries {
|
||||
// Clear cached link to force regeneration
|
||||
f.downloadLink = ""
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, &streamError{
|
||||
Err: fmt.Errorf("file not found after %d retries", retryCount),
|
||||
StatusCode: http.StatusNotFound,
|
||||
}
|
||||
|
||||
default:
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
cleanupResp(resp)
|
||||
|
||||
_log.Error().
|
||||
Int("status_code", resp.StatusCode).
|
||||
Str("file", f.name).
|
||||
Str("response_body", string(body)).
|
||||
Msg("Unexpected upstream error")
|
||||
|
||||
return false, &streamError{
|
||||
Err: fmt.Errorf("upstream error %d: %s", resp.StatusCode, string(body)),
|
||||
StatusCode: http.StatusBadGateway,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w http.ResponseWriter) int {
|
||||
func (f *File) getRange(r *http.Request) (int64, int64) {
|
||||
rangeHeader := r.Header.Get("Range")
|
||||
if rangeHeader == "" {
|
||||
// For video files, apply byte range if exists
|
||||
if byteRange, _ := f.getDownloadByteRange(); byteRange != nil {
|
||||
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", byteRange[0], byteRange[1]))
|
||||
return byteRange[0], byteRange[1]
|
||||
}
|
||||
return 0 // No range request
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// Parse range request
|
||||
ranges, err := parseRange(rangeHeader, f.size)
|
||||
if err != nil || len(ranges) != 1 {
|
||||
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", f.size))
|
||||
return -1 // Invalid range
|
||||
// Invalid range, return full content
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// Apply byte range offset if exists
|
||||
@@ -384,9 +231,7 @@ func (f *File) handleRangeRequest(upstreamReq *http.Request, r *http.Request, w
|
||||
start += byteRange[0]
|
||||
end += byteRange[0]
|
||||
}
|
||||
|
||||
upstreamReq.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
|
||||
return 1 // Valid range request
|
||||
return start, end
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"golang.org/x/net/webdav"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -15,6 +13,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"golang.org/x/net/webdav"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid/store"
|
||||
@@ -451,15 +452,15 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
|
||||
// Handle nginx proxy (X-Accel-Redirect)
|
||||
if file.content == nil && !file.isRar && h.cache.StreamWithRclone() {
|
||||
link, err := file.getDownloadLink()
|
||||
if err != nil || link == "" {
|
||||
if err != nil || link.Empty() {
|
||||
http.Error(w, "Could not fetch download link", http.StatusPreconditionFailed)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fi.Name()))
|
||||
w.Header().Set("X-Accel-Redirect", link)
|
||||
w.Header().Set("X-Accel-Redirect", link.DownloadLink)
|
||||
w.Header().Set("X-Accel-Buffering", "no")
|
||||
http.Redirect(w, r, link, http.StatusFound)
|
||||
http.Redirect(w, r, link.DownloadLink, http.StatusFound)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -488,7 +489,6 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (h *Handler) handleHead(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -2,21 +2,15 @@ package webdav
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/stanNthe5/stringbuf"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func isValidURL(str string) bool {
|
||||
u, err := url.Parse(str)
|
||||
// A valid URL should parse without error, and have a non-empty scheme and host.
|
||||
return err == nil && u.Scheme != "" && u.Host != ""
|
||||
}
|
||||
"github.com/stanNthe5/stringbuf"
|
||||
)
|
||||
|
||||
var pctHex = "0123456789ABCDEF"
|
||||
|
||||
@@ -134,14 +128,6 @@ func writeXml(w http.ResponseWriter, status int, buf stringbuf.StringBuf) {
|
||||
_, _ = w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
func hasHeadersWritten(w http.ResponseWriter) bool {
|
||||
// Most ResponseWriter implementations support this
|
||||
if hw, ok := w.(interface{ Written() bool }); ok {
|
||||
return hw.Written()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isClientDisconnection(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
@@ -218,12 +204,3 @@ func parseRange(s string, size int64) ([]httpRange, error) {
|
||||
}
|
||||
return ranges, nil
|
||||
}
|
||||
|
||||
func setVideoStreamingHeaders(req *http.Request) {
|
||||
// Request optimizations for faster response
|
||||
req.Header.Set("Accept", "*/*")
|
||||
req.Header.Set("Accept-Encoding", "identity")
|
||||
req.Header.Set("Connection", "keep-alive")
|
||||
req.Header.Set("User-Agent", "VideoStream/1.0")
|
||||
req.Header.Set("Priority", "u=1")
|
||||
}
|
||||
|
||||
@@ -4,10 +4,6 @@ import (
|
||||
"context"
|
||||
"embed"
|
||||
"fmt"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/pkg/store"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -16,6 +12,11 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/wire"
|
||||
)
|
||||
|
||||
//go:embed templates/*
|
||||
@@ -33,42 +34,8 @@ var (
|
||||
}
|
||||
return strings.Join(segments, "/")
|
||||
},
|
||||
"formatSize": func(bytes int64) string {
|
||||
const (
|
||||
KB = 1024
|
||||
MB = 1024 * KB
|
||||
GB = 1024 * MB
|
||||
TB = 1024 * GB
|
||||
)
|
||||
|
||||
var size float64
|
||||
var unit string
|
||||
|
||||
switch {
|
||||
case bytes >= TB:
|
||||
size = float64(bytes) / TB
|
||||
unit = "TB"
|
||||
case bytes >= GB:
|
||||
size = float64(bytes) / GB
|
||||
unit = "GB"
|
||||
case bytes >= MB:
|
||||
size = float64(bytes) / MB
|
||||
unit = "MB"
|
||||
case bytes >= KB:
|
||||
size = float64(bytes) / KB
|
||||
unit = "KB"
|
||||
default:
|
||||
size = float64(bytes)
|
||||
unit = "bytes"
|
||||
}
|
||||
|
||||
// Format to 2 decimal places for larger units, no decimals for bytes
|
||||
if unit == "bytes" {
|
||||
return fmt.Sprintf("%.0f %s", size, unit)
|
||||
}
|
||||
return fmt.Sprintf("%.2f %s", size, unit)
|
||||
},
|
||||
"hasSuffix": strings.HasSuffix,
|
||||
"formatSize": utils.FormatSize,
|
||||
"hasSuffix": strings.HasSuffix,
|
||||
}
|
||||
tplRoot = template.Must(template.ParseFS(templatesFS, "templates/root.html"))
|
||||
tplDirectory = template.Must(template.New("").Funcs(funcMap).ParseFS(templatesFS, "templates/directory.html"))
|
||||
@@ -97,7 +64,7 @@ func New() *WebDav {
|
||||
Handlers: make([]*Handler, 0),
|
||||
URLBase: urlBase,
|
||||
}
|
||||
for name, c := range store.Get().Debrid().Caches() {
|
||||
for name, c := range wire.Get().Debrid().Caches() {
|
||||
h := NewHandler(name, urlBase, c, c.Logger())
|
||||
w.Handlers = append(w.Handlers, h)
|
||||
}
|
||||
@@ -106,8 +73,8 @@ func New() *WebDav {
|
||||
|
||||
func (wd *WebDav) Routes() http.Handler {
|
||||
wr := chi.NewRouter()
|
||||
wr.Use(middleware.StripSlashes)
|
||||
wr.Use(wd.commonMiddleware)
|
||||
//wr.Use(wd.authMiddleware) Disable auth for now
|
||||
|
||||
wd.setupRootHandler(wr)
|
||||
wd.mountHandlers(wr)
|
||||
@@ -178,6 +145,21 @@ func (wd *WebDav) commonMiddleware(next http.Handler) http.Handler {
|
||||
})
|
||||
}
|
||||
|
||||
// authMiddleware enforces HTTP Basic authentication on WebDAV requests.
// Auth is only applied when both the global UseAuth flag and the
// WebDAV-specific EnableWebdavAuth flag are set; otherwise every request
// passes straight through to the next handler.
// NOTE(review): this middleware appears to be disabled in Routes
// ("Disable auth for now") — confirm before relying on it.
func (wd *WebDav) authMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		cfg := config.Get()
		if cfg.UseAuth && cfg.EnableWebdavAuth {
			username, password, ok := r.BasicAuth()
			if !ok || !config.VerifyAuth(username, password) {
				// Missing or invalid credentials: challenge the client.
				w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
				http.Error(w, "Unauthorized", http.StatusUnauthorized)
				return
			}
		}
		next.ServeHTTP(w, r)
	})
}
|
||||
|
||||
func (wd *WebDav) handleGetRoot() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
|
||||
625
pkg/wire/downloader.go
Normal file
625
pkg/wire/downloader.go
Normal file
@@ -0,0 +1,625 @@
|
||||
package wire
|
||||
|
||||
import (
	"crypto/md5"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/cavaliergopher/grab/v3"
	"github.com/google/uuid"

	"github.com/sirrobot01/decypharr/internal/utils"
	"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
|
||||
|
||||
// Multi-season detection patterns
var (
	// Pre-compiled patterns for multi-season replacement. Each entry pairs a
	// regexp that recognizes a multi-season marker in a release name with a
	// fmt format string used to substitute a single target season number.
	multiSeasonReplacements = []multiSeasonPattern{
		// S01-08 -> S01 (or whatever target season)
		{regexp.MustCompile(`(?i)S(\d{1,2})-\d{1,2}`), "S%02d"},

		// S01-S08 -> S01
		{regexp.MustCompile(`(?i)S(\d{1,2})-S\d{1,2}`), "S%02d"},

		// Season 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Season\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},

		// Seasons 1-8 -> Season 1
		{regexp.MustCompile(`(?i)Seasons\.?\s*\d{1,2}-\d{1,2}`), "Season %02d"},

		// Complete Series -> Season X
		{regexp.MustCompile(`(?i)Complete\.?Series`), "Season %02d"},

		// All Seasons -> Season X
		{regexp.MustCompile(`(?i)All\.?Seasons?`), "Season %02d"},
	}

	// seasonPattern extracts a single season number from markers like
	// "S03" or "Season 3" (case-insensitive, 1-2 digits).
	seasonPattern = regexp.MustCompile(`(?i)(?:season\.?\s*|s)(\d{1,2})`)
	// qualityIndicators locates the first quality/codec/source tag so a
	// season marker can be inserted just before it.
	qualityIndicators = regexp.MustCompile(`(?i)\b(2160p|1080p|720p|BluRay|WEB-DL|HDTV|x264|x265|HEVC)`)

	// multiSeasonIndicators flag torrent names that span several seasons
	// (complete series, "all seasons", or explicit ranges like "S01-S05").
	multiSeasonIndicators = []*regexp.Regexp{
		regexp.MustCompile(`(?i)complete\.?series`),
		regexp.MustCompile(`(?i)all\.?seasons?`),
		regexp.MustCompile(`(?i)season\.?\s*\d+\s*-\s*\d+`),
		regexp.MustCompile(`(?i)s\d+\s*-\s*s?\d+`),
		regexp.MustCompile(`(?i)seasons?\s*\d+\s*-\s*\d+`),
	}
)
|
||||
|
||||
// multiSeasonPattern pairs a detection regexp with the fmt format string
// that replaces the matched multi-season marker with one season number.
type multiSeasonPattern struct {
	pattern     *regexp.Regexp // matches a multi-season marker in a release name
	replacement string         // fmt format (e.g. "S%02d") fed the target season
}
|
||||
|
||||
// SeasonInfo describes one season carved out of a multi-season torrent.
type SeasonInfo struct {
	SeasonNumber int          // season this group represents
	Files        []types.File // files belonging to this season
	InfoHash     string       // synthetic per-season hash (see generateSeasonHash)
	Name         string       // season-specific release name
}
|
||||
|
||||
func (s *Store) replaceMultiSeasonPattern(name string, targetSeason int) string {
|
||||
result := name
|
||||
|
||||
// Apply each pre-compiled pattern replacement
|
||||
for _, msp := range multiSeasonReplacements {
|
||||
if msp.pattern.MatchString(result) {
|
||||
replacement := fmt.Sprintf(msp.replacement, targetSeason)
|
||||
result = msp.pattern.ReplaceAllString(result, replacement)
|
||||
s.logger.Debug().Msgf("Applied pattern replacement: %s -> %s", name, result)
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// If no multi-season pattern found, try to insert season info intelligently
|
||||
return s.insertSeasonIntoName(result, targetSeason)
|
||||
}
|
||||
|
||||
func (s *Store) insertSeasonIntoName(name string, seasonNum int) string {
|
||||
// Check if season info already exists
|
||||
if seasonPattern.MatchString(name) {
|
||||
return name // Already has season info, keep as is
|
||||
}
|
||||
|
||||
// Try to find a good insertion point (before quality indicators)
|
||||
if loc := qualityIndicators.FindStringIndex(name); loc != nil {
|
||||
// Insert season before quality info
|
||||
before := strings.TrimSpace(name[:loc[0]])
|
||||
after := name[loc[0]:]
|
||||
return fmt.Sprintf("%s S%02d %s", before, seasonNum, after)
|
||||
}
|
||||
|
||||
// If no quality indicators found, append at the end
|
||||
return fmt.Sprintf("%s S%02d", name, seasonNum)
|
||||
}
|
||||
|
||||
// detectMultiSeason decides whether a torrent packs multiple seasons and,
// if so, splits its files into per-season groups. It returns
// (isMultiSeason, seasons, error); for single-season torrents it returns
// (false, nil, nil).
func (s *Store) detectMultiSeason(debridTorrent *types.Torrent) (bool, []SeasonInfo, error) {
	torrentName := debridTorrent.Name
	files := debridTorrent.GetFiles()

	s.logger.Debug().Msgf("Analyzing torrent for multi-season: %s", torrentName)

	// Find all seasons present in the files
	seasonsFound := s.findAllSeasons(files)

	// Multi-season if more than one season appears in the files, or the
	// torrent name itself carries a multi-season marker.
	isMultiSeason := len(seasonsFound) > 1 || s.hasMultiSeasonIndicators(torrentName)

	if !isMultiSeason {
		return false, nil, nil
	}

	s.logger.Info().Msgf("Multi-season torrent detected with seasons: %v", getSortedSeasons(seasonsFound))

	// Group files by season
	seasonGroups := s.groupFilesBySeason(files, seasonsFound)

	// Create SeasonInfo objects with proper naming. Note: map iteration
	// order is random, so the resulting slice order is not deterministic.
	var seasons []SeasonInfo
	for seasonNum, seasonFiles := range seasonGroups {
		if len(seasonFiles) == 0 {
			continue
		}

		// Generate season-specific name preserving all metadata
		seasonName := s.generateSeasonSpecificName(torrentName, seasonNum)

		seasons = append(seasons, SeasonInfo{
			SeasonNumber: seasonNum,
			Files:        seasonFiles,
			InfoHash:     s.generateSeasonHash(debridTorrent.InfoHash, seasonNum),
			Name:         seasonName,
		})
	}

	return true, seasons, nil
}
|
||||
|
||||
// generateSeasonSpecificName creates season name preserving all original metadata
|
||||
func (s *Store) generateSeasonSpecificName(originalName string, seasonNum int) string {
|
||||
// Find and replace the multi-season pattern with single season
|
||||
seasonName := s.replaceMultiSeasonPattern(originalName, seasonNum)
|
||||
|
||||
s.logger.Debug().Msgf("Generated season name for S%02d: %s", seasonNum, seasonName)
|
||||
|
||||
return seasonName
|
||||
}
|
||||
|
||||
func (s *Store) findAllSeasons(files []types.File) map[int]bool {
|
||||
seasons := make(map[int]bool)
|
||||
|
||||
for _, file := range files {
|
||||
// Check filename first
|
||||
if season := s.extractSeason(file.Name); season > 0 {
|
||||
seasons[season] = true
|
||||
continue
|
||||
}
|
||||
|
||||
// Check full path
|
||||
if season := s.extractSeason(file.Path); season > 0 {
|
||||
seasons[season] = true
|
||||
}
|
||||
}
|
||||
|
||||
return seasons
|
||||
}
|
||||
|
||||
// extractSeason pulls season number from a string
|
||||
func (s *Store) extractSeason(text string) int {
|
||||
matches := seasonPattern.FindStringSubmatch(text)
|
||||
if len(matches) > 1 {
|
||||
if num, err := strconv.Atoi(matches[1]); err == nil && num > 0 && num < 100 {
|
||||
return num
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *Store) hasMultiSeasonIndicators(torrentName string) bool {
|
||||
for _, pattern := range multiSeasonIndicators {
|
||||
if pattern.MatchString(torrentName) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// groupFilesBySeason puts files into season buckets. Files whose name/path
// carries a known season go straight into that bucket; otherwise the path
// components are checked, and as a last resort — when exactly one season is
// known — the file defaults into it. Files that match none of these rules
// are dropped from the result.
func (s *Store) groupFilesBySeason(files []types.File, knownSeasons map[int]bool) map[int][]types.File {
	groups := make(map[int][]types.File)

	// Initialize groups so every known season has a (possibly empty) bucket.
	for season := range knownSeasons {
		groups[season] = []types.File{}
	}

	for _, file := range files {
		// Try to find season from filename or path
		season := s.extractSeason(file.Name)
		if season == 0 {
			season = s.extractSeason(file.Path)
		}

		// If we found a season and it's known, add the file
		if season > 0 && knownSeasons[season] {
			groups[season] = append(groups[season], file)
		} else {
			// If no season found, try path-based inference
			inferredSeason := s.inferSeasonFromPath(file.Path, knownSeasons)
			if inferredSeason > 0 {
				groups[inferredSeason] = append(groups[inferredSeason], file)
			} else if len(knownSeasons) == 1 {
				// If only one season exists, default to it
				for season := range knownSeasons {
					groups[season] = append(groups[season], file)
				}
			}
		}
	}

	return groups
}
|
||||
|
||||
func (s *Store) inferSeasonFromPath(path string, knownSeasons map[int]bool) int {
|
||||
pathParts := strings.Split(path, "/")
|
||||
|
||||
for _, part := range pathParts {
|
||||
if season := s.extractSeason(part); season > 0 && knownSeasons[season] {
|
||||
return season
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// getSortedSeasons returns the season numbers present in the set in
// ascending order. Sorting matters because Go map iteration order is
// random; without it, the "sorted season list for logging" this helper
// promises would come out in a different order on every call.
func getSortedSeasons(seasons map[int]bool) []int {
	result := make([]int, 0, len(seasons))
	for season := range seasons {
		result = append(result, season)
	}
	sort.Ints(result)
	return result
}
|
||||
|
||||
// generateSeasonHash creates a unique hash for a season based on original hash
|
||||
func (s *Store) generateSeasonHash(originalHash string, seasonNumber int) string {
|
||||
source := fmt.Sprintf("%s-season-%d", originalHash, seasonNumber)
|
||||
hash := md5.Sum([]byte(source))
|
||||
return fmt.Sprintf("%x", hash)
|
||||
}
|
||||
|
||||
// grabber downloads url to filename with the given grab client, optionally
// restricting the transfer to an inclusive byte range. Progress is reported
// every two seconds via progressCallback(deltaBytes, bytesPerSecond); a
// final call with speed 0 flushes any bytes not yet reported. Returns the
// transfer's terminal error, if any.
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
	req, err := grab.NewRequest(filename, url)
	if err != nil {
		return err
	}

	// Set byte range if specified
	if byterange != nil {
		byterangeStr := fmt.Sprintf("%d-%d", byterange[0], byterange[1])
		req.HTTPRequest.Header.Set("Range", "bytes="+byterangeStr)
	}

	// Do is asynchronous: resp tracks the in-flight transfer.
	resp := client.Do(req)

	t := time.NewTicker(time.Second * 2)
	defer t.Stop()

	var lastReported int64
Loop:
	for {
		select {
		case <-t.C:
			current := resp.BytesComplete()
			speed := int64(resp.BytesPerSecond())
			// Only report when progress actually advanced; callback
			// receives the delta since the last report, not the total.
			if current != lastReported {
				if progressCallback != nil {
					progressCallback(current-lastReported, speed)
				}
				lastReported = current
			}
		case <-resp.Done:
			break Loop
		}
	}

	// Report final bytes
	if progressCallback != nil {
		progressCallback(resp.BytesComplete()-lastReported, 0)
	}

	return resp.Err()
}
|
||||
|
||||
func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
|
||||
s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
|
||||
torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename))
|
||||
torrentPath = utils.RemoveInvalidChars(torrentPath)
|
||||
err := os.MkdirAll(torrentPath, os.ModePerm)
|
||||
if err != nil {
|
||||
// add the previous error to the error and return
|
||||
return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err)
|
||||
}
|
||||
s.downloadFiles(torrent, debridTorrent, torrentPath)
|
||||
return torrentPath, nil
|
||||
}
|
||||
|
||||
// downloadFiles downloads every file of debridTorrent into parent,
// concurrently but bounded by s.downloadSemaphore. Per-file progress deltas
// are accumulated into the torrent's SizeDownloaded/Progress/Speed fields
// under both torrents' locks. Errors are collected and logged; this
// function never returns them to the caller.
func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) {
	var wg sync.WaitGroup

	// Total size of all files, used to derive an overall percentage.
	totalSize := int64(0)
	for _, file := range debridTorrent.GetFiles() {
		totalSize += file.Size
	}
	debridTorrent.Lock()
	debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
	debridTorrent.Progress = 0       // Reset progress
	debridTorrent.Unlock()
	// Shared by all download goroutines; grabber calls it with delta bytes.
	// Lock order is always debridTorrent then torrent.
	progressCallback := func(downloaded int64, speed int64) {
		debridTorrent.Lock()
		defer debridTorrent.Unlock()
		torrent.Lock()
		defer torrent.Unlock()

		// Update total downloaded bytes
		debridTorrent.SizeDownloaded += downloaded
		debridTorrent.Speed = speed

		// Calculate overall progress
		if totalSize > 0 {
			debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
		}
		s.partialTorrentUpdate(torrent, debridTorrent)
	}
	client := &grab.Client{
		UserAgent: "Decypharr[QBitTorrent]",
		HTTPClient: &http.Client{
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
			},
		},
	}
	// Buffered so goroutines never block on error delivery.
	errChan := make(chan error, len(debridTorrent.Files))
	for _, file := range debridTorrent.GetFiles() {
		if file.DownloadLink.Empty() {
			s.logger.Info().Msgf("No download link found for %s", file.Name)
			continue
		}
		wg.Add(1)
		// Acquire a slot before spawning to bound concurrency.
		s.downloadSemaphore <- struct{}{}
		go func(file types.File) {
			defer wg.Done()
			defer func() { <-s.downloadSemaphore }()
			filename := file.Name

			err := grabber(
				client,
				file.DownloadLink.DownloadLink,
				filepath.Join(parent, filename),
				file.ByteRange,
				progressCallback,
			)

			if err != nil {
				s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
				errChan <- err
			} else {
				s.logger.Info().Msgf("Downloaded %s", filename)
			}
		}(file)
	}
	wg.Wait()

	// All senders are done; safe to close and drain.
	close(errChan)
	var errors []error
	for err := range errChan {
		if err != nil {
			errors = append(errors, err)
		}
	}
	if len(errors) > 0 {
		s.logger.Error().Msgf("Errors occurred during download: %v", errors)
		return
	}
	s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
|
||||
|
||||
// processSymlink symlinks every file of debridTorrent from the rclone mount
// (torrentRclonePath) into torrentSymlinkPath. Because the mount may expose
// files gradually, it polls the mount every 100ms (recursively, matching by
// file name) until all files are linked or a 30-minute timeout elapses.
// Successfully linked files may be pre-cached in a background goroutine.
func (s *Store) processSymlink(debridTorrent *types.Torrent, torrentRclonePath, torrentSymlinkPath string) (string, error) {
	files := debridTorrent.GetFiles()
	if len(files) == 0 {
		return "", fmt.Errorf("no valid files found")
	}

	s.logger.Info().Msgf("Creating symlinks for %d files ...", len(files))

	// Create symlink directory
	err := os.MkdirAll(torrentSymlinkPath, os.ModePerm)
	if err != nil {
		return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
	}

	// Track pending files, keyed by bare file name.
	remainingFiles := make(map[string]types.File)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(remainingFiles))

	var checkDirectory func(string) // Recursive function
	checkDirectory = func(dirPath string) {
		entries, err := os.ReadDir(dirPath)
		if err != nil {
			// Directory not readable yet; retry on the next tick.
			return
		}

		for _, entry := range entries {
			entryName := entry.Name()
			fullPath := filepath.Join(dirPath, entryName)

			// Check if this matches a remaining file
			if file, exists := remainingFiles[entryName]; exists {
				fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)

				// An already-existing symlink counts as success.
				if err := os.Symlink(fullPath, fileSymlinkPath); err == nil || os.IsExist(err) {
					filePaths = append(filePaths, fileSymlinkPath)
					delete(remainingFiles, entryName)
					s.logger.Info().Msgf("File is ready: %s", file.Name)
				}
			} else if entry.IsDir() {
				// If not found and it's a directory, check inside
				checkDirectory(fullPath)
			}
		}
	}

	// Poll until every file has been linked, or give up after the timeout.
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			checkDirectory(torrentRclonePath)

		case <-timeout:
			s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(remainingFiles))
		}
	}

	// Pre-cache files if enabled
	if !s.skipPreCache && len(filePaths) > 0 {
		go func() {
			s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
			if err := utils.PreCacheFile(filePaths); err != nil {
				s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
			} else {
				s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
			}
		}()
	}

	return torrentSymlinkPath, nil
}
|
||||
|
||||
// getTorrentPaths returns mountPath and symlinkPath for a torrent
|
||||
func (s *Store) getTorrentPaths(arrFolder string, debridTorrent *types.Torrent) (string, string, error) {
|
||||
for {
|
||||
torrentFolder, err := debridTorrent.GetMountFolder(debridTorrent.MountPath)
|
||||
if err == nil {
|
||||
// Found mountPath
|
||||
mountPath := filepath.Join(debridTorrent.MountPath, torrentFolder)
|
||||
if debridTorrent.Debrid == "alldebrid" && utils.IsMediaFile(torrentFolder) {
|
||||
torrentFolder = utils.RemoveExtension(torrentFolder)
|
||||
mountPath = debridTorrent.MountPath
|
||||
}
|
||||
// Return mountPath and symlink path
|
||||
return mountPath, filepath.Join(arrFolder, torrentFolder), nil
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store) processMultiSeasonSymlinks(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
|
||||
for _, seasonInfo := range seasons {
|
||||
// Create a season-specific debrid torrent
|
||||
seasonDebridTorrent := debridTorrent.Copy()
|
||||
|
||||
// Update the season torrent with season-specific data
|
||||
seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
|
||||
seasonDebridTorrent.Name = seasonInfo.Name
|
||||
|
||||
seasonTorrent := torrent.Copy()
|
||||
seasonTorrent.ID = seasonInfo.InfoHash
|
||||
seasonTorrent.Name = seasonInfo.Name
|
||||
seasonTorrent.Hash = seasonInfo.InfoHash
|
||||
|
||||
torrentFiles := make([]*File, 0)
|
||||
size := int64(0)
|
||||
|
||||
// Filter files to only include this season's files
|
||||
seasonFiles := make(map[string]types.File)
|
||||
for index, file := range seasonInfo.Files {
|
||||
seasonFiles[file.Name] = file
|
||||
torrentFiles = append(torrentFiles, &File{
|
||||
Index: index,
|
||||
Name: file.Path,
|
||||
Size: file.Size,
|
||||
})
|
||||
size += file.Size
|
||||
}
|
||||
seasonDebridTorrent.Files = seasonFiles
|
||||
seasonTorrent.Files = torrentFiles
|
||||
seasonTorrent.Size = size
|
||||
|
||||
// Create a season-specific torrent record
|
||||
|
||||
// Create season folder path using the extracted season name
|
||||
seasonFolderName := seasonInfo.Name
|
||||
|
||||
s.logger.Info().Msgf("Processing season %s with %d files", seasonTorrent.Name, len(seasonInfo.Files))
|
||||
var err error
|
||||
|
||||
cache := s.debrid.Debrid(debridTorrent.Debrid).Cache()
|
||||
var torrentRclonePath, torrentSymlinkPath string
|
||||
if cache != nil {
|
||||
torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent))
|
||||
|
||||
} else {
|
||||
// Regular mount mode
|
||||
torrentRclonePath, _, err = s.getTorrentPaths(seasonTorrent.SavePath, seasonDebridTorrent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
torrentSymlinkPath = filepath.Join(seasonTorrent.SavePath, seasonFolderName)
|
||||
torrentSymlinkPath, err = s.processSymlink(seasonDebridTorrent, torrentRclonePath, torrentSymlinkPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if torrentSymlinkPath == "" {
|
||||
return fmt.Errorf("no symlink found for season %d", seasonInfo.SeasonNumber)
|
||||
}
|
||||
|
||||
// Update season torrent with final path
|
||||
seasonTorrent.TorrentPath = torrentSymlinkPath
|
||||
seasonTorrent.ContentPath = torrentSymlinkPath
|
||||
seasonTorrent.State = "pausedUP"
|
||||
// Add the season torrent to storage
|
||||
s.torrents.AddOrUpdate(seasonTorrent)
|
||||
|
||||
s.logger.Info().Str("path", torrentSymlinkPath).Msgf("Successfully created season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
|
||||
}
|
||||
s.torrents.Delete(torrent.Hash, "", false)
|
||||
s.logger.Info().Msgf("Multi-season processing completed for %s", debridTorrent.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// processMultiSeasonDownloads handles multi-season torrent downloading
|
||||
func (s *Store) processMultiSeasonDownloads(torrent *Torrent, debridTorrent *types.Torrent, seasons []SeasonInfo, importReq *ImportRequest) error {
|
||||
s.logger.Info().Msgf("Creating separate download records for %d seasons", len(seasons))
|
||||
for _, seasonInfo := range seasons {
|
||||
// Create a season-specific debrid torrent
|
||||
seasonDebridTorrent := debridTorrent.Copy()
|
||||
|
||||
// Update the season torrent with season-specific data
|
||||
seasonDebridTorrent.InfoHash = seasonInfo.InfoHash
|
||||
seasonDebridTorrent.Name = seasonInfo.Name
|
||||
|
||||
// Filter files to only include this season's files
|
||||
seasonFiles := make(map[string]types.File)
|
||||
for _, file := range seasonInfo.Files {
|
||||
seasonFiles[file.Name] = file
|
||||
}
|
||||
seasonDebridTorrent.Files = seasonFiles
|
||||
|
||||
// Create a season-specific torrent record
|
||||
seasonTorrent := torrent.Copy()
|
||||
seasonTorrent.ID = uuid.New().String()
|
||||
seasonTorrent.Name = seasonInfo.Name
|
||||
seasonTorrent.Hash = seasonInfo.InfoHash
|
||||
seasonTorrent.SavePath = torrent.SavePath
|
||||
|
||||
s.logger.Info().Msgf("Downloading season %d with %d files", seasonInfo.SeasonNumber, len(seasonInfo.Files))
|
||||
|
||||
// Generate download links for season files
|
||||
client := s.debrid.Debrid(debridTorrent.Debrid).Client()
|
||||
if err := client.GetFileDownloadLinks(seasonDebridTorrent); err != nil {
|
||||
s.logger.Error().Msgf("Failed to get download links for season %d: %v", seasonInfo.SeasonNumber, err)
|
||||
return fmt.Errorf("failed to get download links for season %d: %v", seasonInfo.SeasonNumber, err)
|
||||
}
|
||||
|
||||
// Download files for this season
|
||||
seasonDownloadPath, err := s.processDownload(seasonTorrent, seasonDebridTorrent)
|
||||
if err != nil {
|
||||
s.logger.Error().Msgf("Failed to download season %d: %v", seasonInfo.SeasonNumber, err)
|
||||
return fmt.Errorf("failed to download season %d: %v", seasonInfo.SeasonNumber, err)
|
||||
}
|
||||
|
||||
// Update season torrent with final path
|
||||
seasonTorrent.TorrentPath = seasonDownloadPath
|
||||
seasonTorrent.ContentPath = seasonDownloadPath
|
||||
seasonTorrent.State = "pausedUP"
|
||||
|
||||
// Add the season torrent to storage
|
||||
s.torrents.AddOrUpdate(seasonTorrent)
|
||||
s.logger.Info().Msgf("Successfully downloaded season %d torrent: %s", seasonInfo.SeasonNumber, seasonTorrent.ID)
|
||||
}
|
||||
s.logger.Debug().Msgf("Deleting original torrent with hash: %s, category: %s", torrent.Hash, torrent.Category)
|
||||
s.torrents.Delete(torrent.Hash, torrent.Category, false)
|
||||
|
||||
s.logger.Info().Msgf("Multi-season download processing completed for %s", debridTorrent.Name)
|
||||
return nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import (
|
||||
"os"
|
||||
@@ -25,6 +25,7 @@ func createTorrentFromMagnet(req *ImportRequest) *Torrent {
|
||||
AutoTmm: false,
|
||||
Ratio: 1,
|
||||
RatioLimit: 1,
|
||||
TotalSize: magnet.Size,
|
||||
SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator),
|
||||
}
|
||||
return torrent
|
||||
@@ -1,4 +1,4 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -29,7 +29,6 @@ func (s *Store) addToQueue(importReq *ImportRequest) error {
|
||||
|
||||
func (s *Store) StartQueueWorkers(ctx context.Context) error {
|
||||
// This function is responsible for starting the scheduled tasks
|
||||
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
@@ -45,7 +44,7 @@ func (s *Store) StartQueueWorkers(ctx context.Context) error {
|
||||
}), gocron.WithContext(ctx)); err != nil {
|
||||
s.logger.Error().Err(err).Msg("Failed to create slots tracking job")
|
||||
} else {
|
||||
s.logger.Trace().Msgf("Download link refresh job scheduled for every %s", "30s")
|
||||
s.logger.Trace().Msgf("Slots tracking job scheduled for every %s", "30s")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,13 +85,17 @@ func (s *Store) trackAvailableSlots(ctx context.Context) {
|
||||
availableSlots[name] = slots
|
||||
}
|
||||
|
||||
if len(availableSlots) == 0 {
|
||||
s.logger.Debug().Msg("No debrid clients available or no slots found")
|
||||
return // No debrid clients or slots available, nothing to process
|
||||
}
|
||||
|
||||
if s.importsQueue.Size() <= 0 {
|
||||
// Queue is empty, no need to process
|
||||
return
|
||||
}
|
||||
|
||||
for name, slots := range availableSlots {
|
||||
|
||||
s.logger.Debug().Msgf("Available slots for %s: %d", name, slots)
|
||||
// If slots are available, process the next import request from the queue
|
||||
for slots > 0 {
|
||||
@@ -1,4 +1,4 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -6,15 +6,17 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
"github.com/sirrobot01/decypharr/internal/request"
|
||||
"github.com/sirrobot01/decypharr/internal/utils"
|
||||
"github.com/sirrobot01/decypharr/pkg/arr"
|
||||
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
|
||||
)
|
||||
|
||||
type ImportType string
|
||||
@@ -33,6 +35,7 @@ type ImportRequest struct {
|
||||
Action string `json:"action"`
|
||||
DownloadUncached bool `json:"downloadUncached"`
|
||||
CallBackUrl string `json:"callBackUrl"`
|
||||
SkipMultiSeason bool `json:"skip_multi_season"`
|
||||
|
||||
Status string `json:"status"`
|
||||
CompletedAt time.Time `json:"completedAt,omitempty"`
|
||||
@@ -42,7 +45,9 @@ type ImportRequest struct {
|
||||
Async bool `json:"async"`
|
||||
}
|
||||
|
||||
func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, action string, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest {
|
||||
func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, action string, downloadUncached bool, callBackUrl string, importType ImportType, skipMultiSeason bool) *ImportRequest {
|
||||
cfg := config.Get()
|
||||
callBackUrl = cmp.Or(callBackUrl, cfg.CallbackURL)
|
||||
return &ImportRequest{
|
||||
Id: uuid.New().String(),
|
||||
Status: "started",
|
||||
@@ -54,6 +59,7 @@ func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet
|
||||
DownloadUncached: downloadUncached,
|
||||
CallBackUrl: callBackUrl,
|
||||
Type: importType,
|
||||
SkipMultiSeason: skipMultiSeason,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-co-op/gocron/v2"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/sirrobot01/decypharr/internal/config"
|
||||
@@ -11,8 +14,6 @@ import (
|
||||
"github.com/sirrobot01/decypharr/pkg/debrid"
|
||||
"github.com/sirrobot01/decypharr/pkg/rclone"
|
||||
"github.com/sirrobot01/decypharr/pkg/repair"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Store struct {
|
||||
@@ -86,7 +87,10 @@ func Reset() {
|
||||
}
|
||||
|
||||
if instance.rcloneManager != nil {
|
||||
instance.rcloneManager.Stop()
|
||||
err := instance.rcloneManager.Stop()
|
||||
if err != nil {
|
||||
instance.logger.Error().Err(err).Msg("Failed to stop rclone manager")
|
||||
}
|
||||
}
|
||||
|
||||
if instance.importsQueue != nil {
|
||||
@@ -98,7 +102,6 @@ func Reset() {
|
||||
}
|
||||
|
||||
if instance.scheduler != nil {
|
||||
_ = instance.scheduler.StopJobs()
|
||||
_ = instance.scheduler.Shutdown()
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
@@ -19,7 +18,6 @@ import (
|
||||
func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error {
|
||||
torrent := createTorrentFromMagnet(importReq)
|
||||
debridTorrent, err := debridTypes.Process(ctx, s.debrid, importReq.SelectedDebrid, importReq.Magnet, importReq.Arr, importReq.Action, importReq.DownloadUncached)
|
||||
|
||||
if err != nil {
|
||||
var httpErr *utils.HTTPError
|
||||
if ok := errors.As(err, &httpErr); ok {
|
||||
@@ -92,15 +90,13 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
|
||||
if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-backoff.C:
|
||||
// Increase interval gradually, cap at max
|
||||
nextInterval := min(s.refreshInterval*2, 30*time.Second)
|
||||
backoff.Reset(nextInterval)
|
||||
}
|
||||
|
||||
<-backoff.C
|
||||
// Reset the backoff timer
|
||||
nextInterval := min(s.refreshInterval*2, 30*time.Second)
|
||||
backoff.Reset(nextInterval)
|
||||
}
|
||||
var torrentSymlinkPath string
|
||||
var err error
|
||||
var torrentSymlinkPath, torrentRclonePath string
|
||||
debridTorrent.Arr = _arr
|
||||
|
||||
// Check if debrid supports webdav by checking cache
|
||||
@@ -115,7 +111,6 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
|
||||
}()
|
||||
s.logger.Error().Err(err).Msgf("Error occured while processing torrent %s", debridTorrent.Name)
|
||||
importReq.markAsFailed(err, torrent, debridTorrent)
|
||||
return
|
||||
}
|
||||
|
||||
onSuccess := func(torrentSymlinkPath string) {
|
||||
@@ -134,11 +129,25 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
|
||||
}()
|
||||
}
|
||||
|
||||
// Check for multi-season torrent support
|
||||
var isMultiSeason bool
|
||||
var seasons []SeasonInfo
|
||||
var err error
|
||||
if !importReq.SkipMultiSeason {
|
||||
isMultiSeason, seasons, err = s.detectMultiSeason(debridTorrent)
|
||||
if err != nil {
|
||||
s.logger.Warn().Msgf("Error detecting multi-season for %s: %v", debridTorrent.Name, err)
|
||||
// Continue with normal processing if detection fails
|
||||
isMultiSeason = false
|
||||
}
|
||||
}
|
||||
|
||||
switch importReq.Action {
|
||||
case "symlink":
|
||||
// Symlink action, we will create a symlink to the torrent
|
||||
s.logger.Debug().Msgf("Post-Download Action: Symlink")
|
||||
cache := deb.Cache()
|
||||
|
||||
if cache != nil {
|
||||
s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
|
||||
// Use webdav to download the file
|
||||
@@ -146,14 +155,45 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
|
||||
onFailed(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if isMultiSeason {
|
||||
s.logger.Info().Msgf("Processing multi-season torrent with %d seasons", len(seasons))
|
||||
|
||||
// Remove any torrent already added
|
||||
err := s.processMultiSeasonSymlinks(torrent, debridTorrent, seasons, importReq)
|
||||
if err == nil {
|
||||
// If an error occurred during multi-season processing, send it to normal processing
|
||||
s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
|
||||
|
||||
go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed
|
||||
go func() {
|
||||
if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
|
||||
s.logger.Error().Msgf("Error sending discord message: %v", err)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
_arr.Refresh()
|
||||
}()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if cache != nil {
|
||||
torrentRclonePath = filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
|
||||
torrentSymlinkPath = filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.Name)) // /mnt/symlinks/{category}/MyTVShow/
|
||||
|
||||
rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
|
||||
torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
|
||||
torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
|
||||
} else {
|
||||
// User is using either zurg or debrid webdav
|
||||
torrentSymlinkPath, err = s.processSymlink(torrent, debridTorrent) // /mnt/symlinks/{category}/MyTVShow/
|
||||
torrentRclonePath, torrentSymlinkPath, err = s.getTorrentPaths(torrent.SavePath, debridTorrent)
|
||||
if err != nil {
|
||||
onFailed(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
torrentSymlinkPath, err = s.processSymlink(debridTorrent, torrentRclonePath, torrentSymlinkPath)
|
||||
|
||||
if err != nil {
|
||||
onFailed(err)
|
||||
return
|
||||
@@ -168,6 +208,19 @@ func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, imp
|
||||
// Download action, we will download the torrent to the specified folder
|
||||
// Generate download links
|
||||
s.logger.Debug().Msgf("Post-Download Action: Download")
|
||||
|
||||
if isMultiSeason {
|
||||
s.logger.Info().Msgf("Processing multi-season download with %d seasons", len(seasons))
|
||||
err := s.processMultiSeasonDownloads(torrent, debridTorrent, seasons, importReq)
|
||||
if err != nil {
|
||||
onFailed(err)
|
||||
return
|
||||
}
|
||||
// Multi-season processing completed successfully
|
||||
onSuccess(torrent.SavePath)
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.GetFileDownloadLinks(debridTorrent); err != nil {
|
||||
onFailed(err)
|
||||
return
|
||||
@@ -241,6 +294,7 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
|
||||
t.Files = files
|
||||
t.Debrid = debridTorrent.Debrid
|
||||
t.Size = totalSize
|
||||
t.TotalSize = totalSize
|
||||
t.Completed = sizeCompleted
|
||||
t.NumSeeds = debridTorrent.Seeders
|
||||
t.Downloaded = sizeCompleted
|
||||
@@ -252,7 +306,6 @@ func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *
|
||||
t.Eta = eta
|
||||
t.Dlspeed = speed
|
||||
t.Upspeed = speed
|
||||
t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
|
||||
return t
|
||||
}
|
||||
|
||||
@@ -267,7 +320,7 @@ func (s *Store) updateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent
|
||||
}
|
||||
}
|
||||
t = s.partialTorrentUpdate(t, debridTorrent)
|
||||
t.ContentPath = t.TorrentPath + string(os.PathSeparator)
|
||||
t.ContentPath = t.TorrentPath
|
||||
|
||||
if t.IsReady() {
|
||||
t.State = "pausedUP"
|
||||
@@ -1,4 +1,4 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@@ -167,44 +167,33 @@ func (ts *TorrentStorage) Update(torrent *Torrent) {
|
||||
func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) {
|
||||
ts.mu.Lock()
|
||||
defer ts.mu.Unlock()
|
||||
key := keyPair(hash, category)
|
||||
torrent, exists := ts.torrents[key]
|
||||
if !exists && category == "" {
|
||||
// Remove the torrent without knowing the category
|
||||
for k, t := range ts.torrents {
|
||||
if t.Hash == hash {
|
||||
key = k
|
||||
torrent = t
|
||||
break
|
||||
|
||||
wireStore := Get()
|
||||
for key, torrent := range ts.torrents {
|
||||
if torrent == nil {
|
||||
continue
|
||||
}
|
||||
if torrent.Hash == hash && (category == "" || torrent.Category == category) {
|
||||
if torrent.State == "queued" && torrent.ID != "" {
|
||||
// Remove the torrent from the import queue if it exists
|
||||
wireStore.importsQueue.Delete(torrent.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
|
||||
dbClient := wireStore.debrid.Client(torrent.Debrid)
|
||||
if dbClient != nil {
|
||||
_ = dbClient.DeleteTorrent(torrent.DebridID)
|
||||
}
|
||||
}
|
||||
delete(ts.torrents, key)
|
||||
|
||||
if torrent == nil {
|
||||
return
|
||||
}
|
||||
st := Get()
|
||||
// Check if torrent is queued for download
|
||||
|
||||
if torrent.State == "queued" && torrent.ID != "" {
|
||||
// Remove the torrent from the import queue if it exists
|
||||
st.importsQueue.Delete(torrent.ID)
|
||||
}
|
||||
|
||||
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
|
||||
dbClient := st.debrid.Client(torrent.Debrid)
|
||||
if dbClient != nil {
|
||||
_ = dbClient.DeleteTorrent(torrent.DebridID)
|
||||
}
|
||||
}
|
||||
|
||||
delete(ts.torrents, key)
|
||||
|
||||
// Delete the torrent folder
|
||||
if torrent.ContentPath != "" {
|
||||
err := os.RemoveAll(torrent.ContentPath)
|
||||
if err != nil {
|
||||
return
|
||||
// Delete the torrent folder
|
||||
if torrent.ContentPath != "" {
|
||||
err := os.RemoveAll(torrent.ContentPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
@@ -227,12 +216,11 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
|
||||
if torrent == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if torrent.State == "queued" && torrent.ID != "" {
|
||||
// Remove the torrent from the import queue if it exists
|
||||
st.importsQueue.Delete(torrent.ID)
|
||||
}
|
||||
if torrent.Hash == hash {
|
||||
if torrent.State == "queued" && torrent.ID != "" {
|
||||
// Remove the torrent from the import queue if it exists
|
||||
st.importsQueue.Delete(torrent.ID)
|
||||
}
|
||||
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
|
||||
toDelete[torrent.DebridID] = torrent.Debrid
|
||||
}
|
||||
@@ -243,6 +231,7 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
|
||||
return
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -72,6 +72,60 @@ type Torrent struct {
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (t *Torrent) Copy() *Torrent {
|
||||
return &Torrent{
|
||||
ID: t.ID,
|
||||
DebridID: t.DebridID,
|
||||
Debrid: t.Debrid,
|
||||
TorrentPath: t.TorrentPath,
|
||||
AddedOn: t.AddedOn,
|
||||
AmountLeft: t.AmountLeft,
|
||||
AutoTmm: t.AutoTmm,
|
||||
Availability: t.Availability,
|
||||
Category: t.Category,
|
||||
Completed: t.Completed,
|
||||
CompletionOn: t.CompletionOn,
|
||||
ContentPath: t.ContentPath,
|
||||
DlLimit: t.DlLimit,
|
||||
Dlspeed: t.Dlspeed,
|
||||
Downloaded: t.Downloaded,
|
||||
DownloadedSession: t.DownloadedSession,
|
||||
Eta: t.Eta,
|
||||
FlPiecePrio: t.FlPiecePrio,
|
||||
ForceStart: t.ForceStart,
|
||||
Hash: t.Hash,
|
||||
LastActivity: t.LastActivity,
|
||||
MagnetUri: t.MagnetUri,
|
||||
MaxRatio: t.MaxRatio,
|
||||
MaxSeedingTime: t.MaxSeedingTime,
|
||||
Name: t.Name,
|
||||
NumComplete: t.NumComplete,
|
||||
NumIncomplete: t.NumIncomplete,
|
||||
NumLeechs: t.NumLeechs,
|
||||
NumSeeds: t.NumSeeds,
|
||||
Priority: t.Priority,
|
||||
Progress: t.Progress,
|
||||
Ratio: t.Ratio,
|
||||
RatioLimit: t.RatioLimit,
|
||||
SavePath: t.SavePath,
|
||||
SeedingTimeLimit: t.SeedingTimeLimit,
|
||||
SeenComplete: t.SeenComplete,
|
||||
SeqDl: t.SeqDl,
|
||||
Size: t.Size,
|
||||
State: t.State,
|
||||
SuperSeeding: t.SuperSeeding,
|
||||
Tags: t.Tags,
|
||||
TimeActive: t.TimeActive,
|
||||
TotalSize: t.TotalSize,
|
||||
Tracker: t.Tracker,
|
||||
UpLimit: t.UpLimit,
|
||||
Uploaded: t.Uploaded,
|
||||
UploadedSession: t.UploadedSession,
|
||||
Upspeed: t.Upspeed,
|
||||
Source: t.Source,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Torrent) IsReady() bool {
|
||||
return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != ""
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package store
|
||||
package wire
|
||||
|
||||
import "context"
|
||||
|
||||
1
testdata/ubuntu-25.04-desktop-amd64.iso.magnet
vendored
Normal file
1
testdata/ubuntu-25.04-desktop-amd64.iso.magnet
vendored
Normal file
@@ -0,0 +1 @@
|
||||
magnet:?xt=urn:btih:8a19577fb5f690970ca43a57ff1011ae202244b8&dn=ubuntu-25.04-desktop-amd64.iso&tr=https%3A//ipv6.torrent.ubuntu.com/announce&tr=https%3A//torrent.ubuntu.com/announce
|
||||
BIN
testdata/ubuntu-25.04-desktop-amd64.iso.torrent
vendored
Normal file
BIN
testdata/ubuntu-25.04-desktop-amd64.iso.torrent
vendored
Normal file
Binary file not shown.
Reference in New Issue
Block a user