2 Commits

Author SHA1 Message Date
Mukhtar Akere
87bf8d0574 Merge branch 'beta'
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-05-27 23:45:13 +01:00
Mukhtar Akere
d313ed0712 hotfix non-webdav symlinker
Some checks failed
GoReleaser / goreleaser (push) Has been cancelled
Release Docker Build / docker (push) Has been cancelled
2025-05-26 00:16:46 +01:00
162 changed files with 6456 additions and 24848 deletions

View File

@@ -5,16 +5,16 @@ tmp_dir = "tmp"
[build]
args_bin = ["--config", "data/"]
bin = "./tmp/main"
cmd = "bash -c 'npm run build && go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
cmd = "bash -c 'go build -ldflags \"-X github.com/sirrobot01/decypharr/pkg/version.Version=0.0.0 -X github.com/sirrobot01/decypharr/pkg/version.Channel=dev\" -o ./tmp/main .'"
delay = 1000
exclude_dir = ["tmp", "vendor", "testdata", "data", "logs", "docs", "dist", "node_modules", ".ven"]
exclude_dir = ["assets", "tmp", "vendor", "testdata", "data"]
exclude_file = []
exclude_regex = ["_test.go"]
exclude_unchanged = false
follow_symlink = false
full_bin = ""
include_dir = []
include_ext = ["go", "tpl", "tmpl", "html", ".json", ".js", ".css"]
include_ext = ["go", "tpl", "tmpl", "html", ".json"]
include_file = []
kill_delay = "0s"
log = "build-errors.log"

View File

@@ -11,19 +11,3 @@ torrents.json
*.json
.ven/**
docs/**
# Don't copy node modules
node_modules/
# Don't copy development files
.git/
.gitignore
*.md
.env*
*.log
# Build artifacts
decypharr
healthcheck
*.exe
.venv/

View File

@@ -1,76 +0,0 @@
name: Bug Report
description: 'Report a new bug'
labels: ['Type: Bug', 'Status: Needs Triage']
body:
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description: Please search to see if an open or closed issue already exists for the bug you encountered. If a bug exists and is closed note that it may only be fixed in an unstable branch.
options:
- label: I have searched the existing open and closed issues
required: true
- type: textarea
attributes:
label: Current Behavior
description: A concise description of what you're experiencing.
validations:
required: true
- type: textarea
attributes:
label: Expected Behavior
description: A concise description of what you expected to happen.
validations:
required: true
- type: textarea
attributes:
label: Steps To Reproduce
description: Steps to reproduce the behavior.
placeholder: |
1. In this environment...
2. With this config...
3. Run '...'
4. See error...
validations:
required: false
- type: textarea
attributes:
label: Environment
description: |
examples:
- **OS**: Ubuntu 20.04
- **Version**: v1.0.0
- **Docker Install**: Yes
- **Browser**: Firefox 90 (If UI related)
value: |
- OS:
- Version:
- Docker Install:
- Browser:
render: markdown
validations:
required: true
- type: dropdown
attributes:
label: What branch are you running?
options:
- Main/Latest
- Beta
- Experimental
validations:
required: true
- type: textarea
attributes:
label: Trace Logs? **Not Optional**
description: |
Trace Logs
- are **required** for bug reports
- are not optional
validations:
required: true
- type: checkboxes
attributes:
label: Trace Logs have been provided as applicable
description: Trace logs are **generally required** and are not optional for all bug reports and contain `trace`. Info logs are invalid for bug reports and do not contain `debug` nor `trace`
options:
- label: I have read and followed the steps in the documentation link and provided the required trace logs - the logs contain `trace` - that are relevant and show this issue.
required: true

View File

@@ -1,38 +0,0 @@
name: Feature Request
description: 'Suggest an idea for Decypharr'
labels: ['Type: Feature Request', 'Status: Needs Triage']
body:
- type: checkboxes
attributes:
label: Is there an existing issue for this?
description: Please search to see if an open or closed issue already exists for the feature you are requesting. If a request exists and is closed note that it may only be fixed in an unstable branch.
options:
- label: I have searched the existing open and closed issues
required: true
- type: textarea
attributes:
label: Is your feature request related to a problem? Please describe
description: A clear and concise description of what the problem is.
validations:
required: true
- type: textarea
attributes:
label: Describe the solution you'd like
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
validations:
required: true
- type: textarea
attributes:
label: Anything else?
description: |
Links? References? Mockups? Anything that will give us more context about the feature you are encountering!
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
validations:
required: true

5
.gitignore vendored
View File

@@ -12,7 +12,4 @@ tmp/**
torrents.json
logs/**
auth.json
.ven/
.env
.venv/
node_modules/
.ven/

View File

@@ -61,8 +61,6 @@ EXPOSE 8282
VOLUME ["/app"]
USER nonroot:nonroot
# Base healthcheck
HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app", "--basic"]
HEALTHCHECK --interval=3s --retries=10 CMD ["/usr/bin/healthcheck", "--config", "/app"]
CMD ["/usr/bin/decypharr", "--config", "/app"]

View File

@@ -6,16 +6,16 @@
## What is Decypharr?
Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications.
Decypharr combines the power of QBittorrent with popular Debrid services to enhance your media management. It provides a familiar interface for Sonarr, Radarr, and other \*Arr applications while leveraging the capabilities of Debrid providers.
## Features
- Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
- Full-fledged UI for managing torrents
- Proxy support for filtering out un-cached Debrid torrents
- Multiple Debrid providers support
- WebDAV server support for each debrid provider
- Repair Worker for missing files
- 🔄 Mock Qbittorent API that supports the Arrs (Sonarr, Radarr, Lidarr etc)
- 🖥️ Full-fledged UI for managing torrents
- 🛡️ Proxy support for filtering out un-cached Debrid torrents
- 🔌 Multiple Debrid providers support
- 📁 WebDAV server support for each debrid provider
- 🔧 Repair Worker for missing files
## Supported Debrid Providers
@@ -36,9 +36,14 @@ services:
container_name: decypharr
ports:
- "8282:8282" # qBittorrent
user: "1000:1000"
volumes:
- /mnt/:/mnt
- ./configs/:/app # config.json must be in this directory
environment:
- PUID=1000
- PGID=1000
- UMASK=002
restart: unless-stopped
```

View File

@@ -6,13 +6,12 @@ import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/qbit"
"github.com/sirrobot01/decypharr/pkg/sabnzbd"
"github.com/sirrobot01/decypharr/pkg/server"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"github.com/sirrobot01/decypharr/pkg/service"
"github.com/sirrobot01/decypharr/pkg/version"
"github.com/sirrobot01/decypharr/pkg/web"
"github.com/sirrobot01/decypharr/pkg/webdav"
"github.com/sirrobot01/decypharr/pkg/worker"
"net/http"
"os"
"runtime"
@@ -60,34 +59,24 @@ func Start(ctx context.Context) error {
`, version.GetInfo(), cfg.LogLevel)
// Initialize services
_usenet := usenet.New()
debridCaches := store.Get().Debrid().Caches()
wd := webdav.New(debridCaches, _usenet)
var sb *sabnzbd.SABnzbd
ui := web.New(_usenet).Routes()
webdavRoutes := wd.Routes()
qb := qbit.New()
wd := webdav.New()
ui := web.New(qb).Routes()
webdavRoutes := wd.Routes()
qbitRoutes := qb.Routes()
// Register routes
handlers := map[string]http.Handler{
"/": ui,
"/api/v2": qbitRoutes,
"/webdav": webdavRoutes,
}
if qb != nil {
handlers["/api/v2"] = qb.Routes()
}
if _usenet != nil {
sb = sabnzbd.New(_usenet)
sabRoutes := sb.Routes()
handlers["/sabnzbd"] = sabRoutes
}
srv := server.New(_usenet, handlers)
srv := server.New(handlers)
done := make(chan struct{})
go func(ctx context.Context) {
if err := startServices(ctx, cancelSvc, wd, srv); err != nil {
if err := startServices(ctx, wd, srv); err != nil {
_log.Error().Err(err).Msg("Error starting services")
cancelSvc()
}
@@ -105,26 +94,21 @@ func Start(ctx context.Context) error {
cancelSvc() // tell existing services to shut down
_log.Info().Msg("Restarting Decypharr...")
<-done // wait for them to finish
if qb != nil {
qb.Reset()
}
store.Reset()
if _usenet != nil {
_usenet.Close()
}
qb.Reset()
service.Reset()
// rebuild svcCtx off the original parent
svcCtx, cancelSvc = context.WithCancel(ctx)
runtime.GC()
config.Reload()
store.Reset()
service.Reset()
// loop will restart services automatically
}
}
}
func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav.WebDav, srv *server.Server) error {
func startServices(ctx context.Context, wd *webdav.WebDav, srv *server.Server) error {
var wg sync.WaitGroup
errChan := make(chan error)
@@ -162,7 +146,11 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
})
safeGo(func() error {
arr := store.Get().Arr()
return worker.Start(ctx)
})
safeGo(func() error {
arr := service.GetService().Arr
if arr == nil {
return nil
}
@@ -171,9 +159,9 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
if cfg := config.Get(); cfg.Repair.Enabled {
safeGo(func() error {
repair := store.Get().Repair()
if repair != nil {
if err := repair.Start(ctx); err != nil {
r := service.GetService().Repair
if r != nil {
if err := r.Start(ctx); err != nil {
_log.Error().Err(err).Msg("repair failed")
}
}
@@ -181,10 +169,6 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
})
}
safeGo(func() error {
return store.Get().StartQueueSchedule(ctx)
})
go func() {
wg.Wait()
close(errChan)
@@ -194,11 +178,7 @@ func startServices(ctx context.Context, cancelSvc context.CancelFunc, wd *webdav
for err := range errChan {
if err != nil {
_log.Error().Err(err).Msg("Service error detected")
// If the error is critical, return it to stop the main loop
if ctx.Err() == nil {
_log.Error().Msg("Stopping services due to error")
cancelSvc() // Cancel the service context to stop all services
}
// Don't shut down the whole app
}
}
}()

View File

@@ -22,14 +22,8 @@ type HealthStatus struct {
}
func main() {
var (
configPath string
isBasicCheck bool
debug bool
)
var configPath string
flag.StringVar(&configPath, "config", "/data", "path to the data folder")
flag.BoolVar(&isBasicCheck, "basic", false, "perform basic health check without WebDAV")
flag.BoolVar(&debug, "debug", false, "enable debug mode for detailed output")
flag.Parse()
config.SetConfigPath(configPath)
cfg := config.Get()
@@ -69,17 +63,16 @@ func main() {
status.WebUI = true
}
if isBasicCheck {
status.WebDAVService = checkBaseWebdav(ctx, baseUrl, port)
} else {
// If not a basic check, check WebDAV with debrid path
if webdavPath != "" {
status.WebDAVService = checkDebridWebDAV(ctx, baseUrl, port, webdavPath)
} else {
// If no WebDAV path is set, consider it healthy
// Check WebDAV if enabled
if webdavPath != "" {
if checkWebDAV(ctx, baseUrl, port, webdavPath) {
status.WebDAVService = true
}
} else {
// If WebDAV is not enabled, consider it healthy
status.WebDAVService = true
}
// Determine overall status
// Consider the application healthy if core services are running
status.OverallStatus = status.QbitAPI && status.WebUI
@@ -88,7 +81,7 @@ func main() {
}
// Optional: output health status as JSON for logging
if debug {
if os.Getenv("DEBUG") == "true" {
statusJSON, _ := json.MarshalIndent(status, "", " ")
fmt.Println(string(statusJSON))
}
@@ -139,24 +132,7 @@ func checkWebUI(ctx context.Context, baseUrl, port string) bool {
return resp.StatusCode == http.StatusOK
}
func checkBaseWebdav(ctx context.Context, baseUrl, port string) bool {
url := fmt.Sprintf("http://localhost:%s%swebdav/", port, baseUrl)
req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
if err != nil {
return false
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false
}
defer resp.Body.Close()
return resp.StatusCode == http.StatusMultiStatus ||
resp.StatusCode == http.StatusOK
}
func checkDebridWebDAV(ctx context.Context, baseUrl, port, path string) bool {
func checkWebDAV(ctx context.Context, baseUrl, port, path string) bool {
url := fmt.Sprintf("http://localhost:%s%swebdav/%s", port, baseUrl, path)
req, err := http.NewRequestWithContext(ctx, "PROPFIND", url, nil)
if err != nil {
@@ -169,7 +145,5 @@ func checkDebridWebDAV(ctx context.Context, baseUrl, port, path string) bool {
}
defer resp.Body.Close()
return resp.StatusCode == http.StatusMultiStatus ||
resp.StatusCode == http.StatusOK
return resp.StatusCode == 207 || resp.StatusCode == http.StatusOK
}

View File

@@ -14,7 +14,7 @@ Here are the fundamental configuration options:
"discord_webhook_url": "",
"min_file_size": 0,
"max_file_size": 0,
"allowed_file_types": ["mp4", "mkv", "avi", ...],
"allowed_file_types": [".mp4", ".mkv", ".avi", ...],
}
```
@@ -55,8 +55,8 @@ When enabled, you'll need to provide a username and password to access the Decyp
You can set minimum and maximum file size limits for torrents:
```json
"min_file_size": 0,
"max_file_size": 0
"min_file_size": 0, // Minimum file size in bytes (0 = no minimum)
"max_file_size": 0 // Maximum file size in bytes (0 = no maximum)
```
#### Allowed File Types
@@ -64,9 +64,9 @@ You can restrict the types of files that Decypharr will process by specifying al
```json
"allowed_file_types": [
"mp4", "mkv", "avi", "mov",
"m4v", "mpg", "mpeg", "wmv",
"m4a", "mp3", "flac", "wav"
".mp4", ".mkv", ".avi", ".mov",
".m4v", ".mpg", ".mpeg", ".wmv",
".m4a", ".mp3", ".flac", ".wav"
]
```

View File

@@ -23,7 +23,8 @@ Here's a minimal configuration to get started:
},
"repair": {
"enabled": false,
"interval": "12h"
"interval": "12h",
"run_on_start": false
},
"use_auth": false,
"log_level": "info"

View File

@@ -1,7 +1,5 @@
# Repair Worker
![Repair Worker](../images/repair.png)
The Repair Worker is a powerful feature that helps maintain the health of your media library by scanning for and fixing issues with files.
## What It Does
@@ -21,6 +19,7 @@ To enable and configure the Repair Worker, add the following to your `config.jso
"repair": {
"enabled": true,
"interval": "12h",
"run_on_start": false,
"use_webdav": false,
"zurg_url": "http://localhost:9999",
"auto_process": true
@@ -31,6 +30,7 @@ To enable and configure the Repair Worker, add the following to your `config.jso
- `enabled`: Set to `true` to enable the Repair Worker.
- `interval`: The time interval for the Repair Worker to run (e.g., `12h`, `1d`).
- `run_on_start`: If set to `true`, the Repair Worker will run immediately after Decypharr starts.
- `use_webdav`: If set to `true`, the Repair Worker will use WebDAV for file operations.
- `zurg_url`: The URL for the Zurg service (if using).
- `auto_process`: If set to `true`, the Repair Worker will automatically process files that it finds issues with.

View File

@@ -1,7 +1,5 @@
# WebDAV Server
![WebDAV Server](../images/webdav.png)
Decypharr includes a built-in WebDAV server that provides direct access to your Debrid files, making them easily accessible to media players and other applications.

View File

@@ -1,22 +0,0 @@
### Downloading with Decypharr
While Decypharr provides a Qbittorent API for integration with media management applications, it also allows you to manually download torrents directly through its interface. This guide will walk you through the process of downloading torrents using Decypharr.
- You can either use the Decypharr UI to add torrents manually or use its API to automate the process.
## Manual Downloading
![Downloading UI](../images/download.png)
To manually download a torrent using Decypharr, follow these steps:
1. **Access the Download Page**: Navigate to the "Download" section in the Decypharr UI.
2. You can either upload torrent file(s) or paste magnet links directly into the input fields
3. Select the action(defaults to Symlink)
5. Add any additional options, such as:
- *Download Folder*: Specify the folder where the downloaded files will be saved.
- *Arr Category*: Choose the category for the download, which helps in organizing files in your media management applications.
- **Debrid Provider**: Choose which Debrid service to use for the download(if you have multiple)
- **File Size Limits**: Set minimum and maximum file size limits if needed.
- **Allowed File Types**: Specify which file types are allowed for download.
Note:
- If you use an arr category, your download will go into **{download_folder}/{arr}**

View File

@@ -1,5 +1,4 @@
# Guides for setting up Decypharr
- [Setting up with Rclone](rclone.md)
- [Manual Downloading with Decypharr](downloading.md)
- [Setting up with Rclone](rclone.md)

View File

@@ -5,7 +5,7 @@ This guide will help you set up Decypharr with Rclone, allowing you to use your
#### Rclone
Make sure you have Rclone installed and configured on your system. You can follow the [Rclone installation guide](https://rclone.org/install/) for instructions.
It's recommended to use a docker version of Rclone, as it provides a consistent environment across different platforms.
It's recommended to use docker version of Rclone, as it provides a consistent environment across different platforms.
### Steps
@@ -35,7 +35,7 @@ Create a `rclone.conf` file in `/opt/rclone/` with your Rclone configuration.
```conf
[decypharr]
type = webdav
url = http://your-ip-or-domain:8282/webdav/realdebrid
url = https://your-ip-or-domain:8282/webdav/realdebrid
vendor = other
pacer_min_sleep = 0
```
@@ -51,7 +51,7 @@ Create a `config.json` file in `/opt/decypharr/` with your Decypharr configurati
"folder": "/mnt/remote/realdebrid/__all__/",
"rate_limit": "250/minute",
"use_webdav": true,
"rc_url": "rclone:5572"
"rc_url": "http://your-ip-address:5572" // Rclone RC URL
}
],
"qbittorrent": {
@@ -62,11 +62,6 @@ Create a `config.json` file in `/opt/decypharr/` with your Decypharr configurati
```
### Docker Compose Setup
- Check your current user and group IDs by running `id -u` and `id -g` in your terminal. You can use these values to set the `PUID` and `PGID` environment variables in the Docker Compose file.
- You should also set `user` to your user ID and group ID in the Docker Compose file to ensure proper file permissions.
Create a `docker-compose.yml` file with the following content:
```yaml
@@ -74,14 +69,14 @@ services:
decypharr:
image: cy01/blackhole:latest
container_name: decypharr
user: "${PUID:-1000}:${PGID:-1000}"
user: "1000:1000"
volumes:
- /mnt/:/mnt:rslave
- /mnt/:/mnt
- /opt/decypharr/:/app
environment:
- PUID=1000
- PGID=1000
- UMASK=002
- PUID=1000 # Replace with your user ID
- PGID=1000 # Replace with your group ID
ports:
- "8282:8282/tcp"
restart: unless-stopped
@@ -92,11 +87,14 @@ services:
restart: unless-stopped
environment:
TZ: UTC
PUID: 1000
PGID: 1000
ports:
- 5572:5572
volumes:
- /mnt/remote/realdebrid:/data:rshared
- /opt/rclone/rclone.conf:/config/rclone/rclone.conf
- /mnt:/mnt
cap_add:
- SYS_ADMIN
security_opt:
@@ -107,17 +105,9 @@ services:
decypharr:
condition: service_healthy
restart: true
command: "mount decypharr: /data --allow-non-empty --allow-other --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth"
command: "mount decypharr: /data --allow-non-empty --allow-other --uid=1000 --gid=1000 --umask=002 --dir-cache-time 10s --rc --rc-addr :5572 --rc-no-auth "
```
#### Docker Notes
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
- You can check your current user and group IDs and UMASK by running `id -a` and `umask` commands in your terminal.
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
- Also adding `--uid=$YOUR_PUID --gid=$YOUR_PGID` to the `rclone mount` command can help with permissions.
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
Start the containers:
```bash
docker-compose up -d
@@ -142,7 +132,7 @@ For each provider, you'll need a different rclone. OR you can change your `rclon
```apache
[decypharr]
type = webdav
url = http://your-ip-or-domain:8282/webdav/
url = https://your-ip-or-domain:8282/webdav/
vendor = other
pacer_min_sleep = 0
```

Binary file not shown.

Before

Width:  |  Height:  |  Size: 218 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 226 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 62 KiB

View File

@@ -45,15 +45,21 @@ docker run -d \
Create a `docker-compose.yml` file with the following content:
```yaml
version: '3.7'
services:
decypharr:
image: cy01/blackhole:latest
container_name: decypharr
ports:
- "8282:8282"
user: "1000:1000"
volumes:
- /mnt/:/mnt:rslave # Mount your media directory
- /mnt/:/mnt # Mount your media directory
- ./config/:/app # config.json must be in this directory
environment:
- PUID=1000
- PGID=1000
- UMASK=002
- QBIT_PORT=8282 # qBittorrent Port (optional)
restart: unless-stopped
```
@@ -67,10 +73,9 @@ docker-compose up -d
## Binary Installation
If you prefer not to use Docker, you can download and run the binary directly.
Download your OS-specific release from the [releases page](https://github.com/sirrobot01/decypharr/releases).
Download the binary from the releases page
Create a configuration file (see Configuration)
Run the binary:
```bash
chmod +x decypharr
./decypharr --config /path/to/config/folder
@@ -104,28 +109,8 @@ You can also configure Decypharr through the web interface, but it's recommended
}
```
### Notes for Docker Users
### Few Notes
- Ensure that the `/mnt/` directory is mounted correctly to access your media files.
- The `./config/` directory should contain your `config.json` file.
- You can adjust the `PUID` and `PGID` environment variables to match your user and group IDs for proper file permissions.
- The `UMASK` environment variable can be set to control file permissions created by Decypharr.
##### Health Checks
- Health checks are disabled by default. You can enable them by adding a `healthcheck` section in your `docker-compose.yml` file.
- Health checks checks for availability of several parts of the application;
- The main web interface
- The qBittorrent API
- The WebDAV server (if enabled). You should disable health checks for the initial indexes as they can take a long time to complete.
```yaml
services:
decypharr:
...
...
healthcheck:
test: ["CMD", "/usr/bin/healthcheck", "--config", "/app/"]
interval: 5s
timeout: 10s
retries: 3
```
- Make sure decypharr has access to the directories specified in the configuration file.
- Ensure decypharr have write permissions to the qbittorrent download folder.
- Make sure decypharr can write to the `./config/` directory.

14
go.mod
View File

@@ -5,32 +5,25 @@ go 1.24.0
toolchain go1.24.3
require (
github.com/Tensai75/nzbparser v0.1.0
github.com/anacrolix/torrent v1.55.0
github.com/cavaliergopher/grab/v3 v3.0.1
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb
github.com/go-chi/chi/v5 v5.1.0
github.com/go-co-op/gocron/v2 v2.16.1
github.com/google/uuid v1.6.0
github.com/gorilla/sessions v1.4.0
github.com/nwaples/rardecode/v2 v2.0.0-beta.4
github.com/puzpuzpuz/xsync/v4 v4.1.0
github.com/robfig/cron/v3 v3.0.1
github.com/rs/zerolog v1.33.0
github.com/sourcegraph/conc v0.3.0
github.com/stanNthe5/stringbuf v0.0.3
go.uber.org/ratelimit v0.3.1
golang.org/x/crypto v0.33.0
golang.org/x/net v0.35.0
golang.org/x/sync v0.15.0
golang.org/x/sync v0.12.0
golang.org/x/time v0.8.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
github.com/Tensai75/subjectparser v0.1.0 // indirect
github.com/anacrolix/missinggo v1.3.0 // indirect
github.com/anacrolix/missinggo/v2 v2.7.3 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/google/go-cmp v0.6.0 // indirect
@@ -41,8 +34,5 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.26.0 // indirect
)

28
go.sum
View File

@@ -8,10 +8,6 @@ github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrX
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/Tensai75/nzbparser v0.1.0 h1:6RppAuWFahqu/kKjWO5Br0xuEYcxGz+XBTxYc+qvPo4=
github.com/Tensai75/nzbparser v0.1.0/go.mod h1:IUIIaeGaYp2dLAAF29BWYeKTfI4COvXaeQAzQiTOfMY=
github.com/Tensai75/subjectparser v0.1.0 h1:6fEWnRov8lDHxJS2EWqY6VonwYfrIRN+k8h8H7fFwHA=
github.com/Tensai75/subjectparser v0.1.0/go.mod h1:PNBFBnkOGbVDfX+56ZmC4GKSpqoRMCF1Y44xYd7NLGI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -40,8 +36,6 @@ github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CM
github.com/anacrolix/torrent v1.55.0 h1:s9yh/YGdPmbN9dTa+0Inh2dLdrLQRvEAj1jdFW/Hdd8=
github.com/anacrolix/torrent v1.55.0/go.mod h1:sBdZHBSZNj4de0m+EbYg7vvs/G/STubxu/GzzNbojsE=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -53,8 +47,6 @@ github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67
github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4=
github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb h1:BK9pqCayfiXrcRypTPxDsunA6hPJtOyOTJYY2DJ429g=
github.com/chrisfarms/yenc v0.0.0-20140520125709-00bca2f8b3cb/go.mod h1:V4bkS2felTTOSIsYx9JivzrbdBOuksi02ZkzfbHUVAk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -163,8 +155,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4 h1:sdiJxQdPjECn2lh9nLFFhgLCf+0ulDU5rODbtERTlUY=
github.com/nwaples/rardecode/v2 v2.0.0-beta.4/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -193,8 +183,6 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
@@ -210,8 +198,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/stanNthe5/stringbuf v0.0.3 h1:3ChRipDckEY6FykaQ1Dowy3B+ZQa72EDBCasvT5+D1w=
github.com/stanNthe5/stringbuf v0.0.3/go.mod h1:hii5Vr+mucoWkNJlIYQVp8YvuPtq45fFnJEAhcPf2cQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -230,14 +216,8 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0=
go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
@@ -266,8 +246,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -286,8 +266,8 @@ golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -12,13 +12,6 @@ import (
"sync"
)
type RepairStrategy string
const (
RepairStrategyPerFile RepairStrategy = "per_file"
RepairStrategyPerTorrent RepairStrategy = "per_torrent"
)
var (
instance *Config
once sync.Once
@@ -26,19 +19,15 @@ var (
)
type Debrid struct {
Name string `json:"name,omitempty"`
APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"`
DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
RepairRateLimit string `json:"repair_rate_limit,omitempty"`
DownloadRateLimit string `json:"download_rate_limit,omitempty"`
Proxy string `json:"proxy,omitempty"`
UnpackRar bool `json:"unpack_rar,omitempty"`
AddSamples bool `json:"add_samples,omitempty"`
MinimumFreeSlot int `json:"minimum_free_slot,omitempty"` // Minimum active pots to use this debrid
Name string `json:"name,omitempty"`
APIKey string `json:"api_key,omitempty"`
DownloadAPIKeys []string `json:"download_api_keys,omitempty"`
Folder string `json:"folder,omitempty"`
DownloadUncached bool `json:"download_uncached,omitempty"`
CheckCached bool `json:"check_cached,omitempty"`
RateLimit string `json:"rate_limit,omitempty"` // 200/minute or 10/second
Proxy string `json:"proxy,omitempty"`
AddSamples bool `json:"add_samples,omitempty"`
UseWebDav bool `json:"use_webdav,omitempty"`
WebDav
@@ -47,6 +36,7 @@ type Debrid struct {
type QBitTorrent struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Port string `json:"port,omitempty"` // deprecated
DownloadFolder string `json:"download_folder,omitempty"`
Categories []string `json:"categories,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
@@ -61,19 +51,17 @@ type Arr struct {
Cleanup bool `json:"cleanup,omitempty"`
SkipRepair bool `json:"skip_repair,omitempty"`
DownloadUncached *bool `json:"download_uncached,omitempty"`
SelectedDebrid string `json:"selected_debrid,omitempty"`
Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "config", "". Auto means it was automatically detected from the arr
}
type Repair struct {
Enabled bool `json:"enabled,omitempty"`
Interval string `json:"interval,omitempty"`
ZurgURL string `json:"zurg_url,omitempty"`
AutoProcess bool `json:"auto_process,omitempty"`
UseWebDav bool `json:"use_webdav,omitempty"`
Workers int `json:"workers,omitempty"`
ReInsert bool `json:"reinsert,omitempty"`
Strategy RepairStrategy `json:"strategy,omitempty"`
Enabled bool `json:"enabled,omitempty"`
Interval string `json:"interval,omitempty"`
RunOnStart bool `json:"run_on_start,omitempty"`
ZurgURL string `json:"zurg_url,omitempty"`
AutoProcess bool `json:"auto_process,omitempty"`
UseWebDav bool `json:"use_webdav,omitempty"`
Workers int `json:"workers,omitempty"`
ReInsert bool `json:"reinsert,omitempty"`
}
type Auth struct {
@@ -81,55 +69,25 @@ type Auth struct {
Password string `json:"password,omitempty"`
}
type SABnzbd struct {
DownloadFolder string `json:"download_folder,omitempty"`
RefreshInterval int `json:"refresh_interval,omitempty"`
Categories []string `json:"categories,omitempty"`
}
type Usenet struct {
Providers []UsenetProvider `json:"providers,omitempty"` // List of usenet providers
MountFolder string `json:"mount_folder,omitempty"` // Folder where usenet downloads are mounted
SkipPreCache bool `json:"skip_pre_cache,omitempty"`
Chunks int `json:"chunks,omitempty"` // Number of chunks to pre-cache
RcUrl string `json:"rc_url,omitempty"` // Rclone RC URL for the webdav
RcUser string `json:"rc_user,omitempty"` // Rclone RC username
RcPass string `json:"rc_pass,omitempty"` // Rclone RC password
}
type UsenetProvider struct {
Name string `json:"name,omitempty"`
Host string `json:"host,omitempty"` // Host of the usenet server
Port int `json:"port,omitempty"` // Port of the usenet server
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Connections int `json:"connections,omitempty"` // Number of connections to use
SSL bool `json:"ssl,omitempty"` // Use SSL for the connection
UseTLS bool `json:"use_tls,omitempty"` // Use TLS for the connection
}
type Config struct {
// server
BindAddress string `json:"bind_address,omitempty"`
URLBase string `json:"url_base,omitempty"`
Port string `json:"port,omitempty"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent *QBitTorrent `json:"qbittorrent,omitempty"`
SABnzbd *SABnzbd `json:"sabnzbd,omitempty"`
Usenet *Usenet `json:"usenet,omitempty"` // Usenet configuration
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
RemoveStalledAfter string `json:"remove_stalled_after,omitzero"`
LogLevel string `json:"log_level,omitempty"`
Debrids []Debrid `json:"debrids,omitempty"`
QBitTorrent QBitTorrent `json:"qbittorrent,omitempty"`
Arrs []Arr `json:"arrs,omitempty"`
Repair Repair `json:"repair,omitempty"`
WebDav WebDav `json:"webdav,omitempty"`
AllowedExt []string `json:"allowed_file_types,omitempty"`
MinFileSize string `json:"min_file_size,omitempty"` // Minimum file size to download, 10MB, 1GB, etc
MaxFileSize string `json:"max_file_size,omitempty"` // Maximum file size to download (0 means no limit)
Path string `json:"-"` // Path to save the config file
UseAuth bool `json:"use_auth,omitempty"`
Auth *Auth `json:"-"`
DiscordWebhook string `json:"discord_webhook_url,omitempty"`
}
func (c *Config) JsonFile() string {
@@ -139,14 +97,6 @@ func (c *Config) AuthFile() string {
return filepath.Join(c.Path, "auth.json")
}
func (c *Config) TorrentsFile() string {
return filepath.Join(c.Path, "torrents.json")
}
func (c *Config) NZBsPath() string {
return filepath.Join(c.Path, "cache/nzbs")
}
func (c *Config) loadConfig() error {
// Load the config file
if configPath == "" {
@@ -174,6 +124,9 @@ func (c *Config) loadConfig() error {
}
func validateDebrids(debrids []Debrid) error {
if len(debrids) == 0 {
return errors.New("no debrids configured")
}
for _, debrid := range debrids {
// Basic field validation
@@ -188,51 +141,17 @@ func validateDebrids(debrids []Debrid) error {
return nil
}
func validateUsenet(usenet *Usenet) error {
if usenet == nil {
return nil // No usenet configuration provided
}
for _, usenet := range usenet.Providers {
// Basic field validation
if usenet.Host == "" {
return errors.New("usenet host is required")
}
if usenet.Username == "" {
return errors.New("usenet username is required")
}
if usenet.Password == "" {
return errors.New("usenet password is required")
}
}
return nil
}
func validateSabznbd(config *SABnzbd) error {
if config == nil {
return nil // No SABnzbd configuration provided
}
if config.DownloadFolder != "" {
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
return fmt.Errorf("sabnzbd download folder(%s) does not exist", config.DownloadFolder)
}
}
return nil
}
func validateQbitTorrent(config *QBitTorrent) error {
if config == nil {
return nil // No qBittorrent configuration provided
if config.DownloadFolder == "" {
return errors.New("qbittorent download folder is required")
}
if config.DownloadFolder != "" {
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder)
}
if _, err := os.Stat(config.DownloadFolder); os.IsNotExist(err) {
return fmt.Errorf("qbittorent download folder(%s) does not exist", config.DownloadFolder)
}
return nil
}
func validateRepair(config Repair) error {
func validateRepair(config *Repair) error {
if !config.Enabled {
return nil
}
@@ -244,34 +163,19 @@ func validateRepair(config Repair) error {
func ValidateConfig(config *Config) error {
// Run validations concurrently
// Check if there's at least one debrid or usenet configured
hasUsenet := false
if config.Usenet != nil && len(config.Usenet.Providers) > 0 {
hasUsenet = true
}
if len(config.Debrids) == 0 && !hasUsenet {
return errors.New("at least one debrid or usenet provider must be configured")
}
if err := validateDebrids(config.Debrids); err != nil {
return err
}
if err := validateUsenet(config.Usenet); err != nil {
if err := validateQbitTorrent(&config.QBitTorrent); err != nil {
return err
}
if err := validateSabznbd(config.SABnzbd); err != nil {
if err := validateRepair(&config.Repair); err != nil {
return err
}
if err := validateQbitTorrent(config.QBitTorrent); err != nil {
return err
}
if err := validateRepair(config.Repair); err != nil {
return err
}
return nil
}
@@ -367,18 +271,8 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
workers := runtime.NumCPU() * 50
perDebrid := workers / len(c.Debrids)
var downloadKeys []string
if len(d.DownloadAPIKeys) > 0 {
downloadKeys = d.DownloadAPIKeys
} else {
// If no download API keys are specified, use the main API key
downloadKeys = []string{d.APIKey}
}
d.DownloadAPIKeys = downloadKeys
if d.Workers == 0 {
d.Workers = perDebrid
if len(d.DownloadAPIKeys) == 0 {
d.DownloadAPIKeys = append(d.DownloadAPIKeys, d.APIKey)
}
if !d.UseWebDav {
@@ -391,6 +285,9 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
if d.WebDav.DownloadLinksRefreshInterval == "" {
d.DownloadLinksRefreshInterval = cmp.Or(c.WebDav.DownloadLinksRefreshInterval, "40m") // 40 minutes
}
if d.Workers == 0 {
d.Workers = perDebrid
}
if d.FolderNaming == "" {
d.FolderNaming = cmp.Or(c.WebDav.FolderNaming, "original_no_ext")
}
@@ -417,47 +314,17 @@ func (c *Config) updateDebrid(d Debrid) Debrid {
return d
}
func (c *Config) updateUsenet(u UsenetProvider) UsenetProvider {
if u.Name == "" {
parts := strings.Split(u.Host, ".")
if len(parts) >= 2 {
u.Name = parts[len(parts)-2] // Gets "example" from "news.example.com"
} else {
u.Name = u.Host // Fallback to host if it doesn't look like a domain
}
}
if u.Port == 0 {
u.Port = 119 // Default port for usenet
}
if u.Connections == 0 {
u.Connections = 30 // Default connections
}
if u.SSL && !u.UseTLS {
u.UseTLS = true // Use TLS if SSL is enabled
}
return u
}
func (c *Config) setDefaults() {
for i, debrid := range c.Debrids {
c.Debrids[i] = c.updateDebrid(debrid)
}
if c.SABnzbd != nil {
c.SABnzbd.RefreshInterval = cmp.Or(c.SABnzbd.RefreshInterval, 10) // Default to 10 seconds
}
if c.Usenet != nil {
c.Usenet.Chunks = cmp.Or(c.Usenet.Chunks, 5)
for i, provider := range c.Usenet.Providers {
c.Usenet.Providers[i] = c.updateUsenet(provider)
}
}
if len(c.AllowedExt) == 0 {
c.AllowedExt = getDefaultExtensions()
}
c.Port = cmp.Or(c.Port, c.QBitTorrent.Port)
if c.URLBase == "" {
c.URLBase = "/"
}
@@ -469,11 +336,6 @@ func (c *Config) setDefaults() {
c.URLBase += "/"
}
// Set repair defaults
if c.Repair.Strategy == "" {
c.Repair.Strategy = RepairStrategyPerTorrent
}
// Load the auth file
c.Auth = c.GetAuth()
}
@@ -504,6 +366,11 @@ func (c *Config) createConfig(path string) error {
c.Port = "8282"
c.LogLevel = "info"
c.UseAuth = true
c.QBitTorrent = QBitTorrent{
DownloadFolder: filepath.Join(path, "downloads"),
Categories: []string{"sonarr", "radarr"},
RefreshInterval: 15,
}
return nil
}

View File

@@ -24,7 +24,7 @@ func (c *Config) IsAllowedFile(filename string) bool {
}
func getDefaultExtensions() []string {
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts", ",")
videoExts := strings.Split("webm,m4v,3gp,nsv,ty,strm,rm,rmvb,m3u,ifo,mov,qt,divx,xvid,bivx,nrg,pva,wmv,asf,asx,ogm,ogv,m2v,avi,bin,dat,dvr-ms,mpg,mpeg,mp4,avc,vp3,svq3,nuv,viv,dv,fli,flv,wpl,img,iso,vob,mkv,mk3d,ts,wtv,m2ts'", ",")
musicExts := strings.Split("MP3,WAV,FLAC,OGG,WMA,AIFF,ALAC,M4A,APE,AC3,DTS,M4P,MID,MIDI,MKA,MP2,MPA,RA,VOC,WV,AMR", ",")
// Combine both slices

View File

@@ -1,178 +0,0 @@
package nntp
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/puzpuzpuz/xsync/v4"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"sync/atomic"
"time"
)
// Client represents a failover NNTP client that manages multiple providers.
// Providers are tried in their configured order; each provider is backed by
// its own connection Pool, stored in pools keyed by provider name.
type Client struct {
	providers []config.UsenetProvider
	pools     *xsync.Map[string, *Pool]
	logger    zerolog.Logger
	closed    atomic.Bool // set once Close has run; guards further use
	minimumMaxConns int   // Minimum number of max connections across all pools
}
// NewClient builds a failover NNTP client over the given providers. It fails
// when the provider list is empty; the per-provider connection pools are
// created later by InitPools.
func NewClient(providers []config.UsenetProvider) (*Client, error) {
	if len(providers) == 0 {
		return nil, fmt.Errorf("no NNTP providers configured")
	}
	return &Client{
		providers: providers,
		logger:    logger.New("nntp"),
		pools:     xsync.NewMap[string, *Pool](),
	}, nil
}
// InitPools creates a connection pool for every configured provider.
// Providers whose pool fails to initialize are logged and skipped; the method
// only returns an error when no pool at all could be created. It also records
// the smallest per-pool connection limit in minimumMaxConns.
func (c *Client) InitPools() error {
	var initErrors []error
	successfulPools := 0
	for _, provider := range c.providers {
		serverPool, err := NewPool(provider, c.logger)
		if err != nil {
			c.logger.Error().
				Err(err).
				Str("server", provider.Host).
				Int("port", provider.Port).
				Msg("Failed to initialize server pool")
			initErrors = append(initErrors, err)
			continue
		}
		if c.minimumMaxConns == 0 {
			// Set minimumMaxConns to the max connections of the first successful pool
			c.minimumMaxConns = serverPool.ConnectionCount()
		} else {
			c.minimumMaxConns = min(c.minimumMaxConns, serverPool.ConnectionCount())
		}
		c.pools.Store(provider.Name, serverPool)
		successfulPools++
	}
	if successfulPools == 0 {
		return fmt.Errorf("failed to initialize any server pools: %v", initErrors)
	}
	c.logger.Info().
		Int("providers", len(c.providers)).
		Msg("NNTP client created")
	return nil
}
// Close shuts down every provider pool exactly once. Errors from individual
// pools are logged but do not stop the remaining pools from being closed.
func (c *Client) Close() {
	// CompareAndSwap makes Close idempotent even under concurrent callers
	// (the previous Load-then-Store sequence was racy).
	if !c.closed.CompareAndSwap(false, true) {
		c.logger.Warn().Msg("NNTP client already closed")
		return
	}
	c.pools.Range(func(key string, value *Pool) bool {
		if value != nil {
			if err := value.Close(); err != nil {
				// Log and keep iterating: returning false here would abort
				// the Range and leave the remaining pools (and their
				// sockets) open, silently dropping the error as well.
				c.logger.Error().Err(err).Str("provider", key).Msg("failed to close pool")
			}
		}
		return true
	})
	c.logger.Info().Msg("NNTP client closed")
}
// GetConnection returns a pooled connection together with a release func that
// the caller must invoke when done. Providers are tried in configured order;
// when more than one provider exists, a pool that reports no free capacity is
// skipped instead of queued on. Acquisition is capped at 5 seconds so callers
// never block indefinitely on an exhausted pool.
func (c *Client) GetConnection(ctx context.Context) (*Connection, func(), error) {
	if c.closed.Load() {
		return nil, nil, fmt.Errorf("nntp client is closed")
	}
	// Prevent workers from waiting too long for connections
	connCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	providerCount := len(c.providers)
	for _, provider := range c.providers {
		pool, ok := c.pools.Load(provider.Name)
		if !ok {
			return nil, nil, fmt.Errorf("no pool found for provider %s", provider.Name)
		}
		// With multiple providers, prefer one with free capacity over
		// waiting on a saturated pool.
		if !pool.IsFree() && providerCount > 1 {
			continue
		}
		conn, err := pool.Get(connCtx) // Use timeout context
		if err != nil {
			// Exhaustion or timeout on this provider: fail over to the next.
			if errors.Is(err, ErrNoAvailableConnection) || errors.Is(err, context.DeadlineExceeded) {
				continue
			}
			return nil, nil, fmt.Errorf("error getting connection from provider %s: %w", provider.Name, err)
		}
		if conn == nil {
			continue
		}
		// The returned closure puts the connection back into its pool.
		return conn, func() { pool.Put(conn) }, nil
	}
	return nil, nil, ErrNoAvailableConnection
}
// DownloadHeader fetches the article body for messageID and decodes only its
// yEnc headers (=ybegin / =ypart), returning the part metadata without
// decoding the payload.
func (c *Client) DownloadHeader(ctx context.Context, messageID string) (*YencMetadata, error) {
	conn, cleanup, err := c.GetConnection(ctx)
	if err != nil {
		return nil, err
	}
	defer cleanup()
	data, err := conn.GetBody(messageID)
	if err != nil {
		return nil, err
	}
	// yEnc decode (headers only). Keep the underlying error in the chain
	// instead of discarding it, so callers can see why decoding failed.
	part, err := DecodeYencHeaders(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("failed to decode segment: %w", err)
	}
	if part == nil {
		return nil, fmt.Errorf("failed to decode segment")
	}
	// Return both the filename and decoded data
	return part, nil
}
// MinimumMaxConns returns the smallest connection limit observed across all
// successfully initialized pools (computed in InitPools).
func (c *Client) MinimumMaxConns() int {
	return c.minimumMaxConns
}

// TotalActiveConnections sums the currently active connections of every pool.
func (c *Client) TotalActiveConnections() int {
	total := 0
	c.pools.Range(func(key string, value *Pool) bool {
		if value != nil {
			total += value.ActiveConnections()
		}
		return true
	})
	return total
}

// Pools exposes the per-provider pool map (keyed by provider name).
func (c *Client) Pools() *xsync.Map[string, *Pool] {
	return c.pools
}

// GetProviders returns the providers this client was configured with.
func (c *Client) GetProviders() []config.UsenetProvider {
	return c.providers
}

View File

@@ -1,394 +0,0 @@
package nntp
import (
"bufio"
"crypto/tls"
"fmt"
"github.com/chrisfarms/yenc"
"github.com/rs/zerolog"
"io"
"net"
"net/textproto"
"strconv"
"strings"
)
// Connection represents an NNTP connection
// A single TCP session (optionally upgraded to TLS via startTLS) to one news
// server; reader, writer and text all wrap the same underlying conn.
type Connection struct {
	username, password, address string
	port   int
	conn   net.Conn
	text   *textproto.Conn
	reader *bufio.Reader
	writer *bufio.Writer
	logger zerolog.Logger
}
// authenticate performs the two-step AUTHINFO USER / AUTHINFO PASS exchange.
// It expects code 381 (password required) after USER and 281 (accepted) after
// PASS; any other status is mapped to a typed NNTP error via classifyNNTPError.
func (c *Connection) authenticate() error {
	// Send AUTHINFO USER command
	if err := c.sendCommand(fmt.Sprintf("AUTHINFO USER %s", c.username)); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send username: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read user response: %w", err))
	}
	if resp.Code != 381 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("unexpected response to AUTHINFO USER: %s", resp.Message))
	}
	// Send AUTHINFO PASS command
	if err := c.sendCommand(fmt.Sprintf("AUTHINFO PASS %s", c.password)); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send password: %w", err))
	}
	resp, err = c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read password response: %w", err))
	}
	if resp.Code != 281 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("authentication failed: %s", resp.Message))
	}
	return nil
}
// startTLS initiates TLS encryption with proper error handling
// Sends STARTTLS, expects code 382, then upgrades conn in place and rebuilds
// reader/writer/text on the TLS wrapper. Certificate verification stays ON
// (InsecureSkipVerify is false) with ServerName taken from c.address.
func (c *Connection) startTLS() error {
	if err := c.sendCommand("STARTTLS"); err != nil {
		return NewConnectionError(fmt.Errorf("failed to send STARTTLS: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return NewConnectionError(fmt.Errorf("failed to read STARTTLS response: %w", err))
	}
	if resp.Code != 382 {
		return classifyNNTPError(resp.Code, fmt.Sprintf("STARTTLS not supported: %s", resp.Message))
	}
	// Upgrade connection to TLS
	tlsConn := tls.Client(c.conn, &tls.Config{
		ServerName:         c.address,
		InsecureSkipVerify: false,
	})
	// Replace all I/O wrappers so subsequent reads/writes go through TLS.
	c.conn = tlsConn
	c.reader = bufio.NewReader(tlsConn)
	c.writer = bufio.NewWriter(tlsConn)
	c.text = textproto.NewConn(tlsConn)
	c.logger.Debug().Msg("TLS encryption enabled")
	return nil
}
// ping probes the connection by issuing a cheap DATE command and reading the
// reply; any transport failure is surfaced as a connection error.
func (c *Connection) ping() error {
	if err := c.sendCommand("DATE"); err != nil {
		return NewConnectionError(err)
	}
	if _, err := c.readResponse(); err != nil {
		return NewConnectionError(err)
	}
	return nil
}
// sendCommand writes a single CRLF-terminated command line to the server and
// flushes the buffered writer so it is transmitted immediately.
func (c *Connection) sendCommand(command string) error {
	if _, err := fmt.Fprintf(c.writer, "%s\r\n", command); err != nil {
		return err
	}
	return c.writer.Flush()
}
// readResponse reads one status line from the server and splits it into its
// numeric code and trailing message text (which may be empty).
func (c *Connection) readResponse() (*Response, error) {
	line, err := c.text.ReadLine()
	if err != nil {
		return nil, err
	}
	codeText, message, _ := strings.Cut(line, " ")
	code, err := strconv.Atoi(codeText)
	if err != nil {
		return nil, fmt.Errorf("invalid response code: %s", codeText)
	}
	return &Response{
		Code:    code,
		Message: message,
	}, nil
}
// readMultilineResponse reads a multiline response
// Reads the status line first; 2xx codes are then followed by a
// dot-terminated block read via ReadDotLines and stored in resp.Lines.
// NOTE(review): treating every 2xx as multiline is a simplification of the
// NNTP protocol — confirm against the commands this is used with.
func (c *Connection) readMultilineResponse() (*Response, error) {
	resp, err := c.readResponse()
	if err != nil {
		return nil, err
	}
	// Check if this is a multiline response
	if resp.Code < 200 || resp.Code >= 300 {
		return resp, nil
	}
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, err
	}
	resp.Lines = lines
	return resp, nil
}
// GetArticle retrieves an article by message ID with proper error classification
// Issues ARTICLE, expects code 220, and parses the combined header+body
// payload via parseArticle. Non-220 statuses become typed NNTP errors.
func (c *Connection) GetArticle(messageID string) (*Article, error) {
	messageID = FormatMessageID(messageID)
	if err := c.sendCommand(fmt.Sprintf("ARTICLE %s", messageID)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send ARTICLE command: %w", err))
	}
	resp, err := c.readMultilineResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read article response: %w", err))
	}
	if resp.Code != 220 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	return c.parseArticle(messageID, resp.Lines)
}
// GetBody retrieves article body by message ID with proper error classification
// Issues BODY, expects code 222, then reads the dot-terminated payload.
// ReadDotLines strips line terminators (and dot-stuffing), so the lines are
// re-joined with CRLF — including a trailing CRLF — to preserve the exact
// framing that yEnc decoding relies on.
func (c *Connection) GetBody(messageID string) ([]byte, error) {
	messageID = FormatMessageID(messageID)
	if err := c.sendCommand(fmt.Sprintf("BODY %s", messageID)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send BODY command: %w", err))
	}
	// Read the initial response
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read body response: %w", err))
	}
	if resp.Code != 222 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	// Read the raw body data directly using textproto to preserve exact formatting for yEnc
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read body data: %w", err))
	}
	// Join with \r\n to preserve original line endings and add final \r\n
	body := strings.Join(lines, "\r\n")
	if len(lines) > 0 {
		body += "\r\n"
	}
	return []byte(body), nil
}
// GetHead retrieves the raw header block of an article by message ID.
// Issues HEAD, expects code 221, then reads the dot-terminated header lines
// and returns them with CRLF line endings restored (trailing CRLF included).
func (c *Connection) GetHead(messageID string) ([]byte, error) {
	messageID = FormatMessageID(messageID)
	if err := c.sendCommand(fmt.Sprintf("HEAD %s", messageID)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send HEAD command: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read head response: %w", err))
	}
	if resp.Code != 221 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	lines, err := c.text.ReadDotLines()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read header data: %w", err))
	}
	// Re-append CRLF after every line (ReadDotLines strips terminators);
	// an empty response yields an empty byte slice.
	var b strings.Builder
	for _, line := range lines {
		b.WriteString(line)
		b.WriteString("\r\n")
	}
	return []byte(b.String()), nil
}
// GetSegment retrieves a specific segment with proper error handling
// Thin wrapper over GetBody that packages the raw body into a Segment with
// the caller-supplied segment number; Bytes is the raw (still yEnc-encoded)
// body length.
func (c *Connection) GetSegment(messageID string, segmentNumber int) (*Segment, error) {
	messageID = FormatMessageID(messageID)
	body, err := c.GetBody(messageID)
	if err != nil {
		return nil, err // GetBody already returns classified errors
	}
	return &Segment{
		MessageID: messageID,
		Number:    segmentNumber,
		Bytes:     int64(len(body)),
		Data:      body,
	}, nil
}
// Stat retrieves article statistics by message ID with proper error classification
// Issues STAT, expects a 223 reply of the form "n <message-id>", and returns
// the article number plus the message ID as echoed by the server.
func (c *Connection) Stat(messageID string) (articleNumber int, echoedID string, err error) {
	messageID = FormatMessageID(messageID)
	if err = c.sendCommand(fmt.Sprintf("STAT %s", messageID)); err != nil {
		return 0, "", NewConnectionError(fmt.Errorf("failed to send STAT: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return 0, "", NewConnectionError(fmt.Errorf("failed to read STAT response: %w", err))
	}
	if resp.Code != 223 {
		return 0, "", classifyNNTPError(resp.Code, resp.Message)
	}
	// resp.Message carries "article-number message-id [...]".
	fields := strings.Fields(resp.Message)
	if len(fields) < 2 {
		return 0, "", NewProtocolError(resp.Code, fmt.Sprintf("unexpected STAT response format: %q", resp.Message))
	}
	if articleNumber, err = strconv.Atoi(fields[0]); err != nil {
		return 0, "", NewProtocolError(resp.Code, fmt.Sprintf("invalid article number %q: %v", fields[0], err))
	}
	echoedID = fields[1]
	return articleNumber, echoedID, nil
}
// SelectGroup selects a newsgroup and returns group information
// Issues GROUP, expects code 211 with "count low high name"; individual
// numeric fields that fail to parse are left at their zero values rather
// than failing the whole call.
func (c *Connection) SelectGroup(groupName string) (*GroupInfo, error) {
	if err := c.sendCommand(fmt.Sprintf("GROUP %s", groupName)); err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to send GROUP command: %w", err))
	}
	resp, err := c.readResponse()
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to read GROUP response: %w", err))
	}
	if resp.Code != 211 {
		return nil, classifyNNTPError(resp.Code, resp.Message)
	}
	// Parse GROUP response: "211 number low high group-name"
	fields := strings.Fields(resp.Message)
	if len(fields) < 4 {
		return nil, NewProtocolError(resp.Code, fmt.Sprintf("unexpected GROUP response format: %q", resp.Message))
	}
	groupInfo := &GroupInfo{
		Name: groupName,
	}
	// Best-effort numeric parsing: a bad field simply stays zero.
	if count, err := strconv.Atoi(fields[0]); err == nil {
		groupInfo.Count = count
	}
	if low, err := strconv.Atoi(fields[1]); err == nil {
		groupInfo.Low = low
	}
	if high, err := strconv.Atoi(fields[2]); err == nil {
		groupInfo.High = high
	}
	return groupInfo, nil
}
// parseArticle parses article data from response lines
// Headers are scanned until the first empty line; only Subject, From, Date
// and Newsgroups are extracted, using exact case-sensitive prefix matching
// (folded/continuation header lines are not handled). Everything after the
// blank separator becomes the body, re-joined with "\n".
func (c *Connection) parseArticle(messageID string, lines []string) (*Article, error) {
	article := &Article{
		MessageID: messageID,
		Groups:    []string{},
	}
	headerEnd := -1
	for i, line := range lines {
		if line == "" {
			headerEnd = i
			break
		}
		// Parse headers
		if strings.HasPrefix(line, "Subject: ") {
			article.Subject = strings.TrimPrefix(line, "Subject: ")
		} else if strings.HasPrefix(line, "From: ") {
			article.From = strings.TrimPrefix(line, "From: ")
		} else if strings.HasPrefix(line, "Date: ") {
			article.Date = strings.TrimPrefix(line, "Date: ")
		} else if strings.HasPrefix(line, "Newsgroups: ") {
			groups := strings.TrimPrefix(line, "Newsgroups: ")
			article.Groups = strings.Split(groups, ",")
			for i := range article.Groups {
				article.Groups[i] = strings.TrimSpace(article.Groups[i])
			}
		}
	}
	// Join body lines
	if headerEnd != -1 && headerEnd+1 < len(lines) {
		body := strings.Join(lines[headerEnd+1:], "\n")
		article.Body = []byte(body)
		article.Size = int64(len(article.Body))
	}
	return article, nil
}
// close closes the NNTP connection
// Safe to call on a never-connected instance: a nil conn is a no-op.
func (c *Connection) close() error {
	if c.conn != nil {
		return c.conn.Close()
	}
	return nil
}
// DecodeYenc fully decodes a yEnc-encoded stream into a part, wrapping any
// failure from the underlying yenc library as a typed YencDecode error.
func DecodeYenc(reader io.Reader) (*yenc.Part, error) {
	part, err := yenc.Decode(reader)
	if err != nil {
		return nil, NewYencDecodeError(fmt.Errorf("failed to create yenc decoder: %w", err))
	}
	return part, nil
}
// IsValidMessageID reports whether messageID looks plausible: at least three
// characters long and containing an "@" separator.
func IsValidMessageID(messageID string) bool {
	return len(messageID) >= 3 && strings.Contains(messageID, "@")
}
// FormatMessageID normalizes a message ID: surrounding whitespace is trimmed
// and exactly one pair of angle brackets is guaranteed around the ID.
func FormatMessageID(messageID string) string {
	id := strings.TrimSpace(messageID)
	id = "<" + strings.TrimPrefix(id, "<")
	id = strings.TrimSuffix(id, ">") + ">"
	return id
}

View File

@@ -1,116 +0,0 @@
package nntp
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
// YencMetadata contains just the header information
// Fields mirror the =ybegin/=ypart parameters; Begin/End are only populated
// for multipart files (Part > 0).
type YencMetadata struct {
	Name     string // filename
	Size     int64  // total file size
	Part     int    // part number
	Total    int    // total parts
	Begin    int64  // part start byte
	End      int64  // part end byte
	LineSize int    // line length
}
// DecodeYencHeaders extracts only yenc header metadata without decoding body
// Scans for the =ybegin line, then — when part > 0 indicates a multipart
// post — for the following =ypart line. The encoded payload itself is never
// read, making this much cheaper than a full decode.
func DecodeYencHeaders(reader io.Reader) (*YencMetadata, error) {
	buf := bufio.NewReader(reader)
	metadata := &YencMetadata{}
	// Find and parse =ybegin header
	if err := parseYBeginHeader(buf, metadata); err != nil {
		return nil, NewYencDecodeError(fmt.Errorf("failed to parse ybegin header: %w", err))
	}
	// Parse =ypart header if this is a multipart file
	if metadata.Part > 0 {
		if err := parseYPartHeader(buf, metadata); err != nil {
			return nil, NewYencDecodeError(fmt.Errorf("failed to parse ypart header: %w", err))
		}
	}
	return metadata, nil
}
// parseYBeginHeader scans buf until it finds the "=ybegin" line and fills
// metadata from it. The "name=" parameter is split off first (it runs to end
// of line and may contain spaces); the remaining space-separated key=value
// pairs (size, line, part, total) are parsed best-effort — malformed numbers
// leave the field at zero. Returns the reader error (e.g. io.EOF) when no
// =ybegin line is found.
func parseYBeginHeader(buf *bufio.Reader, metadata *YencMetadata) error {
	var s string
	var err error
	// Find the =ybegin line
	for {
		s, err = buf.ReadString('\n')
		if err != nil {
			return err
		}
		if len(s) >= 7 && s[:7] == "=ybegin" {
			break
		}
	}
	// Parse the header line: everything after "name=" is the filename.
	parts := strings.SplitN(s[7:], "name=", 2)
	if len(parts) > 1 {
		metadata.Name = strings.TrimSpace(parts[1])
	}
	// Parse other parameters
	for _, header := range strings.Split(parts[0], " ") {
		kv := strings.SplitN(strings.TrimSpace(header), "=", 2)
		if len(kv) < 2 {
			continue
		}
		switch kv[0] {
		case "size":
			metadata.Size, _ = strconv.ParseInt(kv[1], 10, 64)
		case "line":
			metadata.LineSize, _ = strconv.Atoi(kv[1])
		case "part":
			metadata.Part, _ = strconv.Atoi(kv[1])
		case "total":
			metadata.Total, _ = strconv.Atoi(kv[1])
		}
	}
	return nil
}
// parseYPartHeader scans buf until it finds the "=ypart" line and fills the
// Begin/End byte offsets of this part. Parsing is best-effort: malformed
// numbers leave the field at zero. Returns the reader error (e.g. io.EOF)
// when no =ypart line is found.
func parseYPartHeader(buf *bufio.Reader, metadata *YencMetadata) error {
	var s string
	var err error
	// Find the =ypart line
	for {
		s, err = buf.ReadString('\n')
		if err != nil {
			return err
		}
		if len(s) >= 6 && s[:6] == "=ypart" {
			break
		}
	}
	// Parse part parameters (begin/end byte offsets)
	for _, header := range strings.Split(s[6:], " ") {
		kv := strings.SplitN(strings.TrimSpace(header), "=", 2)
		if len(kv) < 2 {
			continue
		}
		switch kv[0] {
		case "begin":
			metadata.Begin, _ = strconv.ParseInt(kv[1], 10, 64)
		case "end":
			metadata.End, _ = strconv.ParseInt(kv[1], 10, 64)
		}
	}
	return nil
}

View File

@@ -1,195 +0,0 @@
package nntp
import (
"errors"
"fmt"
)
// Error types for NNTP operations
// ErrorType classifies NNTP failures; see IsRetryable/ShouldStopParsing for
// how each category is treated.
type ErrorType int

const (
	ErrorTypeUnknown ErrorType = iota
	ErrorTypeConnection            // transport-level failure (retryable)
	ErrorTypeAuthentication        // AUTHINFO rejected (code 482)
	ErrorTypeTimeout
	ErrorTypeArticleNotFound       // code 430
	ErrorTypeGroupNotFound         // code 411
	ErrorTypePermissionDenied      // code 502
	ErrorTypeServerBusy            // code 400 (retryable)
	ErrorTypeInvalidCommand
	ErrorTypeProtocol              // malformed/unexpected server response
	ErrorTypeYencDecode            // yEnc parsing failure
	ErrorTypeNoAvailableConnection // connection pool exhausted
)
// Error represents an NNTP-specific error
// It carries the classified Type, the raw server response code (0 when the
// failure was local), a human-readable message, and the wrapped cause.
type Error struct {
	Type    ErrorType
	Code    int    // NNTP response code
	Message string // Error message
	Err     error  // Underlying error
}
// Predefined errors for common cases
// These are sentinel values meant for errors.Is comparisons; matching is by
// Type only (see Error.Is), so any error of the same category matches.
var (
	ErrArticleNotFound       = &Error{Type: ErrorTypeArticleNotFound, Code: 430, Message: "article not found"}
	ErrGroupNotFound         = &Error{Type: ErrorTypeGroupNotFound, Code: 411, Message: "group not found"}
	ErrPermissionDenied      = &Error{Type: ErrorTypePermissionDenied, Code: 502, Message: "permission denied"}
	ErrAuthenticationFail    = &Error{Type: ErrorTypeAuthentication, Code: 482, Message: "authentication failed"}
	ErrServerBusy            = &Error{Type: ErrorTypeServerBusy, Code: 400, Message: "server busy"}
	ErrPoolNotFound          = &Error{Type: ErrorTypeUnknown, Code: 0, Message: "NNTP pool not found", Err: nil}
	ErrNoAvailableConnection = &Error{Type: ErrorTypeNoAvailableConnection, Code: 0, Message: "no available connection in pool", Err: nil}
)
// Error implements the error interface, rendering category, response code,
// message, and any wrapped underlying error.
func (e *Error) Error() string {
	if e.Err == nil {
		return fmt.Sprintf("NNTP %s (code %d): %s", e.Type.String(), e.Code, e.Message)
	}
	return fmt.Sprintf("NNTP %s (code %d): %s - %v", e.Type.String(), e.Code, e.Message, e.Err)
}
// Unwrap returns the wrapped underlying error (may be nil), enabling
// errors.Is / errors.As chains through this error.
func (e *Error) Unwrap() error {
	return e.Err
}
// Is reports whether target is also an *Error with the same Type. Only the
// Type field is compared, so errors.Is matches by category rather than by
// exact code or message.
func (e *Error) Is(target error) bool {
	if t, ok := target.(*Error); ok {
		return e.Type == t.Type
	}
	return false
}
// IsRetryable returns true if the error might be resolved by retrying.
// Only transient conditions (connection drops, timeouts, busy servers)
// qualify; not-found and credential errors never do.
func (e *Error) IsRetryable() bool {
	return e.Type == ErrorTypeConnection ||
		e.Type == ErrorTypeTimeout ||
		e.Type == ErrorTypeServerBusy
}
// ShouldStopParsing returns true if this error should stop the entire
// parsing process. Only credential problems are fatal to the whole run;
// connection drops, missing articles, and busy servers leave the rest of
// the work viable.
func (e *Error) ShouldStopParsing() bool {
	return e.Type == ErrorTypeAuthentication || e.Type == ErrorTypePermissionDenied
}
// String returns the upper-case name of the error category; unrecognized
// values render as "UNKNOWN".
func (et ErrorType) String() string {
	// Order mirrors the iota declaration of the ErrorType constants.
	names := [...]string{
		"UNKNOWN",
		"CONNECTION",
		"AUTHENTICATION",
		"TIMEOUT",
		"ARTICLE_NOT_FOUND",
		"GROUP_NOT_FOUND",
		"PERMISSION_DENIED",
		"SERVER_BUSY",
		"INVALID_COMMAND",
		"PROTOCOL",
		"YENC_DECODE",
	}
	if et < 0 || int(et) >= len(names) {
		return "UNKNOWN"
	}
	return names[et]
}
// Helper functions to create specific errors

// NewConnectionError wraps err as a transport-level connection failure
// (retryable per IsRetryable).
func NewConnectionError(err error) *Error {
	return &Error{
		Type:    ErrorTypeConnection,
		Message: "connection failed",
		Err:     err,
	}
}
// NewTimeoutError wraps err as a timeout, typically a context deadline or
// cancellation.
func NewTimeoutError(err error) *Error {
	return &Error{
		Type:    ErrorTypeTimeout,
		Message: "operation timed out",
		Err:     err,
	}
}
// NewProtocolError builds a protocol-level error from a raw NNTP response
// code and message; no underlying error is attached.
func NewProtocolError(code int, message string) *Error {
	return &Error{
		Type:    ErrorTypeProtocol,
		Code:    code,
		Message: message,
	}
}
// NewYencDecodeError wraps err as a yEnc decoding failure.
func NewYencDecodeError(err error) *Error {
	return &Error{
		Type:    ErrorTypeYencDecode,
		Message: "yEnc decode failed",
		Err:     err,
	}
}
// classifyNNTPError classifies an NNTP response code into an error type.
//
// Mapping per RFC 3977 / RFC 4643 status codes: 430/423 article not found,
// 411 no such group, 502/503 permission or service refusals, 480/481/482
// authentication, 400 service temporarily unavailable, 500/501 command
// errors. Any other code >= 400 is a generic protocol failure; codes below
// 400 are not errors and fall into the unknown bucket.
func classifyNNTPError(code int, message string) *Error {
	switch {
	case code == 430 || code == 423:
		return &Error{Type: ErrorTypeArticleNotFound, Code: code, Message: message}
	case code == 411:
		return &Error{Type: ErrorTypeGroupNotFound, Code: code, Message: message}
	case code == 502 || code == 503:
		return &Error{Type: ErrorTypePermissionDenied, Code: code, Message: message}
	case code == 480 || code == 481 || code == 482:
		// Fix: 480 ("authentication required", RFC 4643) previously fell
		// into the generic protocol bucket; it is a credentials problem
		// like 481/482 and must stop parsing (see ShouldStopParsing).
		return &Error{Type: ErrorTypeAuthentication, Code: code, Message: message}
	case code == 400:
		return &Error{Type: ErrorTypeServerBusy, Code: code, Message: message}
	case code == 500 || code == 501:
		return &Error{Type: ErrorTypeInvalidCommand, Code: code, Message: message}
	case code >= 400:
		return &Error{Type: ErrorTypeProtocol, Code: code, Message: message}
	default:
		return &Error{Type: ErrorTypeUnknown, Code: code, Message: message}
	}
}
// IsArticleNotFoundError reports whether err (or anything it wraps) is an
// NNTP article-not-found error.
func IsArticleNotFoundError(err error) bool {
	nntpErr := &Error{}
	return errors.As(err, &nntpErr) && nntpErr.Type == ErrorTypeArticleNotFound
}
// IsAuthenticationError reports whether err (or anything it wraps) is an
// NNTP authentication failure.
func IsAuthenticationError(err error) bool {
	nntpErr := &Error{}
	return errors.As(err, &nntpErr) && nntpErr.Type == ErrorTypeAuthentication
}
// IsRetryableError reports whether err (or anything it wraps) is an NNTP
// error worth retrying; non-*Error values are never retryable.
func IsRetryableError(err error) bool {
	nntpErr := &Error{}
	return errors.As(err, &nntpErr) && nntpErr.IsRetryable()
}

View File

@@ -1,299 +0,0 @@
package nntp
import (
"bufio"
"context"
"crypto/tls"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"net"
"net/textproto"
"sync"
"sync/atomic"
"time"
)
// Pool manages a pool of NNTP connections
type Pool struct {
	address, username, password string
	maxConns, port              int
	ssl                         bool             // implicit TLS on dial
	useTLS                      bool             // upgrade via STARTTLS after connect (only when ssl is false)
	connections                 chan *Connection // idle connections; buffered to maxConns
	logger                      zerolog.Logger
	closed                      atomic.Bool  // set by Close; checked by Get/Put
	totalConnections            atomic.Int32 // connections successfully created at init
	activeConnections           atomic.Int32 // connections currently checked out via Get
}
// Segment represents a usenet segment
type Segment struct {
	MessageID string // article message-id identifying this segment
	Number    int    // segment sequence number within the file
	Bytes     int64  // segment size in bytes
	Data      []byte // segment payload
}
// Article represents a complete usenet article
type Article struct {
	MessageID string   // unique message-id header
	Subject   string   // subject header
	From      string   // sender header
	Date      string   // raw date header string
	Groups    []string // newsgroups the article belongs to
	Body      []byte   // article body bytes
	Size      int64    // body size in bytes
}
// Response represents an NNTP server response
type Response struct {
	Code    int      // numeric status code (e.g. 200, 430)
	Message string   // status-line text following the code
	Lines   []string // body lines for multi-line responses
}
// GroupInfo represents information about a newsgroup
type GroupInfo struct {
	Name  string // newsgroup name
	Count int    // Number of articles in the group
	Low   int    // Lowest article number
	High  int    // Highest article number
}
// NewPool creates a new NNTP connection pool for the given provider and
// eagerly dials its connections. The pool size comes from
// provider.Connections, clamped to a minimum of 1; creation fails only if
// no connection at all could be established (see initializeConnections).
func NewPool(provider config.UsenetProvider, logger zerolog.Logger) (*Pool, error) {
	maxConns := provider.Connections
	if maxConns <= 0 {
		// Guard against missing/invalid config; always keep one connection.
		maxConns = 1
	}
	pool := &Pool{
		address:     provider.Host,
		username:    provider.Username,
		password:    provider.Password,
		port:        provider.Port,
		maxConns:    maxConns,
		ssl:         provider.SSL,
		useTLS:      provider.UseTLS,
		connections: make(chan *Connection, maxConns),
		logger:      logger,
	}
	return pool.initializeConnections()
}
// initializeConnections dials up to maxConns connections concurrently and
// seeds the pool with the ones that succeed. It returns an error only when
// every dial attempt failed; partial failures are logged as a warning with
// the underlying errors attached.
func (p *Pool) initializeConnections() (*Pool, error) {
	var (
		wg    sync.WaitGroup
		mu    sync.Mutex // guards conns and errs below
		conns = make([]*Connection, 0, p.maxConns)
		errs  = make([]error, 0, p.maxConns)
	)
	// Dial all connections concurrently so startup is bounded by the
	// slowest single dial, not the sum of them.
	for i := 0; i < p.maxConns; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			conn, err := p.createConnection()
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				errs = append(errs, err)
				return
			}
			conns = append(conns, conn)
		}()
	}
	// Wait for all connection attempts to complete.
	wg.Wait()

	// Seed the pool channel with every successful connection.
	for _, conn := range conns {
		p.connections <- conn
	}
	p.totalConnections.Store(int32(len(conns)))

	if len(conns) == 0 {
		return nil, fmt.Errorf("failed to create any connections: %v", errs)
	}

	p.logger.Info().
		Str("server", p.address).
		Int("port", p.port).
		Int("requested_connections", p.maxConns).
		Int("successful_connections", len(conns)).
		Int("failed_connections", len(errs)).
		Msg("NNTP connection pool created")

	// If some connections failed, log a warning (with the actual errors,
	// which the previous version dropped) but continue with what we have.
	if len(errs) > 0 {
		p.logger.Warn().
			Errs("errors", errs).
			Int("failed_count", len(errs)).
			Msg("Some connections failed during pool initialization")
	}
	return p, nil
}
// Get retrieves a connection from the pool, blocking until one is idle or
// ctx is done. A connection that fails its health ping is discarded and
// transparently replaced with a freshly dialed one.
func (p *Pool) Get(ctx context.Context) (*Connection, error) {
	if p.closed.Load() {
		return nil, NewConnectionError(fmt.Errorf("connection pool is closed"))
	}
	select {
	case conn := <-p.connections:
		if conn == nil {
			return nil, NewConnectionError(fmt.Errorf("received nil connection from pool"))
		}
		p.activeConnections.Add(1)
		if err := conn.ping(); err != nil {
			p.activeConnections.Add(-1)
			// The connection is already dead: a failure while closing it
			// must not abort the request (the previous version returned
			// the close error to the caller instead of replacing the
			// connection), so log it and continue to the replacement dial.
			if cerr := conn.close(); cerr != nil {
				p.logger.Debug().Err(cerr).Msg("failed to close dead connection")
			}
			// Create a new connection to take the dead one's slot.
			newConn, err := p.createConnection()
			if err != nil {
				return nil, NewConnectionError(fmt.Errorf("failed to create replacement connection: %w", err))
			}
			p.activeConnections.Add(1)
			return newConn, nil
		}
		return conn, nil
	case <-ctx.Done():
		return nil, NewTimeoutError(ctx.Err())
	}
}
// Put returns a connection to the pool and decrements the active count.
// If the pool is closed the connection is simply closed; if the pool is
// unexpectedly full, the oldest idle connection is evicted to make room.
func (p *Pool) Put(conn *Connection) {
	if conn == nil {
		return
	}
	defer p.activeConnections.Add(-1)
	if p.closed.Load() {
		// Pool is shutting down; best-effort close, error ignored.
		conn.close()
		return
	}
	// Try non-blocking first
	select {
	case p.connections <- conn:
		return
	default:
	}
	// If pool is full, this usually means we have too many connections
	// Force return by making space (close oldest connection)
	// NOTE(review): if Close runs concurrently, the sends below can hit a
	// closed channel and panic; and another goroutine may refill the slot
	// between the receive and the send, making the send block — confirm
	// Put is never raced with Close and callers tolerate the 1s stall.
	select {
	case oldConn := <-p.connections:
		oldConn.close()       // Close the old connection
		p.connections <- conn // Put the new one back
	case <-time.After(1 * time.Second):
		// Still can't return - close this connection
		conn.close()
	}
}
// Close marks the pool closed and shuts down every idle connection. It is
// safe to call multiple times and from multiple goroutines; only the first
// call performs the shutdown. All connections are attempted even if some
// fail to close, and the first close error (if any) is returned.
func (p *Pool) Close() error {
	// CompareAndSwap makes concurrent Close calls race-free: exactly one
	// caller wins. The previous Load-then-Store pattern allowed two
	// callers to both proceed and close the channel twice (a panic).
	if !p.closed.CompareAndSwap(false, true) {
		return nil
	}
	close(p.connections)
	var firstErr error
	for conn := range p.connections {
		if err := conn.close(); err != nil && firstErr == nil {
			// Remember the first failure but keep closing the rest;
			// the old early return leaked every remaining connection.
			firstErr = err
		}
	}
	p.logger.Info().Msg("NNTP connection pool closed")
	return firstErr
}
// createConnection creates a new NNTP connection with proper error handling.
// Sequence: dial (implicit TLS when p.ssl is set), read the server
// greeting, authenticate when credentials are configured, then optionally
// upgrade the session via STARTTLS when useTLS is set on a plain socket.
func (p *Pool) createConnection() (*Connection, error) {
	addr := fmt.Sprintf("%s:%d", p.address, p.port)
	var conn net.Conn
	var err error
	if p.ssl {
		// NOTE(review): no Dialer timeout is set, so an unresponsive host
		// blocks until the OS-level TCP timeout — consider Dialer.Timeout.
		conn, err = tls.DialWithDialer(&net.Dialer{}, "tcp", addr, &tls.Config{
			InsecureSkipVerify: false, // always verify the server certificate
		})
	} else {
		conn, err = net.Dial("tcp", addr)
	}
	if err != nil {
		return nil, NewConnectionError(fmt.Errorf("failed to connect to %s: %w", addr, err))
	}
	reader := bufio.NewReaderSize(conn, 256*1024) // 256KB buffer for better performance
	writer := bufio.NewWriterSize(conn, 256*1024) // 256KB buffer for better performance
	// NOTE(review): text wraps the raw conn while reader/writer buffer it
	// independently; interleaving reads through both paths could lose
	// buffered bytes — confirm Connection uses only one reader per direction.
	text := textproto.NewConn(conn)
	nntpConn := &Connection{
		username: p.username,
		password: p.password,
		address:  p.address,
		port:     p.port,
		conn:     conn,
		text:     text,
		reader:   reader,
		writer:   writer,
		logger:   p.logger,
	}
	// Read welcome message
	_, err = nntpConn.readResponse()
	if err != nil {
		conn.Close()
		return nil, NewConnectionError(fmt.Errorf("failed to read welcome message: %w", err))
	}
	// Authenticate if credentials are provided
	if p.username != "" && p.password != "" {
		if err := nntpConn.authenticate(); err != nil {
			conn.Close()
			return nil, err // authenticate() already returns NNTPError
		}
	}
	// Enable TLS if requested (STARTTLS)
	if p.useTLS && !p.ssl {
		if err := nntpConn.startTLS(); err != nil {
			conn.Close()
			return nil, err // startTLS() already returns NNTPError
		}
	}
	return nntpConn, nil
}
// ConnectionCount reports how many connections were successfully created
// for this pool at initialization.
func (p *Pool) ConnectionCount() int {
	return int(p.totalConnections.Load())
}

// ActiveConnections reports how many connections are currently checked out.
func (p *Pool) ActiveConnections() int {
	return int(p.activeConnections.Load())
}

// IsFree reports whether at least one connection slot is not in use.
func (p *Pool) IsFree() bool {
	return p.ActiveConnections() < p.maxConns
}

View File

@@ -1,6 +1,4 @@
package utils
import "errors"
package request
type HTTPError struct {
StatusCode int
@@ -35,13 +33,3 @@ var TorrentNotFoundError = &HTTPError{
Message: "Torrent not found",
Code: "torrent_not_found",
}
var TooManyActiveDownloadsError = &HTTPError{
StatusCode: 509,
Message: "Too many active downloads",
Code: "too_many_active_downloads",
}
func IsTooManyActiveDownloadsError(err error) bool {
return errors.As(err, &TooManyActiveDownloadsError)
}

View File

@@ -2,15 +2,18 @@ package request
import (
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/logger"
"go.uber.org/ratelimit"
"golang.org/x/net/proxy"
"golang.org/x/time/rate"
"io"
"math"
"math/rand"
"net"
"net/http"
@@ -50,7 +53,7 @@ type ClientOption func(*Client)
// Client represents an HTTP client with additional capabilities
type Client struct {
client *http.Client
rateLimiter ratelimit.Limiter
rateLimiter *rate.Limiter
headers map[string]string
headersMu sync.RWMutex
maxRetries int
@@ -82,7 +85,7 @@ func WithRedirectPolicy(policy func(req *http.Request, via []*http.Request) erro
}
// WithRateLimiter sets a rate limiter
func WithRateLimiter(rl ratelimit.Limiter) ClientOption {
func WithRateLimiter(rl *rate.Limiter) ClientOption {
return func(c *Client) {
c.rateLimiter = rl
}
@@ -134,11 +137,9 @@ func WithProxy(proxyURL string) ClientOption {
// doRequest performs a single HTTP request with rate limiting
func (c *Client) doRequest(req *http.Request) (*http.Response, error) {
if c.rateLimiter != nil {
select {
case <-req.Context().Done():
return nil, req.Context().Err()
default:
c.rateLimiter.Take()
err := c.rateLimiter.Wait(req.Context())
if err != nil {
return nil, fmt.Errorf("rate limiter wait: %w", err)
}
}
@@ -179,7 +180,8 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
resp, err = c.doRequest(req)
if err != nil {
if attempt < c.maxRetries {
// Check if this is a network error that might be worth retrying
if isRetryableError(err) && attempt < c.maxRetries {
// Apply backoff with jitter
jitter := time.Duration(rand.Int63n(int64(backoff / 4)))
sleepTime := backoff + jitter
@@ -338,10 +340,7 @@ func New(options ...ClientOption) *Client {
return client
}
func ParseRateLimit(rateStr string) ratelimit.Limiter {
if rateStr == "" {
return nil
}
func ParseRateLimit(rateStr string) *rate.Limiter {
parts := strings.SplitN(rateStr, "/", 2)
if len(parts) != 2 {
return nil
@@ -353,21 +352,23 @@ func ParseRateLimit(rateStr string) ratelimit.Limiter {
return nil
}
// Set slack size to 10%
slackSize := count / 10
// normalize unit
unit := strings.ToLower(strings.TrimSpace(parts[1]))
unit = strings.TrimSuffix(unit, "s")
burstSize := int(math.Ceil(float64(count) * 0.1))
if burstSize < 1 {
burstSize = 1
}
if burstSize > count {
burstSize = count
}
switch unit {
case "minute", "min":
return ratelimit.New(count, ratelimit.Per(time.Minute), ratelimit.WithSlack(slackSize))
return rate.NewLimiter(rate.Limit(float64(count)/60.0), burstSize)
case "second", "sec":
return ratelimit.New(count, ratelimit.Per(time.Second), ratelimit.WithSlack(slackSize))
return rate.NewLimiter(rate.Limit(float64(count)), burstSize)
case "hour", "hr":
return ratelimit.New(count, ratelimit.Per(time.Hour), ratelimit.WithSlack(slackSize))
case "day", "d":
return ratelimit.New(count, ratelimit.Per(24*time.Hour), ratelimit.WithSlack(slackSize))
return rate.NewLimiter(rate.Limit(float64(count)/3600.0), burstSize)
default:
return nil
}
@@ -382,9 +383,61 @@ func JSONResponse(w http.ResponseWriter, data interface{}, code int) {
}
}
func Gzip(body []byte) []byte {
if len(body) == 0 {
return nil
}
// Check if the pool is nil
buf := bytes.NewBuffer(make([]byte, 0, len(body)))
gz, err := gzip.NewWriterLevel(buf, gzip.BestSpeed)
if err != nil {
return nil
}
if _, err := gz.Write(body); err != nil {
return nil
}
if err := gz.Close(); err != nil {
return nil
}
result := make([]byte, buf.Len())
copy(result, buf.Bytes())
return result
}
func Default() *Client {
once.Do(func() {
instance = New()
})
return instance
}
func isRetryableError(err error) bool {
errString := err.Error()
// Connection reset and other network errors
if strings.Contains(errString, "connection reset by peer") ||
strings.Contains(errString, "read: connection reset") ||
strings.Contains(errString, "connection refused") ||
strings.Contains(errString, "network is unreachable") ||
strings.Contains(errString, "connection timed out") ||
strings.Contains(errString, "no such host") ||
strings.Contains(errString, "i/o timeout") ||
strings.Contains(errString, "unexpected EOF") ||
strings.Contains(errString, "TLS handshake timeout") {
return true
}
// Check for net.Error type which can provide more information
var netErr net.Error
if errors.As(err, &netErr) {
// Retry on timeout errors and temporary errors
return netErr.Timeout() || netErr.Temporary()
}
// Not a retryable error
return false
}

View File

@@ -1,10 +1,7 @@
package utils
import (
"fmt"
"io"
"net/url"
"os"
"strings"
)
@@ -22,65 +19,3 @@ func PathUnescape(path string) string {
return unescapedPath
}
func PreCacheFile(filePaths []string) error {
if len(filePaths) == 0 {
return fmt.Errorf("no file paths provided")
}
for _, filePath := range filePaths {
err := func(f string) error {
file, err := os.Open(f)
if err != nil {
if os.IsNotExist(err) {
// File has probably been moved by arr, return silently
return nil
}
return fmt.Errorf("failed to open file: %s: %v", f, err)
}
defer file.Close()
// Pre-cache the file header (first 256KB) using 16KB chunks.
if err := readSmallChunks(file, 0, 256*1024, 16*1024); err != nil {
return err
}
if err := readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil {
return err
}
return nil
}(filePath)
if err != nil {
return err
}
}
return nil
}
func readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
_, err := file.Seek(startPos, 0)
if err != nil {
return err
}
buf := make([]byte, chunkSize)
bytesRemaining := totalToRead
for bytesRemaining > 0 {
toRead := chunkSize
if bytesRemaining < chunkSize {
toRead = bytesRemaining
}
n, err := file.Read(buf[:toRead])
if err != nil {
if err == io.EOF {
break
}
return err
}
bytesRemaining -= n
}
return nil
}

View File

@@ -25,11 +25,11 @@ var (
)
type Magnet struct {
Name string `json:"name"`
InfoHash string `json:"infoHash"`
Size int64 `json:"size"`
Link string `json:"link"`
File []byte `json:"-"`
Name string
InfoHash string
Size int64
Link string
File []byte
}
func (m *Magnet) IsTorrent() bool {
@@ -83,6 +83,7 @@ func GetMagnetFromBytes(torrentData []byte) (*Magnet, error) {
if err != nil {
return nil, err
}
log.Println("InfoHash: ", infoHash)
magnet := &Magnet{
InfoHash: infoHash,
Name: info.Name,

View File

@@ -1,16 +1,5 @@
package utils
import (
"fmt"
"io"
"mime"
"net/http"
"net/url"
"path"
"path/filepath"
"strings"
)
func RemoveItem[S ~[]E, E comparable](s S, values ...E) S {
result := make(S, 0, len(s))
outer:
@@ -33,131 +22,3 @@ func Contains(slice []string, value string) bool {
}
return false
}
func GenerateHash(data string) string {
// Simple hash generation using a basic algorithm (for demonstration purposes)
_hash := 0
for _, char := range data {
_hash = (_hash*31 + int(char)) % 1000003 // Simple hash function
}
return string(rune(_hash))
}
func DownloadFile(url string) (string, []byte, error) {
resp, err := http.Get(url)
if err != nil {
return "", nil, fmt.Errorf("failed to download file: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", nil, fmt.Errorf("failed to download file: status code %d", resp.StatusCode)
}
filename := getFilenameFromResponse(resp, url)
data, err := io.ReadAll(resp.Body)
if err != nil {
return "", nil, fmt.Errorf("failed to read response body: %w", err)
}
return filename, data, nil
}
func getFilenameFromResponse(resp *http.Response, originalURL string) string {
// 1. Try Content-Disposition header
if cd := resp.Header.Get("Content-Disposition"); cd != "" {
if _, params, err := mime.ParseMediaType(cd); err == nil {
if filename := params["filename"]; filename != "" {
return filename
}
}
}
// 2. Try to decode URL-encoded filename from Content-Disposition
if cd := resp.Header.Get("Content-Disposition"); cd != "" {
if strings.Contains(cd, "filename*=") {
// Handle RFC 5987 encoded filenames
parts := strings.Split(cd, "filename*=")
if len(parts) > 1 {
encoded := strings.Trim(parts[1], `"`)
if strings.HasPrefix(encoded, "UTF-8''") {
if decoded, err := url.QueryUnescape(encoded[7:]); err == nil {
return decoded
}
}
}
}
}
// 3. Fall back to URL path
if parsedURL, err := url.Parse(originalURL); err == nil {
if filename := filepath.Base(parsedURL.Path); filename != "." && filename != "/" {
// URL decode the filename
if decoded, err := url.QueryUnescape(filename); err == nil {
return decoded
}
return filename
}
}
// 4. Default filename
return "downloaded_file"
}
func ValidateServiceURL(urlStr string) error {
if urlStr == "" {
return fmt.Errorf("URL cannot be empty")
}
// Try parsing as full URL first
u, err := url.Parse(urlStr)
if err == nil && u.Scheme != "" && u.Host != "" {
// It's a full URL, validate scheme
if u.Scheme != "http" && u.Scheme != "https" {
return fmt.Errorf("URL scheme must be http or https")
}
return nil
}
// Check if it's a host:port format (no scheme)
if strings.Contains(urlStr, ":") && !strings.Contains(urlStr, "://") {
// Try parsing with http:// prefix
testURL := "http://" + urlStr
u, err := url.Parse(testURL)
if err != nil {
return fmt.Errorf("invalid host:port format: %w", err)
}
if u.Host == "" {
return fmt.Errorf("host is required in host:port format")
}
// Validate port number
if u.Port() == "" {
return fmt.Errorf("port is required in host:port format")
}
return nil
}
return fmt.Errorf("invalid URL format: %s", urlStr)
}
func ExtractFilenameFromURL(rawURL string) string {
// Parse the URL
parsedURL, err := url.Parse(rawURL)
if err != nil {
return ""
}
// Get the base filename from path
filename := path.Base(parsedURL.Path)
// Handle edge cases
if filename == "/" || filename == "." || filename == "" {
return ""
}
return filename
}

View File

@@ -40,10 +40,12 @@ func RemoveInvalidChars(value string) string {
}
func RemoveExtension(value string) string {
if loc := mediaRegex.FindStringIndex(value); loc != nil {
loc := mediaRegex.FindStringIndex(value)
if loc != nil {
return value[:loc[0]]
} else {
return value
}
return value
}
func IsMediaFile(path string) bool {
@@ -51,21 +53,8 @@ func IsMediaFile(path string) bool {
}
func IsSampleFile(path string) bool {
filename := filepath.Base(path)
if strings.HasSuffix(strings.ToLower(filename), "sample.mkv") {
if strings.HasSuffix(strings.ToLower(path), "sample.mkv") {
return true
}
return RegexMatch(sampleRegex, path)
}
func IsParFile(path string) bool {
ext := filepath.Ext(path)
return strings.EqualFold(ext, ".par") || strings.EqualFold(ext, ".par2")
}
func IsRarFile(path string) bool {
ext := filepath.Ext(path)
return strings.EqualFold(ext, ".rar") || strings.EqualFold(ext, ".r00") ||
strings.EqualFold(ext, ".r01") || strings.EqualFold(ext, ".r02") ||
strings.EqualFold(ext, ".r03") || strings.EqualFold(ext, ".r04")
}

1624
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,19 +0,0 @@
{
"name": "decypharr",
"version": "1.0.0",
"description": "Media management tool",
"scripts": {
"build-css": "tailwindcss -i ./pkg/web/assets/styles.css -o ./pkg/web/assets/build/css/styles.css --minify",
"minify-js": "node scripts/minify-js.js",
"download-assets": "node scripts/download-assets.js",
"build": "npm run build-css && npm run minify-js",
"build-all": "npm run download-assets && npm run build",
"dev": "npm run build && air"
},
"devDependencies": {
"tailwindcss": "^3.4.0",
"daisyui": "^4.12.10",
"terser": "^5.24.0",
"clean-css": "^5.3.3"
}
}

View File

@@ -3,7 +3,6 @@ package arr
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/rs/zerolog"
@@ -12,6 +11,7 @@ import (
"github.com/sirrobot01/decypharr/internal/request"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
@@ -20,13 +20,6 @@ import (
// Type is a type of arr
type Type string
var sharedClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
Timeout: 60 * time.Second,
}
const (
Sonarr Type = "sonarr"
Radarr Type = "radarr"
@@ -42,11 +35,10 @@ type Arr struct {
Cleanup bool `json:"cleanup"`
SkipRepair bool `json:"skip_repair"`
DownloadUncached *bool `json:"download_uncached"`
SelectedDebrid string `json:"selected_debrid,omitempty"` // The debrid service selected for this arr
Source string `json:"source,omitempty"` // The source of the arr, e.g. "auto", "manual". Auto means it was automatically detected from the arr
client *request.Client
}
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool, selectedDebrid, source string) *Arr {
func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *bool) *Arr {
return &Arr{
Name: name,
Host: host,
@@ -55,8 +47,7 @@ func New(name, host, token string, cleanup, skipRepair bool, downloadUncached *b
Cleanup: cleanup,
SkipRepair: skipRepair,
DownloadUncached: downloadUncached,
SelectedDebrid: selectedDebrid,
Source: source,
client: request.New(),
}
}
@@ -83,11 +74,14 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-Api-Key", a.Token)
if a.client == nil {
a.client = request.New()
}
var resp *http.Response
for attempts := 0; attempts < 5; attempts++ {
resp, err = sharedClient.Do(req)
resp, err = a.client.Do(req)
if err != nil {
return nil, err
}
@@ -109,16 +103,14 @@ func (a *Arr) Request(method, endpoint string, payload interface{}) (*http.Respo
func (a *Arr) Validate() error {
if a.Token == "" || a.Host == "" {
return fmt.Errorf("arr not configured: %s", a.Name)
return nil
}
resp, err := a.Request("GET", "/api/v3/health", nil)
if err != nil {
return err
}
defer resp.Body.Close()
// If response is not 200 or 404(this is the case for Lidarr, etc), return an error
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNotFound {
return fmt.Errorf("failed to validate arr %s: %s", a.Name, resp.Status)
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("arr test failed: %s", resp.Status)
}
return nil
}
@@ -129,10 +121,10 @@ type Storage struct {
logger zerolog.Logger
}
func (s *Storage) Cleanup() {
s.mu.Lock()
defer s.mu.Unlock()
s.Arrs = make(map[string]*Arr)
func (as *Storage) Cleanup() {
as.mu.Lock()
defer as.mu.Unlock()
as.Arrs = make(map[string]*Arr)
}
func InferType(host, name string) Type {
@@ -153,11 +145,8 @@ func InferType(host, name string) Type {
func NewStorage() *Storage {
arrs := make(map[string]*Arr)
for _, a := range config.Get().Arrs {
if a.Host == "" || a.Token == "" || a.Name == "" {
continue // Skip if host or token is not set
}
name := a.Name
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached, a.SelectedDebrid, a.Source)
arrs[name] = New(name, a.Host, a.Token, a.Cleanup, a.SkipRepair, a.DownloadUncached)
}
return &Storage{
Arrs: arrs,
@@ -165,38 +154,46 @@ func NewStorage() *Storage {
}
}
func (s *Storage) AddOrUpdate(arr *Arr) {
s.mu.Lock()
defer s.mu.Unlock()
if arr.Host == "" || arr.Token == "" || arr.Name == "" {
func (as *Storage) AddOrUpdate(arr *Arr) {
as.mu.Lock()
defer as.mu.Unlock()
if arr.Name == "" {
return
}
s.Arrs[arr.Name] = arr
as.Arrs[arr.Name] = arr
}
func (s *Storage) Get(name string) *Arr {
s.mu.Lock()
defer s.mu.Unlock()
return s.Arrs[name]
func (as *Storage) Get(name string) *Arr {
as.mu.Lock()
defer as.mu.Unlock()
return as.Arrs[name]
}
func (s *Storage) GetAll() []*Arr {
s.mu.Lock()
defer s.mu.Unlock()
arrs := make([]*Arr, 0, len(s.Arrs))
for _, arr := range s.Arrs {
arrs = append(arrs, arr)
func (as *Storage) GetAll() []*Arr {
as.mu.Lock()
defer as.mu.Unlock()
arrs := make([]*Arr, 0, len(as.Arrs))
for _, arr := range as.Arrs {
if arr.Host != "" && arr.Token != "" {
arrs = append(arrs, arr)
}
}
return arrs
}
func (s *Storage) StartSchedule(ctx context.Context) error {
func (as *Storage) Clear() {
as.mu.Lock()
defer as.mu.Unlock()
as.Arrs = make(map[string]*Arr)
}
func (as *Storage) StartSchedule(ctx context.Context) error {
ticker := time.NewTicker(10 * time.Second)
select {
case <-ticker.C:
s.cleanupArrsQueue()
as.cleanupArrsQueue()
case <-ctx.Done():
ticker.Stop()
return nil
@@ -204,9 +201,9 @@ func (s *Storage) StartSchedule(ctx context.Context) error {
return nil
}
func (s *Storage) cleanupArrsQueue() {
func (as *Storage) cleanupArrsQueue() {
arrs := make([]*Arr, 0)
for _, arr := range s.Arrs {
for _, arr := range as.Arrs {
if !arr.Cleanup {
continue
}
@@ -215,18 +212,26 @@ func (s *Storage) cleanupArrsQueue() {
if len(arrs) > 0 {
for _, arr := range arrs {
if err := arr.CleanupQueue(); err != nil {
s.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
as.logger.Error().Err(err).Msgf("Failed to cleanup arr %s", arr.Name)
}
}
}
}
func (a *Arr) Refresh() {
func (a *Arr) Refresh() error {
payload := struct {
Name string `json:"name"`
}{
Name: "RefreshMonitoredDownloads",
}
_, _ = a.Request(http.MethodPost, "api/v3/command", payload)
resp, err := a.Request(http.MethodPost, "api/v3/command", payload)
if err == nil && resp != nil {
statusOk := strconv.Itoa(resp.StatusCode)[0] == '2'
if statusOk {
return nil
}
}
return fmt.Errorf("failed to refresh: %v", err)
}

View File

@@ -105,7 +105,6 @@ func (a *Arr) GetMedia(mediaId string) ([]Content, error) {
Id: d.Id,
EpisodeId: eId,
SeasonNumber: file.SeasonNumber,
Size: file.Size,
})
}
if len(files) == 0 {
@@ -149,7 +148,6 @@ func GetMovies(a *Arr, tvId string) ([]Content, error) {
FileId: movie.MovieFile.Id,
Id: movie.Id,
Path: movie.MovieFile.Path,
Size: movie.MovieFile.Size,
})
ct.Files = files
contents = append(contents, ct)

View File

@@ -205,4 +205,5 @@ func (a *Arr) Import(path string, seriesId int, seasons []int) (io.ReadCloser, e
}
defer resp.Body.Close()
return resp.Body, nil
}

View File

@@ -11,7 +11,6 @@ type Movie struct {
RelativePath string `json:"relativePath"`
Path string `json:"path"`
Id int `json:"id"`
Size int64 `json:"size"`
} `json:"movieFile"`
Id int `json:"id"`
}
@@ -26,8 +25,6 @@ type ContentFile struct {
IsSymlink bool `json:"isSymlink"`
IsBroken bool `json:"isBroken"`
SeasonNumber int `json:"seasonNumber"`
Processed bool `json:"processed"`
Size int64 `json:"size"`
}
func (file *ContentFile) Delete() {
@@ -47,5 +44,4 @@ type seriesFile struct {
SeasonNumber int `json:"seasonNumber"`
Path string `json:"path"`
Id int `json:"id"`
Size int64 `json:"size"`
}

View File

@@ -18,26 +18,20 @@ import (
)
type AllDebrid struct {
name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
autoExpiresLinksAfter time.Duration
DownloadUncached bool
client *request.Client
Name string
Host string `json:"host"`
APIKey string
accounts map[string]types.Account
DownloadUncached bool
client *request.Client
MountPath string
logger zerolog.Logger
checkCached bool
addSamples bool
minimumFreeSlot int
MountPath string
logger zerolog.Logger
checkCached bool
addSamples bool
}
func (ad *AllDebrid) GetProfile() (*types.Profile, error) {
return nil, nil
}
func New(dc config.Debrid) (*AllDebrid, error) {
func New(dc config.Debrid) *AllDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
@@ -51,31 +45,34 @@ func New(dc config.Debrid) (*AllDebrid, error) {
request.WithProxy(dc.Proxy),
)
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
accounts := make(map[string]types.Account)
for idx, key := range dc.DownloadAPIKeys {
id := strconv.Itoa(idx)
accounts[id] = types.Account{
Name: key,
ID: id,
Token: key,
}
}
return &AllDebrid{
name: "alldebrid",
Host: "http://api.alldebrid.com/v4.1",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
minimumFreeSlot: dc.MinimumFreeSlot,
}, nil
Name: "alldebrid",
Host: "http://api.alldebrid.com/v4.1",
APIKey: dc.APIKey,
accounts: accounts,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
}
func (ad *AllDebrid) Name() string {
return ad.name
func (ad *AllDebrid) GetName() string {
return ad.Name
}
func (ad *AllDebrid) Logger() zerolog.Logger {
func (ad *AllDebrid) GetLogger() zerolog.Logger {
return ad.logger
}
@@ -189,7 +186,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
var res TorrentInfoResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
return nil, err
}
data := res.Data.Magnets
@@ -203,7 +200,7 @@ func (ad *AllDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
OriginalFilename: name,
Files: make(map[string]types.File),
InfoHash: data.Hash,
Debrid: ad.name,
Debrid: ad.Name,
MountPath: ad.MountPath,
Added: time.Unix(data.CompletionDate, 0).Format(time.RFC3339),
}
@@ -231,7 +228,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
var res TorrentInfoResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
return err
}
data := res.Data.Magnets
@@ -243,7 +240,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = name
t.Folder = name
t.MountPath = ad.MountPath
t.Debrid = ad.name
t.Debrid = ad.Name
t.Bytes = data.Size
t.Seeders = data.Seeders
t.Added = time.Unix(data.CompletionDate, 0).Format(time.RFC3339)
@@ -259,7 +256,7 @@ func (ad *AllDebrid) UpdateTorrent(t *types.Torrent) error {
return nil
}
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
func (ad *AllDebrid) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
for {
err := ad.UpdateTorrent(torrent)
@@ -269,7 +266,13 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent) (*types.Torrent, error)
status := torrent.Status
if status == "downloaded" {
ad.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
return torrent, nil
if !isSymlink {
err = ad.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
}
break
} else if utils.Contains(ad.GetDownloadingStatus(), status) {
if !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
@@ -282,6 +285,7 @@ func (ad *AllDebrid) CheckStatus(torrent *types.Torrent) (*types.Torrent, error)
}
}
return torrent, nil
}
func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
@@ -294,9 +298,8 @@ func (ad *AllDebrid) DeleteTorrent(torrentId string) error {
return nil
}
func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
func (ad *AllDebrid) GenerateDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
linksCh := make(chan *types.DownloadLink, len(t.Files))
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
@@ -309,19 +312,17 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
errCh <- err
return
}
if link == nil {
file.DownloadLink = link
if link != nil {
errCh <- fmt.Errorf("download link is empty")
return
}
linksCh <- link
file.DownloadLink = link
filesCh <- file
}(file)
}
go func() {
wg.Wait()
close(filesCh)
close(linksCh)
close(errCh)
}()
files := make(map[string]types.File, len(t.Files))
@@ -329,22 +330,10 @@ func (ad *AllDebrid) GetFileDownloadLinks(t *types.Torrent) error {
files[file.Name] = file
}
// Collect download links
links := make(map[string]*types.DownloadLink, len(t.Files))
for link := range linksCh {
if link == nil {
continue
}
links[link.Link] = link
}
// Update the files with download links
ad.accounts.SetDownloadLinks(links)
// Check for errors
for err := range errCh {
if err != nil {
return err
return err // Return the first error encountered
}
}
@@ -374,18 +363,21 @@ func (ad *AllDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types
if link == "" {
return nil, fmt.Errorf("download link is empty")
}
now := time.Now()
return &types.DownloadLink{
Link: file.Link,
DownloadLink: link,
Id: data.Data.Id,
Size: file.Size,
Filename: file.Name,
Generated: now,
ExpiresAt: now.Add(ad.autoExpiresLinksAfter),
Generated: time.Now(),
AccountId: "0",
}, nil
}
func (ad *AllDebrid) GetCheckCached() bool {
return ad.checkCached
}
func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
url := fmt.Sprintf("%s/magnet/status?status=ready", ad.Host)
req, _ := http.NewRequest(http.MethodGet, url, nil)
@@ -397,7 +389,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
var res TorrentsListResponse
err = json.Unmarshal(resp, &res)
if err != nil {
ad.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
ad.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
return torrents, err
}
for _, magnet := range res.Data.Magnets {
@@ -410,7 +402,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
OriginalFilename: magnet.Filename,
Files: make(map[string]types.File),
InfoHash: magnet.Hash,
Debrid: ad.name,
Debrid: ad.Name,
MountPath: ad.MountPath,
Added: time.Unix(magnet.CompletionDate, 0).Format(time.RFC3339),
})
@@ -419,7 +411,7 @@ func (ad *AllDebrid) GetTorrents() ([]*types.Torrent, error) {
return torrents, nil
}
func (ad *AllDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
func (ad *AllDebrid) GetDownloads() (map[string]types.DownloadLink, error) {
return nil, nil
}
@@ -439,16 +431,12 @@ func (ad *AllDebrid) GetMountPath() string {
return ad.MountPath
}
func (ad *AllDebrid) DisableAccount(accountId string) {
}
func (ad *AllDebrid) ResetActiveDownloadKeys() {
}
func (ad *AllDebrid) DeleteDownloadLink(linkId string) error {
return nil
}
func (ad *AllDebrid) GetAvailableSlots() (int, error) {
// This function is a placeholder for AllDebrid
//TODO: Implement the logic to check available slots for AllDebrid
return 0, fmt.Errorf("GetAvailableSlots not implemented for AllDebrid")
}
func (ad *AllDebrid) Accounts() *types.Accounts {
return ad.accounts
}

View File

@@ -1,10 +1,5 @@
package alldebrid
import (
"encoding/json"
"fmt"
)
type errorResponse struct {
Code string `json:"code"`
Message string `json:"message"`
@@ -37,8 +32,6 @@ type magnetInfo struct {
Files []MagnetFile `json:"files"`
}
type Magnets []magnetInfo
type TorrentInfoResponse struct {
Status string `json:"status"`
Data struct {
@@ -50,7 +43,7 @@ type TorrentInfoResponse struct {
type TorrentsListResponse struct {
Status string `json:"status"`
Data struct {
Magnets Magnets `json:"magnets"`
Magnets []magnetInfo `json:"magnets"`
} `json:"data"`
Error *errorResponse `json:"error"`
}
@@ -88,27 +81,3 @@ type DownloadLink struct {
} `json:"data"`
Error *errorResponse `json:"error"`
}
// UnmarshalJSON implements json.Unmarshaler for Magnets.
// AllDebrid returns the "magnets" payload either as a JSON array of
// magnet objects or as a JSON object keyed by magnet id; both shapes
// decode into the same flat slice. When the payload is an object, the
// resulting element order is unspecified (map iteration order).
func (m *Magnets) UnmarshalJSON(data []byte) error {
	// Shape 1: plain array of magnet objects.
	var arr []magnetInfo
	if err := json.Unmarshal(data, &arr); err == nil {
		*m = arr
		return nil
	}

	// Shape 2: object keyed by magnet id. Build a fresh slice so the
	// decode replaces any previous contents, matching the array branch
	// above (the old code appended, leaking prior entries into the result).
	var obj map[string]magnetInfo
	if err := json.Unmarshal(data, &obj); err == nil {
		out := make(Magnets, 0, len(obj))
		for _, v := range obj {
			out = append(out, v)
		}
		*m = out
		return nil
	}

	return fmt.Errorf("magnets: unsupported JSON format")
}

View File

@@ -1,241 +0,0 @@
package debrid
import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/debrid_link"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/providers/torbox"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
)
// Debrid bundles a debrid provider client with its optional WebDAV cache.
type Debrid struct {
	cache  *store.Cache // Could be nil if not using WebDAV
	client types.Client // HTTP client for making requests to the debrid service
}

// Client returns the underlying debrid provider client.
func (de *Debrid) Client() types.Client {
	return de.client
}

// Cache returns the WebDAV cache, or nil when WebDAV is disabled for
// this provider.
func (de *Debrid) Cache() *store.Cache {
	return de.cache
}
// Storage holds every configured Debrid instance, keyed by provider name.
// Map access is guarded by mu. lastUsed records the key of the client that
// most recently accepted a torrent (set by Process).
type Storage struct {
	debrids  map[string]*Debrid
	mu       sync.RWMutex
	lastUsed string
}
// NewStorage builds a Storage from the configured debrid providers.
// Providers that fail to connect are logged and skipped; providers with
// WebDAV enabled additionally get a cache attached.
func NewStorage() *Storage {
	var (
		cfg     = config.Get()
		rootLog = logger.Default()
		debrids = make(map[string]*Debrid)
	)
	for _, dc := range cfg.Debrids {
		client, err := createDebridClient(dc)
		if err != nil {
			rootLog.Error().Err(err).Str("Debrid", dc.Name).Msg("failed to connect to debrid client")
			continue
		}
		entry := &Debrid{client: client}
		clientLog := client.Logger()
		if dc.UseWebDav {
			entry.cache = store.NewDebridCache(dc, client)
			clientLog.Info().Msg("Debrid Service started with WebDAV")
		} else {
			clientLog.Info().Msg("Debrid Service started")
		}
		debrids[dc.Name] = entry
	}
	// lastUsed starts at its zero value ("").
	return &Storage{debrids: debrids}
}
// Debrid returns the named debrid instance, or nil when it is not configured.
func (d *Storage) Debrid(name string) *Debrid {
	d.mu.RLock()
	defer d.mu.RUnlock()
	// A missing key yields the nil zero value, matching the explicit
	// exists-check the original spelled out.
	return d.debrids[name]
}
// Debrids returns a shallow copy of the configured debrid instances,
// omitting any nil entries, so callers can iterate without holding mu.
func (d *Storage) Debrids() map[string]*Debrid {
	d.mu.RLock()
	defer d.mu.RUnlock()
	snapshot := make(map[string]*Debrid, len(d.debrids))
	for name, entry := range d.debrids {
		if entry == nil {
			continue
		}
		snapshot[name] = entry
	}
	return snapshot
}
// Client returns the raw provider client registered under name, or nil
// when no such provider is configured.
func (d *Storage) Client(name string) types.Client {
	d.mu.RLock()
	defer d.mu.RUnlock()
	entry, ok := d.debrids[name]
	if !ok {
		return nil
	}
	return entry.client
}
// Reset clears all configured debrid instances and the last-used marker.
// Both fields are guarded by mu; the previous version wrote lastUsed
// after releasing the lock, racing with concurrent writers (Process)
// and readers.
func (d *Storage) Reset() {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.debrids = make(map[string]*Debrid)
	d.lastUsed = ""
}
// Clients returns a snapshot map of provider name to client, skipping
// entries whose client is unset.
func (d *Storage) Clients() map[string]types.Client {
	d.mu.RLock()
	defer d.mu.RUnlock()
	snapshot := make(map[string]types.Client, len(d.debrids))
	for name, entry := range d.debrids {
		if entry == nil || entry.client == nil {
			continue
		}
		snapshot[name] = entry.client
	}
	return snapshot
}
// Caches returns a snapshot of every non-nil WebDAV cache, keyed by
// provider name. Providers running without WebDAV are omitted.
func (d *Storage) Caches() map[string]*store.Cache {
	d.mu.RLock()
	defer d.mu.RUnlock()
	snapshot := make(map[string]*store.Cache, len(d.debrids))
	for name, entry := range d.debrids {
		if entry == nil || entry.cache == nil {
			continue
		}
		snapshot[name] = entry.cache
	}
	return snapshot
}
// FilterClients returns the clients whose entry satisfies filter, keyed
// by provider name.
//
// The traversal is read-only, so it takes the read lock — the previous
// version took the exclusive lock, needlessly blocking every other
// reader (and differing from the sibling accessors, which all use RLock).
func (d *Storage) FilterClients(filter func(types.Client) bool) map[string]types.Client {
	d.mu.RLock()
	defer d.mu.RUnlock()
	filtered := make(map[string]types.Client, len(d.debrids))
	for name, entry := range d.debrids {
		// NOTE(review): as in the original, filter may receive a nil
		// client when an entry's client was never set — confirm callers'
		// filters tolerate that before tightening this check.
		if entry != nil && filter(entry.client) {
			filtered[name] = entry.client
		}
	}
	return filtered
}
// createDebridClient constructs the provider-specific client for dc.
// Unknown provider names fall back to Real-Debrid, exactly as an explicit
// "realdebrid" case would.
func createDebridClient(dc config.Debrid) (types.Client, error) {
	switch dc.Name {
	case "torbox":
		return torbox.New(dc)
	case "debridlink":
		return debrid_link.New(dc)
	case "alldebrid":
		return alldebrid.New(dc)
	default: // includes "realdebrid"
		return realdebrid.New(dc)
	}
}
// Process submits magnet to the configured debrid clients in turn and
// returns the first torrent that passes its status check.
//
// selectedDebrid, when non-empty, restricts the attempt to that provider.
// The cached-vs-uncached policy is resolved in priority order: the
// explicit override flag, then the arr's setting, then each client's own
// default. A torrent whose status check fails is deleted from the
// provider again (best effort, in the background) before the next client
// is tried. All per-client failures are joined into the returned error.
func Process(ctx context.Context, store *Storage, selectedDebrid string, magnet *utils.Magnet, a *arr.Arr, action string, overrideDownloadUncached bool) (*types.Torrent, error) {
	debridTorrent := &types.Torrent{
		InfoHash: magnet.InfoHash,
		Magnet:   magnet,
		Name:     magnet.Name,
		Arr:      a,
		Size:     magnet.Size,
		Files:    make(map[string]types.File),
	}
	clients := store.FilterClients(func(c types.Client) bool {
		if selectedDebrid != "" && c.Name() != selectedDebrid {
			return false
		}
		return true
	})
	if len(clients) == 0 {
		return nil, fmt.Errorf("no debrid clients available")
	}
	errs := make([]error, 0, len(clients))

	// Override first, arr second, debrid third
	if overrideDownloadUncached {
		debridTorrent.DownloadUncached = true
	} else if a.DownloadUncached != nil {
		// Arr cached is set
		debridTorrent.DownloadUncached = *a.DownloadUncached
	} else {
		debridTorrent.DownloadUncached = false
	}

	for index, db := range clients {
		_logger := db.Logger()
		_logger.Info().
			Str("Debrid", db.Name()).
			Str("Arr", a.Name).
			Str("Hash", debridTorrent.InfoHash).
			Str("Name", debridTorrent.Name).
			Str("Action", action).
			Msg("Processing torrent")
		if !overrideDownloadUncached && a.DownloadUncached == nil {
			debridTorrent.DownloadUncached = db.GetDownloadUncached()
		}
		dbt, err := db.SubmitMagnet(debridTorrent)
		if err != nil || dbt == nil || dbt.Id == "" {
			if err == nil {
				// A nil error here would be silently dropped by
				// errors.Join below; keep the failure visible.
				err = fmt.Errorf("debrid %s returned no torrent id", db.Name())
			}
			errs = append(errs, err)
			continue
		}
		dbt.Arr = a
		_logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.Name())

		// lastUsed is shared state guarded by mu everywhere else; the
		// previous version wrote it without holding the lock.
		store.mu.Lock()
		store.lastUsed = index
		store.mu.Unlock()

		torrent, err := db.CheckStatus(dbt)
		if err != nil && torrent != nil && torrent.Id != "" {
			// Delete the torrent if it was not downloaded
			go func(id string) {
				_ = db.DeleteTorrent(id)
			}(torrent.Id)
		}
		if err != nil {
			errs = append(errs, err)
			continue
		}
		if torrent == nil {
			errs = append(errs, fmt.Errorf("torrent %s returned nil after checking status", dbt.Name))
			continue
		}
		return torrent, nil
	}
	if len(errs) == 0 {
		return nil, fmt.Errorf("failed to process torrent: no clients available")
	}
	joinedErrors := errors.Join(errs...)
	return nil, fmt.Errorf("failed to process torrent: %w", joinedErrors)
}

View File

@@ -1,4 +1,4 @@
package store
package debrid
import (
"bufio"
@@ -16,16 +16,14 @@ import (
"sync/atomic"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"encoding/json"
_ "time/tzdata"
"github.com/go-co-op/gocron/v2"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
_ "time/tzdata"
)
type WebDavFolderNaming string
@@ -75,6 +73,7 @@ type Cache struct {
logger zerolog.Logger
torrents *torrentCache
downloadLinks *downloadLinkCache
invalidDownloadLinks sync.Map
folderNaming WebDavFolderNaming
@@ -91,9 +90,10 @@ type Cache struct {
ready chan struct{}
// config
workers int
torrentRefreshInterval string
downloadLinksRefreshInterval string
workers int
torrentRefreshInterval string
downloadLinksRefreshInterval string
autoExpiresLinksAfterDuration time.Duration
// refresh mutex
downloadLinksRefreshMu sync.RWMutex // for refreshing download links
@@ -108,16 +108,9 @@ type Cache struct {
customFolders []string
}
func NewDebridCache(dc config.Debrid, client types.Client) *Cache {
func New(dc config.Debrid, client types.Client) *Cache {
cfg := config.Get()
cet, err := time.LoadLocation("CET")
if err != nil {
cet, err = time.LoadLocation("Europe/Berlin") // Fallback to Berlin if CET fails
if err != nil {
cet = time.FixedZone("CET", 1*60*60) // Fallback to a fixed CET zone
}
}
cetSc, err := gocron.NewScheduler(gocron.WithLocation(cet))
cetSc, err := gocron.NewScheduler(gocron.WithLocation(time.UTC))
if err != nil {
// If we can't create a CET scheduler, fallback to local time
cetSc, _ = gocron.NewScheduler(gocron.WithLocation(time.Local))
@@ -128,6 +121,10 @@ func NewDebridCache(dc config.Debrid, client types.Client) *Cache {
scheduler = cetSc
}
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
}
var customFolders []string
dirFilters := map[string][]directoryFilter{}
for name, value := range dc.Directories {
@@ -146,20 +143,22 @@ func NewDebridCache(dc config.Debrid, client types.Client) *Cache {
customFolders = append(customFolders, name)
}
_log := logger.New(fmt.Sprintf("%s-webdav", client.Name()))
_log := logger.New(fmt.Sprintf("%s-webdav", client.GetName()))
c := &Cache{
dir: filepath.Join(cfg.Path, "cache", dc.Name), // path to save cache files
torrents: newTorrentCache(dirFilters),
client: client,
logger: _log,
workers: dc.Workers,
torrentRefreshInterval: dc.TorrentsRefreshInterval,
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
folderNaming: WebDavFolderNaming(dc.FolderNaming),
saveSemaphore: make(chan struct{}, 50),
cetScheduler: cetSc,
scheduler: scheduler,
torrents: newTorrentCache(dirFilters),
client: client,
logger: _log,
workers: dc.Workers,
downloadLinks: newDownloadLinkCache(),
torrentRefreshInterval: dc.TorrentsRefreshInterval,
downloadLinksRefreshInterval: dc.DownloadLinksRefreshInterval,
folderNaming: WebDavFolderNaming(dc.FolderNaming),
autoExpiresLinksAfterDuration: autoExpiresLinksAfter,
saveSemaphore: make(chan struct{}, 50),
cetScheduler: cetSc,
scheduler: scheduler,
config: dc,
customFolders: customFolders,
@@ -203,6 +202,9 @@ func (c *Cache) Reset() {
// 1. Reset torrent storage
c.torrents.reset()
// 2. Reset download-link cache
c.downloadLinks.reset()
// 3. Clear any sync.Maps
c.invalidDownloadLinks = sync.Map{}
c.repairRequest = sync.Map{}
@@ -226,14 +228,9 @@ func (c *Cache) Start(ctx context.Context) error {
return fmt.Errorf("failed to create cache directory: %w", err)
}
c.logger.Info().Msgf("Started indexing...")
if err := c.Sync(ctx); err != nil {
return fmt.Errorf("failed to sync cache: %w", err)
}
// Fire the ready channel
close(c.ready)
c.logger.Info().Msgf("Indexing complete, %d torrents loaded", len(c.torrents.getAll()))
// initial download links
go c.refreshDownloadLinks(ctx)
@@ -242,11 +239,13 @@ func (c *Cache) Start(ctx context.Context) error {
c.logger.Error().Err(err).Msg("Failed to start cache worker")
}
c.repairChan = make(chan RepairRequest, 100) // Initialize the repair channel, max 100 requests buffered
c.repairChan = make(chan RepairRequest, 100)
go c.repairWorker(ctx)
// Fire the ready channel
close(c.ready)
cfg := config.Get()
name := c.client.Name()
name := c.client.GetName()
addr := cfg.BindAddress + ":" + cfg.Port + cfg.URLBase + "webdav/" + name + "/"
c.logger.Info().Msgf("%s WebDav server running at %s", name, addr)
@@ -377,7 +376,7 @@ func (c *Cache) Sync(ctx context.Context) error {
totalTorrents := len(torrents)
c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.Name())
c.logger.Info().Msgf("%d torrents found from %s", totalTorrents, c.client.GetName())
newTorrents := make([]*types.Torrent, 0)
idStore := make(map[string]struct{}, totalTorrents)
@@ -399,11 +398,9 @@ func (c *Cache) Sync(ctx context.Context) error {
if len(deletedTorrents) > 0 {
c.logger.Info().Msgf("Found %d deleted torrents", len(deletedTorrents))
for _, id := range deletedTorrents {
// Remove from cache and debrid service
delete(cachedTorrents, id)
// Remove the json file from disk
c.removeFile(id, false)
if _, ok := cachedTorrents[id]; ok {
c.deleteTorrent(id, false) // delete from cache
}
}
}
@@ -516,9 +513,9 @@ func (c *Cache) setTorrent(t CachedTorrent, callback func(torrent CachedTorrent)
updatedTorrent.Files = mergedFiles
}
c.torrents.set(torrentName, t, updatedTorrent)
go c.SaveTorrent(t)
c.SaveTorrent(t)
if callback != nil {
go callback(updatedTorrent)
callback(updatedTorrent)
}
}
@@ -561,10 +558,6 @@ func (c *Cache) GetTorrents() map[string]CachedTorrent {
return c.torrents.getAll()
}
func (c *Cache) TotalTorrents() int {
return c.torrents.getAllCount()
}
func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
if torrent, ok := c.torrents.getByName(name); ok {
return &torrent
@@ -572,10 +565,6 @@ func (c *Cache) GetTorrentByName(name string) *CachedTorrent {
return nil
}
func (c *Cache) GetTorrentsName() map[string]CachedTorrent {
return c.torrents.getAllByName()
}
func (c *Cache) GetTorrent(torrentId string) *CachedTorrent {
if torrent, ok := c.torrents.getByID(torrentId); ok {
return &torrent
@@ -684,13 +673,8 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
}
if !isComplete(t.Files) {
c.logger.Debug().
Str("torrent_id", t.Id).
Str("torrent_name", t.Name).
Int("total_files", len(t.Files)).
Msg("Torrent still not complete after refresh")
c.logger.Debug().Msgf("Torrent %s is still not complete. Triggering a reinsert(disabled)", t.Id)
} else {
addedOn, err := time.Parse(time.RFC3339, t.Added)
if err != nil {
addedOn = time.Now()
@@ -707,9 +691,8 @@ func (c *Cache) ProcessTorrent(t *types.Torrent) error {
return nil
}
func (c *Cache) Add(t *types.Torrent) error {
func (c *Cache) AddTorrent(t *types.Torrent) error {
if len(t.Files) == 0 {
c.logger.Warn().Msgf("Torrent %s has no files to add. Refreshing", t.Id)
if err := c.client.UpdateTorrent(t); err != nil {
return fmt.Errorf("failed to update torrent: %w", err)
}
@@ -726,12 +709,12 @@ func (c *Cache) Add(t *types.Torrent) error {
c.setTorrent(ct, func(tor CachedTorrent) {
c.RefreshListings(true)
})
go c.GetFileDownloadLinks(ct)
go c.GenerateDownloadLinks(ct)
return nil
}
func (c *Cache) Client() types.Client {
func (c *Cache) GetClient() types.Client {
return c.client
}
@@ -769,13 +752,13 @@ func (c *Cache) deleteTorrent(id string, removeFromDebrid bool) bool {
if torrent, ok := c.torrents.getByID(id); ok {
c.torrents.removeId(id) // Delete id from cache
defer func() {
c.removeFile(id, false)
c.removeFromDB(id)
if removeFromDebrid {
_ = c.client.DeleteTorrent(id) // Skip error handling, we don't care if it fails
}
}() // defer delete from debrid
torrentName := c.GetTorrentFolder(torrent.Torrent)
torrentName := torrent.Name
if t, ok := c.torrents.getByName(torrentName); ok {
@@ -812,7 +795,7 @@ func (c *Cache) DeleteTorrents(ids []string) {
c.listingDebouncer.Call(true)
}
func (c *Cache) removeFile(torrentId string, moveToTrash bool) {
func (c *Cache) removeFromDB(torrentId string) {
// Moves the torrent file to the trash
filePath := filepath.Join(c.dir, torrentId+".json")
@@ -821,14 +804,6 @@ func (c *Cache) removeFile(torrentId string, moveToTrash bool) {
return
}
if !moveToTrash {
// If not moving to trash, delete the file directly
if err := os.Remove(filePath); err != nil {
c.logger.Error().Err(err).Msgf("Failed to remove file: %s", filePath)
return
}
return
}
// Move the file to the trash
trashPath := filepath.Join(c.dir, "trash", torrentId+".json")
if err := os.MkdirAll(filepath.Dir(trashPath), 0755); err != nil {
@@ -878,6 +853,6 @@ func (c *Cache) RemoveFile(torrentId string, filename string) error {
return nil
}
func (c *Cache) Logger() zerolog.Logger {
func (c *Cache) GetLogger() zerolog.Logger {
return c.logger
}

103
pkg/debrid/debrid/debrid.go Normal file
View File

@@ -0,0 +1,103 @@
package debrid
import (
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/alldebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid_link"
"github.com/sirrobot01/decypharr/pkg/debrid/realdebrid"
"github.com/sirrobot01/decypharr/pkg/debrid/torbox"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"strings"
)
// createDebridClient builds the provider client for dc; unrecognized
// provider names default to Real-Debrid.
func createDebridClient(dc config.Debrid) types.Client {
	switch dc.Name {
	case "torbox":
		return torbox.New(dc)
	case "debridlink":
		return debrid_link.New(dc)
	case "alldebrid":
		return alldebrid.New(dc)
	default: // includes "realdebrid"
		return realdebrid.New(dc)
	}
}
// ProcessTorrent submits magnet to each configured debrid client in turn
// and returns the first torrent whose status check succeeds.
//
// The cached-vs-uncached policy is resolved in priority order: the
// explicit override flag, then the arr's setting, then each client's
// default. A torrent whose status check fails is deleted from the
// provider (best effort, in the background) and the next client is
// tried — previously a CheckStatus failure returned immediately, so one
// bad client prevented the rest from ever being attempted.
func ProcessTorrent(d *Engine, magnet *utils.Magnet, a *arr.Arr, isSymlink, overrideDownloadUncached bool) (*types.Torrent, error) {
	debridTorrent := &types.Torrent{
		InfoHash: magnet.InfoHash,
		Magnet:   magnet,
		Name:     magnet.Name,
		Arr:      a,
		Size:     magnet.Size,
		Files:    make(map[string]types.File),
	}
	errs := make([]error, 0, len(d.Clients))

	// Override first, arr second, debrid third
	if overrideDownloadUncached {
		debridTorrent.DownloadUncached = true
	} else if a.DownloadUncached != nil {
		// Arr cached is set
		debridTorrent.DownloadUncached = *a.DownloadUncached
	} else {
		debridTorrent.DownloadUncached = false
	}

	for index, db := range d.Clients {
		logger := db.GetLogger()
		logger.Info().Str("Debrid", db.GetName()).Str("Hash", debridTorrent.InfoHash).Msg("Processing torrent")
		if !overrideDownloadUncached && a.DownloadUncached == nil {
			debridTorrent.DownloadUncached = db.GetDownloadUncached()
		}
		dbt, err := db.SubmitMagnet(debridTorrent)
		if err != nil || dbt == nil || dbt.Id == "" {
			if err == nil {
				// Don't append a nil error: it would produce an empty
				// entry in the joined message below.
				err = fmt.Errorf("debrid %s returned no torrent id", db.GetName())
			}
			errs = append(errs, err)
			continue
		}
		dbt.Arr = a
		logger.Info().Str("id", dbt.Id).Msgf("Torrent: %s submitted to %s", dbt.Name, db.GetName())

		// LastUsed is read elsewhere; serialize the write like the other
		// Engine fields.
		d.clientsMu.Lock()
		d.LastUsed = index
		d.clientsMu.Unlock()

		torrent, err := db.CheckStatus(dbt, isSymlink)
		if err != nil {
			if torrent != nil && torrent.Id != "" {
				// Delete the torrent if it was not downloaded
				go func(id string) {
					_ = db.DeleteTorrent(id)
				}(torrent.Id)
			}
			errs = append(errs, err)
			continue
		}
		if torrent == nil {
			// Guard against a (nil, nil) result leaking to the caller.
			errs = append(errs, fmt.Errorf("torrent %s returned nil after checking status", dbt.Name))
			continue
		}
		return torrent, nil
	}
	if len(errs) == 0 {
		return nil, fmt.Errorf("failed to process torrent: no clients available")
	}
	if len(errs) == 1 {
		return nil, fmt.Errorf("failed to process torrent: %w", errs[0])
	}
	errStrings := make([]string, 0, len(errs))
	for _, err := range errs {
		errStrings = append(errStrings, err.Error())
	}
	return nil, fmt.Errorf("failed to process torrent: %s", strings.Join(errStrings, ", "))
}

View File

@@ -0,0 +1,236 @@
package debrid
import (
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
"time"
)
// linkCache is one cached download link: the debrid-side id, the
// unrestricted link itself, the account that produced it, and when the
// entry stops being trusted.
type linkCache struct {
	Id        string
	link      string
	accountId string
	expiresAt time.Time
}

// downloadLinkCache is a mutex-guarded map from a debrid file link to its
// cached unrestricted download link.
type downloadLinkCache struct {
	mu      sync.Mutex
	entries map[string]linkCache
}

// newDownloadLinkCache returns an empty, ready-to-use cache.
func newDownloadLinkCache() *downloadLinkCache {
	return &downloadLinkCache{entries: make(map[string]linkCache)}
}

// reset discards every cached entry.
func (c *downloadLinkCache) reset() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries = make(map[string]linkCache)
}

// Load returns the entry stored under key and whether it exists.
func (c *downloadLinkCache) Load(key string) (linkCache, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	entry, ok := c.entries[key]
	return entry, ok
}

// Store inserts or replaces the entry for key.
func (c *downloadLinkCache) Store(key string, value linkCache) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[key] = value
}

// Delete removes the entry for key, if present.
func (c *downloadLinkCache) Delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.entries, key)
}
// downloadLinkRequest is a single-flight handle: one goroutine fetches a
// link while any number of others block in Wait until Complete fires.
type downloadLinkRequest struct {
	result string
	err    error
	done   chan struct{}
}

// newDownloadLinkRequest returns a request whose Wait blocks until
// Complete has been called.
func newDownloadLinkRequest() *downloadLinkRequest {
	return &downloadLinkRequest{done: make(chan struct{})}
}

// Complete records the outcome and releases every waiter. It must be
// called exactly once; a second call would close the channel again and
// panic.
func (r *downloadLinkRequest) Complete(result string, err error) {
	r.result = result
	r.err = err
	close(r.done)
}

// Wait blocks until Complete has been called, then returns its outcome.
func (r *downloadLinkRequest) Wait() (string, error) {
	<-r.done
	return r.result, r.err
}
// GetDownloadLink resolves the unrestricted download link for a file,
// serving from the local cache when possible and collapsing concurrent
// requests for the same fileLink into a single upstream fetch.
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
	// Fast path: cached, unexpired, and not marked invalid.
	if dl := c.checkDownloadLink(fileLink); dl != "" {
		return dl, nil
	}

	// Single-flight: LoadOrStore is atomic, so two goroutines can no
	// longer both miss the map and fetch the same link twice (the old
	// separate Load-then-Store pair had exactly that race).
	req := newDownloadLinkRequest()
	if existing, inFlight := c.downloadLinkRequests.LoadOrStore(fileLink, req); inFlight {
		// Another goroutine owns the fetch; wait for its result.
		return existing.(*downloadLinkRequest).Wait()
	}

	downloadLink, err := c.fetchDownloadLink(torrentName, filename, fileLink)

	// Publish the result to waiters and clear the in-flight marker.
	req.Complete(downloadLink, err)
	c.downloadLinkRequests.Delete(fileLink)
	return downloadLink, err
}
// fetchDownloadLink asks the debrid client for a fresh unrestricted link
// for filename inside torrentName, refreshing or re-inserting the torrent
// when its restricted link is missing or the hoster rejects it.
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (string, error) {
	ct := c.GetTorrentByName(torrentName)
	if ct == nil {
		return "", fmt.Errorf("torrent not found")
	}
	file, ok := ct.GetFile(filename)
	if !ok {
		return "", fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
	}
	if file.Link == "" {
		// file link is empty, refresh the torrent to get restricted links
		ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
		if ct == nil {
			return "", fmt.Errorf("failed to refresh torrent")
		}
		file, ok = ct.GetFile(filename)
		if !ok {
			return "", fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
		}
	}
	if file.Link == "" {
		// Still empty after a refresh: re-insert the torrent as a last resort.
		newCt, err := c.reInsertTorrent(ct)
		if err != nil {
			return "", fmt.Errorf("failed to reinsert torrent. %w", err)
		}
		ct = newCt
		file, ok = ct.GetFile(filename)
		if !ok {
			return "", fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
		}
	}
	c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
	downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
	if err != nil {
		switch {
		case errors.Is(err, request.HosterUnavailableError):
			// The restricted link went stale upstream; re-insert the
			// torrent and retry the link generation once.
			newCt, reinsertErr := c.reInsertTorrent(ct)
			if reinsertErr != nil {
				return "", fmt.Errorf("failed to reinsert torrent: %w", reinsertErr)
			}
			ct = newCt
			file, ok = ct.GetFile(filename)
			if !ok {
				return "", fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
			}
			downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
			if err != nil {
				return "", err
			}
			if downloadLink == nil {
				return "", fmt.Errorf("download link is empty for %s", filename)
			}
			c.updateDownloadLink(downloadLink)
			// Bug fix: this retry path used to return "" on success,
			// making callers treat a recovered link as a failure.
			return downloadLink.DownloadLink, nil
		case errors.Is(err, request.TrafficExceededError):
			// This is likely a fair usage limit error; surface it unchanged.
			return "", err
		default:
			return "", fmt.Errorf("failed to get download link: %w", err)
		}
	}
	if downloadLink == nil {
		return "", fmt.Errorf("download link is empty")
	}
	c.updateDownloadLink(downloadLink)
	return downloadLink.DownloadLink, nil
}
// GenerateDownloadLinks asks the client to produce download links for
// every file in t, caches each generated link, and persists the updated
// torrent.
func (c *Cache) GenerateDownloadLinks(t CachedTorrent) {
	if err := c.client.GenerateDownloadLinks(t.Torrent); err != nil {
		c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links")
		return
	}
	for _, f := range t.GetFiles() {
		if f.DownloadLink == nil {
			continue
		}
		c.updateDownloadLink(f.DownloadLink)
	}
	c.setTorrent(t, nil)
}
// updateDownloadLink caches dl keyed by its restricted link, stamping the
// configured auto-expiry time so checkDownloadLink can evict it later.
func (c *Cache) updateDownloadLink(dl *types.DownloadLink) {
	c.downloadLinks.Store(dl.Link, linkCache{
		Id:        dl.Id,
		link:      dl.DownloadLink,
		expiresAt: time.Now().Add(c.autoExpiresLinksAfterDuration),
		accountId: dl.AccountId,
	})
}
// checkDownloadLink returns the cached unrestricted link for link, or ""
// when there is no entry, the entry has expired, or the link was marked
// invalid.
func (c *Cache) checkDownloadLink(link string) string {
	dl, ok := c.downloadLinks.Load(link)
	if !ok {
		return ""
	}
	if !dl.expiresAt.After(time.Now()) || c.IsDownloadLinkInvalid(dl.link) {
		return ""
	}
	return dl.link
}
// MarkDownloadLinkAsInvalid records downloadLink as unusable for reason
// and evicts it from the cache. For a "bandwidth_exceeded" reason the
// account that generated the link is disabled as well.
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
	c.invalidDownloadLinks.Store(downloadLink, reason)
	// Remove the download api key from active
	if reason == "bandwidth_exceeded" {
		dl, ok := c.downloadLinks.Load(link)
		if ok && dl.accountId != "" && dl.link == downloadLink {
			c.client.DisableAccount(dl.accountId)
		}
	}
	c.removeDownloadLink(link)
}
// removeDownloadLink evicts link from the local cache and, when the entry
// carries a debrid-side id, deletes the link upstream as well.
func (c *Cache) removeDownloadLink(link string) {
	dl, ok := c.downloadLinks.Load(link)
	if !ok {
		return
	}
	// Delete dl from cache
	c.downloadLinks.Delete(link)
	// Delete dl from debrid; best effort, failures are deliberately ignored.
	if dl.Id != "" {
		_ = c.client.DeleteDownloadLink(dl.Id)
	}
}
// IsDownloadLinkInvalid reports whether downloadLink was previously
// marked invalid, logging the recorded reason when it was.
func (c *Cache) IsDownloadLinkInvalid(downloadLink string) bool {
	reason, ok := c.invalidDownloadLinks.Load(downloadLink)
	if !ok {
		return false
	}
	c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
	return true
}

View File

@@ -0,0 +1,61 @@
package debrid
import (
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
)
// Engine owns the configured debrid clients and their optional WebDAV
// caches. Clients is guarded by clientsMu and Caches by CacheMu.
// LastUsed holds the map key of the client that most recently accepted
// a torrent (set by ProcessTorrent).
type Engine struct {
	Clients   map[string]types.Client
	clientsMu sync.Mutex
	Caches    map[string]*Cache
	CacheMu   sync.Mutex
	LastUsed  string
}
// NewEngine wires up a client (and, when WebDAV is enabled, a cache) for
// every configured debrid provider.
func NewEngine() *Engine {
	cfg := config.Get()
	engine := &Engine{
		Clients:  make(map[string]types.Client),
		Caches:   make(map[string]*Cache),
		LastUsed: "",
	}
	for _, dc := range cfg.Debrids {
		client := createDebridClient(dc)
		log := client.GetLogger()
		if dc.UseWebDav {
			engine.Caches[dc.Name] = New(dc, client)
			log.Info().Msg("Debrid Service started with WebDAV")
		} else {
			log.Info().Msg("Debrid Service started")
		}
		engine.Clients[dc.Name] = client
	}
	return engine
}
// GetClient returns the client registered under name, or nil when no
// such provider is configured. Access is serialized with clientsMu.
func (d *Engine) GetClient(name string) types.Client {
	d.clientsMu.Lock()
	defer d.clientsMu.Unlock()
	return d.Clients[name]
}
// Reset drops every client and cache, returning the engine to its empty
// state. LastUsed is cleared too — previously it kept naming a client
// that no longer existed after a reset.
func (d *Engine) Reset() {
	d.clientsMu.Lock()
	d.Clients = make(map[string]types.Client)
	d.LastUsed = ""
	d.clientsMu.Unlock()

	d.CacheMu.Lock()
	d.Caches = make(map[string]*Cache)
	d.CacheMu.Unlock()
}
// GetDebrids returns a snapshot copy of the client map. The previous
// version handed out the internal map without holding clientsMu, so
// callers iterating it could race with Reset replacing the map's
// contents.
func (d *Engine) GetDebrids() map[string]types.Client {
	d.clientsMu.Lock()
	defer d.clientsMu.Unlock()
	snapshot := make(map[string]types.Client, len(d.Clients))
	for name, client := range d.Clients {
		snapshot[name] = client
	}
	return snapshot
}

View File

@@ -1,4 +1,4 @@
package store
package debrid
import (
"github.com/sirrobot01/decypharr/pkg/debrid/types"
@@ -25,18 +25,3 @@ func mergeFiles(torrents ...CachedTorrent) map[string]types.File {
}
return merged
}
func (c *Cache) GetIngests() ([]types.IngestData, error) {
torrents := c.GetTorrents()
debridName := c.client.Name()
var ingests []types.IngestData
for _, torrent := range torrents {
ingests = append(ingests, types.IngestData{
Debrid: debridName,
Name: torrent.Filename,
Hash: torrent.InfoHash,
Size: torrent.Bytes,
})
}
return ingests, nil
}

View File

@@ -1,4 +1,4 @@
package store
package debrid
import (
"context"
@@ -136,67 +136,67 @@ func (c *Cache) refreshRclone() error {
return nil
}
client := http.DefaultClient
client := &http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
DisableCompression: false,
MaxIdleConnsPerHost: 5,
},
}
// Create form data
data := c.buildRcloneRequestData()
if err := c.sendRcloneRequest(client, "vfs/forget", data); err != nil {
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/forget request")
}
if err := c.sendRcloneRequest(client, "vfs/refresh", data); err != nil {
c.logger.Error().Err(err).Msg("Failed to send rclone vfs/refresh request")
}
return nil
}
func (c *Cache) buildRcloneRequestData() string {
cfg := c.config
data := ""
dirs := strings.FieldsFunc(cfg.RcRefreshDirs, func(r rune) bool {
return r == ',' || r == '&'
})
if len(dirs) == 0 {
return "dir=__all__"
}
var data strings.Builder
for index, dir := range dirs {
if dir != "" {
if index == 0 {
data.WriteString("dir=" + dir)
} else {
data.WriteString("&dir" + fmt.Sprint(index+1) + "=" + dir)
data = "dir=__all__"
} else {
for index, dir := range dirs {
if dir != "" {
if index == 0 {
data += "dir=" + dir
} else {
data += "&dir" + fmt.Sprint(index+1) + "=" + dir
}
}
}
}
return data.String()
}
func (c *Cache) sendRcloneRequest(client *http.Client, endpoint, data string) error {
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", c.config.RcUrl, endpoint), strings.NewReader(data))
if err != nil {
sendRequest := func(endpoint string) error {
req, err := http.NewRequest("POST", fmt.Sprintf("%s/%s", cfg.RcUrl, endpoint), strings.NewReader(data))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if cfg.RcUser != "" && cfg.RcPass != "" {
req.SetBasicAuth(cfg.RcUser, cfg.RcPass)
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
}
_, _ = io.Copy(io.Discard, resp.Body)
return nil
}
if err := sendRequest("vfs/forget"); err != nil {
return err
}
if err := sendRequest("vfs/refresh"); err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if c.config.RcUser != "" && c.config.RcPass != "" {
req.SetBasicAuth(c.config.RcUser, c.config.RcPass)
}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
return fmt.Errorf("failed to perform %s: %s - %s", endpoint, resp.Status, string(body))
}
_, _ = io.Copy(io.Discard, resp.Body)
return nil
}
@@ -241,14 +241,27 @@ func (c *Cache) refreshDownloadLinks(ctx context.Context) {
}
defer c.downloadLinksRefreshMu.Unlock()
links, err := c.client.GetDownloadLinks()
downloadLinks, err := c.client.GetDownloads()
if err != nil {
c.logger.Error().Err(err).Msg("Failed to get download links")
return
}
for k, v := range downloadLinks {
// if link is generated in the last 24 hours, add it to cache
timeSince := time.Since(v.Generated)
if timeSince < c.autoExpiresLinksAfterDuration {
c.downloadLinks.Store(k, linkCache{
Id: v.Id,
accountId: v.AccountId,
link: v.DownloadLink,
expiresAt: v.Generated.Add(c.autoExpiresLinksAfterDuration - timeSince),
})
} else {
c.downloadLinks.Delete(k)
}
}
c.client.Accounts().SetDownloadLinks(links)
c.logger.Trace().Msgf("Refreshed %d download links", len(downloadLinks))
c.logger.Debug().Msgf("Refreshed download %d links", c.client.Accounts().GetLinksCount())
}

View File

@@ -1,10 +1,10 @@
package store
package debrid
import (
"context"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
@@ -61,7 +61,6 @@ func (c *Cache) markAsSuccessfullyReinserted(torrentId string) {
func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
files := make(map[string]types.File)
repairStrategy := config.Get().Repair.Strategy
brokenFiles := make([]string, 0)
if len(filenames) > 0 {
for name, f := range t.Files {
@@ -91,69 +90,20 @@ func (c *Cache) GetBrokenFiles(t *CachedTorrent, filenames []string) []string {
}
files = t.Files
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Use a mutex to protect brokenFiles slice and torrent-wide failure flag
var mu sync.Mutex
torrentWideFailed := false
wg.Add(len(files))
for _, f := range files {
go func(f types.File) {
defer wg.Done()
select {
case <-ctx.Done():
return
default:
}
if f.Link == "" {
mu.Lock()
if repairStrategy == config.RepairStrategyPerTorrent {
torrentWideFailed = true
mu.Unlock()
cancel() // Signal all other goroutines to stop
return
} else {
// per_file strategy - only mark this file as broken
// Check if file link is still missing
if f.Link == "" {
brokenFiles = append(brokenFiles, f.Name)
} else {
// Check if file.Link not in the downloadLink Cache
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, request.HosterUnavailableError) {
brokenFiles = append(brokenFiles, f.Name)
}
mu.Unlock()
return
}
if err := c.client.CheckLink(f.Link); err != nil {
if errors.Is(err, utils.HosterUnavailableError) {
mu.Lock()
if repairStrategy == config.RepairStrategyPerTorrent {
torrentWideFailed = true
mu.Unlock()
cancel() // Signal all other goroutines to stop
return
} else {
// per_file strategy - only mark this file as broken
brokenFiles = append(brokenFiles, f.Name)
}
mu.Unlock()
}
}
}(f)
}
wg.Wait()
// Handle the result based on strategy
if repairStrategy == config.RepairStrategyPerTorrent && torrentWideFailed {
// Mark all files as broken for per_torrent strategy
for _, f := range files {
brokenFiles = append(brokenFiles, f.Name)
}
}
// For per_file strategy, brokenFiles already contains only the broken ones
// Try to reinsert the torrent if it's broken
if len(brokenFiles) > 0 && t.Torrent != nil {
@@ -253,7 +203,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
return ct, fmt.Errorf("failed to submit magnet: empty torrent")
}
newTorrent.DownloadUncached = false // Set to false, avoid re-downloading
newTorrent, err = c.client.CheckStatus(newTorrent)
newTorrent, err = c.client.CheckStatus(newTorrent, true)
if err != nil {
if newTorrent != nil && newTorrent.Id != "" {
// Delete the torrent if it was not downloaded
@@ -301,11 +251,7 @@ func (c *Cache) reInsertTorrent(ct *CachedTorrent) (*CachedTorrent, error) {
return ct, nil
}
func (c *Cache) resetInvalidLinks(ctx context.Context) {
c.logger.Debug().Msgf("Resetting accounts")
func (c *Cache) resetInvalidLinks() {
c.invalidDownloadLinks = sync.Map{}
c.client.Accounts().Reset() // Reset the active download keys
// Refresh the download links
c.refreshDownloadLinks(ctx)
c.client.ResetActiveDownloadKeys() // Reset the active download keys
}

View File

@@ -1,4 +1,4 @@
package store
package debrid
import (
"fmt"
@@ -40,22 +40,13 @@ type directoryFilter struct {
ageThreshold time.Duration // only for last_added
}
type torrents struct {
sync.RWMutex
byID map[string]CachedTorrent
byName map[string]CachedTorrent
}
type folders struct {
sync.RWMutex
listing map[string][]os.FileInfo // folder name to file listing
}
type torrentCache struct {
torrents torrents
mu sync.Mutex
byID map[string]CachedTorrent
byName map[string]CachedTorrent
listing atomic.Value
folders folders
folderListing map[string][]os.FileInfo
folderListingMu sync.RWMutex
directoriesFilters map[string][]directoryFilter
sortNeeded atomic.Bool
}
@@ -71,13 +62,9 @@ type sortableFile struct {
func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
tc := &torrentCache{
torrents: torrents{
byID: make(map[string]CachedTorrent),
byName: make(map[string]CachedTorrent),
},
folders: folders{
listing: make(map[string][]os.FileInfo),
},
byID: make(map[string]CachedTorrent),
byName: make(map[string]CachedTorrent),
folderListing: make(map[string][]os.FileInfo),
directoriesFilters: dirFilters,
}
@@ -87,42 +74,41 @@ func newTorrentCache(dirFilters map[string][]directoryFilter) *torrentCache {
}
func (tc *torrentCache) reset() {
tc.torrents.Lock()
tc.torrents.byID = make(map[string]CachedTorrent)
tc.torrents.byName = make(map[string]CachedTorrent)
tc.torrents.Unlock()
tc.mu.Lock()
tc.byID = make(map[string]CachedTorrent)
tc.byName = make(map[string]CachedTorrent)
tc.mu.Unlock()
// reset the sorted listing
tc.sortNeeded.Store(false)
tc.listing.Store(make([]os.FileInfo, 0))
// reset any per-folder views
tc.folders.Lock()
tc.folders.listing = make(map[string][]os.FileInfo)
tc.folders.Unlock()
tc.folderListingMu.Lock()
tc.folderListing = make(map[string][]os.FileInfo)
tc.folderListingMu.Unlock()
}
func (tc *torrentCache) getByID(id string) (CachedTorrent, bool) {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
torrent, exists := tc.torrents.byID[id]
tc.mu.Lock()
defer tc.mu.Unlock()
torrent, exists := tc.byID[id]
return torrent, exists
}
func (tc *torrentCache) getByName(name string) (CachedTorrent, bool) {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
torrent, exists := tc.torrents.byName[name]
tc.mu.Lock()
defer tc.mu.Unlock()
torrent, exists := tc.byName[name]
return torrent, exists
}
func (tc *torrentCache) set(name string, torrent, newTorrent CachedTorrent) {
tc.torrents.Lock()
tc.mu.Lock()
// Set the id first
tc.torrents.byName[name] = torrent
tc.torrents.byID[torrent.Id] = torrent // This is the unadulterated torrent
tc.torrents.Unlock()
tc.byID[newTorrent.Id] = torrent // This is the unadulterated torrent
tc.byName[name] = newTorrent // This is likely the modified torrent
tc.mu.Unlock()
tc.sortNeeded.Store(true)
}
@@ -138,12 +124,12 @@ func (tc *torrentCache) getListing() []os.FileInfo {
}
func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
tc.folders.RLock()
defer tc.folders.RUnlock()
tc.folderListingMu.RLock()
defer tc.folderListingMu.RUnlock()
if folderName == "" {
return tc.getListing()
}
if folder, ok := tc.folders.listing[folderName]; ok {
if folder, ok := tc.folderListing[folderName]; ok {
return folder
}
// If folder not found, return empty slice
@@ -152,13 +138,13 @@ func (tc *torrentCache) getFolderListing(folderName string) []os.FileInfo {
func (tc *torrentCache) refreshListing() {
tc.torrents.RLock()
all := make([]sortableFile, 0, len(tc.torrents.byName))
for name, t := range tc.torrents.byName {
tc.mu.Lock()
all := make([]sortableFile, 0, len(tc.byName))
for name, t := range tc.byName {
all = append(all, sortableFile{t.Id, name, t.AddedOn, t.Bytes, t.Bad})
}
tc.sortNeeded.Store(false)
tc.torrents.RUnlock()
tc.mu.Unlock()
sort.Slice(all, func(i, j int) bool {
if all[i].name != all[j].name {
@@ -171,18 +157,17 @@ func (tc *torrentCache) refreshListing() {
wg.Add(1) // for all listing
go func() {
defer wg.Done()
listing := make([]os.FileInfo, len(all))
for i, sf := range all {
listing[i] = &fileInfo{sf.id, sf.name, sf.size, 0755 | os.ModeDir, sf.modTime, true}
}
tc.listing.Store(listing)
}()
wg.Done()
wg.Add(1)
// For __bad__
go func() {
defer wg.Done()
listing := make([]os.FileInfo, 0)
for _, sf := range all {
if sf.bad {
@@ -196,14 +181,15 @@ func (tc *torrentCache) refreshListing() {
})
}
}
tc.folders.Lock()
tc.folderListingMu.Lock()
if len(listing) > 0 {
tc.folders.listing["__bad__"] = listing
tc.folderListing["__bad__"] = listing
} else {
delete(tc.folders.listing, "__bad__")
delete(tc.folderListing, "__bad__")
}
tc.folders.Unlock()
tc.folderListingMu.Unlock()
}()
wg.Done()
now := time.Now()
wg.Add(len(tc.directoriesFilters)) // for each directory filter
@@ -221,13 +207,13 @@ func (tc *torrentCache) refreshListing() {
}
}
tc.folders.Lock()
tc.folderListingMu.Lock()
if len(matched) > 0 {
tc.folders.listing[dir] = matched
tc.folderListing[dir] = matched
} else {
delete(tc.folders.listing, dir)
delete(tc.folderListing, dir)
}
tc.folders.Unlock()
tc.folderListingMu.Unlock()
}(dir, filters)
}
@@ -278,51 +264,35 @@ func (tc *torrentCache) torrentMatchDirectory(filters []directoryFilter, file so
}
func (tc *torrentCache) getAll() map[string]CachedTorrent {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
result := make(map[string]CachedTorrent, len(tc.torrents.byID))
for name, torrent := range tc.torrents.byID {
tc.mu.Lock()
defer tc.mu.Unlock()
result := make(map[string]CachedTorrent)
for name, torrent := range tc.byID {
result[name] = torrent
}
return result
}
func (tc *torrentCache) getAllCount() int {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
return len(tc.torrents.byID)
}
func (tc *torrentCache) getAllByName() map[string]CachedTorrent {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
results := make(map[string]CachedTorrent, len(tc.torrents.byName))
for name, torrent := range tc.torrents.byName {
results[name] = torrent
}
return results
}
func (tc *torrentCache) getIdMaps() map[string]struct{} {
tc.torrents.RLock()
defer tc.torrents.RUnlock()
res := make(map[string]struct{}, len(tc.torrents.byID))
for id := range tc.torrents.byID {
tc.mu.Lock()
defer tc.mu.Unlock()
res := make(map[string]struct{}, len(tc.byID))
for id := range tc.byID {
res[id] = struct{}{}
}
return res
}
func (tc *torrentCache) removeId(id string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.byID, id)
tc.mu.Lock()
defer tc.mu.Unlock()
delete(tc.byID, id)
tc.sortNeeded.Store(true)
}
func (tc *torrentCache) remove(name string) {
tc.torrents.Lock()
defer tc.torrents.Unlock()
delete(tc.torrents.byName, name)
tc.mu.Lock()
defer tc.mu.Unlock()
delete(tc.byName, name)
tc.sortNeeded.Store(true)
}

View File

@@ -1,4 +1,4 @@
package store
package debrid
import (
"context"
@@ -45,7 +45,7 @@ func (c *Cache) StartSchedule(ctx context.Context) error {
} else {
// Schedule the job
if _, err := c.cetScheduler.NewJob(jd, gocron.NewTask(func() {
c.resetInvalidLinks(ctx)
c.resetInvalidLinks()
}), gocron.WithContext(ctx)); err != nil {
c.logger.Error().Err(err).Msg("Failed to create link reset job")
} else {

1
pkg/debrid/debrid/xml.go Normal file
View File

@@ -0,0 +1 @@
package debrid

View File

@@ -10,6 +10,7 @@ import (
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"strconv"
"time"
"net/http"
@@ -17,64 +18,24 @@ import (
)
type DebridLink struct {
name string
Name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
accounts map[string]types.Account
DownloadUncached bool
client *request.Client
autoExpiresLinksAfter time.Duration
MountPath string
logger zerolog.Logger
checkCached bool
addSamples bool
}
func New(dc config.Debrid) (*DebridLink, error) {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
}
_log := logger.New(dc.Name)
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(rl),
request.WithProxy(dc.Proxy),
)
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
}
return &DebridLink{
name: "debridlink",
Host: "https://debrid-link.com/api/v2",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}, nil
func (dl *DebridLink) GetName() string {
return dl.Name
}
func (dl *DebridLink) GetProfile() (*types.Profile, error) {
return nil, nil
}
func (dl *DebridLink) Name() string {
return dl.name
}
func (dl *DebridLink) Logger() zerolog.Logger {
func (dl *DebridLink) GetLogger() zerolog.Logger {
return dl.logger
}
@@ -107,13 +68,13 @@ func (dl *DebridLink) IsAvailable(hashes []string) map[string]bool {
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := dl.client.MakeRequest(req)
if err != nil {
dl.logger.Error().Err(err).Msgf("Error checking availability")
dl.logger.Info().Msgf("Error checking availability: %v", err)
return result
}
var data AvailableResponse
err = json.Unmarshal(resp, &data)
if err != nil {
dl.logger.Error().Err(err).Msgf("Error marshalling availability")
dl.logger.Info().Msgf("Error marshalling availability: %v", err)
return result
}
if data.Value == nil {
@@ -160,7 +121,7 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) {
Filename: name,
OriginalFilename: name,
MountPath: dl.MountPath,
Debrid: dl.name,
Debrid: dl.Name,
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
}
cfg := config.Get()
@@ -174,7 +135,14 @@ func (dl *DebridLink) GetTorrent(torrentId string) (*types.Torrent, error) {
Name: f.Name,
Size: f.Size,
Path: f.Name,
Link: f.DownloadURL,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Link: f.DownloadURL,
}
torrent.Files[file.Name] = file
}
@@ -223,8 +191,6 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
cfg := config.Get()
links := make(map[string]*types.DownloadLink)
now := time.Now()
for _, f := range data.Files {
if !cfg.IsSizeAllowed(f.Size) {
continue
@@ -235,21 +201,17 @@ func (dl *DebridLink) UpdateTorrent(t *types.Torrent) error {
Name: f.Name,
Size: f.Size,
Path: f.Name,
Link: f.DownloadURL,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Link: f.DownloadURL,
}
link := &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
links[file.Link] = link
file.DownloadLink = link
t.Files[f.Name] = file
}
dl.accounts.SetDownloadLinks(links)
return nil
}
@@ -284,11 +246,8 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
t.Filename = name
t.OriginalFilename = name
t.MountPath = dl.MountPath
t.Debrid = dl.name
t.Debrid = dl.Name
t.Added = time.Unix(data.Created, 0).Format(time.RFC3339)
links := make(map[string]*types.DownloadLink)
now := time.Now()
for _, f := range data.Files {
file := types.File{
TorrentId: t.Id,
@@ -297,26 +256,22 @@ func (dl *DebridLink) SubmitMagnet(t *types.Torrent) (*types.Torrent, error) {
Size: f.Size,
Path: f.Name,
Link: f.DownloadURL,
Generated: now,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Generated: time.Now(),
}
link := &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
links[file.Link] = link
file.DownloadLink = link
t.Files[f.Name] = file
}
dl.accounts.SetDownloadLinks(links)
return t, nil
}
func (dl *DebridLink) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
func (dl *DebridLink) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
for {
err := dl.UpdateTorrent(torrent)
if err != nil || torrent == nil {
@@ -325,7 +280,11 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent) (*types.Torrent, error
status := torrent.Status
if status == "downloaded" {
dl.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
return torrent, nil
err = dl.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
break
} else if utils.Contains(dl.GetDownloadingStatus(), status) {
if !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
@@ -338,6 +297,7 @@ func (dl *DebridLink) CheckStatus(torrent *types.Torrent) (*types.Torrent, error
}
}
return torrent, nil
}
func (dl *DebridLink) DeleteTorrent(torrentId string) error {
@@ -350,27 +310,69 @@ func (dl *DebridLink) DeleteTorrent(torrentId string) error {
return nil
}
func (dl *DebridLink) GetFileDownloadLinks(t *types.Torrent) error {
func (dl *DebridLink) GenerateDownloadLinks(t *types.Torrent) error {
// Download links are already generated
return nil
}
func (dl *DebridLink) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
func (dl *DebridLink) GetDownloads() (map[string]types.DownloadLink, error) {
return nil, nil
}
func (dl *DebridLink) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
return dl.accounts.GetDownloadLink(file.Link)
return file.DownloadLink, nil
}
func (dl *DebridLink) GetDownloadingStatus() []string {
return []string{"downloading"}
}
func (dl *DebridLink) GetCheckCached() bool {
return dl.checkCached
}
func (dl *DebridLink) GetDownloadUncached() bool {
return dl.DownloadUncached
}
func New(dc config.Debrid) *DebridLink {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
"Content-Type": "application/json",
}
_log := logger.New(dc.Name)
client := request.New(
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithRateLimiter(rl),
request.WithProxy(dc.Proxy),
)
accounts := make(map[string]types.Account)
for idx, key := range dc.DownloadAPIKeys {
id := strconv.Itoa(idx)
accounts[id] = types.Account{
Name: key,
ID: id,
Token: key,
}
}
return &DebridLink{
Name: "debridlink",
Host: "https://debrid-link.com/api/v2",
APIKey: dc.APIKey,
accounts: accounts,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
}
func (dl *DebridLink) GetTorrents() ([]*types.Torrent, error) {
page := 0
perPage := 100
@@ -400,12 +402,11 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
var res torrentInfo
err = json.Unmarshal(resp, &res)
if err != nil {
dl.logger.Error().Err(err).Msgf("Error unmarshalling torrent info")
dl.logger.Info().Msgf("Error unmarshalling torrent info: %s", err)
return torrents, err
}
data := *res.Value
links := make(map[string]*types.DownloadLink)
if len(data) == 0 {
return torrents, nil
@@ -423,12 +424,11 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
OriginalFilename: t.Name,
InfoHash: t.HashString,
Files: make(map[string]types.File),
Debrid: dl.name,
Debrid: dl.Name,
MountPath: dl.MountPath,
Added: time.Unix(t.Created, 0).Format(time.RFC3339),
}
cfg := config.Get()
now := time.Now()
for _, f := range t.Files {
if !cfg.IsSizeAllowed(f.Size) {
continue
@@ -439,23 +439,19 @@ func (dl *DebridLink) getTorrents(page, perPage int) ([]*types.Torrent, error) {
Name: f.Name,
Size: f.Size,
Path: f.Name,
Link: f.DownloadURL,
DownloadLink: &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: time.Now(),
AccountId: "0",
},
Link: f.DownloadURL,
}
link := &types.DownloadLink{
Filename: f.Name,
Link: f.DownloadURL,
DownloadLink: f.DownloadURL,
Generated: now,
ExpiresAt: now.Add(dl.autoExpiresLinksAfter),
}
links[file.Link] = link
file.DownloadLink = link
torrent.Files[f.Name] = file
}
torrents = append(torrents, torrent)
}
dl.accounts.SetDownloadLinks(links)
return torrents, nil
}
@@ -467,15 +463,12 @@ func (dl *DebridLink) GetMountPath() string {
return dl.MountPath
}
func (dl *DebridLink) DisableAccount(accountId string) {
}
func (dl *DebridLink) ResetActiveDownloadKeys() {
}
func (dl *DebridLink) DeleteDownloadLink(linkId string) error {
return nil
}
func (dl *DebridLink) GetAvailableSlots() (int, error) {
//TODO: Implement the logic to check available slots for DebridLink
return 0, fmt.Errorf("GetAvailableSlots not implemented for DebridLink")
}
func (dl *DebridLink) Accounts() *types.Accounts {
return dl.accounts
}

View File

@@ -1 +0,0 @@
package realdebrid

View File

@@ -2,237 +2,129 @@ package realdebrid
import (
"bytes"
"cmp"
"encoding/json"
"errors"
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
gourl "net/url"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/rar"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
gourl "net/url"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
type RealDebrid struct {
name string
Name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
APIKey string
currentDownloadKey string
accounts map[string]types.Account
accountsMutex sync.RWMutex
DownloadUncached bool
client *request.Client
downloadClient *request.Client
repairClient *request.Client
autoExpiresLinksAfter time.Duration
MountPath string
logger zerolog.Logger
UnpackRar bool
rarSemaphore chan struct{}
checkCached bool
addSamples bool
Profile *types.Profile
minimumFreeSlot int // Minimum number of active pots to maintain (used for cached stuffs, etc.)
DownloadUncached bool
client *request.Client
downloadClient *request.Client
MountPath string
logger zerolog.Logger
checkCached bool
addSamples bool
}
func New(dc config.Debrid) (*RealDebrid, error) {
func New(dc config.Debrid) *RealDebrid {
rl := request.ParseRateLimit(dc.RateLimit)
repairRl := request.ParseRateLimit(cmp.Or(dc.RepairRateLimit, dc.RateLimit))
downloadRl := request.ParseRateLimit(cmp.Or(dc.DownloadRateLimit, dc.RateLimit))
headers := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", dc.APIKey),
}
_log := logger.New(dc.Name)
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
accounts := make(map[string]types.Account)
currentDownloadKey := dc.DownloadAPIKeys[0]
for idx, key := range dc.DownloadAPIKeys {
id := strconv.Itoa(idx)
accounts[id] = types.Account{
Name: key,
ID: id,
Token: key,
}
}
r := &RealDebrid{
name: "realdebrid",
Host: "https://api.real-debrid.com/rest/1.0",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
UnpackRar: dc.UnpackRar,
downloadHeaders := map[string]string{
"Authorization": fmt.Sprintf("Bearer %s", currentDownloadKey),
}
return &RealDebrid{
Name: "realdebrid",
Host: "https://api.real-debrid.com/rest/1.0",
APIKey: dc.APIKey,
accounts: accounts,
DownloadUncached: dc.DownloadUncached,
client: request.New(
request.WithHeaders(headers),
request.WithRateLimiter(rl),
request.WithLogger(_log),
request.WithMaxRetries(10),
request.WithMaxRetries(5),
request.WithRetryableStatus(429, 502),
request.WithProxy(dc.Proxy),
),
downloadClient: request.New(
request.WithRateLimiter(downloadRl),
request.WithHeaders(downloadHeaders),
request.WithLogger(_log),
request.WithMaxRetries(10),
request.WithRetryableStatus(429, 447, 502),
request.WithProxy(dc.Proxy),
),
repairClient: request.New(
request.WithRateLimiter(repairRl),
request.WithHeaders(headers),
request.WithLogger(_log),
request.WithMaxRetries(4),
request.WithRetryableStatus(429, 502),
request.WithProxy(dc.Proxy),
),
MountPath: dc.Folder,
logger: logger.New(dc.Name),
rarSemaphore: make(chan struct{}, 2),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
minimumFreeSlot: dc.MinimumFreeSlot,
}
if _, err := r.GetProfile(); err != nil {
return nil, err
} else {
return r, nil
currentDownloadKey: currentDownloadKey,
MountPath: dc.Folder,
logger: logger.New(dc.Name),
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
}
func (r *RealDebrid) Name() string {
return r.name
func (r *RealDebrid) GetName() string {
return r.Name
}
func (r *RealDebrid) Logger() zerolog.Logger {
func (r *RealDebrid) GetLogger() zerolog.Logger {
return r.logger
}
func (r *RealDebrid) getSelectedFiles(t *types.Torrent, data torrentInfo) (map[string]types.File, error) {
files := make(map[string]types.File)
func getSelectedFiles(t *types.Torrent, data torrentInfo) map[string]types.File {
selectedFiles := make([]types.File, 0)
for _, f := range data.Files {
if f.Selected == 1 {
selectedFiles = append(selectedFiles, types.File{
name := filepath.Base(f.Path)
file := types.File{
TorrentId: t.Id,
Name: filepath.Base(f.Path),
Path: filepath.Base(f.Path),
Name: name,
Path: name,
Size: f.Bytes,
Id: strconv.Itoa(f.ID),
})
}
selectedFiles = append(selectedFiles, file)
}
}
if len(selectedFiles) == 0 {
return files, nil
}
// Handle RARed torrents (single link, multiple files)
if len(data.Links) == 1 && len(selectedFiles) > 1 {
return r.handleRarArchive(t, data, selectedFiles)
}
// Standard case - map files to links
if len(selectedFiles) > len(data.Links) {
r.logger.Warn().Msgf("More files than links available: %d files, %d links for %s", len(selectedFiles), len(data.Links), t.Name)
}
for i, f := range selectedFiles {
if i < len(data.Links) {
f.Link = data.Links[i]
files[f.Name] = f
} else {
r.logger.Warn().Str("file", f.Name).Msg("No link available for file")
}
}
return files, nil
}
// handleRarArchive processes RAR archives with multiple files
func (r *RealDebrid) handleRarArchive(t *types.Torrent, data torrentInfo, selectedFiles []types.File) (map[string]types.File, error) {
// This will block if 2 RAR operations are already in progress
r.rarSemaphore <- struct{}{}
defer func() {
<-r.rarSemaphore
}()
files := make(map[string]types.File)
if !r.UnpackRar {
r.logger.Debug().Msgf("RAR file detected, but unpacking is disabled: %s", t.Name)
// Create a single file representing the RAR archive
file := types.File{
TorrentId: t.Id,
Id: "0",
Name: t.Name + ".rar",
Size: 0,
IsRar: true,
ByteRange: nil,
Path: t.Name + ".rar",
Link: data.Links[0],
Generated: time.Now(),
for index, f := range selectedFiles {
if index >= len(data.Links) {
break
}
files[file.Name] = file
return files, nil
f.Link = data.Links[index]
files[f.Name] = f
}
r.logger.Info().Msgf("RAR file detected, unpacking: %s", t.Name)
linkFile := &types.File{TorrentId: t.Id, Link: data.Links[0]}
downloadLinkObj, err := r.GetDownloadLink(t, linkFile)
if err != nil {
return nil, fmt.Errorf("failed to get download link for RAR file: %w", err)
}
dlLink := downloadLinkObj.DownloadLink
reader, err := rar.NewReader(dlLink)
if err != nil {
return nil, fmt.Errorf("failed to create RAR reader: %w", err)
}
rarFiles, err := reader.GetFiles()
if err != nil {
return nil, fmt.Errorf("failed to read RAR files: %w", err)
}
// Create lookup map for faster matching
fileMap := make(map[string]*types.File)
for i := range selectedFiles {
// RD converts special chars to '_' for RAR file paths
// @TODO: there might be more special chars to replace
safeName := strings.NewReplacer("|", "_", "\"", "_", "\\", "_", "?", "_", "*", "_", ":", "_", "<", "_", ">", "_").Replace(selectedFiles[i].Name)
fileMap[safeName] = &selectedFiles[i]
}
now := time.Now()
for _, rarFile := range rarFiles {
if file, exists := fileMap[rarFile.Name()]; exists {
file.IsRar = true
file.ByteRange = rarFile.ByteRange()
file.Link = data.Links[0]
file.Generated = now
files[file.Name] = *file
} else if !rarFile.IsDirectory {
r.logger.Warn().Msgf("RAR file %s not found in torrent files", rarFile.Name())
}
}
return files, nil
return files
}
// getTorrentFiles returns a list of torrent files from the torrent info
@@ -299,13 +191,13 @@ func (r *RealDebrid) IsAvailable(hashes []string) map[string]bool {
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
r.logger.Error().Err(err).Msgf("Error checking availability")
r.logger.Info().Msgf("Error checking availability: %v", err)
return result
}
var data AvailabilityResponse
err = json.Unmarshal(resp, &data)
if err != nil {
r.logger.Error().Err(err).Msgf("Error marshalling availability")
r.logger.Info().Msgf("Error marshalling availability: %v", err)
return result
}
for _, h := range hashes[i:end] {
@@ -334,30 +226,15 @@ func (r *RealDebrid) addTorrent(t *types.Torrent) (*types.Torrent, error) {
return nil, err
}
req.Header.Add("Content-Type", "application/x-bittorrent")
resp, err := r.client.Do(req)
resp, err := r.client.MakeRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
// Handle multiple_downloads
if resp.StatusCode == 509 {
return nil, utils.TooManyActiveDownloadsError
}
bodyBytes, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
}
defer resp.Body.Close()
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("reading response body: %w", err)
}
if err = json.Unmarshal(bodyBytes, &data); err != nil {
if err = json.Unmarshal(resp, &data); err != nil {
return nil, err
}
t.Id = data.Id
t.Debrid = r.name
t.Debrid = r.Name
t.MountPath = r.MountPath
return t, nil
}
@@ -369,30 +246,15 @@ func (r *RealDebrid) addMagnet(t *types.Torrent) (*types.Torrent, error) {
}
var data AddMagnetSchema
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.client.Do(req)
resp, err := r.client.MakeRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
// Handle multiple_downloads
if resp.StatusCode == 509 {
return nil, utils.TooManyActiveDownloadsError
}
bodyBytes, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
}
defer resp.Body.Close()
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("reading response body: %w", err)
}
if err = json.Unmarshal(bodyBytes, &data); err != nil {
if err = json.Unmarshal(resp, &data); err != nil {
return nil, err
}
t.Id = data.Id
t.Debrid = r.name
t.Debrid = r.Name
t.MountPath = r.MountPath
return t, nil
}
@@ -411,7 +273,7 @@ func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
}
if resp.StatusCode != http.StatusOK {
if resp.StatusCode == http.StatusNotFound {
return nil, utils.TorrentNotFoundError
return nil, request.TorrentNotFoundError
}
return nil, fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
}
@@ -433,7 +295,7 @@ func (r *RealDebrid) GetTorrent(torrentId string) (*types.Torrent, error) {
Filename: data.Filename,
OriginalFilename: data.OriginalFilename,
Links: data.Links,
Debrid: r.name,
Debrid: r.Name,
MountPath: r.MountPath,
}
t.Files = r.getTorrentFiles(t, data) // Get selected files
@@ -454,7 +316,7 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
}
if resp.StatusCode != http.StatusOK {
if resp.StatusCode == http.StatusNotFound {
return utils.TorrentNotFoundError
return request.TorrentNotFoundError
}
return fmt.Errorf("realdebrid API error: Status: %d || Body: %s", resp.StatusCode, string(bodyBytes))
}
@@ -474,14 +336,13 @@ func (r *RealDebrid) UpdateTorrent(t *types.Torrent) error {
t.OriginalFilename = data.OriginalFilename
t.Links = data.Links
t.MountPath = r.MountPath
t.Debrid = r.name
t.Debrid = r.Name
t.Added = data.Added
t.Files, _ = r.getSelectedFiles(t, data) // Get selected files
t.Files = getSelectedFiles(t, data) // Get selected files
return nil
}
func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
func (r *RealDebrid) CheckStatus(t *types.Torrent, isSymlink bool) (*types.Torrent, error) {
url := fmt.Sprintf("%s/torrents/info/%s", r.Host, t.Id)
req, _ := http.NewRequest(http.MethodGet, url, nil)
for {
@@ -505,7 +366,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
t.Seeders = data.Seeders
t.Links = data.Links
t.Status = status
t.Debrid = r.name
t.Debrid = r.Name
t.MountPath = r.MountPath
if status == "waiting_files_selection" {
t.Files = r.getTorrentFiles(t, data)
@@ -526,19 +387,18 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
return t, err
}
if res.StatusCode != http.StatusNoContent {
if res.StatusCode == 509 {
return nil, utils.TooManyActiveDownloadsError
}
return t, fmt.Errorf("realdebrid API error: Status: %d", res.StatusCode)
}
} else if status == "downloaded" {
t.Files, err = r.getSelectedFiles(t, data) // Get selected files
if err != nil {
return t, err
}
t.Files = getSelectedFiles(t, data) // Get selected files
r.logger.Info().Msgf("Torrent: %s downloaded to RD", t.Name)
return t, nil
if !isSymlink {
err = r.GenerateDownloadLinks(t)
if err != nil {
return t, err
}
}
break
} else if utils.Contains(r.GetDownloadingStatus(), status) {
if !t.DownloadUncached {
return t, fmt.Errorf("torrent: %s not cached", t.Name)
@@ -549,6 +409,7 @@ func (r *RealDebrid) CheckStatus(t *types.Torrent) (*types.Torrent, error) {
}
}
return t, nil
}
func (r *RealDebrid) DeleteTorrent(torrentId string) error {
@@ -561,56 +422,46 @@ func (r *RealDebrid) DeleteTorrent(torrentId string) error {
return nil
}
func (r *RealDebrid) GetFileDownloadLinks(t *types.Torrent) error {
func (r *RealDebrid) GenerateDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
var mu sync.Mutex
var firstErr error
files := make(map[string]types.File)
links := make(map[string]*types.DownloadLink)
_files := t.GetFiles()
wg.Add(len(_files))
for _, f := range _files {
wg.Add(len(t.Files))
for _, f := range t.Files {
go func(file types.File) {
defer wg.Done()
link, err := r.GetDownloadLink(t, &file)
if err != nil {
mu.Lock()
if firstErr == nil {
firstErr = err
}
mu.Unlock()
return
}
if link == nil {
mu.Lock()
if firstErr == nil {
firstErr = fmt.Errorf("realdebrid API error: download link not found for file %s", file.Name)
}
mu.Unlock()
errCh <- err
return
}
file.DownloadLink = link
mu.Lock()
files[file.Name] = file
links[link.Link] = link
mu.Unlock()
filesCh <- file
}(f)
}
wg.Wait()
go func() {
wg.Wait()
close(filesCh)
close(errCh)
}()
if firstErr != nil {
return firstErr
// Collect results
files := make(map[string]types.File, len(t.Files))
for file := range filesCh {
files[file.Name] = file
}
// Check for errors
for err := range errCh {
if err != nil {
return err // Return the first error encountered
}
}
// Add links to cache
r.accounts.SetDownloadLinks(links)
t.Files = files
return nil
}
@@ -621,24 +472,20 @@ func (r *RealDebrid) CheckLink(link string) error {
"link": {link},
}
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.repairClient.Do(req)
resp, err := r.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode == http.StatusNotFound {
return utils.HosterUnavailableError // File has been removed
return request.HosterUnavailableError // File has been removed
}
return nil
}
func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, error) {
url := fmt.Sprintf("%s/unrestrict/link/", r.Host)
_link := file.Link
if strings.HasPrefix(file.Link, "https://real-debrid.com/d/") && len(file.Link) > 39 {
_link = file.Link[0:39]
}
payload := gourl.Values{
"link": {_link},
"link": {file.Link},
}
req, _ := http.NewRequest(http.MethodPost, url, strings.NewReader(payload.Encode()))
resp, err := r.downloadClient.Do(req)
@@ -659,17 +506,17 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
}
switch data.ErrorCode {
case 19:
return nil, utils.HosterUnavailableError // File has been removed
return nil, request.HosterUnavailableError // File has been removed
case 23:
return nil, utils.TrafficExceededError
return nil, request.TrafficExceededError
case 24:
return nil, utils.HosterUnavailableError // Link has been nerfed
return nil, request.HosterUnavailableError // Link has been nerfed
case 34:
return nil, utils.TrafficExceededError // traffic exceeded
return nil, request.TrafficExceededError // traffic exceeded
case 35:
return nil, utils.HosterUnavailableError
return nil, request.HosterUnavailableError
case 36:
return nil, utils.TrafficExceededError // traffic exceeded
return nil, request.TrafficExceededError // traffic exceeded
default:
return nil, fmt.Errorf("realdebrid API error: Status: %d || Code: %d", resp.StatusCode, data.ErrorCode)
}
@@ -685,54 +532,58 @@ func (r *RealDebrid) _getDownloadLink(file *types.File) (*types.DownloadLink, er
if data.Download == "" {
return nil, fmt.Errorf("realdebrid API error: download link not found")
}
now := time.Now()
return &types.DownloadLink{
Filename: data.Filename,
Size: data.Filesize,
Link: data.Link,
DownloadLink: data.Download,
Generated: now,
ExpiresAt: now.Add(r.autoExpiresLinksAfter),
Generated: time.Now(),
}, nil
}
func (r *RealDebrid) GetDownloadLink(t *types.Torrent, file *types.File) (*types.DownloadLink, error) {
accounts := r.accounts.All()
for _, account := range accounts {
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", account.Token))
downloadLink, err := r._getDownloadLink(file)
if err == nil {
return downloadLink, nil
if r.currentDownloadKey == "" {
// If no download key is set, use the first one
accounts := r.getActiveAccounts()
if len(accounts) < 1 {
// No active download keys. It's likely that the key has reached bandwidth limit
return nil, fmt.Errorf("no active download keys")
}
r.currentDownloadKey = accounts[0].Token
}
retries := 0
if errors.Is(err, utils.TrafficExceededError) {
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", r.currentDownloadKey))
downloadLink, err := r._getDownloadLink(file)
retries := 0
if err != nil {
if errors.Is(err, request.TrafficExceededError) {
// Retries generating
retries = 5
} else {
// If the error is not traffic exceeded, return the error
return nil, err
}
backOff := 1 * time.Second
for retries > 0 {
downloadLink, err = r._getDownloadLink(file)
if err == nil {
return downloadLink, nil
}
if !errors.Is(err, utils.TrafficExceededError) {
return nil, err
}
// Add a delay before retrying
time.Sleep(backOff)
backOff *= 2 // Exponential backoff
retries--
}
}
return nil, fmt.Errorf("realdebrid API error: download link not found")
backOff := 1 * time.Second
for retries > 0 {
downloadLink, err = r._getDownloadLink(file)
if err == nil {
return downloadLink, nil
}
if !errors.Is(err, request.TrafficExceededError) {
return nil, err
}
// Add a delay before retrying
time.Sleep(backOff)
backOff *= 2 // Exponential backoff
}
return downloadLink, nil
}
func (r *RealDebrid) GetCheckCached() bool {
return r.checkCached
}
func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent, error) {
@@ -783,7 +634,7 @@ func (r *RealDebrid) getTorrents(offset int, limit int) (int, []*types.Torrent,
Links: t.Links,
Files: make(map[string]types.File),
InfoHash: t.Hash,
Debrid: r.name,
Debrid: r.Name,
MountPath: r.MountPath,
Added: t.Added.Format(time.RFC3339),
})
@@ -821,19 +672,18 @@ func (r *RealDebrid) GetTorrents() ([]*types.Torrent, error) {
return allTorrents, nil
}
func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
links := make(map[string]*types.DownloadLink)
func (r *RealDebrid) GetDownloads() (map[string]types.DownloadLink, error) {
links := make(map[string]types.DownloadLink)
offset := 0
limit := 1000
accounts := r.accounts.All()
accounts := r.getActiveAccounts()
if len(accounts) < 1 {
// No active download keys. It's likely that the key has reached bandwidth limit
return links, fmt.Errorf("no active download keys")
return nil, fmt.Errorf("no active download keys")
}
activeAccount := accounts[0]
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", activeAccount.Token))
r.downloadClient.SetHeader("Authorization", fmt.Sprintf("Bearer %s", accounts[0].Token))
for {
dl, err := r._getDownloads(offset, limit)
if err != nil {
@@ -848,12 +698,11 @@ func (r *RealDebrid) GetDownloadLinks() (map[string]*types.DownloadLink, error)
// This is ordered by date, so we can skip the rest
continue
}
links[d.Link] = &d
links[d.Link] = d
}
offset += len(dl)
}
return links, nil
}
@@ -879,7 +728,6 @@ func (r *RealDebrid) _getDownloads(offset int, limit int) ([]types.DownloadLink,
Link: d.Link,
DownloadLink: d.Download,
Generated: d.Generated,
ExpiresAt: d.Generated.Add(r.autoExpiresLinksAfter),
Id: d.Id,
})
@@ -899,6 +747,49 @@ func (r *RealDebrid) GetMountPath() string {
return r.MountPath
}
func (r *RealDebrid) DisableAccount(accountId string) {
r.accountsMutex.Lock()
defer r.accountsMutex.Unlock()
if len(r.accounts) == 1 {
r.logger.Info().Msgf("Cannot disable last account: %s", accountId)
return
}
r.currentDownloadKey = ""
if value, ok := r.accounts[accountId]; ok {
value.Disabled = true
r.accounts[accountId] = value
r.logger.Info().Msgf("Disabled account Index: %s", value.ID)
}
}
func (r *RealDebrid) ResetActiveDownloadKeys() {
r.accountsMutex.Lock()
defer r.accountsMutex.Unlock()
for key, value := range r.accounts {
value.Disabled = false
r.accounts[key] = value
}
}
func (r *RealDebrid) getActiveAccounts() []types.Account {
r.accountsMutex.RLock()
defer r.accountsMutex.RUnlock()
accounts := make([]types.Account, 0)
for _, value := range r.accounts {
if value.Disabled {
continue
}
accounts = append(accounts, value)
}
// Sort accounts by ID
sort.Slice(accounts, func(i, j int) bool {
return accounts[i].ID < accounts[j].ID
})
return accounts
}
func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
url := fmt.Sprintf("%s/downloads/delete/%s", r.Host, linkId)
req, _ := http.NewRequest(http.MethodDelete, url, nil)
@@ -907,49 +798,3 @@ func (r *RealDebrid) DeleteDownloadLink(linkId string) error {
}
return nil
}
func (r *RealDebrid) GetProfile() (*types.Profile, error) {
if r.Profile != nil {
return r.Profile, nil
}
url := fmt.Sprintf("%s/user", r.Host)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
return nil, err
}
var data profileResponse
if json.Unmarshal(resp, &data) != nil {
return nil, err
}
profile := &types.Profile{
Id: data.Id,
Username: data.Username,
Email: data.Email,
Points: data.Points,
Premium: data.Premium,
Expiration: data.Expiration,
Type: data.Type,
}
r.Profile = profile
return profile, nil
}
func (r *RealDebrid) GetAvailableSlots() (int, error) {
url := fmt.Sprintf("%s/torrents/activeCount", r.Host)
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := r.client.MakeRequest(req)
if err != nil {
return 0, nil
}
var data AvailableSlotsResponse
if json.Unmarshal(resp, &data) != nil {
return 0, fmt.Errorf("error unmarshalling available slots response: %w", err)
}
return data.TotalSlots - data.ActiveSlots - r.minimumFreeSlot, nil // Ensure we maintain minimum active pots
}
func (r *RealDebrid) Accounts() *types.Accounts {
return r.accounts
}

View File

@@ -139,20 +139,3 @@ type ErrorResponse struct {
Error string `json:"error"`
ErrorCode int `json:"error_code"`
}
type profileResponse struct {
Id int64 `json:"id"`
Username string `json:"username"`
Email string `json:"email"`
Points int64 `json:"points"`
Locale string `json:"locale"`
Avatar string `json:"avatar"`
Type string `json:"type"`
Premium int `json:"premium"`
Expiration time.Time `json:"expiration"`
}
type AvailableSlotsResponse struct {
ActiveSlots int `json:"nb"`
TotalSlots int `json:"limit"`
}

View File

@@ -1,200 +0,0 @@
package store
import (
"errors"
"fmt"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
type downloadLinkRequest struct {
result string
err error
done chan struct{}
}
func newDownloadLinkRequest() *downloadLinkRequest {
return &downloadLinkRequest{
done: make(chan struct{}),
}
}
func (r *downloadLinkRequest) Complete(result string, err error) {
r.result = result
r.err = err
close(r.done)
}
func (r *downloadLinkRequest) Wait() (string, error) {
<-r.done
return r.result, r.err
}
func (c *Cache) GetDownloadLink(torrentName, filename, fileLink string) (string, error) {
// Check link cache
if dl, err := c.checkDownloadLink(fileLink); dl != "" && err == nil {
return dl, nil
}
if req, inFlight := c.downloadLinkRequests.Load(fileLink); inFlight {
// Wait for the other request to complete and use its result
result := req.(*downloadLinkRequest)
return result.Wait()
}
// Create a new request object
req := newDownloadLinkRequest()
c.downloadLinkRequests.Store(fileLink, req)
dl, err := c.fetchDownloadLink(torrentName, filename, fileLink)
if err != nil {
req.Complete("", err)
c.downloadLinkRequests.Delete(fileLink)
return "", err
}
if dl == nil || dl.DownloadLink == "" {
err = fmt.Errorf("download link is empty for %s in torrent %s", filename, torrentName)
req.Complete("", err)
c.downloadLinkRequests.Delete(fileLink)
return "", err
}
req.Complete(dl.DownloadLink, err)
c.downloadLinkRequests.Delete(fileLink)
return dl.DownloadLink, err
}
func (c *Cache) fetchDownloadLink(torrentName, filename, fileLink string) (*types.DownloadLink, error) {
ct := c.GetTorrentByName(torrentName)
if ct == nil {
return nil, fmt.Errorf("torrent not found")
}
file, ok := ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in torrent %s", filename, torrentName)
}
if file.Link == "" {
// file link is empty, refresh the torrent to get restricted links
ct = c.refreshTorrent(file.TorrentId) // Refresh the torrent from the debrid
if ct == nil {
return nil, fmt.Errorf("failed to refresh torrent")
} else {
file, ok = ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in refreshed torrent %s", filename, torrentName)
}
}
}
// If file.Link is still empty, return
if file.Link == "" {
// Try to reinsert the torrent?
newCt, err := c.reInsertTorrent(ct)
if err != nil {
return nil, fmt.Errorf("failed to reinsert torrent. %w", err)
}
ct = newCt
file, ok = ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
}
}
c.logger.Trace().Msgf("Getting download link for %s(%s)", filename, file.Link)
downloadLink, err := c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
if errors.Is(err, utils.HosterUnavailableError) {
c.logger.Trace().
Str("filename", filename).
Str("torrent_id", ct.Id).
Msg("Hoster unavailable, attempting to reinsert torrent")
newCt, err := c.reInsertTorrent(ct)
if err != nil {
return nil, fmt.Errorf("failed to reinsert torrent: %w", err)
}
ct = newCt
file, ok = ct.GetFile(filename)
if !ok {
return nil, fmt.Errorf("file %s not found in reinserted torrent %s", filename, torrentName)
}
// Retry getting the download link
downloadLink, err = c.client.GetDownloadLink(ct.Torrent, &file)
if err != nil {
return nil, fmt.Errorf("retry failed to get download link: %w", err)
}
if downloadLink == nil {
return nil, fmt.Errorf("download link is empty after retry")
}
} else if errors.Is(err, utils.TrafficExceededError) {
// This is likely a fair usage limit error
return nil, err
} else {
return nil, fmt.Errorf("failed to get download link: %w", err)
}
}
if downloadLink == nil {
return nil, fmt.Errorf("download link is empty")
}
// Set link to cache
go c.client.Accounts().SetDownloadLink(fileLink, downloadLink)
return downloadLink, nil
}
func (c *Cache) GetFileDownloadLinks(t CachedTorrent) {
if err := c.client.GetFileDownloadLinks(t.Torrent); err != nil {
c.logger.Error().Err(err).Str("torrent", t.Name).Msg("Failed to generate download links")
return
}
}
func (c *Cache) checkDownloadLink(link string) (string, error) {
dl, err := c.client.Accounts().GetDownloadLink(link)
if err != nil {
return "", err
}
if !c.downloadLinkIsInvalid(dl.DownloadLink) {
return dl.DownloadLink, nil
}
return "", fmt.Errorf("download link not found for %s", link)
}
func (c *Cache) MarkDownloadLinkAsInvalid(link, downloadLink, reason string) {
c.invalidDownloadLinks.Store(downloadLink, reason)
// Remove the download api key from active
if reason == "bandwidth_exceeded" {
// Disable the account
_, account, err := c.client.Accounts().GetDownloadLinkWithAccount(link)
if err != nil {
return
}
c.client.Accounts().Disable(account)
}
}
func (c *Cache) downloadLinkIsInvalid(downloadLink string) bool {
if reason, ok := c.invalidDownloadLinks.Load(downloadLink); ok {
c.logger.Debug().Msgf("Download link %s is invalid: %s", downloadLink, reason)
return true
}
return false
}
func (c *Cache) GetDownloadByteRange(torrentName, filename string) (*[2]int64, error) {
ct := c.GetTorrentByName(torrentName)
if ct == nil {
return nil, fmt.Errorf("torrent not found")
}
file := ct.Files[filename]
return file.ByteRange, nil
}
func (c *Cache) GetTotalActiveDownloadLinks() int {
return c.client.Accounts().GetLinksCount()
}

View File

@@ -1 +0,0 @@
package store

View File

@@ -4,6 +4,13 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version"
"mime/multipart"
"net/http"
gourl "net/url"
@@ -14,23 +21,13 @@ import (
"strings"
"sync"
"time"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/version"
)
type Torbox struct {
name string
Host string `json:"host"`
APIKey string
accounts *types.Accounts
autoExpiresLinksAfter time.Duration
Name string
Host string `json:"host"`
APIKey string
accounts map[string]types.Account
DownloadUncached bool
client *request.Client
@@ -40,11 +37,7 @@ type Torbox struct {
addSamples bool
}
func (tb *Torbox) GetProfile() (*types.Profile, error) {
return nil, nil
}
func New(dc config.Debrid) (*Torbox, error) {
func New(dc config.Debrid) *Torbox {
rl := request.ParseRateLimit(dc.RateLimit)
headers := map[string]string{
@@ -58,31 +51,36 @@ func New(dc config.Debrid) (*Torbox, error) {
request.WithLogger(_log),
request.WithProxy(dc.Proxy),
)
autoExpiresLinksAfter, err := time.ParseDuration(dc.AutoExpireLinksAfter)
if autoExpiresLinksAfter == 0 || err != nil {
autoExpiresLinksAfter = 48 * time.Hour
accounts := make(map[string]types.Account)
for idx, key := range dc.DownloadAPIKeys {
id := strconv.Itoa(idx)
accounts[id] = types.Account{
Name: key,
ID: id,
Token: key,
}
}
return &Torbox{
name: "torbox",
Host: "https://api.torbox.app/v1",
APIKey: dc.APIKey,
accounts: types.NewAccounts(dc),
DownloadUncached: dc.DownloadUncached,
autoExpiresLinksAfter: autoExpiresLinksAfter,
client: client,
MountPath: dc.Folder,
logger: _log,
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}, nil
Name: "torbox",
Host: "https://api.torbox.app/v1",
APIKey: dc.APIKey,
accounts: accounts,
DownloadUncached: dc.DownloadUncached,
client: client,
MountPath: dc.Folder,
logger: _log,
checkCached: dc.CheckCached,
addSamples: dc.AddSamples,
}
}
func (tb *Torbox) Name() string {
return tb.name
func (tb *Torbox) GetName() string {
return tb.Name
}
func (tb *Torbox) Logger() zerolog.Logger {
func (tb *Torbox) GetLogger() zerolog.Logger {
return tb.logger
}
@@ -115,13 +113,13 @@ func (tb *Torbox) IsAvailable(hashes []string) map[string]bool {
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
tb.logger.Error().Err(err).Msgf("Error checking availability")
tb.logger.Info().Msgf("Error checking availability: %v", err)
return result
}
var res AvailableResponse
err = json.Unmarshal(resp, &res)
if err != nil {
tb.logger.Error().Err(err).Msgf("Error marshalling availability")
tb.logger.Info().Msgf("Error marshalling availability: %v", err)
return result
}
if res.Data == nil {
@@ -164,12 +162,12 @@ func (tb *Torbox) SubmitMagnet(torrent *types.Torrent) (*types.Torrent, error) {
torrentId := strconv.Itoa(dt.Id)
torrent.Id = torrentId
torrent.MountPath = tb.MountPath
torrent.Debrid = tb.name
torrent.Debrid = tb.Name
return torrent, nil
}
func (tb *Torbox) getTorboxStatus(status string, finished bool) string {
func getTorboxStatus(status string, finished bool) string {
if finished {
return "downloaded"
}
@@ -177,16 +175,12 @@ func (tb *Torbox) getTorboxStatus(status string, finished bool) string {
"checkingResumeData", "metaDL", "pausedUP", "queuedUP", "checkingUP",
"forcedUP", "allocating", "downloading", "metaDL", "pausedDL",
"queuedDL", "checkingDL", "forcedDL", "checkingResumeData", "moving"}
var determinedStatus string
switch {
case utils.Contains(downloading, status):
determinedStatus = "downloading"
return "downloading"
default:
determinedStatus = "error"
return "error"
}
return determinedStatus
}
func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
@@ -211,44 +205,30 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
Bytes: data.Size,
Folder: data.Name,
Progress: data.Progress * 100,
Status: tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
Status: getTorboxStatus(data.DownloadState, data.DownloadFinished),
Speed: data.DownloadSpeed,
Seeders: data.Seeds,
Filename: data.Name,
OriginalFilename: data.Name,
MountPath: tb.MountPath,
Debrid: tb.name,
Debrid: tb.Name,
Files: make(map[string]types.File),
Added: data.CreatedAt.Format(time.RFC3339),
}
cfg := config.Get()
totalFiles := 0
skippedSamples := 0
skippedFileType := 0
skippedSize := 0
validFiles := 0
filesWithLinks := 0
for _, f := range data.Files {
totalFiles++
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
skippedSamples++
// Skip sample files
continue
}
if !cfg.IsAllowedFile(fileName) {
skippedFileType++
continue
}
if !cfg.IsSizeAllowed(f.Size) {
skippedSize++
continue
}
validFiles++
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
@@ -256,26 +236,8 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
Size: f.Size,
Path: f.Name,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
filesWithLinks++
}
t.Files[fileName] = file
}
// Log summary only if there are issues or for debugging
tb.logger.Debug().
Str("torrent_id", t.Id).
Str("torrent_name", t.Name).
Bool("download_finished", data.DownloadFinished).
Str("status", t.Status).
Int("total_files", totalFiles).
Int("valid_files", validFiles).
Int("final_file_count", len(t.Files)).
Msg("Torrent file processing completed")
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
@@ -284,7 +246,7 @@ func (tb *Torbox) GetTorrent(torrentId string) (*types.Torrent, error) {
}
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
t.Debrid = tb.name
t.Debrid = tb.Name
return t, nil
}
@@ -303,33 +265,24 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
}
data := res.Data
name := data.Name
t.Name = name
t.Bytes = data.Size
t.Folder = name
t.Progress = data.Progress * 100
t.Status = tb.getTorboxStatus(data.DownloadState, data.DownloadFinished)
t.Status = getTorboxStatus(data.DownloadState, data.DownloadFinished)
t.Speed = data.DownloadSpeed
t.Seeders = data.Seeds
t.Filename = name
t.OriginalFilename = name
t.MountPath = tb.MountPath
t.Debrid = tb.name
// Clear existing files map to rebuild it
t.Files = make(map[string]types.File)
t.Debrid = tb.Name
cfg := config.Get()
validFiles := 0
filesWithLinks := 0
for _, f := range data.Files {
fileName := filepath.Base(f.Name)
if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
// Skip sample files
continue
}
if !cfg.IsAllowedFile(fileName) {
continue
}
@@ -337,8 +290,6 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
if !cfg.IsSizeAllowed(f.Size) {
continue
}
validFiles++
file := types.File{
TorrentId: t.Id,
Id: strconv.Itoa(f.Id),
@@ -346,16 +297,8 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
Size: f.Size,
Path: fileName,
}
// For downloaded torrents, set a placeholder link to indicate file is available
if data.DownloadFinished {
file.Link = fmt.Sprintf("torbox://%s/%s", t.Id, strconv.Itoa(f.Id))
filesWithLinks++
}
t.Files[fileName] = file
}
var cleanPath string
if len(t.Files) > 0 {
cleanPath = path.Clean(data.Files[0].Name)
@@ -364,11 +307,11 @@ func (tb *Torbox) UpdateTorrent(t *types.Torrent) error {
}
t.OriginalFilename = strings.Split(cleanPath, "/")[0]
t.Debrid = tb.name
t.Debrid = tb.Name
return nil
}
func (tb *Torbox) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
func (tb *Torbox) CheckStatus(torrent *types.Torrent, isSymlink bool) (*types.Torrent, error) {
for {
err := tb.UpdateTorrent(torrent)
@@ -378,7 +321,13 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
status := torrent.Status
if status == "downloaded" {
tb.logger.Info().Msgf("Torrent: %s downloaded", torrent.Name)
return torrent, nil
if !isSymlink {
err = tb.GenerateDownloadLinks(torrent)
if err != nil {
return torrent, err
}
}
break
} else if utils.Contains(tb.GetDownloadingStatus(), status) {
if !torrent.DownloadUncached {
return torrent, fmt.Errorf("torrent: %s not cached", torrent.Name)
@@ -391,6 +340,7 @@ func (tb *Torbox) CheckStatus(torrent *types.Torrent) (*types.Torrent, error) {
}
}
return torrent, nil
}
func (tb *Torbox) DeleteTorrent(torrentId string) error {
@@ -405,9 +355,8 @@ func (tb *Torbox) DeleteTorrent(torrentId string) error {
return nil
}
func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
func (tb *Torbox) GenerateDownloadLinks(t *types.Torrent) error {
filesCh := make(chan types.File, len(t.Files))
linkCh := make(chan *types.DownloadLink)
errCh := make(chan error, len(t.Files))
var wg sync.WaitGroup
@@ -420,17 +369,13 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
errCh <- err
return
}
if link != nil {
linkCh <- link
file.DownloadLink = link
}
file.DownloadLink = link
filesCh <- file
}()
}
go func() {
wg.Wait()
close(filesCh)
close(linkCh)
close(errCh)
}()
@@ -440,13 +385,6 @@ func (tb *Torbox) GetFileDownloadLinks(t *types.Torrent) error {
files[file.Name] = file
}
// Collect download links
for link := range linkCh {
if link != nil {
tb.accounts.SetDownloadLink(link.Link, link)
}
}
// Check for errors
for err := range errCh {
if err != nil {
@@ -465,153 +403,48 @@ func (tb *Torbox) GetDownloadLink(t *types.Torrent, file *types.File) (*types.Do
query.Add("token", tb.APIKey)
query.Add("file_id", file.Id)
url += "?" + query.Encode()
req, _ := http.NewRequest(http.MethodGet, url, nil)
resp, err := tb.client.MakeRequest(req)
if err != nil {
tb.logger.Error().
Err(err).
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to make request to Torbox API")
return nil, err
}
var data DownloadLinksResponse
if err = json.Unmarshal(resp, &data); err != nil {
tb.logger.Error().
Err(err).
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Failed to unmarshal Torbox API response")
return nil, err
}
if data.Data == nil {
tb.logger.Error().
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Bool("success", data.Success).
Interface("error", data.Error).
Str("detail", data.Detail).
Msg("Torbox API returned no data")
return nil, fmt.Errorf("error getting download links")
}
link := *data.Data
if link == "" {
tb.logger.Error().
Str("torrent_id", t.Id).
Str("file_id", file.Id).
Msg("Torbox API returned empty download link")
return nil, fmt.Errorf("error getting download links")
}
now := time.Now()
downloadLink := &types.DownloadLink{
return &types.DownloadLink{
Link: file.Link,
DownloadLink: link,
Id: file.Id,
Generated: now,
ExpiresAt: now.Add(tb.autoExpiresLinksAfter),
}
return downloadLink, nil
AccountId: "0",
Generated: time.Now(),
}, nil
}
func (tb *Torbox) GetDownloadingStatus() []string {
return []string{"downloading"}
}
func (tb *Torbox) GetCheckCached() bool {
return tb.checkCached
}
// GetTorrents fetches every torrent on the Torbox account and converts
// them to the internal Torrent representation. Per-torrent files are
// filtered by the global config (sample files, allowed extensions,
// allowed sizes); finished torrents get a placeholder torbox:// link
// per file to mark it as available.
func (tb *Torbox) GetTorrents() ([]*types.Torrent, error) {
	url := fmt.Sprintf("%s/api/torrents/mylist", tb.Host)
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	resp, err := tb.client.MakeRequest(req)
	if err != nil {
		return nil, err
	}
	var res TorrentsListResponse
	if err := json.Unmarshal(resp, &res); err != nil {
		return nil, err
	}
	if !res.Success || res.Data == nil {
		return nil, fmt.Errorf("torbox API error: %v", res.Error)
	}
	torrents := make([]*types.Torrent, 0, len(*res.Data))
	cfg := config.Get()
	for _, data := range *res.Data {
		t := &types.Torrent{
			Id:               strconv.Itoa(data.Id),
			Name:             data.Name,
			Bytes:            data.Size,
			Folder:           data.Name,
			Progress:         data.Progress * 100, // API reports a 0..1 fraction
			Status:           tb.getTorboxStatus(data.DownloadState, data.DownloadFinished),
			Speed:            data.DownloadSpeed,
			Seeders:          data.Seeds,
			Filename:         data.Name,
			OriginalFilename: data.Name,
			MountPath:        tb.MountPath,
			Debrid:           tb.name,
			Files:            make(map[string]types.File),
			Added:            data.CreatedAt.Format(time.RFC3339),
			InfoHash:         data.Hash,
		}
		// Process files, applying the configured filters.
		for _, f := range data.Files {
			fileName := filepath.Base(f.Name)
			if !tb.addSamples && utils.IsSampleFile(f.AbsolutePath) {
				// Skip sample files
				continue
			}
			if !cfg.IsAllowedFile(fileName) {
				continue
			}
			if !cfg.IsSizeAllowed(f.Size) {
				continue
			}
			file := types.File{
				TorrentId: t.Id,
				Id:        strconv.Itoa(f.Id),
				Name:      fileName,
				Size:      f.Size,
				Path:      f.Name,
			}
			// For downloaded torrents, set a placeholder link to indicate file is available
			if data.DownloadFinished {
				file.Link = fmt.Sprintf("torbox://%s/%d", t.Id, f.Id)
			}
			t.Files[fileName] = file
		}
		// Set original filename based on first file or torrent name.
		// t.Files is non-empty only when data.Files is, so the index is safe.
		var cleanPath string
		if len(t.Files) > 0 {
			cleanPath = path.Clean(data.Files[0].Name)
		} else {
			cleanPath = path.Clean(data.Name)
		}
		t.OriginalFilename = strings.Split(cleanPath, "/")[0]
		torrents = append(torrents, t)
	}
	return torrents, nil
}
// GetDownloadUncached reports whether uncached torrents may be sent to
// Torbox for downloading.
func (tb *Torbox) GetDownloadUncached() bool {
	return tb.DownloadUncached
}
func (tb *Torbox) GetDownloadLinks() (map[string]*types.DownloadLink, error) {
func (tb *Torbox) GetDownloads() (map[string]types.DownloadLink, error) {
return nil, nil
}
@@ -623,15 +456,13 @@ func (tb *Torbox) GetMountPath() string {
return tb.MountPath
}
// DisableAccount is a no-op for Torbox: only a single API key is used,
// so there is no per-account failover to disable.
func (tb *Torbox) DisableAccount(accountId string) {
}
// ResetActiveDownloadKeys is a no-op for Torbox (no rotating key pool).
func (tb *Torbox) ResetActiveDownloadKeys() {
}
// DeleteDownloadLink is a no-op for Torbox; links are not individually
// revocable through this client.
func (tb *Torbox) DeleteDownloadLink(linkId string) error {
	return nil
}
// GetAvailableSlots reports how many concurrent slots are free.
// Not yet supported for Torbox; always returns an error.
func (tb *Torbox) GetAvailableSlots() (int, error) {
	//TODO: Implement the logic to check available slots for Torbox
	return 0, fmt.Errorf("not implemented")
}
// Accounts returns the account pool (download tokens) for this client.
func (tb *Torbox) Accounts() *types.Accounts {
	return tb.accounts
}

View File

@@ -57,7 +57,7 @@ type torboxInfo struct {
} `json:"files"`
DownloadPath string `json:"download_path"`
InactiveCheck int `json:"inactive_check"`
Availability float64 `json:"availability"`
Availability int `json:"availability"`
DownloadFinished bool `json:"download_finished"`
Tracker interface{} `json:"tracker"`
TotalUploaded int `json:"total_uploaded"`
@@ -73,5 +73,3 @@ type torboxInfo struct {
type InfoResponse APIResponse[torboxInfo]
type DownloadLinksResponse APIResponse[string]
type TorrentsListResponse APIResponse[[]torboxInfo]

View File

@@ -1,243 +0,0 @@
package types
import (
"github.com/sirrobot01/decypharr/internal/config"
"sync"
"time"
)
// Accounts manages the pool of debrid accounts (one per configured
// download API key) and tracks which one is currently used to generate
// download links.
type Accounts struct {
	current  *Account   // account used for new links; nil when all are disabled
	accounts []*Account // all configured accounts, in config order
	mu       sync.RWMutex
}
// NewAccounts builds the account pool from the configured download API
// keys, skipping blank tokens. The first account (if any) becomes the
// current one.
func NewAccounts(debridConf config.Debrid) *Accounts {
	accounts := make([]*Account, 0)
	for idx, token := range debridConf.DownloadAPIKeys {
		if token == "" {
			continue
		}
		account := newAccount(debridConf.Name, token, idx)
		accounts = append(accounts, account)
	}
	var current *Account
	if len(accounts) > 0 {
		current = accounts[0]
	}
	return &Accounts{
		accounts: accounts,
		current:  current,
	}
}
// Account is a single debrid API token together with its per-account
// cache of generated download links.
type Account struct {
	Debrid   string // e.g., "realdebrid", "torbox", etc.
	Order    int    // position in the configured key list
	Disabled bool   // toggled by Accounts.Disable / Accounts.Reset
	Token    string
	links    map[string]*DownloadLink // keyed by normalized file link; guarded by mu
	mu       sync.RWMutex
}
// All returns every account that has not been disabled.
func (a *Accounts) All() []*Account {
	a.mu.RLock()
	defer a.mu.RUnlock()
	active := make([]*Account, 0, len(a.accounts))
	for _, account := range a.accounts {
		if account.Disabled {
			continue
		}
		active = append(active, account)
	}
	return active
}
// Current returns the account used for new download links. A cheap
// read-locked fast path is tried first; if no current account is set,
// it retries under the write lock (double-checked) and promotes the
// first non-disabled account. Returns nil when every account is
// disabled.
func (a *Accounts) Current() *Account {
	a.mu.RLock()
	if a.current != nil {
		current := a.current
		a.mu.RUnlock()
		return current
	}
	a.mu.RUnlock()
	a.mu.Lock()
	defer a.mu.Unlock()
	// Double-check after acquiring write lock
	if a.current != nil {
		return a.current
	}
	activeAccounts := make([]*Account, 0)
	for _, acc := range a.accounts {
		if !acc.Disabled {
			activeAccounts = append(activeAccounts, acc)
		}
	}
	if len(activeAccounts) > 0 {
		a.current = activeAccounts[0]
	}
	return a.current
}
// Disable marks the given account as unusable and, if it was the
// current one, promotes the first remaining enabled account (or nil
// when none are left).
func (a *Accounts) Disable(account *Account) {
	a.mu.Lock()
	defer a.mu.Unlock()
	account.disable()
	if a.current == account {
		var newCurrent *Account
		for _, acc := range a.accounts {
			if !acc.Disabled {
				newCurrent = acc
				break
			}
		}
		a.current = newCurrent
	}
}
// Reset clears every account's cached download links, re-enables all
// accounts, and restores the first account (if any) as current.
func (a *Accounts) Reset() {
	a.mu.Lock()
	defer a.mu.Unlock()
	for _, account := range a.accounts {
		account.resetDownloadLinks()
		account.Disabled = false
	}
	a.current = nil
	if len(a.accounts) > 0 {
		a.current = a.accounts[0]
	}
}
// GetDownloadLink looks up a cached download link for fileLink on the
// current account. It returns a sentinel error when there is no active
// account, no cached link, the link has expired (a zero ExpiresAt also
// counts as expired), or the link is empty.
//
// The current account is snapshotted once: the original called
// a.Current() twice, so a concurrent Disable could make the nil-check
// and the lookup hit different accounts. This also matches
// GetDownloadLinkWithAccount.
func (a *Accounts) GetDownloadLink(fileLink string) (*DownloadLink, error) {
	current := a.Current()
	if current == nil {
		return nil, NoActiveAccountsError
	}
	dl, ok := current.getLink(fileLink)
	if !ok {
		return nil, NoDownloadLinkError
	}
	if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
		return nil, DownloadLinkExpiredError
	}
	if dl.DownloadLink == "" {
		return nil, EmptyDownloadLinkError
	}
	return dl, nil
}
// GetDownloadLinkWithAccount is like GetDownloadLink but also returns
// the account the lookup ran against, so callers can disable or retry
// that specific account. On expired/empty links the account is still
// returned alongside the sentinel error.
func (a *Accounts) GetDownloadLinkWithAccount(fileLink string) (*DownloadLink, *Account, error) {
	currentAccount := a.Current()
	if currentAccount == nil {
		return nil, nil, NoActiveAccountsError
	}
	dl, ok := currentAccount.getLink(fileLink)
	if !ok {
		return nil, nil, NoDownloadLinkError
	}
	if dl.ExpiresAt.IsZero() || dl.ExpiresAt.Before(time.Now()) {
		return nil, currentAccount, DownloadLinkExpiredError
	}
	if dl.DownloadLink == "" {
		return nil, currentAccount, EmptyDownloadLinkError
	}
	return dl, currentAccount, nil
}
// SetDownloadLink caches dl under fileLink on the current account.
// No-op when every account is disabled. The current account is read
// once so the nil-check and the write cannot race onto different
// accounts (the original called a.Current() twice).
func (a *Accounts) SetDownloadLink(fileLink string, dl *DownloadLink) {
	current := a.Current()
	if current == nil {
		return
	}
	current.setLink(fileLink, dl)
}
// DeleteDownloadLink removes the cached link for fileLink from the
// current account. No-op when every account is disabled. The current
// account is read once to avoid the double-Current() race.
func (a *Accounts) DeleteDownloadLink(fileLink string) {
	current := a.Current()
	if current == nil {
		return
	}
	current.deleteLink(fileLink)
}
// GetLinksCount returns the number of links cached on the current
// account, or 0 when every account is disabled. The current account is
// read once to avoid the double-Current() race.
func (a *Accounts) GetLinksCount() int {
	current := a.Current()
	if current == nil {
		return 0
	}
	return current.LinksCount()
}
// SetDownloadLinks merges a batch of links into the current account's
// cache (expired entries are skipped by setLinks). No-op when every
// account is disabled. The current account is read once to avoid the
// double-Current() race.
func (a *Accounts) SetDownloadLinks(links map[string]*DownloadLink) {
	current := a.Current()
	if current == nil {
		return
	}
	current.setLinks(links)
}
// newAccount builds an Account for the given debrid provider, API
// token, and position in the configured key list, with an empty link
// cache.
func newAccount(debridName, token string, index int) *Account {
	account := &Account{
		Debrid: debridName,
		Token:  token,
		Order:  index,
	}
	account.links = make(map[string]*DownloadLink)
	return account
}
// getLink returns the cached link for fileLink (after key
// normalization) and whether it was present.
func (a *Account) getLink(fileLink string) (*DownloadLink, bool) {
	a.mu.RLock()
	defer a.mu.RUnlock()
	dl, ok := a.links[a.sliceFileLink(fileLink)]
	return dl, ok
}
// setLink stores dl under the normalized fileLink key.
func (a *Account) setLink(fileLink string, dl *DownloadLink) {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.links[a.sliceFileLink(fileLink)] = dl
}
// deleteLink removes the cached entry for the normalized fileLink key.
func (a *Account) deleteLink(fileLink string) {
	a.mu.Lock()
	defer a.mu.Unlock()
	delete(a.links, a.sliceFileLink(fileLink))
}
// resetDownloadLinks discards the whole link cache for this account.
func (a *Account) resetDownloadLinks() {
	a.mu.Lock()
	defer a.mu.Unlock()
	a.links = make(map[string]*DownloadLink)
}
// LinksCount returns the number of cached download links.
func (a *Account) LinksCount() int {
	a.mu.RLock()
	defer a.mu.RUnlock()
	return len(a.links)
}
// disable flags the account as unusable. Callers synchronize via
// Accounts.mu; the Disabled field itself is not guarded by Account.mu.
func (a *Account) disable() {
	a.Disabled = true
}
// setLinks merges a batch of download links into the cache, skipping
// any entry whose ExpiresAt is set and already in the past.
func (a *Account) setLinks(links map[string]*DownloadLink) {
	a.mu.Lock()
	defer a.mu.Unlock()
	cutoff := time.Now()
	for _, dl := range links {
		expired := !dl.ExpiresAt.IsZero() && dl.ExpiresAt.Before(cutoff)
		if expired {
			continue
		}
		a.links[a.sliceFileLink(dl.Link)] = dl
	}
}
// sliceFileLink normalizes a file link into a cache key. For
// Real-Debrid only the first 39 characters are kept (presumably the
// stable identifier prefix of the link — carried over from the
// original logic); other providers use the full link unchanged.
func (a *Account) sliceFileLink(fileLink string) string {
	if a.Debrid == "realdebrid" && len(fileLink) >= 39 {
		return fileLink[:39]
	}
	return fileLink
}

View File

@@ -6,23 +6,23 @@ import (
type Client interface {
SubmitMagnet(tr *Torrent) (*Torrent, error)
CheckStatus(tr *Torrent) (*Torrent, error)
GetFileDownloadLinks(tr *Torrent) error
CheckStatus(tr *Torrent, isSymlink bool) (*Torrent, error)
GenerateDownloadLinks(tr *Torrent) error
GetDownloadLink(tr *Torrent, file *File) (*DownloadLink, error)
DeleteTorrent(torrentId string) error
IsAvailable(infohashes []string) map[string]bool
GetCheckCached() bool
GetDownloadUncached() bool
UpdateTorrent(torrent *Torrent) error
GetTorrent(torrentId string) (*Torrent, error)
GetTorrents() ([]*Torrent, error)
Name() string
Logger() zerolog.Logger
GetName() string
GetLogger() zerolog.Logger
GetDownloadingStatus() []string
GetDownloadLinks() (map[string]*DownloadLink, error)
GetDownloads() (map[string]DownloadLink, error)
CheckLink(link string) error
GetMountPath() string
Accounts() *Accounts // Returns the active download account/token
DisableAccount(string)
ResetActiveDownloadKeys()
DeleteDownloadLink(linkId string) error
GetProfile() (*Profile, error)
GetAvailableSlots() (int, error)
}

View File

@@ -1,30 +0,0 @@
package types
// Error is a sentinel error with a machine-readable code, used for the
// account/link lookup failures below.
type Error struct {
	Message string `json:"message"`
	Code    string `json:"code"`
}
// Error implements the error interface.
func (e *Error) Error() string {
	return e.Message
}
// Sentinel errors returned by Accounts/Account link lookups. Callers
// compare against these (by identity) to decide whether to refresh a
// link or fail over to another account, so the names must stay stable
// even though Go convention would prefer an ErrXxx prefix.
var NoActiveAccountsError = &Error{
	Message: "No active accounts",
	Code:    "no_active_accounts",
}
var NoDownloadLinkError = &Error{
	Message: "No download link found",
	Code:    "no_download_link",
}
var DownloadLinkExpiredError = &Error{
	Message: "Download link expired",
	Code:    "download_link_expired",
}
var EmptyDownloadLinkError = &Error{
	Message: "Download link is empty",
	Code:    "empty_download_link",
}

View File

@@ -2,14 +2,13 @@ package types
import (
"fmt"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"os"
"path/filepath"
"sync"
"time"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
)
type Torrent struct {
@@ -34,12 +33,24 @@ type Torrent struct {
Debrid string `json:"debrid"`
Arr *arr.Arr `json:"arr"`
Arr *arr.Arr `json:"arr"`
Mu sync.Mutex `json:"-"`
SizeDownloaded int64 `json:"-"` // This is used for local download
DownloadUncached bool `json:"-"`
}
SizeDownloaded int64 `json:"-"` // This is used for local download
DownloadUncached bool `json:"-"`
type DownloadLink struct {
Filename string `json:"filename"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
Generated time.Time `json:"generated"`
Size int64 `json:"size"`
Id string `json:"id"`
AccountId string `json:"account_id"`
}
sync.Mutex
func (d *DownloadLink) String() string {
return d.DownloadLink
}
func (t *Torrent) GetSymlinkFolder(parent string) string {
@@ -88,14 +99,12 @@ type File struct {
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
IsRar bool `json:"is_rar"`
ByteRange *[2]int64 `json:"byte_range,omitempty"`
Path string `json:"path"`
Link string `json:"link"`
DownloadLink *DownloadLink `json:"-"`
AccountId string `json:"account_id"`
Generated time.Time `json:"generated"`
Deleted bool `json:"deleted"`
DownloadLink *DownloadLink `json:"-"`
}
func (t *Torrent) Cleanup(remove bool) {
@@ -107,38 +116,9 @@ func (t *Torrent) Cleanup(remove bool) {
}
}
type IngestData struct {
Debrid string `json:"debrid"`
Name string `json:"name"`
Hash string `json:"hash"`
Size int64 `json:"size"`
}
type Profile struct {
Name string `json:"name"`
Id int64 `json:"id"`
Username string `json:"username"`
Email string `json:"email"`
Points int64 `json:"points"`
Type string `json:"type"`
Premium int `json:"premium"`
Expiration time.Time `json:"expiration"`
LibrarySize int `json:"library_size"`
BadTorrents int `json:"bad_torrents"`
ActiveLinks int `json:"active_links"`
}
type DownloadLink struct {
Filename string `json:"filename"`
Link string `json:"link"`
DownloadLink string `json:"download_link"`
Generated time.Time `json:"generated"`
Size int64 `json:"size"`
Id string `json:"id"`
ExpiresAt time.Time
}
func (d *DownloadLink) String() string {
return d.DownloadLink
type Account struct {
ID string `json:"id"`
Disabled bool `json:"disabled"`
Name string `json:"name"`
Token string `json:"token"`
}

View File

@@ -1,138 +0,0 @@
package qbit
import (
"context"
"encoding/base64"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"strings"
)
// contextKey is a private type for request-context keys so values set
// by this package cannot collide with keys from other packages.
type contextKey string

const (
	categoryKey contextKey = "category" // torrent category (string)
	hashesKey   contextKey = "hashes"   // torrent info-hashes ([]string)
	arrKey      contextKey = "arr"      // resolved *arr.Arr for the request
)
// getCategory pulls the torrent category out of the request context,
// returning "" when none was stored.
func getCategory(ctx context.Context) string {
	category, ok := ctx.Value(categoryKey).(string)
	if !ok {
		return ""
	}
	return category
}
// getHashes pulls the info-hash list out of the request context,
// returning nil when none was stored.
func getHashes(ctx context.Context) []string {
	hashes, ok := ctx.Value(hashesKey).([]string)
	if !ok {
		return nil
	}
	return hashes
}
// getArrFromContext pulls the Arr instance stored by authContext out of
// the request context, or nil when none was stored.
func getArrFromContext(ctx context.Context) *arr.Arr {
	if a, ok := ctx.Value(arrKey).(*arr.Arr); ok {
		return a
	}
	return nil
}
func decodeAuthHeader(header string) (string, string, error) {
encodedTokens := strings.Split(header, " ")
if len(encodedTokens) != 2 {
return "", "", nil
}
encodedToken := encodedTokens[1]
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
if err != nil {
return "", "", err
}
bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":")
host := bearer[:colonIndex]
token := bearer[colonIndex+1:]
return host, token, nil
}
// categoryContext extracts the torrent category from (in order) the URL
// query, the URL-encoded form, or the multipart form, and stores the
// trimmed value in the request context under categoryKey.
func (q *QBit) categoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): strings.Trim with an empty cutset is a no-op;
		// the TrimSpace below does the actual trimming.
		category := strings.Trim(r.URL.Query().Get("category"), "")
		if category == "" {
			// Get from form
			_ = r.ParseForm()
			category = r.Form.Get("category")
			if category == "" {
				// Get from multipart form (32 MiB in-memory cap)
				_ = r.ParseMultipartForm(32 << 20)
				category = r.FormValue("category")
			}
		}
		ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// authContext creates a middleware that extracts the Arr host and token
// from the Authorization header and adds the resolved Arr to the
// request context under arrKey. This is used to identify the Arr
// instance for the request. Only a valid host and token will be added
// to the context/config; invalid hosts fall through silently so manual
// configuration is not clobbered.
func (q *QBit) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
		category := getCategory(r.Context())
		arrs := store.Get().Arr()
		// Check if arr exists
		a := arrs.Get(category)
		if a == nil {
			// Arr is not configured, create a new one
			downloadUncached := false
			a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
		}
		// Only overwrite host/token when the header decoded cleanly and
		// the values are non-empty.
		if err == nil {
			host = strings.TrimSpace(host)
			if host != "" {
				a.Host = host
			}
			token = strings.TrimSpace(token)
			if token != "" {
				a.Token = token
			}
		}
		a.Source = "auto"
		if err := utils.ValidateServiceURL(a.Host); err != nil {
			// Return silently, no need to raise a problem. Just do not add the Arr to the context/config.json
			next.ServeHTTP(w, r)
			return
		}
		arrs.AddOrUpdate(a)
		ctx := context.WithValue(r.Context(), arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// hashesContext parses the pipe-separated "hashes" URL parameter
// (falling back to the form field), trims each entry, and stores the
// slice in the request context under hashesKey.
func hashesContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_hashes := chi.URLParam(r, "hashes")
		var hashes []string
		if _hashes != "" {
			hashes = strings.Split(_hashes, "|")
		}
		if hashes == nil {
			// Get hashes from form
			_ = r.ParseForm()
			hashes = r.Form["hashes"]
		}
		for i, hash := range hashes {
			hashes[i] = strings.TrimSpace(hash)
		}
		ctx := context.WithValue(r.Context(), hashesKey, hashes)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

View File

@@ -1,31 +1,23 @@
package store
package qbit
import (
"fmt"
"github.com/cavaliergopher/grab/v3"
"github.com/sirrobot01/decypharr/internal/utils"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"io"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/cavaliergopher/grab/v3"
"github.com/sirrobot01/decypharr/internal/utils"
)
func grabber(client *grab.Client, url, filename string, byterange *[2]int64, progressCallback func(int64, int64)) error {
func Download(client *grab.Client, url, filename string, progressCallback func(int64, int64)) error {
req, err := grab.NewRequest(filename, url)
if err != nil {
return err
}
// Set byte range if specified
if byterange != nil {
byterangeStr := fmt.Sprintf("%d-%d", byterange[0], byterange[1])
req.HTTPRequest.Header.Set("Range", "bytes="+byterangeStr)
}
resp := client.Do(req)
t := time.NewTicker(time.Second * 2)
@@ -57,35 +49,37 @@ Loop:
return resp.Err()
}
func (s *Store) processDownload(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
s.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
torrentPath := filepath.Join(torrent.SavePath, utils.RemoveExtension(debridTorrent.OriginalFilename))
func (q *QBit) ProcessManualFile(torrent *Torrent) (string, error) {
debridTorrent := torrent.DebridTorrent
q.logger.Info().Msgf("Downloading %d files...", len(debridTorrent.Files))
torrentPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, utils.RemoveExtension(debridTorrent.OriginalFilename))
torrentPath = utils.RemoveInvalidChars(torrentPath)
err := os.MkdirAll(torrentPath, os.ModePerm)
if err != nil {
// add the previous error to the error and return
// add previous error to the error and return
return "", fmt.Errorf("failed to create directory: %s: %v", torrentPath, err)
}
s.downloadFiles(torrent, debridTorrent, torrentPath)
q.downloadFiles(torrent, torrentPath)
return torrentPath, nil
}
func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, parent string) {
func (q *QBit) downloadFiles(torrent *Torrent, parent string) {
debridTorrent := torrent.DebridTorrent
var wg sync.WaitGroup
totalSize := int64(0)
for _, file := range debridTorrent.GetFiles() {
totalSize += file.Size
}
debridTorrent.Lock()
debridTorrent.Mu.Lock()
debridTorrent.SizeDownloaded = 0 // Reset downloaded bytes
debridTorrent.Progress = 0 // Reset progress
debridTorrent.Unlock()
debridTorrent.Mu.Unlock()
progressCallback := func(downloaded int64, speed int64) {
debridTorrent.Lock()
defer debridTorrent.Unlock()
torrent.Lock()
defer torrent.Unlock()
debridTorrent.Mu.Lock()
defer debridTorrent.Mu.Unlock()
torrent.Mu.Lock()
defer torrent.Mu.Unlock()
// Update total downloaded bytes
debridTorrent.SizeDownloaded += downloaded
@@ -95,7 +89,7 @@ func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, pa
if totalSize > 0 {
debridTorrent.Progress = float64(debridTorrent.SizeDownloaded) / float64(totalSize) * 100
}
s.partialTorrentUpdate(torrent, debridTorrent)
q.UpdateTorrentMin(torrent, debridTorrent)
}
client := &grab.Client{
UserAgent: "Decypharr[QBitTorrent]",
@@ -108,29 +102,28 @@ func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, pa
errChan := make(chan error, len(debridTorrent.Files))
for _, file := range debridTorrent.GetFiles() {
if file.DownloadLink == nil {
s.logger.Info().Msgf("No download link found for %s", file.Name)
q.logger.Info().Msgf("No download link found for %s", file.Name)
continue
}
wg.Add(1)
s.downloadSemaphore <- struct{}{}
go func(file types.File) {
q.downloadSemaphore <- struct{}{}
go func(file debridTypes.File) {
defer wg.Done()
defer func() { <-s.downloadSemaphore }()
defer func() { <-q.downloadSemaphore }()
filename := file.Name
err := grabber(
err := Download(
client,
file.DownloadLink.DownloadLink,
filepath.Join(parent, filename),
file.ByteRange,
progressCallback,
)
if err != nil {
s.logger.Error().Msgf("Failed to download %s: %v", filename, err)
q.logger.Error().Msgf("Failed to download %s: %v", filename, err)
errChan <- err
} else {
s.logger.Info().Msgf("Downloaded %s", filename)
q.logger.Info().Msgf("Downloaded %s", filename)
}
}(file)
}
@@ -144,20 +137,21 @@ func (s *Store) downloadFiles(torrent *Torrent, debridTorrent *types.Torrent, pa
}
}
if len(errors) > 0 {
s.logger.Error().Msgf("Errors occurred during download: %v", errors)
q.logger.Error().Msgf("Errors occurred during download: %v", errors)
return
}
s.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
q.logger.Info().Msgf("Downloaded all files for %s", debridTorrent.Name)
}
func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (string, error) {
func (q *QBit) ProcessSymlink(torrent *Torrent) (string, error) {
debridTorrent := torrent.DebridTorrent
files := debridTorrent.Files
if len(files) == 0 {
return "", fmt.Errorf("no video files found")
}
s.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
q.logger.Info().Msgf("Checking symlinks for %d files...", len(files))
rCloneBase := debridTorrent.MountPath
torrentPath, err := s.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
torrentPath, err := q.getTorrentPath(rCloneBase, debridTorrent) // /MyTVShow/
// This returns filename.ext for alldebrid instead of the parent folder filename/
torrentFolder := torrentPath
if err != nil {
@@ -170,7 +164,7 @@ func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (
torrentFolder = utils.RemoveExtension(torrentFolder)
torrentRclonePath = rCloneBase // /mnt/rclone/magnets/ // Remove the filename since it's in the root folder
}
torrentSymlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
torrentSymlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err = os.MkdirAll(torrentSymlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", torrentSymlinkPath, err)
@@ -189,19 +183,19 @@ func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (
return nil
})
if err != nil {
s.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
q.logger.Warn().Msgf("Error while scanning rclone path: %v", err)
}
pending := make(map[string]types.File)
pending := make(map[string]debridTypes.File)
for _, file := range files {
if realRelPath, ok := realPaths[file.Name]; ok {
file.Path = realRelPath
}
pending[file.Path] = file
}
ticker := time.NewTicker(200 * time.Millisecond)
defer ticker.Stop()
timeout := time.After(30 * time.Minute)
filePaths := make([]string, 0, len(pending))
@@ -213,43 +207,43 @@ func (s *Store) processSymlink(torrent *Torrent, debridTorrent *types.Torrent) (
if _, err := os.Stat(fullFilePath); !os.IsNotExist(err) {
fileSymlinkPath := filepath.Join(torrentSymlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
s.logger.Warn().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(pending, path)
s.logger.Info().Msgf("File is ready: %s", file.Name)
q.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(pending))
return torrentSymlinkPath, fmt.Errorf("timeout waiting for files: %d files still pending", len(pending))
}
}
if s.skipPreCache {
if q.SkipPreCache {
return torrentSymlinkPath, nil
}
go func() {
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
if err := utils.PreCacheFile(filePaths); err != nil {
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
s.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
q.logger.Trace().Msgf("Pre-cached %d files", len(filePaths))
}
}()
return torrentSymlinkPath, nil
}
func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torrent, rclonePath, torrentFolder string) (string, error) {
func (q *QBit) createSymlinksWebdav(debridTorrent *debridTypes.Torrent, rclonePath, torrentFolder string) (string, error) {
files := debridTorrent.Files
symlinkPath := filepath.Join(torrent.SavePath, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
symlinkPath := filepath.Join(q.DownloadFolder, debridTorrent.Arr.Name, torrentFolder) // /mnt/symlinks/{category}/MyTVShow/
err := os.MkdirAll(symlinkPath, os.ModePerm)
if err != nil {
return "", fmt.Errorf("failed to create directory: %s: %v", symlinkPath, err)
}
remainingFiles := make(map[string]types.File)
remainingFiles := make(map[string]debridTypes.File)
for _, file := range files {
remainingFiles[file.Name] = file
}
@@ -275,44 +269,107 @@ func (s *Store) createSymlinksWebdav(torrent *Torrent, debridTorrent *types.Torr
fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
s.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
q.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
} else {
filePaths = append(filePaths, fileSymlinkPath)
delete(remainingFiles, filename)
s.logger.Info().Msgf("File is ready: %s", file.Name)
q.logger.Info().Msgf("File is ready: %s", file.Name)
}
}
}
case <-timeout:
s.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
q.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
return symlinkPath, fmt.Errorf("timeout waiting for files")
}
}
if s.skipPreCache {
if q.SkipPreCache {
return symlinkPath, nil
}
go func() {
s.logger.Debug().Msgf("Pre-caching %s", debridTorrent.Name)
if err := utils.PreCacheFile(filePaths); err != nil {
s.logger.Error().Msgf("Failed to pre-cache file: %s", err)
if err := q.preCacheFile(debridTorrent.Name, filePaths); err != nil {
q.logger.Error().Msgf("Failed to pre-cache file: %s", err)
} else {
s.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
q.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
}
}() // Pre-cache the files in the background
// Pre-cache the first 256KB and 1MB of the file
return symlinkPath, nil
}
func (s *Store) getTorrentPath(rclonePath string, debridTorrent *types.Torrent) (string, error) {
func (q *QBit) getTorrentPath(rclonePath string, debridTorrent *debridTypes.Torrent) (string, error) {
for {
torrentPath, err := debridTorrent.GetMountFolder(rclonePath)
if err == nil {
s.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
q.logger.Debug().Msgf("Found torrent path: %s", torrentPath)
return torrentPath, err
}
time.Sleep(100 * time.Millisecond)
}
}
// preCacheFile warms the OS page cache for each symlinked file by
// reading its header (first 256KB) and a 64KB window at the 1MB offset,
// so the first playback/scan does not stall on cold storage. Files that
// no longer exist (already moved by an arr) are skipped silently.
func (q *QBit) preCacheFile(name string, filePaths []string) error {
	q.logger.Trace().Msgf("Pre-caching torrent: %s", name)
	if len(filePaths) == 0 {
		return fmt.Errorf("no file paths provided")
	}
	for _, filePath := range filePaths {
		// Per-file closure so defer file.Close() runs each iteration,
		// not at function exit.
		err := func(f string) error {
			file, err := os.Open(f)
			if err != nil {
				if os.IsNotExist(err) {
					// File has probably been moved by arr, return silently
					return nil
				}
				return fmt.Errorf("failed to open file: %s: %v", f, err)
			}
			defer file.Close()
			// Pre-cache the file header (first 256KB) using 16KB chunks.
			if err := q.readSmallChunks(file, 0, 256*1024, 16*1024); err != nil {
				return err
			}
			// Also warm 64KB at the 1MB offset.
			if err := q.readSmallChunks(file, 1024*1024, 64*1024, 16*1024); err != nil {
				return err
			}
			return nil
		}(filePath)
		if err != nil {
			return err
		}
	}
	return nil
}
// readSmallChunks reads up to totalToRead bytes from file starting at
// startPos, in pieces of at most chunkSize bytes, discarding the data.
// Its only purpose is to pull the byte range into the OS page cache;
// hitting EOF early is not an error.
func (q *QBit) readSmallChunks(file *os.File, startPos int64, totalToRead int, chunkSize int) error {
	if _, err := file.Seek(startPos, 0); err != nil {
		return err
	}
	scratch := make([]byte, chunkSize)
	remaining := totalToRead
	for remaining > 0 {
		want := chunkSize
		if remaining < chunkSize {
			want = remaining
		}
		n, err := file.Read(scratch[:want])
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		remaining -= n
	}
	return nil
}

View File

@@ -1,24 +1,114 @@
package qbit
import (
"context"
"encoding/base64"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/service"
"net/http"
"path/filepath"
"strings"
)
func decodeAuthHeader(header string) (string, string, error) {
encodedTokens := strings.Split(header, " ")
if len(encodedTokens) != 2 {
return "", "", nil
}
encodedToken := encodedTokens[1]
bytes, err := base64.StdEncoding.DecodeString(encodedToken)
if err != nil {
return "", "", err
}
bearer := string(bytes)
colonIndex := strings.LastIndex(bearer, ":")
host := bearer[:colonIndex]
token := bearer[colonIndex+1:]
return host, token, nil
}
// CategoryContext extracts the torrent category from the URL query,
// URL-encoded form, or multipart form (in that order) and stores the
// trimmed value in the request context under the "category" key.
// NOTE(review): a plain string context key triggers go vet's
// SA1029/collision warning; handlers read the same string key, so
// changing it requires touching all readers.
func (q *QBit) CategoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		category := strings.Trim(r.URL.Query().Get("category"), "")
		if category == "" {
			// Get from form
			_ = r.ParseForm()
			category = r.Form.Get("category")
			if category == "" {
				// Get from multipart form (32 MiB in-memory cap)
				_ = r.ParseMultipartForm(32 << 20)
				category = r.FormValue("category")
			}
		}
		ctx := context.WithValue(r.Context(), "category", strings.TrimSpace(category))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// authContext extracts the Arr host and token from the Authorization
// header, resolves (or creates) the Arr for the request's category, and
// stores it in the context under the "arr" key.
// NOTE(review): the unchecked type assertion on "category" panics if
// CategoryContext did not run first — this middleware depends on that
// ordering.
func (q *QBit) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		host, token, err := decodeAuthHeader(r.Header.Get("Authorization"))
		category := r.Context().Value("category").(string)
		svc := service.GetService()
		// Check if arr exists
		a := svc.Arr.Get(category)
		if a == nil {
			downloadUncached := false
			a = arr.New(category, "", "", false, false, &downloadUncached)
		}
		// Only overwrite host/token when the header decoded cleanly.
		if err == nil {
			host = strings.TrimSpace(host)
			if host != "" {
				a.Host = host
			}
			token = strings.TrimSpace(token)
			if token != "" {
				a.Token = token
			}
		}
		svc.Arr.AddOrUpdate(a)
		ctx := context.WithValue(r.Context(), "arr", a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// HashesCtx parses the pipe-separated "hashes" URL parameter (falling
// back to the form field), trims each entry, and stores the slice in
// the request context under the "hashes" key.
func HashesCtx(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_hashes := chi.URLParam(r, "hashes")
		var hashes []string
		if _hashes != "" {
			hashes = strings.Split(_hashes, "|")
		}
		if hashes == nil {
			// Get hashes from form
			_ = r.ParseForm()
			hashes = r.Form["hashes"]
		}
		for i, hash := range hashes {
			hashes[i] = strings.TrimSpace(hash)
		}
		ctx := context.WithValue(r.Context(), "hashes", hashes)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
func (q *QBit) handleLogin(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
_arr := getArrFromContext(ctx)
_arr := ctx.Value("arr").(*arr.Arr)
if _arr == nil {
// Arr not in context, return OK
// No arr
_, _ = w.Write([]byte("Ok."))
return
}
if err := _arr.Validate(); err != nil {
q.logger.Error().Err(err).Msgf("Error validating arr")
http.Error(w, "Invalid arr configuration", http.StatusBadRequest)
q.logger.Info().Msgf("Error validating arr: %v", err)
}
_, _ = w.Write([]byte("Ok."))
}
@@ -32,7 +122,7 @@ func (q *QBit) handleWebAPIVersion(w http.ResponseWriter, r *http.Request) {
}
func (q *QBit) handlePreferences(w http.ResponseWriter, r *http.Request) {
preferences := getAppPreferences()
preferences := NewAppPreferences()
preferences.WebUiUsername = q.Username
preferences.SavePath = q.DownloadFolder
@@ -60,10 +150,10 @@ func (q *QBit) handleShutdown(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsInfo(w http.ResponseWriter, r *http.Request) {
//log all url params
ctx := r.Context()
category := getCategory(ctx)
category := ctx.Value("category").(string)
filter := strings.Trim(r.URL.Query().Get("filter"), "")
hashes := getHashes(ctx)
torrents := q.storage.GetAllSorted(category, filter, hashes, "added_on", false)
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.Storage.GetAllSorted(category, filter, hashes, "added_on", false)
request.JSONResponse(w, torrents, http.StatusOK)
}
@@ -74,13 +164,13 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
contentType := r.Header.Get("Content-Type")
if strings.Contains(contentType, "multipart/form-data") {
if err := r.ParseMultipartForm(32 << 20); err != nil {
q.logger.Error().Err(err).Msgf("Error parsing multipart form")
q.logger.Info().Msgf("Error parsing multipart form: %v", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
} else if strings.Contains(contentType, "application/x-www-form-urlencoded") {
if err := r.ParseForm(); err != nil {
q.logger.Error().Err(err).Msgf("Error parsing form")
q.logger.Info().Msgf("Error parsing form: %v", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -89,18 +179,10 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
return
}
action := "symlink"
if strings.ToLower(r.FormValue("sequentialDownload")) == "true" {
action = "download"
}
debridName := r.FormValue("debrid")
isSymlink := strings.ToLower(r.FormValue("sequentialDownload")) != "true"
category := r.FormValue("category")
_arr := getArrFromContext(ctx)
if _arr == nil {
// Arr is not in context
_arr = arr.New(category, "", "", false, false, nil, "", "")
}
atleastOne := false
ctx = context.WithValue(ctx, "isSymlink", isSymlink)
// Handle magnet URLs
if urls := r.FormValue("urls"); urls != "" {
@@ -109,8 +191,8 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
urlList = append(urlList, strings.TrimSpace(u))
}
for _, url := range urlList {
if err := q.addMagnet(ctx, url, _arr, debridName, action); err != nil {
q.logger.Debug().Msgf("Error adding magnet: %s", err.Error())
if err := q.AddMagnet(ctx, url, category); err != nil {
q.logger.Info().Msgf("Error adding magnet: %v", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -122,8 +204,8 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
if r.MultipartForm != nil && r.MultipartForm.File != nil {
if files := r.MultipartForm.File["torrents"]; len(files) > 0 {
for _, fileHeader := range files {
if err := q.addTorrent(ctx, fileHeader, _arr, debridName, action); err != nil {
q.logger.Debug().Err(err).Msgf("Error adding torrent")
if err := q.AddTorrent(ctx, fileHeader, category); err != nil {
q.logger.Info().Msgf("Error adding torrent: %v", err)
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
@@ -142,14 +224,14 @@ func (q *QBit) handleTorrentsAdd(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes := getHashes(ctx)
hashes, _ := ctx.Value("hashes").([]string)
if len(hashes) == 0 {
http.Error(w, "No hashes provided", http.StatusBadRequest)
return
}
category := getCategory(ctx)
category := ctx.Value("category").(string)
for _, hash := range hashes {
q.storage.Delete(hash, category, false)
q.Storage.Delete(hash, category, false)
}
w.WriteHeader(http.StatusOK)
@@ -157,10 +239,10 @@ func (q *QBit) handleTorrentsDelete(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes := getHashes(ctx)
category := getCategory(ctx)
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
for _, hash := range hashes {
torrent := q.storage.Get(hash, category)
torrent := q.Storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -172,10 +254,10 @@ func (q *QBit) handleTorrentsPause(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes := getHashes(ctx)
category := getCategory(ctx)
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
for _, hash := range hashes {
torrent := q.storage.Get(hash, category)
torrent := q.Storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -187,10 +269,10 @@ func (q *QBit) handleTorrentsResume(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentRecheck(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hashes := getHashes(ctx)
category := getCategory(ctx)
hashes, _ := ctx.Value("hashes").([]string)
category := ctx.Value("category").(string)
for _, hash := range hashes {
torrent := q.storage.Get(hash, category)
torrent := q.Storage.Get(hash, category)
if torrent == nil {
continue
}
@@ -233,7 +315,7 @@ func (q *QBit) handleCreateCategory(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hash := r.URL.Query().Get("hash")
torrent := q.storage.Get(hash, getCategory(ctx))
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
properties := q.GetTorrentProperties(torrent)
request.JSONResponse(w, properties, http.StatusOK)
@@ -242,21 +324,22 @@ func (q *QBit) handleTorrentProperties(w http.ResponseWriter, r *http.Request) {
func (q *QBit) handleTorrentFiles(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
hash := r.URL.Query().Get("hash")
torrent := q.storage.Get(hash, getCategory(ctx))
torrent := q.Storage.Get(hash, ctx.Value("category").(string))
if torrent == nil {
return
}
request.JSONResponse(w, torrent.Files, http.StatusOK)
files := q.GetTorrentFiles(torrent)
request.JSONResponse(w, files, http.StatusOK)
}
func (q *QBit) handleSetCategory(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
category := getCategory(ctx)
hashes := getHashes(ctx)
torrents := q.storage.GetAll("", "", hashes)
category := ctx.Value("category").(string)
hashes, _ := ctx.Value("hashes").([]string)
torrents := q.Storage.GetAll("", "", hashes)
for _, torrent := range torrents {
torrent.Category = category
q.storage.AddOrUpdate(torrent)
q.Storage.AddOrUpdate(torrent)
}
request.JSONResponse(w, nil, http.StatusOK)
}
@@ -268,14 +351,14 @@ func (q *QBit) handleAddTorrentTags(w http.ResponseWriter, r *http.Request) {
return
}
ctx := r.Context()
hashes := getHashes(ctx)
hashes, _ := ctx.Value("hashes").([]string)
tags := strings.Split(r.FormValue("tags"), ",")
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
torrents := q.storage.GetAll("", "", hashes)
torrents := q.Storage.GetAll("", "", hashes)
for _, t := range torrents {
q.setTorrentTags(t, tags)
q.SetTorrentTags(t, tags)
}
request.JSONResponse(w, nil, http.StatusOK)
}
@@ -287,14 +370,14 @@ func (q *QBit) handleRemoveTorrentTags(w http.ResponseWriter, r *http.Request) {
return
}
ctx := r.Context()
hashes := getHashes(ctx)
hashes, _ := ctx.Value("hashes").([]string)
tags := strings.Split(r.FormValue("tags"), ",")
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
torrents := q.storage.GetAll("", "", hashes)
torrents := q.Storage.GetAll("", "", hashes)
for _, torrent := range torrents {
q.removeTorrentTags(torrent, tags)
q.RemoveTorrentTags(torrent, tags)
}
request.JSONResponse(w, nil, http.StatusOK)
@@ -314,6 +397,6 @@ func (q *QBit) handleCreateTags(w http.ResponseWriter, r *http.Request) {
for i, tag := range tags {
tags[i] = strings.TrimSpace(tag)
}
q.addTags(tags)
q.AddTags(tags)
request.JSONResponse(w, nil, http.StatusOK)
}

80
pkg/qbit/import.go Normal file
View File

@@ -0,0 +1,80 @@
package qbit
import (
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/sirrobot01/decypharr/internal/utils"
	"github.com/sirrobot01/decypharr/pkg/arr"
	"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
	"github.com/sirrobot01/decypharr/pkg/service"
)
// ImportRequest describes one torrent import job: the parsed magnet, the
// target Arr instance, and lifecycle bookkeeping (queued -> completed/failed).
type ImportRequest struct {
	ID        string        `json:"id"`
	Path      string        `json:"path"`
	Magnet    *utils.Magnet `json:"magnet"`
	Arr       *arr.Arr      `json:"arr"`
	IsSymlink bool          `json:"isSymlink"` // symlink into the mount instead of downloading
	// SeriesId/Seasons/Episodes scope the import to a subset of a series
	// (presumably Sonarr-style selection — TODO confirm against callers).
	SeriesId         int      `json:"series"`
	Seasons          []int    `json:"seasons"`
	Episodes         []string `json:"episodes"`
	DownloadUncached bool     `json:"downloadUncached"`
	// Failure/completion bookkeeping; set via Fail and Complete.
	Failed      bool      `json:"failed"`
	FailedAt    time.Time `json:"failedAt"`
	Reason      string    `json:"reason"`
	Completed   bool      `json:"completed"`
	CompletedAt time.Time `json:"completedAt"`
	Async       bool      `json:"async"`
}
// ManualImportResponseSchema mirrors the command/queue response returned by
// an Arr (Sonarr/Radarr) when a manual import is triggered — TODO confirm
// which endpoint populates this.
type ManualImportResponseSchema struct {
	Priority            string    `json:"priority"`
	Status              string    `json:"status"`
	Result              string    `json:"result"`
	Queued              time.Time `json:"queued"`
	Trigger             string    `json:"trigger"`
	SendUpdatesToClient bool      `json:"sendUpdatesToClient"`
	UpdateScheduledTask bool      `json:"updateScheduledTask"`
	Id                  int       `json:"id"`
}
// NewImportRequest builds a fresh, not-yet-processed ImportRequest for the
// given magnet and Arr, assigning it a random UUID.
func NewImportRequest(magnet *utils.Magnet, arr *arr.Arr, isSymlink, downloadUncached bool) *ImportRequest {
	req := &ImportRequest{
		ID:     uuid.NewString(),
		Magnet: magnet,
		Arr:    arr,
	}
	req.IsSymlink = isSymlink
	req.DownloadUncached = downloadUncached
	// Failed, Completed and Async start as their zero value (false).
	return req
}
// Fail marks the request as failed at the current time, recording why.
func (i *ImportRequest) Fail(reason string) {
	i.Failed, i.FailedAt, i.Reason = true, time.Now(), reason
}
// Complete marks the request as successfully finished at the current time.
func (i *ImportRequest) Complete() {
	i.Completed, i.CompletedAt = true, time.Now()
}
// Process sends the request's magnet to the debrid service and, on success,
// registers the resulting torrent in the QBit storage and kicks off
// asynchronous file processing (symlinking/downloading). It mirrors
// QBit.Process but is driven by a manual ImportRequest.
func (i *ImportRequest) Process(q *QBit) (err error) {
	// Use this for now.
	// This sends the torrent to the arr
	svc := service.GetService()
	torrent := createTorrentFromMagnet(i.Magnet, i.Arr.Name, "manual")
	debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, i.Magnet, i.Arr, i.IsSymlink, i.DownloadUncached)
	if err != nil {
		return err
	}
	// Guard against a nil torrent returned with a nil error, consistent with
	// QBit.Process, so downstream code never dereferences nil.
	if debridTorrent == nil {
		return fmt.Errorf("failed to process torrent")
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.Storage.AddOrUpdate(torrent)
	// File processing polls the debrid service and can be slow; run it in
	// the background and return immediately.
	go q.ProcessFiles(torrent, debridTorrent, i.Arr, i.IsSymlink)
	return nil
}

View File

@@ -1,21 +1,18 @@
package store
package qbit
import (
"os"
"path/filepath"
"github.com/sirrobot01/decypharr/internal/utils"
"strings"
)
func createTorrentFromMagnet(req *ImportRequest) *Torrent {
magnet := req.Magnet
arrName := req.Arr.Name
func createTorrentFromMagnet(magnet *utils.Magnet, category, source string) *Torrent {
torrent := &Torrent{
ID: req.Id,
ID: "",
Hash: strings.ToLower(magnet.InfoHash),
Name: magnet.Name,
Size: magnet.Size,
Category: arrName,
Source: string(req.Type),
Category: category,
Source: source,
State: "downloading",
MagnetUri: magnet.Link,
@@ -25,7 +22,6 @@ func createTorrentFromMagnet(req *ImportRequest) *Torrent {
AutoTmm: false,
Ratio: 1,
RatioLimit: 1,
SavePath: filepath.Join(req.DownloadFolder, arrName) + string(os.PathSeparator),
}
return torrent
}

View File

@@ -1,41 +1,52 @@
package qbit
import (
"cmp"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"os"
"path/filepath"
)
type QBit struct {
Username string
Password string
DownloadFolder string
Categories []string
storage *store.TorrentStorage
logger zerolog.Logger
Tags []string
Username string `json:"username"`
Password string `json:"password"`
Port string `json:"port"`
DownloadFolder string `json:"download_folder"`
Categories []string `json:"categories"`
Storage *TorrentStorage
logger zerolog.Logger
Tags []string
RefreshInterval int
SkipPreCache bool
downloadSemaphore chan struct{}
}
func New() *QBit {
cfg := config.Get()
qbitCfg := cfg.QBitTorrent
if qbitCfg == nil {
return nil
}
_cfg := config.Get()
cfg := _cfg.QBitTorrent
port := cmp.Or(_cfg.Port, os.Getenv("QBIT_PORT"), "8282")
refreshInterval := cmp.Or(cfg.RefreshInterval, 10)
return &QBit{
Username: qbitCfg.Username,
Password: qbitCfg.Password,
DownloadFolder: qbitCfg.DownloadFolder,
Categories: qbitCfg.Categories,
storage: store.Get().Torrents(),
logger: logger.New("qbit"),
Username: cfg.Username,
Password: cfg.Password,
Port: port,
DownloadFolder: cfg.DownloadFolder,
Categories: cfg.Categories,
Storage: NewTorrentStorage(filepath.Join(_cfg.Path, "torrents.json")),
logger: logger.New("qbit"),
RefreshInterval: refreshInterval,
SkipPreCache: cfg.SkipPreCache,
downloadSemaphore: make(chan struct{}, cmp.Or(cfg.MaxDownloads, 5)),
}
}
func (q *QBit) Reset() {
if q.storage != nil {
q.storage.Reset()
if q.Storage != nil {
q.Storage.Reset()
}
q.Tags = nil
close(q.downloadSemaphore)
}

View File

@@ -7,12 +7,12 @@ import (
func (q *QBit) Routes() http.Handler {
r := chi.NewRouter()
r.Use(q.categoryContext)
r.Use(q.CategoryContext)
r.Group(func(r chi.Router) {
r.Use(q.authContext)
r.Post("/auth/login", q.handleLogin)
r.Route("/torrents", func(r chi.Router) {
r.Use(hashesContext)
r.Use(HashesCtx)
r.Get("/info", q.handleTorrentsInfo)
r.Post("/add", q.handleTorrentsAdd)
r.Post("/delete", q.handleTorrentsDelete)

View File

@@ -1,15 +1,18 @@
package store
package qbit
import (
"encoding/json"
"fmt"
"github.com/sirrobot01/decypharr/pkg/service"
"os"
"sort"
"sync"
"time"
)
func keyPair(hash, category string) string {
if category == "" {
category = "uncategorized"
}
return fmt.Sprintf("%s|%s", hash, category)
}
@@ -33,13 +36,13 @@ func loadTorrentsFromJSON(filename string) (Torrents, error) {
return torrents, nil
}
func newTorrentStorage(filename string) *TorrentStorage {
func NewTorrentStorage(filename string) *TorrentStorage {
// Open the JSON file and read the data
torrents, err := loadTorrentsFromJSON(filename)
if err != nil {
torrents = make(Torrents)
}
// Create a new Storage
// Create a new TorrentStorage
return &TorrentStorage{
torrents: torrents,
filename: filename,
@@ -183,18 +186,13 @@ func (ts *TorrentStorage) Delete(hash, category string, removeFromDebrid bool) {
if torrent == nil {
return
}
st := Get()
// Check if torrent is queued for download
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
dbClient := st.debrid.Client(torrent.Debrid)
if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" {
dbClient := service.GetDebrid().GetClient(torrent.Debrid)
if dbClient != nil {
_ = dbClient.DeleteTorrent(torrent.DebridID)
err := dbClient.DeleteTorrent(torrent.ID)
if err != nil {
fmt.Println(err)
}
}
}
@@ -220,21 +218,14 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
defer ts.mu.Unlock()
toDelete := make(map[string]string)
st := Get()
for _, hash := range hashes {
for key, torrent := range ts.torrents {
if torrent == nil {
continue
}
if torrent.State == "queued" && torrent.ID != "" {
// Remove the torrent from the import queue if it exists
st.importsQueue.Delete(torrent.ID)
}
if torrent.Hash == hash {
if removeFromDebrid && torrent.DebridID != "" && torrent.Debrid != "" {
toDelete[torrent.DebridID] = torrent.Debrid
if removeFromDebrid && torrent.ID != "" && torrent.Debrid != "" {
toDelete[torrent.ID] = torrent.Debrid
}
delete(ts.torrents, key)
if torrent.ContentPath != "" {
@@ -253,12 +244,10 @@ func (ts *TorrentStorage) DeleteMultiple(hashes []string, removeFromDebrid bool)
}
}()
clients := st.debrid.Clients()
go func() {
for id, debrid := range toDelete {
dbClient, ok := clients[debrid]
if !ok {
dbClient := service.GetDebrid().GetClient(debrid)
if dbClient == nil {
continue
}
err := dbClient.DeleteTorrent(id)
@@ -289,22 +278,3 @@ func (ts *TorrentStorage) Reset() {
defer ts.mu.Unlock()
ts.torrents = make(Torrents)
}
// GetStalledTorrents returns torrents that look stalled: still attached to a
// debrid provider (DebridID set), in the "downloading" state, with zero seeds
// and zero progress, and added longer ago than removeAfter.
func (ts *TorrentStorage) GetStalledTorrents(removeAfter time.Duration) []*Torrent {
	ts.mu.RLock()
	defer ts.mu.RUnlock()
	now := time.Now()
	stalled := make([]*Torrent, 0)
	for _, t := range ts.torrents {
		if t.DebridID == "" || t.State != "downloading" || t.NumSeeds != 0 || t.Progress != 0 {
			continue
		}
		// Only report it once it has been idle past the grace period.
		if now.Sub(time.Unix(t.AddedOn, 0)) > removeAfter {
			stalled = append(stalled, t)
		}
	}
	return stalled
}

View File

@@ -1,35 +1,38 @@
package qbit
import (
"cmp"
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/service"
"io"
"mime/multipart"
"os"
"path/filepath"
"strings"
"time"
)
// All torrent-related helpers goes here
func (q *QBit) addMagnet(ctx context.Context, url string, arr *arr.Arr, debrid string, action string) error {
// All torrent related helpers goes here
func (q *QBit) AddMagnet(ctx context.Context, url, category string) error {
magnet, err := utils.GetMagnetFromUrl(url)
if err != nil {
return fmt.Errorf("error parsing magnet link: %w", err)
}
_store := store.Get()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
err = q.Process(ctx, magnet, category)
if err != nil {
return fmt.Errorf("failed to process torrent: %w", err)
}
return nil
}
func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader, arr *arr.Arr, debrid string, action string) error {
func (q *QBit) AddTorrent(ctx context.Context, fileHeader *multipart.FileHeader, category string) error {
file, _ := fileHeader.Open()
defer file.Close()
var reader io.Reader = file
@@ -37,28 +40,226 @@ func (q *QBit) addTorrent(ctx context.Context, fileHeader *multipart.FileHeader,
if err != nil {
return fmt.Errorf("error reading file: %s \n %w", fileHeader.Filename, err)
}
_store := store.Get()
importReq := store.NewImportRequest(debrid, q.DownloadFolder, magnet, arr, action, false, "", store.ImportTypeQBitTorrent)
err = _store.AddTorrent(ctx, importReq)
err = q.Process(ctx, magnet, category)
if err != nil {
return fmt.Errorf("failed to process torrent: %w", err)
}
return nil
}
func (q *QBit) ResumeTorrent(t *store.Torrent) bool {
// Process sends a parsed magnet to the debrid provider and registers the
// resulting torrent under the given category. The "arr" and "isSymlink"
// values are read from the request context (set by the qbit middleware).
func (q *QBit) Process(ctx context.Context, magnet *utils.Magnet, category string) error {
	svc := service.GetService()
	torrent := createTorrentFromMagnet(magnet, category, "auto")
	a, ok := ctx.Value("arr").(*arr.Arr)
	if !ok {
		return fmt.Errorf("arr not found in context")
	}
	// Comma-ok assertion so a missing or mistyped context value degrades to
	// an error instead of panicking the handler goroutine (the bare
	// .(bool) assertion would panic when the middleware didn't set it).
	isSymlink, ok := ctx.Value("isSymlink").(bool)
	if !ok {
		return fmt.Errorf("isSymlink not found in context")
	}
	debridTorrent, err := debrid.ProcessTorrent(svc.Debrid, magnet, a, isSymlink, false)
	if err != nil || debridTorrent == nil {
		if err == nil {
			err = fmt.Errorf("failed to process torrent")
		}
		return err
	}
	torrent = q.UpdateTorrentMin(torrent, debridTorrent)
	q.Storage.AddOrUpdate(torrent)
	go q.ProcessFiles(torrent, debridTorrent, a, isSymlink) // We can send async for file processing not to delay the response
	return nil
}
// ProcessFiles polls the debrid provider until the torrent leaves the
// downloading state, then materializes it locally — either as symlinks into
// the (webdav/rclone) mount or as a real download — and finally notifies the
// Arr and Discord. Intended to run in its own goroutine; it returns nothing
// and reports failures via MarkAsFailed.
func (q *QBit) ProcessFiles(torrent *Torrent, debridTorrent *debridTypes.Torrent, arr *arr.Arr, isSymlink bool) {
	svc := service.GetService()
	client := svc.Debrid.GetClient(debridTorrent.Debrid)
	downloadingStatuses := client.GetDownloadingStatus()
	// Poll the provider every RefreshInterval seconds until the torrent is
	// no longer in a downloading status.
	for debridTorrent.Status != "downloaded" {
		q.logger.Debug().Msgf("%s <- (%s) Download Progress: %.2f%%", debridTorrent.Debrid, debridTorrent.Name, debridTorrent.Progress)
		dbT, err := client.CheckStatus(debridTorrent, isSymlink)
		if err != nil {
			if dbT != nil && dbT.Id != "" {
				// Delete the torrent if it was not downloaded
				go func() {
					_ = client.DeleteTorrent(dbT.Id)
				}()
			}
			q.logger.Error().Msgf("Error checking status: %v", err)
			q.MarkAsFailed(torrent)
			// Best-effort Arr refresh so the failure is picked up promptly.
			go func() {
				if err := arr.Refresh(); err != nil {
					q.logger.Error().Msgf("Error refreshing arr: %v", err)
				}
			}()
			return
		}
		debridTorrent = dbT
		torrent = q.UpdateTorrentMin(torrent, debridTorrent)
		// Exit the loop for downloading statuses to prevent memory buildup
		// NOTE(review): a non-downloading terminal status (e.g. an error
		// state) also breaks here and still falls through to symlinking —
		// confirm that is intended.
		if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
			break
		}
		if !utils.Contains(client.GetDownloadingStatus(), debridTorrent.Status) {
			break
		}
		time.Sleep(time.Duration(q.RefreshInterval) * time.Second)
	}
	var torrentSymlinkPath string
	var err error
	debridTorrent.Arr = arr
	// Check if debrid supports webdav by checking cache
	timer := time.Now()
	if isSymlink {
		cache, useWebdav := svc.Debrid.Caches[debridTorrent.Debrid]
		if useWebdav {
			q.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
			// Use webdav to download the file
			if err := cache.AddTorrent(debridTorrent); err != nil {
				q.logger.Error().Msgf("Error adding torrent to cache: %v", err)
				q.MarkAsFailed(torrent)
				return
			}
			rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
			torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
			torrentSymlinkPath, err = q.createSymlinksWebdav(debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
		} else {
			// User is using either zurg or debrid webdav
			torrentSymlinkPath, err = q.ProcessSymlink(torrent) // /mnt/symlinks/{category}/MyTVShow/
		}
	} else {
		torrentSymlinkPath, err = q.ProcessManualFile(torrent)
	}
	if err != nil {
		// Local materialization failed: mark failed and clean up remotely.
		q.MarkAsFailed(torrent)
		go func() {
			_ = client.DeleteTorrent(debridTorrent.Id)
		}()
		q.logger.Info().Msgf("Error: %v", err)
		return
	}
	torrent.TorrentPath = torrentSymlinkPath
	// Blocks until the torrent is fully ready (see UpdateTorrent).
	q.UpdateTorrent(torrent, debridTorrent)
	q.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
	go func() {
		if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
			q.logger.Error().Msgf("Error sending discord message: %v", err)
		}
	}()
	if err := arr.Refresh(); err != nil {
		q.logger.Error().Msgf("Error refreshing arr: %v", err)
	}
}
// MarkAsFailed flips the torrent into the "error" state, persists it, and
// fires a best-effort Discord notification in the background. Returns t for
// chaining.
func (q *QBit) MarkAsFailed(t *Torrent) *Torrent {
	t.State = "error"
	q.Storage.AddOrUpdate(t)
	go func() {
		err := request.SendDiscordMessage("download_failed", "error", t.discordContext())
		if err != nil {
			q.logger.Error().Msgf("Error sending discord message: %v", err)
		}
	}()
	return t
}
// UpdateTorrentMin copies the live debrid state onto the qBittorrent view t
// (progress, sizes, speeds, paths) without contacting the provider. It is a
// no-op when debridTorrent is nil, and returns t for chaining.
func (q *QBit) UpdateTorrentMin(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	added, parseErr := time.Parse(time.RFC3339, debridTorrent.Added)
	if parseErr != nil {
		// Unparsable provider timestamp: fall back to "now".
		added = time.Now()
	}
	total := debridTorrent.Bytes
	fraction := cmp.Or(debridTorrent.Progress, 0.0) / 100.0
	done := int64(float64(total) * fraction)
	speed := debridTorrent.Speed
	var eta int
	if speed != 0 {
		// Seconds remaining at the current transfer rate.
		eta = int((total - done) / speed)
	}
	t.ID = debridTorrent.Id
	t.Name = debridTorrent.Name
	t.AddedOn = added.Unix()
	t.DebridTorrent = debridTorrent
	t.Debrid = debridTorrent.Debrid
	t.Size = total
	t.Completed = done
	t.Downloaded = done
	t.DownloadedSession = done
	// Seed stats are mirrored from the download side (debrid never seeds).
	t.Uploaded = done
	t.UploadedSession = done
	t.AmountLeft = total - done
	t.Progress = fraction
	t.Eta = eta
	t.Dlspeed = speed
	t.Upspeed = speed
	t.SavePath = filepath.Join(q.DownloadFolder, t.Category) + string(os.PathSeparator)
	t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
	return t
}
// UpdateTorrent refreshes t from the debrid provider and blocks (polling
// every 100ms) until the torrent is ready — final path resolved and transfer
// complete — or a 10-minute deadline expires, whichever comes first.
// Returns t either way.
//
// Fixes over the previous version: the old code armed time.After inside the
// select loop, creating a fresh 10-minute timer on every 100ms tick, so the
// timeout could never fire; it also called itself recursively on each tick,
// growing the stack and nesting tickers. This version is iterative with one
// deadline timer.
func (q *QBit) UpdateTorrent(t *Torrent, debridTorrent *debridTypes.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	// refresh pulls the latest provider state (unless already downloaded)
	// and mirrors it onto t.
	refresh := func() {
		if debridClient := service.GetDebrid().GetClient(debridTorrent.Debrid); debridClient != nil {
			if debridTorrent.Status != "downloaded" {
				_ = debridClient.UpdateTorrent(debridTorrent)
			}
		}
		t = q.UpdateTorrentMin(t, debridTorrent)
		t.ContentPath = t.TorrentPath + string(os.PathSeparator)
	}
	refresh()
	if t.IsReady() {
		t.State = "pausedUP"
		q.Storage.Update(t)
		return t
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	deadline := time.NewTimer(10 * time.Minute)
	defer deadline.Stop()
	for {
		select {
		case <-ticker.C:
			refresh()
			if t.IsReady() {
				t.State = "pausedUP"
				q.Storage.Update(t)
				return t
			}
		case <-deadline.C:
			// Give up waiting; caller gets the last observed state.
			return t
		}
	}
}
func (q *QBit) ResumeTorrent(t *Torrent) bool {
return true
}
func (q *QBit) PauseTorrent(t *store.Torrent) bool {
func (q *QBit) PauseTorrent(t *Torrent) bool {
return true
}
func (q *QBit) RefreshTorrent(t *store.Torrent) bool {
func (q *QBit) RefreshTorrent(t *Torrent) bool {
return true
}
func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
func (q *QBit) GetTorrentProperties(t *Torrent) *TorrentProperties {
return &TorrentProperties{
AdditionDate: t.AddedOn,
Comment: "Debrid Blackhole <https://github.com/sirrobot01/decypharr>",
@@ -83,7 +284,21 @@ func (q *QBit) GetTorrentProperties(t *store.Torrent) *TorrentProperties {
}
}
func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
// GetTorrentFiles converts the debrid torrent's file list into the
// qBittorrent API file representation. It returns an empty (non-nil) slice
// when no debrid torrent is attached yet.
func (q *QBit) GetTorrentFiles(t *Torrent) []*TorrentFile {
	if t.DebridTorrent == nil {
		return make([]*TorrentFile, 0)
	}
	debridFiles := t.DebridTorrent.GetFiles()
	files := make([]*TorrentFile, 0, len(debridFiles))
	for _, f := range debridFiles {
		files = append(files, &TorrentFile{
			Name: f.Path,
			Size: f.Size,
		})
	}
	return files
}
func (q *QBit) SetTorrentTags(t *Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
for _, tag := range tags {
if tag == "" {
@@ -97,20 +312,20 @@ func (q *QBit) setTorrentTags(t *store.Torrent, tags []string) bool {
}
}
t.Tags = strings.Join(torrentTags, ",")
q.storage.Update(t)
q.Storage.Update(t)
return true
}
func (q *QBit) removeTorrentTags(t *store.Torrent, tags []string) bool {
func (q *QBit) RemoveTorrentTags(t *Torrent, tags []string) bool {
torrentTags := strings.Split(t.Tags, ",")
newTorrentTags := utils.RemoveItem(torrentTags, tags...)
q.Tags = utils.RemoveItem(q.Tags, tags...)
t.Tags = strings.Join(newTorrentTags, ",")
q.storage.Update(t)
q.Storage.Update(t)
return true
}
func (q *QBit) addTags(tags []string) bool {
func (q *QBit) AddTags(tags []string) bool {
for _, tag := range tags {
if tag == "" {
continue
@@ -121,3 +336,8 @@ func (q *QBit) addTags(tags []string) bool {
}
return true
}
// RemoveTags drops the given tags from the server-wide tag list. It always
// reports success, matching qBittorrent API expectations.
func (q *QBit) RemoveTags(tags []string) bool {
	q.Tags = utils.RemoveItem(q.Tags, tags...)
	return true
}

View File

@@ -1,5 +1,11 @@
package qbit
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"sync"
)
type BuildInfo struct {
Libtorrent string `json:"libtorrent"`
Bitness int `json:"bitness"`
@@ -166,6 +172,76 @@ type TorrentCategory struct {
SavePath string `json:"savePath"`
}
// Torrent is the qBittorrent-API view of a managed download, backed by a live
// debrid torrent. JSON tags match the field names qBittorrent clients
// (Sonarr/Radarr, WebUIs) read from /torrents/info.
type Torrent struct {
	ID            string         `json:"id"`     // debrid-side torrent id (set in UpdateTorrentMin)
	DebridTorrent *types.Torrent `json:"-"`      // live provider state; never serialized
	Debrid        string         `json:"debrid"` // provider name the torrent lives on
	TorrentPath   string         `json:"-"`      // final local symlink/file path once processed
	AddedOn       int64          `json:"added_on,omitempty"`
	AmountLeft    int64          `json:"amount_left"`
	AutoTmm       bool           `json:"auto_tmm"`
	Availability  float64        `json:"availability,omitempty"`
	Category      string         `json:"category,omitempty"` // doubles as the Arr name
	Completed     int64          `json:"completed"`
	CompletionOn  int            `json:"completion_on,omitempty"`
	ContentPath   string         `json:"content_path"`
	DlLimit       int            `json:"dl_limit"`
	Dlspeed       int64          `json:"dlspeed"`
	Downloaded    int64          `json:"downloaded"`
	DownloadedSession int64      `json:"downloaded_session"`
	Eta           int            `json:"eta"`
	FlPiecePrio   bool           `json:"f_l_piece_prio,omitempty"`
	ForceStart    bool           `json:"force_start,omitempty"`
	Hash          string         `json:"hash"`
	LastActivity  int64          `json:"last_activity,omitempty"`
	MagnetUri     string         `json:"magnet_uri,omitempty"`
	MaxRatio      int            `json:"max_ratio,omitempty"`
	MaxSeedingTime int           `json:"max_seeding_time,omitempty"`
	Name          string         `json:"name,omitempty"`
	NumComplete   int            `json:"num_complete,omitempty"`
	NumIncomplete int            `json:"num_incomplete,omitempty"`
	NumLeechs     int            `json:"num_leechs,omitempty"`
	NumSeeds      int            `json:"num_seeds,omitempty"`
	Priority      int            `json:"priority,omitempty"`
	Progress      float64        `json:"progress"` // fraction in [0, 1], not a percentage
	Ratio         int            `json:"ratio,omitempty"`
	RatioLimit    int            `json:"ratio_limit,omitempty"`
	SavePath      string         `json:"save_path"`
	SeedingTimeLimit int         `json:"seeding_time_limit,omitempty"`
	SeenComplete  int64          `json:"seen_complete,omitempty"`
	SeqDl         bool           `json:"seq_dl"`
	Size          int64          `json:"size,omitempty"`
	State         string         `json:"state,omitempty"` // qBittorrent state string, e.g. "downloading", "pausedUP", "error"
	SuperSeeding  bool           `json:"super_seeding"`
	Tags          string         `json:"tags,omitempty"` // comma-separated
	TimeActive    int            `json:"time_active,omitempty"`
	TotalSize     int64          `json:"total_size,omitempty"`
	Tracker       string         `json:"tracker,omitempty"`
	UpLimit       int64          `json:"up_limit,omitempty"`
	Uploaded      int64          `json:"uploaded,omitempty"`
	UploadedSession int64        `json:"uploaded_session,omitempty"`
	Upspeed       int64          `json:"upspeed,omitempty"`
	Source        string         `json:"source,omitempty"` // "auto" (qbit API) or "manual" (import)
	// Mu guards concurrent mutation. NOTE: embedding a Mutex makes Torrent
	// non-copyable — always pass *Torrent.
	Mu sync.Mutex `json:"-"`
}
// IsReady reports whether the torrent has finished transferring (nothing
// left, or progress at 100%) and its final local path has been resolved.
func (t *Torrent) IsReady() bool {
	if t.TorrentPath == "" {
		return false
	}
	return t.AmountLeft <= 0 || t.Progress == 1
}
// discordContext renders the torrent's identifying fields as a Markdown
// snippet for Discord notifications. Category holds the Arr name, hence the
// **Arr:** label.
func (t *Torrent) discordContext() string {
	format := `
	**Name:** %s
	**Arr:** %s
	**Hash:** %s
	**MagnetURI:** %s
	**Debrid:** %s
	`
	return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid)
}
type TorrentProperties struct {
AdditionDate int64 `json:"addition_date,omitempty"`
Comment string `json:"comment,omitempty"`
@@ -202,7 +278,18 @@ type TorrentProperties struct {
UpSpeedAvg int `json:"up_speed_avg,omitempty"`
}
func getAppPreferences() *AppPreferences {
// TorrentFile mirrors one entry of qBittorrent's /torrents/files response.
// Only Name and Size are populated by GetTorrentFiles; the remaining fields
// exist for API-shape compatibility and serialize as zero values.
type TorrentFile struct {
	Index        int     `json:"index,omitempty"`
	Name         string  `json:"name,omitempty"` // file path within the torrent
	Size         int64   `json:"size,omitempty"` // bytes
	Progress     int     `json:"progress,omitempty"`
	Priority     int     `json:"priority,omitempty"`
	IsSeed       bool    `json:"is_seed,omitempty"`
	PieceRange   []int   `json:"piece_range,omitempty"`
	Availability float64 `json:"availability,omitempty"`
}
func NewAppPreferences() *AppPreferences {
preferences := &AppPreferences{
AddTrackers: "",
AddTrackersEnabled: false,

View File

@@ -1,686 +0,0 @@
// Source: https://github.com/eliasbenb/RARAR.py
// Note that this code only translates the original Python for RAR3 (not RAR5) support.
package rar
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"strings"
"time"
"unicode/utf8"
)
// Constants from the Python code
var (
	// Chunk sizes
	DefaultChunkSize = 4096
	HttpChunkSize    = 32768
	MaxSearchSize    = 1 << 20 // 1MB — hard cap when scanning for the RAR marker

	// RAR marker and block types (RAR3 on-disk format)
	Rar3Marker  = []byte{0x52, 0x61, 0x72, 0x21, 0x1A, 0x07, 0x00} // the bytes "Rar!\x1a\x07\x00"
	BlockFile   = byte(0x74)
	BlockHeader = byte(0x73)
	BlockMarker = byte(0x72)
	BlockEnd    = byte(0x7B)

	// Header flags
	FlagDirectory      = 0xE0
	FlagHasHighSize    = 0x100
	FlagHasUnicodeName = 0x200
	FlagHasData        = 0x8000
)

// CompressionMethods maps the RAR3 method byte to a human-readable name.
var CompressionMethods = map[byte]string{
	0x30: "Store",
	0x31: "Fastest",
	0x32: "Fast",
	0x33: "Normal",
	0x34: "Good",
	0x35: "Best",
}

// Error definitions
var (
	ErrMarkerNotFound               = errors.New("RAR marker not found within search limit")
	ErrInvalidFormat                = errors.New("invalid RAR format")
	ErrNetworkError                 = errors.New("network error")
	ErrRangeRequestsNotSupported    = errors.New("server does not support range requests")
	ErrCompressionNotSupported      = errors.New("compression method not supported")
	ErrDirectoryExtractNotSupported = errors.New("directory extract not supported")
)
// Name returns the final path component of the archived file, treating both
// '/' and '\' as separators (RAR paths may use either).
func (f *File) Name() string {
	idx := strings.LastIndexAny(f.Path, "\\/")
	if idx < 0 {
		return f.Path
	}
	return f.Path[idx+1:]
}
// ByteRange returns the inclusive [start, end] byte offsets of this file's
// compressed data within the archive.
func (f *File) ByteRange() *[2]int64 {
	start := f.DataOffset
	end := start + f.CompressedSize - 1
	return &[2]int64{start, end}
}
// NewHttpFile probes url with a HEAD request to learn its size and returns a
// ReaderAt-style wrapper that reads it via HTTP range requests.
// NOTE(review): the client has no Timeout set — presumably intentional for
// large range reads, but worth confirming.
func NewHttpFile(url string) (*HttpFile, error) {
	f := &HttpFile{
		URL:        url,
		Position:   0,
		Client:     &http.Client{},
		MaxRetries: 3,
		RetryDelay: time.Second,
	}
	// Get file size
	size, err := f.getFileSize()
	if err != nil {
		return nil, fmt.Errorf("failed to get file size: %w", err)
	}
	f.FileSize = size
	return f, nil
}
// doWithRetry runs operation, retrying up to f.MaxRetries additional times
// with exponential backoff plus jitter. Only errors wrapping ErrNetworkError
// are retried; any other error aborts immediately and is returned as-is.
func (f *HttpFile) doWithRetry(operation func() (interface{}, error)) (interface{}, error) {
	var lastErr error
	for attempt := 0; attempt <= f.MaxRetries; attempt++ {
		if attempt > 0 {
			// Exponential backoff: RetryDelay * 2^(attempt-1).
			delay := f.RetryDelay * time.Duration(1<<uint(attempt-1))
			// Add up to 25% jitter. Guard the divisor: rand.Int63n panics
			// when n <= 0, which the old code hit for RetryDelay < 4ns.
			if quarter := int64(delay / 4); quarter > 0 {
				delay += time.Duration(rand.Int63n(quarter))
			}
			time.Sleep(delay)
		}
		result, err := operation()
		if err == nil {
			return result, nil
		}
		lastErr = err
		// Only retry on network errors
		if !errors.Is(err, ErrNetworkError) {
			return nil, err
		}
	}
	return nil, fmt.Errorf("after %d retries: %w", f.MaxRetries, lastErr)
}
// getFileSize gets the total file size from the server via a HEAD request,
// retrying transient network errors through doWithRetry. A missing
// Content-Length is an error, since subsequent range math needs the size.
func (f *HttpFile) getFileSize() (int64, error) {
	result, err := f.doWithRetry(func() (interface{}, error) {
		resp, err := f.Client.Head(f.URL)
		if err != nil {
			// Wrap as ErrNetworkError so doWithRetry retries it.
			return int64(0), fmt.Errorf("%w: %v", ErrNetworkError, err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return int64(0), fmt.Errorf("%w: unexpected status code: %d", ErrNetworkError, resp.StatusCode)
		}
		contentLength := resp.Header.Get("Content-Length")
		if contentLength == "" {
			return int64(0), fmt.Errorf("%w: content length not provided", ErrNetworkError)
		}
		var size int64
		_, err = fmt.Sscanf(contentLength, "%d", &size)
		if err != nil {
			return int64(0), fmt.Errorf("%w: %v", ErrNetworkError, err)
		}
		return size, nil
	})
	if err != nil {
		return 0, err
	}
	return result.(int64), nil
}
// ReadAt implements the io.ReaderAt interface using HTTP Range requests.
// It clamps the read to the known file size, retries network errors via
// doWithRetry, and tolerates servers that ignore Range and return 200 with
// the full body by slicing out the requested window.
func (f *HttpFile) ReadAt(p []byte, off int64) (n int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	// Ensure we don't read past the end of the file
	size := int64(len(p))
	if f.FileSize > 0 {
		remaining := f.FileSize - off
		if remaining <= 0 {
			return 0, io.EOF
		}
		if size > remaining {
			size = remaining
			p = p[:size]
		}
	}
	result, err := f.doWithRetry(func() (interface{}, error) {
		// Create HTTP request with Range header
		req, err := http.NewRequest("GET", f.URL, nil)
		if err != nil {
			return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
		}
		end := off + size - 1
		// Range is inclusive on both ends: bytes=off-end.
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", off, end))
		// Make the request
		resp, err := f.Client.Do(req)
		if err != nil {
			return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
		}
		defer resp.Body.Close()
		// Handle response
		switch resp.StatusCode {
		case http.StatusPartialContent:
			// Server honored the Range header: read exactly the window.
			bytesRead, err := io.ReadFull(resp.Body, p)
			return bytesRead, err
		case http.StatusOK:
			// Some servers return the full content instead of partial
			// NOTE(review): this buffers the whole body in memory — fine for
			// small files, costly for large archives; confirm acceptable.
			fullData, err := io.ReadAll(resp.Body)
			if err != nil {
				return 0, fmt.Errorf("%w: %v", ErrNetworkError, err)
			}
			if int64(len(fullData)) <= off {
				return 0, io.EOF
			}
			end = off + size
			if int64(len(fullData)) < end {
				end = int64(len(fullData))
			}
			copy(p, fullData[off:end])
			return int(end - off), nil
		case http.StatusRequestedRangeNotSatisfiable:
			// We're at EOF
			return 0, io.EOF
		default:
			return 0, fmt.Errorf("%w: unexpected status code: %d", ErrNetworkError, resp.StatusCode)
		}
	})
	if err != nil {
		return 0, err
	}
	return result.(int), nil
}
// NewReader creates a new RAR3 reader for the archive served at url.
// It locates the RAR marker, validates the archive header block that
// follows it, and records the position where file entries begin.
func NewReader(url string) (*Reader, error) {
	file, err := NewHttpFile(url)
	if err != nil {
		return nil, err
	}
	r := &Reader{
		File:      file,
		ChunkSize: HttpChunkSize,
		Files:     make([]*File, 0),
	}
	marker, err := r.findMarker()
	if err != nil {
		return nil, err
	}
	r.Marker = marker
	// The archive header block follows immediately after the marker.
	headerPos := marker + int64(len(Rar3Marker))
	header, err := r.readBytes(headerPos, 7)
	if err != nil {
		return nil, err
	}
	if len(header) < 7 || header[2] != BlockHeader {
		return nil, ErrInvalidFormat
	}
	headerSize := int64(binary.LittleEndian.Uint16(header[5:7]))
	// File entries begin right after the archive header.
	r.HeaderEndPos = headerPos + headerSize
	return r, nil
}
// readBytes reads length bytes starting at start. On a short read
// (including EOF) it returns just the bytes actually received with a
// nil error; only non-EOF read failures are reported to the caller.
func (r *Reader) readBytes(start int64, length int) ([]byte, error) {
	if length <= 0 {
		return []byte{}, nil
	}
	buf := make([]byte, length)
	n, err := r.File.ReadAt(buf, start)
	if err != nil && err != io.EOF {
		return nil, err
	}
	// Trim to what was actually read; n == length yields the full buffer.
	return buf[:n], nil
}
// findMarker scans the file for the RAR3 signature (Rar3Marker) and
// returns its byte offset. It checks an 8 KiB prefix first (the common
// case), then continues scanning overlapping chunks up to MaxSearchSize
// before giving up with ErrMarkerNotFound.
func (r *Reader) findMarker() (int64, error) {
	// First try to find marker in the first chunk
	firstChunkSize := 8192 // 8KB
	chunk, err := r.readBytes(0, firstChunkSize)
	if err != nil {
		return 0, err
	}
	markerPos := bytes.Index(chunk, Rar3Marker)
	if markerPos != -1 {
		return int64(markerPos), nil
	}
	// If not found, continue searching. Start just far enough back that a
	// marker straddling the first-chunk boundary is still fully visible.
	position := int64(firstChunkSize - len(Rar3Marker) + 1)
	maxSearch := int64(MaxSearchSize)
	for position < maxSearch {
		chunkSize := min(r.ChunkSize, int(maxSearch-position))
		chunk, err := r.readBytes(position, chunkSize)
		if err != nil || len(chunk) == 0 {
			// EOF or read failure ends the search.
			break
		}
		markerPos := bytes.Index(chunk, Rar3Marker)
		if markerPos != -1 {
			return position + int64(markerPos), nil
		}
		// Move forward by chunk size minus the marker length so a marker
		// spanning two chunks is not missed; max(1, …) guarantees forward
		// progress even when the chunk is shorter than the marker.
		position += int64(max(1, len(chunk)-len(Rar3Marker)+1))
	}
	return 0, ErrMarkerNotFound
}
// decodeUnicode decodes a RAR3-encoded Unicode filename.
//
// RAR3 stores a plain ASCII name plus a compact stream of 2-bit opcodes
// that either copy the next ASCII character, emit a raw byte, combine a
// low byte with the current high byte, or set a new high byte. Any
// ASCII characters left unconsumed are appended verbatim.
func decodeUnicode(asciiStr string, unicodeData []byte) string {
	if len(unicodeData) == 0 {
		return asciiStr
	}
	var (
		out      []rune
		asciiIdx int
		dataIdx  int
		high     byte
	)
	for dataIdx < len(unicodeData) {
		flagByte := unicodeData[dataIdx]
		dataIdx++
		var opBits uint
		var opCount int
		if flagByte&0x80 != 0 {
			// Extended flag: absorb additional flag bytes while the
			// continuation bit is set, four opcodes per flag byte.
			opBits = uint(flagByte)
			bits := 1
			for opBits&(0x80>>bits) != 0 && dataIdx < len(unicodeData) {
				opBits = ((opBits & ((0x80 >> bits) - 1)) << 8) | uint(unicodeData[dataIdx])
				dataIdx++
				bits++
			}
			opCount = bits * 4
		} else {
			// Simple flag byte: four 2-bit opcodes.
			opBits = uint(flagByte)
			opCount = 4
		}
		for i := 0; i < opCount; i++ {
			if asciiIdx >= len(asciiStr) && dataIdx >= len(unicodeData) {
				break
			}
			op := (opBits >> (i * 2)) & 0x03
			if op == 0 {
				// Copy the next ASCII character.
				if asciiIdx < len(asciiStr) {
					out = append(out, rune(asciiStr[asciiIdx]))
					asciiIdx++
				}
			} else if op == 1 {
				// Raw byte (high byte is zero).
				if dataIdx < len(unicodeData) {
					out = append(out, rune(unicodeData[dataIdx]))
					dataIdx++
				}
			} else if op == 2 {
				// Low byte combined with the current high byte.
				if dataIdx < len(unicodeData) {
					low := uint(unicodeData[dataIdx])
					dataIdx++
					out = append(out, rune(low|(uint(high)<<8)))
				}
			} else {
				// op == 3: set a new high byte for subsequent opcodes.
				if dataIdx < len(unicodeData) {
					high = unicodeData[dataIdx]
					dataIdx++
				}
			}
		}
	}
	// Append any remaining ASCII characters.
	for asciiIdx < len(asciiStr) {
		out = append(out, rune(asciiStr[asciiIdx]))
		asciiIdx++
	}
	return string(out)
}
// readFiles walks the archive's block chain starting after the marker
// and collects every file entry into r.Files, stopping at the archive
// end block. Transient read failures are retried with exponential
// backoff via readBytesRetry.
func (r *Reader) readFiles() error {
	pos := r.Marker + int64(len(Rar3Marker)) // skip the marker block

	// Validate and skip the archive header block.
	header, err := r.readBytesRetry(pos, 7)
	if err != nil {
		return err
	}
	if len(header) < 7 || header[2] != BlockHeader {
		return ErrInvalidFormat
	}
	pos += int64(binary.LittleEndian.Uint16(header[5:7]))

	// Process blocks until the end block is seen.
	for {
		header, err := r.readBytesRetry(pos, 7)
		if err != nil {
			return fmt.Errorf("error reading block header: %w", err)
		}
		if len(header) < 7 {
			return fmt.Errorf("incomplete block header at position %d", pos)
		}
		headType := header[2]
		headFlags := int(binary.LittleEndian.Uint16(header[3:5]))
		headSize := int(binary.LittleEndian.Uint16(header[5:7]))

		if headType == BlockEnd {
			// End of archive.
			return nil
		}
		// Guard against a corrupt/short header size, which would
		// otherwise leave pos unchanged and spin this loop forever.
		if headSize < 7 {
			return fmt.Errorf("invalid block size %d at position %d", headSize, pos)
		}

		if headType == BlockFile {
			completeHeader, err := r.readBytesRetry(pos, headSize)
			if err != nil {
				return fmt.Errorf("failed to read complete file header after retries: %w", err)
			}
			if len(completeHeader) < headSize {
				return fmt.Errorf("incomplete file header at position %d", pos)
			}
			if fileInfo, err := r.parseFileHeader(completeHeader, pos); err == nil && fileInfo != nil {
				r.Files = append(r.Files, fileInfo)
				pos = fileInfo.NextOffset
			} else {
				// Unparseable entry: skip just the header and resync.
				pos += int64(headSize)
			}
			continue
		}

		// Non-file block: skip the header, then any trailing data area.
		pos += int64(headSize)
		if headFlags&FlagHasData != 0 {
			// NOTE(review): the 4-byte data size is read from the last
			// four bytes of the header just skipped — confirm against
			// the RAR3 spec (ADD_SIZE normally sits at header offset 7).
			sizeData, err := r.readBytesRetry(pos-4, 4)
			if err != nil {
				return fmt.Errorf("failed to read data size after retries: %w", err)
			}
			if len(sizeData) < 4 {
				return fmt.Errorf("incomplete data size at position %d", pos-4)
			}
			pos += int64(binary.LittleEndian.Uint32(sizeData))
		}
	}
}

// readBytesRetry reads length bytes at pos, retrying transient (EOF or
// network) failures and short reads up to three times with exponential
// backoff. Non-retryable errors are returned immediately.
func (r *Reader) readBytesRetry(pos int64, length int) ([]byte, error) {
	data, err := r.readBytes(pos, length)
	if err == nil && len(data) >= length {
		return data, nil
	}
	if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, ErrNetworkError) {
		return data, err
	}
	const maxRetries = 3
	retryDelay := time.Second
	for attempt := 0; attempt < maxRetries; attempt++ {
		time.Sleep(retryDelay * time.Duration(1<<uint(attempt)))
		data, err = r.readBytes(pos, length)
		if err == nil && len(data) >= length {
			return data, nil
		}
	}
	return data, err
}
// parseFileHeader parses a RAR3 file block header located at position
// and returns the file's metadata, including where its packed data
// starts and where the next block header begins.
func (r *Reader) parseFileHeader(headerData []byte, position int64) (*File, error) {
	if len(headerData) < 7 {
		return nil, fmt.Errorf("header data too short")
	}
	headType := headerData[2]
	headFlags := int(binary.LittleEndian.Uint16(headerData[3:5]))
	headSize := int(binary.LittleEndian.Uint16(headerData[5:7]))
	if headType != BlockFile {
		return nil, fmt.Errorf("not a file block")
	}
	// The fixed portion of a RAR3 file header is 32 bytes.
	if len(headerData) < 32 {
		return nil, fmt.Errorf("file header too short")
	}
	// Fixed layout (byte offsets within the block):
	//   7:11  packed size        15     host OS (unused here)
	//  11:15  unpacked size      16:20  file CRC32
	//  20:24  file time (unused) 24     unpack version (unused)
	//  25     compression method 26:28  name size
	//  28:32  file attributes (unused)
	packSize := binary.LittleEndian.Uint32(headerData[7:11])
	unpackSize := binary.LittleEndian.Uint32(headerData[11:15])
	fileCRC := binary.LittleEndian.Uint32(headerData[16:20])
	method := headerData[25]
	nameSize := binary.LittleEndian.Uint16(headerData[26:28])

	// Optional 64-bit size extension (high dwords of the two sizes).
	highPackSize := uint32(0)
	highUnpSize := uint32(0)
	offset := 32 // start of the variable portion after the fixed fields
	if headFlags&FlagHasHighSize != 0 {
		if offset+8 <= len(headerData) {
			highPackSize = binary.LittleEndian.Uint32(headerData[offset : offset+4])
			highUnpSize = binary.LittleEndian.Uint32(headerData[offset+4 : offset+8])
		}
		offset += 8
	}
	fullPackSize := int64(packSize) + (int64(highPackSize) << 32)
	fullUnpSize := int64(unpackSize) + (int64(highUnpSize) << 32)

	fileName := r.parseFileName(headerData, offset, int(nameSize), headFlags)

	isDirectory := (headFlags & FlagDirectory) == FlagDirectory
	dataOffset := position + int64(headSize)
	nextOffset := dataOffset
	// Directories and data-less entries carry no packed payload.
	if !isDirectory && headFlags&FlagHasData != 0 {
		nextOffset += fullPackSize
	}
	return &File{
		Path:           fileName,
		Size:           fullUnpSize,
		CompressedSize: fullPackSize,
		Method:         method,
		CRC:            fileCRC,
		IsDirectory:    isDirectory,
		DataOffset:     dataOffset,
		NextOffset:     nextOffset,
	}, nil
}

// parseFileName extracts the filename from a file header, handling the
// RAR3 Unicode encoding (ASCII part, NUL, encoded Unicode part) when
// the FlagHasUnicodeName flag is set. Names that do not fit inside the
// header get a synthetic placeholder.
func (r *Reader) parseFileName(headerData []byte, offset, nameSize, headFlags int) string {
	if offset+nameSize > len(headerData) {
		return fmt.Sprintf("UnknownFile%d", len(r.Files))
	}
	nameBytes := headerData[offset : offset+nameSize]
	if headFlags&FlagHasUnicodeName != 0 {
		if zeroPos := bytes.IndexByte(nameBytes, 0); zeroPos != -1 {
			asciiPart := nameBytes[:zeroPos]
			if utf8.Valid(asciiPart) {
				// The ASCII part alone is already valid UTF-8.
				return string(asciiPart)
			}
			// Fall back to the RAR3 custom Unicode decoder.
			return decodeUnicode(string(asciiPart), nameBytes[zeroPos+1:])
		}
	}
	// Plain name, or Unicode flag set without an encoded section.
	// (The original code ran utf8.Valid here but returned the same
	// string on both branches, so the check was dead and is dropped.)
	return string(nameBytes)
}
// GetFiles returns all file entries in the archive. The block chain is
// scanned on first use; subsequent calls return the cached result.
func (r *Reader) GetFiles() ([]*File, error) {
	if len(r.Files) > 0 {
		return r.Files, nil
	}
	if err := r.readFiles(); err != nil {
		return nil, err
	}
	return r.Files, nil
}
// ExtractFile returns the raw bytes of file. Only entries stored
// without compression (method 0x30, "Store") are supported, and
// directories cannot be extracted.
func (r *Reader) ExtractFile(file *File) ([]byte, error) {
	switch {
	case file.IsDirectory:
		return nil, ErrDirectoryExtractNotSupported
	case file.Method != 0x30: // 0x30 == "Store" (uncompressed)
		return nil, ErrCompressionNotSupported
	default:
		return r.readBytes(file.DataOffset, int(file.CompressedSize))
	}
}

View File

@@ -1,37 +0,0 @@
package rar
import (
"net/http"
"time"
)
// File represents a file entry in a RAR archive.
type File struct {
	Path           string // file path as stored in the archive header
	Size           int64  // unpacked (original) size in bytes
	CompressedSize int64  // packed size in bytes as stored in the archive
	Method         byte   // compression method byte (0x30 == "Store")
	CRC            uint32 // CRC32 value taken from the file header
	IsDirectory    bool   // true when the entry's directory flag is set
	DataOffset     int64  // byte offset of the packed data within the archive
	NextOffset     int64  // byte offset of the next block header
}
// HttpFile is an access point for a RAR archive served through HTTP;
// reads are satisfied with Range requests against URL.
type HttpFile struct {
	URL        string        // archive URL used for HEAD and ranged GET requests
	Position   int64         // current position; NOTE(review): not referenced by the visible read path — confirm it is still needed
	Client     *http.Client  // HTTP client used for all requests
	FileSize   int64         // total size reported by the server; 0 means unknown
	MaxRetries int           // retry budget for transient network errors
	RetryDelay time.Duration // base delay between retry attempts
}
// Reader reads RAR3 format archives over HTTP.
type Reader struct {
	File         *HttpFile // underlying HTTP-backed archive file
	ChunkSize    int       // chunk size used while scanning for the marker
	Marker       int64     // byte offset of the RAR3 marker signature
	HeaderEndPos int64     // Position after the archive header
	Files        []*File   // parsed file entries, populated lazily by readFiles
}

159
pkg/repair/clean.go Normal file
View File

@@ -0,0 +1,159 @@
package repair
//func (r *Repair) clean(job *Job) error {
// // Create a new error group
// g, ctx := errgroup.WithContext(context.Background())
//
// uniqueItems := make(map[string]string)
// mu := sync.Mutex{}
//
// // Limit concurrent goroutines
// g.SetLimit(10)
//
// for _, a := range job.Arrs {
// a := a // Capture range variable
// g.Go(func() error {
// // Check if context was canceled
// select {
// case <-ctx.Done():
// return ctx.Err()
// default:
// }
//
// items, err := r.cleanArr(job, a, "")
// if err != nil {
// r.logger.Error().Err(err).Msgf("Error cleaning %s", a)
// return err
// }
//
// // Safely append the found items to the shared slice
// if len(items) > 0 {
// mu.Lock()
// for k, v := range items {
// uniqueItems[k] = v
// }
// mu.Unlock()
// }
//
// return nil
// })
// }
//
// if err := g.Wait(); err != nil {
// return err
// }
//
// if len(uniqueItems) == 0 {
// job.CompletedAt = time.Now()
// job.Status = JobCompleted
//
// go func() {
// if err := request.SendDiscordMessage("repair_clean_complete", "success", job.discordContext()); err != nil {
// r.logger.Error().Msgf("Error sending discord message: %v", err)
// }
// }()
//
// return nil
// }
//
// cache := r.deb.Caches["realdebrid"]
// if cache == nil {
// return fmt.Errorf("cache not found")
// }
// torrents := cache.GetTorrents()
//
// dangling := make([]string, 0)
// for _, t := range torrents {
// if _, ok := uniqueItems[t.Name]; !ok {
// dangling = append(dangling, t.Id)
// }
// }
//
//	r.logger.Info().Msgf("Found %d dangling items", len(dangling))
//
// if len(dangling) == 0 {
// job.CompletedAt = time.Now()
// job.Status = JobCompleted
// return nil
// }
//
// client := r.deb.Clients["realdebrid"]
// if client == nil {
// return fmt.Errorf("client not found")
// }
// for _, id := range dangling {
// err := client.DeleteTorrent(id)
// if err != nil {
// return err
// }
// }
//
// return nil
//}
//
//func (r *Repair) cleanArr(j *Job, _arr string, tmdbId string) (map[string]string, error) {
// uniqueItems := make(map[string]string)
// a := r.arrs.Get(_arr)
//
// r.logger.Info().Msgf("Starting repair for %s", a.Name)
// media, err := a.GetMedia(tmdbId)
// if err != nil {
// r.logger.Info().Msgf("Failed to get %s media: %v", a.Name, err)
// return uniqueItems, err
// }
//
// // Create a new error group
// g, ctx := errgroup.WithContext(context.Background())
//
// mu := sync.Mutex{}
//
// // Limit concurrent goroutines
// g.SetLimit(runtime.NumCPU() * 4)
//
// for _, m := range media {
// m := m // Create a new variable scoped to the loop iteration
// g.Go(func() error {
// // Check if context was canceled
// select {
// case <-ctx.Done():
// return ctx.Err()
// default:
// }
//
// u := r.getUniquePaths(m)
// for k, v := range u {
// mu.Lock()
// uniqueItems[k] = v
// mu.Unlock()
// }
// return nil
// })
// }
//
// if err := g.Wait(); err != nil {
// return uniqueItems, err
// }
//
// r.logger.Info().Msgf("Repair completed for %s. %d unique items", a.Name, len(uniqueItems))
// return uniqueItems, nil
//}
//func (r *Repair) getUniquePaths(media arr.Content) map[string]string {
// // Use zurg setup to check file availability with zurg
// // This reduces bandwidth usage significantly
//
// uniqueParents := make(map[string]string)
// files := media.Files
// for _, file := range files {
// target := getSymlinkTarget(file.Path)
// if target != "" {
// file.IsSymlink = true
// dir, f := filepath.Split(target)
// parent := filepath.Base(filepath.Clean(dir))
// // Set target path folder/file.mkv
// file.TargetPath = f
// uniqueParents[parent] = target
// }
// }
// return uniqueParents
//}

View File

@@ -3,8 +3,6 @@ package repair
import (
"fmt"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/store"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
"os"
"path/filepath"
)
@@ -84,96 +82,3 @@ func collectFiles(media arr.Content) map[string][]arr.ContentFile {
}
return uniqueParents
}
// checkTorrentFiles checks the files belonging to the torrent mounted
// at torrentPath against the matching debrid cache and returns the
// subset that is broken. If the torrent cannot be resolved to a debrid
// client or cache, an empty slice is returned; if the torrent itself is
// missing from the cache, every input file is reported as broken.
func (r *Repair) checkTorrentFiles(torrentPath string, files []arr.ContentFile, clients map[string]types.Client, caches map[string]*store.Cache) []arr.ContentFile {
	brokenFiles := make([]arr.ContentFile, 0)
	emptyFiles := make([]arr.ContentFile, 0)
	r.logger.Debug().Msgf("Checking %s", torrentPath)
	// Resolve which debrid client serves this mount directory.
	dir := filepath.Dir(torrentPath)
	debridName := r.findDebridForPath(dir, clients)
	if debridName == "" {
	r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath)
	return emptyFiles
	}
	cache, ok := caches[debridName]
	if !ok {
	r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName)
	return emptyFiles
	}
	// Torrent name list snapshotted per debrid at the start of the run.
	tor, ok := r.torrentsMap.Load(debridName)
	if !ok {
	r.logger.Debug().Msgf("Could not find torrents for %s. Skipping", debridName)
	return emptyFiles
	}
	torrentsMap := tor.(map[string]store.CachedTorrent)
	// Check if torrent exists
	torrentName := filepath.Clean(filepath.Base(torrentPath))
	torrent, ok := torrentsMap[torrentName]
	if !ok {
	r.logger.Debug().Msgf("Can't find torrent %s in %s. Marking as broken", torrentName, debridName)
	// Return all files as broken
	return files
	}
	// Batch check files by their symlink target paths.
	filePaths := make([]string, len(files))
	for i, file := range files {
	filePaths[i] = file.TargetPath
	}
	brokenFilePaths := cache.GetBrokenFiles(&torrent, filePaths)
	if len(brokenFilePaths) > 0 {
	r.logger.Debug().Msgf("%d broken files found in %s", len(brokenFilePaths), torrentName)
	// Create a set for O(1) lookup
	brokenSet := make(map[string]bool, len(brokenFilePaths))
	for _, brokenPath := range brokenFilePaths {
	brokenSet[brokenPath] = true
	}
	// Filter broken files
	for _, contentFile := range files {
	if brokenSet[contentFile.TargetPath] {
	brokenFiles = append(brokenFiles, contentFile)
	}
	}
	}
	return brokenFiles
}
// findDebridForPath returns the name of the debrid client whose mount
// path equals dir, caching results (including misses, stored as "") in
// r.debridPathCache to avoid repeated lookups during a run.
func (r *Repair) findDebridForPath(dir string, clients map[string]types.Client) string {
	// Check cache first
	if debridName, exists := r.debridPathCache.Load(dir); exists {
	return debridName.(string)
	}
	// Find the debrid client whose mount path matches this directory.
	for _, client := range clients {
	mountPath := client.GetMountPath()
	if mountPath == "" {
	continue
	}
	if filepath.Clean(mountPath) == filepath.Clean(dir) {
	debridName := client.Name()
	// Cache the result
	r.debridPathCache.Store(dir, debridName)
	return debridName
	}
	}
	// Cache the empty result too, so misses are not re-resolved.
	r.debridPathCache.Store(dir, "")
	return ""
}

View File

@@ -3,7 +3,6 @@ package repair
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/go-co-op/gocron/v2"
"github.com/google/uuid"
@@ -13,7 +12,7 @@ import (
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"golang.org/x/sync/errgroup"
"net"
"net/http"
@@ -30,8 +29,9 @@ import (
type Repair struct {
Jobs map[string]*Job
arrs *arr.Storage
deb *debrid.Storage
deb *debrid.Engine
interval string
runOnStart bool
ZurgURL string
IsZurg bool
useWebdav bool
@@ -40,10 +40,7 @@ type Repair struct {
filename string
workers int
scheduler gocron.Scheduler
debridPathCache sync.Map // debridPath:debridName cache.Emptied after each run
torrentsMap sync.Map //debridName: map[string]*store.CacheTorrent. Emptied after each run
ctx context.Context
ctx context.Context
}
type JobStatus string
@@ -54,7 +51,6 @@ const (
JobFailed JobStatus = "failed"
JobCompleted JobStatus = "completed"
JobProcessing JobStatus = "processing"
JobCancelled JobStatus = "cancelled"
)
type Job struct {
@@ -70,12 +66,9 @@ type Job struct {
Recurrent bool `json:"recurrent"`
Error string `json:"error"`
cancelFunc context.CancelFunc
ctx context.Context
}
func New(arrs *arr.Storage, engine *debrid.Storage) *Repair {
func New(arrs *arr.Storage, engine *debrid.Engine) *Repair {
cfg := config.Get()
workers := runtime.NumCPU() * 20
if cfg.Repair.Workers > 0 {
@@ -85,6 +78,7 @@ func New(arrs *arr.Storage, engine *debrid.Storage) *Repair {
arrs: arrs,
logger: logger.New("repair"),
interval: cfg.Repair.Interval,
runOnStart: cfg.Repair.RunOnStart,
ZurgURL: cfg.Repair.ZurgURL,
useWebdav: cfg.Repair.UseWebDav,
autoProcess: cfg.Repair.AutoProcess,
@@ -119,6 +113,15 @@ func (r *Repair) Reset() {
}
func (r *Repair) Start(ctx context.Context) error {
//r.ctx = ctx
if r.runOnStart {
r.logger.Info().Msgf("Running initial repair")
go func() {
if err := r.AddJob([]string{}, []string{}, r.autoProcess, true); err != nil {
r.logger.Error().Err(err).Msg("Error running initial repair")
}
}()
}
r.scheduler, _ = gocron.NewScheduler(gocron.WithLocation(time.Local))
@@ -214,31 +217,10 @@ func (r *Repair) newJob(arrsNames []string, mediaIDs []string) *Job {
}
}
func (r *Repair) initRun(ctx context.Context) {
if r.useWebdav {
// Webdav use is enabled, initialize debrid torrent caches
caches := r.deb.Caches()
if len(caches) == 0 {
return
}
for name, cache := range caches {
r.torrentsMap.Store(name, cache.GetTorrentsName())
}
}
}
// // onComplete is called when the repair job is completed
func (r *Repair) onComplete() {
// Set the cache maps to nil
r.torrentsMap = sync.Map{} // Clear the torrent map
r.debridPathCache = sync.Map{}
}
func (r *Repair) preRunChecks() error {
if r.useWebdav {
caches := r.deb.Caches()
if len(caches) == 0 {
if len(r.deb.Caches) == 0 {
return fmt.Errorf("no caches found")
}
return nil
@@ -272,75 +254,31 @@ func (r *Repair) AddJob(arrsNames []string, mediaIDs []string, autoProcess, recu
job.AutoProcess = autoProcess
job.Recurrent = recurrent
r.reset(job)
job.ctx, job.cancelFunc = context.WithCancel(r.ctx)
r.Jobs[key] = job
go r.saveToFile()
go func() {
if err := r.repair(job); err != nil {
r.logger.Error().Err(err).Msg("Error running repair")
if !errors.Is(job.ctx.Err(), context.Canceled) {
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
job.CompletedAt = time.Now()
} else {
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
job.CompletedAt = time.Now()
}
r.logger.Error().Err(err).Msg("Error running repair")
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
job.CompletedAt = time.Now()
}
r.onComplete() // Clear caches and maps after job completion
}()
return nil
}
func (r *Repair) StopJob(id string) error {
job := r.GetJob(id)
if job == nil {
return fmt.Errorf("job %s not found", id)
}
// Check if job can be stopped
if job.Status != JobStarted && job.Status != JobProcessing {
return fmt.Errorf("job %s cannot be stopped (status: %s)", id, job.Status)
}
// Cancel the job
if job.cancelFunc != nil {
job.cancelFunc()
r.logger.Info().Msgf("Job %s cancellation requested", id)
go func() {
if job.Status == JobStarted || job.Status == JobProcessing {
job.Status = JobCancelled
job.BrokenItems = nil
job.ctx = nil // Clear context to prevent further processing
job.CompletedAt = time.Now()
job.Error = "Job was cancelled by user"
r.saveToFile()
}
}()
return nil
}
return fmt.Errorf("job %s cannot be cancelled", id)
}
func (r *Repair) repair(job *Job) error {
defer r.saveToFile()
if err := r.preRunChecks(); err != nil {
return err
}
// Initialize the run
r.initRun(job.ctx)
// Use a mutex to protect concurrent access to brokenItems
var mu sync.Mutex
brokenItems := map[string][]arr.ContentFile{}
g, ctx := errgroup.WithContext(job.ctx)
g, ctx := errgroup.WithContext(r.ctx)
for _, a := range job.Arrs {
a := a // Capture range variable
@@ -383,14 +321,6 @@ func (r *Repair) repair(job *Job) error {
// Wait for all goroutines to complete and check for errors
if err := g.Wait(); err != nil {
		// Check if the job was canceled
if errors.Is(ctx.Err(), context.Canceled) {
job.Status = JobCancelled
job.CompletedAt = time.Now()
job.Error = "Job was cancelled"
return fmt.Errorf("job cancelled")
}
job.FailedAt = time.Now()
job.Error = err.Error()
job.Status = JobFailed
@@ -437,7 +367,7 @@ func (r *Repair) repair(job *Job) error {
return nil
}
func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) {
func (r *Repair) repairArr(j *Job, _arr string, tmdbId string) ([]arr.ContentFile, error) {
brokenItems := make([]arr.ContentFile, 0)
a := r.arrs.Get(_arr)
@@ -454,9 +384,9 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF
return brokenItems, nil
}
// Check first media to confirm mounts are accessible
if err := r.checkMountUp(media); err != nil {
r.logger.Error().Err(err).Msgf("Mount check failed for %s", a.Name)
return brokenItems, fmt.Errorf("mount check failed: %w", err)
if !r.isMediaAccessible(media[0]) {
r.logger.Info().Msgf("Skipping repair. Parent directory not accessible for. Check your mounts")
return brokenItems, nil
}
// Mutex for brokenItems
@@ -470,14 +400,14 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF
defer wg.Done()
for m := range workerChan {
select {
case <-job.ctx.Done():
case <-r.ctx.Done():
return
default:
}
items := r.getBrokenFiles(job, m)
items := r.getBrokenFiles(m)
if items != nil {
r.logger.Debug().Msgf("Found %d broken files for %s", len(items), m.Title)
if job.AutoProcess {
if j.AutoProcess {
r.logger.Info().Msgf("Auto processing %d broken items for %s", len(items), m.Title)
// Delete broken items
@@ -499,17 +429,16 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF
}()
}
go func() {
defer close(workerChan)
for _, m := range media {
select {
case <-job.ctx.Done():
return
case workerChan <- m:
}
for _, m := range media {
select {
case <-r.ctx.Done():
break
default:
workerChan <- m
}
}()
}
close(workerChan)
wg.Wait()
if len(brokenItems) == 0 {
r.logger.Info().Msgf("No broken items found for %s", a.Name)
@@ -520,50 +449,43 @@ func (r *Repair) repairArr(job *Job, _arr string, tmdbId string) ([]arr.ContentF
return brokenItems, nil
}
// checkMountUp checks if the mounts are accessible
func (r *Repair) checkMountUp(media []arr.Content) error {
firstMedia := media[0]
for _, m := range media {
if len(m.Files) > 0 {
firstMedia = m
break
}
}
files := firstMedia.Files
func (r *Repair) isMediaAccessible(m arr.Content) bool {
files := m.Files
if len(files) == 0 {
return fmt.Errorf("no files found in media %s", firstMedia.Title)
return false
}
for _, file := range files {
if _, err := os.Stat(file.Path); os.IsNotExist(err) {
// If the file does not exist, we can't check the symlink target
r.logger.Debug().Msgf("File %s does not exist, skipping repair", file.Path)
return fmt.Errorf("file %s does not exist, skipping repair", file.Path)
}
// Get the symlink target
symlinkPath := getSymlinkTarget(file.Path)
if symlinkPath != "" {
r.logger.Trace().Msgf("Found symlink target for %s: %s", file.Path, symlinkPath)
if _, err := os.Stat(symlinkPath); os.IsNotExist(err) {
r.logger.Debug().Msgf("Symlink target %s does not exist, skipping repair", symlinkPath)
return fmt.Errorf("symlink target %s does not exist for %s. skipping repair", symlinkPath, file.Path)
}
firstFile := files[0]
r.logger.Debug().Msgf("Checking parent directory for %s", firstFile.Path)
//if _, err := os.Stat(firstFile.Path); os.IsNotExist(err) {
// r.logger.Debug().Msgf("Parent directory not accessible for %s", firstFile.Path)
// return false
//}
// Check symlink parent directory
symlinkPath := getSymlinkTarget(firstFile.Path)
r.logger.Debug().Msgf("Checking symlink parent directory for %s", symlinkPath)
if symlinkPath != "" {
parentSymlink := filepath.Dir(filepath.Dir(symlinkPath)) // /mnt/zurg/torrents/movie/movie.mkv -> /mnt/zurg/torrents
if _, err := os.Stat(parentSymlink); os.IsNotExist(err) {
return false
}
}
return nil
return true
}
func (r *Repair) getBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
func (r *Repair) getBrokenFiles(media arr.Content) []arr.ContentFile {
if r.useWebdav {
return r.getWebdavBrokenFiles(job, media)
return r.getWebdavBrokenFiles(media)
} else if r.IsZurg {
return r.getZurgBrokenFiles(job, media)
return r.getZurgBrokenFiles(media)
} else {
return r.getFileBrokenFiles(job, media)
return r.getFileBrokenFiles(media)
}
}
func (r *Repair) getFileBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
func (r *Repair) getFileBrokenFiles(media arr.Content) []arr.ContentFile {
// This checks symlink target, try to get read a tiny bit of the file
brokenFiles := make([]arr.ContentFile, 0)
@@ -588,7 +510,7 @@ func (r *Repair) getFileBrokenFiles(job *Job, media arr.Content) []arr.ContentFi
return brokenFiles
}
func (r *Repair) getZurgBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
func (r *Repair) getZurgBrokenFiles(media arr.Content) []arr.ContentFile {
// Use zurg setup to check file availability with zurg
// This reduces bandwidth usage significantly
@@ -628,17 +550,12 @@ func (r *Repair) getZurgBrokenFiles(job *Job, media arr.Content) []arr.ContentFi
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
r.logger.Debug().Msgf("Failed to get download url for %s", fullURL)
if err := resp.Body.Close(); err != nil {
return nil
}
resp.Body.Close()
brokenFiles = append(brokenFiles, file)
continue
}
downloadUrl := resp.Request.URL.String()
if err := resp.Body.Close(); err != nil {
return nil
}
resp.Body.Close()
if downloadUrl != "" {
r.logger.Trace().Msgf("Found download url: %s", downloadUrl)
} else {
@@ -656,16 +573,16 @@ func (r *Repair) getZurgBrokenFiles(job *Job, media arr.Content) []arr.ContentFi
return brokenFiles
}
func (r *Repair) getWebdavBrokenFiles(job *Job, media arr.Content) []arr.ContentFile {
func (r *Repair) getWebdavBrokenFiles(media arr.Content) []arr.ContentFile {
// Use internal webdav setup to check file availability
caches := r.deb.Caches()
caches := r.deb.Caches
if len(caches) == 0 {
r.logger.Info().Msg("No caches found. Can't use webdav")
return nil
}
clients := r.deb.Clients()
clients := r.deb.Clients
if len(clients) == 0 {
r.logger.Info().Msg("No clients found. Can't use webdav")
return nil
@@ -673,18 +590,58 @@ func (r *Repair) getWebdavBrokenFiles(job *Job, media arr.Content) []arr.Content
brokenFiles := make([]arr.ContentFile, 0)
uniqueParents := collectFiles(media)
for torrentPath, files := range uniqueParents {
select {
case <-job.ctx.Done():
return brokenFiles
default:
for torrentPath, f := range uniqueParents {
r.logger.Debug().Msgf("Checking %s", torrentPath)
// Get the debrid first
dir := filepath.Dir(torrentPath)
debridName := ""
for _, client := range clients {
mountPath := client.GetMountPath()
if mountPath == "" {
continue
}
if filepath.Clean(mountPath) == filepath.Clean(dir) {
debridName = client.GetName()
break
}
}
brokenFilesForTorrent := r.checkTorrentFiles(torrentPath, files, clients, caches)
if len(brokenFilesForTorrent) > 0 {
brokenFiles = append(brokenFiles, brokenFilesForTorrent...)
if debridName == "" {
r.logger.Debug().Msgf("No debrid found for %s. Skipping", torrentPath)
continue
}
cache, ok := caches[debridName]
if !ok {
r.logger.Debug().Msgf("No cache found for %s. Skipping", debridName)
continue
}
// Check if torrent exists
torrentName := filepath.Clean(filepath.Base(torrentPath))
torrent := cache.GetTorrentByName(torrentName)
if torrent == nil {
r.logger.Debug().Msgf("No torrent found for %s. Skipping", torrentName)
brokenFiles = append(brokenFiles, f...)
continue
}
files := make([]string, 0)
for _, file := range f {
files = append(files, file.TargetPath)
}
_brokenFiles := cache.GetBrokenFiles(torrent, files)
totalBrokenFiles := len(_brokenFiles)
if totalBrokenFiles > 0 {
r.logger.Debug().Msgf("%d broken files found in %s", totalBrokenFiles, torrentName)
for _, contentFile := range f {
if utils.Contains(_brokenFiles, contentFile.TargetPath) {
brokenFiles = append(brokenFiles, contentFile)
}
}
}
}
if len(brokenFiles) == 0 {
r.logger.Debug().Msgf("No broken files found for %s", media.Title)
return nil
}
r.logger.Debug().Msgf("%d broken files found for %s", len(brokenFiles), media.Title)
@@ -717,6 +674,7 @@ func (r *Repair) ProcessJob(id string) error {
if job == nil {
return fmt.Errorf("job %s not found", id)
}
// All validation checks remain the same
if job.Status != JobPending {
return fmt.Errorf("job %s not pending", id)
}
@@ -738,11 +696,7 @@ func (r *Repair) ProcessJob(id string) error {
return nil
}
if job.ctx == nil || job.ctx.Err() != nil {
job.ctx, job.cancelFunc = context.WithCancel(r.ctx)
}
g, ctx := errgroup.WithContext(job.ctx)
g, ctx := errgroup.WithContext(r.ctx)
g.SetLimit(r.workers)
for arrName, items := range brokenItems {

View File

@@ -1,171 +0,0 @@
package sabnzbd
// ConfigResponse represents configuration response
type ConfigResponse struct {
	Config *Config `json:"config"`
}

// ConfigNewzbin holds the Newzbin account section of the SABnzbd
// configuration payload.
type ConfigNewzbin struct {
	Username     string `json:"username"`
	BookmarkRate int    `json:"bookmark_rate"`
	Url          string `json:"url"`
	Bookmarks    int    `json:"bookmarks"`
	Password     string `json:"password"`
	Unbookmark   int    `json:"unbookmark"`
}

// Category represents a SABnzbd category
type Category struct {
	Name     string `json:"name"`
	Order    int    `json:"order"`
	Pp       string `json:"pp"`       // post-processing level
	Script   string `json:"script"`
	Dir      string `json:"dir"`      // category download directory
	NewzBin  string `json:"newzbin"`
	Priority string `json:"priority"`
}

// Server represents a usenet server
type Server struct {
	Name        string `json:"name"`
	Host        string `json:"host"`
	Port        int    `json:"port"`
	Username    string `json:"username"`
	Password    string `json:"password"`
	Connections int    `json:"connections"`
	Retention   int    `json:"retention"`
	Priority    int    `json:"priority"`
	SSL         bool   `json:"ssl"`
	Optional    bool   `json:"optional"`
}

// Config is the top-level SABnzbd configuration object served by the
// config API modes.
type Config struct {
	Misc       MiscConfig `json:"misc"`
	Categories []Category `json:"categories"`
	Servers    []Server   `json:"servers"`
}
// MiscConfig mirrors the "misc" section of a SABnzbd configuration.
// Most values are strings (often "0"/"1" flags) because that is how the
// real SABnzbd API serializes them.
type MiscConfig struct {
	// Directory Configuration
	CompleteDir  string `json:"complete_dir"`
	DownloadDir  string `json:"download_dir"`
	AdminDir     string `json:"admin_dir"`
	NzbBackupDir string `json:"nzb_backup_dir"`
	ScriptDir    string `json:"script_dir"`
	EmailDir     string `json:"email_dir"`
	WebDir       string `json:"web_dir"`
	// Processing Options
	ParOption             string `json:"par_option"`
	ParOptionConvert      string `json:"par_option_convert"`
	ParOptionDuplicate    string `json:"par_option_duplicate"`
	DirectUnpack          string `json:"direct_unpack"`
	FlatUnpack            string `json:"flat_unpack"`
	EnableRecursiveUnpack string `json:"enable_recursive_unpack"`
	OverwriteFiles        string `json:"overwrite_files"`
	IgnoreWrongUnrar      string `json:"ignore_wrong_unrar"`
	IgnoreUnrarDates      string `json:"ignore_unrar_dates"`
	PreCheck              string `json:"pre_check"`
	// File Handling
	Permissions               string   `json:"permissions"`
	FolderRename              string   `json:"folder_rename"`
	FileRename                string   `json:"file_rename"`
	ReplaceIllegal            string   `json:"replace_illegal"`
	ReplaceDots               string   `json:"replace_dots"`
	ReplaceSpaces             string   `json:"replace_spaces"`
	SanitizeSafe              string   `json:"sanitize_safe"`
	IgnoreSamples             string   `json:"ignore_samples"`
	UnwantedExtensions        []string `json:"unwanted_extensions"`
	ActionOnUnwanted          string   `json:"action_on_unwanted"`
	ActionOnDuplicate         string   `json:"action_on_duplicate"`
	BackupForDuplicates       string   `json:"backup_for_duplicates"`
	CleanupList               []string `json:"cleanup_list"`
	DeobfuscateFinalFilenames string   `json:"deobfuscate_final_filenames"`
	// Scripts and Processing
	PreScript            string `json:"pre_script"`
	PostScript           string `json:"post_script"`
	EmptyPostproc        string `json:"empty_postproc"`
	PauseOnPostProcessing string `json:"pause_on_post_processing"`
	// System Resources
	Nice       string `json:"nice"`
	NiceUnpack string `json:"nice_unpack"`
	Ionice     string `json:"ionice"`
	Fsync      string `json:"fsync"`
	// Bandwidth and Performance
	BandwidthMax     string `json:"bandwidth_max"`
	BandwidthPerc    string `json:"bandwidth_perc"`
	RefreshRate      string `json:"refresh_rate"`
	DirscanSpeed     string `json:"dirscan_speed"`
	FolderMaxLength  string `json:"folder_max_length"`
	PropagationDelay string `json:"propagation_delay"`
	// Storage Management
	DownloadFree string `json:"download_free"`
	CompleteFree string `json:"complete_free"`
	// Queue Management
	QueueComplete     string `json:"queue_complete"`
	QueueCompletePers string `json:"queue_complete_pers"`
	AutoSort          string `json:"auto_sort"`
	NewNzbOnFailure   string `json:"new_nzb_on_failure"`
	PauseOnPwrar      string `json:"pause_on_pwrar"`
	WarnedOldQueue    string `json:"warned_old_queue"`
	// Web Interface
	WebHost     string `json:"web_host"`
	WebPort     string `json:"web_port"`
	WebUsername string `json:"web_username"`
	WebPassword string `json:"web_password"`
	WebColor    string `json:"web_color"`
	WebColor2   string `json:"web_color2"`
	AutoBrowser string `json:"auto_browser"`
	Autobrowser string `json:"autobrowser"` // Duplicate field - may need to resolve
	// HTTPS Configuration
	EnableHTTPS             string `json:"enable_https"`
	EnableHTTPSVerification string `json:"enable_https_verification"`
	HTTPSPort               string `json:"https_port"`
	HTTPSCert               string `json:"https_cert"`
	HTTPSKey                string `json:"https_key"`
	HTTPSChain              string `json:"https_chain"`
	// Security and API
	APIKey        string   `json:"api_key"`
	NzbKey        string   `json:"nzb_key"`
	HostWhitelist string   `json:"host_whitelist"`
	LocalRanges   []string `json:"local_ranges"`
	InetExposure  string   `json:"inet_exposure"`
	APILogging    string   `json:"api_logging"`
	APIWarnings   string   `json:"api_warnings"`
	// Logging
	LogLevel   string `json:"log_level"`
	LogSize    string `json:"log_size"`
	MaxLogSize string `json:"max_log_size"`
	LogBackups string `json:"log_backups"`
	LogNew     string `json:"log_new"`
	// Notifications
	MatrixUsername string `json:"matrix_username"`
	MatrixPassword string `json:"matrix_password"`
	MatrixServer   string `json:"matrix_server"`
	MatrixRoom     string `json:"matrix_room"`
	// Miscellaneous
	ConfigLock      string `json:"config_lock"`
	Language        string `json:"language"`
	CheckNewRel     string `json:"check_new_rel"`
	RSSFilenames    string `json:"rss_filenames"`
	IPv6Hosting     string `json:"ipv6_hosting"`
	EnableBonjour   string `json:"enable_bonjour"`
	Cherryhost      string `json:"cherryhost"`
	WinMenu         string `json:"win_menu"`
	AMPM            string `json:"ampm"`
	NotifiedNewSkin string `json:"notified_new_skin"`
	HelpURI         string `json:"helpuri"`
	SSDURI          string `json:"ssduri"`
}

View File

@@ -1,121 +0,0 @@
package sabnzbd
import (
"context"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"strings"
"github.com/sirrobot01/decypharr/pkg/arr"
)
type contextKey string
const (
apiKeyKey contextKey = "apikey"
modeKey contextKey = "mode"
arrKey contextKey = "arr"
categoryKey contextKey = "category"
)
func getMode(ctx context.Context) string {
if mode, ok := ctx.Value(modeKey).(string); ok {
return mode
}
return ""
}
// categoryContext extracts the "category" parameter from the query string
// or the request body and stores it (whitespace-trimmed) in the request
// context for downstream handlers.
func (s *SABnzbd) categoryContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		category := r.URL.Query().Get("category")
		if category == "" {
			// ParseForm merges the URL query and the POST form into r.Form,
			// so a single lookup covers both sources. (The previous extra
			// r.FormValue("category") call read the very same r.Form and
			// was redundant.)
			_ = r.ParseForm()
			category = r.Form.Get("category")
		}
		ctx := context.WithValue(r.Context(), categoryKey, strings.TrimSpace(category))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// getArrFromContext returns the *arr.Arr stored in ctx, or nil when absent
// or of the wrong type.
func getArrFromContext(ctx context.Context) *arr.Arr {
	v, _ := ctx.Value(arrKey).(*arr.Arr)
	return v
}

// getCategory returns the category stored in ctx, or "" when absent.
func getCategory(ctx context.Context) string {
	c, _ := ctx.Value(categoryKey).(string)
	return c
}
// modeContext extracts the SABnzbd "mode" and "cat" parameters from the
// request and stores the mode plus a default Arr instance for the category
// in the request context.
func (s *SABnzbd) modeContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Parse the form up front so both query-string and body values are
		// visible. Previously ParseForm ran only when "mode" was missing
		// from the query, so a form-encoded "cat" was silently dropped
		// (r.Form was nil and Get returned "").
		_ = r.ParseForm()
		// r.Form contains query values first, so precedence is unchanged.
		mode := r.Form.Get("mode")
		// Extract category for Arr integration.
		category := r.Form.Get("cat")
		// Create a default Arr instance for the category.
		downloadUncached := false
		a := arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
		ctx := context.WithValue(r.Context(), modeKey, strings.TrimSpace(mode))
		ctx = context.WithValue(ctx, arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// authContext creates a middleware that extracts the Arr host and token from the Authorization header
// and adds it to the request context.
// This is used to identify the Arr instance for the request.
// Only a valid host and token will be added to the context/config. The rest are manual
func (s *SABnzbd) authContext(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// SABnzbd clients send server credentials as ma_username/ma_password;
		// here they are repurposed to carry the Arr host and API token.
		host := r.FormValue("ma_username")
		token := r.FormValue("ma_password")
		category := getCategory(r.Context())
		arrs := store.Get().Arr()
		// Check if arr exists
		a := arrs.Get(category)
		if a == nil {
			// Arr is not configured, create a new one
			downloadUncached := false
			a = arr.New(category, "", "", false, false, &downloadUncached, "", "auto")
		}
		// Only overwrite host/token when the request actually supplied them.
		host = strings.TrimSpace(host)
		if host != "" {
			a.Host = host
		}
		token = strings.TrimSpace(token)
		if token != "" {
			a.Token = token
		}
		a.Source = "auto"
		if err := utils.ValidateServiceURL(a.Host); err != nil {
			// Return silently, no need to raise a problem. Just do not add the Arr to the context/config.json
			next.ServeHTTP(w, r)
			return
		}
		// Persist the (possibly updated) Arr and expose it to downstream handlers.
		arrs.AddOrUpdate(a)
		ctx := context.WithValue(r.Context(), arrKey, a)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

View File

@@ -1,476 +0,0 @@
package sabnzbd
import (
"context"
"fmt"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/usenet"
"io"
"net/http"
"strconv"
"strings"
"time"
)
// handleAPI is the main handler for all SABnzbd API requests.
// It dispatches on the "mode" parameter (extracted earlier by modeContext).
func (s *SABnzbd) handleAPI(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	mode := getMode(ctx)
	switch mode {
	case ModeQueue:
		s.handleQueue(w, r)
	case ModeHistory:
		s.handleHistory(w, r)
	case ModeConfig:
		s.handleConfig(w, r)
	case ModeStatus, ModeFullStatus:
		s.handleStatus(w, r)
	case ModeGetConfig:
		// get_config shares the config handler.
		s.handleConfig(w, r)
	case ModeAddURL:
		s.handleAddURL(w, r)
	case ModeAddFile:
		s.handleAddFile(w, r)
	case ModeVersion:
		s.handleVersion(w, r)
	case ModeGetCats:
		s.handleGetCategories(w, r)
	case ModeGetScripts:
		s.handleGetScripts(w, r)
	case ModeGetFiles:
		s.handleGetFiles(w, r)
	default:
		// Unknown (or missing) mode: log it and answer 404.
		s.logger.Warn().Str("mode", mode).Msg("Unknown API mode, returning 404")
		http.Error(w, "Not Found", http.StatusNotFound)
	}
}
// handleQueue dispatches queue operations. Without a "name" parameter it
// lists the queue; otherwise "name" selects delete/pause/resume.
func (s *SABnzbd) handleQueue(w http.ResponseWriter, r *http.Request) {
	name := r.FormValue("name")
	if name == "" {
		s.handleListQueue(w, r)
		return
	}
	switch strings.ToLower(strings.TrimSpace(name)) {
	case "delete":
		s.handleQueueDelete(w, r)
	case "pause":
		s.handleQueuePause(w, r)
	case "resume":
		s.handleQueueResume(w, r)
	}
}
// handleQueueResume acknowledges a resume request. Queue pause state is not
// tracked, so it always reports success.
func (s *SABnzbd) handleQueueResume(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, StatusResponse{Status: true}, http.StatusOK)
}
// handleQueueDelete deletes one or more NZBs identified by the
// comma-separated "value" parameter. A fully failed batch returns a 500;
// a partially failed batch returns 200 with the failure details in Error.
func (s *SABnzbd) handleQueueDelete(w http.ResponseWriter, r *http.Request) {
	nzoIDs := r.FormValue("value")
	if nzoIDs == "" {
		s.writeError(w, "No NZB IDs provided", http.StatusBadRequest)
		return
	}
	var successCount int
	var errMsgs []string // renamed from "errors" to avoid shadowing the stdlib package name
	for _, nzoID := range strings.Split(nzoIDs, ",") {
		nzoID = strings.TrimSpace(nzoID)
		if nzoID == "" {
			continue // skip empty IDs
		}
		s.logger.Info().Str("nzo_id", nzoID).Msg("Deleting NZB")
		// Use atomic delete operation
		if err := s.usenet.Store().AtomicDelete(nzoID); err != nil {
			s.logger.Error().
				Err(err).
				Str("nzo_id", nzoID).
				Msg("Failed to delete NZB")
			errMsgs = append(errMsgs, fmt.Sprintf("Failed to delete %s: %v", nzoID, err))
		} else {
			successCount++
		}
	}
	response := StatusResponse{Status: true}
	if len(errMsgs) > 0 {
		if successCount == 0 {
			// All deletions failed
			s.writeError(w, fmt.Sprintf("All deletions failed: %s", strings.Join(errMsgs, "; ")), http.StatusInternalServerError)
			return
		}
		// Partial success: log it and surface the details to the caller
		// (previously the details were only logged).
		s.logger.Warn().
			Int("success_count", successCount).
			Int("error_count", len(errMsgs)).
			Strs("errors", errMsgs).
			Msg("Partial success in queue deletion")
		response.Error = strings.Join(errMsgs, "; ")
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleQueuePause acknowledges a pause request. Queue pause state is not
// tracked, so it always reports success.
func (s *SABnzbd) handleQueuePause(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, StatusResponse{Status: true}, http.StatusOK)
}
// handleListQueue returns the current download queue in SABnzbd's
// queue-response shape.
func (s *SABnzbd) handleListQueue(w http.ResponseWriter, r *http.Request) {
	nzbs := s.usenet.Store().GetQueue()
	queue := Queue{
		Version: Version,
		Slots:   []QueueSlot{},
	}
	// Convert NZBs to queue slots.
	for _, nzb := range nzbs {
		if nzb.ETA < 0 {
			nzb.ETA = 0 // ensure ETA is non-negative
		}
		// SABnzbd clients expect timeleft as "HH:MM:SS". The previous code
		// used time.Duration.String(), which yields "1h2m3s" and is not the
		// format the surrounding comments (and SABnzbd consumers) describe.
		d := time.Duration(nzb.ETA) * time.Second
		timeLeft := fmt.Sprintf("%02d:%02d:%02d",
			int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60)
		slot := QueueSlot{
			Status:     s.mapNZBStatus(nzb.Status),
			Mb:         nzb.TotalSize,
			Filename:   nzb.Name,
			Cat:        nzb.Category,
			MBLeft:     0,
			Percentage: nzb.Percentage,
			NzoId:      nzb.ID,
			Size:       nzb.TotalSize,
			TimeLeft:   timeLeft, // "HH:MM:SS" format
		}
		queue.Slots = append(queue.Slots, slot)
	}
	response := QueueResponse{
		Queue:   queue,
		Status:  true,
		Version: Version,
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleHistory returns the download history, optionally capped by the
// "limit" parameter (0 or absent means no cap; negatives are clamped to 0).
func (s *SABnzbd) handleHistory(w http.ResponseWriter, r *http.Request) {
	limit := 0
	if raw := r.FormValue("limit"); raw != "" {
		parsed, err := strconv.Atoi(raw)
		if err != nil {
			s.logger.Error().Err(err).Msg("Invalid limit parameter for history")
			s.writeError(w, "Invalid limit parameter", http.StatusBadRequest)
			return
		}
		limit = parsed
	}
	if limit < 0 {
		limit = 0
	}
	response := HistoryResponse{History: s.getHistory(r.Context(), limit)}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleConfig serves the SABnzbd-shaped configuration object.
func (s *SABnzbd) handleConfig(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, ConfigResponse{Config: s.config}, http.StatusOK)
}
// handleAddURL handles adding NZB by URL (SABnzbd "addurl" mode).
// The URL is carried in the "name" form parameter.
func (s *SABnzbd) handleAddURL(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	_arr := getArrFromContext(ctx)
	cat := getCategory(ctx)
	if _arr == nil {
		// If Arr is not in context, create a new one with default values
		_arr = arr.New(cat, "", "", false, false, nil, "", "")
	}
	if r.Method != http.MethodPost {
		s.logger.Warn().Str("method", r.Method).Msg("Invalid method")
		s.writeError(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	url := r.FormValue("name")
	action := r.FormValue("action")
	downloadDir := r.FormValue("download_dir")
	if action == "" {
		action = "symlink" // default import action
	}
	if downloadDir == "" {
		// Fall back to the configured completed-download directory.
		downloadDir = s.config.Misc.DownloadDir
	}
	if url == "" {
		s.writeError(w, "URL is required", http.StatusBadRequest)
		return
	}
	nzoID, err := s.addNZBURL(ctx, url, _arr, action, downloadDir)
	if err != nil {
		s.writeError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if nzoID == "" {
		s.writeError(w, "Failed to add NZB", http.StatusInternalServerError)
		return
	}
	response := AddNZBResponse{
		Status: true,
		NzoIds: []string{nzoID},
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleAddFile handles NZB file uploads (SABnzbd "addfile" mode).
// The file is sent as a multipart form field named "name".
func (s *SABnzbd) handleAddFile(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	_arr := getArrFromContext(ctx)
	cat := getCategory(ctx)
	if _arr == nil {
		// If Arr is not in context, create a new one with default values
		_arr = arr.New(cat, "", "", false, false, nil, "", "")
	}
	if r.Method != http.MethodPost {
		s.writeError(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Parse multipart form
	err := r.ParseMultipartForm(32 << 20) // 32 MB limit
	if err != nil {
		s.writeError(w, "Failed to parse multipart form", http.StatusBadRequest)
		return
	}
	file, header, err := r.FormFile("name")
	if err != nil {
		s.writeError(w, "No file uploaded", http.StatusBadRequest)
		return
	}
	defer file.Close()
	// Read file content
	content, err := io.ReadAll(file)
	if err != nil {
		s.writeError(w, "Failed to read file", http.StatusInternalServerError)
		return
	}
	action := r.FormValue("action")
	downloadDir := r.FormValue("download_dir")
	if action == "" {
		action = "symlink" // default import action
	}
	if downloadDir == "" {
		// Fall back to the configured completed-download directory.
		downloadDir = s.config.Misc.DownloadDir
	}
	// Process NZB file
	nzbID, err := s.addNZBFile(ctx, content, header.Filename, _arr, action, downloadDir)
	if err != nil {
		s.writeError(w, fmt.Sprintf("Failed to add NZB file: %s", err.Error()), http.StatusInternalServerError)
		return
	}
	if nzbID == "" {
		s.writeError(w, "Failed to add NZB file", http.StatusInternalServerError)
		return
	}
	response := AddNZBResponse{
		Status: true,
		NzoIds: []string{nzbID},
	}
	request.JSONResponse(w, response, http.StatusOK)
}
// handleVersion reports the emulated SABnzbd version.
func (s *SABnzbd) handleVersion(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, VersionResponse{Version: Version}, http.StatusOK)
}

// handleGetCategories returns the configured categories.
func (s *SABnzbd) handleGetCategories(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, s.getCategories(), http.StatusOK)
}

// handleGetScripts returns the available post-processing scripts; this shim
// exposes only "None".
func (s *SABnzbd) handleGetScripts(w http.ResponseWriter, r *http.Request) {
	request.JSONResponse(w, []string{"None"}, http.StatusOK)
}
// handleGetFiles returns the file names contained in the NZB identified by
// the "value" parameter; an unknown or missing ID yields an empty list.
func (s *SABnzbd) handleGetFiles(w http.ResponseWriter, r *http.Request) {
	var files []string
	if nzoID := r.FormValue("value"); nzoID != "" {
		if nzb := s.usenet.Store().Get(nzoID); nzb != nil {
			for _, file := range nzb.Files {
				files = append(files, file.Name)
			}
		}
	}
	request.JSONResponse(w, files, http.StatusOK)
}
// handleStatus serves the (minimal) status/fullstatus payload; only the
// completed-download directory is reported.
func (s *SABnzbd) handleStatus(w http.ResponseWriter, r *http.Request) {
	type status struct {
		CompletedDir string `json:"completed_dir"`
	}
	payload := struct {
		Status status `json:"status"`
	}{
		Status: status{CompletedDir: s.config.Misc.DownloadDir},
	}
	request.JSONResponse(w, payload, http.StatusOK)
}
// Helper methods

// getHistory builds the SABnzbd history payload for the category found in
// ctx, capped at limit entries (0 = unlimited, per the store's contract).
func (s *SABnzbd) getHistory(ctx context.Context, limit int) History {
	items := s.usenet.Store().GetHistory(getCategory(ctx), limit)
	history := History{
		Version: Version,
		Paused:  false,
		Slots:   make([]HistorySlot, 0, len(items)),
	}
	for _, item := range items {
		history.Slots = append(history.Slots, HistorySlot{
			Status:      s.mapNZBStatus(item.Status),
			Name:        item.Name,
			NZBName:     item.Name,
			NzoId:       item.ID,
			Category:    item.Category,
			FailMessage: item.FailMessage,
			Bytes:       item.TotalSize,
			Storage:     item.Storage,
		})
	}
	return history
}
// writeError sends a SABnzbd-style failure response with the given HTTP
// status code.
func (s *SABnzbd) writeError(w http.ResponseWriter, message string, status int) {
	request.JSONResponse(w, StatusResponse{Status: false, Error: message}, status)
}
// mapNZBStatus translates an internal NZB status string into the SABnzbd
// status constant reported to clients; anything unrecognized maps to Queued.
func (s *SABnzbd) mapNZBStatus(status string) string {
	switch status {
	case "error", "failed":
		return StatusFailed
	case "downloading":
		return StatusDownloading
	case "completed":
		return StatusCompleted
	case "paused":
		return StatusPaused
	case "processing":
		return StatusProcessing
	case "verifying":
		return StatusVerifying
	case "repairing":
		return StatusRepairing
	case "extracting":
		return StatusExtracting
	case "moving":
		return StatusMoving
	case "running":
		return StatusRunning
	}
	return StatusQueued
}
// addNZBURL downloads the NZB at url and hands its content to addNZBFile.
// It returns the new NZB's ID.
func (s *SABnzbd) addNZBURL(ctx context.Context, url string, arr *arr.Arr, action, downloadDir string) (string, error) {
	if url == "" {
		return "", fmt.Errorf("URL is required")
	}
	// Download NZB content
	filename, content, err := utils.DownloadFile(url)
	if err != nil {
		s.logger.Error().Err(err).Str("url", url).Msg("Failed to download NZB from URL")
		return "", fmt.Errorf("failed to download NZB from URL: %w", err)
	}
	if len(content) == 0 {
		// An empty body is treated as a failure rather than queued as-is.
		s.logger.Warn().Str("url", url).Msg("Downloaded content is empty")
		return "", fmt.Errorf("downloaded content is empty")
	}
	return s.addNZBFile(ctx, content, filename, arr, action, downloadDir)
}
// addNZBFile submits raw NZB content to the usenet engine for processing
// and returns the resulting NZB's ID.
func (s *SABnzbd) addNZBFile(ctx context.Context, content []byte, filename string, arr *arr.Arr, action, downloadDir string) (string, error) {
	if s.usenet == nil {
		return "", fmt.Errorf("store not initialized")
	}
	req := &usenet.ProcessRequest{
		NZBContent:  content,
		Name:        filename,
		Arr:         arr,
		Action:      action,
		DownloadDir: downloadDir,
	}
	nzb, err := s.usenet.ProcessNZB(ctx, req)
	if err != nil {
		return "", fmt.Errorf("failed to process NZB: %w", err)
	}
	return nzb.ID, nil
}

View File

@@ -1,24 +0,0 @@
package sabnzbd
import (
"net/http"
"github.com/go-chi/chi/v5"
)
// Routes builds the SABnzbd-compatible HTTP router. All API traffic goes
// through /api with a "mode" parameter, mirroring the real SABnzbd API.
func (s *SABnzbd) Routes() http.Handler {
	r := chi.NewRouter()
	// Order matters: category is resolved first because authContext reads it.
	r.Use(s.categoryContext)
	r.Use(s.authContext)
	// SABnzbd API endpoints - all under /api with mode parameter
	r.Route("/api", func(r chi.Router) {
		r.Use(s.modeContext)
		// Queue operations
		r.Get("/", s.handleAPI)
		r.Post("/", s.handleAPI)
	})
	return r
}

View File

@@ -1,116 +0,0 @@
package sabnzbd
import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/usenet"
"path/filepath"
)
// SABnzbd emulates the SABnzbd HTTP API on top of the usenet engine so that
// download-client consumers (e.g. the Arrs) can talk to this service.
type SABnzbd struct {
	downloadFolder    string // completed-download directory exposed to clients
	config            *Config // SABnzbd-shaped config served by the config API modes
	refreshInterval   int
	logger            zerolog.Logger
	usenet            usenet.Usenet // backing usenet engine
	defaultCategories []string // extra categories from config, beyond Arr-derived ones
}
// New builds a SABnzbd shim around the given usenet client, seeding its
// category list and configuration from the application config.
func New(usenetClient usenet.Usenet) *SABnzbd {
	appCfg := config.Get()
	sabCfg := appCfg.SABnzbd
	var defaultCategories []string
	for _, cat := range sabCfg.Categories {
		if cat != "" {
			defaultCategories = append(defaultCategories, cat)
		}
	}
	sb := &SABnzbd{
		downloadFolder:    sabCfg.DownloadFolder,
		refreshInterval:   sabCfg.RefreshInterval,
		logger:            logger.New("sabnzbd"),
		usenet:            usenetClient,
		defaultCategories: defaultCategories,
	}
	sb.SetConfig(appCfg)
	return sb
}
// SetConfig rebuilds the SABnzbd-shaped configuration object from the
// application config, including the usenet server list.
func (s *SABnzbd) SetConfig(cfg *config.Config) {
	sabnzbdConfig := &Config{
		Misc: MiscConfig{
			CompleteDir:   s.downloadFolder,
			DownloadDir:   s.downloadFolder,
			AdminDir:      s.downloadFolder,
			WebPort:       cfg.Port,
			Language:      "en",
			RefreshRate:   "1",
			QueueComplete: "0",
			ConfigLock:    "0",
			Autobrowser:   "1",
			CheckNewRel:   "1",
		},
		Categories: s.getCategories(),
	}
	// BUGFIX: the previous condition was
	//   cfg.Usenet != nil || len(cfg.Usenet.Providers) == 0
	// which dereferences a nil cfg.Usenet when usenet is not configured
	// (panic) and, when it is configured, enters the loop regardless of the
	// provider count. A plain nil guard is all that is needed; the loop
	// itself handles an empty provider list.
	if cfg.Usenet != nil {
		for _, provider := range cfg.Usenet.Providers {
			// Skip providers that are missing required connection details.
			if provider.Host == "" || provider.Port == 0 {
				continue
			}
			sabnzbdConfig.Servers = append(sabnzbdConfig.Servers, Server{
				Name:        provider.Name,
				Host:        provider.Host,
				Port:        provider.Port,
				Username:    provider.Username,
				Password:    provider.Password,
				Connections: provider.Connections,
				SSL:         provider.SSL,
			})
		}
	}
	s.config = sabnzbdConfig
}
// getCategories returns the category list: one per configured Arr plus any
// default categories from config, deduplicated by name.
func (s *SABnzbd) getCategories() []Category {
	_store := store.Get()
	arrs := _store.Arr().GetAll()
	categories := make([]Category, 0, len(arrs))
	added := map[string]struct{}{}
	for i, a := range arrs {
		if _, ok := added[a.Name]; ok {
			continue // Skip if category already added
		}
		categories = append(categories, Category{
			Name:     a.Name,
			Order:    i + 1,
			Pp:       "3",
			Script:   "None",
			Dir:      filepath.Join(s.downloadFolder, a.Name),
			Priority: PriorityNormal,
		})
		// BUGFIX: the name was never recorded here, so the dedupe check
		// above (and the default-category check below) never matched
		// Arr-derived categories.
		added[a.Name] = struct{}{}
	}
	// Add default categories if not already present
	for _, defaultCat := range s.defaultCategories {
		if _, ok := added[defaultCat]; ok {
			continue // Skip if default category already added
		}
		categories = append(categories, Category{
			Name:     defaultCat,
			Order:    len(categories) + 1,
			Pp:       "3",
			Script:   "None",
			Dir:      filepath.Join(s.downloadFolder, defaultCat),
			Priority: PriorityNormal,
		})
		added[defaultCat] = struct{}{}
	}
	return categories
}
// Reset clears any runtime state held by the shim. Currently a no-op:
// everything is rebuilt from config via SetConfig.
func (s *SABnzbd) Reset() {
}

View File

@@ -1,150 +0,0 @@
package sabnzbd
// SABnzbd API response types based on official documentation

// Version is the SABnzbd version string advertised to clients.
var (
	Version = "4.5.0"
)

// QueueResponse represents the queue status response
type QueueResponse struct {
	Queue   Queue  `json:"queue"`
	Status  bool   `json:"status"`
	Version string `json:"version"`
}

// Queue represents the download queue
type Queue struct {
	Version string      `json:"version"`
	Slots   []QueueSlot `json:"slots"`
}

// QueueSlot represents a download in the queue
type QueueSlot struct {
	Status     string  `json:"status"`
	TimeLeft   string  `json:"timeleft"` // "HH:MM:SS" remaining-time string
	Mb         int64   `json:"mb"`
	Filename   string  `json:"filename"`
	Priority   string  `json:"priority"`
	Cat        string  `json:"cat"`
	MBLeft     int64   `json:"mbleft"`
	Percentage float64 `json:"percentage"`
	NzoId      string  `json:"nzo_id"`
	Size       int64   `json:"size"`
}

// HistoryResponse represents the history response
type HistoryResponse struct {
	History History `json:"history"`
}

// History represents the download history
type History struct {
	Version string        `json:"version"`
	Paused  bool          `json:"paused"`
	Slots   []HistorySlot `json:"slots"`
}

// HistorySlot represents a completed download
type HistorySlot struct {
	Status      string `json:"status"`
	Name        string `json:"name"`
	NZBName     string `json:"nzb_name"`
	NzoId       string `json:"nzo_id"`
	Category    string `json:"category"`
	FailMessage string `json:"fail_message"`
	Bytes       int64  `json:"bytes"`
	Storage     string `json:"storage"` // final on-disk location
}

// StageLog represents processing stages
type StageLog struct {
	Name    string   `json:"name"`
	Actions []string `json:"actions"`
}

// VersionResponse represents version information
type VersionResponse struct {
	Version string `json:"version"`
}

// StatusResponse represents general status
type StatusResponse struct {
	Status bool   `json:"status"`
	Error  string `json:"error,omitempty"`
}

// FullStatusResponse represents the full status response with queue and history
type FullStatusResponse struct {
	Queue   Queue   `json:"queue"`
	History History `json:"history"`
	Status  bool    `json:"status"`
	Version string  `json:"version"`
}

// AddNZBRequest represents the request to add an NZB
type AddNZBRequest struct {
	Name     string `json:"name"`
	Cat      string `json:"cat"`
	Script   string `json:"script"`
	Priority string `json:"priority"`
	PP       string `json:"pp"`
	Password string `json:"password"`
	NZBData  []byte `json:"nzb_data"`
	URL      string `json:"url"`
}

// AddNZBResponse represents the response when adding an NZB
type AddNZBResponse struct {
	Status bool     `json:"status"`
	NzoIds []string `json:"nzo_ids"`
	Error  string   `json:"error,omitempty"`
}
// API Mode constants — accepted values of the "mode" request parameter.
const (
	ModeQueue      = "queue"
	ModeHistory    = "history"
	ModeConfig     = "config"
	ModeGetConfig  = "get_config"
	ModeAddURL     = "addurl"
	ModeAddFile    = "addfile"
	ModeVersion    = "version"
	ModePause      = "pause"
	ModeResume     = "resume"
	ModeDelete     = "delete"
	ModeShutdown   = "shutdown"
	ModeRestart    = "restart"
	ModeGetCats    = "get_cats"
	ModeGetScripts = "get_scripts"
	ModeGetFiles   = "get_files"
	ModeRetry      = "retry"
	ModeStatus     = "status"
	ModeFullStatus = "fullstatus"
)

// Status constants — values reported in queue/history slots.
// NOTE(review): StatusDownloading is lowercase ("downloading") while every
// other status is capitalized; real SABnzbd reports "Downloading" — confirm
// how downstream consumers parse this before changing the value.
const (
	StatusQueued      = "Queued"
	StatusPaused      = "Paused"
	StatusDownloading = "downloading"
	StatusProcessing  = "Processing"
	StatusCompleted   = "Completed"
	StatusFailed      = "Failed"
	StatusGrabbing    = "Grabbing"
	StatusPropagating = "Propagating"
	StatusVerifying   = "Verifying"
	StatusRepairing   = "Repairing"
	StatusExtracting  = "Extracting"
	StatusMoving      = "Moving"
	StatusRunning     = "Running"
)

// Priority constants — SABnzbd numeric priorities, serialized as strings.
const (
	PriorityForced = "2"
	PriorityHigh   = "1"
	PriorityNormal = "0"
	PriorityLow    = "-1"
	PriorityStop   = "-2"
)

View File

@@ -1,141 +0,0 @@
package server
import (
"fmt"
"github.com/go-chi/chi/v5"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/request"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"github.com/sirrobot01/decypharr/pkg/store"
"net/http"
"runtime"
)
// handleIngests returns ingest data aggregated across every debrid cache.
// The first cache that fails aborts the whole response with a 500.
func (s *Server) handleIngests(w http.ResponseWriter, r *http.Request) {
	ingests := make([]debridTypes.IngestData, 0)
	_store := store.Get()
	debrids := _store.Debrid()
	if debrids == nil {
		http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
		return
	}
	for _, cache := range debrids.Caches() {
		if cache == nil {
			s.logger.Error().Msg("Debrid cache is nil, skipping")
			continue
		}
		data, err := cache.GetIngests()
		if err != nil {
			s.logger.Error().Err(err).Msg("Failed to get ingests from debrid cache")
			http.Error(w, "Failed to get ingests: "+err.Error(), http.StatusInternalServerError)
			return
		}
		ingests = append(ingests, data...)
	}
	request.JSONResponse(w, ingests, 200)
}
// handleIngestsByDebrid returns ingest data for a single debrid provider,
// identified by the {debrid} URL parameter.
func (s *Server) handleIngestsByDebrid(w http.ResponseWriter, r *http.Request) {
	debridName := chi.URLParam(r, "debrid")
	if debridName == "" {
		http.Error(w, "Debrid name is required", http.StatusBadRequest)
		return
	}
	_store := store.Get()
	debrids := _store.Debrid()
	if debrids == nil {
		http.Error(w, "Debrid service is not enabled", http.StatusInternalServerError)
		return
	}
	caches := debrids.Caches()
	cache, exists := caches[debridName]
	if !exists {
		http.Error(w, "Debrid cache not found: "+debridName, http.StatusNotFound)
		return
	}
	data, err := cache.GetIngests()
	if err != nil {
		s.logger.Error().Err(err).Msg("Failed to get ingests from debrid cache")
		http.Error(w, "Failed to get ingests: "+err.Error(), http.StatusInternalServerError)
		return
	}
	request.JSONResponse(w, data, 200)
}
// handleStats reports runtime statistics (memory, GC, goroutines, system
// info), plus per-debrid profiles and usenet connection-pool stats when
// those subsystems are enabled.
func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	stats := map[string]any{
		// Memory stats
		"heap_alloc_mb":  fmt.Sprintf("%.2fMB", float64(memStats.HeapAlloc)/1024/1024),
		"total_alloc_mb": fmt.Sprintf("%.2fMB", float64(memStats.TotalAlloc)/1024/1024),
		"memory_used":    fmt.Sprintf("%.2fMB", float64(memStats.Sys)/1024/1024),
		// GC stats
		"gc_cycles": memStats.NumGC,
		// Goroutine stats
		"goroutines": runtime.NumGoroutine(),
		// System info
		"num_cpu": runtime.NumCPU(),
		// OS info
		"os":         runtime.GOOS,
		"arch":       runtime.GOARCH,
		"go_version": runtime.Version(),
	}
	debrids := store.Get().Debrid()
	if debrids == nil {
		request.JSONResponse(w, stats, http.StatusOK)
		return
	}
	clients := debrids.Clients()
	caches := debrids.Caches()
	profiles := make([]*debridTypes.Profile, 0, len(clients))
	for debridName, client := range clients {
		profile, err := client.GetProfile()
		if err != nil {
			// BUGFIX: check the error before touching profile — the previous
			// code assigned profile.Name first, which panics when GetProfile
			// fails and returns a nil profile.
			s.logger.Error().Err(err).Msg("Failed to get debrid profile")
			continue
		}
		profile.Name = debridName
		cache, ok := caches[debridName]
		if ok {
			// Get torrent data
			profile.LibrarySize = cache.TotalTorrents()
			profile.BadTorrents = len(cache.GetListing("__bad__"))
			profile.ActiveLinks = cache.GetTotalActiveDownloadLinks()
		}
		profiles = append(profiles, profile)
	}
	stats["debrids"] = profiles
	if s.usenet != nil {
		if client := s.usenet.Client(); client != nil {
			// Unified on `any` to match the map declaration above.
			usenetsData := make([]map[string]any, 0)
			client.Pools().Range(func(key string, value *nntp.Pool) bool {
				if value != nil {
					providerData := make(map[string]any)
					providerData["name"] = key
					providerData["active_connections"] = value.ActiveConnections()
					providerData["total_connections"] = value.ConnectionCount()
					usenetsData = append(usenetsData, providerData)
				}
				return true
			})
			stats["usenet"] = usenetsData
		}
	}
	request.JSONResponse(w, stats, http.StatusOK)
}

View File

@@ -9,19 +9,20 @@ import (
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/usenet"
"github.com/sirrobot01/decypharr/internal/request"
"io"
"net/http"
"net/url"
"os"
"runtime"
)
type Server struct {
router *chi.Mux
logger zerolog.Logger
usenet usenet.Usenet
}
func New(usenet usenet.Usenet, handlers map[string]http.Handler) *Server {
func New(handlers map[string]http.Handler) *Server {
l := logger.New("http")
r := chi.NewRouter()
r.Use(middleware.Recoverer)
@@ -30,8 +31,11 @@ func New(usenet usenet.Usenet, handlers map[string]http.Handler) *Server {
s := &Server{
logger: l,
usenet: usenet,
}
staticPath, _ := url.JoinPath(cfg.URLBase, "static")
r.Handle(staticPath+"/*",
http.StripPrefix(staticPath, http.FileServer(http.Dir("static"))),
)
r.Route(cfg.URLBase, func(r chi.Router) {
for pattern, handler := range handlers {
@@ -41,12 +45,8 @@ func New(usenet usenet.Usenet, handlers map[string]http.Handler) *Server {
//logs
r.Get("/logs", s.getLogs)
//debugs
r.Route("/debug", func(r chi.Router) {
r.Get("/stats", s.handleStats)
r.Get("/ingests", s.handleIngests)
r.Get("/ingests/{debrid}", s.handleIngestsByDebrid)
})
//stats
r.Get("/stats", s.getStats)
//webhooks
r.Post("/webhooks/tautulli", s.handleTautulli)
@@ -68,7 +68,7 @@ func (s *Server) Start(ctx context.Context) error {
go func() {
if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
s.logger.Error().Err(err).Msgf("Error starting server")
s.logger.Info().Msgf("Error starting server: %v", err)
}
}()
@@ -101,5 +101,36 @@ func (s *Server) getLogs(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Expires", "0")
// Stream the file
_, _ = io.Copy(w, file)
_, err = io.Copy(w, file)
if err != nil {
s.logger.Error().Err(err).Msg("Error streaming log file")
http.Error(w, "Error streaming log file", http.StatusInternalServerError)
return
}
}
// getStats reports process-level runtime statistics (memory usage, GC
// cycles, goroutine count and host info) as a JSON payload.
func (s *Server) getStats(w http.ResponseWriter, r *http.Request) {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Render raw byte counts as human-readable megabytes.
	toMB := func(b uint64) string {
		return fmt.Sprintf("%.2fMB", float64(b)/1024/1024)
	}

	stats := map[string]interface{}{
		// Memory stats
		"heap_alloc_mb":  toMB(ms.HeapAlloc),
		"total_alloc_mb": toMB(ms.TotalAlloc),
		"memory_used":    toMB(ms.Sys),
		// GC stats
		"gc_cycles": ms.NumGC,
		// Goroutine stats
		"goroutines": runtime.NumGoroutine(),
		// System info
		"num_cpu": runtime.NumCPU(),
		// OS info
		"os":         runtime.GOOS,
		"arch":       runtime.GOARCH,
		"go_version": runtime.Version(),
	}
	request.JSONResponse(w, stats, http.StatusOK)
}

View File

@@ -3,7 +3,7 @@ package server
import (
"cmp"
"encoding/json"
"github.com/sirrobot01/decypharr/pkg/store"
"github.com/sirrobot01/decypharr/pkg/service"
"net/http"
)
@@ -38,7 +38,8 @@ func (s *Server) handleTautulli(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Invalid ID", http.StatusBadRequest)
return
}
repair := store.Get().Repair()
svc := service.GetService()
repair := svc.Repair
mediaId := cmp.Or(payload.TmdbID, payload.TvdbID)

53
pkg/service/service.go Normal file
View File

@@ -0,0 +1,53 @@
package service
import (
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid/debrid"
"github.com/sirrobot01/decypharr/pkg/repair"
"sync"
)
// Service aggregates the application's long-lived subsystems so they can
// be shared and torn down as a single unit.
type Service struct {
	Repair *repair.Repair // repair pipeline, wired to Arr and Debrid below
	Arr    *arr.Storage   // *arr (Sonarr/Radarr/...) instance storage
	Debrid *debrid.Engine // debrid provider engine
}

var (
	instance *Service  // process-wide singleton, guarded by once
	once     sync.Once // ensures instance is built exactly once
)
// GetService returns the singleton instance,
// lazily constructing the Arr storage, Debrid engine and Repair pipeline
// on first call; every subsequent call returns the same *Service.
func GetService() *Service {
	once.Do(func() {
		arrs := arr.NewStorage()
		deb := debrid.NewEngine()
		instance = &Service{
			Repair: repair.New(arrs, deb),
			Arr:    arrs,
			Debrid: deb,
		}
	})
	return instance
}
// Reset tears down the current singleton so the next GetService call
// rebuilds everything from scratch. Only the debrid engine currently
// has shutdown work; the empty placeholder branches for Arr and Repair
// were removed (reinstate them when those types gain a Reset).
//
// NOTE(review): re-arming a sync.Once is not safe if Reset can race with
// concurrent GetService callers — confirm Reset only runs during
// single-threaded reconfiguration.
func Reset() {
	if instance != nil && instance.Debrid != nil {
		instance.Debrid.Reset()
	}
	once = sync.Once{}
	instance = nil
}
// GetDebrid is a convenience accessor for the singleton's debrid engine.
func GetDebrid() *debrid.Engine {
	return GetService().Debrid
}

View File

@@ -1,141 +0,0 @@
package store
import (
"context"
"fmt"
"time"
)
// addToQueue validates importReq and places it on the imports queue,
// resetting its lifecycle fields so it reads as freshly queued.
// It returns an error when a required field is missing or the queue
// rejects the push (full or shutting down).
func (s *Store) addToQueue(importReq *ImportRequest) error {
	if importReq.Magnet == nil {
		return fmt.Errorf("magnet is required")
	}
	if importReq.Arr == nil {
		return fmt.Errorf("arr is required")
	}
	importReq.Status = "queued"
	importReq.CompletedAt = time.Time{}
	importReq.Error = nil
	// Push's error is returned directly (the old code round-tripped it
	// through a redundant if/return nil).
	return s.importsQueue.Push(importReq)
}
// StartQueueSchedule launches the store's background maintenance loops:
// one drains the imports queue as debrid slots free up, the other prunes
// stalled torrents. Both run until ctx is cancelled.
func (s *Store) StartQueueSchedule(ctx context.Context) error {
	spawn := func(name string, loop func(context.Context) error) {
		go func() {
			if err := loop(ctx); err != nil {
				s.logger.Error().Err(err).Msg("Error processing " + name)
			}
		}()
	}
	spawn("slots queue", s.processSlotsQueue)
	spawn("remove stalled torrents", s.processRemoveStalledTorrents)
	return nil
}
// processSlotsQueue re-evaluates available debrid slots every 30 seconds,
// draining queued imports when capacity opens up. Blocks until ctx ends.
func (s *Store) processSlotsQueue(ctx context.Context) error {
	// Run one pass up front so a full queue does not wait a whole tick.
	s.trackAvailableSlots(ctx)

	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			s.trackAvailableSlots(ctx)
		case <-ctx.Done():
			return nil
		}
	}
}
// processRemoveStalledTorrents prunes stalled torrents once per minute.
// It is a no-op when removeStalledAfter is unset (<= 0). Runs until ctx
// is cancelled.
func (s *Store) processRemoveStalledTorrents(ctx context.Context) error {
	if s.removeStalledAfter <= 0 {
		// Feature disabled: nothing to prune, exit immediately.
		return nil
	}

	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			err := s.removeStalledTorrents(ctx)
			if err != nil {
				s.logger.Error().Err(err).Msg("Error removing stalled torrents")
			}
		}
	}
}
// trackAvailableSlots drains the imports queue using whatever download
// slots the configured debrid providers currently have free. For each
// provider with free slots it pops and processes queued imports until
// slots are exhausted, the queue errors, or ctx is cancelled.
//
// The empty-queue check now runs first so we do not hit every provider's
// GetAvailableSlots API when there is nothing to process (the original
// queried all providers before checking the queue).
func (s *Store) trackAvailableSlots(ctx context.Context) {
	if s.importsQueue.Size() <= 0 {
		// Queue is empty, no need to process
		return
	}
	// Snapshot free slots per provider; providers whose slot query fails
	// are skipped for this round.
	availableSlots := make(map[string]int)
	for name, deb := range s.debrid.Debrids() {
		slots, err := deb.Client().GetAvailableSlots()
		if err != nil {
			continue
		}
		availableSlots[name] = slots
	}
	for _, slots := range availableSlots {
		// If slots are available, process the next import request from the queue
		for slots > 0 {
			select {
			case <-ctx.Done():
				return // Exit if context is done
			default:
				if err := s.processFromQueue(ctx); err != nil {
					s.logger.Error().Err(err).Msg("Error processing from queue")
					return // Exit on error
				}
				slots-- // Decrease the available slots after processing
			}
		}
	}
}
// processFromQueue pops one import request off the queue and hands it to
// AddTorrent. A nil request (nothing queued) is treated as success.
func (s *Store) processFromQueue(ctx context.Context) error {
	importReq, err := s.importsQueue.Pop()
	switch {
	case err != nil:
		return err
	case importReq == nil:
		return nil
	default:
		return s.AddTorrent(ctx, importReq)
	}
}
// removeStalledTorrents deletes every torrent that has been stalled for
// longer than removeStalledAfter, removing each one from the local store
// and from its debrid provider.
func (s *Store) removeStalledTorrents(ctx context.Context) error {
	stalled := s.torrents.GetStalledTorrents(s.removeStalledAfter)
	for _, torrent := range stalled {
		s.logger.Warn().Msgf("Removing stalled torrent: %s", torrent.Name)
		// Final true => also delete the torrent on the debrid side.
		s.torrents.Delete(torrent.Hash, torrent.Category, true)
	}
	return nil
}

View File

@@ -1,239 +0,0 @@
package store
import (
"bytes"
"cmp"
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
"github.com/sirrobot01/decypharr/pkg/arr"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid/types"
"net/http"
"net/url"
"sync"
"time"
)
// ImportType identifies which entry point created an import request.
type ImportType string

const (
	// ImportTypeQBitTorrent marks requests from the qBittorrent-compatible API.
	ImportTypeQBitTorrent ImportType = "qbit"
	// ImportTypeAPI marks requests from the native HTTP API.
	ImportTypeAPI ImportType = "api"
)

// ImportRequest describes a single magnet import job: which debrid
// provider to use, the target arr, the post-download action, and the
// lifecycle fields (Status/CompletedAt/Error) updated as it progresses.
type ImportRequest struct {
	Id               string        `json:"id"`
	DownloadFolder   string        `json:"downloadFolder"`
	SelectedDebrid   string        `json:"debrid"`
	Magnet           *utils.Magnet `json:"magnet"`
	Arr              *arr.Arr      `json:"arr"`
	Action           string        `json:"action"`
	DownloadUncached bool          `json:"downloadUncached"`
	CallBackUrl      string        `json:"callBackUrl"`
	Status           string        `json:"status"` // "started", "queued", "completed" or "failed"
	CompletedAt      time.Time     `json:"completedAt,omitempty"`
	// NOTE(review): error values typically marshal to "{}" with
	// encoding/json — confirm consumers do not rely on this JSON field.
	Error error      `json:"error,omitempty"`
	Type  ImportType `json:"type"`
	Async bool       `json:"async"`
}
// NewImportRequest builds an ImportRequest with a fresh UUID and initial
// status "started". The arr's own SelectedDebrid, when set, takes
// precedence over the debrid argument.
func NewImportRequest(debrid string, downloadFolder string, magnet *utils.Magnet, arr *arr.Arr, action string, downloadUncached bool, callBackUrl string, importType ImportType) *ImportRequest {
	return &ImportRequest{
		Id:               uuid.New().String(),
		Status:           "started",
		DownloadFolder:   downloadFolder,
		SelectedDebrid:   cmp.Or(arr.SelectedDebrid, debrid), // Use debrid from arr if available
		Magnet:           magnet,
		Arr:              arr,
		Action:           action,
		DownloadUncached: downloadUncached,
		CallBackUrl:      callBackUrl,
		Type:             importType,
	}
}
// importResponse is the JSON body POSTed to an import request's
// callback URL once the request reaches a terminal state.
type importResponse struct {
	Status      string               `json:"status"`
	CompletedAt time.Time            `json:"completedAt"`
	Error       error                `json:"error"` // NOTE(review): error marshals as "{}" — verify consumers
	Torrent     *Torrent             `json:"torrent"`
	Debrid      *debridTypes.Torrent `json:"debrid"`
}
// sendCallback POSTs the request's final state (plus the resulting
// torrent records) to CallBackUrl as JSON. It is best-effort: an empty
// or malformed URL, a marshal failure, or a transport error is silently
// ignored.
func (i *ImportRequest) sendCallback(torrent *Torrent, debridTorrent *debridTypes.Torrent) {
	if i.CallBackUrl == "" {
		return
	}
	// Check if the callback URL is valid
	if _, err := url.ParseRequestURI(i.CallBackUrl); err != nil {
		return
	}
	client := request.New()
	payload, err := json.Marshal(&importResponse{
		Status:      i.Status,
		Error:       i.Error,
		CompletedAt: i.CompletedAt,
		Torrent:     torrent,
		Debrid:      debridTorrent,
	})
	if err != nil {
		return
	}
	req, err := http.NewRequest("POST", i.CallBackUrl, bytes.NewReader(payload))
	if err != nil {
		return
	}
	req.Header.Set("Content-Type", "application/json")
	// Close the response body so the connection is not leaked (the
	// original discarded the *http.Response without closing it).
	if resp, err := client.Do(req); err == nil && resp != nil && resp.Body != nil {
		_ = resp.Body.Close()
	}
}
// markAsFailed records err as the terminal state of the request and
// notifies the callback URL, if one is configured.
func (i *ImportRequest) markAsFailed(err error, torrent *Torrent, debridTorrent *debridTypes.Torrent) {
	i.CompletedAt = time.Now()
	i.Status = "failed"
	i.Error = err
	i.sendCallback(torrent, debridTorrent)
}

// markAsCompleted records successful completion, clears any stale error,
// and notifies the callback URL, if one is configured.
func (i *ImportRequest) markAsCompleted(torrent *Torrent, debridTorrent *debridTypes.Torrent) {
	i.CompletedAt = time.Now()
	i.Status = "completed"
	i.Error = nil
	i.sendCallback(torrent, debridTorrent)
}
// ImportQueue is a bounded, mutex-guarded FIFO of pending import
// requests with cancellation support.
type ImportQueue struct {
	queue  []*ImportRequest
	mu     sync.RWMutex
	ctx    context.Context
	cancel context.CancelFunc
	cond   *sync.Cond // For blocking operations
}

// NewImportQueue creates a queue bounded at capacity, derived from ctx
// so that Close (or cancellation of the parent context) shuts it down.
func NewImportQueue(ctx context.Context, capacity int) *ImportQueue {
	ctx, cancel := context.WithCancel(ctx)
	iq := &ImportQueue{
		queue:  make([]*ImportRequest, 0, capacity),
		ctx:    ctx,
		cancel: cancel,
	}
	iq.cond = sync.NewCond(&iq.mu)
	return iq
}
// Push appends req to the queue. It fails when req is nil, the queue is
// shutting down, or the queue is at capacity.
//
// NOTE(review): the bound is cap(iq.queue); if the backing array's
// capacity ever shrinks (e.g. via reslicing in Pop) the effective limit
// shrinks with it — confirm Pop preserves capacity.
func (iq *ImportQueue) Push(req *ImportRequest) error {
	if req == nil {
		return fmt.Errorf("import request cannot be nil")
	}
	iq.mu.Lock()
	defer iq.mu.Unlock()
	// Non-blocking shutdown check.
	select {
	case <-iq.ctx.Done():
		return fmt.Errorf("queue is shutting down")
	default:
	}
	if len(iq.queue) >= cap(iq.queue) {
		return fmt.Errorf("queue is full")
	}
	iq.queue = append(iq.queue, req)
	iq.cond.Signal() // Wake up any waiting Pop()
	return nil
}
// Pop removes and returns the oldest queued request. It returns an error
// when the queue is shutting down or empty; it never blocks.
//
// The head element is shifted out in place instead of reslicing
// iq.queue[1:]: reslicing permanently shrinks the backing array's
// capacity, which silently lowered Push's cap-based bound with every
// Pop. The vacated slot is also nilled so the popped request can be
// garbage-collected instead of lingering in the backing array.
func (iq *ImportQueue) Pop() (*ImportRequest, error) {
	iq.mu.Lock()
	defer iq.mu.Unlock()

	select {
	case <-iq.ctx.Done():
		return nil, fmt.Errorf("queue is shutting down")
	default:
	}

	if len(iq.queue) == 0 {
		return nil, fmt.Errorf("no import requests available")
	}

	req := iq.queue[0]
	n := len(iq.queue)
	copy(iq.queue, iq.queue[1:])
	iq.queue[n-1] = nil // release the reference held by the backing array
	iq.queue = iq.queue[:n-1]
	return req, nil
}
// Delete removes the request with the given ID, reporting whether it was
// found. The vacated tail slot is cleared so the removed request does
// not stay reachable through the backing array (the original
// append-based delete left a duplicate pointer past len).
func (iq *ImportQueue) Delete(requestID string) bool {
	iq.mu.Lock()
	defer iq.mu.Unlock()
	for i, req := range iq.queue {
		if req.Id == requestID {
			n := len(iq.queue)
			copy(iq.queue[i:], iq.queue[i+1:])
			iq.queue[n-1] = nil // drop the dangling reference
			iq.queue = iq.queue[:n-1]
			return true
		}
	}
	return false
}
// DeleteWhere removes every request matching predicate and returns how
// many were deleted. Iteration is back-to-front so index math stays
// valid, and each vacated tail slot is cleared so deleted requests do
// not linger in the backing array.
func (iq *ImportQueue) DeleteWhere(predicate func(*ImportRequest) bool) int {
	iq.mu.Lock()
	defer iq.mu.Unlock()
	deleted := 0
	for i := len(iq.queue) - 1; i >= 0; i-- {
		if predicate(iq.queue[i]) {
			n := len(iq.queue)
			copy(iq.queue[i:], iq.queue[i+1:])
			iq.queue[n-1] = nil // drop the dangling reference
			iq.queue = iq.queue[:n-1]
			deleted++
		}
	}
	return deleted
}
// Find returns the queued request with the given ID without removing it,
// or nil when no such request is queued.
func (iq *ImportQueue) Find(requestID string) *ImportRequest {
	iq.mu.RLock()
	defer iq.mu.RUnlock()
	var found *ImportRequest
	for _, candidate := range iq.queue {
		if candidate.Id == requestID {
			found = candidate
			break
		}
	}
	return found
}
// Size reports the number of queued requests.
func (iq *ImportQueue) Size() int {
	iq.mu.RLock()
	n := len(iq.queue)
	iq.mu.RUnlock()
	return n
}

// IsEmpty reports whether nothing is currently queued.
func (iq *ImportQueue) IsEmpty() bool {
	return iq.Size() == 0
}

// List returns a snapshot copy of the queue contents so callers can
// iterate without racing concurrent queue mutations.
func (iq *ImportQueue) List() []*ImportRequest {
	iq.mu.RLock()
	defer iq.mu.RUnlock()
	snapshot := make([]*ImportRequest, len(iq.queue))
	copy(snapshot, iq.queue)
	return snapshot
}

// Close cancels the queue's context and wakes any blocked waiters.
func (iq *ImportQueue) Close() {
	iq.cancel()
	iq.cond.Broadcast()
}

View File

@@ -1,92 +0,0 @@
package store
import (
"context"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/pkg/arr"
"github.com/sirrobot01/decypharr/pkg/debrid"
"github.com/sirrobot01/decypharr/pkg/repair"
"sync"
"time"
)
// Store is the application-wide torrent/import coordinator, bundling the
// arr and debrid storages, the repair pipeline, torrent persistence and
// the queue used when providers are at download capacity.
type Store struct {
	repair             *repair.Repair
	arr                *arr.Storage
	debrid             *debrid.Storage
	importsQueue       *ImportQueue // Queued import requests(probably from too_many_active_downloads)
	torrents           *TorrentStorage
	logger             zerolog.Logger
	refreshInterval    time.Duration // poll interval for debrid status checks
	skipPreCache       bool
	downloadSemaphore  chan struct{} // bounds concurrent downloads
	removeStalledAfter time.Duration // Duration after which stalled torrents are removed
}

var (
	instance *Store    // process-wide singleton, guarded by once
	once     sync.Once // ensures instance is built exactly once
)
// Get returns the singleton instance,
// building the Store on first call from the current config: arr storage,
// debrid storage, torrent persistence, and queue/concurrency settings.
func Get() *Store {
	once.Do(func() {
		arrs := arr.NewStorage()
		deb := debrid.NewStorage()
		cfg := config.Get()
		instance = &Store{
			repair:            repair.New(arrs, deb),
			arr:               arrs,
			debrid:            deb,
			torrents:          newTorrentStorage(cfg.TorrentsFile()),
			logger:            logger.Default(), // Use default logger [decypharr]
			importsQueue:      NewImportQueue(context.Background(), 1000),
			refreshInterval:   10 * time.Minute, // Default refresh interval
			skipPreCache:      false,            // Default skip pre-cache
			downloadSemaphore: make(chan struct{}, 5), // Default max concurrent downloads
		}
		// QBitTorrent settings override the defaults when configured.
		if cfg.QBitTorrent != nil {
			instance.refreshInterval = time.Duration(cfg.QBitTorrent.RefreshInterval) * time.Minute
			instance.skipPreCache = cfg.QBitTorrent.SkipPreCache
			instance.downloadSemaphore = make(chan struct{}, cfg.QBitTorrent.MaxDownloads)
		}
		// Optional stalled-torrent TTL; unparseable durations are ignored.
		if cfg.RemoveStalledAfter != "" {
			removeStalledAfter, err := time.ParseDuration(cfg.RemoveStalledAfter)
			if err == nil {
				instance.removeStalledAfter = removeStalledAfter
			}
		}
	})
	return instance
}
// Reset shuts the singleton down so the next Get rebuilds it from the
// current configuration.
//
// NOTE(review): closing downloadSemaphore while a download still holds a
// slot would panic on that goroutine's release-send — confirm Reset only
// runs after all workers have stopped.
func Reset() {
	if instance != nil {
		if instance.debrid != nil {
			instance.debrid.Reset()
		}
		if instance.importsQueue != nil {
			instance.importsQueue.Close()
		}
		close(instance.downloadSemaphore)
	}
	once = sync.Once{}
	instance = nil
}
// Arr returns the store's arr instance storage.
func (s *Store) Arr() *arr.Storage {
	return s.arr
}

// Debrid returns the store's debrid storage.
func (s *Store) Debrid() *debrid.Storage {
	return s.debrid
}

// Repair returns the store's repair pipeline.
func (s *Store) Repair() *repair.Repair {
	return s.repair
}

// Torrents returns the store's torrent storage.
func (s *Store) Torrents() *TorrentStorage {
	return s.torrents
}

View File

@@ -1,297 +0,0 @@
package store
import (
"cmp"
"context"
"errors"
"fmt"
"math"
"os"
"path/filepath"
"time"
"github.com/sirrobot01/decypharr/internal/request"
"github.com/sirrobot01/decypharr/internal/utils"
debridTypes "github.com/sirrobot01/decypharr/pkg/debrid"
"github.com/sirrobot01/decypharr/pkg/debrid/types"
)
// AddTorrent submits an import request to the selected debrid provider
// and registers the resulting torrent in the store. When the provider
// reports too_many_active_downloads, the request is queued for later
// instead of failing. File processing continues asynchronously in
// processFiles so the caller gets a fast response.
func (s *Store) AddTorrent(ctx context.Context, importReq *ImportRequest) error {
	torrent := createTorrentFromMagnet(importReq)
	debridTorrent, err := debridTypes.Process(ctx, s.debrid, importReq.SelectedDebrid, importReq.Magnet, importReq.Arr, importReq.Action, importReq.DownloadUncached)
	if err != nil {
		var httpErr *utils.HTTPError
		if ok := errors.As(err, &httpErr); ok {
			switch httpErr.Code {
			case "too_many_active_downloads":
				// Handle too much active downloads error
				s.logger.Warn().Msgf("Too many active downloads for %s, adding to queue", importReq.Magnet.Name)
				if err := s.addToQueue(importReq); err != nil {
					s.logger.Error().Err(err).Msgf("Failed to add %s to queue", importReq.Magnet.Name)
					return err
				}
				torrent.State = "queued"
			default:
				// Unhandled error, return it, caller logs it
				return err
			}
		} else {
			// Unhandled error, return it, caller logs it
			return err
		}
	}
	torrent = s.partialTorrentUpdate(torrent, debridTorrent)
	s.torrents.AddOrUpdate(torrent)
	go s.processFiles(torrent, debridTorrent, importReq) // We can send async for file processing not to delay the response
	return nil
}
// processFiles drives a freshly added torrent through the post-download
// pipeline. It polls the debrid provider until the torrent reports
// "downloaded" (or leaves every downloading status), then applies the
// request's action — "symlink", "download" or "none" — and reports the
// outcome through the import-request callback, a Discord notification
// and an arr refresh. Intended to run in its own goroutine.
//
// BUGFIX: in the "symlink" branch an empty symlink path previously
// invoked onFailed and then FELL THROUGH to onSuccess; it now returns
// after onFailed, matching the "download" branch.
func (s *Store) processFiles(torrent *Torrent, debridTorrent *types.Torrent, importReq *ImportRequest) {
	if debridTorrent == nil {
		// Early return if debridTorrent is nil
		return
	}
	deb := s.debrid.Debrid(debridTorrent.Debrid)
	client := deb.Client()
	downloadingStatuses := client.GetDownloadingStatus()
	_arr := importReq.Arr
	backoff := time.NewTimer(s.refreshInterval)
	defer backoff.Stop()
	for debridTorrent.Status != "downloaded" {
		dbT, err := client.CheckStatus(debridTorrent)
		if err != nil {
			s.logger.Error().
				Str("torrent_id", debridTorrent.Id).
				Str("torrent_name", debridTorrent.Name).
				Err(err).
				Msg("Error checking torrent status")
			if dbT != nil && dbT.Id != "" {
				// Delete the torrent if it was not downloaded
				go func() {
					_ = client.DeleteTorrent(dbT.Id)
				}()
			}
			s.logger.Error().Msgf("Error checking status: %v", err)
			s.markTorrentAsFailed(torrent)
			go func() {
				_arr.Refresh()
			}()
			importReq.markAsFailed(err, torrent, debridTorrent)
			return
		}
		debridTorrent = dbT
		torrent = s.partialTorrentUpdate(torrent, debridTorrent)
		// Exit the loop once finished or no longer in a downloading
		// status, to prevent polling (and memory buildup) forever.
		if debridTorrent.Status == "downloaded" || !utils.Contains(downloadingStatuses, debridTorrent.Status) {
			break
		}
		<-backoff.C
		// Re-arm the poll timer, capped at 30 seconds. (Note: this value
		// is constant across iterations, not a growing backoff.)
		backoff.Reset(min(s.refreshInterval*2, 30*time.Second))
	}
	var torrentSymlinkPath string
	var err error
	debridTorrent.Arr = _arr
	// Check if debrid supports webdav by checking cache
	timer := time.Now()
	// onFailed marks both the store record and the import request as
	// failed and deletes the remote torrent in the background.
	onFailed := func(err error) {
		s.markTorrentAsFailed(torrent)
		go func() {
			if deleteErr := client.DeleteTorrent(debridTorrent.Id); deleteErr != nil {
				s.logger.Warn().Err(deleteErr).Msgf("Failed to delete torrent %s", debridTorrent.Id)
			}
		}()
		s.logger.Error().Err(err).Msgf("Error occured while processing torrent %s", debridTorrent.Name)
		importReq.markAsFailed(err, torrent, debridTorrent)
	}
	// onSuccess persists the final path and fires async notifications.
	onSuccess := func(torrentSymlinkPath string) {
		torrent.TorrentPath = torrentSymlinkPath
		s.updateTorrent(torrent, debridTorrent)
		s.logger.Info().Msgf("Adding %s took %s", debridTorrent.Name, time.Since(timer))
		go importReq.markAsCompleted(torrent, debridTorrent) // Mark the import request as completed, send callback if needed
		go func() {
			if err := request.SendDiscordMessage("download_complete", "success", torrent.discordContext()); err != nil {
				s.logger.Error().Msgf("Error sending discord message: %v", err)
			}
		}()
		go func() {
			_arr.Refresh()
		}()
	}
	switch importReq.Action {
	case "symlink":
		// Symlink action, we will create a symlink to the torrent
		s.logger.Debug().Msgf("Post-Download Action: Symlink")
		cache := deb.Cache()
		if cache != nil {
			s.logger.Info().Msgf("Using internal webdav for %s", debridTorrent.Debrid)
			// Use webdav to download the file
			if err := cache.Add(debridTorrent); err != nil {
				onFailed(err)
				return
			}
			rclonePath := filepath.Join(debridTorrent.MountPath, cache.GetTorrentFolder(debridTorrent)) // /mnt/remote/realdebrid/MyTVShow
			torrentFolderNoExt := utils.RemoveExtension(debridTorrent.Name)
			torrentSymlinkPath, err = s.createSymlinksWebdav(torrent, debridTorrent, rclonePath, torrentFolderNoExt) // /mnt/symlinks/{category}/MyTVShow/
		} else {
			// User is using either zurg or debrid webdav
			torrentSymlinkPath, err = s.processSymlink(torrent, debridTorrent) // /mnt/symlinks/{category}/MyTVShow/
		}
		if err != nil {
			onFailed(err)
			return
		}
		if torrentSymlinkPath == "" {
			err = fmt.Errorf("symlink path is empty for %s", debridTorrent.Name)
			onFailed(err)
			return // BUGFIX: previously fell through to onSuccess
		}
		onSuccess(torrentSymlinkPath)
		return
	case "download":
		// Download action, we will download the torrent to the specified folder
		// Generate download links
		s.logger.Debug().Msgf("Post-Download Action: Download")
		if err := client.GetFileDownloadLinks(debridTorrent); err != nil {
			onFailed(err)
			return
		}
		torrentSymlinkPath, err = s.processDownload(torrent, debridTorrent)
		if err != nil {
			onFailed(err)
			return
		}
		if torrentSymlinkPath == "" {
			err = fmt.Errorf("download path is empty for %s", debridTorrent.Name)
			onFailed(err)
			return
		}
		onSuccess(torrentSymlinkPath)
	case "none":
		s.logger.Debug().Msgf("Post-Download Action: None")
		// No action, just update the torrent and mark it as completed
		onSuccess(torrent.TorrentPath)
	default:
		// Unknown action: no post-download step is performed.
	}
}
// markTorrentAsFailed flags t as errored, persists it, and fires a
// best-effort Discord notification in the background.
func (s *Store) markTorrentAsFailed(t *Torrent) *Torrent {
	t.State = "error"
	s.torrents.AddOrUpdate(t)
	go func() {
		err := request.SendDiscordMessage("download_failed", "error", t.discordContext())
		if err != nil {
			s.logger.Error().Msgf("Error sending discord message: %v", err)
		}
	}()
	return t
}
// partialTorrentUpdate copies progress/accounting fields from the debrid
// provider's view of the torrent onto the qBittorrent-style record t.
// A nil debridTorrent leaves t untouched. Progress is normalized to
// [0,1] and guarded against NaN/Inf from bad provider data.
func (s *Store) partialTorrentUpdate(t *Torrent, debridTorrent *types.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	// Fall back to "now" when the provider's timestamp is unparseable.
	addedOn, err := time.Parse(time.RFC3339, debridTorrent.Added)
	if err != nil {
		addedOn = time.Now()
	}
	totalSize := debridTorrent.Bytes
	progress := (cmp.Or(debridTorrent.Progress, 0.0)) / 100.0
	if math.IsNaN(progress) || math.IsInf(progress, 0) {
		progress = 0
	}
	sizeCompleted := int64(float64(totalSize) * progress)
	// (The previous `if Speed != 0 { speed = Speed }` guard was a no-op:
	// copying zero is the same as leaving the zero value.)
	speed := debridTorrent.Speed
	var eta int
	if speed != 0 {
		eta = int((totalSize - sizeCompleted) / speed)
	}
	// Size the slice from the same list we iterate (the original sized it
	// from debridTorrent.Files while ranging over GetFiles()).
	debridFiles := debridTorrent.GetFiles()
	files := make([]*File, 0, len(debridFiles))
	for index, file := range debridFiles {
		files = append(files, &File{
			Index: index,
			Name:  file.Path,
			Size:  file.Size,
		})
	}
	t.DebridID = debridTorrent.Id
	t.Name = debridTorrent.Name
	t.AddedOn = addedOn.Unix()
	t.Files = files
	t.Debrid = debridTorrent.Debrid
	t.Size = totalSize
	t.Completed = sizeCompleted
	t.NumSeeds = debridTorrent.Seeders
	// The provider reports a single byte count and rate; mirror them into
	// both directions and the session counters, as qBit clients expect.
	t.Downloaded = sizeCompleted
	t.DownloadedSession = sizeCompleted
	t.Uploaded = sizeCompleted
	t.UploadedSession = sizeCompleted
	t.AmountLeft = totalSize - sizeCompleted
	t.Progress = progress
	t.Eta = eta
	t.Dlspeed = speed
	t.Upspeed = speed
	t.ContentPath = filepath.Join(t.SavePath, t.Name) + string(os.PathSeparator)
	return t
}
// updateTorrent refreshes t from the debrid provider and blocks until
// the torrent is ready (final path assigned and fully transferred) or a
// 10-minute deadline passes, persisting state transitions along the way.
//
// BUGFIX: the deadline is now a single timer created before the loop.
// Previously `time.After(10 * time.Minute)` was evaluated inside the
// select on every iteration, so the 100ms ticker always won and a fresh
// 10-minute timer was created each pass — the timeout could never fire.
func (s *Store) updateTorrent(t *Torrent, debridTorrent *types.Torrent) *Torrent {
	if debridTorrent == nil {
		return t
	}
	if debridClient := s.debrid.Clients()[debridTorrent.Debrid]; debridClient != nil {
		if debridTorrent.Status != "downloaded" {
			_ = debridClient.UpdateTorrent(debridTorrent)
		}
	}
	t = s.partialTorrentUpdate(t, debridTorrent)
	t.ContentPath = t.TorrentPath + string(os.PathSeparator)
	if t.IsReady() {
		t.State = "pausedUP"
		s.torrents.Update(t)
		return t
	}
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	deadline := time.NewTimer(10 * time.Minute)
	defer deadline.Stop()
	for {
		select {
		case <-ticker.C:
			if t.IsReady() {
				t.State = "pausedUP"
				s.torrents.Update(t)
				return t
			}
			// NOTE(review): this recursion re-polls the provider; each
			// level blocks until ready or its own deadline — confirm the
			// recursion depth stays bounded in practice.
			t = s.updateTorrent(t, debridTorrent)
		case <-deadline.C:
			return t
		}
	}
}

View File

@@ -1,88 +0,0 @@
package store
import (
"fmt"
"sync"
)
// File is the qBittorrent-API-shaped view of a single file inside a
// torrent, as returned by the compatibility endpoints.
type File struct {
	Index        int     `json:"index,omitempty"`
	Name         string  `json:"name,omitempty"`
	Size         int64   `json:"size,omitempty"`
	Progress     int     `json:"progress,omitempty"`
	Priority     int     `json:"priority,omitempty"`
	IsSeed       bool    `json:"is_seed,omitempty"`
	PieceRange   []int   `json:"piece_range,omitempty"`
	Availability float64 `json:"availability,omitempty"`
}

// Torrent is the qBittorrent-API-shaped torrent record the store keeps
// and serves, augmented with debrid bookkeeping (DebridID, Debrid,
// TorrentPath). It embeds sync.Mutex, so it must not be copied — always
// pass *Torrent.
type Torrent struct {
	ID                string  `json:"id"`
	DebridID          string  `json:"debrid_id"`
	Debrid            string  `json:"debrid"`
	TorrentPath       string  `json:"-"` // final symlink/download path; not serialized
	Files             []*File `json:"files,omitempty"`
	AddedOn           int64   `json:"added_on,omitempty"`
	AmountLeft        int64   `json:"amount_left"`
	AutoTmm           bool    `json:"auto_tmm"`
	Availability      float64 `json:"availability,omitempty"`
	Category          string  `json:"category,omitempty"`
	Completed         int64   `json:"completed"`
	CompletionOn      int     `json:"completion_on,omitempty"`
	ContentPath       string  `json:"content_path"`
	DlLimit           int     `json:"dl_limit"`
	Dlspeed           int64   `json:"dlspeed"`
	Downloaded        int64   `json:"downloaded"`
	DownloadedSession int64   `json:"downloaded_session"`
	Eta               int     `json:"eta"`
	FlPiecePrio       bool    `json:"f_l_piece_prio,omitempty"`
	ForceStart        bool    `json:"force_start,omitempty"`
	Hash              string  `json:"hash"`
	LastActivity      int64   `json:"last_activity,omitempty"`
	MagnetUri         string  `json:"magnet_uri,omitempty"`
	MaxRatio          int     `json:"max_ratio,omitempty"`
	MaxSeedingTime    int     `json:"max_seeding_time,omitempty"`
	Name              string  `json:"name,omitempty"`
	NumComplete       int     `json:"num_complete,omitempty"`
	NumIncomplete     int     `json:"num_incomplete,omitempty"`
	NumLeechs         int     `json:"num_leechs,omitempty"`
	NumSeeds          int     `json:"num_seeds,omitempty"`
	Priority          int     `json:"priority,omitempty"`
	Progress          float64 `json:"progress"`
	Ratio             int     `json:"ratio,omitempty"`
	RatioLimit        int     `json:"ratio_limit,omitempty"`
	SavePath          string  `json:"save_path"`
	SeedingTimeLimit  int     `json:"seeding_time_limit,omitempty"`
	SeenComplete      int64   `json:"seen_complete,omitempty"`
	SeqDl             bool    `json:"seq_dl"`
	Size              int64   `json:"size,omitempty"`
	State             string  `json:"state,omitempty"`
	SuperSeeding      bool    `json:"super_seeding"`
	Tags              string  `json:"tags,omitempty"`
	TimeActive        int     `json:"time_active,omitempty"`
	TotalSize         int64   `json:"total_size,omitempty"`
	Tracker           string  `json:"tracker,omitempty"`
	UpLimit           int64   `json:"up_limit,omitempty"`
	Uploaded          int64   `json:"uploaded,omitempty"`
	UploadedSession   int64   `json:"uploaded_session,omitempty"`
	Upspeed           int64   `json:"upspeed,omitempty"`
	Source            string  `json:"source,omitempty"`
	sync.Mutex
}
// IsReady reports whether the torrent is fully transferred (nothing left
// or 100% progress) and has had a final path assigned.
func (t *Torrent) IsReady() bool {
	return (t.AmountLeft <= 0 || t.Progress == 1) && t.TorrentPath != ""
}

// discordContext renders the torrent's key fields as a Discord-markdown
// snippet for notification messages.
func (t *Torrent) discordContext() string {
	format := `
**Name:** %s
**Arr:** %s
**Hash:** %s
**MagnetURI:** %s
**Debrid:** %s
`
	return fmt.Sprintf(format, t.Name, t.Category, t.Hash, t.MagnetUri, t.Debrid)
}

View File

@@ -1,141 +0,0 @@
package usenet

import (
	"sort"
	"sync/atomic"
	"time"

	"github.com/chrisfarms/yenc"
	"github.com/puzpuzpuz/xsync/v4"
	"github.com/rs/zerolog"
)
// SegmentCache provides intelligent caching for NNTP segments
// with approximate LRU eviction bounded by maxSize bytes of decoded data.
type SegmentCache struct {
	cache       *xsync.Map[string, *CachedSegment]
	logger      zerolog.Logger
	maxSize     int64        // byte budget for cached decoded segment data
	currentSize atomic.Int64 // bytes currently accounted to the cache
}

// CachedSegment represents a cached segment with metadata
// used for LRU eviction decisions.
type CachedSegment struct {
	MessageID    string    `json:"message_id"`
	Data         []byte    `json:"data"`
	DecodedSize  int64     `json:"decoded_size"`  // Actual size after yEnc decoding
	DeclaredSize int64     `json:"declared_size"` // Size declared in NZB
	CachedAt     time.Time `json:"cached_at"`
	AccessCount  int64     `json:"access_count"`
	LastAccess   time.Time `json:"last_access"`
	FileBegin    int64     `json:"file_begin"` // Start byte offset in the file
	FileEnd      int64     `json:"file_end"`   // End byte offset in the file
}
// NewSegmentCache creates a new segment cache
// with a 50MB default byte budget.
func NewSegmentCache(logger zerolog.Logger) *SegmentCache {
	sc := &SegmentCache{
		cache:   xsync.NewMap[string, *CachedSegment](),
		logger:  logger.With().Str("component", "segment_cache").Logger(),
		maxSize: 50 * 1024 * 1024, // default cap: 50MB (comment previously said 100MB)
	}
	return sc
}
// Get retrieves a segment from cache, updating its LRU bookkeeping.
//
// NOTE(review): AccessCount and LastAccess are mutated without
// synchronization; concurrent Gets for the same message ID race on these
// fields. Confirm approximate LRU statistics are acceptable here.
func (sc *SegmentCache) Get(messageID string) (*CachedSegment, bool) {
	segment, found := sc.cache.Load(messageID)
	if !found {
		return nil, false
	}
	segment.AccessCount++
	segment.LastAccess = time.Now()
	return segment, true
}
// Put stores a segment in cache with intelligent size management:
// when adding the decoded data would exceed maxSize, least-recently-used
// entries are evicted first. The yEnc body is copied so the cache owns
// its bytes.
//
// NOTE(review): the buffer is sized by data.Size but filled from
// data.Body — confirm yenc guarantees len(Body) == Size, otherwise the
// tail of Data stays zeroed.
func (sc *SegmentCache) Put(messageID string, data *yenc.Part, declaredSize int64) {
	dataSize := data.Size
	currentSize := sc.currentSize.Load()
	// Check if we need to make room
	wouldExceed := (currentSize + dataSize) > sc.maxSize
	if wouldExceed {
		sc.evictLRU(dataSize)
	}
	segment := &CachedSegment{
		MessageID:    messageID,
		Data:         make([]byte, data.Size),
		DecodedSize:  dataSize,
		DeclaredSize: declaredSize,
		CachedAt:     time.Now(),
		AccessCount:  1,
		LastAccess:   time.Now(),
	}
	copy(segment.Data, data.Body)
	sc.cache.Store(messageID, segment)
	sc.currentSize.Add(dataSize)
}
// evictLRU evicts least-recently-used segments until at least
// neededSpace bytes have been reclaimed, then debits the freed bytes
// from currentSize.
//
// BUGFIX: the original never decremented currentSize after evicting, so
// the accounted size only ever grew; once it crossed maxSize every Put
// triggered eviction forever. Also replaces the hand-rolled O(n²)
// bubble sort with sort.Slice.
func (sc *SegmentCache) evictLRU(neededSpace int64) {
	if neededSpace <= 0 {
		return // No need to evict if no space is needed
	}
	if sc.cache.Size() == 0 {
		return // Nothing to evict
	}
	// Snapshot the cache so we can order entries by last access time.
	type segmentInfo struct {
		key        string
		segment    *CachedSegment
		lastAccess time.Time
	}
	segments := make([]segmentInfo, 0, sc.cache.Size())
	sc.cache.Range(func(key string, value *CachedSegment) bool {
		segments = append(segments, segmentInfo{
			key:        key,
			segment:    value,
			lastAccess: value.LastAccess,
		})
		return true // continue iteration
	})
	// Oldest first.
	sort.Slice(segments, func(i, j int) bool {
		return segments[i].lastAccess.Before(segments[j].lastAccess)
	})
	// Evict segments until we have freed enough space.
	freedSpace := int64(0)
	for _, seg := range segments {
		if freedSpace >= neededSpace {
			break
		}
		sc.cache.Delete(seg.key)
		freedSpace += int64(len(seg.segment.Data))
	}
	// Debit the reclaimed bytes from the accounted cache size.
	sc.currentSize.Add(-freedSpace)
}
// Clear removes all cached segments
// and resets the accounted cache size to zero.
func (sc *SegmentCache) Clear() {
	sc.cache.Clear()
	sc.currentSize.Store(0)
}
// Delete removes a specific segment from cache and debits its bytes from
// the accounted cache size (the original left currentSize inflated,
// shrinking the cache's effective capacity over time).
func (sc *SegmentCache) Delete(messageID string) {
	if segment, ok := sc.cache.LoadAndDelete(messageID); ok {
		sc.currentSize.Add(-int64(len(segment.Data)))
	}
}

View File

@@ -1,281 +0,0 @@
package usenet
import (
"context"
"errors"
"fmt"
"github.com/rs/zerolog"
"github.com/sirrobot01/decypharr/internal/config"
"github.com/sirrobot01/decypharr/internal/logger"
"github.com/sirrobot01/decypharr/internal/nntp"
"github.com/sirrobot01/decypharr/internal/utils"
"golang.org/x/sync/errgroup"
"os"
"path/filepath"
"time"
)
// DownloadWorker manages concurrent NZB downloads
// (availability probing plus download/symlink post-processing).
type DownloadWorker struct {
	client       *nntp.Client
	processor    *Processor
	logger       zerolog.Logger
	skipPreCache bool   // Skip pre-caching for faster processing
	mountFolder  string // Folder where downloads are mounted
}

// DownloadJob represents a download job for an NZB
// together with its post-download action and completion callback.
type DownloadJob struct {
	NZB         *NZB
	Action      string // "download", "symlink" or "none"
	Priority    int
	Callback    func(*NZB, error) // invoked when processing finishes
	DownloadDir string
}

// NewDownloadWorker creates a new download worker
// wired to the given NNTP client and processor, with pre-cache and
// mount-folder settings taken from the usenet config.
func NewDownloadWorker(config *config.Usenet, client *nntp.Client, processor *Processor) *DownloadWorker {
	dw := &DownloadWorker{
		processor:    processor,
		client:       client,
		logger:       logger.New("usenet-download-worker"),
		skipPreCache: config.SkipPreCache,
		mountFolder:  config.MountFolder,
	}
	return dw
}
// CheckAvailability probes the NNTP server for a sample of segments from
// the NZB's first file (first and last — see getSampleSegments) to judge
// whether the post is retrievable, checking concurrently up to the
// connection-pool limit. On success the NZB record is updated in
// storage (persistence failure is logged, not fatal).
//
// BUGFIX: guards against an NZB with no files before indexing Files[0]
// (previously an index-out-of-range panic).
func (dw *DownloadWorker) CheckAvailability(ctx context.Context, job *DownloadJob) error {
	dw.logger.Debug().
		Str("nzb_id", job.NZB.ID).
		Msg("Checking NZB availability")
	if len(job.NZB.Files) == 0 {
		return fmt.Errorf("no files found in NZB")
	}
	// Grab first file to extract message IDs
	firstFile := job.NZB.Files[0]
	if len(firstFile.Segments) == 0 {
		return fmt.Errorf("no segments found in first file of NZB")
	}
	segments := firstFile.Segments
	// Smart sampling: probe a representative subset of segments.
	samplesToCheck := dw.getSampleSegments(segments)
	// Create error group for concurrent checking
	g, gCtx := errgroup.WithContext(ctx)
	// Limit concurrent goroutines to prevent overwhelming the NNTP server
	maxConcurrency := len(samplesToCheck)
	if maxConns := dw.client.MinimumMaxConns(); maxConns < maxConcurrency {
		maxConcurrency = maxConns
	}
	g.SetLimit(maxConcurrency)
	// Check each segment concurrently
	for i, segment := range samplesToCheck {
		segment := segment // capture loop variable (pre-Go 1.22 semantics)
		segmentNum := i + 1
		g.Go(func() error {
			select {
			case <-gCtx.Done():
				return gCtx.Err() // Return if context is canceled
			default:
			}
			conn, cleanup, err := dw.client.GetConnection(gCtx)
			if err != nil {
				return fmt.Errorf("failed to get NNTP connection: %w", err)
			}
			defer cleanup() // Ensure connection is returned to the pool
			// Check segment availability
			seg, err := conn.GetSegment(segment.MessageID, segmentNum)
			if err != nil {
				return fmt.Errorf("failed to check segment %d availability: %w", segmentNum, err)
			}
			if seg == nil {
				return fmt.Errorf("segment %d not found", segmentNum)
			}
			return nil
		})
	}
	// Wait for all checks to complete
	if err := g.Wait(); err != nil {
		return fmt.Errorf("availability check failed: %w", err)
	}
	// Update storage with availability info
	if err := dw.processor.store.Update(job.NZB); err != nil {
		dw.logger.Warn().Err(err).Msg("Failed to update NZB with availability info")
	}
	return nil
}
// Process executes a download job according to its Action ("download",
// "symlink", "none"; anything else defaults to symlink), marks the NZB
// completed on success, and always reports the final error to the job's
// callback.
//
// BUGFIX: the callback was previously invoked via
// `defer func(err error){...}(err)`, which evaluated err at defer time —
// the callback therefore always received nil. The deferred closure now
// reads err at return time, and the MarkAsCompleted result is assigned
// to err so the callback also sees persistence failures.
func (dw *DownloadWorker) Process(ctx context.Context, job *DownloadJob) error {
	var (
		finalPath string
		err       error
	)
	defer func() {
		if job.Callback != nil {
			job.Callback(job.NZB, err)
		}
	}()

	switch job.Action {
	case "download":
		finalPath, err = dw.downloadNZB(ctx, job)
	case "symlink":
		finalPath, err = dw.symlinkNZB(ctx, job)
	case "none":
		return nil
	default:
		// Use symlink as default action
		finalPath, err = dw.symlinkNZB(ctx, job)
	}
	if err != nil {
		return err
	}
	if finalPath == "" {
		err = fmt.Errorf("final path is empty after processing job: %s", job.Action)
		return err
	}
	// Use atomic transition to completed state
	err = dw.processor.store.MarkAsCompleted(job.NZB.ID, finalPath)
	return err
}
// downloadNZB downloads an NZB to the specified directory.
// Currently a stub: it logs intent and returns the target directory
// without transferring any data.
func (dw *DownloadWorker) downloadNZB(ctx context.Context, job *DownloadJob) (string, error) {
	dw.logger.Info().
		Str("nzb_id", job.NZB.ID).
		Str("download_dir", job.DownloadDir).
		Msg("Starting NZB download")
	// TODO: implement download logic
	return job.DownloadDir, nil
}
// getSampleSegments returns a representative sample of segments to probe
// for availability: all segments when there are two or fewer, otherwise
// just the first and last segment. (The old comment named a different
// function and claimed middle segments were sampled — they are not.)
func (dw *DownloadWorker) getSampleSegments(segments []NZBSegment) []NZBSegment {
	totalSegments := len(segments)
	// For small NZBs, check all segments
	if totalSegments <= 2 {
		return segments
	}
	var samplesToCheck []NZBSegment
	// Always check the first and last segments
	samplesToCheck = append(samplesToCheck, segments[0])                // First segment
	samplesToCheck = append(samplesToCheck, segments[totalSegments-1]) // Last segment
	return samplesToCheck
}
// symlinkNZB creates a symlink tree for the NZB's files under
// job.DownloadDir, pointing at the rclone/webdav mount, and returns the
// symlink directory.
//
// BUGFIX: the nil check on job.NZB now runs before job.NZB.ID is
// dereferenced for logging (previously a guaranteed nil-pointer panic
// whenever the guard would have fired).
//
// NOTE(review): the empty-string check guards filepath.Join's result,
// not dw.mountFolder itself — it can only trip when both inputs are
// empty; confirm whether the intent was to validate dw.mountFolder.
func (dw *DownloadWorker) symlinkNZB(ctx context.Context, job *DownloadJob) (string, error) {
	if job.NZB == nil {
		return "", fmt.Errorf("NZB is nil")
	}
	dw.logger.Info().
		Str("nzb_id", job.NZB.ID).
		Str("symlink_dir", job.DownloadDir).
		Msg("Creating symlinks for NZB")
	mountFolder := filepath.Join(dw.mountFolder, job.NZB.Name) // e.g. /mnt/rclone/usenet/__all__/TV_SHOW
	if mountFolder == "" {
		return "", fmt.Errorf("mount folder is empty")
	}
	symlinkPath := filepath.Join(job.DownloadDir, job.NZB.Name) // e.g. /mnt/symlinks/usenet/sonarr/TV_SHOW
	if err := os.MkdirAll(symlinkPath, 0755); err != nil {
		return "", fmt.Errorf("failed to create symlink directory: %w", err)
	}
	return dw.createSymlinksWebdav(job.NZB, mountFolder, symlinkPath)
}
// createSymlinksWebdav waits (by polling) for the NZB's files to appear in
// mountPath — a mount that materializes files asynchronously — and symlinks
// each one into symlinkPath as it shows up. It returns symlinkPath, with a
// non-nil error if not every file appeared within 30 minutes. Unless
// dw.skipPreCache is set, the linked files are handed to utils.PreCacheFile
// in a background goroutine before returning.
func (dw *DownloadWorker) createSymlinksWebdav(nzb *NZB, mountPath, symlinkPath string) (string, error) {
	// Index the expected files by name; entries are deleted as their
	// symlinks are created, so the loop below ends when the map is empty.
	files := nzb.GetFiles()
	remainingFiles := make(map[string]NZBFile)
	for _, file := range files {
		remainingFiles[file.Name] = file
	}
	// Poll the mount every 100ms; give up entirely after 30 minutes.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(30 * time.Minute)
	filePaths := make([]string, 0, len(files))
	maxLogCount := 10 // Limit the number of log messages to avoid flooding
	for len(remainingFiles) > 0 {
		select {
		case <-ticker.C:
			entries, err := os.ReadDir(mountPath)
			if err != nil {
				if maxLogCount > 0 && !errors.Is(err, os.ErrNotExist) {
					// Only log when it's not a "not found" error:
					// the mount path may simply not exist YET.
					dw.logger.Warn().
						Err(err).
						Str("mount_path", mountPath).
						Msg("Failed to read directory, retrying")
					maxLogCount--
				}
				continue
			}
			// Link every expected file present in this directory listing.
			for _, entry := range entries {
				filename := entry.Name()
				// NOTE(review): this logs at Info for every directory entry
				// on every 100ms tick — consider demoting to Debug.
				dw.logger.Info().
					Str("filename", filename).
					Msg("Checking file existence in mount path")
				if file, exists := remainingFiles[filename]; exists {
					fullFilePath := filepath.Join(mountPath, filename)
					fileSymlinkPath := filepath.Join(symlinkPath, file.Name)
					// An already-existing symlink counts as success; any other
					// symlink error leaves the file in remainingFiles so it is
					// retried on the next tick (until the 30-minute timeout).
					if err := os.Symlink(fullFilePath, fileSymlinkPath); err != nil && !os.IsExist(err) {
						dw.logger.Debug().Msgf("Failed to create symlink: %s: %v", fileSymlinkPath, err)
					} else {
						filePaths = append(filePaths, fileSymlinkPath)
						delete(remainingFiles, filename)
						dw.logger.Info().Msgf("File is ready: %s", file.Name)
					}
				}
			}
		case <-timeout:
			// Partial success still returns symlinkPath so callers can see
			// what was linked; the error signals the overall failure.
			dw.logger.Warn().Msgf("Timeout waiting for files, %d files still pending", len(remainingFiles))
			return symlinkPath, fmt.Errorf("timeout waiting for files")
		}
	}
	if dw.skipPreCache {
		return symlinkPath, nil
	}
	// Pre-cache the linked files in the background; the recover guard keeps a
	// panic inside the cache path from killing the process.
	go func() {
		defer func() {
			if r := recover(); r != nil {
				dw.logger.Error().
					Interface("panic", r).
					Str("nzbName", nzb.Name).
					Msg("Recovered from panic in pre-cache goroutine")
			}
		}()
		if err := utils.PreCacheFile(filePaths); err != nil {
			dw.logger.Error().Msgf("Failed to pre-cache file: %s", err)
		} else {
			dw.logger.Debug().Msgf("Pre-cached %d files", len(filePaths))
		}
	}()
	return symlinkPath, nil
}

View File

@@ -1,353 +0,0 @@
package usenet
import (
"errors"
"fmt"
"net"
"strings"
"sync"
"time"
)
// Sentinel errors for common NNTP failure modes. DefaultRetryConfig lists
// all four as retryable; callers match them with errors.Is.
var (
	// ErrConnectionFailed indicates the NNTP connection could not be established.
	ErrConnectionFailed = errors.New("failed to connect to NNTP server")
	// ErrServerUnavailable indicates the NNTP server is unavailable.
	ErrServerUnavailable = errors.New("NNTP server unavailable")
	// ErrRateLimitExceeded indicates the server's rate limit was hit.
	ErrRateLimitExceeded = errors.New("rate limit exceeded")
	// ErrDownloadTimeout indicates a download did not finish in time.
	ErrDownloadTimeout = errors.New("download timeout")
)
// ErrInvalidNZBf formats an NZB-validation failure, prefixing the message
// with "invalid NZB: ". The format string and args are passed straight to
// fmt.Errorf, so %w wrapping works as usual.
func ErrInvalidNZBf(format string, args ...interface{}) error {
	const prefix = "invalid NZB: "
	return fmt.Errorf(prefix+format, args...)
}
// Error represents a structured usenet error
type Error struct {
Code string
Message string
Err error
ServerAddr string
Timestamp time.Time
Retryable bool
}
func (e *Error) Error() string {
if e.ServerAddr != "" {
return fmt.Sprintf("usenet error [%s] on %s: %s", e.Code, e.ServerAddr, e.Message)
}
return fmt.Sprintf("usenet error [%s]: %s", e.Code, e.Message)
}
func (e *Error) Unwrap() error {
return e.Err
}
func (e *Error) Is(target error) bool {
if target == nil {
return false
}
return e.Err != nil && errors.Is(e.Err, target)
}
// NewUsenetError builds a structured *Error stamped with the current time;
// its Retryable flag is derived from err via isRetryableError.
func NewUsenetError(code, message string, err error) *Error {
	e := &Error{
		Code:      code,
		Message:   message,
		Err:       err,
		Timestamp: time.Now(),
	}
	e.Retryable = isRetryableError(err)
	return e
}
// NewServerError builds a structured *Error exactly like NewUsenetError,
// additionally tagged with the address of the server involved.
func NewServerError(code, message, serverAddr string, err error) *Error {
	e := NewUsenetError(code, message, err)
	e.ServerAddr = serverAddr
	return e
}
// isRetryableError determines if an error is retryable
func isRetryableError(err error) bool {
if err == nil {
return false
}
// Network errors are generally retryable
var netErr net.Error
if errors.As(err, &netErr) {
return netErr.Timeout()
}
// DNS errors are retryable
var dnsErr *net.DNSError
if errors.As(err, &dnsErr) {
return dnsErr.Temporary()
}
// Connection refused is retryable
if errors.Is(err, net.ErrClosed) {
return true
}
// Check error message for retryable conditions
errMsg := strings.ToLower(err.Error())
retryableMessages := []string{
"connection refused",
"connection reset",
"connection timed out",
"network is unreachable",
"host is unreachable",
"temporary failure",
"service unavailable",
"server overloaded",
"rate limit",
"too many connections",
}
for _, msg := range retryableMessages {
if strings.Contains(errMsg, msg) {
return true
}
}
return false
}
// RetryConfig defines retry behavior.
type RetryConfig struct {
	// MaxRetries is the retry budget; ShouldRetry refuses once
	// attempt >= MaxRetries.
	MaxRetries int
	// InitialDelay is the base delay before the first retry (see GetDelay).
	InitialDelay time.Duration
	// MaxDelay caps any delay computed by GetDelay.
	MaxDelay time.Duration
	// BackoffFactor scales the delay growth in GetDelay.
	BackoffFactor float64
	// RetryableErrors lists sentinel errors that ShouldRetry always treats
	// as retryable (matched with errors.Is).
	RetryableErrors []error
}
// DefaultRetryConfig returns the standard retry policy: 3 retries starting
// at 1s, capped at 30s, backoff factor 2.0, with this package's sentinel
// errors treated as retryable.
func DefaultRetryConfig() *RetryConfig {
	cfg := &RetryConfig{
		MaxRetries:    3,
		InitialDelay:  time.Second,
		MaxDelay:      30 * time.Second,
		BackoffFactor: 2.0,
	}
	cfg.RetryableErrors = []error{
		ErrConnectionFailed,
		ErrServerUnavailable,
		ErrRateLimitExceeded,
		ErrDownloadTimeout,
	}
	return cfg
}
// ShouldRetry reports whether err should be retried at the given attempt
// number. The attempt budget must not be exhausted, and the error must be
// classified as retryable — by a structured *Error's own Retryable flag,
// by membership in RetryableErrors, or by the generic heuristics in
// isRetryableError.
func (rc *RetryConfig) ShouldRetry(err error, attempt int) bool {
	// Out of attempts: never retry.
	if attempt >= rc.MaxRetries {
		return false
	}
	// A structured usenet error carries its own retryability decision.
	var ue *Error
	if errors.As(err, &ue) {
		return ue.Retryable
	}
	// Explicitly configured sentinel errors are always retryable.
	for _, candidate := range rc.RetryableErrors {
		if errors.Is(err, candidate) {
			return true
		}
	}
	// Fall back to heuristic classification.
	return isRetryableError(err)
}
// GetDelay calculates the delay before the given retry attempt using
// exponential backoff — InitialDelay * BackoffFactor^attempt — capped at
// MaxDelay. Attempt numbers <= 0 get InitialDelay.
//
// Fix: the previous implementation computed
// InitialDelay * attempt * BackoffFactor, which grows linearly even though
// the surrounding API (the BackoffFactor field, RetryWithBackoff's
// documented "exponential backoff") promises exponential growth.
func (rc *RetryConfig) GetDelay(attempt int) time.Duration {
	if attempt <= 0 {
		return rc.InitialDelay
	}
	// Multiply in float space and cap as soon as the limit is reached, so
	// a large attempt count can never overflow time.Duration.
	delay := float64(rc.InitialDelay)
	limit := float64(rc.MaxDelay)
	for i := 0; i < attempt; i++ {
		delay *= rc.BackoffFactor
		if delay >= limit {
			return rc.MaxDelay
		}
	}
	return time.Duration(delay)
}
// RetryWithBackoff runs operation, retrying up to config.MaxRetries extra
// times with config.GetDelay(attempt) sleeps between attempts. It returns
// nil on the first success, and the last error once the budget is spent or
// config.ShouldRetry declines.
func RetryWithBackoff(config *RetryConfig, operation func() error) error {
	var lastErr error
	for attempt := 0; ; attempt++ {
		if attempt > config.MaxRetries {
			return lastErr
		}
		// Sleep before every attempt except the first.
		if attempt > 0 {
			time.Sleep(config.GetDelay(attempt))
		}
		lastErr = operation()
		if lastErr == nil {
			return nil
		}
		if !config.ShouldRetry(lastErr, attempt) {
			return lastErr
		}
	}
}
// CircuitBreakerConfig defines circuit breaker behavior.
type CircuitBreakerConfig struct {
	// MaxFailures is the failure count at which Execute opens the breaker.
	MaxFailures int
	// ResetTimeout is how long the breaker stays open before Execute
	// allows a half-open probe.
	ResetTimeout time.Duration
	// CheckInterval is stored but not read by any method in this file —
	// presumably intended for periodic health checks; confirm before use.
	CheckInterval time.Duration
	// FailureCallback, when non-nil, is invoked in a new goroutine with
	// each error observed by Execute.
	FailureCallback func(error)
}
// CircuitBreaker implements a circuit breaker pattern for NNTP connections.
// Construct it with NewCircuitBreaker: the zero value is unusable because
// mu is a pointer and would be nil.
type CircuitBreaker struct {
	config *CircuitBreakerConfig
	// failures counts errors since the breaker last closed; guarded by mu.
	failures int
	// lastFailure is when the most recent failure happened; guarded by mu.
	lastFailure time.Time
	state string // "closed", "open", "half-open"; guarded by mu
	mu *sync.RWMutex
}
// NewCircuitBreaker creates a circuit breaker in the "closed" state. A nil
// config is replaced with defaults: 5 failures to open, a 60s reset
// timeout, and a 10s check interval.
func NewCircuitBreaker(config *CircuitBreakerConfig) *CircuitBreaker {
	if config == nil {
		config = &CircuitBreakerConfig{
			MaxFailures:   5,
			ResetTimeout:  60 * time.Second,
			CheckInterval: 10 * time.Second,
		}
	}
	cb := &CircuitBreaker{
		state: "closed",
		mu:    new(sync.RWMutex),
	}
	cb.config = config
	return cb
}
// Execute runs operation through the circuit breaker. While the breaker is
// "open" and the reset timeout has not elapsed, operation is not called and
// a "circuit_breaker_open" error wrapping ErrServerUnavailable is returned.
// A failure increments the failure count (opening the breaker once it
// reaches MaxFailures) and fires FailureCallback in a new goroutine; a
// success while "half-open" closes the breaker and clears the count.
func (cb *CircuitBreaker) Execute(operation func() error) error {
	// Snapshot the current state under a read lock.
	cb.mu.RLock()
	state := cb.state
	failures := cb.failures
	lastFailure := cb.lastFailure
	cb.mu.RUnlock()
	// Check if we should attempt reset (open -> half-open after the timeout).
	// NOTE(review): this snapshot-then-transition sequence is not atomic —
	// multiple goroutines can pass the half-open gate together and run
	// operation concurrently. Confirm whether a single-probe half-open
	// state is required here.
	if state == "open" && time.Since(lastFailure) > cb.config.ResetTimeout {
		cb.mu.Lock()
		cb.state = "half-open"
		cb.mu.Unlock()
		state = "half-open"
	}
	if state == "open" {
		return NewUsenetError("circuit_breaker_open",
			fmt.Sprintf("circuit breaker is open (failures: %d)", failures),
			ErrServerUnavailable)
	}
	// Run the operation outside the lock, then record the outcome under it.
	err := operation()
	cb.mu.Lock()
	defer cb.mu.Unlock()
	if err != nil {
		cb.failures++
		cb.lastFailure = time.Now()
		if cb.failures >= cb.config.MaxFailures {
			cb.state = "open"
		}
		if cb.config.FailureCallback != nil {
			// Fire-and-forget so a slow callback does not run under the lock.
			go func() {
				cb.config.FailureCallback(err)
			}()
		}
		return err
	}
	// Success - reset if we were in half-open state.
	if cb.state == "half-open" {
		cb.state = "closed"
		cb.failures = 0
	}
	return nil
}
// GetState returns the current circuit breaker state: "closed", "open", or
// "half-open".
func (cb *CircuitBreaker) GetState() string {
	cb.mu.RLock()
	state := cb.state
	cb.mu.RUnlock()
	return state
}
// Reset manually forces the breaker back to "closed" and clears the
// failure count.
func (cb *CircuitBreaker) Reset() {
	cb.mu.Lock()
	cb.state = "closed"
	cb.failures = 0
	cb.mu.Unlock()
}
// ValidationError describes a single failed validation check.
type ValidationError struct {
	Field   string      // name of the offending field
	Value   interface{} // the value (or a snippet of it) that failed
	Message string      // human-readable description of the failure
}

// Error implements the error interface.
func (e *ValidationError) Error() string {
	return fmt.Sprintf("validation error for field '%s': %s", e.Field, e.Message)
}

// ValidateNZBContent performs basic sanity checks on raw NZB bytes: it must
// be non-empty, at most 100MB, and contain an "<nzb" opening tag.
func ValidateNZBContent(content []byte) error {
	size := len(content)
	if size == 0 {
		return &ValidationError{
			Field:   "content",
			Value:   size,
			Message: "NZB content cannot be empty",
		}
	}
	const sizeLimit = 100 * 1024 * 1024 // 100MB
	if size > sizeLimit {
		return &ValidationError{
			Field:   "content",
			Value:   size,
			Message: "NZB content exceeds maximum size limit (100MB)",
		}
	}
	text := string(content)
	if strings.Contains(text, "<nzb") {
		return nil
	}
	// Include at most the first 100 bytes in the error for diagnosis.
	snippet := text
	if len(snippet) > 100 {
		snippet = snippet[:100]
	}
	return &ValidationError{
		Field:   "content",
		Value:   snippet,
		Message: "content does not appear to be valid NZB format",
	}
}

Some files were not shown because too many files have changed in this diff Show More